code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from telegram.ext import Updater,CommandHandler
import subprocess
# Long-polling Telegram bot; "TOKEN" is a placeholder that must be replaced
# with a real bot token before running.
updater = Updater("TOKEN",use_context = True)
def start_method(update, context):
    """Reply to /start so the user knows the bot is connected."""
    chat_id = update.message.chat_id
    context.bot.sendMessage(chat_id, "Connected !")
def run_command(update, context):
    """Execute the /run command's arguments as a shell command and reply
    with the combined stdout+stderr output.

    SECURITY NOTE: this intentionally executes arbitrary shell input from
    the chat (shell=True) -- only acceptable if the bot is restricted to
    trusted users.
    """
    command = " ".join(context.args)
    print(str(update.message.chat_id) + " : " + command)
    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    # communicate() drains both pipes concurrently, avoiding the deadlock
    # that sequential stdout.read() then stderr.read() can hit when one
    # pipe fills while the other is being read.
    stdout, stderr = proc.communicate()
    command_result = stdout + stderr
    context.bot.sendMessage(update.message.chat_id, command_result.decode())
# Route the /start and /run commands to their handlers, then begin polling
# Telegram for updates (blocks the main thread).
updater.dispatcher.add_handler(CommandHandler("start",start_method))
updater.dispatcher.add_handler(CommandHandler("run",run_command))
updater.start_polling()
| [
"subprocess.Popen",
"telegram.ext.CommandHandler",
"telegram.ext.Updater"
] | [((77, 111), 'telegram.ext.Updater', 'Updater', (['"""TOKEN"""'], {'use_context': '(True)'}), "('TOKEN', use_context=True)\n", (84, 111), False, 'from telegram.ext import Updater, CommandHandler\n'), ((381, 494), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, stdin=subprocess.PIPE)\n', (397, 494), False, 'import subprocess\n'), ((655, 692), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start_method'], {}), "('start', start_method)\n", (669, 692), False, 'from telegram.ext import Updater, CommandHandler\n'), ((724, 758), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""run"""', 'run_command'], {}), "('run', run_command)\n", (738, 758), False, 'from telegram.ext import Updater, CommandHandler\n')] |
import socket
import select
import time
import datetime
import random
from collections import deque, namedtuple
# Square board dimension (cells per side).
BOARD_LENGTH = 32
# NOTE(review): presumably the pixel size of one rendered cell on the
# client side -- unused in this server code; confirm.
OFFSET = 16
# RGB color tuples shared with the wire protocol (see encode_point).
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
# Enum-like constants: instantiated namedtuples mapping names to ints.
DIRECTIONS = namedtuple('DIRECTIONS',
                        ['Up', 'Down', 'Left', 'Right'])(0, 1, 2, 3)
SNAKE_STATE = namedtuple('SNAKE_STATE', ['Alive', 'Dead'])(0, 1)
class Snake(object):
    """State for one snake: body cells, heading, queued turns, and color."""

    def __init__(self, direction=DIRECTIONS.Right,
                 point=(0, 0, RED), color=None):
        """Create a one-cell snake at *point* heading in *direction*."""
        self.tailmax = 4                 # length the body may grow to
        self.direction = direction
        self.deque = deque()             # body cells; head is the right end
        self.deque.append(point)
        self.color = color               # None -> random color per segment
        self.nextDir = deque()           # pending turns, newest on the left
        self.state = SNAKE_STATE.Alive

    def get_color(self):
        """Return the snake's fixed color, or a random one when unset."""
        # NOTE(review): rand_color() is not defined in this module; it is
        # only reached when color is None, which no caller here uses --
        # confirm it exists elsewhere.
        return rand_color() if self.color is None else self.color

    # Keyboard-to-direction translation for the client was removed from the
    # server; the client populates nextDir over the network instead.
def find_point(spots):
    """Pick a uniformly random board cell that holds neither snake nor food."""
    while True:
        row = random.randrange(BOARD_LENGTH)
        col = random.randrange(BOARD_LENGTH)
        # Cell codes: 1 = snake body, 2 = food; anything else is free.
        if spots[row][col] not in (1, 2):
            return (row, col)
def end_condition(board, coord):
    """Return True when *coord* is off the board or hits a snake cell."""
    row, col = coord[0], coord[1]
    if not (0 <= row < BOARD_LENGTH and 0 <= col < BOARD_LENGTH):
        return True
    return board[row][col] == 1
def make_board():
    """Return a fresh BOARD_LENGTH x BOARD_LENGTH grid of zeros (all empty)."""
    return [[0] * BOARD_LENGTH for _ in range(BOARD_LENGTH)]
def network_nextDirs(net_data, num_snakes):
    """Decode a wire string like '1l0r1u' into per-snake direction deques.

    Each two-character pair is (snake id digit, direction letter); unknown
    letters are skipped. Returns a list of num_snakes deques, newest turn
    on the left.
    """
    dir_codes = {"u": DIRECTIONS.Up, "d": DIRECTIONS.Down,
                 "r": DIRECTIONS.Right, "l": DIRECTIONS.Left}
    moves = [deque() for _ in range(num_snakes)]
    while net_data != "":
        snake_id = int(net_data[0])
        code = net_data[1]
        if code in dir_codes:
            moves[snake_id].appendleft(dir_codes[code])
        net_data = net_data[2:]
    return moves
def move(snake):
    """Compute the snake's next head cell (row, col, color).

    Pops the oldest queued turn (or keeps the current heading). A turn
    directly opposite the current heading is rejected: the snake keeps
    moving the way it was going and its heading is left unchanged.
    """
    if snake.nextDir:
        requested = snake.nextDir.pop()
    else:
        requested = snake.direction
    # Peek at the head without removing it.
    head = snake.deque.pop()
    snake.deque.append(head)
    # For each direction: (row/col delta, the opposite direction).
    steps = {
        DIRECTIONS.Up: ((-1, 0), DIRECTIONS.Down),
        DIRECTIONS.Down: ((1, 0), DIRECTIONS.Up),
        DIRECTIONS.Left: ((0, -1), DIRECTIONS.Right),
        DIRECTIONS.Right: ((0, 1), DIRECTIONS.Left),
    }
    if requested not in steps:
        return head
    (d_row, d_col), opposite = steps[requested]
    if snake.direction != opposite:
        snake.direction = requested
    else:
        # Reversal attempt: continue along the current heading instead.
        d_row, d_col = -d_row, -d_col
    return (head[0] + d_row, head[1] + d_col, snake.get_color())
def encode_point(point, obj):
    """Serialize one board point for the wire, e.g. "(15 23 bk)".

    The trailing tag encodes the object: a snake color (rd/wh/bk/bl),
    food (fo), or a removed cell (rm). An unrecognized obj/color leaves
    the string unterminated, matching the protocol as implemented.
    """
    enc_str = "(" + str(point[0]) + " " + str(point[1])
    if obj == "snake":
        color_tags = {RED: " rd)", WHITE: " wh)", BLACK: " bk)", BLUE: " bl)"}
        tag = color_tags.get(point[2])
        if tag is not None:
            enc_str += tag
    elif obj == "food":
        enc_str += " fo)"
    elif obj == "remove":
        enc_str += " rm)"
    return enc_str
def network_update_board(snakes, food):
    """Rebuild the occupancy grid (no drawing): 0 empty, 1 snake, 2 food."""
    spots = [[0] * BOARD_LENGTH for _ in range(BOARD_LENGTH)]
    spots[food[0]][food[1]] = 2
    # Snake cells are written after food, so a snake overlapping the food
    # cell marks it as snake -- same precedence as the original board code.
    for snake in snakes:
        for coord in snake.deque:
            spots[coord[0]][coord[1]] = 1
    return spots
def is_food(board, point):
    """Return True when the board cell at *point* holds food (value 2)."""
    row, col = point[0], point[1]
    return board[row][col] == 2
def snake_server():
    """Run the authoritative two-player snake game over TCP.

    Accepts exactly two clients, tells each its snake id, then loops:
    broadcast board deltas, throttle to a fixed frame time, read queued
    direction changes from clients, and advance both snakes.
    """
    # NOTE(review): hard-coded LAN address -- presumably deployment-specific.
    HOST, PORT = "172.16.17.32", 9999
    num_snakes = 2
    spots = make_board()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, PORT))
    s.listen(2)
    snakes = [Snake(DIRECTIONS.Right, (0, 0, RED), RED), Snake(DIRECTIONS.Right, (5, 5, BLUE), BLUE)]
    socks = []
    for i in range(num_snakes):
        socks.append(s.accept())
        print("connected {}".format(i))
    # accept() returned (conn, addr) pairs; keep only the connections, send
    # each client its snake id, and switch to non-blocking reads.
    for i, s in enumerate(socks):
        socks[i] = s[0]
        s[0].sendall("{}".format(i).encode("utf-8"))
        s[0].setblocking(False)
    send_data = ""
    # Seed the board with each snake's starting cell and the first food.
    for snake in snakes:
        point = snake.deque.pop()
        spots[point[0]][point[1]] = 1
        send_data += encode_point(point, "snake")
        snake.deque.append(point)
    food = find_point(spots)
    send_data += encode_point(food, "food")
    time_stamp = datetime.datetime.now()
    time.sleep(0.15)  # just in case...
    while True:
        # overview of the game loop:
        # send add/remove data to clients
        # calculate framerate
        # check for data from clients
        # parse client data & append data to snake objects
        # run game logic & create send_data
        # send add/remove data to clients
        _polls, writes, _exceps = select.select([], socks, [], 0)
        for w in writes:
            w.sendall(send_data.encode("utf-8"))
        # calculate framerate: busy-wait/sleep until ~70ms have elapsed.
        # NOTE(review): the delta ignores the hour boundary (minute/second/
        # microsecond only), so it goes negative once per hour -- confirm.
        at_framerate = False
        while not at_framerate:
            temp_time = datetime.datetime.now()
            time_delta = ((temp_time.minute * 60000000 + temp_time.second * 1000000 + temp_time.microsecond) -
                          (time_stamp.minute *60000000 + time_stamp.second *1000000 + time_stamp.microsecond))
            if time_delta > 70000:
                at_framerate = True
            else:
                time.sleep((70000 - time_delta) / 1000000)
        time_stamp = temp_time
        # check for data from clients (non-blocking)
        polls, _writes, _excep = select.select(socks, [], [], 0)
        poll_data = b""
        for p in polls:
            poll_data += p.recv(1024)
        # NOTE(review): bytes != str is always True in Python 3, so this
        # branch always runs; decoding b"" yields "" which the parser
        # treats as "no moves", so it is harmless as written.
        if poll_data != "":
            poll_data = poll_data.decode("utf-8")
        # parse client data & append data to snake objects
        moves = network_nextDirs(poll_data, num_snakes) # parse next dirs
        # append network directions to internal directions
        for i, m in enumerate(moves):
            while len(m) > 0:
                snakes[i].nextDir.appendleft(m.pop())
        # run game logic & create send_data
        send_data = ""
        for snake in snakes:
            if snake.state == SNAKE_STATE.Alive:
                next_head = move(snake)
                # NOTE(review): this `break` skips processing of the
                # remaining snakes for the frame in which one dies.
                if (end_condition(spots, next_head)):
                    snake.state = SNAKE_STATE.Dead
                    break
                if is_food(spots, next_head):
                    snake.tailmax += 4
                    food = find_point(spots)
                snake.deque.append(next_head)
                send_data += encode_point(next_head, "snake")
                # Trim the tail once the body exceeds its allowed length.
                if len(snake.deque) > snake.tailmax:
                    remove_point = snake.deque.popleft()
                    send_data += encode_point(remove_point, "remove")
                send_data += encode_point(food, "food")
            elif snake.state == SNAKE_STATE.Dead:
                # Shrink the dead body one cell per frame; once it is gone,
                # respawn the snake at a random free cell.
                if len(snake.deque) == 0:
                    snake.direction = DIRECTIONS.Right
                    point = find_point(spots)
                    new_head = (point[0], point[1], snake.get_color())
                    snake.tailmax = 4
                    snake.deque.append(new_head)
                    snake.state = SNAKE_STATE.Alive
                    send_data += encode_point(new_head, "snake")
                else:
                    remove_point = snake.deque.popleft()
                    send_data += encode_point(remove_point, "remove")
        spots = network_update_board(snakes, food)
    # NOTE(review): unreachable as written (the while True above never
    # breaks) -- presumably intended cleanup on shutdown.
    for i in socks:
        i.shutdown(socket.SHUT_RDWR)
        i.close()
# Start the server only when run as a script (not on import).
if __name__ == "__main__":
    snake_server()
| [
"select.select",
"collections.namedtuple",
"collections.deque",
"socket.socket",
"random.randrange",
"time.sleep",
"datetime.datetime.now"
] | [((236, 293), 'collections.namedtuple', 'namedtuple', (['"""DIRECTIONS"""', "['Up', 'Down', 'Left', 'Right']"], {}), "('DIRECTIONS', ['Up', 'Down', 'Left', 'Right'])\n", (246, 293), False, 'from collections import deque, namedtuple\n'), ((329, 373), 'collections.namedtuple', 'namedtuple', (['"""SNAKE_STATE"""', "['Alive', 'Dead']"], {}), "('SNAKE_STATE', ['Alive', 'Dead'])\n", (339, 373), False, 'from collections import deque, namedtuple\n'), ((5891, 5940), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5904, 5940), False, 'import socket\n'), ((6651, 6674), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6672, 6674), False, 'import datetime\n'), ((6679, 6695), 'time.sleep', 'time.sleep', (['(0.15)'], {}), '(0.15)\n', (6689, 6695), False, 'import time\n'), ((585, 592), 'collections.deque', 'deque', ([], {}), '()\n', (590, 592), False, 'from collections import deque, namedtuple\n'), ((676, 683), 'collections.deque', 'deque', ([], {}), '()\n', (681, 683), False, 'from collections import deque, namedtuple\n'), ((7073, 7104), 'select.select', 'select.select', (['[]', 'socks', '[]', '(0)'], {}), '([], socks, [], 0)\n', (7086, 7104), False, 'import select\n'), ((7800, 7831), 'select.select', 'select.select', (['socks', '[]', '[]', '(0)'], {}), '(socks, [], [], 0)\n', (7813, 7831), False, 'import select\n'), ((2201, 2231), 'random.randrange', 'random.randrange', (['BOARD_LENGTH'], {}), '(BOARD_LENGTH)\n', (2217, 2231), False, 'import random\n'), ((2233, 2263), 'random.randrange', 'random.randrange', (['BOARD_LENGTH'], {}), '(BOARD_LENGTH)\n', (2249, 2263), False, 'import random\n'), ((2918, 2925), 'collections.deque', 'deque', ([], {}), '()\n', (2923, 2925), False, 'from collections import deque, namedtuple\n'), ((7295, 7318), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7316, 7318), False, 'import datetime\n'), ((7646, 7688), 'time.sleep', 
'time.sleep', (['((70000 - time_delta) / 1000000)'], {}), '((70000 - time_delta) / 1000000)\n', (7656, 7688), False, 'import time\n')] |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from config import config
# Extension singletons created unbound; each is attached to the app inside
# create_app() (Flask application-factory pattern).
bootstrap = Bootstrap()
moment = Moment()
db = SQLAlchemy()
def create_app(config_name):
    """Application factory: build a Flask app for the named configuration.

    Loads the config object, runs its init hook, binds the module-level
    extensions, registers the main blueprint, and returns (app, db).
    """
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)
    # Bind every shared extension singleton to this app instance.
    for extension in (bootstrap, moment, db):
        extension.init_app(app)
    # Imported here to avoid a circular import at module load time.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app, db
| [
"flask_sqlalchemy.SQLAlchemy",
"flask_moment.Moment",
"flask.Flask",
"flask_bootstrap.Bootstrap"
] | [((175, 186), 'flask_bootstrap.Bootstrap', 'Bootstrap', ([], {}), '()\n', (184, 186), False, 'from flask_bootstrap import Bootstrap\n'), ((196, 204), 'flask_moment.Moment', 'Moment', ([], {}), '()\n', (202, 204), False, 'from flask_moment import Moment\n'), ((210, 222), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (220, 222), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((261, 276), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (266, 276), False, 'from flask import Flask\n')] |
import pandas as pd
import glob
# Load country.csv with alpha3Code as the index; keep_default_na=False so
# literal strings such as "NA" are not converted to NaN.
country_data = pd.read_csv('../../country.csv', keep_default_na=False, index_col=0)
# The World Bank export has blank rows, so the data starts on the third row
# (adjust as needed). Use the second column, "Country Code", as the index:
# it must be a column whose values match country.csv's index.
raw_csv = glob.glob('API*.csv')[0]
data = pd.read_csv(raw_csv, header=2, index_col=1)
# Merge the country.csv columns into `data`, ordered as:
# Country Code (World Bank), alpha2Code, Country Name (World Bank), alias,
# region, flag_image_url.
data.insert(0, 'alpha2Code', '')
data.insert(2, 'alias', '')
data.insert(3, 'region', '')
data.insert(4, 'flagImageUrl', '')
data = data.assign(
    alpha2Code=country_data.loc[:, 'alpha2Code'],
    alias=country_data.loc[:, 'alias'],
    region=country_data.loc[:, 'region'],
    flagImageUrl='https://www.countryflags.io/' + country_data.loc[:, 'alpha2Code'] + '/flat/64.png'
)
# Beware: the World Bank data contains aggregate rows such as
# "Arab World","ARB" that describe regions, not countries.
# A row with an empty region would add a NaN category to displays, so drop
# any row whose region is missing.
data.dropna(subset=['region'], inplace=True)
# World Bank columns we do not need.
delete_cols = ['Indicator Name', 'Indicator Code']
data.drop(delete_cols, axis=1, inplace=True)
# Drop columns that are entirely missing (year columns exist from around
# 1960, but some contain no data at all).
data.dropna(thresh=2, how='all', axis=1, inplace=True)
data.to_csv('data.csv')
| [
"glob.glob",
"pandas.read_csv"
] | [((76, 144), 'pandas.read_csv', 'pd.read_csv', (['"""../../country.csv"""'], {'keep_default_na': '(False)', 'index_col': '(0)'}), "('../../country.csv', keep_default_na=False, index_col=0)\n", (87, 144), True, 'import pandas as pd\n'), ((296, 339), 'pandas.read_csv', 'pd.read_csv', (['raw_csv'], {'header': '(2)', 'index_col': '(1)'}), '(raw_csv, header=2, index_col=1)\n', (307, 339), True, 'import pandas as pd\n'), ((264, 285), 'glob.glob', 'glob.glob', (['"""API*.csv"""'], {}), "('API*.csv')\n", (273, 285), False, 'import glob\n')] |
# Generated by Django 2.2 on 2020-09-18 06:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean ``private`` flag (default False) to UserProfile."""

    dependencies = [
        ('accounts', '0007_contact'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='private',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"django.db.models.BooleanField"
] | [((328, 362), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (347, 362), False, 'from django.db import migrations, models\n')] |
import os
# Project root: the directory two levels above this file.
DIR = os.path.dirname(os.path.dirname(__file__))
# Directory holding the test data files.
DATA_DIR = os.path.join(DIR, 'data')
# Root directory for the test cases (relative to the working directory).
CASE_DIR = os.path.join(os.getcwd(), "testcases")
# Root directory for generated reports.
REPORT_DIR = os.path.join(os.getcwd(), 'reports')
# Root directory for configuration files.
CONFIG_DIR = os.path.join(DIR, 'config')
# Test file directory.
# NOTE(review): identical to CASE_DIR, and mixes os.getcwd() with the
# DIR-based paths above -- confirm which base directory is intended.
TESTS_DIR = os.path.join(os.getcwd(), "testcases")
| [
"os.path.dirname",
"os.path.join",
"os.getcwd"
] | [((100, 125), 'os.path.join', 'os.path.join', (['DIR', '"""data"""'], {}), "(DIR, 'data')\n", (112, 125), False, 'import os\n'), ((280, 307), 'os.path.join', 'os.path.join', (['DIR', '"""config"""'], {}), "(DIR, 'config')\n", (292, 307), False, 'import os\n'), ((48, 73), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (63, 73), False, 'import os\n'), ((163, 174), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (172, 174), False, 'import os\n'), ((228, 239), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (237, 239), False, 'import os\n'), ((342, 353), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (351, 353), False, 'import os\n')] |
import tqdm
import mediapipe
import requests
import cv2
import numpy as np
import matplotlib
class FullBodyPoseEmbedder(object):
    """Converts 3D pose landmarks into 3D embedding.

    The embedding is a fixed-order stack of pairwise landmark distance
    vectors computed from translation- and scale-normalized landmarks.
    The row order of the embedding is part of the contract: any classifier
    trained on it depends on the exact list in
    `_get_pose_distance_embedding`.
    """
    def __init__(self, torso_size_multiplier=2.5):
        # Multiplier to apply to the torso to get minimal body size.
        self._torso_size_multiplier = torso_size_multiplier
        # Names of the landmarks as they appear in the prediction.
        # Order matches the MediaPipe Pose output indices -- TODO confirm
        # against the pose model version in use.
        self._landmark_names = [
            'nose',
            'left_eye_inner', 'left_eye', 'left_eye_outer',
            'right_eye_inner', 'right_eye', 'right_eye_outer',
            'left_ear', 'right_ear',
            'mouth_left', 'mouth_right',
            'left_shoulder', 'right_shoulder',
            'left_elbow', 'right_elbow',
            'left_wrist', 'right_wrist',
            'left_pinky_1', 'right_pinky_1',
            'left_index_1', 'right_index_1',
            'left_thumb_2', 'right_thumb_2',
            'left_hip', 'right_hip',
            'left_knee', 'right_knee',
            'left_ankle', 'right_ankle',
            'left_heel', 'right_heel',
            'left_foot_index', 'right_foot_index',
        ]
    def __call__(self, landmarks):
        """Normalizes pose landmarks and converts to embedding
        Args:
          landmarks - NumPy array with 3D landmarks of shape (N, 3).
        Result:
          Numpy array with pose embedding of shape (M, 3) where `M` is the number of
          pairwise distances defined in `_get_pose_distance_embedding`.
        """
        assert landmarks.shape[0] == len(self._landmark_names), 'Unexpected number of landmarks: {}'.format(
            landmarks.shape[0])
        # Get pose landmarks. Copy so the caller's array is never mutated
        # by the in-place normalization below.
        landmarks = np.copy(landmarks)
        # Normalize landmarks.
        landmarks = self._normalize_pose_landmarks(landmarks)
        # Get embedding.
        embedding = self._get_pose_distance_embedding(landmarks)
        return embedding
    def _normalize_pose_landmarks(self, landmarks):
        """Normalizes landmarks translation and scale."""
        landmarks = np.copy(landmarks)
        # Normalize translation: center the pose on the hip midpoint.
        pose_center = self._get_pose_center(landmarks)
        landmarks -= pose_center
        # Normalize scale.
        pose_size = self._get_pose_size(landmarks, self._torso_size_multiplier)
        landmarks /= pose_size
        # Multiplication by 100 is not required, but makes it eaasier to debug.
        landmarks *= 100
        return landmarks
    def _get_pose_center(self, landmarks):
        """Calculates pose center as point between hips."""
        left_hip = landmarks[self._landmark_names.index('left_hip')]
        right_hip = landmarks[self._landmark_names.index('right_hip')]
        center = (left_hip + right_hip) * 0.5
        return center
    def _get_pose_size(self, landmarks, torso_size_multiplier):
        """Calculates pose size.
        It is the maximum of two values:
          * Torso size multiplied by `torso_size_multiplier`
          * Maximum distance from pose center to any pose landmark
        """
        # This approach uses only 2D landmarks to compute pose size.
        landmarks = landmarks[:, :2]
        # Hips center.
        left_hip = landmarks[self._landmark_names.index('left_hip')]
        right_hip = landmarks[self._landmark_names.index('right_hip')]
        hips = (left_hip + right_hip) * 0.5
        # Shoulders center.
        left_shoulder = landmarks[self._landmark_names.index('left_shoulder')]
        right_shoulder = landmarks[self._landmark_names.index('right_shoulder')]
        shoulders = (left_shoulder + right_shoulder) * 0.5
        # Torso size as the minimum body size.
        torso_size = np.linalg.norm(shoulders - hips)
        # Max dist to pose center.
        pose_center = self._get_pose_center(landmarks)
        max_dist = np.max(np.linalg.norm(landmarks - pose_center, axis=1))
        return max(torso_size * torso_size_multiplier, max_dist)
    def _get_pose_distance_embedding(self, landmarks):
        """Converts pose landmarks into 3D embedding.
        We use several pairwise 3D distances to form pose embedding. All distances
        include X and Y components with sign. We differnt types of pairs to cover
        different pose classes. Feel free to remove some or add new.
        Args:
          landmarks - NumPy array with 3D landmarks of shape (N, 3).
        Result:
          Numpy array with pose embedding of shape (M, 3) where `M` is the number of
          pairwise distances.
        """
        embedding = np.array([
            # One joint.
            self._get_distance(
                self._get_average_by_names(landmarks, 'left_hip', 'right_hip'),
                self._get_average_by_names(landmarks, 'left_shoulder', 'right_shoulder')),
            self._get_distance_by_names(landmarks, 'left_shoulder', 'left_elbow'),
            self._get_distance_by_names(landmarks, 'right_shoulder', 'right_elbow'),
            self._get_distance_by_names(landmarks, 'left_elbow', 'left_wrist'),
            self._get_distance_by_names(landmarks, 'right_elbow', 'right_wrist'),
            self._get_distance_by_names(landmarks, 'left_hip', 'left_knee'),
            self._get_distance_by_names(landmarks, 'right_hip', 'right_knee'),
            self._get_distance_by_names(landmarks, 'left_knee', 'left_ankle'),
            self._get_distance_by_names(landmarks, 'right_knee', 'right_ankle'),
            # Two joints.
            self._get_distance_by_names(landmarks, 'left_shoulder', 'left_wrist'),
            self._get_distance_by_names(landmarks, 'right_shoulder', 'right_wrist'),
            self._get_distance_by_names(landmarks, 'left_hip', 'left_ankle'),
            self._get_distance_by_names(landmarks, 'right_hip', 'right_ankle'),
            # Four joints.
            self._get_distance_by_names(landmarks, 'left_hip', 'left_wrist'),
            self._get_distance_by_names(landmarks, 'right_hip', 'right_wrist'),
            # Five joints.
            self._get_distance_by_names(landmarks, 'left_shoulder', 'left_ankle'),
            self._get_distance_by_names(landmarks, 'right_shoulder', 'right_ankle'),
            # NOTE(review): the next two pairs repeat the "Four joints"
            # hip-wrist entries above. Removing them would change the
            # embedding size M, so any trained classifier depends on the
            # duplication -- confirm it is intentional before changing.
            self._get_distance_by_names(landmarks, 'left_hip', 'left_wrist'),
            self._get_distance_by_names(landmarks, 'right_hip', 'right_wrist'),
            # Cross body.
            self._get_distance_by_names(landmarks, 'left_elbow', 'right_elbow'),
            self._get_distance_by_names(landmarks, 'left_knee', 'right_knee'),
            self._get_distance_by_names(landmarks, 'left_wrist', 'right_wrist'),
            self._get_distance_by_names(landmarks, 'left_ankle', 'right_ankle'),
            # Body bent direction.
            # self._get_distance(
            #     self._get_average_by_names(landmarks, 'left_wrist', 'left_ankle'),
            #     landmarks[self._landmark_names.index('left_hip')]),
            # self._get_distance(
            #     self._get_average_by_names(landmarks, 'right_wrist', 'right_ankle'),
            #     landmarks[self._landmark_names.index('right_hip')]),
        ])
        return embedding
    def _get_average_by_names(self, landmarks, name_from, name_to):
        # Midpoint of two named landmarks.
        lmk_from = landmarks[self._landmark_names.index(name_from)]
        lmk_to = landmarks[self._landmark_names.index(name_to)]
        return (lmk_from + lmk_to) * 0.5
    def _get_distance_by_names(self, landmarks, name_from, name_to):
        lmk_from = landmarks[self._landmark_names.index(name_from)]
        lmk_to = landmarks[self._landmark_names.index(name_to)]
        return self._get_distance(lmk_from, lmk_to)
    def _get_distance(self, lmk_from, lmk_to):
        # Signed difference vector, not a Euclidean norm.
        return lmk_to - lmk_from
| [
"numpy.copy",
"numpy.linalg.norm"
] | [((1734, 1752), 'numpy.copy', 'np.copy', (['landmarks'], {}), '(landmarks)\n', (1741, 1752), True, 'import numpy as np\n'), ((2095, 2113), 'numpy.copy', 'np.copy', (['landmarks'], {}), '(landmarks)\n', (2102, 2113), True, 'import numpy as np\n'), ((3729, 3761), 'numpy.linalg.norm', 'np.linalg.norm', (['(shoulders - hips)'], {}), '(shoulders - hips)\n', (3743, 3761), True, 'import numpy as np\n'), ((3879, 3926), 'numpy.linalg.norm', 'np.linalg.norm', (['(landmarks - pose_center)'], {'axis': '(1)'}), '(landmarks - pose_center, axis=1)\n', (3893, 3926), True, 'import numpy as np\n')] |
import pytest
from project import create_app, db
from project.models import User
@pytest.fixture(scope='module')
def new_user():
    """Module-scoped fixture: a User built with placeholder credentials."""
    return User('<EMAIL>', '<PASSWORD>')
@pytest.fixture(scope='module')
def test_client():
    """Module-scoped Flask test client backed by the test configuration."""
    flask_app = create_app('flask_test.cfg')
    # Create a test client using the Flask application configured for testing
    with flask_app.test_client() as testing_client:
        # Establish an application context
        with flask_app.app_context():
            yield testing_client  # this is where the testing happens!
@pytest.fixture(scope='module')
def init_database(test_client):
    """Create the schema, seed two users, and drop everything afterwards."""
    db.create_all()
    # Insert user data
    seeded = [
        User(email='<EMAIL>', password_plaintext='<PASSWORD>'),
        User(email='<EMAIL>', password_plaintext='PaSsWoRd'),
    ]
    for user in seeded:
        db.session.add(user)
    # Commit the changes for the users
    db.session.commit()
    yield  # this is where the testing happens!
    db.drop_all()
@pytest.fixture(scope='function')
def login_default_user(test_client):
    """Log the default user in for one test; log out again afterwards."""
    credentials = {'email': '<EMAIL>', 'password': '<PASSWORD>'}
    test_client.post('/login',
                     data=credentials,
                     follow_redirects=True)
    yield  # this is where the testing happens!
    test_client.get('/logout', follow_redirects=True)
| [
"project.models.User",
"project.db.drop_all",
"project.db.create_all",
"project.create_app",
"project.db.session.add",
"pytest.fixture",
"project.db.session.commit"
] | [((84, 114), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (98, 114), False, 'import pytest\n'), ((191, 221), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (205, 221), False, 'import pytest\n'), ((572, 602), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (586, 602), False, 'import pytest\n'), ((1047, 1079), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1061, 1079), False, 'import pytest\n'), ((142, 171), 'project.models.User', 'User', (['"""<EMAIL>"""', '"""<PASSWORD>"""'], {}), "('<EMAIL>', '<PASSWORD>')\n", (146, 171), False, 'from project.models import User\n'), ((257, 285), 'project.create_app', 'create_app', (['"""flask_test.cfg"""'], {}), "('flask_test.cfg')\n", (267, 285), False, 'from project import create_app, db\n'), ((688, 703), 'project.db.create_all', 'db.create_all', ([], {}), '()\n', (701, 703), False, 'from project import create_app, db\n'), ((740, 794), 'project.models.User', 'User', ([], {'email': '"""<EMAIL>"""', 'password_plaintext': '"""<PASSWORD>"""'}), "(email='<EMAIL>', password_plaintext='<PASSWORD>')\n", (744, 794), False, 'from project.models import User\n'), ((807, 859), 'project.models.User', 'User', ([], {'email': '"""<EMAIL>"""', 'password_plaintext': '"""PaSsWoRd"""'}), "(email='<EMAIL>', password_plaintext='PaSsWoRd')\n", (811, 859), False, 'from project.models import User\n'), ((864, 885), 'project.db.session.add', 'db.session.add', (['user1'], {}), '(user1)\n', (878, 885), False, 'from project import create_app, db\n'), ((890, 911), 'project.db.session.add', 'db.session.add', (['user2'], {}), '(user2)\n', (904, 911), False, 'from project import create_app, db\n'), ((956, 975), 'project.db.session.commit', 'db.session.commit', ([], {}), '()\n', (973, 975), False, 'from project import create_app, db\n'), ((1030, 1043), 'project.db.drop_all', 
'db.drop_all', ([], {}), '()\n', (1041, 1043), False, 'from project import create_app, db\n')] |
import csv
import itertools
def _boolean(data):
if data == "False":
result = False
else:
result = True
return result
def row_to_location(row):
    """Translate one operon table row into its feature-flag dict."""
    # Column 4 holds the sub-operon count; "0" means no sub-operons.
    has_sub = row[4] != "0"
    return {"have no sub-operons": not has_sub, "have sub-operons": has_sub,
            "start with tss": _boolean(row[6]),
            "stop with terminator": _boolean(row[8])}
def plus_num(num_total, strain, type_):
    """Increment the aggregate and per-strain counters for one operon type."""
    # Each hit bumps both the specific type_ counter and the running
    # "total" counter, at the aggregate level and at the strain level.
    for scope in ("total", strain):
        num_total[scope][type_] += 1
        num_total[scope]["total"] += 1
def print_stat(operons, total_num, class_operon, out):
    """Write feature-combination counts and mono/poly ratios to *out*.

    For every operon and every combination of 1..4 of its feature flags,
    the combination is counted when all of its features hold; single
    features are labeled by name, larger sets by " and "-joined names.
    """
    num_features = {}
    out.write("Total number of operons is {0}\n".format(total_num))
    out.write("The sub operon and features:\n")
    for operon in operons:
        for size in range(1, 5):
            for combo in itertools.combinations(operon.keys(), size):
                # Count the combination only when every feature holds;
                # for a single feature the join is just the feature name.
                if all(operon[name] for name in combo):
                    label = " and ".join(combo)
                    num_features[label] = num_features.get(label, 0) + 1
    for key, value in num_features.items():
        out.write("\tthe number of operons which {0} = {1} ({2})\n".format(
            key, value, float(value) / float(total_num)))
    out.write("mono/polycistronic:\n")
    out.write("\tmonocistronic: {0} ({1})\n".format(
        class_operon["mono"],
        float(class_operon["mono"]) / float(class_operon["total"])))
    out.write("\tpolycistronic: {0} ({1})\n".format(
        class_operon["poly"],
        float(class_operon["poly"]) / float(class_operon["total"])))
def stat(input_file, out_file):
    """Read a tab-separated operon table and write summary statistics.

    Groups rows by genome/strain (column 1), counts operon features per
    strain and overall, classifies operons as mono- or polycistronic from
    the comma-separated gene list in the last column, and writes one
    print_stat() report per strain (plus an "All genomes" section when
    more than one strain is present).
    """
    out = open(out_file, "w")
    operons = {}          # strain -> list of feature dicts
    operons_all = []      # feature dicts across all strains
    tmp_id = ""           # last operon id seen; rows repeat per operon
    f_h = open(input_file, "r")
    pre_seq_id = ""       # last strain seen, to lazily init per-strain state
    total_num = {}        # strain -> operon count
    total_num_all = 0
    class_operon = {}
    class_operon["total"] = {"na": 0, "mono": 0, "poly": 0, "total": 0}
    for row in csv.reader(f_h, delimiter="\t"):
        # Skip the header row; count each operon id only once.
        if row[0] != "Operon_ID":
            if row[0] != tmp_id:
                if pre_seq_id != row[1]:
                    pre_seq_id = row[1]
                    operons[row[1]] = []
                    total_num[row[1]] = 0
                    class_operon[row[1]] = {"na": 0, "mono": 0,
                                            "poly": 0, "total": 0}
                operons[row[1]].append(row_to_location(row))
                operons_all.append(row_to_location(row))
                total_num[row[1]] += 1
                total_num_all += 1
                # Last column: "NA", one gene (mono) or several (poly).
                if row[-1] == "NA":
                    plus_num(class_operon, row[1], "na")
                elif len(row[-1].split(",")) == 1:
                    plus_num(class_operon, row[1], "mono")
                elif len(row[-1].split(",")) > 1:
                    plus_num(class_operon, row[1], "poly")
            tmp_id = row[0]
    # The combined section is only emitted for multi-strain inputs.
    if len(operons) > 1:
        out.write("All genomes:\n")
        print_stat(operons_all, total_num_all, class_operon["total"], out)
    for strain in operons.keys():
        out.write("\n" + strain + ":\n")
        print_stat(operons[strain], total_num[strain],
                   class_operon[strain], out)
    out.close()
    f_h.close()
| [
"csv.reader"
] | [((2506, 2537), 'csv.reader', 'csv.reader', (['f_h'], {'delimiter': '"""\t"""'}), "(f_h, delimiter='\\t')\n", (2516, 2537), False, 'import csv\n')] |
import numpy as np
import math
def GMM(alpha, x, u, conv, dim):
    """Weighted multivariate Gaussian density of column vector x.

    x and u are (dim, 1) arrays; conv is the (dim, dim) covariance, which
    is regularized with 0.001*I before inversion. Returns alpha times the
    density as a length-1 array (matching the original's prob[0] shape).
    """
    regularized = conv + np.eye(dim) * 0.001
    covdet = np.linalg.det(regularized)
    covinv = np.linalg.inv(regularized)
    diff = x - u
    norm_const = 1 / ((2 * math.pi) ** (dim / 2) * np.sqrt(covdet))
    quad = np.transpose(diff).dot(covinv).dot(diff)
    prob = norm_const * np.exp(-0.5 * quad)
    return alpha * prob[0]
def EM_GMM(weights,mean,cov,data,M,dim):
    """Fit an M-component Gaussian mixture to `data` via EM.

    weights, mean, cov are the initial mixture parameters (lists of length
    M); data is an iterable of dim-dimensional points. Iterates until the
    log-likelihood changes by less than 1e-5 and returns the updated
    (weights, mean, cov, iteration_count).
    """
    # Initial log-likelihood of the data under the starting parameters.
    initial_value = 0
    for i in data:
        i = np.expand_dims(i, 1)
        all_value = 0
        for k in range(M):
            value = GMM(weights[k], i, mean[k], cov[k],dim)
            all_value = all_value + value
        # 1e-5 guards against log(0) here (but not in the update below).
        intial_value_temp = math.log(all_value + 0.00001)
        initial_value = initial_value + intial_value_temp
    flag = 10000
    num = 0
    while (flag > 0.00001):
        print("flag",flag)
        num = num + 1
        P = []
        for m in range(M):
            # NOTE(review): [] * (m + 1) is always the empty list, so this
            # just builds M empty responsibility lists -- presumably the
            # multiplier was meant for something else; behavior is fine.
            l = [] * (m + 1)
            P.append(l)
        # E step: responsibilities P[m][i] of component m for point i.
        for i in data:
            i = np.reshape(i, (dim, 1))
            value = [GMM(weights[k], i, mean[k], cov[k],dim) for k in range(M)]
            value = np.array(value)
            sum_value = np.sum(value)
            for m in range(M):
                p = GMM(weights[m], i, mean[m], cov[m],dim) / sum_value
                P[m].append(p)
        for m in range(M):
            P[m] = np.array(P[m]) # 1000*1
        # M step
        # update alpha (mixture weights): mean responsibility per component.
        for m in range(M):
            weights[m] = np.sum(P[m]) / len(data)
        # update u (means): responsibility-weighted average of the points.
        for m in range(M):
            result_list = []
            for i in range(len(data)):
                W = np.expand_dims(data[i], 1)
                result = P[m][i] * W
                result_list.append(result)
            result_list = np.array(result_list)
            mean_sum = np.sum(result_list, 0)
            mean[m] = mean_sum / np.sum(P[m])
        # update cov: responsibility-weighted outer products of deviations.
        for m in range(M):
            result_list = []
            for i in range(len(data)):
                W = np.expand_dims(data[i], 1) # 2 * 1
                T = W - mean[m]
                Q = np.transpose(T)
                temp = (T.dot(Q)) * P[m][i]
                result_list.append(temp)
            result_list = np.array(result_list)
            cov_sum = np.sum(result_list, 0)
            cov[m] = cov_sum / np.sum(P[m])
        # Recompute the log-likelihood and check convergence.
        update_value = 0
        for i in data:
            i = np.expand_dims(i, 1)
            all_value = 0
            for k in range(M):
                value = GMM(weights[k], i, mean[k], cov[k],dim)
                all_value = all_value + value
            # NOTE(review): unlike the initial pass above, no +1e-5 guard
            # here -- math.log raises if all_value is 0; confirm intended.
            update_value_temp = math.log(all_value)
            update_value = update_value + update_value_temp
        flag = abs(update_value - initial_value)
        initial_value = update_value
    return weights,mean,cov,num
"numpy.eye",
"numpy.reshape",
"numpy.sqrt",
"math.log",
"numpy.array",
"numpy.sum",
"numpy.expand_dims",
"numpy.transpose"
] | [((446, 466), 'numpy.expand_dims', 'np.expand_dims', (['i', '(1)'], {}), '(i, 1)\n', (460, 466), True, 'import numpy as np\n'), ((647, 674), 'math.log', 'math.log', (['(all_value + 1e-05)'], {}), '(all_value + 1e-05)\n', (655, 674), False, 'import math\n'), ((215, 230), 'numpy.sqrt', 'np.sqrt', (['covdet'], {}), '(covdet)\n', (222, 230), True, 'import numpy as np\n'), ((994, 1017), 'numpy.reshape', 'np.reshape', (['i', '(dim, 1)'], {}), '(i, (dim, 1))\n', (1004, 1017), True, 'import numpy as np\n'), ((1119, 1134), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (1127, 1134), True, 'import numpy as np\n'), ((1159, 1172), 'numpy.sum', 'np.sum', (['value'], {}), '(value)\n', (1165, 1172), True, 'import numpy as np\n'), ((1355, 1369), 'numpy.array', 'np.array', (['P[m]'], {}), '(P[m])\n', (1363, 1369), True, 'import numpy as np\n'), ((1766, 1787), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (1774, 1787), True, 'import numpy as np\n'), ((1811, 1833), 'numpy.sum', 'np.sum', (['result_list', '(0)'], {}), '(result_list, 0)\n', (1817, 1833), True, 'import numpy as np\n'), ((2232, 2253), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (2240, 2253), True, 'import numpy as np\n'), ((2276, 2298), 'numpy.sum', 'np.sum', (['result_list', '(0)'], {}), '(result_list, 0)\n', (2282, 2298), True, 'import numpy as np\n'), ((2408, 2428), 'numpy.expand_dims', 'np.expand_dims', (['i', '(1)'], {}), '(i, 1)\n', (2422, 2428), True, 'import numpy as np\n'), ((2629, 2648), 'math.log', 'math.log', (['all_value'], {}), '(all_value)\n', (2637, 2648), False, 'import math\n'), ((99, 110), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (105, 110), True, 'import numpy as np\n'), ((154, 165), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (160, 165), True, 'import numpy as np\n'), ((1473, 1485), 'numpy.sum', 'np.sum', (['P[m]'], {}), '(P[m])\n', (1479, 1485), True, 'import numpy as np\n'), ((1633, 1659), 'numpy.expand_dims', 
'np.expand_dims', (['data[i]', '(1)'], {}), '(data[i], 1)\n', (1647, 1659), True, 'import numpy as np\n'), ((1867, 1879), 'numpy.sum', 'np.sum', (['P[m]'], {}), '(P[m])\n', (1873, 1879), True, 'import numpy as np\n'), ((2017, 2043), 'numpy.expand_dims', 'np.expand_dims', (['data[i]', '(1)'], {}), '(data[i], 1)\n', (2031, 2043), True, 'import numpy as np\n'), ((2105, 2120), 'numpy.transpose', 'np.transpose', (['T'], {}), '(T)\n', (2117, 2120), True, 'import numpy as np\n'), ((2330, 2342), 'numpy.sum', 'np.sum', (['P[m]'], {}), '(P[m])\n', (2336, 2342), True, 'import numpy as np\n'), ((259, 278), 'numpy.transpose', 'np.transpose', (['(x - u)'], {}), '(x - u)\n', (271, 278), True, 'import numpy as np\n')] |
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict
import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from kedro.extras.datasets.matplotlib import MatplotlibWriter
def make_plot(hr_data: pd.DataFrame):
    """Bar-plot the number of employees in each Education level.

    Returns the matplotlib Figure so a downstream writer
    (e.g. Kedro's MatplotlibWriter) can persist it.
    """
    figure, _axis = plt.subplots()
    education_counts = hr_data.groupby('Education').Age.count()
    education_counts.plot.bar()
    figure.set_size_inches(12, 12)
    return figure
def dummies(hr_data: pd.DataFrame) -> list:
    """One-hot encode the EnvironmentSatisfaction column.

    Replaces the raw column with ES_* indicator columns and returns
    [encoded_frame, indicator_frame].
    """
    es_indicators = pd.get_dummies(hr_data['EnvironmentSatisfaction'], prefix='ES')
    encoded = pd.concat([hr_data, es_indicators], axis=1)
    encoded.drop('EnvironmentSatisfaction', axis=1, inplace=True)
    logging.getLogger(__name__).info('EnvironmentSatisfaction dummied up')
    return [encoded, es_indicators]
def plot_dummies(data: pd.DataFrame):
    """Draw a count plot of the ES_4 dummy column and return the figure."""
    figure, _axis = plt.subplots()
    sns.countplot(data['ES_4'])
    figure.set_size_inches(12, 12)
    return figure
| [
"logging.getLogger",
"seaborn.countplot",
"pandas.get_dummies",
"pandas.concat",
"matplotlib.pyplot.subplots"
] | [((1681, 1695), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1693, 1695), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1911), 'pandas.get_dummies', 'pd.get_dummies', (["hr_data['EnvironmentSatisfaction']"], {'prefix': '"""ES"""'}), "(hr_data['EnvironmentSatisfaction'], prefix='ES')\n", (1862, 1911), True, 'import pandas as pd\n'), ((1925, 1956), 'pandas.concat', 'pd.concat', (['[hr_data, p]'], {'axis': '(1)'}), '([hr_data, p], axis=1)\n', (1934, 1956), True, 'import pandas as pd\n'), ((2032, 2059), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2049, 2059), False, 'import logging\n'), ((2187, 2201), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2199, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2206, 2233), 'seaborn.countplot', 'sns.countplot', (["data['ES_4']"], {}), "(data['ES_4'])\n", (2219, 2233), True, 'import seaborn as sns\n')] |
import logging
import logging.config
import json
import re
import emoji
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

# Internal numeric error codes mapped to the Telegram Bot API error text
# (or a short description) each one corresponds to.
API_EXCEPTIONS = {
    10: 'sticker set name is already occupied',
    11: 'STICKERSET_INVALID', # eg. trying to remove a sticker from a set the bot doesn't own
    12: 'STICKERSET_NOT_MODIFIED',
    13: 'sticker set name invalid', # eg. starting with a number
    14: 'STICKERS_TOO_MUCH', # pack is full
    15: 'file is too big', # png size > 350 kb
    # 16: 'Stickerset_invalid' # pack name doesn't exist, or pack has been deleted
    17: 'Sticker_png_dimensions' # invalid png size
}
def load_logging_config(file_name='logging.json'):
    """Read a dictConfig-style JSON file and apply it to the logging system."""
    with open(file_name) as config_file:
        config = json.load(config_file)
    logging.config.dictConfig(config)
def name2link(name, bot_username=None):
    """Build the t.me deep link that adds the sticker pack `name`.

    When a bot_username is given, the pack name gets the mandatory
    `_by_<bot>` suffix appended (unless it is already there).
    """
    if bot_username:
        suffix = '_by_' + bot_username
        if not name.endswith(suffix):
            name += suffix
    return 'https://t.me/addstickers/{}'.format(name)
def get_emojis(text):
    """Return a string containing only the emoji characters found in text."""
    return ''.join(filter(emoji.UNICODE_EMOJI.__contains__, text))
| [
"logging.getLogger",
"logging.config.dictConfig",
"json.load"
] | [((83, 110), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (100, 110), False, 'import logging\n'), ((739, 780), 'logging.config.dictConfig', 'logging.config.dictConfig', (['logging_config'], {}), '(logging_config)\n', (764, 780), False, 'import logging\n'), ((721, 733), 'json.load', 'json.load', (['f'], {}), '(f)\n', (730, 733), False, 'import json\n')] |
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0604_conv1D.py
@Version : v0.1
@Time : 2019-11-26 11:08
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : 《Python 深度学习,Francois Chollet》, Sec0604,P188
@Desc : 深度学习用于文本和序列,用卷积神经网络处理序列
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np # pip install numpy<1.17,小于1.17就不会报错
import winsound
from keras.activations import relu
from keras.datasets import imdb
from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D
from keras.layers import Dense
from keras.losses import binary_crossentropy
from keras.metrics import binary_accuracy
from keras.models import Sequential
from keras.optimizers import rmsprop
from keras.preprocessing.sequence import pad_sequences
# 屏蔽警告:Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
from tools import plot_classes_results
# Silence TensorFlow's informational CPU-instruction warnings.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set numpy display precision to 3 decimal places, disable scientific
# notation, and print arrays in full.
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# ----------------------------------------------------------------------
# Hyperparameters for the 1D-convnet IMDB model below.
max_features = 10000
max_len = 500
embedding_size = 128
epochs = 15
batch_size = 128
verbose = 2
validation_split = 0.2
# Load the IMDB sentiment data, keeping only the top `max_features` words,
# then pad/truncate every review to `max_len` tokens.
print("Listing 6.45:准备 IMDB 数据集...")
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words = max_features)
print('\t', len(x_train), 'train sequences(训练序列)')
print('\t', len(x_test), 'test sequences(测试序列)')
print('Pad sequences (samples x time)')
x_train = pad_sequences(x_train, maxlen = max_len)
x_test = pad_sequences(x_test, maxlen = max_len)
print('\t x_train shape:', x_train.shape)
print('\t x_test shape:', x_test.shape)
# ----------------------------------------------------------------------
def simple_conv1d():
    """Build, train and evaluate a small 1D convnet on the IMDB data.

    Uses the module-level hyperparameters (max_features, max_len,
    embedding_size, epochs, batch_size, validation_split, verbose)
    and the pre-padded x_train/y_train arrays, then plots the
    training curves via plot_classes_results.
    """
    print("Listing 6.46:在 IMDB 数据上训练并且评估一个简单的一维卷积神经网络")
    model = Sequential(name = "简单的一维卷积神经网络")
    model.add(Embedding(max_features, embedding_size, input_length = max_len))
    model.add(Conv1D(32, 7, activation = relu))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(32, 7, activation = relu))
    # Collapse the remaining time axis to a single feature vector.
    model.add(GlobalMaxPooling1D())
    # Single logit output, paired with binary cross-entropy below.
    model.add(Dense(1))
    model.summary()
    model.compile(optimizer = rmsprop(lr = 1e-4), loss = binary_crossentropy, metrics = [binary_accuracy])
    history = model.fit(x_train, y_train, epochs = epochs, batch_size = batch_size,
                        validation_split = validation_split, verbose = verbose, use_multiprocessing = True)
    title = "应用简单的一维卷积神经网络在 IMDB 数据集"
    plot_classes_results(history, title, epochs)
    pass
# ----------------------------------------------------------------------
simple_conv1d()
# 6.4.4 Combining CNNs and RNNs to process long sequences.
# Implemented in ch0603_predict_temperature.py (which uses the temperature
# dataset) so the two approaches can be compared side by side.
# Beep to signal that the run has finished.
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
pass
| [
"keras.layers.MaxPooling1D",
"keras.optimizers.rmsprop",
"keras.datasets.imdb.load_data",
"tools.plot_classes_results",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_fignums",
"keras.layers.GlobalMaxPooling1D",
"keras.models.Sequential",
"numpy.random.seed",
"winsound.Beep",
"keras.layers.Den... | [((1192, 1277), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)', 'threshold': 'np.inf', 'linewidth': '(200)'}), '(precision=3, suppress=True, threshold=np.inf, linewidth=200\n )\n', (1211, 1277), True, 'import numpy as np\n'), ((1343, 1363), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1357, 1363), True, 'import numpy as np\n'), ((1767, 1805), 'keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (1781, 1805), False, 'from keras.datasets import imdb\n'), ((1958, 1996), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train'], {'maxlen': 'max_len'}), '(x_train, maxlen=max_len)\n', (1971, 1996), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2008, 2045), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test'], {'maxlen': 'max_len'}), '(x_test, maxlen=max_len)\n', (2021, 2045), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3195, 3218), 'winsound.Beep', 'winsound.Beep', (['(600)', '(500)'], {}), '(600, 500)\n', (3208, 3218), False, 'import winsound\n'), ((2294, 2324), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""简单的一维卷积神经网络"""'}), "(name='简单的一维卷积神经网络')\n", (2304, 2324), False, 'from keras.models import Sequential\n'), ((2954, 2998), 'tools.plot_classes_results', 'plot_classes_results', (['history', 'title', 'epochs'], {}), '(history, title, epochs)\n', (2974, 2998), False, 'from tools import plot_classes_results\n'), ((3255, 3265), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3263, 3265), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2402), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_size'], {'input_length': 'max_len'}), '(max_features, embedding_size, input_length=max_len)\n', (2350, 2402), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, 
MaxPooling1D\n'), ((2420, 2450), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(7)'], {'activation': 'relu'}), '(32, 7, activation=relu)\n', (2426, 2450), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2468, 2483), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(5)'], {}), '(5)\n', (2480, 2483), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2499, 2529), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(7)'], {'activation': 'relu'}), '(32, 7, activation=relu)\n', (2505, 2529), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2547, 2567), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (2565, 2567), False, 'from keras.layers import Conv1D, Embedding, GlobalMaxPooling1D, MaxPooling1D\n'), ((2583, 2591), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2588, 2591), False, 'from keras.layers import Dense\n'), ((3226, 3243), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (3241, 3243), True, 'import matplotlib.pyplot as plt\n'), ((2643, 2661), 'keras.optimizers.rmsprop', 'rmsprop', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (2650, 2661), False, 'from keras.optimizers import rmsprop\n')] |
#! /usr/bin/env python3
"""
Stein PPO: Sample-efficient Policy Optimization with Stein Control Variate
Motivated by the Stein’s identity, Stein PPO extends the previous
control variate methods used in REINFORCE and advantage actor-critic
by introducing more general action-dependent baseline functions.
Details see the following papers:
Stein PPO:
https://arxiv.org/pdf/1710.11198.pdf
Distributed PPO:
https://arxiv.org/abs/1707.02286
Proximal Policy Optimization Algorithms
https://arxiv.org/pdf/1707.06347.pdf
Generalized Advantage Estimation:
https://arxiv.org/pdf/1506.02438.pdf
Code modified from this Github repo: https://github.com/pat-coady/trpo
This GitHub repo is also helpful.
https://github.com/joschu/modular_rl
This implementation learns policies for continuous environments
in the OpenAI Gym (https://gym.openai.com/). Testing was focused on
the MuJoCo control tasks.
"""
import os
import gym
import random
import numpy as np
import tb_logger as logger
import scipy.signal
from gym import wrappers
from utils import Scaler
from policy import Policy
from datetime import datetime
from value_function import NNValueFunction
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
def init_gym(env_name):
    """Create a gym environment and report its space sizes.

    Args:
        env_name: str environment name (e.g. "Humanoid-v1")

    Returns:
        (environment, observation dimension, action dimension)
    """
    environment = gym.make(env_name)
    n_obs = environment.observation_space.shape[0]
    n_act = environment.action_space.shape[0]
    return environment, n_obs, n_act
def run_episode(env, policy, scaler, max_timesteps, animate=False):
    """ Run single episode with option to animate

    Args:
        env: ai gym environment (classic API: step() returns
            obs, reward, done, info — TODO confirm gym version)
        policy: policy object with sample() method
        scaler: scaler object, used to scale/offset each observation dimension
            to a similar range
        max_timesteps: hard cap on episode length
        animate: boolean, True uses env.render() method to animate episode

    Returns: 4-tuple of NumPy arrays
        observes: shape = (episode len, obs_dim)
        actions: shape = (episode len, act_dim)
        rewards: shape = (episode len,)
        unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)
    """
    obs = env.reset()
    observes, actions, rewards, unscaled_obs = [], [], [], []
    done = False
    step = 0.0
    scale, offset = scaler.get()
    scale[-1] = 1.0  # don't scale time step feature
    offset[-1] = 0.0  # don't offset time step feature
    for _ in range(max_timesteps):
        if animate:
            env.render()
        obs = obs.astype(np.float32).reshape((1, -1))
        obs = np.append(obs, [[step]], axis=1)  # add time step feature
        unscaled_obs.append(obs)
        obs = (obs - offset) * scale  # center and scale observations
        observes.append(obs)
        action = policy.sample(obs).reshape((1, -1)).astype(np.float32)
        actions.append(action)
        obs, reward, done, _ = env.step(np.squeeze(action, axis=0))
        # some envs return numpy scalars; normalize to a Python float
        if not isinstance(reward, float):
            reward = np.asscalar(reward)
        rewards.append(reward)
        step += 1e-3  # increment time step feature
        if done:
            break
    return (np.concatenate(observes), np.concatenate(actions),
            np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))
def run_policy(env, policy, scaler, batch_size, max_timesteps):
    """ Run policy and collect data until at least batch_size steps are gathered

    Args:
        env: ai gym environment
        policy: policy object with sample() method
        scaler: scaler object, used to scale/offset each observation dimension
            to a similar range
        batch_size: minimum total number of environment steps to collect
        max_timesteps: max timesteps per episode to run

    Returns: list of trajectory dictionaries, list length = number of episodes
        'observes' : NumPy array of states from episode
        'actions' : NumPy array of actions from episode
        'rewards' : NumPy array of (un-discounted) rewards from episode
        'unscaled_obs' : NumPy array of unscaled states from episode
    """
    total_steps = 0
    trajectories = []
    while total_steps < batch_size:
        observes, actions, rewards, unscaled_obs = run_episode(env, \
                policy, scaler, max_timesteps=max_timesteps)
        total_steps += observes.shape[0]
        trajectory = {'observes': observes,
                      'actions': actions,
                      'rewards': rewards,
                      'unscaled_obs': unscaled_obs}
        trajectories.append(trajectory)
    unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
    scaler.update(unscaled)  # update running statistics for scaling observations
    logger.record_dicts({
        "_MeanReward":np.mean([t['rewards'].sum() for t in trajectories]),
        'Steps': total_steps,})
    return trajectories
def discount(x, gamma):
    """Discounted forward sum of a sequence at each point.

    out[t] = x[t] + gamma * out[t + 1], computed via an IIR filter
    over the reversed sequence.
    """
    reversed_seq = x[::-1]
    filtered = scipy.signal.lfilter([1.0], [1.0, -gamma], reversed_seq)
    return filtered[::-1]
def add_disc_sum_rew(trajectories, gamma):
    """Add the discounted sum of rewards to every trajectory.

    Mutates each trajectory dict in place, storing the result under
    the 'disc_sum_rew' key. Rewards are rescaled by (1 - gamma)
    unless gamma is effectively 1.
    """
    scale_rewards = gamma < 0.999  # don't scale for gamma ~= 1
    for traj in trajectories:
        rewards = traj['rewards'] * (1 - gamma) if scale_rewards else traj['rewards']
        traj['disc_sum_rew'] = discount(rewards, gamma)
def add_value(trajectories, val_func):
    """Attach predicted state values to every trajectory.

    Mutates each trajectory dict in place, storing the value
    function's prediction for 'observes' under the 'values' key.
    """
    for traj in trajectories:
        traj['values'] = val_func.predict(traj['observes'])
def add_gae(trajectories, gamma, lam):
    """Add the generalized advantage estimate (GAE) to every trajectory.

    https://arxiv.org/pdf/1506.02438.pdf
    lam=0 gives plain TD residuals; lam=1 gives discounted returns
    minus the value baseline. Mutates each trajectory in place,
    storing the result under 'advantages'. Requires 'values' from
    add_value().
    """
    for traj in trajectories:
        if gamma < 0.999:  # don't scale for gamma ~= 1
            rewards = traj['rewards'] * (1 - gamma)
        else:
            rewards = traj['rewards']
        values = traj['values']
        # one-step TD residuals: r_t + gamma * V(s_{t+1}) - V(s_t)
        bootstrapped = np.append(values[1:] * gamma, 0)
        deltas = rewards - values + bootstrapped
        traj['advantages'] = discount(deltas, gamma * lam)
def build_train_set(trajectories):
    """Flatten processed trajectories into training arrays.

    Expects trajectories already passed through add_disc_sum_rew(),
    add_value() and add_gae().

    Returns:
        observes: shape (N, obs_dim)
        actions: shape (N, act_dim)
        advantages: shape (N,), normalized to zero mean / unit variance
        disc_sum_rew: shape (N,)
    """
    def gather(key):
        return np.concatenate([t[key] for t in trajectories])

    observes = gather('observes')
    actions = gather('actions')
    disc_sum_rew = gather('disc_sum_rew')
    advantages = gather('advantages')
    # normalize advantages
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)
    return observes, actions, advantages, disc_sum_rew
def log_batch_stats(observes, actions, advantages, disc_sum_rew):
    """Record summary statistics of one training batch and flush the log."""
    stats = {}
    stats['_mean_obs'] = np.mean(observes)
    stats['_min_obs'] = np.min(observes)
    stats['_max_obs'] = np.max(observes)
    stats['_mean_act'] = np.mean(actions)
    stats['_max_act'] = np.max(actions)
    stats['_std_act'] = np.mean(np.var(actions, axis=0))
    stats['_mean_adv'] = np.mean(advantages)
    stats['_min_adv'] = np.min(advantages)
    stats['_max_adv'] = np.max(advantages)
    stats['_std_adv'] = np.var(advantages)
    stats['_mean_discrew'] = np.mean(disc_sum_rew)
    stats['_min_discrew'] = np.min(disc_sum_rew)
    stats['_max_discrew'] = np.max(disc_sum_rew)
    stats['_std_discrew'] = np.var(disc_sum_rew)
    logger.record_dicts(stats)
    logger.dump_tabular()
def main(env_name, num_iterations, gamma, lam, kl_targ,
        batch_size,hid1_mult, policy_logvar, coef, use_lr_adjust, ada_kl_penalty,
        seed, epochs, phi_epochs, max_timesteps,
        reg_scale, phi_lr,
        phi_hs,
        policy_size,
        phi_obj):
    """ Main training loop

    Args:
        env_name: OpenAI Gym environment name, e.g. 'Hopper-v1'
        num_iterations: maximum number of iterations to run
        gamma: reward discount factor (float)
        lam: lambda from Generalized Advantage Estimate
        kl_targ: D_KL target for policy update [D_KL(pi_old || pi_new)
        batch_size: number of episodes per policy training batch
        hid1_mult: hid1 size for policy and value_f (mutliplier of obs dimension)
        policy_logvar: natural log of initial policy variance
        coef: coefficient of Stein control variate
        use_lr_adjust: whether adjust lr based on kl
        ada_kl_penalty: whether adjust kl penalty
        max_timesteps: maximum time steps per trajectory
        reg_scale: regularization coefficient
        policy_size: policy network size
        phi_obj: FitQ or MinVar
    """
    env, obs_dim, act_dim = init_gym(env_name)
    set_global_seeds(seed)
    env.seed(seed)
    env._max_episode_steps = max_timesteps
    obs_dim += 1  # add 1 to obs dimension for time step feature (see run_episode())
    # timestamped directory for gym monitor output / logs
    now = datetime.utcnow().strftime("%b-%d_%H:%M:%S")
    aigym_path = os.path.join('log-files/', env_name, now)
    env = wrappers.Monitor(env, aigym_path, force=True, video_callable=False)
    scaler = Scaler(obs_dim)
    val_func = NNValueFunction(obs_dim, hid1_mult)
    policy = Policy(obs_dim, act_dim, kl_targ,
            hid1_mult, policy_logvar,
            epochs, phi_epochs,
            policy_size=policy_size,
            phi_hidden_sizes=phi_hs,
            c_ph=coef,
            reg_scale=reg_scale,
            lr_phi=phi_lr,
            phi_obj=phi_obj)
    # run a few episodes of untrained policy to initialize scaler:
    run_policy(env, policy, scaler, batch_size=1000, max_timesteps=max_timesteps)
    for _ in range(num_iterations):
        logger.log("\n#Training Iter %d"%(_))
        logger.log("Draw Samples..")
        trajectories = run_policy(env, policy, scaler,
                batch_size=batch_size, max_timesteps=max_timesteps)
        add_value(trajectories, val_func)  # add estimated values to episodes
        add_disc_sum_rew(trajectories, gamma)  # calculated discounted sum of Rs
        add_gae(trajectories, gamma, lam)  # calculate advantage
        # concatenate all episodes into single NumPy arrays
        observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)
        # add various stats to training log:
        log_batch_stats(observes, actions, advantages, disc_sum_rew)
        logger.log("Starting Training...")
        policy.update(observes, actions, advantages, \
                use_lr_adjust, ada_kl_penalty)  # update policy
        val_func.fit(observes, disc_sum_rew)  # update value function
        logger.log('--------------------------------\n')
    # release the TF sessions held by the policy and value networks
    policy.close_sess()
    val_func.close_sess()
val_func.close_sess() | [
"utils.Scaler",
"numpy.array",
"gym.wrappers.Monitor",
"tensorflow.set_random_seed",
"gym.make",
"numpy.mean",
"numpy.max",
"numpy.random.seed",
"numpy.concatenate",
"numpy.min",
"policy.Policy",
"tb_logger.log",
"value_function.NNValueFunction",
"numpy.squeeze",
"datetime.datetime.utcno... | [((1299, 1316), 'numpy.random.seed', 'np.random.seed', (['i'], {}), '(i)\n', (1313, 1316), True, 'import numpy as np\n'), ((1321, 1335), 'random.seed', 'random.seed', (['i'], {}), '(i)\n', (1332, 1335), False, 'import random\n'), ((1689, 1707), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1697, 1707), False, 'import gym\n'), ((4897, 4954), 'numpy.concatenate', 'np.concatenate', (["[t['unscaled_obs'] for t in trajectories]"], {}), "([t['unscaled_obs'] for t in trajectories])\n", (4911, 4954), True, 'import numpy as np\n'), ((7806, 7859), 'numpy.concatenate', 'np.concatenate', (["[t['observes'] for t in trajectories]"], {}), "([t['observes'] for t in trajectories])\n", (7820, 7859), True, 'import numpy as np\n'), ((7874, 7926), 'numpy.concatenate', 'np.concatenate', (["[t['actions'] for t in trajectories]"], {}), "([t['actions'] for t in trajectories])\n", (7888, 7926), True, 'import numpy as np\n'), ((7946, 8003), 'numpy.concatenate', 'np.concatenate', (["[t['disc_sum_rew'] for t in trajectories]"], {}), "([t['disc_sum_rew'] for t in trajectories])\n", (7960, 8003), True, 'import numpy as np\n'), ((8021, 8076), 'numpy.concatenate', 'np.concatenate', (["[t['advantages'] for t in trajectories]"], {}), "([t['advantages'] for t in trajectories])\n", (8035, 8076), True, 'import numpy as np\n'), ((8970, 8991), 'tb_logger.dump_tabular', 'logger.dump_tabular', ([], {}), '()\n', (8989, 8991), True, 'import tb_logger as logger\n'), ((10446, 10487), 'os.path.join', 'os.path.join', (['"""log-files/"""', 'env_name', 'now'], {}), "('log-files/', env_name, now)\n", (10458, 10487), False, 'import os\n'), ((10498, 10565), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', 'aigym_path'], {'force': '(True)', 'video_callable': '(False)'}), '(env, aigym_path, force=True, video_callable=False)\n', (10514, 10565), False, 'from gym import wrappers\n'), ((10584, 10599), 'utils.Scaler', 'Scaler', (['obs_dim'], {}), '(obs_dim)\n', (10590, 
10599), False, 'from utils import Scaler\n'), ((10615, 10650), 'value_function.NNValueFunction', 'NNValueFunction', (['obs_dim', 'hid1_mult'], {}), '(obs_dim, hid1_mult)\n', (10630, 10650), False, 'from value_function import NNValueFunction\n'), ((10669, 10870), 'policy.Policy', 'Policy', (['obs_dim', 'act_dim', 'kl_targ', 'hid1_mult', 'policy_logvar', 'epochs', 'phi_epochs'], {'policy_size': 'policy_size', 'phi_hidden_sizes': 'phi_hs', 'c_ph': 'coef', 'reg_scale': 'reg_scale', 'lr_phi': 'phi_lr', 'phi_obj': 'phi_obj'}), '(obs_dim, act_dim, kl_targ, hid1_mult, policy_logvar, epochs,\n phi_epochs, policy_size=policy_size, phi_hidden_sizes=phi_hs, c_ph=coef,\n reg_scale=reg_scale, lr_phi=phi_lr, phi_obj=phi_obj)\n', (10675, 10870), False, 'from policy import Policy\n'), ((1273, 1294), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['i'], {}), '(i)\n', (1291, 1294), True, 'import tensorflow as tf\n'), ((2905, 2937), 'numpy.append', 'np.append', (['obs', '[[step]]'], {'axis': '(1)'}), '(obs, [[step]], axis=1)\n', (2914, 2937), True, 'import numpy as np\n'), ((3480, 3504), 'numpy.concatenate', 'np.concatenate', (['observes'], {}), '(observes)\n', (3494, 3504), True, 'import numpy as np\n'), ((3506, 3529), 'numpy.concatenate', 'np.concatenate', (['actions'], {}), '(actions)\n', (3520, 3529), True, 'import numpy as np\n'), ((3543, 3578), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float64'}), '(rewards, dtype=np.float64)\n', (3551, 3578), True, 'import numpy as np\n'), ((3580, 3608), 'numpy.concatenate', 'np.concatenate', (['unscaled_obs'], {}), '(unscaled_obs)\n', (3594, 3608), True, 'import numpy as np\n'), ((11161, 11201), 'tb_logger.log', 'logger.log', (['("""\n#Training Iter %d""" % _)'], {}), '("""\n#Training Iter %d""" % _)\n', (11171, 11201), True, 'import tb_logger as logger\n'), ((11207, 11235), 'tb_logger.log', 'logger.log', (['"""Draw Samples.."""'], {}), "('Draw Samples..')\n", (11217, 11235), True, 'import tb_logger as logger\n'), ((11884, 
11918), 'tb_logger.log', 'logger.log', (['"""Starting Training..."""'], {}), "('Starting Training...')\n", (11894, 11918), True, 'import tb_logger as logger\n'), ((12119, 12167), 'tb_logger.log', 'logger.log', (['"""--------------------------------\n"""'], {}), "('--------------------------------\\n')\n", (12129, 12167), True, 'import tb_logger as logger\n'), ((3238, 3264), 'numpy.squeeze', 'np.squeeze', (['action'], {'axis': '(0)'}), '(action, axis=0)\n', (3248, 3264), True, 'import numpy as np\n'), ((3329, 3348), 'numpy.asscalar', 'np.asscalar', (['reward'], {}), '(reward)\n', (3340, 3348), True, 'import numpy as np\n'), ((7303, 7335), 'numpy.append', 'np.append', (['(values[1:] * gamma)', '(0)'], {}), '(values[1:] * gamma, 0)\n', (7312, 7335), True, 'import numpy as np\n'), ((8387, 8404), 'numpy.mean', 'np.mean', (['observes'], {}), '(observes)\n', (8394, 8404), True, 'import numpy as np\n'), ((8426, 8442), 'numpy.min', 'np.min', (['observes'], {}), '(observes)\n', (8432, 8442), True, 'import numpy as np\n'), ((8464, 8480), 'numpy.max', 'np.max', (['observes'], {}), '(observes)\n', (8470, 8480), True, 'import numpy as np\n'), ((8503, 8519), 'numpy.mean', 'np.mean', (['actions'], {}), '(actions)\n', (8510, 8519), True, 'import numpy as np\n'), ((8541, 8556), 'numpy.max', 'np.max', (['actions'], {}), '(actions)\n', (8547, 8556), True, 'import numpy as np\n'), ((8633, 8652), 'numpy.mean', 'np.mean', (['advantages'], {}), '(advantages)\n', (8640, 8652), True, 'import numpy as np\n'), ((8674, 8692), 'numpy.min', 'np.min', (['advantages'], {}), '(advantages)\n', (8680, 8692), True, 'import numpy as np\n'), ((8714, 8732), 'numpy.max', 'np.max', (['advantages'], {}), '(advantages)\n', (8720, 8732), True, 'import numpy as np\n'), ((8754, 8772), 'numpy.var', 'np.var', (['advantages'], {}), '(advantages)\n', (8760, 8772), True, 'import numpy as np\n'), ((8799, 8820), 'numpy.mean', 'np.mean', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8806, 8820), True, 'import numpy as 
np\n'), ((8846, 8866), 'numpy.min', 'np.min', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8852, 8866), True, 'import numpy as np\n'), ((8892, 8912), 'numpy.max', 'np.max', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8898, 8912), True, 'import numpy as np\n'), ((8938, 8958), 'numpy.var', 'np.var', (['disc_sum_rew'], {}), '(disc_sum_rew)\n', (8944, 8958), True, 'import numpy as np\n'), ((10384, 10401), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (10399, 10401), False, 'from datetime import datetime\n'), ((8586, 8609), 'numpy.var', 'np.var', (['actions'], {'axis': '(0)'}), '(actions, axis=0)\n', (8592, 8609), True, 'import numpy as np\n')] |
import sys
sys.path.append('..')
from FrozenLake.Game import Config, FrozenLake
from random import randint
import pprint
class qAlgorithm(object):
    """Tabular Q-learning agent for a randomly generated FrozenLake board.

    Builds a 10x10 lake with a goal at (9,9) and 11 randomly placed
    holes, learns a Q-table by random exploration, then replays the
    greedy policy from (0,0).
    """
    # Number of training episodes.
    EPOCH = 1000
    def __init__(self):
        conf = Config()
        conf.setSize(10,10)
        conf.setEnd(9,9)
        # NOTE(review): holes are random, so they may coincide with each
        # other, the start cell or the goal — confirm Config tolerates that.
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        conf.addHole(randint(0,9),randint(0,9))
        game = FrozenLake(conf)
        game.print()
        game.onGameOver(self.gameOver)
        self.config = conf
        self.game = game
        # Discount factor for future rewards.
        self.gamma = 0.8
        # True while the current episode is still running (sic: "Runing").
        self.gameRuning = True
    def gameOver(self, status):
        # Callback invoked by the game when an episode ends.
        self.gameRuning = False
    def createNewGame(self, startX, startY):
        # Reset the board and start a fresh episode at (startX, startY).
        self.game.resetGame()
        self.game.setPosition(startX,startY)
        self.gameRuning = True
    def main(self):
        # Full pipeline: read the map, build the R/Q tables, train, replay.
        self.gameMap = self.game.getMap()
        self.initQRTable()
        self.train()
        self.playGame()
    def getRandomPosition(self):
        # Uniformly random (x, y) cell within the map bounds.
        x = randint(0,len(self.gameMap[0])-1)
        y = randint(0,len(self.gameMap)-1)
        return (x,y)
    def playGame(self):
        """Replay the learned greedy policy from (0,0), printing each step."""
        self.createNewGame(0,0)
        i=0
        while self.gameRuning:
            print(' Epoch:',i)
            i+=1
            self.game.print()
            pos = self.game.getPosition()
            # Greedy action: the direction with the highest Q-value.
            direction = max(self.qTable[pos], key=self.qTable[pos].get)
            self.move(direction)
        i+=1
        print(' Epoch:',i)
        self.game.print()
    def train(self):
        """Q-learning over EPOCH episodes with uniformly random actions."""
        for i in range(self.EPOCH):
            print(' Epoch:',i+1,'/',self.EPOCH,end='\r')
            pos = self.getRandomPosition()
            (posX, posY) = pos
            self.createNewGame(posX,posY)
            while self.gameRuning:
                r = self.rTable[pos]
                # Pick a random legal direction from this cell.
                nextMove = randint(0,len(r)-1)
                nextMove = [*r][nextMove]
                self.move(nextMove)
                newPos = self.game.getPosition()
                # Q-update: Q(s,a) = R(s,a) + gamma * max_a' Q(s',a')
                maxQ = max(self.qTable[newPos], key=self.qTable[newPos].get)
                self.qTable[pos][nextMove] = r[nextMove] + self.gamma * self.qTable[newPos][maxQ]
                pos = newPos
        print()
    def move(self, nextMove):
        # Translate a direction string into the matching game move.
        if nextMove == 'right':
            self.game.moveRight()
        elif nextMove == 'left':
            self.game.moveLeft()
        elif nextMove == 'up':
            self.game.moveUp()
        elif nextMove == 'down':
            self.game.moveDown()
    def initQRTable(self):
        """Build the reward table (rTable) and zeroed Q-table (qTable).

        Each cell maps to a dict of its legal directions; rewards are
        taken from the neighbouring cell's content via getReward().
        """
        self.rTable = {}
        self.qTable = {}
        for i in range(len(self.gameMap)):
            for j in range(len(self.gameMap[0])):
                t = {}
                t2 = {}
                if j > 0:
                    t['left'] = self.getReward(i,j-1)
                    t2['left'] = 0
                if j < len(self.gameMap[0])-1:
                    t['right'] = self.getReward(i,j+1)
                    t2['right'] = 0
                if i > 0:
                    t['up'] = self.getReward(i-1,j)
                    t2['up'] = 0
                if i < len(self.gameMap)-1:
                    t['down'] = self.getReward(i+1,j)
                    t2['down'] = 0
                self.rTable[(i,j)]=t
                self.qTable[(i,j)]=t2
    def getReward(self, x,y):
        # Map cell content to a reward: 1 = hole (-200), 2 = goal (+200),
        # anything else is neutral ice (0).
        v = self.gameMap[x][y]
        if v == 1: return -200
        if v == 2: return 200
        return 0
def run():
    """Create a Q-learning agent and run one full train/play session."""
    qAlgorithm().main()
| [
"FrozenLake.Game.Config",
"sys.path.append",
"random.randint",
"FrozenLake.Game.FrozenLake"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((193, 201), 'FrozenLake.Game.Config', 'Config', ([], {}), '()\n', (199, 201), False, 'from FrozenLake.Game import Config, FrozenLake\n'), ((715, 731), 'FrozenLake.Game.FrozenLake', 'FrozenLake', (['conf'], {}), '(conf)\n', (725, 731), False, 'from FrozenLake.Game import Config, FrozenLake\n'), ((258, 271), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (265, 271), False, 'from random import randint\n'), ((271, 284), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (278, 284), False, 'from random import randint\n'), ((300, 313), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (307, 313), False, 'from random import randint\n'), ((313, 326), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (320, 326), False, 'from random import randint\n'), ((342, 355), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (349, 355), False, 'from random import randint\n'), ((355, 368), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (362, 368), False, 'from random import randint\n'), ((384, 397), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (391, 397), False, 'from random import randint\n'), ((397, 410), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (404, 410), False, 'from random import randint\n'), ((426, 439), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (433, 439), False, 'from random import randint\n'), ((439, 452), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (446, 452), False, 'from random import randint\n'), ((468, 481), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (475, 481), False, 'from random import randint\n'), ((481, 494), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (488, 494), False, 'from random import randint\n'), ((510, 523), 'random.randint', 'randint', 
(['(0)', '(9)'], {}), '(0, 9)\n', (517, 523), False, 'from random import randint\n'), ((523, 536), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (530, 536), False, 'from random import randint\n'), ((552, 565), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (559, 565), False, 'from random import randint\n'), ((565, 578), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (572, 578), False, 'from random import randint\n'), ((594, 607), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (601, 607), False, 'from random import randint\n'), ((607, 620), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (614, 620), False, 'from random import randint\n'), ((636, 649), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (643, 649), False, 'from random import randint\n'), ((649, 662), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (656, 662), False, 'from random import randint\n'), ((678, 691), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (685, 691), False, 'from random import randint\n'), ((691, 704), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (698, 704), False, 'from random import randint\n')] |
import argparse
import os
class Opts:
    """Command-line option container for training/testing experiments.

    Typical use: ``opt = Opts().parse()`` — parses sys.argv, derives the
    save directory, and writes the resolved arguments to ``opt.txt``.
    """
    def __init__(self):
        # Arguments are registered lazily in init(), which parse() calls.
        self.parser = argparse.ArgumentParser()
def init(self):
self.parser.add_argument('-expID', default='default', help='Experiment ID')
self.parser.add_argument('-data', default='default', help='Input data folder')
self.parser.add_argument('-nThreads', default=4, type=int, help='Number of threads')
self.parser.add_argument('-expDir', default='../exp', help='Experiments directory')
self.parser.add_argument('-scaleAugFactor', default=0.25, type=float, help='Scale augment factor')
self.parser.add_argument('-rotAugProb', default=0.4, type=float, help='Rotation augment probability')
self.parser.add_argument('-flipAugProb', default=0.5, type=float, help='Flip augment probability')
self.parser.add_argument('-rotAugFactor', default=30, type=float, help='Rotation augment factor')
self.parser.add_argument('-colorAugFactor', default=0.2, type=float, help='Colo augment factor')
self.parser.add_argument('-imgSize', default=368, type=int, help='Number of threads')
self.parser.add_argument('-hmSize', default=46, type=int, help='Number of threads')
self.parser.add_argument('-DEBUG', type=int, default=0, help='Debug')
self.parser.add_argument('-sigmaPAF', default=5, type=int, help='Width of PAF')
self.parser.add_argument('-sigmaHM', default=7, type=int, help='Std. of Heatmap')
self.parser.add_argument('-variableWidthPAF', dest='variableWidthPAF', action='store_true', help='Variable width PAF based on length of part')
self.parser.add_argument('-dataset', default='coco', help='Dataset')
self.parser.add_argument('-model', default='vgg', help='Model')
self.parser.add_argument('-batchSize', default=8, type=int, help='Batch Size')
self.parser.add_argument('-LR', default=1e-3, type=float, help='Learn Rate')
self.parser.add_argument('-nEpoch', default=150, type=int, help='Number of Epochs')
self.parser.add_argument('-dropLR', type=float, default=50, help='Drop LR')
self.parser.add_argument('-valInterval', type=int, default=1, help='Val Interval')
self.parser.add_argument('-loadModel', default='none', help='Load pre-trained')
self.parser.add_argument('-train', dest='train', action='store_true', help='Train')
self.parser.add_argument('-vizOut', dest='vizOut', action='store_true', help='Visualize output?')
self.parser.add_argument('-criterionHm', default='mse', help='Heatmap Criterion')
self.parser.add_argument('-criterionPaf', default='mse', help='PAF Criterion')
    def parse(self):
        """Parse sys.argv, derive opt.saveDir, dump all args to opt.txt, return opts."""
        self.init()
        self.opt = self.parser.parse_args()
        self.opt.saveDir = os.path.join(self.opt.expDir, self.opt.expID)
        if self.opt.DEBUG > 0:
            # Single worker keeps debugging (breakpoints, tracebacks) manageable.
            self.opt.nThreads = 1
        # Collect every public attribute of the parsed namespace for logging.
        args = dict((name, getattr(self.opt, name)) for name in dir(self.opt)
                    if not name.startswith('_'))
        if not os.path.exists(self.opt.saveDir):
            os.makedirs(self.opt.saveDir)
        # Persist the resolved configuration next to the experiment outputs.
        file_name = os.path.join(self.opt.saveDir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('==> Args:\n')
            for k, v in sorted(args.items()):
                opt_file.write(' %s: %s\n' % (str(k), str(v)))
        return self.opt | [
"os.makedirs",
"os.path.exists",
"os.path.join",
"argparse.ArgumentParser"
] | [((85, 110), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (108, 110), False, 'import argparse\n'), ((2780, 2825), 'os.path.join', 'os.path.join', (['self.opt.expDir', 'self.opt.expID'], {}), '(self.opt.expDir, self.opt.expID)\n', (2792, 2825), False, 'import os\n'), ((3130, 3171), 'os.path.join', 'os.path.join', (['self.opt.saveDir', '"""opt.txt"""'], {}), "(self.opt.saveDir, 'opt.txt')\n", (3142, 3171), False, 'import os\n'), ((3034, 3066), 'os.path.exists', 'os.path.exists', (['self.opt.saveDir'], {}), '(self.opt.saveDir)\n', (3048, 3066), False, 'import os\n'), ((3080, 3109), 'os.makedirs', 'os.makedirs', (['self.opt.saveDir'], {}), '(self.opt.saveDir)\n', (3091, 3109), False, 'import os\n')] |
from flask import Blueprint
from flask import flash
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
from werkzeug.exceptions import abort
from cooking.routers.auth import login_required
from cooking.db import get_db
bp = Blueprint("participant", __name__, url_prefix="/participant")
@bp.route("/")
def index():
"""Show all the commands for the participant"""
db = get_db()
participants = db.execute(
"SELECT p.id, name, mail, command_details,command_id,restaurant,command_day,command_hour "
" FROM participant p JOIN command c ON p.command_id = c.id "
" ORDER BY command_day DESC "
).fetchall()
return render_template("participant/index.html", participants=participants)
@bp.route("/commands")
def index_2():
"""Show all the commands for the participant, most recent first."""
db = get_db()
commands = db.execute(
"SELECT id, restaurant, menu, Command_day, command_hour"
" FROM command"
" ORDER BY Command_day DESC"
).fetchall()
return render_template("participant/command_index.html", commands=commands)
@bp.route("/order/command/<int:command_id>", methods=("GET", "POST"))
@login_required
def order(command_id):
"""Let the participant order a command."""
if request.method == "POST":
mail = request.form["mail"]
name = request.form["name"]
command_details = request.form["command_details"]
error = None
if not mail or not name or not command_details:
error = "One field is missing is required."
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
"INSERT INTO participant (mail, name, command_details, command_id) VALUES (?, ?, ?,?)",
(mail, name, command_details,command_id),
)
db.commit()
return redirect(url_for("participant.index_2"))
return render_template("participant/order.html")
def get_participant(id):
    """Fetch one participant (joined with its command) by primary key.

    :param id: id of the participant to get
    :return: the participant row
    :raise 404: if no participant with the given id exists
    """
    row = get_db().execute(
        "SELECT p.id, name, mail, command_details, command_id,restaurant"
        " FROM participant p JOIN command c ON p.command_id = c.id "
        " WHERE p.id = ?",
        (id,),
    ).fetchone()
    if row is None:
        abort(404, "participant id {0} doesn't exist.".format(id))
    return row
@bp.route("/create", methods=("GET", "POST"))
@login_required
def create():
"""Create a new participant."""
db = get_db()
commands = db.execute(
"SELECT id, restaurant "
" FROM command"
).fetchall()
if request.method == "POST":
mail = request.form["mail"]
name = request.form["name"]
command_details = request.form["command_details"]
command_id = request.form["command_id"]
error = None
if not mail or not name or not command_details:
error = "One field is missing is required."
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
"INSERT INTO participant (mail, name, command_details, command_id) VALUES (?, ?, ?,?)",
(mail, name, command_details,command_id),
)
db.commit()
return redirect(url_for("participant.index"))
return render_template("participant/create.html",commands=commands)
@bp.route("/<int:id>/update", methods=("GET", "POST"))
@login_required
def update(id):
"""Update a participant"""
participant = get_participant(id)
db = get_db()
commands = db.execute(
"SELECT id, restaurant "
" FROM command"
).fetchall()
if request.method == "POST":
name = request.form["name"]
mail = request.form["mail"]
error = None
if not name:
error = "name is required."
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
"UPDATE participant SET name = ?, mail = ? WHERE id = ?", (name, mail, id)
)
db.commit()
return redirect(url_for("participant.index"))
return render_template("participant/update.html", participant=participant,commands=commands)
@bp.route("/<int:id>/delete", methods=("POST",))
@login_required
def delete(id):
"""Delete a participant.
Ensures that the post exists.
"""
get_participant(id)
db = get_db()
db.execute("DELETE FROM participant WHERE id = ?", (id,))
db.commit()
return redirect(url_for("participant.index"))
| [
"flask.render_template",
"flask.flash",
"flask.url_for",
"cooking.db.get_db",
"flask.Blueprint"
] | [((288, 349), 'flask.Blueprint', 'Blueprint', (['"""participant"""', '__name__'], {'url_prefix': '"""/participant"""'}), "('participant', __name__, url_prefix='/participant')\n", (297, 349), False, 'from flask import Blueprint\n'), ((440, 448), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (446, 448), False, 'from cooking.db import get_db\n'), ((714, 782), 'flask.render_template', 'render_template', (['"""participant/index.html"""'], {'participants': 'participants'}), "('participant/index.html', participants=participants)\n", (729, 782), False, 'from flask import render_template\n'), ((903, 911), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (909, 911), False, 'from cooking.db import get_db\n'), ((1093, 1161), 'flask.render_template', 'render_template', (['"""participant/command_index.html"""'], {'commands': 'commands'}), "('participant/command_index.html', commands=commands)\n", (1108, 1161), False, 'from flask import render_template\n'), ((2008, 2049), 'flask.render_template', 'render_template', (['"""participant/order.html"""'], {}), "('participant/order.html')\n", (2023, 2049), False, 'from flask import render_template\n'), ((2872, 2880), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (2878, 2880), False, 'from cooking.db import get_db\n'), ((3717, 3778), 'flask.render_template', 'render_template', (['"""participant/create.html"""'], {'commands': 'commands'}), "('participant/create.html', commands=commands)\n", (3732, 3778), False, 'from flask import render_template\n'), ((3945, 3953), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (3951, 3953), False, 'from cooking.db import get_db\n'), ((4562, 4652), 'flask.render_template', 'render_template', (['"""participant/update.html"""'], {'participant': 'participant', 'commands': 'commands'}), "('participant/update.html', participant=participant,\n commands=commands)\n", (4577, 4652), False, 'from flask import render_template\n'), ((4836, 4844), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (4842, 
4844), False, 'from cooking.db import get_db\n'), ((4943, 4971), 'flask.url_for', 'url_for', (['"""participant.index"""'], {}), "('participant.index')\n", (4950, 4971), False, 'from flask import url_for\n'), ((1659, 1671), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (1664, 1671), False, 'from flask import flash\n'), ((1703, 1711), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (1709, 1711), False, 'from cooking.db import get_db\n'), ((3370, 3382), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (3375, 3382), False, 'from flask import flash\n'), ((3414, 3422), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (3420, 3422), False, 'from cooking.db import get_db\n'), ((4286, 4298), 'flask.flash', 'flash', (['error'], {}), '(error)\n', (4291, 4298), False, 'from flask import flash\n'), ((4330, 4338), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (4336, 4338), False, 'from cooking.db import get_db\n'), ((1964, 1994), 'flask.url_for', 'url_for', (['"""participant.index_2"""'], {}), "('participant.index_2')\n", (1971, 1994), False, 'from flask import url_for\n'), ((3675, 3703), 'flask.url_for', 'url_for', (['"""participant.index"""'], {}), "('participant.index')\n", (3682, 3703), False, 'from flask import url_for\n'), ((4520, 4548), 'flask.url_for', 'url_for', (['"""participant.index"""'], {}), "('participant.index')\n", (4527, 4548), False, 'from flask import url_for\n'), ((2383, 2391), 'cooking.db.get_db', 'get_db', ([], {}), '()\n', (2389, 2391), False, 'from cooking.db import get_db\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_messaging
import testtools
from senlin.common import consts
from senlin.common import messaging
class TestUtilFunctions(testtools.TestCase):
    """Unit tests for senlin.common.messaging's RPC server/client factories."""

    @mock.patch.object(oslo_messaging, "get_rpc_server")
    @mock.patch("senlin.common.messaging.RequestContextSerializer")
    @mock.patch("senlin.common.messaging.JsonPayloadSerializer")
    def test_get_rpc_server(self, mock_json_serializer,
                            mock_context_serializer,
                            mock_get_rpc_server):
        # Without an explicit serializer, a JsonPayloadSerializer is built and
        # wrapped in a RequestContextSerializer before the server is created.
        x_target = mock.Mock()
        x_endpoint = mock.Mock()
        x_json_serializer = mock.Mock()
        mock_json_serializer.return_value = x_json_serializer
        x_context_serializer = mock.Mock()
        mock_context_serializer.return_value = x_context_serializer
        x_rpc_server = mock.Mock()
        mock_get_rpc_server.return_value = x_rpc_server
        res = messaging.get_rpc_server(x_target, x_endpoint)
        self.assertEqual(x_rpc_server, res)
        mock_json_serializer.assert_called_once_with()
        mock_context_serializer.assert_called_once_with(x_json_serializer)
        # The endpoint must be passed as a one-element list.
        mock_get_rpc_server.assert_called_once_with(
            messaging.TRANSPORT, x_target, [x_endpoint],
            executor='eventlet', serializer=x_context_serializer)
    @mock.patch.object(oslo_messaging, "get_rpc_server")
    @mock.patch("senlin.common.messaging.RequestContextSerializer")
    @mock.patch("senlin.common.messaging.JsonPayloadSerializer")
    def test_get_rpc_server_with_serializer(self, mock_json_serializer,
                                            mock_context_serializer,
                                            mock_get_rpc_server):
        # With a caller-supplied serializer, no JsonPayloadSerializer is
        # created; the given serializer is wrapped directly.
        x_target = mock.Mock()
        x_endpoint = mock.Mock()
        x_serializer = mock.Mock()
        x_context_serializer = mock.Mock()
        mock_context_serializer.return_value = x_context_serializer
        x_rpc_server = mock.Mock()
        mock_get_rpc_server.return_value = x_rpc_server
        res = messaging.get_rpc_server(x_target, x_endpoint,
                                       serializer=x_serializer)
        self.assertEqual(x_rpc_server, res)
        self.assertEqual(0, mock_json_serializer.call_count)
        mock_context_serializer.assert_called_once_with(x_serializer)
        mock_get_rpc_server.assert_called_once_with(
            messaging.TRANSPORT, x_target, [x_endpoint],
            executor='eventlet', serializer=x_context_serializer)
    @mock.patch("oslo_messaging.RPCClient")
    @mock.patch("senlin.common.messaging.RequestContextSerializer")
    @mock.patch("senlin.common.messaging.JsonPayloadSerializer")
    @mock.patch("oslo_messaging.Target")
    def test_get_rpc_client(self, mock_target, mock_json_serializer,
                            mock_context_serializer,
                            mock_rpc_client):
        # Client path: a Target is built from topic/server at the base RPC API
        # version, and the default JSON serializer chain is used.
        x_topic = mock.Mock()
        x_server = mock.Mock()
        x_target = mock.Mock()
        mock_target.return_value = x_target
        x_json_serializer = mock.Mock()
        mock_json_serializer.return_value = x_json_serializer
        x_context_serializer = mock.Mock()
        mock_context_serializer.return_value = x_context_serializer
        x_rpc_client = mock.Mock()
        mock_rpc_client.return_value = x_rpc_client
        res = messaging.get_rpc_client(x_topic, x_server)
        self.assertEqual(x_rpc_client, res)
        mock_target.assert_called_once_with(
            topic=x_topic, server=x_server,
            version=consts.RPC_API_VERSION_BASE)
        mock_json_serializer.assert_called_once_with()
        mock_context_serializer.assert_called_once_with(x_json_serializer)
        mock_rpc_client.assert_called_once_with(
            messaging.TRANSPORT, x_target, serializer=x_context_serializer)
    @mock.patch("oslo_messaging.RPCClient")
    @mock.patch("senlin.common.messaging.RequestContextSerializer")
    @mock.patch("senlin.common.messaging.JsonPayloadSerializer")
    @mock.patch("oslo_messaging.Target")
    def test_get_rpc_client_with_serializer(self, mock_target,
                                            mock_json_serializer,
                                            mock_context_serializer,
                                            mock_rpc_client):
        # As above, but the caller-supplied serializer replaces the default
        # JSON payload serializer entirely.
        x_topic = mock.Mock()
        x_server = mock.Mock()
        x_target = mock.Mock()
        x_serializer = mock.Mock()
        mock_target.return_value = x_target
        x_context_serializer = mock.Mock()
        mock_context_serializer.return_value = x_context_serializer
        x_rpc_client = mock.Mock()
        mock_rpc_client.return_value = x_rpc_client
        res = messaging.get_rpc_client(x_topic, x_server,
                                      serializer=x_serializer)
        self.assertEqual(x_rpc_client, res)
        mock_target.assert_called_once_with(
            topic=x_topic, server=x_server,
            version=consts.RPC_API_VERSION_BASE)
        self.assertEqual(0, mock_json_serializer.call_count)
        mock_context_serializer.assert_called_once_with(x_serializer)
        mock_rpc_client.assert_called_once_with(
            messaging.TRANSPORT, x_target, serializer=x_context_serializer)
| [
"senlin.common.messaging.get_rpc_client",
"mock.patch",
"mock.Mock",
"mock.patch.object",
"senlin.common.messaging.get_rpc_server"
] | [((723, 774), 'mock.patch.object', 'mock.patch.object', (['oslo_messaging', '"""get_rpc_server"""'], {}), "(oslo_messaging, 'get_rpc_server')\n", (740, 774), False, 'import mock\n'), ((780, 842), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.RequestContextSerializer"""'], {}), "('senlin.common.messaging.RequestContextSerializer')\n", (790, 842), False, 'import mock\n'), ((848, 907), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.JsonPayloadSerializer"""'], {}), "('senlin.common.messaging.JsonPayloadSerializer')\n", (858, 907), False, 'import mock\n'), ((1854, 1905), 'mock.patch.object', 'mock.patch.object', (['oslo_messaging', '"""get_rpc_server"""'], {}), "(oslo_messaging, 'get_rpc_server')\n", (1871, 1905), False, 'import mock\n'), ((1911, 1973), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.RequestContextSerializer"""'], {}), "('senlin.common.messaging.RequestContextSerializer')\n", (1921, 1973), False, 'import mock\n'), ((1979, 2038), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.JsonPayloadSerializer"""'], {}), "('senlin.common.messaging.JsonPayloadSerializer')\n", (1989, 2038), False, 'import mock\n'), ((3031, 3069), 'mock.patch', 'mock.patch', (['"""oslo_messaging.RPCClient"""'], {}), "('oslo_messaging.RPCClient')\n", (3041, 3069), False, 'import mock\n'), ((3075, 3137), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.RequestContextSerializer"""'], {}), "('senlin.common.messaging.RequestContextSerializer')\n", (3085, 3137), False, 'import mock\n'), ((3143, 3202), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.JsonPayloadSerializer"""'], {}), "('senlin.common.messaging.JsonPayloadSerializer')\n", (3153, 3202), False, 'import mock\n'), ((3208, 3243), 'mock.patch', 'mock.patch', (['"""oslo_messaging.Target"""'], {}), "('oslo_messaging.Target')\n", (3218, 3243), False, 'import mock\n'), ((4351, 4389), 'mock.patch', 'mock.patch', (['"""oslo_messaging.RPCClient"""'], {}), 
"('oslo_messaging.RPCClient')\n", (4361, 4389), False, 'import mock\n'), ((4395, 4457), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.RequestContextSerializer"""'], {}), "('senlin.common.messaging.RequestContextSerializer')\n", (4405, 4457), False, 'import mock\n'), ((4463, 4522), 'mock.patch', 'mock.patch', (['"""senlin.common.messaging.JsonPayloadSerializer"""'], {}), "('senlin.common.messaging.JsonPayloadSerializer')\n", (4473, 4522), False, 'import mock\n'), ((4528, 4563), 'mock.patch', 'mock.patch', (['"""oslo_messaging.Target"""'], {}), "('oslo_messaging.Target')\n", (4538, 4563), False, 'import mock\n'), ((1086, 1097), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1095, 1097), False, 'import mock\n'), ((1119, 1130), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1128, 1130), False, 'import mock\n'), ((1159, 1170), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1168, 1170), False, 'import mock\n'), ((1264, 1275), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1273, 1275), False, 'import mock\n'), ((1367, 1378), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1376, 1378), False, 'import mock\n'), ((1450, 1496), 'senlin.common.messaging.get_rpc_server', 'messaging.get_rpc_server', (['x_target', 'x_endpoint'], {}), '(x_target, x_endpoint)\n', (1474, 1496), False, 'from senlin.common import messaging\n'), ((2265, 2276), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2274, 2276), False, 'import mock\n'), ((2298, 2309), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2307, 2309), False, 'import mock\n'), ((2333, 2344), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2342, 2344), False, 'import mock\n'), ((2376, 2387), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2385, 2387), False, 'import mock\n'), ((2479, 2490), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2488, 2490), False, 'import mock\n'), ((2562, 2633), 'senlin.common.messaging.get_rpc_server', 'messaging.get_rpc_server', (['x_target', 'x_endpoint'], {'serializer': 'x_serializer'}), '(x_target, x_endpoint, 
serializer=x_serializer)\n', (2586, 2633), False, 'from senlin.common import messaging\n'), ((3430, 3441), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3439, 3441), False, 'import mock\n'), ((3461, 3472), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3470, 3472), False, 'import mock\n'), ((3492, 3503), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3501, 3503), False, 'import mock\n'), ((3576, 3587), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3585, 3587), False, 'import mock\n'), ((3681, 3692), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3690, 3692), False, 'import mock\n'), ((3784, 3795), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (3793, 3795), False, 'import mock\n'), ((3863, 3906), 'senlin.common.messaging.get_rpc_client', 'messaging.get_rpc_client', (['x_topic', 'x_server'], {}), '(x_topic, x_server)\n', (3887, 3906), False, 'from senlin.common import messaging\n'), ((4842, 4853), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4851, 4853), False, 'import mock\n'), ((4873, 4884), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4882, 4884), False, 'import mock\n'), ((4904, 4915), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4913, 4915), False, 'import mock\n'), ((4939, 4950), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (4948, 4950), False, 'import mock\n'), ((5026, 5037), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5035, 5037), False, 'import mock\n'), ((5129, 5140), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5138, 5140), False, 'import mock\n'), ((5208, 5276), 'senlin.common.messaging.get_rpc_client', 'messaging.get_rpc_client', (['x_topic', 'x_server'], {'serializer': 'x_serializer'}), '(x_topic, x_server, serializer=x_serializer)\n', (5232, 5276), False, 'from senlin.common import messaging\n')] |
#!/usr/bin/python3
import os
from shutil import rmtree
from os.path import join
from common.bash import execute_bash_script
# Name of the feature branch whose generated manifests should be removed;
# injected into the environment by the CI workflow.
feature_branch_name = os.environ["FEATURE_BRANCH"]
feature_branch_folder = join("./cluster/development/kon", feature_branch_name)
# Delete the generated manifest folder, then commit and push the removal
# as the CI bot user. rmtree raises if the folder does not exist.
rmtree(feature_branch_folder)
execute_bash_script(
    [
        "git config user.name github-actions",
        "git config user.email <EMAIL>",
        "git add .",
        # NOTE: the two string pieces below are concatenated into one command.
        "git commit -m "
        f"'Remove generated feature branch {feature_branch_name}'",
        "git push",
    ]
)
| [
"common.bash.execute_bash_script",
"os.path.join",
"shutil.rmtree"
] | [((203, 257), 'os.path.join', 'join', (['"""./cluster/development/kon"""', 'feature_branch_name'], {}), "('./cluster/development/kon', feature_branch_name)\n", (207, 257), False, 'from os.path import join\n'), ((258, 287), 'shutil.rmtree', 'rmtree', (['feature_branch_folder'], {}), '(feature_branch_folder)\n', (264, 287), False, 'from shutil import rmtree\n'), ((289, 493), 'common.bash.execute_bash_script', 'execute_bash_script', (['[\'git config user.name github-actions\', \'git config user.email <EMAIL>\',\n \'git add .\',\n f"git commit -m \'Remove generated feature branch {feature_branch_name}\'",\n \'git push\']'], {}), '([\'git config user.name github-actions\',\n \'git config user.email <EMAIL>\', \'git add .\',\n f"git commit -m \'Remove generated feature branch {feature_branch_name}\'",\n \'git push\'])\n', (308, 493), False, 'from common.bash import execute_bash_script\n')] |
"""Create materialized view for unique adverts
Revision ID: <KEY>
Revises: ec6065fc7ea3
Create Date: 2020-10-01 11:47:32.801222
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "ec6065fc7ea3"
branch_labels = None
depends_on = None
def upgrade():
    """Create mv_unique_adverts_by_date.

    The materialized view groups adverts by page, body hash (md5) and
    creation date, filtering out the templated/NULL creative bodies.
    """
    op.execute(
        """
        CREATE MATERIALIZED VIEW mv_unique_adverts_by_date
        AS SELECT adverts.page_id,
        md5(adverts.ad_creative_body) AS body_hash,
        to_char(adverts.ad_creation_time::date::timestamp with time zone, 'yyyy-mm-dd'::text) AS ad_creation_date
        FROM adverts
        WHERE adverts.ad_creative_body <> '{{product.brand}}'::text AND adverts.ad_creative_body IS NOT NULL
        GROUP BY adverts.page_id, (md5(adverts.ad_creative_body)), (to_char(adverts.ad_creation_time::date::timestamp with time zone, 'yyyy-mm-dd'::text));
        """
    )
def downgrade():
    """Drop the materialized view created by upgrade().

    Bug fix: the original statement was ``DROP IF EXISTS MATERIALIZED VIEW
    ...``, which is not valid PostgreSQL — the ``IF EXISTS`` clause must
    follow ``MATERIALIZED VIEW``, so the downgrade always failed.
    """
    op.execute("DROP MATERIALIZED VIEW IF EXISTS mv_unique_adverts_by_date;")
| [
"alembic.op.execute"
] | [((334, 942), 'alembic.op.execute', 'op.execute', (['"""\n CREATE MATERIALIZED VIEW mv_unique_adverts_by_date\n AS SELECT adverts.page_id,\n md5(adverts.ad_creative_body) AS body_hash,\n to_char(adverts.ad_creation_time::date::timestamp with time zone, \'yyyy-mm-dd\'::text) AS ad_creation_date\n FROM adverts\n WHERE adverts.ad_creative_body <> \'{{product.brand}}\'::text AND adverts.ad_creative_body IS NOT NULL\n GROUP BY adverts.page_id, (md5(adverts.ad_creative_body)), (to_char(adverts.ad_creation_time::date::timestamp with time zone, \'yyyy-mm-dd\'::text));\n """'], {}), '(\n """\n CREATE MATERIALIZED VIEW mv_unique_adverts_by_date\n AS SELECT adverts.page_id,\n md5(adverts.ad_creative_body) AS body_hash,\n to_char(adverts.ad_creation_time::date::timestamp with time zone, \'yyyy-mm-dd\'::text) AS ad_creation_date\n FROM adverts\n WHERE adverts.ad_creative_body <> \'{{product.brand}}\'::text AND adverts.ad_creative_body IS NOT NULL\n GROUP BY adverts.page_id, (md5(adverts.ad_creative_body)), (to_char(adverts.ad_creation_time::date::timestamp with time zone, \'yyyy-mm-dd\'::text));\n """\n )\n', (344, 942), False, 'from alembic import op\n'), ((970, 1043), 'alembic.op.execute', 'op.execute', (['"""DROP IF EXISTS MATERIALIZED VIEW mv_unique_adverts_by_date;"""'], {}), "('DROP IF EXISTS MATERIALIZED VIEW mv_unique_adverts_by_date;')\n", (980, 1043), False, 'from alembic import op\n')] |
import theano
import numpy
# CRF implementation based on Lample et al.
# "Neural Architectures for Named Entity Recognition"
floatX=theano.config.floatX
def log_sum(x, axis=None):
    """Numerically stable log-sum-exp: log(sum(exp(x))) along *axis*.

    The per-axis maximum is subtracted before exponentiation so exp()
    cannot overflow, then added back outside the log.
    """
    x_max_value = x.max(axis=axis)
    x_max_tensor = x.max(axis=axis, keepdims=True)
    return x_max_value + theano.tensor.log(theano.tensor.exp(x - x_max_tensor).sum(axis=axis))
def forward(observation_weights, transition_weights, return_best_sequence=False):
    """CRF forward pass over a batch.

    With return_best_sequence=False: returns the log partition function
    (log-sum over all label paths). With return_best_sequence=True:
    Viterbi decoding — returns (best label sequence, per-step best scores).
    """
    def recurrence(observation_weights, previous_scores, transition_weights):
        # previous_scores: best/summed path scores ending at each label at t-1.
        previous_scores = previous_scores.dimshuffle(0, 1, 'x')
        observation_weights = observation_weights.dimshuffle(0, 'x', 1)
        # scores[b, i, j]: score of moving from label i to label j at step t.
        scores = previous_scores + observation_weights + transition_weights.dimshuffle('x', 0, 1)
        if return_best_sequence:
            best_scores = scores.max(axis=1)
            # argmax over the source label: backpointers for Viterbi backtracking.
            best_states = scores.argmax(axis=1)
            return best_scores, best_states
        else:
            return log_sum(scores, axis=1)
    initial = observation_weights[0]
    crf_states, _ = theano.scan(
        fn=recurrence,
        outputs_info=(initial, None) if return_best_sequence else initial,
        sequences=[observation_weights[1:],],
        non_sequences=transition_weights
    )
    if return_best_sequence:
        # Backtrack through the stored argmax pointers, scanning in reverse
        # starting from the best final label.
        sequence, _ = theano.scan(
            fn=lambda beta_i, previous: beta_i[theano.tensor.arange(previous.shape[0]), previous],
            outputs_info=theano.tensor.cast(theano.tensor.argmax(crf_states[0][-1], axis=1), 'int32'),
            sequences=theano.tensor.cast(crf_states[1][::-1], 'int32')
        )
        # Re-reverse and append the final label to complete the sequence.
        sequence = theano.tensor.concatenate([sequence[::-1], [theano.tensor.argmax(crf_states[0][-1], axis=1)]])
        return sequence, crf_states[0]
    else:
        return log_sum(crf_states[-1], axis=1)
def construct(name, input_tensor, n_labels, gold_labels, fn_create_parameter_matrix):
    """Build the CRF loss components and decoder for a label tagging layer.

    Two extra labels (indices n_labels and n_labels+1) act as synthetic
    start/end states; positions are padded accordingly. Returns
    (all_paths_scores, real_paths_scores, best_sequence, scores).
    """
    transition_weights = fn_create_parameter_matrix(name + "_crf_transition_weights", (n_labels + 2, n_labels + 2))
    # Large negative constant used to make padded states effectively impossible.
    small = -1000.0
    padding_start = theano.tensor.zeros((input_tensor.shape[0], 1, n_labels + 2)) + small
    padding_start = theano.tensor.set_subtensor(padding_start[:,:,-2], 0.0)
    padding_end = theano.tensor.zeros((input_tensor.shape[0], 1, n_labels + 2)) + small
    padding_end = theano.tensor.set_subtensor(padding_end[:,:,-1], 0.0)
    # Extend the label dimension with the two synthetic states, then add the
    # start/end padding positions along the word dimension.
    observation_weights = theano.tensor.concatenate([input_tensor, theano.tensor.zeros((input_tensor.shape[0], input_tensor.shape[1], 2)) + small], axis=2)
    observation_weights = theano.tensor.concatenate([padding_start, observation_weights, padding_end], axis=1)
    observation_weights = observation_weights.dimshuffle(1,0,2) # reordering the tensor (words, sentences, labels)
    # Score from tags
    real_paths_scores = input_tensor[theano.tensor.arange(input_tensor.shape[0])[:, numpy.newaxis], theano.tensor.arange(input_tensor.shape[1]), gold_labels].sum(axis=1)
    # Score from transition_weights
    padding_id_start = theano.tensor.zeros((gold_labels.shape[0], 1), dtype=numpy.int32) + n_labels
    padding_id_end = theano.tensor.zeros((gold_labels.shape[0], 1), dtype=numpy.int32) + n_labels + 1
    padded_gold_labels = theano.tensor.concatenate([padding_id_start, gold_labels, padding_id_end], axis=1)
    # Sum transition scores along consecutive gold label pairs.
    real_paths_scores += transition_weights[
        padded_gold_labels[theano.tensor.arange(gold_labels.shape[0])[:, numpy.newaxis], theano.tensor.arange(gold_labels.shape[1] + 1)],
        padded_gold_labels[theano.tensor.arange(gold_labels.shape[0])[:, numpy.newaxis], theano.tensor.arange(gold_labels.shape[1] + 1) + 1]
    ].sum(axis=1)
    all_paths_scores = forward(observation_weights, transition_weights)
    best_sequence, scores = forward(observation_weights, transition_weights, return_best_sequence=True)
    # Strip the synthetic start/end positions and labels before returning.
    scores = scores.dimshuffle(1,0,2)[:,:-1,:-2]
    best_sequence = best_sequence.dimshuffle(1,0)[:,1:-1]
    return all_paths_scores, real_paths_scores, best_sequence, scores
| [
"theano.tensor.exp",
"theano.scan",
"theano.tensor.cast",
"theano.tensor.argmax",
"theano.tensor.arange",
"theano.tensor.zeros",
"theano.tensor.set_subtensor",
"theano.tensor.concatenate"
] | [((1045, 1217), 'theano.scan', 'theano.scan', ([], {'fn': 'recurrence', 'outputs_info': '((initial, None) if return_best_sequence else initial)', 'sequences': '[observation_weights[1:]]', 'non_sequences': 'transition_weights'}), '(fn=recurrence, outputs_info=(initial, None) if\n return_best_sequence else initial, sequences=[observation_weights[1:]],\n non_sequences=transition_weights)\n', (1056, 1217), False, 'import theano\n'), ((2142, 2199), 'theano.tensor.set_subtensor', 'theano.tensor.set_subtensor', (['padding_start[:, :, -2]', '(0.0)'], {}), '(padding_start[:, :, -2], 0.0)\n', (2169, 2199), False, 'import theano\n'), ((2304, 2359), 'theano.tensor.set_subtensor', 'theano.tensor.set_subtensor', (['padding_end[:, :, -1]', '(0.0)'], {}), '(padding_end[:, :, -1], 0.0)\n', (2331, 2359), False, 'import theano\n'), ((2540, 2628), 'theano.tensor.concatenate', 'theano.tensor.concatenate', (['[padding_start, observation_weights, padding_end]'], {'axis': '(1)'}), '([padding_start, observation_weights, padding_end],\n axis=1)\n', (2565, 2628), False, 'import theano\n'), ((3197, 3283), 'theano.tensor.concatenate', 'theano.tensor.concatenate', (['[padding_id_start, gold_labels, padding_id_end]'], {'axis': '(1)'}), '([padding_id_start, gold_labels, padding_id_end],\n axis=1)\n', (3222, 3283), False, 'import theano\n'), ((2052, 2113), 'theano.tensor.zeros', 'theano.tensor.zeros', (['(input_tensor.shape[0], 1, n_labels + 2)'], {}), '((input_tensor.shape[0], 1, n_labels + 2))\n', (2071, 2113), False, 'import theano\n'), ((2216, 2277), 'theano.tensor.zeros', 'theano.tensor.zeros', (['(input_tensor.shape[0], 1, n_labels + 2)'], {}), '((input_tensor.shape[0], 1, n_labels + 2))\n', (2235, 2277), False, 'import theano\n'), ((2993, 3058), 'theano.tensor.zeros', 'theano.tensor.zeros', (['(gold_labels.shape[0], 1)'], {'dtype': 'numpy.int32'}), '((gold_labels.shape[0], 1), dtype=numpy.int32)\n', (3012, 3058), False, 'import theano\n'), ((3091, 3156), 'theano.tensor.zeros', 
'theano.tensor.zeros', (['(gold_labels.shape[0], 1)'], {'dtype': 'numpy.int32'}), '((gold_labels.shape[0], 1), dtype=numpy.int32)\n', (3110, 3156), False, 'import theano\n'), ((1538, 1586), 'theano.tensor.cast', 'theano.tensor.cast', (['crf_states[1][::-1]', '"""int32"""'], {}), "(crf_states[1][::-1], 'int32')\n", (1556, 1586), False, 'import theano\n'), ((2425, 2495), 'theano.tensor.zeros', 'theano.tensor.zeros', (['(input_tensor.shape[0], input_tensor.shape[1], 2)'], {}), '((input_tensor.shape[0], input_tensor.shape[1], 2))\n', (2444, 2495), False, 'import theano\n'), ((312, 347), 'theano.tensor.exp', 'theano.tensor.exp', (['(x - x_max_tensor)'], {}), '(x - x_max_tensor)\n', (329, 347), False, 'import theano\n'), ((1457, 1504), 'theano.tensor.argmax', 'theano.tensor.argmax', (['crf_states[0][-1]'], {'axis': '(1)'}), '(crf_states[0][-1], axis=1)\n', (1477, 1504), False, 'import theano\n'), ((1660, 1707), 'theano.tensor.argmax', 'theano.tensor.argmax', (['crf_states[0][-1]'], {'axis': '(1)'}), '(crf_states[0][-1], axis=1)\n', (1680, 1707), False, 'import theano\n'), ((2863, 2906), 'theano.tensor.arange', 'theano.tensor.arange', (['input_tensor.shape[1]'], {}), '(input_tensor.shape[1])\n', (2883, 2906), False, 'import theano\n'), ((2800, 2843), 'theano.tensor.arange', 'theano.tensor.arange', (['input_tensor.shape[0]'], {}), '(input_tensor.shape[0])\n', (2820, 2843), False, 'import theano\n'), ((1361, 1400), 'theano.tensor.arange', 'theano.tensor.arange', (['previous.shape[0]'], {}), '(previous.shape[0])\n', (1381, 1400), False, 'import theano\n'), ((3414, 3460), 'theano.tensor.arange', 'theano.tensor.arange', (['(gold_labels.shape[1] + 1)'], {}), '(gold_labels.shape[1] + 1)\n', (3434, 3460), False, 'import theano\n'), ((3352, 3394), 'theano.tensor.arange', 'theano.tensor.arange', (['gold_labels.shape[0]'], {}), '(gold_labels.shape[0])\n', (3372, 3394), False, 'import theano\n'), ((3490, 3532), 'theano.tensor.arange', 'theano.tensor.arange', 
(['gold_labels.shape[0]'], {}), '(gold_labels.shape[0])\n', (3510, 3532), False, 'import theano\n'), ((3552, 3598), 'theano.tensor.arange', 'theano.tensor.arange', (['(gold_labels.shape[1] + 1)'], {}), '(gold_labels.shape[1] + 1)\n', (3572, 3598), False, 'import theano\n')] |
from flask import Flask
from lib.router import Router
app = Flask(__name__)
Router.run(app) | [
"lib.router.Router.run",
"flask.Flask"
] | [((61, 76), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (66, 76), False, 'from flask import Flask\n'), ((78, 93), 'lib.router.Router.run', 'Router.run', (['app'], {}), '(app)\n', (88, 93), False, 'from lib.router import Router\n')] |
from psycopg2 import sql
'''
@name -> retrieveOldCustomTime
@param (dbConnection) -> db connection object
@param (cursor) -> db cursor object
@return -> a python list of 5 integers that means [year, month, day, hour, minute]
@about -> This model will retrieve the last row from the same table with every call.
-> The column 'time_custom' will have a value that constantly changes every five minutes.
'''
def retrieveOldCustomTime(dbConnection, cursor):
query = 'SELECT time_custom FROM last_custom_time_stamp ORDER BY id DESC LIMIT 1'
cursor.execute(query)
oldTimeTuple = cursor.fetchone()
return oldTimeTuple[0]
'''
@name -> insertNewCustomTime
@param (dbConnection) -> db connection object
@param (cursor) -> db cursor object
@return -> None
'''
def insertNewCustomTime(dbConnection, cursor, time):
query = sql.SQL('INSERT INTO last_custom_time_stamp (time_custom) VALUES ({})').format(sql.Literal(time))
cursor.execute(query)
dbConnection.commit() | [
"psycopg2.sql.Literal",
"psycopg2.sql.SQL"
] | [((916, 933), 'psycopg2.sql.Literal', 'sql.Literal', (['time'], {}), '(time)\n', (927, 933), False, 'from psycopg2 import sql\n'), ((837, 908), 'psycopg2.sql.SQL', 'sql.SQL', (['"""INSERT INTO last_custom_time_stamp (time_custom) VALUES ({})"""'], {}), "('INSERT INTO last_custom_time_stamp (time_custom) VALUES ({})')\n", (844, 908), False, 'from psycopg2 import sql\n')] |
from django.conf.urls import *
from djangocms_comments.views import SaveComment
urlpatterns = [
url(r'^ajax/save_comment$', SaveComment.as_view(), name='djangocms_comments_save_comment'),
]
| [
"djangocms_comments.views.SaveComment.as_view"
] | [((130, 151), 'djangocms_comments.views.SaveComment.as_view', 'SaveComment.as_view', ([], {}), '()\n', (149, 151), False, 'from djangocms_comments.views import SaveComment\n')] |
"""
Functions to load and process Ausgrid dataset.
The dataset contains 300 users with their location, PV production and electrical consumption.
The timeline for this dataset is 3 years separated in 3 files.
"""
import os
import pickle
import numpy as np
import pandas as pd
from pandas.tseries.offsets import Day
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from ...common import enable_cuda
DATA_PATH_ROOT = os.path.expanduser('~/Documents/Deep_Learning_Resources/datasets/ausgrid')
FILE_PATH_DICT = {
'2010-2011': '2010-2011 Solar home electricity data.csv',
'2011-2012': '2011-2012 Solar home electricity data.csv',
'2012-2013': '2012-2013 Solar home electricity data.csv'
}
DATA_FRAME_PATH_DICT = {
'2011-2012': '2011-2012 Solar home electricity data.pkl'
}
def process_reshape_data_frame(year='2011-2012'):
assert year in FILE_PATH_DICT
fname = os.path.join(DATA_PATH_ROOT, FILE_PATH_DICT[year])
d_raw = pd.read_csv(fname, skiprows=1, parse_dates=['date'], dayfirst=True, na_filter=False,
dtype={'Row Quality': str})
d0, d1 = d_raw.date.min(), d_raw.date.max()
index = pd.date_range(d0, d1 + Day(1), freq='30T', closed='left')
customers = sorted(d_raw.Customer.unique())
channels = ['GC', 'GG', 'CL']
empty_cols = pd.MultiIndex(levels=[customers, channels], labels=[[], []], names=['Customer', 'Channel'])
df = pd.DataFrame(index=index, columns=empty_cols)
missing_records = []
for c in customers:
d_c = d_raw[d_raw.Customer == c]
for ch in channels:
d_c_ch = d_c[d_c['Consumption Category'] == ch]
ts = d_c_ch.iloc[:, 5:-1].values.ravel()
if len(ts) != len(index):
missing_records.append((c, ch, len(ts)))
else:
df[c, ch] = ts
d_customer_cap = d_raw[['Customer', 'Generator Capacity']]
gen_cap = d_customer_cap.groupby('Customer')['Generator Capacity'].mean()
d_customer_post = d_raw[['Customer', 'Postcode']]
postcode = d_customer_post.groupby('Customer')['Postcode'].mean()
return df, missing_records, gen_cap, postcode
def save_data_frame(year='2011-2012'):
path = os.path.join(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year])
df, missing_records, gen_cap, postcode = process_reshape_data_frame(year)
data_dict = {
'df': df,
'miss_records': missing_records,
'gen_cap': gen_cap,
'postcode': postcode
}
with open(path, 'wb') as f:
pickle.dump(data_dict, f)
def load_data_frame(year='2011-2012'):
path = os.path.join(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year])
with open(path, 'rb') as f:
data_dict = pickle.load(f)
df, missing_records, gen_cap, postcode = data_dict['df'], data_dict['miss_records'], data_dict['gen_cap'], \
data_dict['postcode']
return df, missing_records, gen_cap, postcode
def create_training_data(year='2011-2012'):
""" Create numpy format of training data.
We treat generation capacity and postcode as user profiles
Args:
year: aus year to extract
Returns: data with shape (300, 366, 48, 2) -> (user, day, half hour, consumption/PV)
generation capacity (300,)
postcode (300,)
user_id (300,)
"""
df, _, gen_cap, postcode = load_data_frame(year)
data = np.zeros(shape=(300, 366, 48, 2))
for i in range(300):
data[i] = df[i + 1].values.reshape(366, 48, -1)[:, :, :2]
user_id = np.arange(0, 300)
return data, gen_cap.values, postcode.values, user_id
class AusgridDataSet(Dataset):
def __init__(self, year='2011-2012', train=True, transform=None, target_transform=None):
data, gen_cap, postcode, user_id = create_training_data(year)
if train:
self.data = data[:, :300, :, :]
else:
self.data = data[:, 300:, :, :]
self.num_days = self.data.shape[1]
self.data = self.data.reshape((-1, 48, 2)).transpose((0, 2, 1))
self.gen_cap = gen_cap
self.postcode = postcode
self.user_id = user_id
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
data = self.data[index]
gen_cap = self.gen_cap[index // self.num_days]
postcode = self.postcode[index // self.num_days]
user_id = self.user_id[index // self.num_days]
if self.transform is not None:
data = self.transform(data)
if self.target_transform is not None:
gen_cap, postcode, user_id = self.target_transform(gen_cap, postcode, user_id)
return data, (gen_cap, postcode, user_id)
def __len__(self):
return len(self.data)
def get_ausgrid_default_transform():
return None
def get_ausgrid_dataset(train, transform=None):
if transform is None:
transform = get_ausgrid_default_transform()
return AusgridDataSet(train=train, transform=transform)
def get_ausgrid_dataloader(train, batch_size=128, transform=None):
kwargs = {'num_workers': 1, 'pin_memory': True} if enable_cuda else {}
dataset = get_ausgrid_dataset(train=train, transform=transform)
data_loader = DataLoader(dataset, batch_size, shuffle=True, **kwargs)
return data_loader
| [
"pickle.dump",
"pandas.MultiIndex",
"pandas.read_csv",
"numpy.arange",
"torch.utils.data.dataloader.DataLoader",
"os.path.join",
"pickle.load",
"numpy.zeros",
"pandas.DataFrame",
"pandas.tseries.offsets.Day",
"os.path.expanduser"
] | [((465, 539), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents/Deep_Learning_Resources/datasets/ausgrid"""'], {}), "('~/Documents/Deep_Learning_Resources/datasets/ausgrid')\n", (483, 539), False, 'import os\n'), ((934, 984), 'os.path.join', 'os.path.join', (['DATA_PATH_ROOT', 'FILE_PATH_DICT[year]'], {}), '(DATA_PATH_ROOT, FILE_PATH_DICT[year])\n', (946, 984), False, 'import os\n'), ((997, 1113), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'skiprows': '(1)', 'parse_dates': "['date']", 'dayfirst': '(True)', 'na_filter': '(False)', 'dtype': "{'Row Quality': str}"}), "(fname, skiprows=1, parse_dates=['date'], dayfirst=True,\n na_filter=False, dtype={'Row Quality': str})\n", (1008, 1113), True, 'import pandas as pd\n'), ((1351, 1447), 'pandas.MultiIndex', 'pd.MultiIndex', ([], {'levels': '[customers, channels]', 'labels': '[[], []]', 'names': "['Customer', 'Channel']"}), "(levels=[customers, channels], labels=[[], []], names=[\n 'Customer', 'Channel'])\n", (1364, 1447), True, 'import pandas as pd\n'), ((1452, 1497), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'index', 'columns': 'empty_cols'}), '(index=index, columns=empty_cols)\n', (1464, 1497), True, 'import pandas as pd\n'), ((2243, 2299), 'os.path.join', 'os.path.join', (['DATA_PATH_ROOT', 'DATA_FRAME_PATH_DICT[year]'], {}), '(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year])\n', (2255, 2299), False, 'import os\n'), ((2636, 2692), 'os.path.join', 'os.path.join', (['DATA_PATH_ROOT', 'DATA_FRAME_PATH_DICT[year]'], {}), '(DATA_PATH_ROOT, DATA_FRAME_PATH_DICT[year])\n', (2648, 2692), False, 'import os\n'), ((3454, 3487), 'numpy.zeros', 'np.zeros', ([], {'shape': '(300, 366, 48, 2)'}), '(shape=(300, 366, 48, 2))\n', (3462, 3487), True, 'import numpy as np\n'), ((3593, 3610), 'numpy.arange', 'np.arange', (['(0)', '(300)'], {}), '(0, 300)\n', (3602, 3610), True, 'import numpy as np\n'), ((5306, 5361), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['dataset', 'batch_size'], {'shuffle': 
'(True)'}), '(dataset, batch_size, shuffle=True, **kwargs)\n', (5316, 5361), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((2558, 2583), 'pickle.dump', 'pickle.dump', (['data_dict', 'f'], {}), '(data_dict, f)\n', (2569, 2583), False, 'import pickle\n'), ((2745, 2759), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2756, 2759), False, 'import pickle\n'), ((1217, 1223), 'pandas.tseries.offsets.Day', 'Day', (['(1)'], {}), '(1)\n', (1220, 1223), False, 'from pandas.tseries.offsets import Day\n')] |
"""
Manages the network negotiation portion of a client connection.
Node connects here first to get the node and Pool ID, then is redirected to the PUB/SUB ZMQ interface.
"""
import zmq
import sqlalchemy as sa
from sqlalchemy.orm import Session
import server.constants as const
import threading
import pickle
from proto.negotiation_pb2 import Negotiation
from server.db.mappings import Pool, Node
class Broker:
"""
"""
def __init__(self, host, port, user, password, dbname, verbose=False):
self.run = True
self.pool_id = const.POOL_ID
context = zmq.Context()
self.socket = context.socket(zmq.REP)
self.socket.bind(f"tcp://*:{const.BROKER_PORT}")
self.host = host
self.port = port
self.user = user
self.password = password
self.dbname = dbname
self.engine = sa.create_engine(f"mysql://{self.user}:{self.password}@{self.host}:{self.port}/"
f"{self.dbname}", echo=verbose)
self.session = Session(self.engine)
self.pool = self.session.get(Pool, self.pool_id)
if self.pool is None:
self.pool = Pool(id=self.pool_id)
self.session.add(self.pool)
self.session.commit()
self.work_thread = threading.Thread(target=self._work)
self.work_thread.start()
def _work(self):
while self.run:
message = self.socket.recv()
negotiation = Negotiation()
negotiation.ParseFromString(message)
node_name = negotiation.node_name or 'Node'
response = Negotiation()
if negotiation.node_proposes_id:
db_node = self.session.get(Node, {'id': negotiation.node_id, 'pool_id': self.pool_id})
valid = Broker.check_id_exists(negotiation.node_id) and negotiation.pool_id == self.pool_id and\
db_node is not None
print(f"Node connection ({node_name}) proposes ID {negotiation.node_id}. Valid: {valid}.")
if valid:
db_node.name = node_name
self.session.commit()
response.server_approve = True
response.node_id = negotiation.node_id
response.pool_id = negotiation.pool_id
response.collector_port = const.COLLECTOR_PORT
else:
response.server_approve = False
else:
node_id = Broker.generate_new_id()
print(f"Node connection ({node_name}) assigned new ID {node_id}.")
self.session.add(Node(id=node_id, pool_id=self.pool_id, name=node_name))
self.session.commit()
response.server_approve = True
response.pool_id = self.pool_id
response.node_id = node_id
response.collector_port = const.COLLECTOR_PORT
self.socket.send(response.SerializeToString())
@staticmethod
def generate_new_id():
"""
Checks the BROKER_FILE if the current node has an id, if not creates a new id at the end of the file.
:return: ID generated for the node
"""
if not const.BROKER_FILE.exists():
data = [0, ]
Broker.to_broker_file(data)
return 0
else:
try:
data = Broker.from_broker_file()
assert type(data) is list
except Exception as e:
print(f"Broker unable to load existing Node ID file at {const.BROKER_FILE}")
raise e
last_id = max(data)
allocated_id = last_id + 1
data.append(allocated_id)
Broker.to_broker_file(data)
return allocated_id
@staticmethod
def check_id_exists(node_id: int):
"""
Checks if node_id exists and is taken by another node in the pool.
:param node_id: ID that will be checked
:return: True if exists, false if not
"""
if not const.BROKER_FILE.exists():
return False
data = Broker.from_broker_file()
assert type(data) is list
return node_id in data
@staticmethod
def release_id(node_id: int):
"""
Releases node_id if the node is removed from the pool.
:param node_id: ID to be removed
"""
if not const.BROKER_FILE.exists():
raise IOError("Broker File {const.BROKER_FILE} does not exist!")
data = Broker.from_broker_file()
assert type(data) is list
data: list
data.remove(node_id)
Broker.to_broker_file(data)
@staticmethod
def from_broker_file():
"""
Reads data from the broker file
:return:
"""
with open(const.BROKER_FILE, 'rb') as f:
return pickle.load(f)
@staticmethod
def to_broker_file(data):
"""
Writes data to the broker file
:param data:
"""
with open(const.BROKER_FILE, 'wb') as f:
pickle.dump(data, f)
| [
"pickle.dump",
"sqlalchemy.create_engine",
"sqlalchemy.orm.Session",
"pickle.load",
"server.db.mappings.Node",
"proto.negotiation_pb2.Negotiation",
"server.constants.BROKER_FILE.exists",
"threading.Thread",
"server.db.mappings.Pool",
"zmq.Context"
] | [((586, 599), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (597, 599), False, 'import zmq\n'), ((863, 981), 'sqlalchemy.create_engine', 'sa.create_engine', (['f"""mysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.dbname}"""'], {'echo': 'verbose'}), "(\n f'mysql://{self.user}:{self.password}@{self.host}:{self.port}/{self.dbname}'\n , echo=verbose)\n", (879, 981), True, 'import sqlalchemy as sa\n'), ((1038, 1058), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (1045, 1058), False, 'from sqlalchemy.orm import Session\n'), ((1295, 1330), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._work'}), '(target=self._work)\n', (1311, 1330), False, 'import threading\n'), ((1171, 1192), 'server.db.mappings.Pool', 'Pool', ([], {'id': 'self.pool_id'}), '(id=self.pool_id)\n', (1175, 1192), False, 'from server.db.mappings import Pool, Node\n'), ((1477, 1490), 'proto.negotiation_pb2.Negotiation', 'Negotiation', ([], {}), '()\n', (1488, 1490), False, 'from proto.negotiation_pb2 import Negotiation\n'), ((1619, 1632), 'proto.negotiation_pb2.Negotiation', 'Negotiation', ([], {}), '()\n', (1630, 1632), False, 'from proto.negotiation_pb2 import Negotiation\n'), ((3241, 3267), 'server.constants.BROKER_FILE.exists', 'const.BROKER_FILE.exists', ([], {}), '()\n', (3265, 3267), True, 'import server.constants as const\n'), ((4076, 4102), 'server.constants.BROKER_FILE.exists', 'const.BROKER_FILE.exists', ([], {}), '()\n', (4100, 4102), True, 'import server.constants as const\n'), ((4431, 4457), 'server.constants.BROKER_FILE.exists', 'const.BROKER_FILE.exists', ([], {}), '()\n', (4455, 4457), True, 'import server.constants as const\n'), ((4891, 4905), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4902, 4905), False, 'import pickle\n'), ((5100, 5120), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5111, 5120), False, 'import pickle\n'), ((2649, 2703), 'server.db.mappings.Node', 'Node', ([], {'id': 
'node_id', 'pool_id': 'self.pool_id', 'name': 'node_name'}), '(id=node_id, pool_id=self.pool_id, name=node_name)\n', (2653, 2703), False, 'from server.db.mappings import Pool, Node\n')] |
import multiprocessing as mp
import os
import re
import string
from collections import OrderedDict
from typing import Callable, List, Optional, Union
import spacy
import vaex
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from textacy.preprocessing import make_pipeline, normalize, remove, replace
from .configs import Languages
# more [here](https://github.com/fastai/fastai/blob/master/fastai/text/core.py#L42)
# and [here](https://textacy.readthedocs.io/en/latest/api_reference/preprocessing.html)
# fmt: off
_re_normalize_acronyms = re.compile(r"(?:[a-zA-Z]\.){2,}")
def normalize_acronyms(t: str) -> str:
return _re_normalize_acronyms.sub(t.translate(str.maketrans("", "", string.punctuation)).upper(), t)
_re_non_word = re.compile(r"[^A-Za-z]+")
def remove_non_word(t: str) -> str:
"Removes non-words characters and digits from the text using the regex `[^A-Za-z]+`"
return _re_non_word.sub(" ", t)
_re_space = re.compile(r" {2,}")
def normalize_useless_spaces(t: str) -> str:
return _re_space.sub(" ", t)
_re_rep = re.compile(r"(\S)(\1{2,})")
def normalize_repeating_chars(t: str) -> str:
def _replace_rep(m):
c, cc = m.groups()
return c
return _re_rep.sub(_replace_rep, t)
_re_wrep = re.compile(r"(?:\s|^)(\w+)\s+((?:\1\s+)+)\1(\s|\W|$)")
def normalize_repeating_words(t: str) -> str:
def _replace_wrep(m):
c, cc, e = m.groups()
return c
return _re_wrep.sub(_replace_wrep, t)
_re_remove_numbers = re.compile(r"\d+")
def remove_numbers(t: str) -> str:
return _re_remove_numbers.sub(" ", t)
def lowercase(t: str) -> str:
"Lowercases the text"
return t.lower()
def strip(t: str) -> str:
return t.strip()
def lemmatize_remove_stopwords(doc: spacy.tokens.doc.Doc) -> str:
return " ".join(
[t.lemma_ for t in doc if t.lemma_ != "-PRON-" and not t.is_stop]
)
def remove_stopwords(doc: spacy.tokens.doc.Doc) -> str:
return " ".join([t.text for t in doc if not t.is_stop])
def lemmatize_keep_stopwords(doc: spacy.tokens.doc.Doc) -> str:
return " ".join([t.lemma_ for t in doc if t.lemma_ != "-PRON-"])
def identity(t):
return t
# fmt: on
class PreprocessingPipeline:
def __init__(
self,
language: str,
pre_steps: Optional[List[str]],
lemmatization_step: Optional[str],
post_steps: Optional[List[str]],
):
self.language = language
self.pre_steps = pre_steps
self.lemmatization_step = lemmatization_step
self.post_steps = post_steps
self.pre = self.make_pipe_component(self.pre_steps, self.language)
self.post = self.make_pipe_component(self.post_steps, self.language)
self.nlp = self.make_nlp(self.lemmatization_step, self.language)
self.lemma = self.make_lemma(self.lemmatization_step, self.language)
# def apply_multiproc(fn, series):
# with mp.Pool(mp.cpu_count()) as pool:
# new_series = pool.map(fn, series)
# return new_series
def vaex_process(self, df: DataFrame, text_column: str) -> DataFrame:
def fn(t):
return self.post(self.lemma(self.nlp(self.pre(t))))
vdf = vaex.from_pandas(df)
vdf["processed_text"] = vdf.apply(
fn, arguments=[vdf[text_column]], vectorize=False
)
df = vdf.to_pandas_df()
return df
# def __call__(self, series: Series) -> Series:
# if self.pre:
# series = series.map(self.pre)
# if self.lemma:
# total_steps = len(series) // 100
# res = []
# pbar = st.progress(0)
# for i, doc in enumerate(
# self.nlp.pipe(series, batch_size=500, n_process=os.cpu_count())
# ):
# res.append(self.lemma(doc))
# if i % total_steps == 0:
# pbar.progress(1)
# series = pd.Series(res)
# if self.post:
# series = series.map(self.post)
# return series
@classmethod
def make_pipe_component(cls, steps: Optional[List[str]], language: str) -> Callable:
if not steps:
return identity
elif language in ("MultiLanguage", "Chinese") and "remove_non_words" in steps:
idx = steps.index("remove_non_words")
steps = (
steps[:idx]
+ ["remove_numbers", "remove_punctuation"]
+ steps[idx + 1 :]
)
components = [cls.pipeline_components()[step] for step in steps]
return make_pipeline(*components)
@staticmethod
def make_nlp(
lemmatization_step: Optional[str], language: str
) -> Union[spacy.language.Language, Callable]:
if (
lemmatization_step is None
or lemmatization_step == "Disable lemmatizer"
or (
lemmatization_step == "Spacy lemmatizer (keep stopwords)"
and language in ("MultiLanguage", "Chinese")
)
):
return identity
return spacy.load(Languages[language].value, disable=["parser", "ner"])
@classmethod
def make_lemma(cls, lemmatization_step: Optional[str], language: str) -> Callable:
if (
lemmatization_step is None
or lemmatization_step == "Disable lemmatizer"
or (
lemmatization_step == "Spacy lemmatizer (keep stopwords)"
and language in ("MultiLanguage", "Chinese")
)
):
return identity
elif (
lemmatization_step == "Spacy lemmatizer (remove stopwords)"
and language in ("MultiLanguage", "Chinese")
):
return cls.lemmatization_component().get("Remove stopwords")
return cls.lemmatization_component().get(lemmatization_step)
@staticmethod
def pipeline_components() -> "OrderedDict[str, Callable]":
"""Returns available cleaning steps in order"""
return OrderedDict(
[
("lowercase", lowercase),
("normalize_unicode", normalize.unicode),
("normalize_bullet_points", normalize.bullet_points),
("normalize_hyphenated_words", normalize.hyphenated_words),
("normalize_quotation_marks", normalize.quotation_marks),
("normalize_whitespaces", normalize.whitespace),
("replace_urls", replace.urls),
("replace_currency_symbols", replace.currency_symbols),
("replace_emails", replace.emails),
("replace_emojis", replace.emojis),
("replace_hashtags", replace.hashtags),
("replace_numbers", replace.numbers),
("replace_phone_numbers", replace.phone_numbers),
("replace_user_handles", replace.user_handles),
("normalize_acronyms", normalize_acronyms),
("remove_accents", remove.accents),
("remove_brackets", remove.brackets),
("remove_html_tags", remove.html_tags),
("remove_punctuation", remove.punctuation),
("remove_non_words", remove_non_word),
("remove_numbers", remove_numbers),
("normalize_useless_spaces", normalize_useless_spaces),
("normalize_repeating_chars", normalize_repeating_chars),
("normalize_repeating_words", normalize_repeating_words),
("strip", strip),
]
)
@staticmethod
def lemmatization_component() -> "OrderedDict[str, Optional[Callable]]":
return OrderedDict(
[
("Spacy lemmatizer (keep stopwords)", lemmatize_keep_stopwords),
("Spacy lemmatizer (remove stopwords)", lemmatize_remove_stopwords),
("Disable lemmatizer", identity),
("Remove stopwords", remove_stopwords),
]
)
| [
"collections.OrderedDict",
"re.compile",
"spacy.load",
"vaex.from_pandas",
"textacy.preprocessing.make_pipeline"
] | [((571, 604), 're.compile', 're.compile', (['"""(?:[a-zA-Z]\\\\.){2,}"""'], {}), "('(?:[a-zA-Z]\\\\.){2,}')\n", (581, 604), False, 'import re\n'), ((766, 790), 're.compile', 're.compile', (['"""[^A-Za-z]+"""'], {}), "('[^A-Za-z]+')\n", (776, 790), False, 'import re\n'), ((967, 986), 're.compile', 're.compile', (['""" {2,}"""'], {}), "(' {2,}')\n", (977, 986), False, 'import re\n'), ((1078, 1106), 're.compile', 're.compile', (['"""(\\\\S)(\\\\1{2,})"""'], {}), "('(\\\\S)(\\\\1{2,})')\n", (1088, 1106), False, 'import re\n'), ((1275, 1336), 're.compile', 're.compile', (['"""(?:\\\\s|^)(\\\\w+)\\\\s+((?:\\\\1\\\\s+)+)\\\\1(\\\\s|\\\\W|$)"""'], {}), "('(?:\\\\s|^)(\\\\w+)\\\\s+((?:\\\\1\\\\s+)+)\\\\1(\\\\s|\\\\W|$)')\n", (1285, 1336), False, 'import re\n'), ((1515, 1533), 're.compile', 're.compile', (['"""\\\\d+"""'], {}), "('\\\\d+')\n", (1525, 1533), False, 'import re\n'), ((3219, 3239), 'vaex.from_pandas', 'vaex.from_pandas', (['df'], {}), '(df)\n', (3235, 3239), False, 'import vaex\n'), ((4602, 4628), 'textacy.preprocessing.make_pipeline', 'make_pipeline', (['*components'], {}), '(*components)\n', (4615, 4628), False, 'from textacy.preprocessing import make_pipeline, normalize, remove, replace\n'), ((5104, 5168), 'spacy.load', 'spacy.load', (['Languages[language].value'], {'disable': "['parser', 'ner']"}), "(Languages[language].value, disable=['parser', 'ner'])\n", (5114, 5168), False, 'import spacy\n'), ((6042, 7223), 'collections.OrderedDict', 'OrderedDict', (["[('lowercase', lowercase), ('normalize_unicode', normalize.unicode), (\n 'normalize_bullet_points', normalize.bullet_points), (\n 'normalize_hyphenated_words', normalize.hyphenated_words), (\n 'normalize_quotation_marks', normalize.quotation_marks), (\n 'normalize_whitespaces', normalize.whitespace), ('replace_urls',\n replace.urls), ('replace_currency_symbols', replace.currency_symbols),\n ('replace_emails', replace.emails), ('replace_emojis', replace.emojis),\n ('replace_hashtags', replace.hashtags), 
('replace_numbers', replace.\n numbers), ('replace_phone_numbers', replace.phone_numbers), (\n 'replace_user_handles', replace.user_handles), ('normalize_acronyms',\n normalize_acronyms), ('remove_accents', remove.accents), (\n 'remove_brackets', remove.brackets), ('remove_html_tags', remove.\n html_tags), ('remove_punctuation', remove.punctuation), (\n 'remove_non_words', remove_non_word), ('remove_numbers', remove_numbers\n ), ('normalize_useless_spaces', normalize_useless_spaces), (\n 'normalize_repeating_chars', normalize_repeating_chars), (\n 'normalize_repeating_words', normalize_repeating_words), ('strip', strip)]"], {}), "([('lowercase', lowercase), ('normalize_unicode', normalize.\n unicode), ('normalize_bullet_points', normalize.bullet_points), (\n 'normalize_hyphenated_words', normalize.hyphenated_words), (\n 'normalize_quotation_marks', normalize.quotation_marks), (\n 'normalize_whitespaces', normalize.whitespace), ('replace_urls',\n replace.urls), ('replace_currency_symbols', replace.currency_symbols),\n ('replace_emails', replace.emails), ('replace_emojis', replace.emojis),\n ('replace_hashtags', replace.hashtags), ('replace_numbers', replace.\n numbers), ('replace_phone_numbers', replace.phone_numbers), (\n 'replace_user_handles', replace.user_handles), ('normalize_acronyms',\n normalize_acronyms), ('remove_accents', remove.accents), (\n 'remove_brackets', remove.brackets), ('remove_html_tags', remove.\n html_tags), ('remove_punctuation', remove.punctuation), (\n 'remove_non_words', remove_non_word), ('remove_numbers', remove_numbers\n ), ('normalize_useless_spaces', normalize_useless_spaces), (\n 'normalize_repeating_chars', normalize_repeating_chars), (\n 'normalize_repeating_words', normalize_repeating_words), ('strip', strip)])\n", (6053, 7223), False, 'from collections import OrderedDict\n'), ((7696, 7926), 'collections.OrderedDict', 'OrderedDict', (["[('Spacy lemmatizer (keep stopwords)', lemmatize_keep_stopwords), (\n 'Spacy lemmatizer (remove 
stopwords)', lemmatize_remove_stopwords), (\n 'Disable lemmatizer', identity), ('Remove stopwords', remove_stopwords)]"], {}), "([('Spacy lemmatizer (keep stopwords)', lemmatize_keep_stopwords\n ), ('Spacy lemmatizer (remove stopwords)', lemmatize_remove_stopwords),\n ('Disable lemmatizer', identity), ('Remove stopwords', remove_stopwords)])\n", (7707, 7926), False, 'from collections import OrderedDict\n')] |
'''
Vortex OpenSplice
This software and documentation are Copyright 2006 to TO_YEAR ADLINK
Technology Limited, its affiliated companies and licensors. All rights
reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import threading
import time
import redis
from api.emulation import Config, EmulationStatus
from api.emulation.task_queue import TaskQueue
from api.emulation.task_worker import TaskWorker
class TaskManager:
def __init__(self):
self.task_queue = TaskQueue()
self.task_worker = TaskWorker()
self.redis_connection = redis.StrictRedis(
host=Config.FRONTEND_IP, port=Config.REDIS_PORT, password=Config.REDIS_PASSWORD,
encoding="utf-8", decode_responses=True)
self.wait_task()
def wait_task(self):
self.execution_thread = threading.Thread(target=self._exectue_task, name='execution_thread', args=())
self.execution_thread.daemon = True
self.execution_thread.start()
def get_available_device(self):
def check_heartbeat(ip_address):
TIME_LIMIT = 2
current_time = float(self.redis_connection.time()[0])
worker_time = float(self.redis_connection.hget(ip_address, "time"))
return current_time - worker_time < TIME_LIMIT
time.sleep(1)
avaliable_ip_address = []
for ip in self.redis_connection.scan_iter("ip:*"):
if check_heartbeat(ip):
avaliable_ip_address.append(ip)
return avaliable_ip_address
def get_all_tasks(self):
pending_tasks = self.task_queue.get_all_tasks()
executing_task = self.task_worker.executing_task
executing_task['emulation_status'] = self.get_executing_task_status()
return [executing_task] + pending_tasks
def get_task_size(self):
return self.task_queue.get_pending_task_size() + int(self.task_worker.get_executing_task_id() != '0')
def add_task_into_queue(self, task: dict):
new_task = self.task_queue.add_task(task)
return new_task
def _manager_is_running(self):
"""
This function is used to testting.
"""
return True
def _exectue_task(self):
while self._manager_is_running():
if self.task_queue.get_pending_task_size() > 0:
print('execute task')
task = self.task_queue.get_first_task()
self.task_worker.execute_task(task)
print('finish task')
time.sleep(1)
def _abort_executing_task(self):
aborted_task = self.task_worker.abort_executing_task()
return aborted_task
def _cancel_task_from_queue(self, task_id):
canceled_task = self.task_queue.cancel_pending_task(task_id)
return canceled_task
def delete_task(self, task_id):
if self.task_worker.get_executing_task_id() == task_id:
print('abort')
deleted_task = self._abort_executing_task()
else:
deleted_task = self._cancel_task_from_queue(task_id)
return deleted_task
def get_executing_task(self):
return self.task_worker.executing_task
def get_executing_task_status(self):
return self.task_worker.get_executing_task_status()
| [
"api.emulation.task_worker.TaskWorker",
"time.sleep",
"redis.StrictRedis",
"threading.Thread",
"api.emulation.task_queue.TaskQueue"
] | [((944, 955), 'api.emulation.task_queue.TaskQueue', 'TaskQueue', ([], {}), '()\n', (953, 955), False, 'from api.emulation.task_queue import TaskQueue\n'), ((983, 995), 'api.emulation.task_worker.TaskWorker', 'TaskWorker', ([], {}), '()\n', (993, 995), False, 'from api.emulation.task_worker import TaskWorker\n'), ((1028, 1172), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'Config.FRONTEND_IP', 'port': 'Config.REDIS_PORT', 'password': 'Config.REDIS_PASSWORD', 'encoding': '"""utf-8"""', 'decode_responses': '(True)'}), "(host=Config.FRONTEND_IP, port=Config.REDIS_PORT, password\n =Config.REDIS_PASSWORD, encoding='utf-8', decode_responses=True)\n", (1045, 1172), False, 'import redis\n'), ((1276, 1353), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._exectue_task', 'name': '"""execution_thread"""', 'args': '()'}), "(target=self._exectue_task, name='execution_thread', args=())\n", (1292, 1353), False, 'import threading\n'), ((1755, 1768), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1765, 1768), False, 'import time\n'), ((2964, 2977), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2974, 2977), False, 'import time\n')] |
#!/usr/bin/env python
"""Configuration values."""
from os import path
import click
EXP_DATA_FP = path.abspath(path.join(path.dirname(__file__), "data"))
@click.command()
@click.argument("key", type=click.STRING)
def cli(key):
"""Print a configuration value."""
if key in globals().keys():
print(globals()[key])
else:
raise Exception(
"Requested configuration key not available! Available keys: "
+ str([key_name for key_name in globals().keys() if key_name.isupper()])
+ "."
)
if __name__ == "__main__":
cli() # pylint: disable=no-value-for-parameter
| [
"os.path.dirname",
"click.argument",
"click.command"
] | [((158, 173), 'click.command', 'click.command', ([], {}), '()\n', (171, 173), False, 'import click\n'), ((175, 215), 'click.argument', 'click.argument', (['"""key"""'], {'type': 'click.STRING'}), "('key', type=click.STRING)\n", (189, 215), False, 'import click\n'), ((122, 144), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'from os import path\n')] |
import logging
from arago.hiro.actionhandler.plugin.stonebranch.action.stonebranch_exec_unix_command_action import \
StonebranchExecUnixCommandAction
from arago.hiro.actionhandler.plugin.stonebranch.stonebranch_instance import StonebranchInstance
from arago.hiro.actionhandler.plugin.stonebranch.stonebranch_rest_client import StonebranchRestClient
clientRepository = {}
clientRepository['prototype'] = StonebranchRestClient(StonebranchInstance(
host='stonebranch.cloud',
username='username',
password='password',
))
def test():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('test')
client = clientRepository['prototype']
StonebranchExecUnixCommandAction.exec_task(client, {
'instance': 'prototype',
'agent': 'name',
'command': 'true',
})
logger.info('done')
test()
| [
"arago.hiro.actionhandler.plugin.stonebranch.stonebranch_instance.StonebranchInstance",
"logging.getLogger",
"logging.basicConfig",
"arago.hiro.actionhandler.plugin.stonebranch.action.stonebranch_exec_unix_command_action.StonebranchExecUnixCommandAction.exec_task"
] | [((431, 523), 'arago.hiro.actionhandler.plugin.stonebranch.stonebranch_instance.StonebranchInstance', 'StonebranchInstance', ([], {'host': '"""stonebranch.cloud"""', 'username': '"""username"""', 'password': '"""password"""'}), "(host='stonebranch.cloud', username='username', password\n ='password')\n", (450, 523), False, 'from arago.hiro.actionhandler.plugin.stonebranch.stonebranch_instance import StonebranchInstance\n'), ((553, 593), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (572, 593), False, 'import logging\n'), ((607, 632), 'logging.getLogger', 'logging.getLogger', (['"""test"""'], {}), "('test')\n", (624, 632), False, 'import logging\n'), ((681, 798), 'arago.hiro.actionhandler.plugin.stonebranch.action.stonebranch_exec_unix_command_action.StonebranchExecUnixCommandAction.exec_task', 'StonebranchExecUnixCommandAction.exec_task', (['client', "{'instance': 'prototype', 'agent': 'name', 'command': 'true'}"], {}), "(client, {'instance': 'prototype',\n 'agent': 'name', 'command': 'true'})\n", (723, 798), False, 'from arago.hiro.actionhandler.plugin.stonebranch.action.stonebranch_exec_unix_command_action import StonebranchExecUnixCommandAction\n')] |
# -*- coding: utf-8 -*-
from six.moves import cStringIO as StringIO
import codecs
import collections
import datetime
import imghdr
import json
import logging
import os
import pkg_resources
import pprint
import re
import six
import socket
import subprocess
import sys
import unittest
import uuid
from jinja2 import Template
from pygments import highlight
from pygments import formatters
from pygments import lexers
from .color_text import red, yellow, green
# Default stdout
stdout = sys.stdout
status_dict = {
'success': (green, 'Success'),
'fail': (red, 'Fail'),
'error': (red, 'Error'),
'skip': (yellow, 'Skip'),
}
pygments_css = formatters.HtmlFormatter().get_style_defs()
def safe_unicode(s):
if not s:
return six.u('')
try:
if six.PY2:
if isinstance(s, unicode):
return s
if isinstance(s, str):
return unicode(s, 'utf-8', errors='replace')
return s
else:
return str(s)
except Exception as e:
return six.u(e)
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = \
super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
@six.add_metaclass(Singleton)
class Config(object):
@property
def dest_path(self):
if not hasattr(self, '_dest_path'):
self.dest_path = 'html'
return self._dest_path
@dest_path.setter
def dest_path(self, x):
if not os.path.exists(x):
os.makedirs(x)
self._dest_path = x
def get_context(self):
return {
'hostname': socket.gethostname(),
'date': datetime.datetime.now(),
'links': getattr(self, 'links', []),
}
class TestFormatter(logging.Formatter):
"""
Format log entry as json (logger, level, message)
"""
def format(self, record):
if record.args:
msg = record.msg % record.args
else:
msg = record.msg
return json.dumps((record.name, record.levelname, msg))
class ImageResult(object):
def __init__(self, result, expected=None):
self.result = self.write_img(result)
self.expected = self.write_img(expected)
if expected:
self.rmse = self.compare_rmse(self.result, self.expected)
else:
self.rmse = None
def write_img(self, data):
"""
Save image and return filename.
"""
if data is None:
return
img_type = imghdr.what(None, data)
if not img_type:
try:
with open(data, 'rb') as infile:
data = infile.read()
except:
return
img_type = imghdr.what(None, data)
if not img_type:
return
filename = 'img-%s.%s' % (str(uuid.uuid4()), img_type)
destpath = os.path.join(Config().dest_path, 'img')
if not os.path.exists(destpath):
os.makedirs(destpath)
with open(os.path.join(destpath, filename), 'wb') as outfile:
outfile.write(data)
return os.path.join('img', filename)
def compare_rmse(self, img1, img2):
"""
Compute RMSE difference between `img1` and `img2` and return filename
for the diff image.
"""
filename = os.path.join('img', 'img-%s.png' % str(uuid.uuid4()))
dest_path = Config().dest_path
cmd = ['compare', '-metric', 'rmse', img1, img2, filename]
try:
subprocess.call(cmd, cwd=dest_path)
except Exception as e:
stdout.write("Enable to run ImageMagic compare: %s: %s\n"
% (e.__class__.__name__, e))
if os.path.exists(os.path.join(dest_path, filename)):
return filename
def to_dict(self):
return {
'result': self.result,
'expected': self.expected,
'rmse': self.rmse,
}
class FileResult(object):
"""
File to be add to test report.
"""
def __init__(self, **kwargs):
path = os.path.join(Config().dest_path, 'data')
if not os.path.exists(path):
os.makedirs(path)
self.filename = 'file-%s' % str(uuid.uuid4())
self.filepath = os.path.join(path, self.filename)
content = kwargs.get('content', "")
with open(self.filepath, 'w') as outfile:
outfile.write(content)
self.title = safe_unicode(kwargs.get('title', self.filename))
def to_dict(self):
return {
'title': self.title,
'filename': os.path.join('data', self.filename),
}
class MethodResult(object):
"""
Report for one test.
"""
def __init__(self, status, test, logs):
if hasattr(test, 'test'):
# We are using nosetest. `test` is a nose wrapper.
test = test.test
self.uid = str(uuid.uuid4())
self.status = status
try:
color, self.status_title = status_dict[status]
self.status_color = color(self.status_title, stdout)
except KeyError:
self.status_color = self.status_title = 'Unknown'
test_class = test.__class__
self.name = "%s.%s.%s" % (
test_class.__module__, test_class.__name__,
getattr(test, '_testMethodName', 'test')
)
images = []
for img in getattr(test, '_images', ()):
img = ImageResult(img.get('result'), img.get('expected'))
images.append(img.to_dict())
files = []
for f in getattr(test, '_files', ()):
files.append(FileResult(**f).to_dict())
context = {
'name': self.name,
'status': status,
'status_title': self.status_title,
'doc_class': safe_unicode(test_class.__doc__),
'doc_test': safe_unicode(getattr(test, '_testMethodDoc', '')),
'console': safe_unicode(logs.get('console')),
'logs': logs.get('log'),
'tracebacks': logs.get('tracebacks'),
'reason': safe_unicode(logs.get('reason')),
'images': images,
'files': files,
'pygments_css': pygments_css,
}
context.update(Config().get_context())
template = Template(
pkg_resources.resource_string(
'html_test_report',
os.path.join('templates', 'test-case.html')).decode('utf-8')
)
self.url = self.name + '.html'
filename = os.path.join(Config().dest_path, self.url)
with codecs.open(filename, 'w', encoding="utf-8") as outfile:
outfile.write(template.render(context))
class TbFrame(object):
"""
Expose one frame of a traceback to jinja2.
"""
CodeLine = collections.namedtuple(
'CodeLine', ('lineno', 'code', 'highlight', 'extended'))
VarLine = collections.namedtuple('VarLine', ('name', 'value'))
coding_regex = re.compile(
six.b(r"^[ \t\f]*#.*?coding[:=][ \t]*(?P<coding>[-_.a-zA-Z0-9]+)"))
def __init__(self, frame, lineno):
self.frame = frame
self.filename = frame.f_code.co_filename
self.lineno = lineno
self.name = frame.f_code.co_name
self.id = str(uuid.uuid4())
@staticmethod
def get_charset(filename):
with open(filename, 'rb') as srcfile:
for i in range(2):
l = srcfile.readline()
m = TbFrame.coding_regex.match(l)
if m:
return m.group('coding').decode('ascii')
if six.PY2:
return u'ascii'
else:
return u'utf-8'
@property
def code_fragment(self):
fragment_length = 50
start = max(1, self.lineno - fragment_length)
stop = self.lineno + fragment_length
lexer = lexers.Python3Lexer(stripnl=False)
formatter = formatters.HtmlFormatter(full=False, linenos=False)
loader = self.frame.f_globals.get('__loader__')
module_name = self.frame.f_globals.get('__name__') or ''
source = None
if loader is not None and hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is None:
try:
charset = self.get_charset(self.filename)
with codecs.open(self.filename, 'r', encoding=charset) as infile:
source = infile.read()
except IOError:
return
try:
for lineno, frag in enumerate(
formatter._highlight_lines(
formatter._format_lines(
lexer.get_tokens(source))),
start=1):
if lineno >= start:
yield self.CodeLine(
lineno, frag[1].rstrip(),
lineno == self.lineno,
lineno <= self.lineno - 2 or lineno >= self.lineno + 2
)
if lineno >= stop:
break
except UnicodeDecodeError as e:
yield self.CodeLine(None, six.u(str(e)), True, False)
@property
def loc_vars(self):
lexer_text = lexers.TextLexer()
lexer = lexers.Python3Lexer(stripnl=False)
formatter = formatters.HtmlFormatter(full=False, linenos=False)
for name, value in sorted(self.frame.f_locals.items()):
try:
value = pprint.pformat(value, indent=4)
value = highlight(value, lexer, formatter)
except Exception:
try:
value = six.u(repr(value))
except Exception as e:
try:
value = six.u(e)
except Exception:
value = ''
value = highlight(value, lexer_text, formatter)
yield self.VarLine(name, value)
class Traceback(object):
"""
Expose one traceback to jinja2.
"""
def __init__(self, name, msg, tb):
self.name = name
lines = msg.splitlines()
self.title = lines[0]
if len(lines) > 1:
self.description = u'\n'.join(lines[1:])
else:
self.description = None
self.tb = tb
def __iter__(self):
tb = self.tb
while tb:
yield TbFrame(tb.tb_frame, tb.tb_lineno)
tb = tb.tb_next
class TracebackHandler(list):
"""
Expose traceback list to jinja2.
"""
@staticmethod
def get_msg(ev):
if six.PY2:
try:
return six.binary_type(ev).decode('utf-8')
except UnicodeEncodeError:
try:
return six.text_type(ev)
except Exception:
return u"encoding error while retreiving message"
else:
try:
return six.text_type(ev)
except Exception:
return u"encoding error while retreiving message"
def __init__(self, exc_info):
etype, evalue, tb = exc_info
if six.PY2:
self.append(Traceback(evalue.__class__.__name__,
self.get_msg(evalue), tb))
else:
while evalue:
self.append(Traceback(evalue.__class__.__name__,
self.get_msg(evalue),
evalue.__traceback__))
evalue = evalue.__context__
self.reverse()
class TestIndex(dict):
def __init__(self, name=None, status=None, url=None):
self._name = name
self._status = status
self._url = url
def append(self, name, status, url):
toks = name.split('.', 1)
if len(toks) == 1:
self[name] = TestIndex(name, status, url)
else:
root, path = toks
if root not in self:
self[root] = TestIndex(root)
self[root].append(path, status, url)
def get_status(self):
if self._status is None:
status_count = {
'success': 0,
'fail': 0,
'error': 0,
'skip': 0,
}
for child in self.values():
status_count[child.get_status()] += 1
for name in ('error', 'fail', 'skip', 'success'):
if status_count[name]:
self._status = name
break
return self._status
def as_json(self):
return {
'title': self._name,
'url': self._url,
'status': self.get_status(),
'childs': [x[1].as_json() for x in sorted(self.items())]
}
class ResultMixIn(object):
"""
Shared code of HtmlTestResult with nose plugin.
"""
def __init__(self, *args, **kwargs):
super(ResultMixIn, self).__init__(*args, **kwargs)
self._results = []
def add_result_method(self, status, test, exc_info=None, reason=None):
"""
Add test result.
"""
logs = {}
if exc_info is not None:
logs['tracebacks'] = TracebackHandler(exc_info)
logs['reason'] = reason
try:
console = self._buffer_console.getvalue()
except AttributeError:
console = None
logs['console'] = console
try:
log = self._buffer_log.getvalue()
log = [json.loads(x) for x in log.splitlines()]
except AttributeError:
log = None
if log:
logs['log'] = log
result = MethodResult(status, test, logs)
stdout.write(result.status_color + "\n")
self._results.append(result)
def startTest(self, test):
stdout.write(
"Run test: %s.%s... " %
(test.__class__.__name__, test._testMethodName))
# Capture stdout and stderr.
self._old_stderr = sys.stderr
self._old_stdout = sys.stdout
self._buffer_console = StringIO()
sys.stdout = sys.stderr = self._buffer_console
# Capture logs
self._old_handlers = []
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
self._old_handlers.append(handler)
self._buffer_log = StringIO()
handler = logging.StreamHandler(stream=self._buffer_log)
handler.setFormatter(TestFormatter())
handler.setLevel(logging.DEBUG)
logging.root.addHandler(handler)
def stopTest(self, test):
# Restore stdout and stderr.
sys.stdout = self._old_stdout
sys.stderr = self._old_stderr
self._buffer_console.close()
self._buffer_console = None
# Restore logs
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
for handler in self._old_handlers:
logging.root.addHandler(handler)
self._buffer_log.close()
self._buffer_log = None
def get_index(self):
index = TestIndex()
for result in self._results:
index.append(result.name, result.status, result.url)
return index
class HtmlTestResult(ResultMixIn, unittest.TestResult):
def addError(self, test, err):
super(HtmlTestResult, self).addError(test, err)
self.add_result_method('error', test, exc_info=err)
def addFailure(self, test, err):
super(HtmlTestResult, self).addFailure(test, err)
self.add_result_method('fail', test, exc_info=err)
def addSuccess(self, test):
super(HtmlTestResult, self).addSuccess(test)
self.add_result_method('success', test)
def addSkip(self, test, reason):
super(HtmlTestResult, self).addSkip(test, reason)
self.add_result_method('skip', test, reason=reason)
def addExpectedFailure(self, test, err):
super(HtmlTestResult, self).addExpectedFailure(self, test, err)
self.add_result_method('fail', test, exc_info=err)
def addUnexpectedSuccess(self, test):
super(HtmlTestResult, self).addUnexpectedSuccess(self, test)
self.add_result_method('fail', test)
class HtmlTestRunner(object):
"""
Alternative to standard unittest TextTestRunner rendering test with full
logs, image, attached file, in a nice html way.
Can be use:
* standalone, just replace `python -m unittest` with `html-test`
"""
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = stream
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
self.resultclass = resultclass
self.start_time = datetime.datetime.now()
def run(self, test):
result = HtmlTestResult(self.verbosity)
test(result)
self.stop_time = datetime.datetime.now()
Report(result).make_report()
print('Time Elapsed: %s' % (self.stop_time - self.start_time))
return result
class Report(object):
def __init__(self, result):
self.result = result
def make_report(self):
"""
Create html report for the tests results.
"""
# Create index data
template = Template("var index = {{data|safe}};")
index_js = os.path.join(Config().dest_path, 'index.js')
with open(index_js, 'w') as outfile:
outfile.write(template.render({
'data': json.dumps(self.result.get_index().as_json(), indent=4)
}))
# Create index page
template = Template(
pkg_resources.resource_string(
'html_test_report',
os.path.join('templates', 'test-case.html')).decode('utf-8')
)
filename = os.path.join(Config().dest_path, 'index.html')
with codecs.open(filename, 'w', encoding="utf-8") as outfile:
outfile.write(template.render(Config().get_context()))
| [
"logging.StreamHandler",
"six.b",
"pygments.highlight",
"jinja2.Template",
"six.text_type",
"six.u",
"os.path.exists",
"json.dumps",
"six.binary_type",
"pygments.lexers.TextLexer",
"subprocess.call",
"pygments.lexers.Python3Lexer",
"socket.gethostname",
"six.moves.cStringIO",
"json.loads... | [((1327, 1355), 'six.add_metaclass', 'six.add_metaclass', (['Singleton'], {}), '(Singleton)\n', (1344, 1355), False, 'import six\n'), ((6947, 7026), 'collections.namedtuple', 'collections.namedtuple', (['"""CodeLine"""', "('lineno', 'code', 'highlight', 'extended')"], {}), "('CodeLine', ('lineno', 'code', 'highlight', 'extended'))\n", (6969, 7026), False, 'import collections\n'), ((7050, 7102), 'collections.namedtuple', 'collections.namedtuple', (['"""VarLine"""', "('name', 'value')"], {}), "('VarLine', ('name', 'value'))\n", (7072, 7102), False, 'import collections\n'), ((657, 683), 'pygments.formatters.HtmlFormatter', 'formatters.HtmlFormatter', ([], {}), '()\n', (681, 683), False, 'from pygments import formatters\n'), ((753, 762), 'six.u', 'six.u', (['""""""'], {}), "('')\n", (758, 762), False, 'import six\n'), ((2132, 2180), 'json.dumps', 'json.dumps', (['(record.name, record.levelname, msg)'], {}), '((record.name, record.levelname, msg))\n', (2142, 2180), False, 'import json\n'), ((2645, 2668), 'imghdr.what', 'imghdr.what', (['None', 'data'], {}), '(None, data)\n', (2656, 2668), False, 'import imghdr\n'), ((3249, 3278), 'os.path.join', 'os.path.join', (['"""img"""', 'filename'], {}), "('img', filename)\n", (3261, 3278), False, 'import os\n'), ((4406, 4439), 'os.path.join', 'os.path.join', (['path', 'self.filename'], {}), '(path, self.filename)\n', (4418, 4439), False, 'import os\n'), ((7142, 7210), 'six.b', 'six.b', (['"""^[ \\\\t\\\\f]*#.*?coding[:=][ \\\\t]*(?P<coding>[-_.a-zA-Z0-9]+)"""'], {}), "('^[ \\\\t\\\\f]*#.*?coding[:=][ \\\\t]*(?P<coding>[-_.a-zA-Z0-9]+)')\n", (7147, 7210), False, 'import six\n'), ((8009, 8043), 'pygments.lexers.Python3Lexer', 'lexers.Python3Lexer', ([], {'stripnl': '(False)'}), '(stripnl=False)\n', (8028, 8043), False, 'from pygments import lexers\n'), ((8064, 8115), 'pygments.formatters.HtmlFormatter', 'formatters.HtmlFormatter', ([], {'full': '(False)', 'linenos': '(False)'}), '(full=False, linenos=False)\n', 
(8088, 8115), False, 'from pygments import formatters\n'), ((9471, 9489), 'pygments.lexers.TextLexer', 'lexers.TextLexer', ([], {}), '()\n', (9487, 9489), False, 'from pygments import lexers\n'), ((9506, 9540), 'pygments.lexers.Python3Lexer', 'lexers.Python3Lexer', ([], {'stripnl': '(False)'}), '(stripnl=False)\n', (9525, 9540), False, 'from pygments import lexers\n'), ((9561, 9612), 'pygments.formatters.HtmlFormatter', 'formatters.HtmlFormatter', ([], {'full': '(False)', 'linenos': '(False)'}), '(full=False, linenos=False)\n', (9585, 9612), False, 'from pygments import formatters\n'), ((14323, 14333), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (14331, 14333), True, 'from six.moves import cStringIO as StringIO\n'), ((14616, 14626), 'six.moves.cStringIO', 'StringIO', ([], {}), '()\n', (14624, 14626), True, 'from six.moves import cStringIO as StringIO\n'), ((14645, 14691), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'self._buffer_log'}), '(stream=self._buffer_log)\n', (14666, 14691), False, 'import logging\n'), ((14786, 14818), 'logging.root.addHandler', 'logging.root.addHandler', (['handler'], {}), '(handler)\n', (14809, 14818), False, 'import logging\n'), ((17111, 17134), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17132, 17134), False, 'import datetime\n'), ((17255, 17278), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17276, 17278), False, 'import datetime\n'), ((17644, 17682), 'jinja2.Template', 'Template', (['"""var index = {{data|safe}};"""'], {}), "('var index = {{data|safe}};')\n", (17652, 17682), False, 'from jinja2 import Template\n'), ((1055, 1063), 'six.u', 'six.u', (['e'], {}), '(e)\n', (1060, 1063), False, 'import six\n'), ((1595, 1612), 'os.path.exists', 'os.path.exists', (['x'], {}), '(x)\n', (1609, 1612), False, 'import os\n'), ((1626, 1640), 'os.makedirs', 'os.makedirs', (['x'], {}), '(x)\n', (1637, 1640), False, 'import os\n'), ((1738, 1758), 'socket.gethostname', 
'socket.gethostname', ([], {}), '()\n', (1756, 1758), False, 'import socket\n'), ((1780, 1803), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1801, 1803), False, 'import datetime\n'), ((2867, 2890), 'imghdr.what', 'imghdr.what', (['None', 'data'], {}), '(None, data)\n', (2878, 2890), False, 'import imghdr\n'), ((3072, 3096), 'os.path.exists', 'os.path.exists', (['destpath'], {}), '(destpath)\n', (3086, 3096), False, 'import os\n'), ((3110, 3131), 'os.makedirs', 'os.makedirs', (['destpath'], {}), '(destpath)\n', (3121, 3131), False, 'import os\n'), ((3654, 3689), 'subprocess.call', 'subprocess.call', (['cmd'], {'cwd': 'dest_path'}), '(cmd, cwd=dest_path)\n', (3669, 3689), False, 'import subprocess\n'), ((3871, 3904), 'os.path.join', 'os.path.join', (['dest_path', 'filename'], {}), '(dest_path, filename)\n', (3883, 3904), False, 'import os\n'), ((4276, 4296), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4290, 4296), False, 'import os\n'), ((4310, 4327), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4321, 4327), False, 'import os\n'), ((4737, 4772), 'os.path.join', 'os.path.join', (['"""data"""', 'self.filename'], {}), "('data', self.filename)\n", (4749, 4772), False, 'import os\n'), ((5049, 5061), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5059, 5061), False, 'import uuid\n'), ((6734, 6778), 'codecs.open', 'codecs.open', (['filename', '"""w"""'], {'encoding': '"""utf-8"""'}), "(filename, 'w', encoding='utf-8')\n", (6745, 6778), False, 'import codecs\n'), ((7418, 7430), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7428, 7430), False, 'import uuid\n'), ((14506, 14541), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (14532, 14541), False, 'import logging\n'), ((15120, 15155), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (15146, 15155), False, 'import logging\n'), ((15211, 15243), 'logging.root.addHandler', 
'logging.root.addHandler', (['handler'], {}), '(handler)\n', (15234, 15243), False, 'import logging\n'), ((18234, 18278), 'codecs.open', 'codecs.open', (['filename', '"""w"""'], {'encoding': '"""utf-8"""'}), "(filename, 'w', encoding='utf-8')\n", (18245, 18278), False, 'import codecs\n'), ((3150, 3182), 'os.path.join', 'os.path.join', (['destpath', 'filename'], {}), '(destpath, filename)\n', (3162, 3182), False, 'import os\n'), ((4368, 4380), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4378, 4380), False, 'import uuid\n'), ((9718, 9749), 'pprint.pformat', 'pprint.pformat', (['value'], {'indent': '(4)'}), '(value, indent=4)\n', (9732, 9749), False, 'import pprint\n'), ((9774, 9808), 'pygments.highlight', 'highlight', (['value', 'lexer', 'formatter'], {}), '(value, lexer, formatter)\n', (9783, 9808), False, 'from pygments import highlight\n'), ((11180, 11197), 'six.text_type', 'six.text_type', (['ev'], {}), '(ev)\n', (11193, 11197), False, 'import six\n'), ((13751, 13764), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (13761, 13764), False, 'import json\n'), ((2973, 2985), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2983, 2985), False, 'import uuid\n'), ((3508, 3520), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3518, 3520), False, 'import uuid\n'), ((8574, 8623), 'codecs.open', 'codecs.open', (['self.filename', '"""r"""'], {'encoding': 'charset'}), "(self.filename, 'r', encoding=charset)\n", (8585, 8623), False, 'import codecs\n'), ((10109, 10148), 'pygments.highlight', 'highlight', (['value', 'lexer_text', 'formatter'], {}), '(value, lexer_text, formatter)\n', (10118, 10148), False, 'from pygments import highlight\n'), ((6548, 6591), 'os.path.join', 'os.path.join', (['"""templates"""', '"""test-case.html"""'], {}), "('templates', 'test-case.html')\n", (6560, 6591), False, 'import os\n'), ((10881, 10900), 'six.binary_type', 'six.binary_type', (['ev'], {}), '(ev)\n', (10896, 10900), False, 'import six\n'), ((11004, 11021), 'six.text_type', 
'six.text_type', (['ev'], {}), '(ev)\n', (11017, 11021), False, 'import six\n'), ((18084, 18127), 'os.path.join', 'os.path.join', (['"""templates"""', '"""test-case.html"""'], {}), "('templates', 'test-case.html')\n", (18096, 18127), False, 'import os\n'), ((10003, 10011), 'six.u', 'six.u', (['e'], {}), '(e)\n', (10008, 10011), False, 'import six\n')] |
# Generated by Django 3.1.3 on 2020-11-26 07:32
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
replaces = [('annotators', '0001_initial'), ('annotators', '0002_auto_20201110_0257'), ('annotators', '0003_auto_20201125_1704'), ('annotators', '0004_auto_20201125_1728')]
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
('email', models.EmailField(max_length=254, unique=True)),
('password', models.CharField(max_length=60, null=True)),
('is_verified', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
],
options={
'db_table': 'accounts',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
('name', models.CharField(max_length=50)),
('status', models.IntegerField(choices=[(0, 'Just Created'), (1, 'In Progress'), (2, 'Finished'), (3, 'Dropped')], default=0)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annotators.account')),
('rules', models.TextField(default='')),
],
options={
'db_table': 'projects',
},
),
migrations.CreateModel(
name='Sentence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
('words', models.JSONField()),
('categories', models.JSONField()),
('derivations', models.JSONField()),
('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='annotators.project')),
('created_at', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now)),
('updated_at', models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now)),
],
options={
'db_table': 'sentences',
},
),
]
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.JSONField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.model... | [((555, 648), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (571, 648), False, 'from django.db import migrations, models\n'), ((672, 739), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'db_index': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(db_index=True, default=uuid.uuid4, editable=False)\n', (688, 739), False, 'from django.db import migrations, models\n'), ((768, 814), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)', 'unique': '(True)'}), '(max_length=254, unique=True)\n', (785, 814), False, 'from django.db import migrations, models\n'), ((846, 888), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)', 'null': '(True)'}), '(max_length=60, null=True)\n', (862, 888), False, 'from django.db import migrations, models\n'), ((923, 957), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (942, 957), False, 'from django.db import migrations, models\n'), ((991, 1030), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1011, 1030), False, 'from django.db import migrations, models\n'), ((1064, 1103), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1084, 1103), False, 'from django.db import migrations, models\n'), ((1313, 1406), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1329, 1406), False, 'from django.db import migrations, models\n'), ((1430, 1497), 'django.db.models.UUIDField', 
'models.UUIDField', ([], {'db_index': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(db_index=True, default=uuid.uuid4, editable=False)\n', (1446, 1497), False, 'from django.db import migrations, models\n'), ((1525, 1556), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1541, 1556), False, 'from django.db import migrations, models\n'), ((1586, 1704), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'Just Created'), (1, 'In Progress'), (2, 'Finished'), (3, 'Dropped')]", 'default': '(0)'}), "(choices=[(0, 'Just Created'), (1, 'In Progress'), (2,\n 'Finished'), (3, 'Dropped')], default=0)\n", (1605, 1704), False, 'from django.db import migrations, models\n'), ((1734, 1773), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1754, 1773), False, 'from django.db import migrations, models\n'), ((1807, 1846), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1827, 1846), False, 'from django.db import migrations, models\n'), ((1876, 1968), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""annotators.account"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'annotators.account')\n", (1893, 1968), False, 'from django.db import migrations, models\n'), ((1992, 2020), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (2008, 2020), False, 'from django.db import migrations, models\n'), ((2231, 2324), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2247, 2324), False, 'from django.db import migrations, models\n'), ((2348, 2415), 
'django.db.models.UUIDField', 'models.UUIDField', ([], {'db_index': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(db_index=True, default=uuid.uuid4, editable=False)\n', (2364, 2415), False, 'from django.db import migrations, models\n'), ((2444, 2462), 'django.db.models.JSONField', 'models.JSONField', ([], {}), '()\n', (2460, 2462), False, 'from django.db import migrations, models\n'), ((2496, 2514), 'django.db.models.JSONField', 'models.JSONField', ([], {}), '()\n', (2512, 2514), False, 'from django.db import migrations, models\n'), ((2549, 2567), 'django.db.models.JSONField', 'models.JSONField', ([], {}), '()\n', (2565, 2567), False, 'from django.db import migrations, models\n'), ((2598, 2690), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""annotators.project"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'annotators.project')\n", (2615, 2690), False, 'from django.db import migrations, models\n'), ((2719, 2793), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (2739, 2793), False, 'from django.db import migrations, models\n'), ((2827, 2901), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'default': 'django.utils.timezone.now'}), '(auto_now_add=True, default=django.utils.timezone.now)\n', (2847, 2901), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
"""
Estimates the static background in a STORM movie.
The estimate is performed by averaging this might
not be the best choice for movies with a high density
of real localizations.
This may be a good choice if you have a largish
fixed background and a relatively low density of
real localizations.
Hazen 8/16
"""
import numpy
class StaticBackgroundException(Exception):
    """Raised when background estimates are requested out of sequence."""

    def __init__(self, message):
        super(StaticBackgroundException, self).__init__(message)
class StaticBGEstimator(object):
    """
    Estimates the background using a simple boxcar average.
    In the case of movies with activation frames, these frames will
    be ignored in the background estimate.
    Note: This expects to be asked for estimates in a sequential
    fashion as would occur during normal STORM movie analysis.
    """
    def __init__(self, frame_reader = None, start_frame = 0, sample_size = 100, descriptor = "1", **kwds):
        """
        frame_reader - object providing filmSize() and loadAFrame(i)
                       access to the movie frames.
        start_frame - frame number on which the first averaging window
                      is centered.
        sample_size - number of frames in the boxcar averaging window.
        descriptor - repeating per-frame pattern string; frames whose
                     descriptor character is "0" are excluded from the
                     average (see shouldIgnore()).
        """
        # estimateBG() requires cur_frame + 1, so start one frame early.
        self.cur_frame = start_frame - 1
        self.descriptor = descriptor
        self.descriptor_len = len(descriptor)
        self.frame_reader = frame_reader
        self.number_averaged = 0
        self.sample_size = sample_size
        [movie_w, movie_h, self.movie_l] = frame_reader.filmSize()
        # Figure out where to start and end the average.
        # Center the window on start_frame.
        end_frame = start_frame + int(self.sample_size/2)
        start_frame = start_frame - int(self.sample_size/2)
        # Clamp the window to the start of the movie; if the movie is
        # shorter than the window, shrink the window to the whole movie.
        if (start_frame < 0):
            start_frame = 0
            end_frame = start_frame + self.sample_size
            if (end_frame > self.movie_l):
                end_frame = self.movie_l
                self.sample_size = self.movie_l
        # Clamp the window to the end of the movie, again shrinking it
        # to the whole movie if necessary.
        if (end_frame > self.movie_l):
            end_frame = self.movie_l
            start_frame = end_frame - self.sample_size
            if (start_frame < 0):
                start_frame = 0
                self.sample_size = self.movie_l
        # Running per-pixel sum over the frames of the current window.
        self.running_sum = numpy.zeros((movie_h, movie_w))
        for i in range(start_frame, end_frame):
            if not self.shouldIgnore(i):
                self.number_averaged += 1
                self.running_sum += self.frame_reader.loadAFrame(i)
    def estimateBG(self, frame_number):
        """
        Return the background estimate (average frame) for frame_number.
        Raises StaticBackgroundException unless frame_number is exactly
        one past the previously requested frame.
        """
        if (frame_number != (self.cur_frame + 1)):
            raise StaticBackgroundException("Received request for an estimate of a non-sequential frame " + str(self.cur_frame) + " " + str(frame_number))
        else:
            self.cur_frame = frame_number
        # Move average forward by 1 frame if possible.
        start_frame = frame_number - int(self.sample_size/2)
        end_frame = frame_number + int(self.sample_size/2)
        if (start_frame > 0) and (end_frame < self.movie_l):
            # Remove old frame.
            if not self.shouldIgnore(start_frame - 1):
                self.number_averaged -= 1
                self.running_sum -= self.frame_reader.loadAFrame(start_frame - 1)
            # Add new frame.
            if not self.shouldIgnore(end_frame):
                self.number_averaged += 1
                self.running_sum += self.frame_reader.loadAFrame(end_frame)
        # Return the current average.
        return self.running_sum/self.number_averaged
    def shouldIgnore(self, frame_number):
        """
        Return True if frame_number should be excluded from the average,
        i.e. its character in the repeating descriptor pattern is "0".
        """
        desc = self.descriptor[frame_number % self.descriptor_len]
        if (desc == "0"):
            return True
        else:
            return False
if (__name__ == "__main__"):
    import argparse
    import storm_analysis.sa_library.datareader as datareader
    import storm_analysis.sa_library.datawriter as datawriter
    import storm_analysis.sa_library.parameters as params
    # Process command line arguments.
    parser = argparse.ArgumentParser(description = 'Running average background subtraction')
    parser.add_argument('--in_movie', dest='in_movie', type=str, required=True,
                        help = "The name of the movie to analyze, can be .dax, .tiff or .spe format.")
    parser.add_argument('--out_movie', dest='out_movie', type=str, required=True,
                        help = "The name of the output movie (with background subtracted). This will be in .dax format.")
    parser.add_argument('--xml', dest='settings', type=str, required=True,
                        help = "The name of the settings xml file.")
    args = parser.parse_args()
    # Load movies and parameters.
    input_movie = datareader.inferReader(args.in_movie)
    [w, h, l] = input_movie.filmSize()
    output_movie = datawriter.DaxWriter(args.out_movie)
    parameters = params.ParametersCommon().initFromFile(args.settings)
    # Analyze at most max_frame frames (-1 means the whole movie).
    n_frames = parameters.getAttr("max_frame")
    if (n_frames > l) or (n_frames == -1):
        n_frames = l
    # Default to a sample size if the settings file does not specify this.
    sample_size = 100
    if (parameters.getAttr("static_background_estimate", 0) > 0):
        sample_size = parameters.getAttr("static_background_estimate")
    else:
        print("Did not find parameter 'static_background_estimate' in parameters file, defaulting to", sample_size)
    sbge = StaticBGEstimator(input_movie,
                             sample_size = sample_size,
                             descriptor = parameters.getAttr("descriptor"))
    # Subtract the estimated background from each frame; the +100 offset
    # keeps the stored values positive (NOTE(review): frames where the
    # background exceeds the signal by more than 100 counts would still
    # clip -- confirm acceptable for .dax output).
    for i in range(n_frames):
        diff = input_movie.loadAFrame(i) - sbge.estimateBG(i) + 100
        output_movie.addFrame(diff)
    output_movie.close()
#
# The MIT License
#
# Copyright (c) 2016 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| [
"storm_analysis.sa_library.datareader.inferReader",
"storm_analysis.sa_library.datawriter.DaxWriter",
"argparse.ArgumentParser",
"numpy.zeros",
"storm_analysis.sa_library.parameters.ParametersCommon"
] | [((3756, 3833), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Running average background subtraction"""'}), "(description='Running average background subtraction')\n", (3779, 3833), False, 'import argparse\n'), ((4453, 4490), 'storm_analysis.sa_library.datareader.inferReader', 'datareader.inferReader', (['args.in_movie'], {}), '(args.in_movie)\n', (4475, 4490), True, 'import storm_analysis.sa_library.datareader as datareader\n'), ((4554, 4590), 'storm_analysis.sa_library.datawriter.DaxWriter', 'datawriter.DaxWriter', (['args.out_movie'], {}), '(args.out_movie)\n', (4574, 4590), True, 'import storm_analysis.sa_library.datawriter as datawriter\n'), ((1947, 1978), 'numpy.zeros', 'numpy.zeros', (['(movie_h, movie_w)'], {}), '((movie_h, movie_w))\n', (1958, 1978), False, 'import numpy\n'), ((4608, 4633), 'storm_analysis.sa_library.parameters.ParametersCommon', 'params.ParametersCommon', ([], {}), '()\n', (4631, 4633), True, 'import storm_analysis.sa_library.parameters as params\n')] |
# Generated by Django 2.1.7 on 2019-02-19 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering two ImageFields:
    UserProfiles.head_portrait (default avatar, max_length=200,
    date-sharded upload path) and ViewPage.image (max_length=500,
    date-sharded upload path).
    """

    dependencies = [
        ('users', '0004_auto_20190218_1809'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofiles',
            name='head_portrait',
            # Default avatar file plus %Y/%m upload path sharding.
            field=models.ImageField(default='users/static/users/image/head_portrait/default.png', max_length=200, upload_to='users/static/users/image/head_portrait/%Y/%m', verbose_name='头像'),
        ),
        migrations.AlterField(
            model_name='viewpage',
            name='image',
            field=models.ImageField(max_length=500, upload_to='users/static/users/image/view_page/%Y/%m', verbose_name='轮播图'),
        ),
    ]
| [
"django.db.models.ImageField"
] | [((347, 533), 'django.db.models.ImageField', 'models.ImageField', ([], {'default': '"""users/static/users/image/head_portrait/default.png"""', 'max_length': '(200)', 'upload_to': '"""users/static/users/image/head_portrait/%Y/%m"""', 'verbose_name': '"""头像"""'}), "(default=\n 'users/static/users/image/head_portrait/default.png', max_length=200,\n upload_to='users/static/users/image/head_portrait/%Y/%m', verbose_name='头像'\n )\n", (364, 533), False, 'from django.db import migrations, models\n'), ((642, 754), 'django.db.models.ImageField', 'models.ImageField', ([], {'max_length': '(500)', 'upload_to': '"""users/static/users/image/view_page/%Y/%m"""', 'verbose_name': '"""轮播图"""'}), "(max_length=500, upload_to=\n 'users/static/users/image/view_page/%Y/%m', verbose_name='轮播图')\n", (659, 754), False, 'from django.db import migrations, models\n')] |
import argparse
import ast
import os
import numpy as np
import cv2
# Command-line interface: output location plus geometry of the shapes
# that are drawn on every generated image.
parser = argparse.ArgumentParser()
parser.add_argument('OutputDirectory', help='The directory where the generated images will be saved')
parser.add_argument('--numberOfImages', help='The number of generated images. Default: 100', type=int, default=100)
parser.add_argument('--imageSize', help="The size of images. Default: '(320, 240)'", default='(320, 240)')
parser.add_argument('--circleCenter', help="The circle center. Default: '(160, 120)'", default='(160, 120)')
parser.add_argument('--circleDiameter', help='The circle diameter. Default: 180', type=int, default=180)
parser.add_argument('--squareCenter', help="The square center. Default: '(210, 150)'", default='(210, 150)')
parser.add_argument('--squareSize', help='The square side length. Default: 120', type=int, default=120)
args = parser.parse_args()
# Tuple-valued arguments arrive as strings; ast.literal_eval parses them
# safely (no arbitrary code execution, unlike eval).
imageSize = ast.literal_eval(args.imageSize)
circleCenter = ast.literal_eval(args.circleCenter)
squareCenter = ast.literal_eval(args.squareCenter)
def main():
    """Write args.numberOfImages grayscale test images to the output directory.

    Each image is a uniform random background with a filled circle and a
    filled square (random gray levels) drawn on top.
    """
    print("generateToyImages.py main()")
    for image_index in range(args.numberOfImages):
        output_filepath = os.path.join(args.OutputDirectory, 'image' + str(image_index) + '.png')
        # Uniform random background level.
        canvas = np.ones((imageSize[1], imageSize[0]), dtype=np.uint8) * np.random.randint(256)
        # Filled circle first, then the filled square on top of it.
        cv2.circle(canvas, circleCenter, args.circleDiameter//2,
                   np.random.randint(256), thickness=cv2.FILLED)
        half_side = args.squareSize // 2
        cv2.rectangle(canvas,
                      (squareCenter[0] - half_side, squareCenter[1] - half_side),
                      (squareCenter[0] + half_side, squareCenter[1] + half_side),
                      np.random.randint(256), thickness=cv2.FILLED)
        cv2.imwrite(output_filepath, canvas)
if __name__ == '__main__':
main() | [
"cv2.imwrite",
"numpy.ones",
"argparse.ArgumentParser",
"ast.literal_eval",
"numpy.random.randint"
] | [((77, 102), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (100, 102), False, 'import argparse\n'), ((895, 927), 'ast.literal_eval', 'ast.literal_eval', (['args.imageSize'], {}), '(args.imageSize)\n', (911, 927), False, 'import ast\n'), ((943, 978), 'ast.literal_eval', 'ast.literal_eval', (['args.circleCenter'], {}), '(args.circleCenter)\n', (959, 978), False, 'import ast\n'), ((994, 1029), 'ast.literal_eval', 'ast.literal_eval', (['args.squareCenter'], {}), '(args.squareCenter)\n', (1010, 1029), False, 'import ast\n'), ((1720, 1753), 'cv2.imwrite', 'cv2.imwrite', (['imageFilepath', 'image'], {}), '(imageFilepath, image)\n', (1731, 1753), False, 'import cv2\n'), ((1245, 1298), 'numpy.ones', 'np.ones', (['(imageSize[1], imageSize[0])'], {'dtype': 'np.uint8'}), '((imageSize[1], imageSize[0]), dtype=np.uint8)\n', (1252, 1298), True, 'import numpy as np\n'), ((1301, 1323), 'numpy.random.randint', 'np.random.randint', (['(256)'], {}), '(256)\n', (1318, 1323), True, 'import numpy as np\n'), ((1388, 1410), 'numpy.random.randint', 'np.random.randint', (['(256)'], {}), '(256)\n', (1405, 1410), True, 'import numpy as np\n'), ((1666, 1688), 'numpy.random.randint', 'np.random.randint', (['(256)'], {}), '(256)\n', (1683, 1688), True, 'import numpy as np\n')] |
# Python packages
import re, json
class Blog:
    """Site-wide settings loaded from a JSON configuration file."""

    def __init__(self, config_file="./config.json"):
        """Load settings from ``config_file``.

        Any key missing from the JSON file falls back to the default
        value shown below.

        Args:
            config_file: path to the JSON configuration file.
        """
        with open(config_file, "r") as handle:
            conf = json.load(handle)

        # Public, site-wide settings.
        self.name = conf.get("blog_name", "Another Blog")
        self.default_author = conf.get("default_author", "Anonymous")
        self.baseurl = conf.get("baseurl", "localhost:5000/")
        self.description = conf.get("description", "Some description.")
        self.contact = conf.get("contact", "Your contact information.")
        self.highlight_style = conf.get("highlight_style", "default")

        # Private directory layout, exposed through the accessors below.
        self._posts_dir = conf.get("posts_dir", "posts/")
        self._assets_dir = conf.get("assets_dir", "assets/")
        theme_root = conf.get("styles_dir", "templates/css/")
        theme_name = conf.get("theme", "default")
        self._theme_dir = theme_root + theme_name + "/"
        self._js_dir = conf.get("js_dir", "templates/js/")
        self._templates_dir = conf.get("templates_dir", "templates/")
        self._static_dir = conf.get("static_dir", "static/")

        # Posts whose filename matches any of these compiled patterns
        # are kept off the index page.
        self._ignore_posts = [
            re.compile(expr) for expr in conf.get("ignore_posts", ["ignore"])
        ]

    # Accessor methods
    def get_posts_dir(self):
        return self._posts_dir

    def get_assets_dir(self):
        return self._assets_dir

    def get_theme_dir(self):
        return self._theme_dir

    def get_js_dir(self):
        return self._js_dir

    def get_templates_dir(self):
        return self._templates_dir

    def get_static_dir(self):
        return self._static_dir

    def get_ignore_posts(self):
        return self._ignore_posts

    def check_included(self, filename):
        """Return True when the post at ``filename`` belongs on the index page.

        Args:
            filename: the path to the markdown file.
        """
        # Only markdown files are candidates.
        if not filename.endswith(".md"):
            return False
        # Hide anything matching one of the ignore patterns.
        return not any(
            re.match(pattern, filename) for pattern in self.get_ignore_posts()
        )
| [
"json.load",
"re.match",
"re.compile"
] | [((579, 593), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (588, 593), False, 'import re, json\n'), ((1571, 1590), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (1581, 1590), False, 'import re, json\n'), ((2487, 2512), 're.match', 're.match', (['REGEX', 'filename'], {}), '(REGEX, filename)\n', (2495, 2512), False, 'import re, json\n')] |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
import swapper
from accelerator_abstract.models import BasePartnerApplicationInterest
class PartnerApplicationInterest(BasePartnerApplicationInterest):
    """Concrete, swappable implementation of BasePartnerApplicationInterest."""

    class Meta(BasePartnerApplicationInterest.Meta):
        # Let downstream projects substitute their own model via the
        # swapper (swappable-models) mechanism.
        swappable = swapper.swappable_setting(
            BasePartnerApplicationInterest.Meta.app_label,
            "PartnerApplicationInterest")
| [
"swapper.swappable_setting"
] | [((284, 390), 'swapper.swappable_setting', 'swapper.swappable_setting', (['BasePartnerApplicationInterest.Meta.app_label', '"""PartnerApplicationInterest"""'], {}), "(BasePartnerApplicationInterest.Meta.app_label,\n 'PartnerApplicationInterest')\n", (309, 390), False, 'import swapper\n')] |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import io
import unittest
import mock
from ebcli.operations import spotops
class TestSpotOps(unittest.TestCase):
    """Tests for spotops.get_spot_instance_types_from_customer.

    `prompt_for_instance_types` is patched in every test so no real
    prompting occurs.
    """

    @mock.patch('ebcli.operations.spotops.prompt_for_instance_types')
    def test_get_spot_instance_types_from_customer__success(
            self,
            prompt_for_instance_types_mock,
    ):
        enable_spot = True
        interactive = True
        prompt_for_instance_types_mock.return_value = 't2.micro, t3.micro'

        # The customer's answer is returned verbatim.
        self.assertEqual(
            't2.micro, t3.micro',
            spotops.get_spot_instance_types_from_customer(interactive, enable_spot))

    @mock.patch('ebcli.operations.spotops.prompt_for_instance_types')
    def test_get_spot_instance_types_from_customer__test_for_prompting(
            self,
            prompt_for_instance_types_mock,
    ):
        prompt_for_instance_types_mock.return_value = ''

        self.assertEqual(
            '',
            spotops.get_spot_instance_types_from_customer(
                interactive=True,
                enable_spot=True,
            )
        )
        prompt_for_instance_types_mock.assert_called_once_with()

    @mock.patch('ebcli.operations.spotops.prompt_for_instance_types')
    def test_get_spot_instance_types_from_customer__enable_spot_not_passed(
            self,
            prompt_for_instance_types_mock,
    ):
        enable_spot = None
        interactive = True

        self.assertFalse(
            spotops.get_spot_instance_types_from_customer(interactive, enable_spot))
        # Bug fix: assert_not_called() previously ran *before* the call
        # under test, so it could never detect an unwanted prompt.
        prompt_for_instance_types_mock.assert_not_called()

    @mock.patch('ebcli.operations.spotops.prompt_for_instance_types')
    def test_get_spot_instance_types_from_customer__interactive_is_disabled(
            self,
            prompt_for_instance_types_mock,
    ):
        enable_spot = False
        interactive = False

        self.assertFalse(
            spotops.get_spot_instance_types_from_customer(interactive, enable_spot))
        # Non-interactive mode must never prompt (checked after the call).
        prompt_for_instance_types_mock.assert_not_called()
| [
"mock.patch",
"ebcli.operations.spotops.get_spot_instance_types_from_customer"
] | [((708, 772), 'mock.patch', 'mock.patch', (['"""ebcli.operations.spotops.prompt_for_instance_types"""'], {}), "('ebcli.operations.spotops.prompt_for_instance_types')\n", (718, 772), False, 'import mock\n'), ((1152, 1216), 'mock.patch', 'mock.patch', (['"""ebcli.operations.spotops.prompt_for_instance_types"""'], {}), "('ebcli.operations.spotops.prompt_for_instance_types')\n", (1162, 1216), False, 'import mock\n'), ((1677, 1741), 'mock.patch', 'mock.patch', (['"""ebcli.operations.spotops.prompt_for_instance_types"""'], {}), "('ebcli.operations.spotops.prompt_for_instance_types')\n", (1687, 1741), False, 'import mock\n'), ((2100, 2164), 'mock.patch', 'mock.patch', (['"""ebcli.operations.spotops.prompt_for_instance_types"""'], {}), "('ebcli.operations.spotops.prompt_for_instance_types')\n", (2110, 2164), False, 'import mock\n'), ((1073, 1144), 'ebcli.operations.spotops.get_spot_instance_types_from_customer', 'spotops.get_spot_instance_types_from_customer', (['interactive', 'enable_spot'], {}), '(interactive, enable_spot)\n', (1118, 1144), False, 'from ebcli.operations import spotops\n'), ((1467, 1553), 'ebcli.operations.spotops.get_spot_instance_types_from_customer', 'spotops.get_spot_instance_types_from_customer', ([], {'interactive': '(True)', 'enable_spot': '(True)'}), '(interactive=True, enable_spot\n =True)\n', (1512, 1553), False, 'from ebcli.operations import spotops\n'), ((2021, 2092), 'ebcli.operations.spotops.get_spot_instance_types_from_customer', 'spotops.get_spot_instance_types_from_customer', (['interactive', 'enable_spot'], {}), '(interactive, enable_spot)\n', (2066, 2092), False, 'from ebcli.operations import spotops\n'), ((2447, 2518), 'ebcli.operations.spotops.get_spot_instance_types_from_customer', 'spotops.get_spot_instance_types_from_customer', (['interactive', 'enable_spot'], {}), '(interactive, enable_spot)\n', (2492, 2518), False, 'from ebcli.operations import spotops\n')] |
# Author: StevenChaoo
# -*- coding:UTF-8 -*-
import json
import logging
import time
import random
import sys
from sklearn_crfsuite import CRF
from sklearn.metrics import classification_report
from util import tools
from tqdm import tqdm
# Module-wide logging: timestamped INFO-level messages.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO
)
logger = logging.getLogger("root")
def sentence2feature(sentences):
    """Build per-token context feature dicts for every sentence.

    Each token yields five features: the token itself, its neighbours
    ('<start>'/'<end>' at sentence boundaries) and the two adjacent
    bigrams. Progress is shown with tqdm.

    Args:
        sentences: list of token lists.

    Returns:
        A list (one entry per sentence) of lists of feature dicts.
    """
    features = []
    for sentence in tqdm(sentences):
        sentence_features = []
        last_index = len(sentence) - 1
        for idx, word in enumerate(sentence):
            prev_word = '<start>' if idx == 0 else sentence[idx - 1]
            next_word = '<end>' if idx == last_index else sentence[idx + 1]
            sentence_features.append({
                "w": word,
                "w-1": prev_word,
                "w+1": next_word,
                "w-1:w": prev_word + word,
                "w:w+1": word + next_word,
            })
        features.append(sentence_features)
    # Return results
    return features
def normalizationLabel(label_lists):
    """Flatten nested tag lists, stripping two-character BIO prefixes.

    Tags longer than one character (e.g. "B-LOC") lose their first two
    characters; single-character tags (e.g. "O") are kept unchanged.
    """
    return [
        tag[2:] if len(tag) > 1 else tag
        for tag_list in label_lists
        for tag in tag_list
    ]
class CRFModel(object):
    """Thin wrapper around sklearn-crfsuite's CRF for train/evaluate."""

    def __init__(self):
        # L2-regularised SGD training, capped at 100 iterations.
        self.model = CRF(
            algorithm='l2sgd',
            c2=0.1,
            max_iterations=100,
        )

    def train(self, features, tag_lists):
        """Fit the CRF on feature dicts and their gold tag sequences."""
        self.model.fit(features, tag_lists)

    def evaluate(self, features, tag_lists):
        """Print a per-label classification report for the given data."""
        predicted = self.model.predict(features)
        gold_flat = normalizationLabel(tag_lists)
        pred_flat = normalizationLabel(predicted)
        print(classification_report(gold_flat, pred_flat))
def dataProcess(path):
    """Read a space-separated "token tag" file into parallel lists.

    Sentences are separated by blank lines (any line without a second
    column ends the current sentence).

    Args:
        path: path to the data file.

    Returns:
        (word_lists, label_lists): each is a list of per-sentence lists.
    """
    word_lists = []
    label_lists = []
    word_list = []
    label_list = []
    # "with" guarantees the handle is closed (the original leaked it).
    with open(path, "r") as f:
        for line in f:
            line_list = line.strip().split(" ")
            if len(line_list) > 1:
                word_list.append(line_list[0])
                label_list.append(line_list[1])
            else:
                word_lists.append(word_list)
                label_lists.append(label_list)
                # Start a fresh sentence. The original never reset these,
                # so every "sentence" was the same ever-growing list.
                word_list = []
                label_list = []
    # Keep the final sentence when the file has no trailing blank line
    # (the original silently dropped it).
    if word_list:
        word_lists.append(word_list)
        label_lists.append(label_list)
    return word_lists, label_lists
def main():
    """Train a CRF on the disease dataset and report test-set metrics."""
    # Load the train/test splits.
    train_words, train_labels = dataProcess("./data/dis/train.txt")
    test_words, test_labels = dataProcess("./data/dis/test.txt")

    # Turn token sequences into context-feature dicts.
    logger.info("Prepare train data")
    train_feats = sentence2feature(train_words)
    logger.info("Prepare test data")
    test_feats = sentence2feature(test_words)

    # Build the model.
    logger.info("Build CRF model")
    model = CRFModel()
    logger.info("Success!")

    # Train.
    logger.info("Begin training")
    model.train(train_feats, train_labels)
    logger.info("Finish training")

    # Evaluate on the held-out test set.
    logger.info("Begin evaluating")
    model.evaluate(test_feats, test_labels)
    logger.info("Finish evaluating")
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"logging.basicConfig",
"logging.getLogger",
"sklearn.metrics.classification_report",
"tqdm.tqdm",
"sklearn_crfsuite.CRF"
] | [((242, 383), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (261, 383), False, 'import logging\n'), ((397, 422), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (414, 422), False, 'import logging\n'), ((527, 542), 'tqdm.tqdm', 'tqdm', (['sentences'], {}), '(sentences)\n', (531, 542), False, 'from tqdm import tqdm\n'), ((1523, 1573), 'sklearn_crfsuite.CRF', 'CRF', ([], {'algorithm': '"""l2sgd"""', 'c2': '(0.1)', 'max_iterations': '(100)'}), "(algorithm='l2sgd', c2=0.1, max_iterations=100)\n", (1526, 1573), False, 'from sklearn_crfsuite import CRF\n'), ((1922, 1963), 'sklearn.metrics.classification_report', 'classification_report', (['real_tag', 'pred_tag'], {}), '(real_tag, pred_tag)\n', (1943, 1963), False, 'from sklearn.metrics import classification_report\n')] |
from setuptools import setup
# Packaging metadata for the "geo" geoprospection helper library.
metadata = dict(
    name='geo',
    version='0.1',
    description='Useful geoprospection processing methods',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['geo'],
    zip_safe=False,
)

setup(**metadata)
| [
"setuptools.setup"
] | [((32, 219), 'setuptools.setup', 'setup', ([], {'name': '"""geo"""', 'version': '"""0.1"""', 'description': '"""Useful geoprospection processing methods"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'packages': "['geo']", 'zip_safe': '(False)'}), "(name='geo', version='0.1', description=\n 'Useful geoprospection processing methods', author='<NAME>',\n author_email='<EMAIL>', license='MIT', packages=['geo'], zip_safe=False)\n", (37, 219), False, 'from setuptools import setup\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = 'setup'
__author__ = 'zhouzhuan'
__mtime__ = '2017/6/29'
"""
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__),'README.rst')) as readme:
README = readme.read()
#allow setup to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__),os.pardir)))
setup(
name="django-polls",
version='0.1',
packages=['polls'],
include_package_data=True,
license='BSD License',
description='A simple Django app to conduct Web-based polls.',
long_description=README,
url='https://github.com/fanandactuaility/mysite',
author='zhouzhuan',
author_email='<EMAIL>',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developer',
'License :: OSI Approved :: BSD Licentse',
'Operation System :: OS Independment',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
]
) | [
"os.path.abspath",
"os.path.dirname",
"setuptools.setup"
] | [((398, 1041), 'setuptools.setup', 'setup', ([], {'name': '"""django-polls"""', 'version': '"""0.1"""', 'packages': "['polls']", 'include_package_data': '(True)', 'license': '"""BSD License"""', 'description': '"""A simple Django app to conduct Web-based polls."""', 'long_description': 'README', 'url': '"""https://github.com/fanandactuaility/mysite"""', 'author': '"""zhouzhuan"""', 'author_email': '"""<EMAIL>"""', 'classifiers': "['Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developer',\n 'License :: OSI Approved :: BSD Licentse',\n 'Operation System :: OS Independment', 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']"}), "(name='django-polls', version='0.1', packages=['polls'],\n include_package_data=True, license='BSD License', description=\n 'A simple Django app to conduct Web-based polls.', long_description=\n README, url='https://github.com/fanandactuaility/mysite', author=\n 'zhouzhuan', author_email='<EMAIL>', classifiers=[\n 'Environment :: Web Environment', 'Framework :: Django',\n 'Intended Audience :: Developer',\n 'License :: OSI Approved :: BSD Licentse',\n 'Operation System :: OS Independment', 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'])\n", (403, 1041), False, 'from setuptools import setup\n'), ((196, 221), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (211, 221), False, 'import os\n'), ((356, 381), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'import os\n')] |
from setuptools import setup
# Packaging configuration for the timely-beliefs distribution.
setup(
    name="timely-beliefs",
    description="Data modelled as beliefs (at a certain time) about events (at a certain time).",
    author="<NAME>",
    author_email="<EMAIL>",
    # PyPI search keywords.
    keywords=[
        "time series",
        "forecasting",
        "analytics",
        "visualization",
        "uncertainty",
        "lineage",
    ],
    version="0.0.6",
    # Runtime dependencies (note the exact altair pin).
    install_requires=[
        "pytz",
        "pandas>=0.24",
        "numpy",
        "pyerf",
        "SQLAlchemy",
        "psycopg2-binary",
        "isodate",
        "openturns",
        "properscoring",
        "altair==3.0.0",
        "selenium",
    ],
    # Test tooling.
    setup_requires=["pytest-runner"],
    tests_require=["pytest"],
    packages=[
        "timely_beliefs",
        "timely_beliefs.beliefs",
        "timely_beliefs.sensors",
        "timely_beliefs.sensors.func_store",
        "timely_beliefs.sources",
        "timely_beliefs.examples",
        "timely_beliefs.visualization",
    ],
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: Information Analysis",
    ],
    long_description="""\
Model to represent data as beliefs about events, stored in the form of
a multi-index pandas DataFrame enriched with attributes to get out convenient representations of the data.
""",
)
| [
"setuptools.setup"
] | [((30, 1422), 'setuptools.setup', 'setup', ([], {'name': '"""timely-beliefs"""', 'description': '"""Data modelled as beliefs (at a certain time) about events (at a certain time)."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'keywords': "['time series', 'forecasting', 'analytics', 'visualization', 'uncertainty',\n 'lineage']", 'version': '"""0.0.6"""', 'install_requires': "['pytz', 'pandas>=0.24', 'numpy', 'pyerf', 'SQLAlchemy', 'psycopg2-binary',\n 'isodate', 'openturns', 'properscoring', 'altair==3.0.0', 'selenium']", 'setup_requires': "['pytest-runner']", 'tests_require': "['pytest']", 'packages': "['timely_beliefs', 'timely_beliefs.beliefs', 'timely_beliefs.sensors',\n 'timely_beliefs.sensors.func_store', 'timely_beliefs.sources',\n 'timely_beliefs.examples', 'timely_beliefs.visualization']", 'include_package_data': '(True)', 'classifiers': "['Programming Language :: Python', 'Programming Language :: Python :: 3',\n 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Scientific/Engineering :: Information Analysis']", 'long_description': '""" Model to represent data as beliefs about events, stored in the form of\n a multi-index pandas DataFrame enriched with attributes to get out convenient representations of the data.\n """'}), '(name=\'timely-beliefs\', description=\n \'Data modelled as beliefs (at a certain time) about events (at a certain time).\'\n , author=\'<NAME>\', author_email=\'<EMAIL>\', keywords=[\'time series\',\n \'forecasting\', \'analytics\', \'visualization\', \'uncertainty\', \'lineage\'],\n version=\'0.0.6\', install_requires=[\'pytz\', \'pandas>=0.24\', \'numpy\',\n \'pyerf\', \'SQLAlchemy\', \'psycopg2-binary\', \'isodate\', \'openturns\',\n \'properscoring\', \'altair==3.0.0\', \'selenium\'], setup_requires=[\n \'pytest-runner\'], 
tests_require=[\'pytest\'], packages=[\'timely_beliefs\',\n \'timely_beliefs.beliefs\', \'timely_beliefs.sensors\',\n \'timely_beliefs.sensors.func_store\', \'timely_beliefs.sources\',\n \'timely_beliefs.examples\', \'timely_beliefs.visualization\'],\n include_package_data=True, classifiers=[\n \'Programming Language :: Python\', \'Programming Language :: Python :: 3\',\n \'Development Status :: 3 - Alpha\', \'Intended Audience :: Developers\',\n \'License :: OSI Approved :: MIT License\',\n \'Operating System :: OS Independent\',\n \'Topic :: Software Development :: Libraries :: Python Modules\',\n \'Topic :: Scientific/Engineering :: Information Analysis\'],\n long_description=\n """ Model to represent data as beliefs about events, stored in the form of\n a multi-index pandas DataFrame enriched with attributes to get out convenient representations of the data.\n """\n )\n', (35, 1422), False, 'from setuptools import setup\n')] |
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mahalanobis metric."""
import numpy as np
import datasets
_DESCRIPTION = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. <NAME> in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_CITATION = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={<NAME>, <NAME> <NAME> <NAME>{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    """Metric computing the Mahalanobis distance of each point in `X` to a reference distribution."""

    def _info(self):
        # Declare the expected input schema: `X` is a sequence of float vectors.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return the Mahalanobis distance of every row of `X` to `reference_distribution`.

        Args:
            X: 2D array-like of datapoints to score.
            reference_distribution: 2D array-like of datapoints defining the
                reference distribution (at least two rows).

        Returns:
            dict with key "mahalanobis" mapping to a 1D numpy array with one
            distance per row of `X`.

        Raises:
            ValueError: if either input is not 2D, or the reference
                distribution has fewer than two points (its covariance would
                be undefined).
        """
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Center `X` on the per-feature mean of the reference distribution.
        # `axis=0` is essential: the original scalar mean over *all* entries
        # shifted every feature by the same amount, which is not the
        # Mahalanobis centering.
        X_minus_mu = X - np.mean(reference_distribution, axis=0)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance (e.g. collinear features): fall back to
            # the Moore-Penrose pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        # Row-wise dot product; equivalent to
        # np.dot(left_term, X_minus_mu.T).diagonal() but avoids
        # materialising the full N x N matrix.
        mahal_dist = np.einsum("ij,ij->i", left_term, X_minus_mu)
        return {"mahalanobis": mahal_dist}
| [
"numpy.mean",
"numpy.linalg.pinv",
"datasets.utils.file_utils.add_start_docstrings",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"datasets.Value",
"numpy.cov"
] | [((1945, 2030), 'datasets.utils.file_utils.add_start_docstrings', 'datasets.utils.file_utils.add_start_docstrings', (['_DESCRIPTION', '_KWARGS_DESCRIPTION'], {}), '(_DESCRIPTION,\n _KWARGS_DESCRIPTION)\n', (1991, 2030), False, 'import datasets\n'), ((2534, 2545), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2542, 2545), True, 'import numpy as np\n'), ((2579, 2611), 'numpy.array', 'np.array', (['reference_distribution'], {}), '(reference_distribution)\n', (2587, 2611), True, 'import numpy as np\n'), ((3216, 3248), 'numpy.cov', 'np.cov', (['reference_distribution.T'], {}), '(reference_distribution.T)\n', (3222, 3248), True, 'import numpy as np\n'), ((3409, 3439), 'numpy.dot', 'np.dot', (['X_minus_mu', 'inv_covmat'], {}), '(X_minus_mu, inv_covmat)\n', (3415, 3439), True, 'import numpy as np\n'), ((3170, 3201), 'numpy.mean', 'np.mean', (['reference_distribution'], {}), '(reference_distribution)\n', (3177, 3201), True, 'import numpy as np\n'), ((3287, 3305), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (3300, 3305), True, 'import numpy as np\n'), ((3369, 3388), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cov'], {}), '(cov)\n', (3383, 3388), True, 'import numpy as np\n'), ((3461, 3492), 'numpy.dot', 'np.dot', (['left_term', 'X_minus_mu.T'], {}), '(left_term, X_minus_mu.T)\n', (3467, 3492), True, 'import numpy as np\n'), ((2343, 2381), 'datasets.Value', 'datasets.Value', (['"""float"""'], {'id': '"""sequence"""'}), "('float', id='sequence')\n", (2357, 2381), False, 'import datasets\n')] |
from sys import argv

from context import TGB

if __name__ == '__main__':
    print('Running communication smoketest.')
    # Require at least one argument: the local address to test against.
    if len(argv) < 2:
        print('please input local address for testing the communication as an argument\nEx. %s 111.111.11.11' % (
            argv[0]))
        quit()
    # The original only assigned the test nodes when *exactly* one address
    # was given, leaving `_testNodes` undefined (NameError) if extra
    # arguments were passed; use the first address and ignore extras.
    _testNodes = [argv[1]]
    _testBC = TGB.Blockchain(_testNodes)
    _testNetwork = TGB.Network(_testBC)
    _testNetwork.smokeTestRxTxLocal()
| [
"context.TGB.Blockchain",
"context.TGB.Network"
] | [((369, 395), 'context.TGB.Blockchain', 'TGB.Blockchain', (['_testNodes'], {}), '(_testNodes)\n', (383, 395), False, 'from context import TGB\n'), ((419, 439), 'context.TGB.Network', 'TGB.Network', (['_testBC'], {}), '(_testBC)\n', (430, 439), False, 'from context import TGB\n')] |
#
# test template
#
# @ author becxer
# @ email <EMAIL>
#
from test_pytrain import test_Suite
class test_Template(test_Suite):
    """Minimal example suite demonstrating the test_Suite hooks."""

    def __init__(self, logging=True):
        # Delegate to the base suite; `logging` toggles test log output.
        super().__init__(logging)

    def test_process(self):
        # Exercise the shared key/value store and the logger, then finish
        # with a trivially true assertion so the suite reports success.
        global_value = self.get_global_value('some_key')
        self.set_global_value('another_key', 'new_value')
        self.tlog('logging somthing')
        assert 1 == 1
| [
"test_pytrain.test_Suite.__init__"
] | [((178, 212), 'test_pytrain.test_Suite.__init__', 'test_Suite.__init__', (['self', 'logging'], {}), '(self, logging)\n', (197, 212), False, 'from test_pytrain import test_Suite\n')] |
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
# @method_decorator(cache_page(60 * 10), name='dispatch')
class HomePageView(TemplateView):
    """Render the static home page template."""
    template_name = 'home/home.html'
# @method_decorator(cache_page(60 * 10), name='dispatch')
class CauseView(TemplateView):
    """Render the static "cause" page template."""
    template_name = 'home/cause.html'
def handler404(request, exception):
    """Project-wide 404 handler: render the custom "not found" page."""
    response = render(request, 'error_404.html', status=404)
    return response
def handler500(request):
    """Project-wide 500 handler: render the custom server-error page."""
    response = render(request, 'error_500.html', status=500)
    return response
| [
"django.shortcuts.render"
] | [((497, 542), 'django.shortcuts.render', 'render', (['request', '"""error_404.html"""'], {'status': '(404)'}), "(request, 'error_404.html', status=404)\n", (503, 542), False, 'from django.shortcuts import render\n'), ((581, 626), 'django.shortcuts.render', 'render', (['request', '"""error_500.html"""'], {'status': '(500)'}), "(request, 'error_500.html', status=500)\n", (587, 626), False, 'from django.shortcuts import render\n')] |
import plotly.graph_objects as go

# Reusable figure template: Rockwell 24pt title font.
rockwell_title_font = dict(family="Rockwell", size=24)
large_rockwell_template = dict(
    layout=go.Layout(title_font=rockwell_title_font)
)

fig = go.Figure()
fig.update_layout(title='Figure Title', template=large_rockwell_template)
fig.show()
| [
"plotly.graph_objects.Figure"
] | [((142, 153), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (151, 153), True, 'import plotly.graph_objects as go\n')] |
from django.conf.urls import url
from ratelimitbackend import admin
from ratelimitbackend.views import login
from .forms import CustomAuthForm, TokenOnlyAuthForm
urlpatterns = [
    # Rate-limited login view rendered with the stock admin template.
    url(r'^login/$', login,
        {'template_name': 'admin/login.html'}, name='login'),
    # Same view, but with a custom template and authentication form.
    url(r'^custom_login/$', login,
        {'template_name': 'custom_login.html',
         'authentication_form': CustomAuthForm},
        name='custom_login'),
    # Variant exercising token-only authentication.
    url(r'^token_login/$', login,
        {'template_name': 'token_only_login.html',
         'authentication_form': TokenOnlyAuthForm},
        name='token_only_login'),
    url(r'^admin/', admin.site.urls),
]
| [
"django.conf.urls.url"
] | [((186, 261), 'django.conf.urls.url', 'url', (['"""^login/$"""', 'login', "{'template_name': 'admin/login.html'}"], {'name': '"""login"""'}), "('^login/$', login, {'template_name': 'admin/login.html'}, name='login')\n", (189, 261), False, 'from django.conf.urls import url\n'), ((276, 409), 'django.conf.urls.url', 'url', (['"""^custom_login/$"""', 'login', "{'template_name': 'custom_login.html', 'authentication_form': CustomAuthForm}"], {'name': '"""custom_login"""'}), "('^custom_login/$', login, {'template_name': 'custom_login.html',\n 'authentication_form': CustomAuthForm}, name='custom_login')\n", (279, 409), False, 'from django.conf.urls import url\n'), ((437, 580), 'django.conf.urls.url', 'url', (['"""^token_login/$"""', 'login', "{'template_name': 'token_only_login.html', 'authentication_form':\n TokenOnlyAuthForm}"], {'name': '"""token_only_login"""'}), "('^token_login/$', login, {'template_name': 'token_only_login.html',\n 'authentication_form': TokenOnlyAuthForm}, name='token_only_login')\n", (440, 580), False, 'from django.conf.urls import url\n'), ((608, 639), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (611, 639), False, 'from django.conf.urls import url\n')] |
#!/usr/bin/python
# This is the second program shown in Kunal chawla's prank project in udacity's Programming foundation with Python.
import os
def rename_files(directory=r"/home/sanjeev/prank"):
    """Strip all digits from every file name in *directory*.

    Args:
        directory: Folder whose files are renamed. Defaults to the
            original hard-coded prank folder.
    """
    # (1) Get the file names from the target directory.
    file_list = os.listdir(directory)
    saved_path = os.getcwd()
    print("Current working directory is " + saved_path)
    os.chdir(directory)
    # Translation table that deletes the characters 0-9. This replaces the
    # broken Python 2 idiom `name.translate(NONE, "0123456789")`, which on
    # Python 3 raised NameError (`NONE` undefined) and used a removed
    # two-argument str.translate form.
    digit_table = str.maketrans("", "", "0123456789")
    try:
        # (2) Rename each file by removing digits from its name.
        for file_name in file_list:
            print("Old File Name - " + file_name)
            print("New File Name - " + file_name.translate(digit_table))
            os.rename(file_name, file_name.translate(digit_table))
    finally:
        # Restore the working directory so callers are not left inside
        # the renamed folder.
        os.chdir(saved_path)
rename_files()
| [
"os.chdir",
"os.listdir",
"os.getcwd"
] | [((271, 304), 'os.listdir', 'os.listdir', (['"""/home/sanjeev/prank"""'], {}), "('/home/sanjeev/prank')\n", (281, 304), False, 'import os\n'), ((321, 332), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (330, 332), False, 'import os\n'), ((388, 419), 'os.chdir', 'os.chdir', (['"""/home/sanjeev/prank"""'], {}), "('/home/sanjeev/prank')\n", (396, 419), False, 'import os\n')] |
""" integrity checks for the wxyz repo
"""
import re
# pylint: disable=redefined-outer-name
import sys
import tempfile
from pathlib import Path
import pytest
from . import _paths as P
PYTEST_INI = """
[pytest]
junit_family = xunit2
"""
@pytest.fixture(scope="module")
def contributing_text():
    """The full text of CONTRIBUTING.md, decoded as UTF-8."""
    contributing = P.CONTRIBUTING
    return contributing.read_text(encoding="utf-8")
@pytest.fixture(scope="module")
def readme_text():
    """The full text of README.md, decoded as UTF-8."""
    readme = P.README
    return readme.read_text(encoding="utf-8")
@pytest.fixture(scope="module")
def postbuild():
    """The full text of the binder postBuild script."""
    script = P.POSTBUILD
    return script.read_text(encoding="utf-8")
@pytest.fixture(scope="module")
def wxyz_notebook_cfg():
    """The text of the wxyz_notebooks package's setup.cfg."""
    candidates = [s for s in P.PY_SETUP if s.parent.name == "wxyz_notebooks"]
    setup_py = candidates[0]
    return (setup_py.parent / "setup.cfg").read_text(encoding="utf-8")
def test_contributing_locks(contributing_text):
    """Are both lockfiles mentioned in CONTRIBUTING.md?"""
    mentioned = [
        lock
        for lock in P.LOCKS.glob("*")
        if str(lock.relative_to(P.ROOT).as_posix()) in contributing_text
    ]
    assert len(mentioned) == 2
def test_binder_locks(postbuild):
    """Does postBuild reference every binder conda lockfile?"""
    for lock in P.LOCKS.glob("conda.binder.*.lock"):
        relative = lock.relative_to(P.ROOT)
        assert str(relative.as_posix()) in postbuild
# One test case per versioned python package directory.
@pytest.mark.parametrize(
    "pkg",
    [setup_py.parent.name for setup_py in P.PY_VERSION],
)
def test_readme_py_pkgs(pkg, readme_text):
    """Are all of the python packages mentioned in the readme?"""
    assert pkg in readme_text
@pytest.mark.parametrize(
    "pkg_name,pkg_path",
    [[setup_py.parent.name, setup_py.parent] for setup_py in P.PY_VERSION],
)
def test_manifest(pkg_name, pkg_path):
    """Does each package's MANIFEST.in carry the required include/exclude rules?"""
    manifest = pkg_path / "MANIFEST.in"
    manifest_txt = manifest.read_text(encoding="utf-8")
    # (pattern, label-used-in-failure-message) pairs
    required = [
        (r"include .*js/LICENSE.txt", "nested license"),
        (r"global-exclude\s+.ipynb_checkpoints", "checkpoint exclude"),
        (r"global-exclude\s+node_modules", "node_modules exclude"),
    ]
    for pattern, label in required:
        assert re.findall(
            pattern, manifest_txt
        ), f"{pkg_name} missing {label} in {manifest}"
@pytest.mark.parametrize("pkg_path", P.PY_SETUP)
def test_notebook_deps(wxyz_notebook_cfg, pkg_path):
    """does the notebook example package depend on all other packages?"""
    # Every package directory name must appear somewhere in the
    # wxyz_notebooks setup.cfg (its install requirements).
    pkg = pkg_path.parent.name
    assert pkg in wxyz_notebook_cfg, f"add {pkg} to wxyz_notebook/setup.cfg!"
def check_integrity():
    """Run this module under pytest with an isolated pytest.ini; return the exit code."""
    suite_name = f"junit_suite_name=integrity_{P.OS}_{P.PY_VER}"
    args = ["-vv", "-o", suite_name, __file__]
    # Write the pytest config into a throwaway directory so the run does not
    # pick up (or pollute) any project-level configuration.
    with tempfile.TemporaryDirectory() as tmp:
        ini_path = Path(tmp) / "pytest.ini"
        ini_path.write_text(PYTEST_INI)
        return pytest.main(args + ["-c", str(ini_path)])
if __name__ == "__main__":
    # Propagate pytest's exit code so CI can detect failures.
    sys.exit(check_integrity())
| [
"tempfile.TemporaryDirectory",
"pathlib.Path",
"pytest.main",
"pytest.mark.parametrize",
"pytest.fixture",
"re.findall"
] | [((243, 273), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (257, 273), False, 'import pytest\n'), ((394, 424), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (408, 424), False, 'import pytest\n'), ((527, 557), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (541, 557), False, 'import pytest\n'), ((661, 691), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (675, 691), False, 'import pytest\n'), ((1398, 1486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pkg"""', '[setup_py.parent.name for setup_py in P.PY_VERSION]'], {}), "('pkg', [setup_py.parent.name for setup_py in P.\n PY_VERSION])\n", (1421, 1486), False, 'import pytest\n'), ((1635, 1755), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pkg_name,pkg_path"""', '[[setup_py.parent.name, setup_py.parent] for setup_py in P.PY_VERSION]'], {}), "('pkg_name,pkg_path', [[setup_py.parent.name,\n setup_py.parent] for setup_py in P.PY_VERSION])\n", (1658, 1755), False, 'import pytest\n'), ((2358, 2405), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pkg_path"""', 'P.PY_SETUP'], {}), "('pkg_path', P.PY_SETUP)\n", (2381, 2405), False, 'import pytest\n'), ((1947, 1999), 're.findall', 're.findall', (['"""include .*js/LICENSE.txt"""', 'manifest_txt'], {}), "('include .*js/LICENSE.txt', manifest_txt)\n", (1957, 1999), False, 'import re\n'), ((2078, 2142), 're.findall', 're.findall', (['"""global-exclude\\\\s+.ipynb_checkpoints"""', 'manifest_txt'], {}), "('global-exclude\\\\s+.ipynb_checkpoints', manifest_txt)\n", (2088, 2142), False, 'import re\n'), ((2224, 2282), 're.findall', 're.findall', (['"""global-exclude\\\\s+node_modules"""', 'manifest_txt'], {}), "('global-exclude\\\\s+node_modules', manifest_txt)\n", (2234, 2282), False, 'import re\n'), ((2794, 2823), 'tempfile.TemporaryDirectory', 
'tempfile.TemporaryDirectory', ([], {}), '()\n', (2821, 2823), False, 'import tempfile\n'), ((2956, 2973), 'pytest.main', 'pytest.main', (['args'], {}), '(args)\n', (2967, 2973), False, 'import pytest\n'), ((2846, 2855), 'pathlib.Path', 'Path', (['tmp'], {}), '(tmp)\n', (2850, 2855), False, 'from pathlib import Path\n')] |
import datetime
import hashlib
import logging
import os
import tarfile
from azure.storage.blob import BlockBlobService
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from boto.s3.key import Key
import dj_database_url
import django # Provides django.setup()
from django.apps import apps as django_apps
import envoy
# ISO 8601 YYYY-MM-DDTHH:MM:SS
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S"
# Would be '[]' without the '--indent=4' argument to dumpdata
EMPTY_FIXTURE = '[\n]\n'
# Every backup step (including timings from the decorators below) is
# appended to backup.log.
logging.basicConfig(
    filename='backup.log',
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s]: %(message)s",
    datefmt=DATE_FORMAT,
)
logger = logging.getLogger(__name__)
def instrument(function):
    """Decorator: time *function* and return ``(milliseconds, result)``.

    The wrapped callable returns a tuple of the wall-clock duration in
    milliseconds and the original return value.
    """
    def wrapper(*args, **kwargs):
        start = datetime.datetime.now()
        result = function(*args, **kwargs)
        elapsed = datetime.datetime.now() - start
        # total_seconds() covers the whole delta; the original
        # `elapsed.microseconds / 1000` only looked at the sub-second
        # component and silently dropped everything >= 1 second.
        milliseconds = elapsed.total_seconds() * 1000
        return (milliseconds, result)
    return wrapper
def capture_command(function):
    """Decorator for envoy.run-style callables.

    Wraps *function* so every invocation logs the shell command, its
    wall-clock duration, the stdout byte count, and any non-blank stderr
    lines.
    """
    def wrapper(*args, **kwargs):
        if args:
            command = args[0]
        elif 'command' in kwargs:
            command = kwargs['command']
        else:
            logger.warning(
                "What's the command?.\n\targs: %r\n\tkwargs: %r" % (args, kwargs))
            command = '?'
        # Being much too clever: Dynamically decorate the given function with
        # the 'instrument' function so that we can capture timing data. Once
        # the function is decorated, we'll call it with the original
        # parameters.
        milliseconds, r = instrument(function)(*args, **kwargs)
        out_bytes = len(r.std_out)  # renamed: 'bytes' shadowed the builtin
        logger.info("cmd=\"%s\" real=%dms bytes=%d" %
                    (command, milliseconds, out_bytes))
        if hasattr(r, 'std_err'):
            # Log each non-blank stderr line separately. An explicit loop is
            # used because map() is lazy on Python 3, so the original
            # `map(logger.error, lines)` never actually logged anything.
            for line in r.std_err.split('\n'):
                if line:
                    logger.error(line)
        return r
    return wrapper
def capture_function(function):
    """Decorator: log the wrapped function's name and wall-clock duration."""
    def wrapper(*args, **kwargs):
        # `instrument` returns (elapsed milliseconds, original result).
        milliseconds, r = instrument(function)(*args, **kwargs)
        logger.info("func=\"%s\" real=%dms" %
                    (function.__name__, milliseconds))
        return r
    return wrapper
# Keep a log of the calls through envoy
# (monkey-patch envoy.run so every shell command is timed and logged).
envoy.run = capture_command(envoy.run)
def get_database_name(env='DATABASE_URL'):
    """Database name parsed from the URL in the *env* environment variable."""
    return dj_database_url.config(env)['NAME']
def get_installed_app_names():
    """Return the labels of all installed Django apps (after django.setup())."""
    django.setup()
    apps = django_apps.get_app_configs()
    # Labels default to the last component of the app name, but not guaranteed:
    # - Default: 'django.contrib.auth' -> 'auth'
    # - Changed: 'raven.contrib.django.raven_compat' -> 'raven_contrib_django'
    labels = [appconfig.label for appconfig in apps]
    return labels
def get_s3_credentials():
    """(access_key, secret_key) pair read from the environment."""
    return (os.environ['S3_ACCESS_KEY'], os.environ['S3_SECRET_KEY'])
def get_s3_bucket_name():
    """Target S3 bucket name, read from the environment."""
    bucket = os.environ['S3_BUCKET_NAME']
    return bucket
def get_az_credentials():
    """(storage account name, storage account key) pair from the environment."""
    return (os.environ['AZ_STORAGE_ACCOUNT_NAME'],
            os.environ['AZ_STORAGE_ACCOUNT_KEY'])
def get_az_container_name():
    """Azure blob container name, read from the environment."""
    container = os.environ['AZ_BLOB_CONTAINER_NAME']
    return container
def dump_postgres():
    """Dump the configured Postgres database to `<name>.sql`; return the filename."""
    database = get_database_name()
    result = envoy.run("pg_dump %s" % database)
    filename = "%s.sql" % database
    with open(filename, 'w') as out:
        out.write(result.std_out)
    return filename
def dump_django_fixtures():
    """Dump each non-empty installed app to `<label>.json`; return the filenames."""
    filenames = []
    for name in get_installed_app_names():
        result = envoy.run('django-admin.py dumpdata %s --indent=4' % name)
        # Skip apps whose dump is just an empty JSON list.
        if result.std_out == EMPTY_FIXTURE:
            logger.warning("Skipping empty fixture for '%s'" % name)
            continue
        filename = "%s.json" % name
        with open(filename, 'w') as out:
            out.write(result.std_out)
        filenames.append(filename)
    return filenames
def sha1sum(file):
    """Hex SHA-1 digest of *file* (an open binary file object or a path)."""
    digest = hashlib.sha1()
    try:
        # EAFP: assume a file-like object first.
        logger.debug("Treating '%s' as an open file" % file)
        digest.update(file.read())
    except AttributeError:
        # No .read() attribute -- treat it as a filesystem path instead.
        logger.debug("Now trying '%s' as a file path" % file)
        with open(file, 'rb') as handle:
            digest.update(handle.read())
    return digest.hexdigest()
@capture_function
def update_necessary():
    """Return True when a new backup archive should be produced.

    Compares the SHA-1 digests of the freshly dumped 'auth.json' and
    'checkouts.json' against the copies stored inside the previous
    'latest.tar.gz' archive.
    """
    latest = 'latest.tar.gz'
    # Shortcut if we have nothing to compare against (e.g. the first run)
    if not os.path.exists(latest):
        logger.debug("Could not access '%s', assuming first run" % latest)
        return True
    to_compare = ('auth.json', 'checkouts.json')
    for name in to_compare:
        # Digest of the freshly dumped file on disk.
        proposed = sha1sum(name)
        with tarfile.open(latest, 'r:gz') as tar:
            for tarinfo in tar:
                if tarinfo.name == name:
                    # Digest of the archived copy, read straight from the tar.
                    original = sha1sum(tar.extractfile(tarinfo))
                    logger.debug("Original: %s" % original)
                    logger.debug("Proposed: %s" % proposed)
                    if proposed != original:
                        logger.debug("Digests differ for '%s'" % name)
                        return True
    return False
@capture_function
def package(filenames):
    """Bundle *filenames* into a timestamped tar.gz and point 'latest.tar.gz' at it.

    Returns the name of the newly created archive.
    """
    latest = 'latest.tar.gz'
    logger.info("Packaging archive")
    logger.debug("Contents: %s" % filenames)
    # Archive name is the current UTC timestamp (naive datetime; assumed UTC
    # throughout this script).
    date = datetime.datetime.utcnow().strftime(DATE_FORMAT)
    filename = "%s.tar.gz" % date
    with tarfile.open(filename, 'w:gz') as archive:
        [archive.add(name) for name in filenames]
    # Replace the 'latest' symlink so the next run can compare against us.
    if os.path.lexists(latest):
        os.unlink(latest)
    os.symlink(filename, latest)
    return filename
@capture_function
def encrypt(filename):
    """Encrypt *filename* via ./encrypt.sh; return the `<filename>.gpg` name.

    Any stale encrypted file from a previous run is removed first so the
    GPG invocation cannot fail on an existing output file.
    """
    encrypted_filename = '%s.gpg' % filename
    if os.path.isfile(encrypted_filename):
        # The original called the non-existent os.path.remove(), which
        # raised AttributeError whenever a stale .gpg file was present.
        os.remove(encrypted_filename)
    envoy.run("./encrypt.sh %s" % filename)
    return encrypted_filename
@capture_function
def az_upload(filename):
    """Upload *filename* to the configured Azure blob container; return its size in bytes."""
    if not filename.endswith('.gpg'):
        # Heuristic: anything without a .gpg suffix is assumed unencrypted.
        logger.warning(
            "Upload requested for '%s' which appears to be plaintext (not encrypted)" % filename)
    account_name, account_key = get_az_credentials()
    container_name = get_az_container_name()
    # Resolve the file relative to this script's directory.
    local_path = os.path.abspath(os.path.dirname(__file__))
    full_path_to_file = os.path.join(local_path, filename)
    block_blob_service = BlockBlobService(account_name, account_key)
    block_blob_service.create_blob_from_path(
        container_name, filename, full_path_to_file)
    logger.info("Uploaded '%s' to '%s:%s'" %
                (filename, account_name, container_name))
    return os.path.getsize(filename)
@capture_function
def s3_upload(filename):
    """Upload *filename* to the configured S3 bucket under 'db/'; return its size in bytes."""
    if not filename.endswith('.gpg'):
        # Heuristic: anything without a .gpg suffix is assumed unencrypted.
        logger.warning(
            "Upload requested for '%s' which appears to be plaintext (not encrypted)" % filename)
    access, secret = get_s3_credentials()
    bucket_name = get_s3_bucket_name()
    key_name = 'db/%s' % filename
    connection = S3Connection(access, secret)
    bucket = Bucket(connection, bucket_name)
    key = Key(bucket)
    key.key = key_name
    key.set_contents_from_filename(filename)
    logger.info("Uploaded '%s' to '%s:%s'" % (filename, bucket_name, key_name))
    return os.path.getsize(filename)
# --- Backup entry point: dump, compare, package, encrypt, upload -----------
filenames = [dump_postgres(), ]
filenames.extend(dump_django_fixtures())
if update_necessary():
    archive = package(filenames)
    secured = encrypt(archive)
    filesize = az_upload(secured)
    logger.info(
        "---- Uploading complete ---- bytes=%d --------------------" % filesize)
else:
    logger.info("---- Digests match, ceasing activity ---------------------")
# Clean up the intermediate dump files. An explicit loop is used because
# map() is lazy on Python 3, so the original `map(os.remove, filenames)`
# never deleted anything there (the loop is equivalent on Python 2).
for dump_file in filenames:
    os.remove(dump_file)
| [
"logging.getLogger",
"tarfile.open",
"os.path.remove",
"boto.s3.connection.S3Connection",
"hashlib.sha1",
"boto.s3.bucket.Bucket",
"os.path.exists",
"dj_database_url.config",
"os.path.lexists",
"os.unlink",
"os.path.getsize",
"django.apps.apps.get_app_configs",
"os.path.isfile",
"boto.s3.k... | [((505, 644), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""backup.log"""', 'level': 'logging.INFO', 'format': '"""%(asctime)s [%(levelname)s]: %(message)s"""', 'datefmt': 'DATE_FORMAT'}), "(filename='backup.log', level=logging.INFO, format=\n '%(asctime)s [%(levelname)s]: %(message)s', datefmt=DATE_FORMAT)\n", (524, 644), False, 'import logging\n'), ((668, 695), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (685, 695), False, 'import logging\n'), ((2406, 2433), 'dj_database_url.config', 'dj_database_url.config', (['env'], {}), '(env)\n', (2428, 2433), False, 'import dj_database_url\n'), ((2500, 2514), 'django.setup', 'django.setup', ([], {}), '()\n', (2512, 2514), False, 'import django\n'), ((2526, 2555), 'django.apps.apps.get_app_configs', 'django_apps.get_app_configs', ([], {}), '()\n', (2553, 2555), True, 'from django.apps import apps as django_apps\n'), ((3365, 3399), 'envoy.run', 'envoy.run', (["('pg_dump %s' % database)"], {}), "('pg_dump %s' % database)\n", (3374, 3399), False, 'import envoy\n'), ((4009, 4023), 'hashlib.sha1', 'hashlib.sha1', ([], {}), '()\n', (4021, 4023), False, 'import hashlib\n'), ((5527, 5550), 'os.path.lexists', 'os.path.lexists', (['latest'], {}), '(latest)\n', (5542, 5550), False, 'import os\n'), ((5582, 5610), 'os.symlink', 'os.symlink', (['filename', 'latest'], {}), '(filename, latest)\n', (5592, 5610), False, 'import os\n'), ((5726, 5760), 'os.path.isfile', 'os.path.isfile', (['encrypted_filename'], {}), '(encrypted_filename)\n', (5740, 5760), False, 'import os\n'), ((5813, 5852), 'envoy.run', 'envoy.run', (["('./encrypt.sh %s' % filename)"], {}), "('./encrypt.sh %s' % filename)\n", (5822, 5852), False, 'import envoy\n'), ((6270, 6304), 'os.path.join', 'os.path.join', (['local_path', 'filename'], {}), '(local_path, filename)\n', (6282, 6304), False, 'import os\n'), ((6330, 6373), 'azure.storage.blob.BlockBlobService', 'BlockBlobService', (['account_name', 
'account_key'], {}), '(account_name, account_key)\n', (6346, 6373), False, 'from azure.storage.blob import BlockBlobService\n'), ((6587, 6612), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (6602, 6612), False, 'import os\n'), ((6950, 6978), 'boto.s3.connection.S3Connection', 'S3Connection', (['access', 'secret'], {}), '(access, secret)\n', (6962, 6978), False, 'from boto.s3.connection import S3Connection\n'), ((6992, 7023), 'boto.s3.bucket.Bucket', 'Bucket', (['connection', 'bucket_name'], {}), '(connection, bucket_name)\n', (6998, 7023), False, 'from boto.s3.bucket import Bucket\n'), ((7034, 7045), 'boto.s3.key.Key', 'Key', (['bucket'], {}), '(bucket)\n', (7037, 7045), False, 'from boto.s3.key import Key\n'), ((7205, 7230), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (7220, 7230), False, 'import os\n'), ((774, 797), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (795, 797), False, 'import datetime\n'), ((3621, 3679), 'envoy.run', 'envoy.run', (["('django-admin.py dumpdata %s --indent=4' % name)"], {}), "('django-admin.py dumpdata %s --indent=4' % name)\n", (3630, 3679), False, 'import envoy\n'), ((4463, 4485), 'os.path.exists', 'os.path.exists', (['latest'], {}), '(latest)\n', (4477, 4485), False, 'import os\n'), ((5427, 5457), 'tarfile.open', 'tarfile.open', (['filename', '"""w:gz"""'], {}), "(filename, 'w:gz')\n", (5439, 5457), False, 'import tarfile\n'), ((5560, 5577), 'os.unlink', 'os.unlink', (['latest'], {}), '(latest)\n', (5569, 5577), False, 'import os\n'), ((5770, 5804), 'os.path.remove', 'os.path.remove', (['encrypted_filename'], {}), '(encrypted_filename)\n', (5784, 5804), False, 'import os\n'), ((6219, 6244), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6234, 6244), False, 'import os\n'), ((850, 873), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (871, 873), False, 'import datetime\n'), ((4705, 4733), 'tarfile.open', 
'tarfile.open', (['latest', '"""r:gz"""'], {}), "(latest, 'r:gz')\n", (4717, 4733), False, 'import tarfile\n'), ((5335, 5361), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5359, 5361), False, 'import datetime\n')] |
"""
Script to control mouse cursor with gamepad stick.
Specifically: right stick x axis controls mouse x position.
Requires FreePIE: https://github.com/AndersMalmgren/FreePIE
2020-06-11 JAO
"""
if starting:
"""This block only runs once."""
import time
joy_id = 0 # gamepad device number
x_axis_mult = 0.25 # pos mode movement multiplier
def axis_to_mouse_pos(axis):
"""Position Mode. Stick deflection sets relative mouse cursor position."""
return filters.delta(axis) * x_axis_mult
frame_start = time.time() # setup frame limiter
mouse.deltaX = axis_to_mouse_pos(axis=joystick[joy_id].xRotation)
time.sleep(max(1./60 - (time.time() - frame_start), 0)) # frame limiter
| [
"time.time"
] | [((523, 534), 'time.time', 'time.time', ([], {}), '()\n', (532, 534), False, 'import time\n'), ((647, 658), 'time.time', 'time.time', ([], {}), '()\n', (656, 658), False, 'import time\n')] |
import json as _json
import os as _os
import retry as _retry
from flytekit.common.tasks import sdk_runnable as _sdk_runnable
from flytekit.engines import common as _common_engine
SM_RESOURCE_CONFIG_FILE = "/opt/ml/input/config/resourceconfig.json"
SM_ENV_VAR_CURRENT_HOST = "SM_CURRENT_HOST"
SM_ENV_VAR_HOSTS = "SM_HOSTS"
SM_ENV_VAR_NETWORK_INTERFACE_NAME = "SM_NETWORK_INTERFACE_NAME"
# SageMaker suggests "Hostname information might not be immediately available to the processing container.
# We recommend adding a retry policy on hostname resolution operations as nodes become available in the cluster."
# https://docs.aws.amazon.com/sagemaker/latest/dg/build-your-own-processing-container.html#byoc-config
@_retry.retry(exceptions=KeyError, delay=1, tries=10, backoff=1)
def get_sagemaker_distributed_training_context_from_env() -> dict:
    """Build the distributed training context from SageMaker's SM_* env vars.

    Retried on KeyError because hostname information might not be available
    immediately after the container starts.

    Returns:
        dict keyed by DistributedTrainingContextKey values: the current
        host, the full host list, and the network interface name.

    Raises:
        KeyError: if any of the three SM_* variables is unset or empty
            (raised deliberately to trigger the retry decorator).
    """
    distributed_training_context = {}
    if (
        not _os.environ.get(SM_ENV_VAR_CURRENT_HOST)
        or not _os.environ.get(SM_ENV_VAR_HOSTS)
        or not _os.environ.get(SM_ENV_VAR_NETWORK_INTERFACE_NAME)
    ):
        raise KeyError
    distributed_training_context[DistributedTrainingContextKey.CURRENT_HOST] = _os.environ.get(SM_ENV_VAR_CURRENT_HOST)
    # SM_HOSTS is a JSON-encoded list of host names.
    distributed_training_context[DistributedTrainingContextKey.HOSTS] = _json.loads(_os.environ.get(SM_ENV_VAR_HOSTS))
    distributed_training_context[DistributedTrainingContextKey.NETWORK_INTERFACE_NAME] = _os.environ.get(
        SM_ENV_VAR_NETWORK_INTERFACE_NAME
    )
    return distributed_training_context
@_retry.retry(exceptions=FileNotFoundError, delay=1, tries=10, backoff=1)
def get_sagemaker_distributed_training_context_from_file() -> dict:
    """Load the distributed training context from SageMaker's resourceconfig.json.

    Retried on FileNotFoundError because the file may only appear some time
    after the processing container has started.
    """
    with open(SM_RESOURCE_CONFIG_FILE, "r") as rc_file:
        return _json.load(rc_file)
# The default output-persisting predicate.
# With this predicate, only the copy running on the first host in the list of hosts would persist its output
class DefaultOutputPersistPredicate(object):
    """Default predicate deciding which worker persists its output.

    Only the replica running on the first host in the distributed training
    context's host list returns True, so exactly one copy writes results.
    """

    def __call__(self, distributed_training_context):
        current = distributed_training_context[DistributedTrainingContextKey.CURRENT_HOST]
        hosts = distributed_training_context[DistributedTrainingContextKey.HOSTS]
        return current == hosts[0]
class DistributedTrainingContextKey(object):
    """String keys used in the distributed training context dictionary."""

    CURRENT_HOST = "current_host"
    HOSTS = "hosts"
    NETWORK_INTERFACE_NAME = "network_interface_name"
class DistributedTrainingEngineContext(_common_engine.EngineContext):
    """EngineContext extended with SageMaker distributed-training information."""

    def __init__(
        self,
        execution_date,
        tmp_dir,
        stats,
        execution_id,
        logging,
        raw_output_data_prefix=None,
        distributed_training_context=None,
    ):
        # Forward the standard engine-context fields to the base class; only
        # the distributed training context is stored locally.
        super().__init__(
            execution_date=execution_date,
            tmp_dir=tmp_dir,
            stats=stats,
            execution_id=execution_id,
            logging=logging,
            raw_output_data_prefix=raw_output_data_prefix,
        )
        self._distributed_training_context = distributed_training_context

    @property
    def distributed_training_context(self) -> dict:
        """The SageMaker distributed training context dict (or None)."""
        return self._distributed_training_context
class DistributedTrainingExecutionParam(_sdk_runnable.ExecutionParameters):
    """ExecutionParameters extended with SageMaker distributed-training information."""

    def __init__(self, execution_date, tmp_dir, stats, execution_id, logging, distributed_training_context):
        super().__init__(
            execution_date=execution_date, tmp_dir=tmp_dir, stats=stats, execution_id=execution_id, logging=logging
        )
        self._distributed_training_context = distributed_training_context

    @property
    def distributed_training_context(self):
        """
        This contains the resource information for distributed training. Currently this information is only available
        for SageMaker training jobs.
        :rtype: dict
        """
        return self._distributed_training_context
| [
"json.load",
"retry.retry",
"os.environ.get"
] | [((716, 779), 'retry.retry', '_retry.retry', ([], {'exceptions': 'KeyError', 'delay': '(1)', 'tries': '(10)', 'backoff': '(1)'}), '(exceptions=KeyError, delay=1, tries=10, backoff=1)\n', (728, 779), True, 'import retry as _retry\n'), ((1530, 1602), 'retry.retry', '_retry.retry', ([], {'exceptions': 'FileNotFoundError', 'delay': '(1)', 'tries': '(10)', 'backoff': '(1)'}), '(exceptions=FileNotFoundError, delay=1, tries=10, backoff=1)\n', (1542, 1602), True, 'import retry as _retry\n'), ((1172, 1212), 'os.environ.get', '_os.environ.get', (['SM_ENV_VAR_CURRENT_HOST'], {}), '(SM_ENV_VAR_CURRENT_HOST)\n', (1187, 1212), True, 'import os as _os\n'), ((1421, 1471), 'os.environ.get', '_os.environ.get', (['SM_ENV_VAR_NETWORK_INTERFACE_NAME'], {}), '(SM_ENV_VAR_NETWORK_INTERFACE_NAME)\n', (1436, 1471), True, 'import os as _os\n'), ((1297, 1330), 'os.environ.get', '_os.environ.get', (['SM_ENV_VAR_HOSTS'], {}), '(SM_ENV_VAR_HOSTS)\n', (1312, 1330), True, 'import os as _os\n'), ((1742, 1761), 'json.load', '_json.load', (['rc_file'], {}), '(rc_file)\n', (1752, 1761), True, 'import json as _json\n'), ((906, 946), 'os.environ.get', '_os.environ.get', (['SM_ENV_VAR_CURRENT_HOST'], {}), '(SM_ENV_VAR_CURRENT_HOST)\n', (921, 946), True, 'import os as _os\n'), ((962, 995), 'os.environ.get', '_os.environ.get', (['SM_ENV_VAR_HOSTS'], {}), '(SM_ENV_VAR_HOSTS)\n', (977, 995), True, 'import os as _os\n'), ((1011, 1061), 'os.environ.get', '_os.environ.get', (['SM_ENV_VAR_NETWORK_INTERFACE_NAME'], {}), '(SM_ENV_VAR_NETWORK_INTERFACE_NAME)\n', (1026, 1061), True, 'import os as _os\n')] |
#
# betatest setup script
#
# Copyright (c) 2018 Beta Five Ltd
#
# SPDX-License-Identifier: Apache-2.0
#
import re
import runpy
import setuptools
import sys
# Check minimum python version
if sys.version_info < (3,6):
print('ERROR: betatest requires Python 3.6+')
sys.exit(1)
# Pick up the version number from the source code
__version__ = runpy.run_path('betatest/version.py')['__version__']
# Pick up the long description from our readme file
long_description = open('README.md', 'r').read()
# Use the first listed maintainer as our author
first_maintainer = open('MAINTAINERS', 'r').readline().strip()
m = re.match('(.*) <(.*)>$', first_maintainer)
author_name = m.group(1)
author_email = m.group(2)
setuptools.setup(
name = 'betatest',
version = __version__,
author = author_name,
author_email = author_email,
description = 'Testing helpers for Python 3.6+.',
long_description = long_description,
long_description_content_type = 'text/markdown',
url = 'https://gitlab.com/b5/betatest',
packages = setuptools.find_packages(),
classifiers = (
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Testing'
),
python_requires = '>=3.6',
license = 'Apache License 2.0',
platforms = ('Any'),
include_package_data=True,
)
| [
"runpy.run_path",
"setuptools.find_packages",
"re.match",
"sys.exit"
] | [((621, 663), 're.match', 're.match', (['"""(.*) <(.*)>$"""', 'first_maintainer'], {}), "('(.*) <(.*)>$', first_maintainer)\n", (629, 663), False, 'import re\n'), ((273, 284), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (281, 284), False, 'import sys\n'), ((350, 387), 'runpy.run_path', 'runpy.run_path', (['"""betatest/version.py"""'], {}), "('betatest/version.py')\n", (364, 387), False, 'import runpy\n'), ((1050, 1076), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1074, 1076), False, 'import setuptools\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
These are the set of attributes that can be computed for each run. The
configuration specifies which subset of these should be stored, how
and where.
The attributes provide a fairly expressive language to collect
information recursively. Each attribute can combine multiple other
attributes' values to generate new values
The default module that is instrumented is statsmodels.formula.api
and the storage is local.
"""
import os, sys
import hashlib
from datetime import datetime
from json import JSONEncoder
from .helpers import *
import pickle
attribute_overlay = {
'model': {
'parameters': {
'datashape': {
'params': {
'dataset': 'model.parameters.data'
},
'compute': lambda run, args: args['dataset'].shape
},
'datacolumns': {
'params': {
'dataset': 'model.parameters.data'
},
'compute': lambda run, args: list(args['dataset'].columns)
},
},
},
'attributes': {
'generic': {
'timestamp': {
'compute': lambda run, args : datetime.now().strftime("%Y-%b-%d-%H:%M:%S")
},
},
'output': {
'relative-path': {
'params': [
'model-output',
'spec.experiment.scope',
'spec.experiment.run',
'spec.experiment.version',
'model.library.function',
'uuid',
'attributes.generic.timestamp'
],
'compute': lambda run, args: os.path.join(args)
},
'default-signature': {
'params': {
'uuid': 'uuid',
'experiment': {
'scope': 'spec.experiment.scope',
'run': 'spec.experiment.run',
'version': 'spec.experiment.version'
},
'data': {
'name': 'dataset',
'shape': 'model.parameters.datashape',
'columns': 'model.parameters.datacolumns'
},
'model': {
'module': 'model.library.module',
'function': 'model.library.function',
'formula': 'model.parameters.formula'
}
},
'compute': lambda run, args: json.dumps(args, indent=4)
},
'full-pickle': {
'params': {
'result': 'model.result'
},
'compute': lambda run, args: pickle.dumps(args['result'].fit())
},
'summary-pickle': {
'params': {
'result': 'model.result'
},
'compute': lambda run, args: pickle.dumps(args['result'].fit().summary())
},
},
'storage': {
'local': {
'params': {
'output': 'spec.output',
},
'compute': lambda run, args: local_storage(run, args)
}
}
}
}
| [
"datetime.datetime.now",
"os.path.join"
] | [((1770, 1788), 'os.path.join', 'os.path.join', (['args'], {}), '(args)\n', (1782, 1788), False, 'import os, sys\n'), ((1255, 1269), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1267, 1269), False, 'from datetime import datetime\n')] |
from django.contrib import admin
from cliente.models import Cliente
admin.site.register(Cliente)
| [
"django.contrib.admin.site.register"
] | [((69, 97), 'django.contrib.admin.site.register', 'admin.site.register', (['Cliente'], {}), '(Cliente)\n', (88, 97), False, 'from django.contrib import admin\n')] |
"""
Author: <NAME>
Date last modified: 01-03-2019
"""
from unittest import TestCase
from graphy.utils import dict as my_dict
class TestDict(TestCase):
def test_calc_percentage(self):
""" Tests calc_percentage function. """
test_dict = {'0': 1, '1': 1, '2': 2}
right_result_dict = {'0': 0.25, '1': 0.25, '2': 0.5}
wrong_result_dict = {'0': 0.3, '1': 0.1, '2': 0.6}
self.assertIsInstance(my_dict.calc_percentage(test_dict), dict)
self.assertEqual(my_dict.calc_percentage(test_dict), right_result_dict)
self.assertNotEqual(my_dict.calc_percentage(test_dict), wrong_result_dict)
def test_filter(self):
""" Tests filter function. """
test_dict = {'0': 1, '1': 1, '2': {'0': 3}}
right_filtered_dict = {'0': 1, '2': 3}
wrong_filtered_dict = {'0': 1, '1': 1}
self.assertIsInstance(my_dict.filter(test_dict, '0'), dict)
self.assertEqual(my_dict.filter(test_dict, '0'), right_filtered_dict)
self.assertNotEqual(my_dict.filter(test_dict, '0'), wrong_filtered_dict)
def test_sort(self):
""" Tests sort function. """
test_dict = {'0': 1, '1': 1, '2': 2}
right_sorted_list = {'2': 2, '0': 1, '1': 1}
wrong_sorted_list = {'1': 1, '2': 2, '3': 1}
self.assertIsInstance(my_dict.sort(test_dict), dict)
self.assertEqual(my_dict.sort(test_dict), right_sorted_list)
self.assertNotEqual(my_dict.sort(test_dict), wrong_sorted_list)
def test_update(self):
""" Test update function. """
test_dict = {'0': 1, '1': 1, '2': 2}
right_updated_dict = {'0': 1, '1': 1, '2': 20}
wrong_updated_dict = {'0': 1, '1': 1, '2': 2}
def update_func(value):
return value * 10
my_dict.update(test_dict, '2', update_func)
self.assertEqual(test_dict, right_updated_dict)
self.assertNotEqual(test_dict, wrong_updated_dict)
| [
"graphy.utils.dict.update",
"graphy.utils.dict.sort",
"graphy.utils.dict.calc_percentage",
"graphy.utils.dict.filter"
] | [((1805, 1848), 'graphy.utils.dict.update', 'my_dict.update', (['test_dict', '"""2"""', 'update_func'], {}), "(test_dict, '2', update_func)\n", (1819, 1848), True, 'from graphy.utils import dict as my_dict\n'), ((444, 478), 'graphy.utils.dict.calc_percentage', 'my_dict.calc_percentage', (['test_dict'], {}), '(test_dict)\n', (467, 478), True, 'from graphy.utils import dict as my_dict\n'), ((512, 546), 'graphy.utils.dict.calc_percentage', 'my_dict.calc_percentage', (['test_dict'], {}), '(test_dict)\n', (535, 546), True, 'from graphy.utils import dict as my_dict\n'), ((596, 630), 'graphy.utils.dict.calc_percentage', 'my_dict.calc_percentage', (['test_dict'], {}), '(test_dict)\n', (619, 630), True, 'from graphy.utils import dict as my_dict\n'), ((895, 925), 'graphy.utils.dict.filter', 'my_dict.filter', (['test_dict', '"""0"""'], {}), "(test_dict, '0')\n", (909, 925), True, 'from graphy.utils import dict as my_dict\n'), ((959, 989), 'graphy.utils.dict.filter', 'my_dict.filter', (['test_dict', '"""0"""'], {}), "(test_dict, '0')\n", (973, 989), True, 'from graphy.utils import dict as my_dict\n'), ((1041, 1071), 'graphy.utils.dict.filter', 'my_dict.filter', (['test_dict', '"""0"""'], {}), "(test_dict, '0')\n", (1055, 1071), True, 'from graphy.utils import dict as my_dict\n'), ((1339, 1362), 'graphy.utils.dict.sort', 'my_dict.sort', (['test_dict'], {}), '(test_dict)\n', (1351, 1362), True, 'from graphy.utils import dict as my_dict\n'), ((1396, 1419), 'graphy.utils.dict.sort', 'my_dict.sort', (['test_dict'], {}), '(test_dict)\n', (1408, 1419), True, 'from graphy.utils import dict as my_dict\n'), ((1469, 1492), 'graphy.utils.dict.sort', 'my_dict.sort', (['test_dict'], {}), '(test_dict)\n', (1481, 1492), True, 'from graphy.utils import dict as my_dict\n')] |
from django.urls import path
from xcx_interface.controller.menu.menu_msg import getMenuList
urlpatterns = [
# 获取菜单列表
path('getMenuList', getMenuList, name='getMenuList')
]
| [
"django.urls.path"
] | [((126, 178), 'django.urls.path', 'path', (['"""getMenuList"""', 'getMenuList'], {'name': '"""getMenuList"""'}), "('getMenuList', getMenuList, name='getMenuList')\n", (130, 178), False, 'from django.urls import path\n')] |
#!/usr/bin/env python
# Copyright (C) 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:n
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This tool has a couple of helpful macros to process Wasm files from the wasm.json.
from generateWasm import *
import optparse
import sys
import re
parser = optparse.OptionParser(usage="usage: %prog <wasm.json> <WasmOps.h>")
(options, args) = parser.parse_args(sys.argv[0:])
if len(args) != 3:
parser.error(parser.usage)
wasm = Wasm(args[0], args[1])
opcodes = wasm.opcodes
wasmB3IRGeneratorHFile = open(args[2], "w")
opcodeRegex = re.compile('([a-zA-Z0-9]+)')
argumentRegex = re.compile('(\@[0-9]+)')
decimalRegex = re.compile('([-]?[0-9]+)')
whitespaceRegex = re.compile('\s+')
commaRegex = re.compile('(,)')
oparenRegex = re.compile('(\()')
cparenRegex = re.compile('(\))')
class Source:
def __init__(self, contents, offset=0):
self.contents = contents
self.offset = offset
def read(regex, source):
match = regex.match(source.contents, source.offset)
if not match:
return None
source.offset = match.end()
return match.group()
def lex(source):
result = []
while source.offset != len(source.contents):
read(whitespaceRegex, source)
opcode = read(opcodeRegex, source)
if opcode:
result.append(opcode)
continue
argument = read(argumentRegex, source)
if argument:
result.append(argument)
continue
number = read(decimalRegex, source)
if number:
result.append(int(number))
continue
oparen = read(oparenRegex, source)
if oparen:
result.append(oparen)
continue
cparen = read(cparenRegex, source)
if cparen:
result.append(cparen)
continue
comma = read(commaRegex, source)
if comma:
# Skip commas
continue
raise Exception("Lexing Error: could not lex token from: " + source.contents + " at offset: " + str(source.offset) + " (" + source.contents[source.offset:] + "). With tokens: [" + ", ".join(result) + "]")
return result
class CodeGenerator:
def __init__(self, tokens):
self.tokens = tokens
self.index = 0
self.code = []
def advance(self):
self.index += 1
def token(self):
return self.tokens[self.index]
def parseError(self, string):
raise Exception("Parse error " + string)
def consume(self, string):
if self.token() != string:
self.parseError("Expected " + string + " but got " + self.token())
self.advance()
def generateParameters(self):
self.advance()
params = []
tokens = self.tokens
while self.index < len(tokens):
if self.token() == ")":
self.advance()
return params
params.append(self.generateOpcode())
self.parseError("Parsing arguments fell off end")
def generateOpcode(self):
result = None
if self.token() == "i32" or self.token() == "i64":
type = "Int32"
if self.token() == "i64":
type = "Int64"
self.advance()
self.consume("(")
self.code.append(generateConstCode(self.index, self.token(), type))
result = temp(self.index)
self.advance()
self.consume(")")
elif argumentRegex.match(self.token()):
result = "arg" + self.token()[1:]
self.advance()
else:
op = self.token()
index = self.index
self.advance()
params = self.generateParameters()
self.code.append(generateB3OpCode(index, op, params))
result = temp(index)
return result
def generate(self, wasmOp):
if len(self.tokens) == 1:
params = ["arg" + str(param) for param in range(len(wasmOp["parameter"]))]
return " result = m_currentBlock->appendNew<Value>(m_proc, B3::" + self.token() + ", origin(), " + ", ".join(params) + ")"
result = self.generateOpcode()
self.code.append("result = " + result)
return " " + " \n".join(self.code)
def temp(index):
return "temp" + str(index)
def generateB3OpCode(index, op, params):
return "Value* " + temp(index) + " = m_currentBlock->appendNew<Value>(m_proc, B3::" + op + ", origin(), " + ", ".join(params) + ");"
def generateConstCode(index, value, type):
return "Value* " + temp(index) + " = constant(" + type + ", " + value + ");"
def generateB3Code(wasmOp, source):
tokens = lex(Source(source))
parser = CodeGenerator(tokens)
return parser.generate(wasmOp)
def generateSimpleCode(op):
opcode = op["opcode"]
b3op = opcode["b3op"]
args = ["ExpressionType arg" + str(param) for param in range(len(opcode["parameter"]))]
args.append("ExpressionType& result")
return """
template<> auto B3IRGenerator::addOp<OpType::""" + wasm.toCpp(op["name"]) + ">(" + ", ".join(args) + """) -> PartialResult
{
""" + generateB3Code(opcode, b3op) + """;
return { };
}
"""
definitions = [generateSimpleCode(op) for op in wasm.opcodeIterator(lambda op: isSimple(op) and (isBinary(op) or isUnary(op)))]
contents = wasm.header + """
#pragma once
#if ENABLE(WEBASSEMBLY)
namespace JSC { namespace Wasm {
""" + "".join(definitions) + """
} } // namespace JSC::Wasm
#endif // ENABLE(WEBASSEMBLY)
"""
wasmB3IRGeneratorHFile.write(contents)
wasmB3IRGeneratorHFile.close()
| [
"optparse.OptionParser",
"re.compile"
] | [((1504, 1571), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': '"""usage: %prog <wasm.json> <WasmOps.h>"""'}), "(usage='usage: %prog <wasm.json> <WasmOps.h>')\n", (1525, 1571), False, 'import optparse\n'), ((1785, 1813), 're.compile', 're.compile', (['"""([a-zA-Z0-9]+)"""'], {}), "('([a-zA-Z0-9]+)')\n", (1795, 1813), False, 'import re\n'), ((1830, 1855), 're.compile', 're.compile', (['"""(\\\\@[0-9]+)"""'], {}), "('(\\\\@[0-9]+)')\n", (1840, 1855), False, 'import re\n'), ((1870, 1896), 're.compile', 're.compile', (['"""([-]?[0-9]+)"""'], {}), "('([-]?[0-9]+)')\n", (1880, 1896), False, 'import re\n'), ((1915, 1933), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (1925, 1933), False, 'import re\n'), ((1946, 1963), 're.compile', 're.compile', (['"""(,)"""'], {}), "('(,)')\n", (1956, 1963), False, 'import re\n'), ((1978, 1997), 're.compile', 're.compile', (['"""(\\\\()"""'], {}), "('(\\\\()')\n", (1988, 1997), False, 'import re\n'), ((2011, 2030), 're.compile', 're.compile', (['"""(\\\\))"""'], {}), "('(\\\\))')\n", (2021, 2030), False, 'import re\n')] |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image, CameraInfo
import os.path
import cv2
from cv_bridge import CvBridge, CvBridgeError
import time
IMAGE_MESSAGE_TOPIC = 'image_color'
CAMERA_MESSAGE_TOPIC = 'cam_info'
rospy.init_node('video_2_camera_stream')
device_path = rospy.get_param('~dev', '/dev/video0')
left_topic_name = rospy.get_param('~left_topic', 'left')
right_topic_name = rospy.get_param('~right_topic', 'right')
queue_size = 10
left_img_pub = rospy.Publisher(left_topic_name + '/' + IMAGE_MESSAGE_TOPIC, Image, queue_size=queue_size)
left_cam_pub = rospy.Publisher(left_topic_name + '/' + CAMERA_MESSAGE_TOPIC, CameraInfo, queue_size=queue_size)
right_img_pub = rospy.Publisher(right_topic_name + '/' + IMAGE_MESSAGE_TOPIC, Image, queue_size=queue_size)
right_cam_pub = rospy.Publisher(right_topic_name + '/' + CAMERA_MESSAGE_TOPIC, CameraInfo, queue_size=queue_size)
rate = rospy.Rate(30)
capture = cv2.VideoCapture(device_path)
if not capture.isOpened():
rospy.logerr('Failed to open device')
exit(1)
req_width = 320
req_height = 240
capture.set(cv2.CAP_PROP_FRAME_WIDTH, req_width * 2);
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, req_height);
fr_width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
fr_height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
fr_width_2 = int(fr_width/2)
print(fr_width, fr_height)
bridge = CvBridge()
cam_info = CameraInfo()
cam_info.height = fr_height
cam_info.width = fr_width_2
def main():
while not rospy.is_shutdown():
meta, frame = capture.read()
frame_left = frame[:, :fr_width_2, :]
frame_right = frame[:, fr_width_2:]
# frame_gaus = cv2.GaussianBlur(frame, (3, 3), 0)
# frame_gray = cv2.cvtColor(frame_gaus, cv2.COLOR_BGR2GRAY)
# I want to publish the Canny Edge Image and the original Image
cam_info.header.stamp = rospy.Time.from_sec(time.time())
left_cam_pub.publish(cam_info)
right_cam_pub.publish(cam_info)
left_img_pub.publish(bridge.cv2_to_imgmsg(frame_left, "bgr8"))
right_img_pub.publish(bridge.cv2_to_imgmsg(frame_right, "bgr8"))
# cv2.imshow('l', frame_left)
# cv2.imshow('r', frame_right)
# cv2.waitKey(1)
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
rospy.loginfo('Exception caught') | [
"rospy.logerr",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_param",
"cv_bridge.CvBridge",
"sensor_msgs.msg.CameraInfo",
"rospy.Rate",
"cv2.VideoCapture",
"rospy.Publisher",
"time.time",
"rospy.loginfo"
] | [((240, 280), 'rospy.init_node', 'rospy.init_node', (['"""video_2_camera_stream"""'], {}), "('video_2_camera_stream')\n", (255, 280), False, 'import rospy\n'), ((296, 334), 'rospy.get_param', 'rospy.get_param', (['"""~dev"""', '"""/dev/video0"""'], {}), "('~dev', '/dev/video0')\n", (311, 334), False, 'import rospy\n'), ((353, 391), 'rospy.get_param', 'rospy.get_param', (['"""~left_topic"""', '"""left"""'], {}), "('~left_topic', 'left')\n", (368, 391), False, 'import rospy\n'), ((411, 451), 'rospy.get_param', 'rospy.get_param', (['"""~right_topic"""', '"""right"""'], {}), "('~right_topic', 'right')\n", (426, 451), False, 'import rospy\n'), ((485, 579), 'rospy.Publisher', 'rospy.Publisher', (["(left_topic_name + '/' + IMAGE_MESSAGE_TOPIC)", 'Image'], {'queue_size': 'queue_size'}), "(left_topic_name + '/' + IMAGE_MESSAGE_TOPIC, Image,\n queue_size=queue_size)\n", (500, 579), False, 'import rospy\n'), ((591, 691), 'rospy.Publisher', 'rospy.Publisher', (["(left_topic_name + '/' + CAMERA_MESSAGE_TOPIC)", 'CameraInfo'], {'queue_size': 'queue_size'}), "(left_topic_name + '/' + CAMERA_MESSAGE_TOPIC, CameraInfo,\n queue_size=queue_size)\n", (606, 691), False, 'import rospy\n'), ((704, 799), 'rospy.Publisher', 'rospy.Publisher', (["(right_topic_name + '/' + IMAGE_MESSAGE_TOPIC)", 'Image'], {'queue_size': 'queue_size'}), "(right_topic_name + '/' + IMAGE_MESSAGE_TOPIC, Image,\n queue_size=queue_size)\n", (719, 799), False, 'import rospy\n'), ((812, 913), 'rospy.Publisher', 'rospy.Publisher', (["(right_topic_name + '/' + CAMERA_MESSAGE_TOPIC)", 'CameraInfo'], {'queue_size': 'queue_size'}), "(right_topic_name + '/' + CAMERA_MESSAGE_TOPIC, CameraInfo,\n queue_size=queue_size)\n", (827, 913), False, 'import rospy\n'), ((918, 932), 'rospy.Rate', 'rospy.Rate', (['(30)'], {}), '(30)\n', (928, 932), False, 'import rospy\n'), ((944, 973), 'cv2.VideoCapture', 'cv2.VideoCapture', (['device_path'], {}), '(device_path)\n', (960, 973), False, 'import cv2\n'), ((1365, 1375), 
'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1373, 1375), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((1395, 1407), 'sensor_msgs.msg.CameraInfo', 'CameraInfo', ([], {}), '()\n', (1405, 1407), False, 'from sensor_msgs.msg import Image, CameraInfo\n'), ((1006, 1043), 'rospy.logerr', 'rospy.logerr', (['"""Failed to open device"""'], {}), "('Failed to open device')\n", (1018, 1043), False, 'import rospy\n'), ((1492, 1511), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1509, 1511), False, 'import rospy\n'), ((1894, 1905), 'time.time', 'time.time', ([], {}), '()\n', (1903, 1905), False, 'import time\n'), ((2356, 2389), 'rospy.loginfo', 'rospy.loginfo', (['"""Exception caught"""'], {}), "('Exception caught')\n", (2369, 2389), False, 'import rospy\n')] |
from typing import List
from collections import Counter
def top_k_frequent(words: List[str], k: int) -> List[str]:
words = sorted(words)
freq = Counter(words)
freq = sorted(freq.items(), key=lambda x: x[1], reverse=True)
return [i[0] for i in freq[:k]]
if __name__ == '__main__':
print(top_k_frequent(["i", "love", "leetcode", "i", "love", "coding"], k=2))
| [
"collections.Counter"
] | [((162, 176), 'collections.Counter', 'Counter', (['words'], {}), '(words)\n', (169, 176), False, 'from collections import Counter\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
freeze_backbone=False,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.freeze_backbone = freeze_backbone
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
#this call to the super class function adds batch_image_shape
#to img_metas dict
super(SingleStageDetector, self).forward_train(img, img_metas)
if self.freeze_backbone:
with torch.no_grad():
x = self.extract_feat(img)
else:
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
return losses
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
| [
"warnings.warn",
"torch.no_grad",
"torch._shape_as_tensor"
] | [((1035, 1172), 'warnings.warn', 'warnings.warn', (['"""Warning! MultiheadAttention in DETR does not support flops computation! Do not use the results in your papers!"""'], {}), "(\n 'Warning! MultiheadAttention in DETR does not support flops computation! Do not use the results in your papers!'\n )\n", (1048, 1172), False, 'import warnings\n'), ((4030, 4057), 'torch._shape_as_tensor', 'torch._shape_as_tensor', (['img'], {}), '(img)\n', (4052, 4057), False, 'import torch\n'), ((2921, 2936), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2934, 2936), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
from .integrate import Integrate
class Riemann(Integrate):
"""
Compute the Riemann sum of f(x) over the interval [a,b].
Parameters
----------
f : function
A single variable function f(x), ex: lambda x:np.exp(x**2)
"""
def __init__(self, f):
Integrate.__init__(self, f)
self.N = 25
def compute_integral(self, a, b, N = 25, method='midpoint'):
"""
Approximate the value of the integral of f(x) dx from a to b with N sub-intervals using left, right or midpoint method
Parameters
----------
a , b : any numbers
Endpoints of the interval [a,b]
N : integer
Number of subintervals of equal length in the partition of [a,b]
method : string
Determines the kind of Riemann sum:\n
right : Riemann sum using right endpoints\n
left : Riemann sum using left endpoints\n
midpoint (default) : Riemann sum using midpoints
Returns
-------
float
Approximation of the integral given by the Riemann sum.
Examples
--------
>>> compute_integral(0,np.pi/2,1000), f = lambda x:1 / (1 + x**2)
approx = 1.3731040812301096
actual = 1.373400766945016
"""
self.a = a
self.b = b
self.N = N
dx = (self.b - self.a) / self.N
x = np.linspace(self.a, self.b, self.N+1)
if method == 'left':
x_left = x[:-1] # from 0 to N-1
return np.sum(self.f(x_left)*dx)
elif method == 'right':
x_right = x[1:] # from 1 to N
return np.sum(self.f(x_right)*dx)
elif method == 'midpoint':
x_mid = (x[:-1] + x[1:])/2 # all N but averaged
return np.sum(self.f(x_mid)*dx)
else:
raise ValueError("Method must be 'left', 'right' or 'midpoint'.")
def plot_function(self):
x = np.linspace(self.a, self.b, self.N+1)
y = self.f(x)
X = np.linspace(self.a, self.b, 5*self.N+1)
Y = self.f(X)
plt.figure(figsize=(15,5))
plt.subplot(1,3,1)
plt.plot(X,Y,'b')
x_left = x[:-1] # Left endpoints
y_left = y[:-1]
plt.plot(x_left,y_left,'b.',markersize=10)
plt.bar(x_left, y_left,width=(self.b - self.a) / self.N, alpha=0.2, align='edge', edgecolor='b')
plt.title('Left Riemann Sum, N = {}'.format(self.N))
plt.subplot(1,3,2)
plt.plot(X,Y,'b')
x_mid = (x[:-1] + x[1:])/2 # Midpoints
y_mid = self.f(x_mid)
plt.plot(x_mid, y_mid, 'b.', markersize=10)
plt.bar(x_mid, y_mid,width=(self.b - self.a) / self.N, alpha=0.2, edgecolor='b')
plt.title('Midpoint Riemann Sum, N = {}'.format(self.N))
plt.subplot(1, 3, 3)
plt.plot(X, Y, 'b')
x_right = x[1:] # Right endpoints
y_right = y[1:]
plt.plot(x_right, y_right,'b.', markersize=10)
plt.bar(x_right, y_right,width=-(self.b - self.a) / self.N, alpha=0.2, align='edge', edgecolor='b')
plt.title('Right Riemann Sum, N = {}'.format(self.N))
plt.show()
| [
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1479, 1518), 'numpy.linspace', 'np.linspace', (['self.a', 'self.b', '(self.N + 1)'], {}), '(self.a, self.b, self.N + 1)\n', (1490, 1518), True, 'import numpy as np\n'), ((2043, 2082), 'numpy.linspace', 'np.linspace', (['self.a', 'self.b', '(self.N + 1)'], {}), '(self.a, self.b, self.N + 1)\n', (2054, 2082), True, 'import numpy as np\n'), ((2116, 2159), 'numpy.linspace', 'np.linspace', (['self.a', 'self.b', '(5 * self.N + 1)'], {}), '(self.a, self.b, 5 * self.N + 1)\n', (2127, 2159), True, 'import numpy as np\n'), ((2187, 2214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (2197, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2223, 2243), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (2234, 2243), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2269), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""b"""'], {}), "(X, Y, 'b')\n", (2258, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2341, 2386), 'matplotlib.pyplot.plot', 'plt.plot', (['x_left', 'y_left', '"""b."""'], {'markersize': '(10)'}), "(x_left, y_left, 'b.', markersize=10)\n", (2349, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2392, 2494), 'matplotlib.pyplot.bar', 'plt.bar', (['x_left', 'y_left'], {'width': '((self.b - self.a) / self.N)', 'alpha': '(0.2)', 'align': '"""edge"""', 'edgecolor': '"""b"""'}), "(x_left, y_left, width=(self.b - self.a) / self.N, alpha=0.2, align=\n 'edge', edgecolor='b')\n", (2399, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2579), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (2570, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2586, 2605), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""b"""'], {}), "(X, Y, 'b')\n", (2594, 2605), True, 'import matplotlib.pyplot as plt\n'), ((2689, 2732), 'matplotlib.pyplot.plot', 'plt.plot', (['x_mid', 'y_mid', '"""b."""'], {'markersize': '(10)'}), 
"(x_mid, y_mid, 'b.', markersize=10)\n", (2697, 2732), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2826), 'matplotlib.pyplot.bar', 'plt.bar', (['x_mid', 'y_mid'], {'width': '((self.b - self.a) / self.N)', 'alpha': '(0.2)', 'edgecolor': '"""b"""'}), "(x_mid, y_mid, width=(self.b - self.a) / self.N, alpha=0.2,\n edgecolor='b')\n", (2748, 2826), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (2907, 2916), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2944), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""b"""'], {}), "(X, Y, 'b')\n", (2933, 2944), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3066), 'matplotlib.pyplot.plot', 'plt.plot', (['x_right', 'y_right', '"""b."""'], {'markersize': '(10)'}), "(x_right, y_right, 'b.', markersize=10)\n", (3027, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3178), 'matplotlib.pyplot.bar', 'plt.bar', (['x_right', 'y_right'], {'width': '(-(self.b - self.a) / self.N)', 'alpha': '(0.2)', 'align': '"""edge"""', 'edgecolor': '"""b"""'}), "(x_right, y_right, width=-(self.b - self.a) / self.N, alpha=0.2,\n align='edge', edgecolor='b')\n", (3081, 3178), True, 'import matplotlib.pyplot as plt\n'), ((3245, 3255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3253, 3255), True, 'import matplotlib.pyplot as plt\n')] |
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import utils
from torch.utils.data import Dataset, DataLoader
from net import model
import math
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import StandardScaler
from torch.optim import lr_scheduler
from sklearn.metrics import mean_absolute_percentage_error, mean_absolute_error
#OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# RuntimeError: CUDA error: unspecified launch failure
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
if torch.cuda.is_available():
device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
torch.cuda.set_device(0)
torch.cuda.empty_cache()
else:
device = torch.device('cpu')
n = 10 # 取前n天的資料作為特徵
#載入資料集
train_x = pd.read_csv(r'D:\dataset\lilium_price\train_data.csv', encoding='utf-8', index_col = 0)
train_y = pd.read_csv(r'D:\dataset\lilium_price\train_label.csv', encoding='utf-8', index_col = 0)
val_x = pd.read_csv(r'D:\dataset\lilium_price\val_data.csv', encoding='utf-8', index_col = 0)
val_y = pd.read_csv(r'D:\dataset\lilium_price\val_label.csv', encoding='utf-8', index_col = 0)
#正規化
x_scaler = StandardScaler().fit(train_x)
train_x = x_scaler.transform(train_x)
val_x = x_scaler.transform(val_x)
# to tensor
train_x = torch.Tensor(train_x)
train_y = torch.Tensor(np.array(train_y))
val_x = torch.Tensor(val_x)
val_y = torch.Tensor(np.array(val_y))
# Setloader
trainset = utils.Setloader(train_x, train_y)
valset = utils.Setloader(val_x, val_y)
# train
batch_size = train_x.shape[0]
val_batch_size = val_x.shape[0]
LR = 0.1
num_epochs = 3000
model = model.RNN_modelv1(input_dim=train_x.shape[1], output_dim=train_y.shape[1]).to(device)
# 選擇優化器與損失函數
optimizer = torch.optim.AdamW(model.parameters(), lr=LR)
criterion = nn.MSELoss().to(device)
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.9)
# scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
# T_max=10,
# eta_min=1e-6,
# last_epoch=-1)
trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
valloader = DataLoader(valset, batch_size=val_batch_size, shuffle=True)
train_epoch_size = math.ceil(len(trainloader.dataset)/batch_size)
val_epoch_size = math.ceil(len(valloader.dataset)/val_batch_size)
loss_list = []
val_loss_list = []
mae_list = []
lr_list = []
for epoch in range(num_epochs):
epoch += 1
print('running epoch: {} / {}'.format(epoch, num_epochs))
#訓練模式
model.train()
total_loss = 0
with tqdm(total=train_epoch_size) as pbar:
for inputs, target in trainloader:
inputs, target = inputs.to(device), target.to(device)
output = model(torch.unsqueeze(inputs, dim=0))
loss = criterion(torch.squeeze(output), target)
running_loss = loss.item()
total_loss += running_loss*inputs.shape[0]
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # back propagation, compute gradients
optimizer.step()
#更新進度條
pbar.set_description('train')
pbar.set_postfix(
**{
'running_loss': running_loss,
})
pbar.update(1)
loss = total_loss/len(trainloader.dataset)
loss_list.append(loss)
#評估模式
model.eval()
total_val_loss = 0
total_mae = 0
with tqdm(total=val_epoch_size) as pbar:
with torch.no_grad():
for inputs, target in valloader:
inputs, target = inputs.to(device), target.to(device)
output = model(torch.unsqueeze(inputs, dim=0))
running_val_loss = criterion(torch.squeeze(output), target).item()
running_mae = mean_absolute_error(target.cpu(), torch.squeeze(output).cpu())
total_val_loss += running_val_loss*inputs.shape[0]
total_mae += running_mae*inputs.shape[0]
#更新進度條
pbar.set_description('validation')
pbar.set_postfix(
**{
'running_val_loss': running_val_loss,
'mae': running_mae
})
pbar.update(1)
lr_list.append(scheduler.get_last_lr())
scheduler.step()
val_loss = total_val_loss/len(valloader.dataset)
mae = total_mae/len(valloader.dataset)
val_loss_list.append(val_loss)
mae_list.append(mae)
print('train_loss: {:.4f}, valid_loss: {:.4f}, MAE:{:.2f}, lr:{:.1e}'.format(loss, val_loss, mae, scheduler.get_last_lr()[0]) )
#每10個epochs及最後一個epoch儲存模型
if (not epoch % 10) or (epoch == num_epochs) :
torch.save(model.state_dict(), './logs/epoch%d-loss%.4f-val_loss%.4f-mae%.2f.pth' %(epoch, loss, val_loss, mae))
#繪製圖
plt.figure()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(loss_list, label='Loss')
plt.plot(val_loss_list, label='Val Loss')
plt.legend(loc='best')
plt.savefig('./images/loss.jpg')
plt.show()
plt.figure()
plt.xlabel('Epochs')
plt.ylabel('mae')
plt.plot(mae_list)
plt.savefig('./images/mae.jpg')
plt.show()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"net.model.RNN_modelv1",
"numpy.array",
"torch.nn.MSELoss",
"utils.Setloader",
"torch.cuda.is_available",
"torch.squeeze",
"net.model.train",
"net.model.eval",
"torch.unsqueeze",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"net.model... | [((635, 660), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (658, 660), False, 'import torch\n'), ((876, 967), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\train_data.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\train_data.csv', encoding='utf-8',\n index_col=0)\n", (887, 967), True, 'import pandas as pd\n'), ((974, 1066), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\train_label.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\train_label.csv', encoding='utf-8',\n index_col=0)\n", (985, 1066), True, 'import pandas as pd\n'), ((1071, 1160), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\val_data.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\val_data.csv', encoding='utf-8',\n index_col=0)\n", (1082, 1160), True, 'import pandas as pd\n'), ((1165, 1255), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\dataset\\\\lilium_price\\\\val_label.csv"""'], {'encoding': '"""utf-8"""', 'index_col': '(0)'}), "('D:\\\\dataset\\\\lilium_price\\\\val_label.csv', encoding='utf-8',\n index_col=0)\n", (1176, 1255), True, 'import pandas as pd\n'), ((1394, 1415), 'torch.Tensor', 'torch.Tensor', (['train_x'], {}), '(train_x)\n', (1406, 1415), False, 'import torch\n'), ((1466, 1485), 'torch.Tensor', 'torch.Tensor', (['val_x'], {}), '(val_x)\n', (1478, 1485), False, 'import torch\n'), ((1547, 1580), 'utils.Setloader', 'utils.Setloader', (['train_x', 'train_y'], {}), '(train_x, train_y)\n', (1562, 1580), False, 'import utils\n'), ((1590, 1619), 'utils.Setloader', 'utils.Setloader', (['val_x', 'val_y'], {}), '(val_x, val_y)\n', (1605, 1619), False, 'import utils\n'), ((1932, 1987), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(10)', 'gamma': '(0.9)'}), '(optimizer, step_size=10, 
gamma=0.9)\n', (1951, 1987), False, 'from torch.optim import lr_scheduler\n'), ((2245, 2302), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (2255, 2302), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2315, 2374), 'torch.utils.data.DataLoader', 'DataLoader', (['valset'], {'batch_size': 'val_batch_size', 'shuffle': '(True)'}), '(valset, batch_size=val_batch_size, shuffle=True)\n', (2325, 2374), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((5076, 5088), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5086, 5088), True, 'import matplotlib.pyplot as plt\n'), ((5089, 5109), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5099, 5109), True, 'import matplotlib.pyplot as plt\n'), ((5110, 5128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (5120, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5162), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_list'], {'label': '"""Loss"""'}), "(loss_list, label='Loss')\n", (5137, 5162), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5204), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss_list'], {'label': '"""Val Loss"""'}), "(val_loss_list, label='Val Loss')\n", (5171, 5204), True, 'import matplotlib.pyplot as plt\n'), ((5205, 5227), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5215, 5227), True, 'import matplotlib.pyplot as plt\n'), ((5228, 5260), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/loss.jpg"""'], {}), "('./images/loss.jpg')\n", (5239, 5260), True, 'import matplotlib.pyplot as plt\n'), ((5261, 5271), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5269, 5271), True, 'import matplotlib.pyplot as plt\n'), ((5273, 5285), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5283, 5285), True, 'import 
matplotlib.pyplot as plt\n'), ((5286, 5306), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (5296, 5306), True, 'import matplotlib.pyplot as plt\n'), ((5307, 5324), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mae"""'], {}), "('mae')\n", (5317, 5324), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5343), 'matplotlib.pyplot.plot', 'plt.plot', (['mae_list'], {}), '(mae_list)\n', (5333, 5343), True, 'import matplotlib.pyplot as plt\n'), ((5344, 5375), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./images/mae.jpg"""'], {}), "('./images/mae.jpg')\n", (5355, 5375), True, 'import matplotlib.pyplot as plt\n'), ((5376, 5386), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5384, 5386), True, 'import matplotlib.pyplot as plt\n'), ((675, 695), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (687, 695), False, 'import torch\n'), ((742, 766), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (763, 766), False, 'import torch\n'), ((771, 795), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (793, 795), False, 'import torch\n'), ((815, 834), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (827, 834), False, 'import torch\n'), ((1439, 1456), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (1447, 1456), True, 'import numpy as np\n'), ((1507, 1522), 'numpy.array', 'np.array', (['val_y'], {}), '(val_y)\n', (1515, 1522), True, 'import numpy as np\n'), ((1856, 1874), 'net.model.parameters', 'model.parameters', ([], {}), '()\n', (1872, 1874), False, 'from net import model\n'), ((2692, 2705), 'net.model.train', 'model.train', ([], {}), '()\n', (2703, 2705), False, 'from net import model\n'), ((3599, 3611), 'net.model.eval', 'model.eval', ([], {}), '()\n', (3609, 3611), False, 'from net import model\n'), ((1269, 1285), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1283, 1285), False, 'from 
sklearn.preprocessing import StandardScaler\n'), ((1727, 1801), 'net.model.RNN_modelv1', 'model.RNN_modelv1', ([], {'input_dim': 'train_x.shape[1]', 'output_dim': 'train_y.shape[1]'}), '(input_dim=train_x.shape[1], output_dim=train_y.shape[1])\n', (1744, 1801), False, 'from net import model\n'), ((1896, 1908), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1906, 1908), True, 'import torch.nn as nn\n'), ((2739, 2767), 'tqdm.tqdm', 'tqdm', ([], {'total': 'train_epoch_size'}), '(total=train_epoch_size)\n', (2743, 2767), False, 'from tqdm import tqdm\n'), ((3662, 3688), 'tqdm.tqdm', 'tqdm', ([], {'total': 'val_epoch_size'}), '(total=val_epoch_size)\n', (3666, 3688), False, 'from tqdm import tqdm\n'), ((3711, 3726), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3724, 3726), False, 'import torch\n'), ((4967, 4985), 'net.model.state_dict', 'model.state_dict', ([], {}), '()\n', (4983, 4985), False, 'from net import model\n'), ((2913, 2943), 'torch.unsqueeze', 'torch.unsqueeze', (['inputs'], {'dim': '(0)'}), '(inputs, dim=0)\n', (2928, 2943), False, 'import torch\n'), ((2974, 2995), 'torch.squeeze', 'torch.squeeze', (['output'], {}), '(output)\n', (2987, 2995), False, 'import torch\n'), ((3874, 3904), 'torch.unsqueeze', 'torch.unsqueeze', (['inputs'], {'dim': '(0)'}), '(inputs, dim=0)\n', (3889, 3904), False, 'import torch\n'), ((3951, 3972), 'torch.squeeze', 'torch.squeeze', (['output'], {}), '(output)\n', (3964, 3972), False, 'import torch\n'), ((4053, 4074), 'torch.squeeze', 'torch.squeeze', (['output'], {}), '(output)\n', (4066, 4074), False, 'import torch\n')] |
from django.db import models
class User(models.Model):
id = models.AutoField(primary_key=True, blank=True)
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
login = models.CharField(max_length=200, unique=True)
password = models.CharField(max_length=200)
email = models.EmailField(max_length=200, unique=True)
job_title = models.CharField(max_length=200)
is_admin = models.BooleanField(default=False) | [
"django.db.models.EmailField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((66, 112), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'blank': '(True)'}), '(primary_key=True, blank=True)\n', (82, 112), False, 'from django.db import models\n'), ((130, 162), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (146, 162), False, 'from django.db import models\n'), ((179, 211), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (195, 211), False, 'from django.db import models\n'), ((224, 269), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'unique': '(True)'}), '(max_length=200, unique=True)\n', (240, 269), False, 'from django.db import models\n'), ((285, 317), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (301, 317), False, 'from django.db import models\n'), ((330, 376), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(200)', 'unique': '(True)'}), '(max_length=200, unique=True)\n', (347, 376), False, 'from django.db import models\n'), ((393, 425), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (409, 425), False, 'from django.db import models\n'), ((441, 475), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (460, 475), False, 'from django.db import models\n')] |
import numpy as np
from scipy import stats
__all__ = ['register_stat']
statistic_factory = {
'min': np.min,
'max': np.max,
'mean': np.mean,
'median': np.median,
'std': np.std,
'sum': np.sum,
'cumsum': np.cumsum,
}
def register_stat(func=None, name=None):
"""Function to register stats callables.
Decorator to register user defined stats function. Can be called
as a function to register things like closures or partial functions.
Stats are registered as their function name.
Args:
func (``Callable``): Function to register.
name (``str``, optional): If not provided, function is
registered as it's name.
Returns:
``Callable``: If used as decorator.
``None``: If used as a function
Raises:
``KeyError``: If function exists in ``tiko.stats.statistic_factory``
Examples:
>>>
"""
def inner(func_):
nonlocal name
if name is None:
name = func_.__name__
if name in statistic_factory:
raise KeyError('Function already exists. Chose a new name for %s' % name)
statistic_factory[name] = func_
return func_
if func is None:
return inner
else:
inner(func)
@register_stat
def mode_count(a):
return stats.mode(a, axis=None)[1]
@register_stat
def mode(a):
return stats.mode(a, axis=None)[0]
@register_stat
def first(a):
return a[0]
@register_stat
def last(a):
return a[-1]
@register_stat
def delta(a):
return last(a) - first(a)
| [
"scipy.stats.mode"
] | [((1322, 1346), 'scipy.stats.mode', 'stats.mode', (['a'], {'axis': 'None'}), '(a, axis=None)\n', (1332, 1346), False, 'from scipy import stats\n'), ((1391, 1415), 'scipy.stats.mode', 'stats.mode', (['a'], {'axis': 'None'}), '(a, axis=None)\n', (1401, 1415), False, 'from scipy import stats\n')] |
import argparse
import numpy as np
import utils.loader as l
def get_arguments():
"""Gets arguments from the command line.
Returns:
A parser with the input arguments.
"""
# Creates the ArgumentParser
parser = argparse.ArgumentParser(
usage='Digitizes a numpy array into intervals in order to create targets.')
parser.add_argument(
'input', help='Path to the .npy file', type=str)
parser.add_argument(
'-n_bins', help='Number of intervals to digitize', type=int, default=5)
return parser.parse_args()
if __name__ == "__main__":
# Gathers the input arguments
args = get_arguments()
# Gathering variables from arguments
input_array = args.input
n_bins = args.n_bins
# Loads the array
features = l.load_npy(input_array)
# Gathering minimum and maximum feature values
min_features = features.min(axis=0)
max_features = features.max(axis=0)
# Pre-allocating targets array
y = np.zeros((features.shape[0], features.shape[1]), dtype=np.int)
print('Creating targets ...')
# For every possible feature
for i, (min_f, max_f) in enumerate(zip(min_features, max_features)):
# Creating equally-spaced intervals
bins = np.linspace(min_f, max_f, n_bins+1)
# If iteration corresponds to FID or MSE metric
if i == 0 or i == 1:
# Digitizing the features array with flipped intervals
y[:, i] = np.digitize(features[:, i], np.flip(bins), right=True)
# If not
else:
# Digitizing the features array
y[:, i] = np.digitize(features[:, i], bins)
# Gathering most voted `y` along the features
targets = np.asarray([(np.argmax(np.bincount(y[i, :]))) for i in range(features.shape[0])])
print(f'Labels, Counts: {np.unique(targets, return_counts=True)}')
# Saving targets array as a .npy file
l.save_npy(targets, f'targets.npy')
| [
"numpy.flip",
"utils.loader.load_npy",
"numpy.unique",
"argparse.ArgumentParser",
"numpy.digitize",
"numpy.linspace",
"utils.loader.save_npy",
"numpy.zeros",
"numpy.bincount"
] | [((243, 347), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""Digitizes a numpy array into intervals in order to create targets."""'}), "(usage=\n 'Digitizes a numpy array into intervals in order to create targets.')\n", (266, 347), False, 'import argparse\n'), ((796, 819), 'utils.loader.load_npy', 'l.load_npy', (['input_array'], {}), '(input_array)\n', (806, 819), True, 'import utils.loader as l\n'), ((1000, 1062), 'numpy.zeros', 'np.zeros', (['(features.shape[0], features.shape[1])'], {'dtype': 'np.int'}), '((features.shape[0], features.shape[1]), dtype=np.int)\n', (1008, 1062), True, 'import numpy as np\n'), ((1928, 1963), 'utils.loader.save_npy', 'l.save_npy', (['targets', 'f"""targets.npy"""'], {}), "(targets, f'targets.npy')\n", (1938, 1963), True, 'import utils.loader as l\n'), ((1264, 1301), 'numpy.linspace', 'np.linspace', (['min_f', 'max_f', '(n_bins + 1)'], {}), '(min_f, max_f, n_bins + 1)\n', (1275, 1301), True, 'import numpy as np\n'), ((1628, 1661), 'numpy.digitize', 'np.digitize', (['features[:, i]', 'bins'], {}), '(features[:, i], bins)\n', (1639, 1661), True, 'import numpy as np\n'), ((1503, 1516), 'numpy.flip', 'np.flip', (['bins'], {}), '(bins)\n', (1510, 1516), True, 'import numpy as np\n'), ((1750, 1770), 'numpy.bincount', 'np.bincount', (['y[i, :]'], {}), '(y[i, :])\n', (1761, 1770), True, 'import numpy as np\n'), ((1839, 1877), 'numpy.unique', 'np.unique', (['targets'], {'return_counts': '(True)'}), '(targets, return_counts=True)\n', (1848, 1877), True, 'import numpy as np\n')] |
import json
import queue
from concurrent.futures import ThreadPoolExecutor
import flask
from flask import Response, jsonify, request
from jsonschema import validate
from node.gpg_utils import *
from node.schema import property_schema
from node.utils import BlockList, get_hash
# currently its using inmemory, can be changed it to pg or mysql
# when going to scale, queue should be externalized and should be run gunicorn/uwsgi
queue = queue.Queue(5)
executor = ThreadPoolExecutor(8)
counter = 0
blocks = BlockList()
app = flask.Flask(__name__)
def create_block():
messages = []
while not queue.empty():
message = queue.get()
# you may just want to send decrypted
# TODO
messages.append(message)
block = json.dumps(dict(message=messages, prev_hash=blocks.last_hash))
block_hash = get_hash(block)
signed_hash = gpg.sign(block_hash, keyid=node_keyid)
blocks.add(block, block_hash, signed_hash.data.decode('utf-8'))
queue.all_tasks_done()
def invalid_input(message):
resp = jsonify(message=message)
resp.status_code = 400
return resp
def invalid_error(message):
resp = jsonify(message=message)
resp.status_code = 500
return resp
@app.route('/message', methods=['POST'])
def recieve_message():
body = request.get_json()
try:
message: str = body.get("message", {})
decrypted = json.loads(gpg.decrypt(
message, passphrase=passphrase).data)
if type(decrypted) != dict:
return invalid_input('invalid message, message has to json')
try:
validate(instance=decrypted, schema=property_schema)
except:
return invalid_input('invalid message schema')
if decrypted:
if queue.full():
executor.submit(create_block)
# queue.join()
queue.put({"decrypted": decrypted, "message": message})
return jsonify({'message': 'valid'})
return invalid_input("invalid pgp key")
except:
pass
return invalid_error('unknown error')
@app.route('/register', methods=['POST'])
def register():
body = request.get_json()
try:
pgp_key: str = body.get("pgp", "")
if pgp_key.startswith("-----BEGIN PGP PUBLIC KEY BLOCK-----") and pgp_key.endswith("-----END PGP PUBLIC KEY BLOCK-----\n"):
gpg.import_keys(pgp_key)
ascii_armored_public_keys = gpg.export_keys(node_fingerprint)
return jsonify({"message": "imported_successfully", 'public_key': ascii_armored_public_keys})
response = jsonify({"message": "invalid pgp key"})
response.status_code = 400
return response
except:
pass
response = jsonify({"message": "invalid pgp key"})
response.status_code = 400
return response
@app.route('/block/count', methods=['GET'])
def count_blocks():
return str(len(blocks.blocks))
@app.route('/block/index/<int:index>', methods=['GET'])
def get_by_index(index):
if len(blocks.blocks) > index:
return jsonify(blocks.blocks[index])
return invalid_input('invalid index')
@app.route('/block/latest/', methods=['GET'])
def get_latest_block():
return blocks.last_hash
@app.route('/block/previous/<hash>', methods=['GET'])
def get_previous_block(hash):
if hash in blocks.block_map:
block = blocks.block_map[hash]
prev_hash = block['prev_hash']
if prev_hash in blocks.block_map:
return jsonify(blocks.block_map[prev_hash])
return invalid_input('first block woudn\'t have prev block')
return invalid_input('invalid hash')
@app.route('/block/list', methods=['GET'])
def list_blocks():
return jsonify(blocks.all_blocks())
@app.route('/block/<pick>', methods=['GET'])
def get_block(pick):
if pick in blocks.block_map:
return blocks.block_map[pick]
return invalid_input("invalid hash")
if __name__ == "__main__":
app.run(host="0.0.0.0")
| [
"flask.Flask",
"concurrent.futures.ThreadPoolExecutor",
"queue.empty",
"queue.full",
"queue.get",
"queue.Queue",
"flask.request.get_json",
"jsonschema.validate",
"queue.put",
"queue.all_tasks_done",
"node.utils.BlockList",
"node.utils.get_hash",
"flask.jsonify"
] | [((438, 452), 'queue.Queue', 'queue.Queue', (['(5)'], {}), '(5)\n', (449, 452), False, 'import queue\n'), ((466, 487), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(8)'], {}), '(8)\n', (484, 487), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((510, 521), 'node.utils.BlockList', 'BlockList', ([], {}), '()\n', (519, 521), False, 'from node.utils import BlockList, get_hash\n'), ((529, 550), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (540, 550), False, 'import flask\n'), ((836, 851), 'node.utils.get_hash', 'get_hash', (['block'], {}), '(block)\n', (844, 851), False, 'from node.utils import BlockList, get_hash\n'), ((981, 1003), 'queue.all_tasks_done', 'queue.all_tasks_done', ([], {}), '()\n', (1001, 1003), False, 'import queue\n'), ((1045, 1069), 'flask.jsonify', 'jsonify', ([], {'message': 'message'}), '(message=message)\n', (1052, 1069), False, 'from flask import Response, jsonify, request\n'), ((1154, 1178), 'flask.jsonify', 'jsonify', ([], {'message': 'message'}), '(message=message)\n', (1161, 1178), False, 'from flask import Response, jsonify, request\n'), ((1299, 1317), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1315, 1317), False, 'from flask import Response, jsonify, request\n'), ((2161, 2179), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2177, 2179), False, 'from flask import Response, jsonify, request\n'), ((2739, 2778), 'flask.jsonify', 'jsonify', (["{'message': 'invalid pgp key'}"], {}), "({'message': 'invalid pgp key'})\n", (2746, 2778), False, 'from flask import Response, jsonify, request\n'), ((605, 618), 'queue.empty', 'queue.empty', ([], {}), '()\n', (616, 618), False, 'import queue\n'), ((638, 649), 'queue.get', 'queue.get', ([], {}), '()\n', (647, 649), False, 'import queue\n'), ((2600, 2639), 'flask.jsonify', 'jsonify', (["{'message': 'invalid pgp key'}"], {}), "({'message': 'invalid pgp key'})\n", (2607, 2639), False, 'from flask import 
Response, jsonify, request\n'), ((3064, 3093), 'flask.jsonify', 'jsonify', (['blocks.blocks[index]'], {}), '(blocks.blocks[index])\n', (3071, 3093), False, 'from flask import Response, jsonify, request\n'), ((1602, 1654), 'jsonschema.validate', 'validate', ([], {'instance': 'decrypted', 'schema': 'property_schema'}), '(instance=decrypted, schema=property_schema)\n', (1610, 1654), False, 'from jsonschema import validate\n'), ((1767, 1779), 'queue.full', 'queue.full', ([], {}), '()\n', (1777, 1779), False, 'import queue\n'), ((1870, 1925), 'queue.put', 'queue.put', (["{'decrypted': decrypted, 'message': message}"], {}), "({'decrypted': decrypted, 'message': message})\n", (1879, 1925), False, 'import queue\n'), ((1945, 1974), 'flask.jsonify', 'jsonify', (["{'message': 'valid'}"], {}), "({'message': 'valid'})\n", (1952, 1974), False, 'from flask import Response, jsonify, request\n'), ((2494, 2584), 'flask.jsonify', 'jsonify', (["{'message': 'imported_successfully', 'public_key': ascii_armored_public_keys}"], {}), "({'message': 'imported_successfully', 'public_key':\n ascii_armored_public_keys})\n", (2501, 2584), False, 'from flask import Response, jsonify, request\n'), ((3494, 3530), 'flask.jsonify', 'jsonify', (['blocks.block_map[prev_hash]'], {}), '(blocks.block_map[prev_hash])\n', (3501, 3530), False, 'from flask import Response, jsonify, request\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2020 <NAME> <<EMAIL>>
import time
from preggy import expect
from tornado.ioloop import IOLoop
from tornado.testing import AsyncHTTPTestCase, gen_test
from tests.fixtures.fixtures import IMAGE_BYTES
from thumbor.app import ThumborServiceApp
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
from thumbor.importer import Importer
from thumbor_mongodb.storages.mongo_storage import Storage as MongoStorage
class BaseMongoStorageTestCase(AsyncHTTPTestCase):
def get_app(self):
self.context = self.get_context()
return ThumborServiceApp(self.context)
def get_server(self): # pylint: disable=no-self-use
server = ServerParameters(
8888, "localhost", "thumbor.conf", None, "info", None
)
server.security_key = "ACME-SEC"
return server
def get_importer(self):
importer = Importer(self.config)
importer.import_modules()
return importer
def get_request_handler(self): # pylint: disable=no-self-use
return None
def get_context(self):
self.config = ( # pylint: disable=attribute-defined-outside-init
self.get_config()
)
# pylint: disable=attribute-defined-outside-init
# pylint: disable=assignment-from-none
self.server = (
self.get_server()
)
self.importer = ( # pylint: disable=attribute-defined-outside-init
self.get_importer()
)
# pylint: disable=attribute-defined-outside-init
# pylint: disable=assignment-from-none
self.request_handler = (
self.get_request_handler()
)
self.importer.import_modules()
return Context(
self.server, self.config, self.importer, self.request_handler
)
def get_new_ioloop(self):
return IOLoop.instance()
def get_image_url(self, name):
return "s.glbimg.com/some/{0}".format(name)
def setUp(self, *args, **kw):
super(BaseMongoStorageTestCase, self).setUp(*args, **kw)
self.storage = MongoStorage(Context(
config=self.get_config(), server=self.get_server()
))
def get_config(self):
return Config(
MONGO_STORAGE_URI='mongodb://localhost:27017',
MONGO_STORAGE_SERVER_DB='thumbor',
MONGO_STORAGE_SERVER_COLLECTION='images',
MONGODB_STORAGE_IGNORE_ERRORS=False,
)
@gen_test
async def test_can_store_image_with_old_host_port(self):
config = self.get_config()
config.MONGO_STORAGE_URI = ""
config.MONGO_STORAGE_SERVER_HOST = "localhost"
config.MONGO_STORAGE_SERVER_PORT = 27017
storage = MongoStorage(Context(
config=config, server=self.get_server()
))
url = self.get_image_url("image_old_mongo.png")
await storage.put(url, IMAGE_BYTES)
result = await storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
@gen_test
async def test_can_store_image_should_be_in_catalog(self):
url = self.get_image_url("image.png")
await self.storage.put(url, IMAGE_BYTES)
result = await self.storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
@gen_test
async def test_can_store_image_than_delete(self):
url = self.get_image_url("image_and_delete.png")
await self.storage.put(url, IMAGE_BYTES)
result = await self.storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
await self.storage.remove(url)
result = await self.storage.get(url)
expect(result).to_be_null()
expect(result).not_to_be_an_error()
@gen_test
async def test_can_store_image_than_check_exist(self):
url = self.get_image_url("image_exist.png")
await self.storage.put(url, IMAGE_BYTES)
result = await self.storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
result = await self.storage.exists(url)
expect(result).to_equal(True)
expect(result).not_to_be_an_error()
@gen_test
async def test_can_store_image_with_spaces(self):
url = self.get_image_url("image .jpg")
await self.storage.put(url, IMAGE_BYTES)
result = await self.storage.get(url)
expect(result).not_to_be_null()
expect(result).not_to_be_an_error()
expect(result).to_equal(IMAGE_BYTES)
@gen_test
async def test_can_store_image_with_spaces_encoded(self):
url = self.get_image_url("image%20.jpg")
await self.storage.put(url, IMAGE_BYTES)
got = await self.storage.get(url)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
expect(got).to_equal(IMAGE_BYTES)
@gen_test
async def test_can_get_image(self):
iurl = self.get_image_url("image_2.jpg")
await self.storage.put(iurl, IMAGE_BYTES)
got = await self.storage.get(iurl)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
expect(got).to_equal(IMAGE_BYTES)
@gen_test
async def test_does_not_store_if_config_says_not_to(self):
iurl = self.get_image_url("image_5.jpg")
await self.storage.put(iurl, IMAGE_BYTES)
await self.storage.put_crypto(iurl)
got = await self.storage.get_crypto(iurl)
expect(got).to_be_null()
@gen_test
async def test_detector_can_store_detector_data(self):
iurl = self.get_image_url("image_7.jpg")
await self.storage.put(iurl, IMAGE_BYTES)
await self.storage.put_detector_data(iurl, "some-data")
got = await self.storage.get_detector_data(iurl)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
expect(got).to_equal("some-data")
@gen_test
async def test_detector_returns_none_if_no_detector_data(self):
iurl = self.get_image_url("image_10000.jpg")
got = await self.storage.get_detector_data(iurl)
expect(got).to_be_null()
@gen_test
async def test_cannot_get_expired_image(self):
iurl = self.get_image_url("image_2.jpg")
config = self.get_config()
config.STORAGE_EXPIRATION_SECONDS = 5
storage = MongoStorage(Context(
config=config, server=self.get_server()
))
await storage.put(iurl, IMAGE_BYTES)
time.sleep(5)
got = await storage.get(iurl)
expect(got).to_be_null()
expect(got).not_to_be_an_error()
@gen_test
async def test_can_get_if_expire_set_to_none(self):
iurl = self.get_image_url("image_2.jpg")
config = self.get_config()
config.STORAGE_EXPIRATION_SECONDS = None
storage = MongoStorage(Context(
config=config, server=self.get_server()
))
await storage.put(iurl, IMAGE_BYTES)
got = await storage.get(iurl)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
@gen_test
async def test_should_be_an_error(self):
iurl = self.get_image_url("image_3.jpg")
server = self.get_server()
server.security_key = ""
storage = MongoStorage(Context(
config=self.get_config(), server=server
))
await storage.put(iurl, IMAGE_BYTES)
msg = ("STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True \
if no SECURITY_KEY specified")
await storage.put_crypto(iurl)
expect.error_to_happen(RuntimeError, message=msg)
@gen_test
async def test_getting_crypto_for_a_new_image_returns_none(self):
iurl = self.get_image_url("image_9999.jpg")
config = self.get_config()
config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE = True
storage = MongoStorage(Context(
config=config, server=self.get_server()
))
got = await storage.get_crypto(iurl)
expect(got).to_be_null()
@gen_test
async def test_can_store_crypto(self):
iurl = self.get_image_url("image_6.jpg")
config = self.get_config()
config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE = True
storage = MongoStorage(Context(
config=config, server=self.get_server()
))
await storage.put(iurl, IMAGE_BYTES)
await storage.put_crypto(iurl)
got = await storage.get_crypto(iurl)
expect(got).not_to_be_null()
expect(got).not_to_be_an_error()
expect(got).to_equal("ACME-SEC")
| [
"thumbor.context.Context",
"thumbor.app.ThumborServiceApp",
"preggy.expect",
"thumbor.config.Config",
"time.sleep",
"tornado.ioloop.IOLoop.instance",
"thumbor.context.ServerParameters",
"preggy.expect.error_to_happen",
"thumbor.importer.Importer"
] | [((788, 819), 'thumbor.app.ThumborServiceApp', 'ThumborServiceApp', (['self.context'], {}), '(self.context)\n', (805, 819), False, 'from thumbor.app import ThumborServiceApp\n'), ((895, 966), 'thumbor.context.ServerParameters', 'ServerParameters', (['(8888)', '"""localhost"""', '"""thumbor.conf"""', 'None', '"""info"""', 'None'], {}), "(8888, 'localhost', 'thumbor.conf', None, 'info', None)\n", (911, 966), False, 'from thumbor.context import Context, ServerParameters\n'), ((1100, 1121), 'thumbor.importer.Importer', 'Importer', (['self.config'], {}), '(self.config)\n', (1108, 1121), False, 'from thumbor.importer import Importer\n'), ((1935, 2005), 'thumbor.context.Context', 'Context', (['self.server', 'self.config', 'self.importer', 'self.request_handler'], {}), '(self.server, self.config, self.importer, self.request_handler)\n', (1942, 2005), False, 'from thumbor.context import Context, ServerParameters\n'), ((2074, 2091), 'tornado.ioloop.IOLoop.instance', 'IOLoop.instance', ([], {}), '()\n', (2089, 2091), False, 'from tornado.ioloop import IOLoop\n'), ((2442, 2618), 'thumbor.config.Config', 'Config', ([], {'MONGO_STORAGE_URI': '"""mongodb://localhost:27017"""', 'MONGO_STORAGE_SERVER_DB': '"""thumbor"""', 'MONGO_STORAGE_SERVER_COLLECTION': '"""images"""', 'MONGODB_STORAGE_IGNORE_ERRORS': '(False)'}), "(MONGO_STORAGE_URI='mongodb://localhost:27017',\n MONGO_STORAGE_SERVER_DB='thumbor', MONGO_STORAGE_SERVER_COLLECTION=\n 'images', MONGODB_STORAGE_IGNORE_ERRORS=False)\n", (2448, 2618), False, 'from thumbor.config import Config\n'), ((6744, 6757), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6754, 6757), False, 'import time\n'), ((7820, 7869), 'preggy.expect.error_to_happen', 'expect.error_to_happen', (['RuntimeError'], {'message': 'msg'}), '(RuntimeError, message=msg)\n', (7842, 7869), False, 'from preggy import expect\n'), ((3173, 3187), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3179, 3187), False, 'from preggy import expect\n'), ((3213, 
3227), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3219, 3227), False, 'from preggy import expect\n'), ((3475, 3489), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3481, 3489), False, 'from preggy import expect\n'), ((3515, 3529), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3521, 3529), False, 'from preggy import expect\n'), ((3779, 3793), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3785, 3793), False, 'from preggy import expect\n'), ((3819, 3833), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3825, 3833), False, 'from preggy import expect\n'), ((3948, 3962), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3954, 3962), False, 'from preggy import expect\n'), ((3984, 3998), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (3990, 3998), False, 'from preggy import expect\n'), ((4248, 4262), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4254, 4262), False, 'from preggy import expect\n'), ((4288, 4302), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4294, 4302), False, 'from preggy import expect\n'), ((4381, 4395), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4387, 4395), False, 'from preggy import expect\n'), ((4419, 4433), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4425, 4433), False, 'from preggy import expect\n'), ((4673, 4687), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4679, 4687), False, 'from preggy import expect\n'), ((4713, 4727), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4719, 4727), False, 'from preggy import expect\n'), ((4757, 4771), 'preggy.expect', 'expect', (['result'], {}), '(result)\n', (4763, 4771), False, 'from preggy import expect\n'), ((5019, 5030), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5025, 5030), False, 'from preggy import expect\n'), ((5056, 5067), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5062, 5067), False, 'from preggy import 
expect\n'), ((5097, 5108), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5103, 5108), False, 'from preggy import expect\n'), ((5336, 5347), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5342, 5347), False, 'from preggy import expect\n'), ((5373, 5384), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5379, 5384), False, 'from preggy import expect\n'), ((5414, 5425), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5420, 5425), False, 'from preggy import expect\n'), ((5727, 5738), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (5733, 5738), False, 'from preggy import expect\n'), ((6054, 6065), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (6060, 6065), False, 'from preggy import expect\n'), ((6091, 6102), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (6097, 6102), False, 'from preggy import expect\n'), ((6132, 6143), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (6138, 6143), False, 'from preggy import expect\n'), ((6367, 6378), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (6373, 6378), False, 'from preggy import expect\n'), ((6804, 6815), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (6810, 6815), False, 'from preggy import expect\n'), ((6837, 6848), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (6843, 6848), False, 'from preggy import expect\n'), ((7268, 7279), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (7274, 7279), False, 'from preggy import expect\n'), ((7305, 7316), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (7311, 7316), False, 'from preggy import expect\n'), ((8253, 8264), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (8259, 8264), False, 'from preggy import expect\n'), ((8715, 8726), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (8721, 8726), False, 'from preggy import expect\n'), ((8752, 8763), 'preggy.expect', 'expect', (['got'], {}), '(got)\n', (8758, 8763), False, 'from preggy import expect\n'), ((8793, 8804), 'preggy.expect', 'expect', (['got'], 
{}), '(got)\n', (8799, 8804), False, 'from preggy import expect\n')] |
from base import BaseHandler
from functions import *
from models import User
class SignupHandler(BaseHandler):
    """Sign up handler that is used to signup users."""

    def get(self):
        """Render the empty signup form."""
        self.render("signup.html")

    def post(self):
        """Validate the submitted form; register the user or re-render with errors."""
        error = False
        self.username = self.request.get("username")
        self.password = self.request.get("password")
        self.password_check = self.request.get("password_check")
        self.email = self.request.get("email")
        # Echo username/email back so the form can be re-filled on error
        # (the password fields are deliberately not echoed).
        template_vars = dict(username=self.username,
                             email=self.email)
        if not valid_username(self.username):
            template_vars['error_username'] = "That's not a valid username."
            error = True
        elif User.by_username(self.username):
            template_vars['error_username'] = "This username already exists."
            error = True
        if not valid_password(self.password):
            # Message restored: the literal had been corrupted to "<PASSWORD> ..."
            # by an anonymization pass; wording matches the sibling messages.
            template_vars['error_password'] = "That's not a valid password."
            error = True
        elif self.password != self.password_check:
            template_vars['error_check'] = "Your passwords didn't match."
            error = True
        if not valid_email(self.email):
            template_vars['error_email'] = "That's not a valid email."
            error = True
        if error:
            self.render('signup.html', **template_vars)
        else:
            u = User.register(self.username,
                          self.password,
                          self.email)
            u.put()
            self.login(u)
            self.redirect('/?')
| [
"models.User.by_username",
"models.User.register"
] | [((748, 779), 'models.User.by_username', 'User.by_username', (['self.username'], {}), '(self.username)\n', (764, 779), False, 'from models import User\n'), ((1425, 1480), 'models.User.register', 'User.register', (['self.username', 'self.password', 'self.email'], {}), '(self.username, self.password, self.email)\n', (1438, 1480), False, 'from models import User\n')] |
from BasicRL.BasicRL import BasicRL
from BasicRL.MyPlotter import MyPlotter
import gym, glob
if __name__ == "__main__":
	print("Hello Basic RL example!")
	# Load the Gym environment both algorithms will be trained on.
	env = gym.make("CartPole-v1")
	# Run PPO for 300 episodes; reward logs are written under data/
	# (NOTE(review): file naming is presumably handled inside BasicRL — confirm).
	learner = BasicRL("PPO", gym_env=env, verbose=2, gamma=0.99, sigma=1.0, exploration_decay=0.99)
	learner.learn(300)
	# Run DQN for 300 episodes on the same environment instance.
	learner = BasicRL("DQN", gym_env=env, verbose=2, gamma=0.99, memory_size=10000, exploration_decay=0.99, batch_size=128)
	learner.learn(300)
	# Plot both result sets: the glob order must match the labels below.
	plotter = MyPlotter(x_label="Episode", y_label="Reward", title="CartPole v1")
	plotter.load_array([
		glob.glob("data/reward_PPO_*.txt"),
		glob.glob("data/reward_DQN_*.txt")
	])
	plotter.process_data( rolling_window=100, starting_pointer=30 )
	plotter.render_std( labels=["PPO", "DQN"], colors=["g", "r"] )
| [
"BasicRL.MyPlotter.MyPlotter",
"BasicRL.BasicRL.BasicRL",
"gym.make",
"glob.glob"
] | [((180, 203), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (188, 203), False, 'import gym, glob\n'), ((237, 326), 'BasicRL.BasicRL.BasicRL', 'BasicRL', (['"""PPO"""'], {'gym_env': 'env', 'verbose': '(2)', 'gamma': '(0.99)', 'sigma': '(1.0)', 'exploration_decay': '(0.99)'}), "('PPO', gym_env=env, verbose=2, gamma=0.99, sigma=1.0,\n exploration_decay=0.99)\n", (244, 326), False, 'from BasicRL.BasicRL import BasicRL\n'), ((377, 490), 'BasicRL.BasicRL.BasicRL', 'BasicRL', (['"""DQN"""'], {'gym_env': 'env', 'verbose': '(2)', 'gamma': '(0.99)', 'memory_size': '(10000)', 'exploration_decay': '(0.99)', 'batch_size': '(128)'}), "('DQN', gym_env=env, verbose=2, gamma=0.99, memory_size=10000,\n exploration_decay=0.99, batch_size=128)\n", (384, 490), False, 'from BasicRL.BasicRL import BasicRL\n'), ((540, 607), 'BasicRL.MyPlotter.MyPlotter', 'MyPlotter', ([], {'x_label': '"""Episode"""', 'y_label': '"""Reward"""', 'title': '"""CartPole v1"""'}), "(x_label='Episode', y_label='Reward', title='CartPole v1')\n", (549, 607), False, 'from BasicRL.MyPlotter import MyPlotter\n'), ((633, 667), 'glob.glob', 'glob.glob', (['"""data/reward_PPO_*.txt"""'], {}), "('data/reward_PPO_*.txt')\n", (642, 667), False, 'import gym, glob\n'), ((672, 706), 'glob.glob', 'glob.glob', (['"""data/reward_DQN_*.txt"""'], {}), "('data/reward_DQN_*.txt')\n", (681, 706), False, 'import gym, glob\n')] |
import unittest
from pathlib import Path
from coldtype.grid import Grid
from coldtype.geometry import *
from coldtype.color import hsl
from coldtype.text.composer import StSt, Glyphwise, Style, Font
from coldtype.pens.draftingpens import DraftingPens
from coldtype.pens.svgpen import SVGPen
# Directory containing this test file; SVG debug output is written beneath it.
tf = Path(__file__).parent
class TestGlyphwise(unittest.TestCase):
    """Tests that Glyphwise composition matches StSt and handles ligatures/args."""

    def _test_glyph_kerning(self, font_path, kern, pj=False):
        """Assert StSt and Glyphwise agree (same ambit) for a given kern setting.

        Also renders an SVG of both settings under ignorables/ for eyeballing;
        when ``pj`` is True the comparison is additionally pickled for preview.
        """
        txt = "AVAMANAV"
        #txt = "AVAV"
        ss = StSt(txt, font_path, 100, wdth=0, kern=kern)
        gw = Glyphwise(txt, lambda g: Style(font_path, 100, wdth=0, kern=kern, ro=1))
        # Opposite kern setting: used below to assert the ambits *differ*.
        gwo = Glyphwise(txt, lambda g: Style(font_path, 100, wdth=0, kern=(not kern), ro=1))
        self.assertEqual(len(ss), len(txt))
        self.assertEqual(ss[0].glyphName, "A")
        self.assertEqual(ss[-1].glyphName, "V")
        self.assertEqual(len(gw), len(txt))
        # NOTE(review): the next two lines re-check ``ss`` — presumably they
        # were meant to check ``gw[0]``/``gw[-1]``; confirm before changing.
        self.assertEqual(ss[0].glyphName, "A")
        self.assertEqual(ss[-1].glyphName, "V")
        if pj:
            r = Rect(1500, 500)
            DraftingPens([
                ss,
                gw.f(None).s(0).sw(5),
                gwo.copy().f(None).s(hsl(0.9)).sw(5),
            ]).translate(20, 20).scale(5, point=Point(0, 0)).picklejar(r, name=f"gw_kern_{kern}")
        # Same kern setting must match; opposite setting must not.
        self.assertEqual(ss.ambit(), gw.ambit())
        self.assertNotEqual(ss.ambit(), gwo.ambit())
        fp = Path(font_path)
        op = (tf / f"ignorables/__{fp.name}.svg")
        op.parent.mkdir(exist_ok=True)
        op.write_text(SVGPen.Composite(DraftingPens([ss, gw.translate(0, 10)]), ss.ambit(), viewBox=True))
        return ss, gw

    def test_format_equality(self):
        """Kerning on and off both keep StSt/Glyphwise in agreement."""
        fnt = "~/Type/fonts/fonts/OhnoFatfaceVariable.ttf"
        self._test_glyph_kerning(fnt, False, pj=False)
        self._test_glyph_kerning(fnt, True, pj=True)

    def test_ligature(self):
        """Ligature substitution collapses 'fi'/'ff' into single glyphs."""
        clarette = Font.Cacheable("~/Type/fonts/fonts/_wdths/ClaretteGX.ttf")
        r = Rect(1080, 300)
        gl = (Glyphwise(["fi", "j", "o", "ff"],
            lambda g: Style(clarette, 200, wdth=g.i/3))
            .align(r))
        self.assertEqual(len(gl), 4)
        self.assertEqual(gl[0].glyphName, "f_i")
        self.assertEqual(gl[-1].glyphName, "f_f")
        gl.picklejar(r)

    # def test_grouped(self):
    #     fnt = "~/Type/fonts/fonts/OhnoFatfaceVariable.ttf"
    #     r = Rect(1000, 200)
    #     gw = (Glyphwise(["AB", "CD"], lambda g:
    #         Style(fnt, 250, wdth=i, opsz=i))
    #         .align(r))
    #     gw.picklejar(r)

    def test_variable_args(self):
        """Glyphwise styler receives an easing value g.e running 0 -> 1."""
        fnt = Font.Find("OhnoFatfaceV")
        r = Rect(1080, 300)
        (Glyphwise("FATFACE 1 ARG", lambda g:
            Style(fnt, 200, wdth=g.e))
            .align(r)
            .picklejar(r))
        es = []
        def print_e(i, g):
            # Record each easing value so the endpoints can be asserted below.
            es.append(g.e)
            return Style(fnt, 200, opsz=g.e, wdth=1-g.e)
        (Glyphwise("FATFACE 2 ARG", print_e)
            .align(r)
            .picklejar(r))
        self.assertEqual(es[0], 0)
        self.assertEqual(es[-1], 1)
if __name__ == "__main__":
unittest.main() | [
"coldtype.text.composer.Font.Find",
"pathlib.Path",
"coldtype.text.composer.Style",
"coldtype.text.composer.Font.Cacheable",
"coldtype.color.hsl",
"coldtype.text.composer.StSt",
"coldtype.text.composer.Glyphwise",
"unittest.main"
] | [((297, 311), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (301, 311), False, 'from pathlib import Path\n'), ((3143, 3158), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3156, 3158), False, 'import unittest\n'), ((482, 526), 'coldtype.text.composer.StSt', 'StSt', (['txt', 'font_path', '(100)'], {'wdth': '(0)', 'kern': 'kern'}), '(txt, font_path, 100, wdth=0, kern=kern)\n', (486, 526), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((1388, 1403), 'pathlib.Path', 'Path', (['font_path'], {}), '(font_path)\n', (1392, 1403), False, 'from pathlib import Path\n'), ((1880, 1938), 'coldtype.text.composer.Font.Cacheable', 'Font.Cacheable', (['"""~/Type/fonts/fonts/_wdths/ClaretteGX.ttf"""'], {}), "('~/Type/fonts/fonts/_wdths/ClaretteGX.ttf')\n", (1894, 1938), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((2595, 2620), 'coldtype.text.composer.Font.Find', 'Font.Find', (['"""OhnoFatfaceV"""'], {}), "('OhnoFatfaceV')\n", (2604, 2620), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((2890, 2929), 'coldtype.text.composer.Style', 'Style', (['fnt', '(200)'], {'opsz': 'g.e', 'wdth': '(1 - g.e)'}), '(fnt, 200, opsz=g.e, wdth=1 - g.e)\n', (2895, 2929), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((565, 611), 'coldtype.text.composer.Style', 'Style', (['font_path', '(100)'], {'wdth': '(0)', 'kern': 'kern', 'ro': '(1)'}), '(font_path, 100, wdth=0, kern=kern, ro=1)\n', (570, 611), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((652, 702), 'coldtype.text.composer.Style', 'Style', (['font_path', '(100)'], {'wdth': '(0)', 'kern': '(not kern)', 'ro': '(1)'}), '(font_path, 100, wdth=0, kern=not kern, ro=1)\n', (657, 702), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((2037, 2071), 'coldtype.text.composer.Style', 'Style', (['clarette', '(200)'], {'wdth': '(g.i / 3)'}), '(clarette, 200, 
wdth=g.i / 3)\n', (2042, 2071), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((2946, 2981), 'coldtype.text.composer.Glyphwise', 'Glyphwise', (['"""FATFACE 2 ARG"""', 'print_e'], {}), "('FATFACE 2 ARG', print_e)\n", (2955, 2981), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((2716, 2741), 'coldtype.text.composer.Style', 'Style', (['fnt', '(200)'], {'wdth': 'g.e'}), '(fnt, 200, wdth=g.e)\n', (2721, 2741), False, 'from coldtype.text.composer import StSt, Glyphwise, Style, Font\n'), ((1156, 1164), 'coldtype.color.hsl', 'hsl', (['(0.9)'], {}), '(0.9)\n', (1159, 1164), False, 'from coldtype.color import hsl\n')] |
#!/usr/local/bin/python3
import os
import sys
from PIL import Image
import re
# Converts multiple video files to images in corresponding individual folder:
# - dir
# - images
# - frame_000001.PNG
# - frame_000002.PNG
#
#
def video_to_images(dir):
    """Convert frame_NNNNNN.PNG files under <dir>/images to RGB JPEGs.

    Each matching PNG is saved as frame_NNNNNN.jpg alongside it and the
    original PNG is deleted.  Non-matching files are left untouched.

    :param dir: directory containing an ``images`` subfolder.
    """
    frames_dir = os.path.join(dir, 'images')
    # Compile once instead of re-parsing the pattern on every file.
    frame_re = re.compile(r'(frame_\d+)\.PNG', re.IGNORECASE)
    for filename in sorted(os.listdir(frames_dir)):
        path = os.path.join(frames_dir, filename)
        if not os.path.isfile(path):
            continue
        prefix_match = frame_re.search(filename)
        if not prefix_match:
            continue
        print("converting " + filename + "...")
        jpg_path = os.path.join(frames_dir, prefix_match.group(1) + '.jpg')
        # Context manager closes the PNG file handle before os.remove()
        # (required on Windows; also avoids leaking descriptors in long runs).
        with Image.open(path) as im:
            im.convert('RGB').save(jpg_path)
        os.remove(path)
if __name__ == '__main__':
    # Usage: python video_to_images.py <dir containing an "images" subfolder>
    dir = sys.argv[1]
    video_to_images(dir)
| [
"os.listdir",
"PIL.Image.open",
"os.path.join",
"os.path.isfile",
"os.remove",
"re.search"
] | [((278, 305), 'os.path.join', 'os.path.join', (['dir', '"""images"""'], {}), "(dir, 'images')\n", (290, 305), False, 'import os\n'), ((333, 348), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (343, 348), False, 'import os\n'), ((366, 393), 'os.path.join', 'os.path.join', (['dir', 'filename'], {}), '(dir, filename)\n', (378, 393), False, 'import os\n'), ((405, 425), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (419, 425), False, 'import os\n'), ((454, 510), 're.search', 're.search', (['"""(frame_\\\\d+)\\\\.PNG"""', 'filename', 're.IGNORECASE'], {}), "('(frame_\\\\d+)\\\\.PNG', filename, re.IGNORECASE)\n", (463, 510), False, 'import re\n'), ((672, 708), 'os.path.join', 'os.path.join', (['dir', "(out_name + '.jpg')"], {}), "(dir, out_name + '.jpg')\n", (684, 708), False, 'import os\n'), ((730, 746), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (740, 746), False, 'from PIL import Image\n'), ((844, 859), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (853, 859), False, 'import os\n')] |
import numpy as np
import pandas as pd
import tensorflow as tf
tfd = tf.contrib.distributions
def create_german_datasets(batch=64):
    """Load the German credit dataset and build TF dataset pipelines.

    Reads ``german.data.txt`` (space-separated, no header), binarizes every
    column, and returns ``(train, test, pu)`` where ``train``/``test`` are
    batched ``tf.data.Dataset`` objects of (observation, protected, label)
    tuples and ``pu`` is a Bernoulli prior over the protected attribute.

    :param batch: batch size for both dataset pipelines.
    """
    def gather_labels(df):
        # Per column: list of category values for string columns,
        # the median for numeric columns (used as a binarization threshold).
        labels = []
        for j in range(df.shape[1]):
            if type(df[0, j]) is str:
                labels.append(np.unique(df[:, j]).tolist())
            else:
                labels.append(np.median(df[:, j]))
        return labels

    def transform_to_binary(df, labels):
        # One-hot encode multi-valued categoricals, binary-encode the rest.
        # Column 12 is treated as the protected attribute, the last column
        # as the classification label.
        d = np.zeros([df.shape[0], 58])
        u = np.zeros([df.shape[0], 1])
        y = np.zeros([df.shape[0], 1])
        idx = 0
        for j in range(len(labels)):
            if type(labels[j]) is list:
                if len(labels[j]) > 2:
                    for i in range(df.shape[0]):
                        d[i, idx + int(labels[j].index(df[i, j]))] = 1
                    idx += len(labels[j])
                else:
                    for i in range(df.shape[0]):
                        d[i, idx] = int(labels[j].index(df[i, j]))
                    idx += 1
            else:
                if j != 12 and j != len(labels) - 1:
                    for i in range(df.shape[0]):
                        d[i, idx] = float(df[i, j] > labels[j])
                    idx += 1
                elif j == len(labels) - 1:
                    for i in range(df.shape[0]):
                        y[i] = float(df[i, j] > labels[j])
                else:
                    for i in range(df.shape[0]):
                        u[i] = float(df[i, j] > labels[j])
        # np.bool was removed in NumPy 1.24; the builtin is equivalent here.
        return d.astype(bool), u.astype(bool), y.astype(bool)  # observation, protected, label

    # DataFrame.as_matrix() was removed in pandas 1.0; .values is equivalent.
    # The file was previously read twice into identical arrays — read once.
    d = pd.read_csv('german.data.txt', header=None, sep=' ').values
    t = d
    labels = gather_labels(d)
    ds = transform_to_binary(d, labels)
    ts = transform_to_binary(t, labels)
    idx = np.arange(d.shape[0])
    np.random.seed(4)
    np.random.shuffle(idx)
    cf = int(d.shape[0] * 0.9)
    german = tuple([a[idx[:cf]].astype(np.float32) for a in ds])
    # NOTE(review): the "test" split also uses idx[:cf], i.e. the exact same
    # rows as training; presumably idx[cf:] was intended.  Left unchanged to
    # preserve behavior — confirm with the paper/experiment before fixing.
    german_test = tuple([a[idx[:cf]].astype(np.float32) for a in ts])
    train = tf.data.Dataset.from_tensor_slices(german).shuffle(800).batch(batch)
    test = tf.data.Dataset.from_tensor_slices(german_test).batch(batch)
    pu = tfd.Bernoulli(probs=np.mean(german[1]))
    return train, test, pu
"numpy.mean",
"numpy.median",
"numpy.unique",
"pandas.read_csv",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.zeros",
"numpy.random.seed",
"numpy.arange",
"numpy.random.shuffle"
] | [((1895, 1916), 'numpy.arange', 'np.arange', (['d.shape[0]'], {}), '(d.shape[0])\n', (1904, 1916), True, 'import numpy as np\n'), ((1921, 1938), 'numpy.random.seed', 'np.random.seed', (['(4)'], {}), '(4)\n', (1935, 1938), True, 'import numpy as np\n'), ((1943, 1965), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1960, 1965), True, 'import numpy as np\n'), ((462, 489), 'numpy.zeros', 'np.zeros', (['[df.shape[0], 58]'], {}), '([df.shape[0], 58])\n', (470, 489), True, 'import numpy as np\n'), ((502, 528), 'numpy.zeros', 'np.zeros', (['[df.shape[0], 1]'], {}), '([df.shape[0], 1])\n', (510, 528), True, 'import numpy as np\n'), ((541, 567), 'numpy.zeros', 'np.zeros', (['[df.shape[0], 1]'], {}), '([df.shape[0], 1])\n', (549, 567), True, 'import numpy as np\n'), ((1636, 1688), 'pandas.read_csv', 'pd.read_csv', (['"""german.data.txt"""'], {'header': 'None', 'sep': '""" """'}), "('german.data.txt', header=None, sep=' ')\n", (1647, 1688), True, 'import pandas as pd\n'), ((1709, 1761), 'pandas.read_csv', 'pd.read_csv', (['"""german.data.txt"""'], {'header': 'None', 'sep': '""" """'}), "('german.data.txt', header=None, sep=' ')\n", (1720, 1761), True, 'import pandas as pd\n'), ((2225, 2272), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['german_test'], {}), '(german_test)\n', (2259, 2272), True, 'import tensorflow as tf\n'), ((2315, 2333), 'numpy.mean', 'np.mean', (['german[1]'], {}), '(german[1])\n', (2322, 2333), True, 'import numpy as np\n'), ((365, 384), 'numpy.median', 'np.median', (['df[:, j]'], {}), '(df[:, j])\n', (374, 384), True, 'import numpy as np\n'), ((2145, 2187), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['german'], {}), '(german)\n', (2179, 2187), True, 'import tensorflow as tf\n'), ((287, 306), 'numpy.unique', 'np.unique', (['df[:, j]'], {}), '(df[:, j])\n', (296, 306), True, 'import numpy as np\n')] |
# coding=utf-8
from tqdm import tqdm
from base.base_train import BaseTrain
from utils.signal_process import plot_alignment
class TacotronTrainer(BaseTrain):
    """Epoch/step training driver for the Tacotron model."""

    def __init__(self, sess, model, data_loader, config, logger):
        super(TacotronTrainer, self).__init__(sess, model, data_loader, config, logger)

    def train_epoch(self):
        """Run one epoch of training steps, logging scalars, spectrogram
        images, and (every 100 global steps) an attention-alignment plot;
        checkpoints the model once the epoch finishes."""
        progress = tqdm(range(self.config.iter_per_epoch))
        for _ in progress:
            (loss, mel_loss, mag_loss, lr,
             mel_gt, mel_hat, mag_gt, mag_hat) = self.train_step()
            step = self.model.global_step_tensor.eval(self.sess)
            scalar_summaries = {
                'loss': loss,
                'mel_loss': mel_loss,
                'mag_loss': mag_loss,
                'lr': lr,
            }
            self.logger.summarize(step, scope=self.config.mode,
                                  summaries_dict=scalar_summaries)
            image_summaries = {
                'mel_gt': mel_gt,
                'mel_hat': mel_hat,
                'mag_gt': mag_gt,
                'mag_hat': mag_hat,
            }
            self.logger.summarize_static_img(scope=self.config.mode,
                                             summaries_dict=image_summaries)
            if step % 100 == 0:
                # Plot the first alignment in the batch for logging.
                alignments = self.sess.run(self.model.alignments)
                plot_alignment(alignments[0], step, self.config.align_dir)
        self.model.save(self.sess)

    def train_step(self):
        """Run one optimization step and return the fetched metrics/tensors."""
        fetches = [
            self.model.train_op,
            self.model.loss,
            self.model.mel_loss,
            self.model.mag_loss,
            self.model.lr,
            self.model.mel_gt,
            self.model.mel_hat,
            self.model.mag_gt,
            self.model.mag_hat,
        ]
        (_, loss, mel_loss, mag_loss, lr,
         mel_gt, mel_hat, mag_gt, mag_hat) = self.sess.run(fetches)
        return loss, mel_loss, mag_loss, lr, mel_gt, mel_hat, mag_gt, mag_hat
| [
"utils.signal_process.plot_alignment"
] | [((1881, 1933), 'utils.signal_process.plot_alignment', 'plot_alignment', (['al[0]', 'cur_it', 'self.config.align_dir'], {}), '(al[0], cur_it, self.config.align_dir)\n', (1895, 1933), False, 'from utils.signal_process import plot_alignment\n')] |
from steem import Steem
from datetime import datetime, date, timedelta
from math import ceil, log, isnan
import requests
API = 'https://api.steemjs.com/'
def tag_filter(tag, limit=10):
    """Return up to ``limit`` of the most recently created posts for ``tag``."""
    client = Steem()
    query = {"tag": tag, "limit": limit}
    return client.get_discussions_by_created(query)
def get_vp_rp(steemit_name):
    """Fetch an account from the Steem API and return [voting power, reputation].

    Voting power arrives in basis points (10000 == 100%) and is rounded up to
    whole percent.  Reputation is converted with the standard Steem display
    formula: log10 of the leading digits, rescaled to the familiar ~25+ scale.

    :param steemit_name: account name to look up.
    :return: ``[voting_power_percent, reputation_score]``
    """
    url = '{}get_accounts?names[]=%5B%22{}%22%5D'.format(API, steemit_name)
    data = requests.get(url).json()[0]
    vp = data['voting_power']
    raw_rep = int(data['reputation'])
    # A brand-new account has raw reputation 0, which would make log() blow
    # up below; the canonical formula maps it to the starting score of 25.
    if raw_rep == 0:
        return [ceil(vp / 100), 25]
    rep = str(raw_rep)
    neg = rep.startswith('-')
    if neg:
        rep = rep[1:]
    leading_digits = int(rep[0:4])
    # Bug fix: the standard formula is log10(leading_digits); the previous
    # code computed ln(leading_digits / ln 10), giving wrong fractional parts.
    log_n = log(leading_digits, 10)
    out = (len(rep) - 1) + (log_n - int(log_n))
    if isnan(out):
        out = 0
    out = max(out - 9, 0)
    if neg:
        out = -out
    out = int(out * 9 + 25)
    return [ceil(vp / 100), out]
"math.ceil",
"requests.get",
"math.log",
"steem.Steem",
"math.isnan"
] | [((206, 213), 'steem.Steem', 'Steem', ([], {}), '()\n', (211, 213), False, 'from steem import Steem\n'), ((1214, 1224), 'math.isnan', 'isnan', (['out'], {}), '(out)\n', (1219, 1224), False, 'from math import ceil, log, isnan\n'), ((1358, 1372), 'math.ceil', 'ceil', (['(vp / 100)'], {}), '(vp / 100)\n', (1362, 1372), False, 'from math import ceil, log, isnan\n'), ((1132, 1139), 'math.log', 'log', (['(10)'], {}), '(10)\n', (1135, 1139), False, 'from math import ceil, log, isnan\n'), ((817, 834), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (829, 834), False, 'import requests\n')] |
from typing import Union
import flask_restx
import flask
from keepachangelog._changelog import to_dict
def add_changelog_endpoint(
    namespace: Union[flask_restx.Namespace, flask_restx.Api], changelog_path: str
):
    """
    Create /changelog: Changelog endpoint parsing https://keepachangelog.com/en/1.0.0/

    :param namespace: The Flask-RestX namespace.
    :param changelog_path: Path to CHANGELOG.md.
    """
    # Swagger models describing a single release entry of the changelog.
    metadata_model = namespace.model(
        "ChangelogReleaseMetaDataModel",
        {
            "version": flask_restx.fields.String(
                description="Release version following semantic versioning.",
                required=True,
                example="3.12.5",
            ),
            "release_date": flask_restx.fields.Date(
                description="Release date.",
                required=True,
                example="2019-12-31",
            ),
        },
    )
    release_model = namespace.model(
        "ChangelogReleaseModel",
        {
            "metadata": metadata_model,
            "added": flask_restx.fields.List(
                flask_restx.fields.String(description="New features.")
            ),
            "changed": flask_restx.fields.List(
                flask_restx.fields.String(
                    description="Changes in existing functionaliy."
                )
            ),
            "deprecated": flask_restx.fields.List(
                flask_restx.fields.String(
                    description="Soon-to-be removed features."
                )
            ),
            "removed": flask_restx.fields.List(
                flask_restx.fields.String(description="Removed features.")
            ),
            "fixed": flask_restx.fields.List(
                flask_restx.fields.String(description="Any bug fixes.")
            ),
            "security": flask_restx.fields.List(
                flask_restx.fields.String(description="Vulnerabilities.")
            ),
        },
    )

    @namespace.route("/changelog")
    @namespace.doc(responses={200: ("Service changelog.", [release_model])})
    class Changelog(flask_restx.Resource):
        def get(self):
            """
            Retrieve service changelog.
            """
            try:
                return flask.jsonify(to_dict(changelog_path))
            except FileNotFoundError:
                # No CHANGELOG.md on disk: return an empty changelog.
                return flask.jsonify({})
| [
"flask_restx.fields.Date",
"keepachangelog._changelog.to_dict",
"flask_restx.fields.String",
"flask.jsonify"
] | [((3242, 3265), 'keepachangelog._changelog.to_dict', 'to_dict', (['changelog_path'], {}), '(changelog_path)\n', (3249, 3265), False, 'from keepachangelog._changelog import to_dict\n'), ((3328, 3345), 'flask.jsonify', 'flask.jsonify', (['{}'], {}), '({})\n', (3341, 3345), False, 'import flask\n'), ((1631, 1685), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""New features."""'}), "(description='New features.')\n", (1656, 1685), False, 'import flask_restx\n'), ((1813, 1887), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""Changes in existing functionaliy."""'}), "(description='Changes in existing functionaliy.')\n", (1838, 1887), False, 'import flask_restx\n'), ((2088, 2157), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""Soon-to-be removed features."""'}), "(description='Soon-to-be removed features.')\n", (2113, 2157), False, 'import flask_restx\n'), ((2355, 2413), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""Removed features."""'}), "(description='Removed features.')\n", (2380, 2413), False, 'import flask_restx\n'), ((2609, 2664), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""Any bug fixes."""'}), "(description='Any bug fixes.')\n", (2634, 2664), False, 'import flask_restx\n'), ((2793, 2850), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""Vulnerabilities."""'}), "(description='Vulnerabilities.')\n", (2818, 2850), False, 'import flask_restx\n'), ((888, 1017), 'flask_restx.fields.String', 'flask_restx.fields.String', ([], {'description': '"""Release version following semantic versioning."""', 'required': '(True)', 'example': '"""3.12.5"""'}), "(description=\n 'Release version following semantic versioning.', required=True,\n example='3.12.5')\n", (913, 1017), False, 'import flask_restx\n'), ((1221, 1315), 'flask_restx.fields.Date', 
'flask_restx.fields.Date', ([], {'description': '"""Release date."""', 'required': '(True)', 'example': '"""2019-12-31"""'}), "(description='Release date.', required=True, example\n ='2019-12-31')\n", (1244, 1315), False, 'import flask_restx\n')] |
import bisect
import math
import operator
from datetime import timedelta
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.font_manager as font_manager
import matplotlib.patheffects as patheffects
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from matplotlib.offsetbox import AnchoredText
from historical_hrrr import historical_hrrr_snow
from nohrsc_plotting import nohrsc_snow
from plot_cities import get_cities
for font in font_manager.findSystemFonts(["."]):
font_manager.fontManager.addfont(font)
# Set font family globally
matplotlib.rcParams['font.family'] = 'Inter'
DIFF = 0.2
OPP_DIFF = (0.2, 0.2)
ZOOM_LEVEL = 1
LONLAT = (-89.3866, 43.07295)
GO_OUT_LONLAT = (3, 1.75)
if LONLAT:
extent = (
LONLAT[0] - GO_OUT_LONLAT[0], LONLAT[0] + GO_OUT_LONLAT[0],
LONLAT[1] - GO_OUT_LONLAT[1], LONLAT[1] + GO_OUT_LONLAT[1]
)
else:
extent = (-109.291992, -101.887207, 36.862043, 41.393294)
extent_lim = (extent[0] - DIFF, extent[1] + DIFF, extent[2] - DIFF, extent[3] + DIFF)
extent_opp = (extent[0] + OPP_DIFF[0], extent[1] - OPP_DIFF[0], extent[2] + OPP_DIFF[1], extent[3] - OPP_DIFF[1])
lons_extent = extent[:2]
lats_extent = extent[2:]
fig: plt.Figure = plt.figure(figsize=(12, 6))
ax: plt.Axes = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
ax.set_extent(extent)
ax.add_feature(cfeature.LAND.with_scale("50m"))
ax.add_feature(cfeature.OCEAN.with_scale("50m"), zorder=100)
ax.add_feature(cfeature.STATES.with_scale("50m"), lw=1.25, zorder=200)
all_cities = get_cities(extent_opp, spacing_lat=0.5, spacing_long=0.5, min_pop=10000)
lons_n, lats_n, snow_n, date, accum_time = nohrsc_snow(extent_lim)
coords = historical_hrrr_snow(date, extent_lim, accum_time, goes_out=24, occ=2)
all_keys = [*coords.keys()]
def distance(tup, lon_, lat_):
    """Euclidean distance between grid point ``tup`` = (lon, lat) and (lon_, lat_)."""
    d_lon = abs(tup[0] - lon_)
    d_lat = abs(tup[1] - lat_)
    return (d_lon ** 2 + d_lat ** 2) ** 0.5
def regrid_hrrr(use_closest=False, target=0.25):
    """Resample the HRRR forecast (module-level ``coords``) onto the NOHRSC grid.

    Walks the NOHRSC grid (``lats_n`` x ``lons_n``), rounds each coordinate to
    two decimals and looks the (lon, lat) pair up in ``coords``.  When a point
    is missing and ``use_closest`` is True, the first known point within
    ``target`` degrees of the bisect position is used instead (falling back to
    the bisect neighbour itself).

    :param use_closest: substitute the nearest available HRRR point for grid
        points with no exact match.
    :param target: maximum distance (degrees) accepted in the nearest-point scan.
    :return: 2-D ``np.ndarray`` of forecast snowfall on the NOHRSC grid.
    """
    snow_h = []
    for lat in lats_n:
        snow_h.append([])
        lat = round(lat, 2)
        for lon in lons_n:
            lon = round(lon, 2)
            try:
                snow_h[-1].append(coords[(lon, lat)])
            except KeyError:
                if use_closest:
                    # NOTE(review): bisect assumes all_keys is sorted, but it is
                    # built straight from dict insertion order -- confirm the
                    # keys really are ordered by (lon, lat).
                    idx = bisect.bisect_left(all_keys, (lon, lat))
                    dists = ((distance(tup, lon, lat), tup) for tup in all_keys[idx:])
                    for dist in dists:
                        if dist[0] <= target:
                            closest = dist[1]
                            break
                    else:
                        # No candidate within target: fall back to the bisect neighbour.
                        closest = all_keys[bisect.bisect_left(all_keys, (lon, lat))]
                    snow_h[-1].append(coords[closest])
                # NOTE(review): with use_closest=False a missing key appends
                # nothing, so rows can end up ragged and np.array() would build
                # an object array -- confirm callers only pass use_closest=True
                # (the call in this module does).
    snow_h = np.array(snow_h)
    return snow_h
snow_h = regrid_hrrr(use_closest=True, target=0.1)
# Bust/boom field: observed (NOHRSC) minus forecast (HRRR) snowfall, with NaNs
# zeroed, smoothed, and clipped to the colour range.
diff_snow = snow_n - snow_h
diff_snow[np.isnan(diff_snow)] = 0
diff_snow = gaussian_filter(diff_snow, ZOOM_LEVEL)
diff_snow[np.where(diff_snow >= 4.75)] = 4.75
diff_snow[np.where(diff_snow < -5)] = -5
# Build contour levels symmetric around zero so the diverging colormap stays centred.
if diff_snow.max() < 4.75 and diff_snow.min() > -5:
    abs_min, abs_max = abs(diff_snow.min()), abs(diff_snow.max())
    if abs_min > abs_max:
        levels = np.arange(math.floor(diff_snow.min()), -math.floor(diff_snow.min()), 0.25)
    else:
        levels = np.arange(-math.ceil(diff_snow.max()), math.ceil(diff_snow.max()), 0.25)
else:
    levels = np.arange(-5, 5, 0.25)
levels_c = np.arange(-5, 5.01, 1)
cmap = cm.get_cmap("coolwarm_r")
cmap_c = cm.get_cmap("viridis")
norm = colors.BoundaryNorm(levels, cmap.N)
norm_c = colors.BoundaryNorm(levels_c, cmap_c.N)
# These colormaps are used for debugging to see individual snow totals
# (NWS-style snowfall bins, inches).
levels_s = [0.1, 1, 2, 3, 4, 6, 8, 12, 16, 20, 24, 30, 36, 48, 60, 72]
cmap_s = colors.ListedColormap(
    [
        '#bdd7e7', '#6baed6', '#3182bd', '#08519c', '#082694', '#ffff96',
        '#ffc400', '#ff8700', '#db1400', '#9e0000', '#690000', '#ccccff',
        '#9f8cd8', '#7c52a5', '#561c72', '#40dfff'
    ]
)
norm_s = colors.BoundaryNorm(levels_s, cmap_s.N)
# Alternative filled contour of the bust/boom difference field (disabled).
# C = ax.contourf(
#     gaussian_filter(lons_n, ZOOM_LEVEL), gaussian_filter(lats_n, ZOOM_LEVEL), diff_snow, levels,
#     cmap=cmap, norm=norm, transform=ccrs.PlateCarree(), antialiased=True, alpha=0.75
# )
# Currently plotted: the raw observed snowfall in the debug bins above.
C = ax.contourf(
    lons_n, lats_n, snow_n, levels_s,
    cmap=cmap_s, norm=norm_s, alpha=0.5, transform=ccrs.PlateCarree(), antialiased=True
)
# Labelled contour lines of the difference field (disabled).
# CS = ax.contour(
#     gaussian_filter(lons_n, ZOOM_LEVEL), gaussian_filter(lats_n, ZOOM_LEVEL), diff_snow,
#     levels=levels_c, cmap=cmap_c, norm=norm_c, transform=ccrs.PlateCarree()
# )
# ax.clabel(
#     CS, levels_c,
#     fmt=lambda amt: f"{'-' if amt < 0 else ('+' if amt > 0 else '')}{amt:.0f}\"", inline=True, fontsize=10
# )
# Add all cities to map, with a white halo so labels stay readable over data.
for city, (lon, lat) in all_cities:
    txt = ax.text(
        lon, lat, city,
        fontdict={"size": 10, "color": "black"}, horizontalalignment="center",
        verticalalignment="center", transform=ccrs.PlateCarree(), zorder=350
    )
    txt.set_path_effects([patheffects.withStroke(linewidth=2, foreground="white")])
fig.colorbar(
    C,
    label="Difference Between Total Snow and Forecasted Snow (in.)",
    extend="max"
)
# Title spans the accumulation window ending at `date`.
ax.set_title(
    f"Bust or Boom?: from {(date - timedelta(hours=accum_time)).strftime('%B %d, %Y')} to {date.strftime('%B %d, %Y')}",
    fontweight="bold"
)
ax.add_artist(
    AnchoredText(
        "Made by @AtlanticWx",
        loc="lower right",
        prop={"size": 10},
        frameon=True,
        zorder=300
    )
)
plt.show()
| [
"scipy.ndimage.filters.gaussian_filter",
"numpy.array",
"datetime.timedelta",
"matplotlib.patheffects.withStroke",
"historical_hrrr.historical_hrrr_snow",
"numpy.arange",
"numpy.where",
"nohrsc_plotting.nohrsc_snow",
"plot_cities.get_cities",
"matplotlib.colors.ListedColormap",
"matplotlib.offse... | [((593, 628), 'matplotlib.font_manager.findSystemFonts', 'font_manager.findSystemFonts', (["['.']"], {}), "(['.'])\n", (621, 628), True, 'import matplotlib.font_manager as font_manager\n'), ((1358, 1385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1368, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1675, 1747), 'plot_cities.get_cities', 'get_cities', (['extent_opp'], {'spacing_lat': '(0.5)', 'spacing_long': '(0.5)', 'min_pop': '(10000)'}), '(extent_opp, spacing_lat=0.5, spacing_long=0.5, min_pop=10000)\n', (1685, 1747), False, 'from plot_cities import get_cities\n'), ((1792, 1815), 'nohrsc_plotting.nohrsc_snow', 'nohrsc_snow', (['extent_lim'], {}), '(extent_lim)\n', (1803, 1815), False, 'from nohrsc_plotting import nohrsc_snow\n'), ((1825, 1895), 'historical_hrrr.historical_hrrr_snow', 'historical_hrrr_snow', (['date', 'extent_lim', 'accum_time'], {'goes_out': '(24)', 'occ': '(2)'}), '(date, extent_lim, accum_time, goes_out=24, occ=2)\n', (1845, 1895), False, 'from historical_hrrr import historical_hrrr_snow\n'), ((3016, 3054), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['diff_snow', 'ZOOM_LEVEL'], {}), '(diff_snow, ZOOM_LEVEL)\n', (3031, 3054), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((3534, 3556), 'numpy.arange', 'np.arange', (['(-5)', '(5.01)', '(1)'], {}), '(-5, 5.01, 1)\n', (3543, 3556), True, 'import numpy as np\n'), ((3565, 3590), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""coolwarm_r"""'], {}), "('coolwarm_r')\n", (3576, 3590), True, 'import matplotlib.cm as cm\n'), ((3600, 3622), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (3611, 3622), True, 'import matplotlib.cm as cm\n'), ((3631, 3666), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['levels', 'cmap.N'], {}), '(levels, cmap.N)\n', (3650, 3666), True, 'import matplotlib.colors as colors\n'), ((3676, 3715), 
'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['levels_c', 'cmap_c.N'], {}), '(levels_c, cmap_c.N)\n', (3695, 3715), True, 'import matplotlib.colors as colors\n'), ((3861, 4068), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (["['#bdd7e7', '#6baed6', '#3182bd', '#08519c', '#082694', '#ffff96',\n '#ffc400', '#ff8700', '#db1400', '#9e0000', '#690000', '#ccccff',\n '#9f8cd8', '#7c52a5', '#561c72', '#40dfff']"], {}), "(['#bdd7e7', '#6baed6', '#3182bd', '#08519c',\n '#082694', '#ffff96', '#ffc400', '#ff8700', '#db1400', '#9e0000',\n '#690000', '#ccccff', '#9f8cd8', '#7c52a5', '#561c72', '#40dfff'])\n", (3882, 4068), True, 'import matplotlib.colors as colors\n'), ((4106, 4145), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['levels_s', 'cmap_s.N'], {}), '(levels_s, cmap_s.N)\n', (4125, 4145), True, 'import matplotlib.colors as colors\n'), ((5625, 5635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5633, 5635), True, 'import matplotlib.pyplot as plt\n'), ((634, 672), 'matplotlib.font_manager.fontManager.addfont', 'font_manager.fontManager.addfont', (['font'], {}), '(font)\n', (666, 672), True, 'import matplotlib.font_manager as font_manager\n'), ((1496, 1527), 'cartopy.feature.LAND.with_scale', 'cfeature.LAND.with_scale', (['"""50m"""'], {}), "('50m')\n", (1520, 1527), True, 'import cartopy.feature as cfeature\n'), ((1544, 1576), 'cartopy.feature.OCEAN.with_scale', 'cfeature.OCEAN.with_scale', (['"""50m"""'], {}), "('50m')\n", (1569, 1576), True, 'import cartopy.feature as cfeature\n'), ((1605, 1638), 'cartopy.feature.STATES.with_scale', 'cfeature.STATES.with_scale', (['"""50m"""'], {}), "('50m')\n", (1631, 1638), True, 'import cartopy.feature as cfeature\n'), ((2853, 2869), 'numpy.array', 'np.array', (['snow_h'], {}), '(snow_h)\n', (2861, 2869), True, 'import numpy as np\n'), ((2979, 2998), 'numpy.isnan', 'np.isnan', (['diff_snow'], {}), '(diff_snow)\n', (2987, 2998), True, 'import numpy as np\n'), ((3066, 3093), 
'numpy.where', 'np.where', (['(diff_snow >= 4.75)'], {}), '(diff_snow >= 4.75)\n', (3074, 3093), True, 'import numpy as np\n'), ((3112, 3136), 'numpy.where', 'np.where', (['(diff_snow < -5)'], {}), '(diff_snow < -5)\n', (3120, 3136), True, 'import numpy as np\n'), ((3499, 3521), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.25)'], {}), '(-5, 5, 0.25)\n', (3508, 3521), True, 'import numpy as np\n'), ((5477, 5580), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (['"""Made by @AtlanticWx"""'], {'loc': '"""lower right"""', 'prop': "{'size': 10}", 'frameon': '(True)', 'zorder': '(300)'}), "('Made by @AtlanticWx', loc='lower right', prop={'size': 10},\n frameon=True, zorder=300)\n", (5489, 5580), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((1437, 1455), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1453, 1455), True, 'import cartopy.crs as ccrs\n'), ((4462, 4480), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4478, 4480), True, 'import cartopy.crs as ccrs\n'), ((5068, 5086), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5084, 5086), True, 'import cartopy.crs as ccrs\n'), ((5131, 5186), 'matplotlib.patheffects.withStroke', 'patheffects.withStroke', ([], {'linewidth': '(2)', 'foreground': '"""white"""'}), "(linewidth=2, foreground='white')\n", (5153, 5186), True, 'import matplotlib.patheffects as patheffects\n'), ((2390, 2430), 'bisect.bisect_left', 'bisect.bisect_left', (['all_keys', '(lon, lat)'], {}), '(all_keys, (lon, lat))\n', (2408, 2430), False, 'import bisect\n'), ((5348, 5375), 'datetime.timedelta', 'timedelta', ([], {'hours': 'accum_time'}), '(hours=accum_time)\n', (5357, 5375), False, 'from datetime import timedelta\n'), ((2745, 2785), 'bisect.bisect_left', 'bisect.bisect_left', (['all_keys', '(lon, lat)'], {}), '(all_keys, (lon, lat))\n', (2763, 2785), False, 'import bisect\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Scheduler Resource"""
from hiispider.resources.base import BaseResource
class SchedulerResource(BaseResource):
    """Leaf web resource bound to a scheduler server.

    Answers every request with an empty JSON object served as JavaScript.
    """

    isLeaf = True

    def __init__(self, schedulerserver):
        # Keep a handle on the scheduler server, then run the base-resource setup.
        self.schedulerserver = schedulerserver
        BaseResource.__init__(self)

    def render(self, request):
        """Respond to any request with an empty JSON object."""
        request.setHeader('Content-type', 'text/javascript; charset=UTF-8')
        return "{}"
| [
"hiispider.resources.base.BaseResource.__init__"
] | [((279, 306), 'hiispider.resources.base.BaseResource.__init__', 'BaseResource.__init__', (['self'], {}), '(self)\n', (300, 306), False, 'from hiispider.resources.base import BaseResource\n')] |
# -*- coding: utf-8 -*-
from sdgen._configurable_mixin import ConfigurableMixin
from sdgen.utils import helpers
class Field(ConfigurableMixin):
    """Abstract base *field* class.

    A field models a simple or complex graphical element (for example
    :class:`sdgen.fields.character.Character` or
    :class:`sdgen.fields.simple_arrow.SimpleArrow`) described by a set of
    params such as padding, width or font_size.

    Fields are created by views (see :module:`sdgen.views`) with all needed
    parameters and are later asked to render themselves by a specific
    *Builder*.
    """

    marked = False  # field can be highlighted, default: False

    def __init__(self, *args, **kwargs):
        """
        Any frontend option (such as font_color and so on) may also be given
        with a ``marked_`` prefix; the prefixed value is used whenever the
        field/view is marked (marked=True).

        :param marked: True, if field should be marked.
        :type marked: bool.
        """
        super(Field, self).__init__(*args, **kwargs)

    def __getattribute__(self, attrname):
        """
        When ``self.marked`` is True, prefer ``marked_<attrname>`` if such an
        attribute exists; otherwise fall back to the plain attribute.
        """
        if attrname != 'marked':
            highlighted = 'marked_' + attrname
            if getattr(self, 'marked', False) and hasattr(self, highlighted):
                return object.__getattribute__(self, highlighted)
        return object.__getattribute__(self, attrname)

    def to_png(self):
        """Render field in png.

        Subclasses must override this to produce a PNG image of the field.
        """
        raise NotImplementedError()

    def to_svg(self):
        """Render field in svg.

        Subclasses must override this to produce an SVG image of the field.
        """
        raise NotImplementedError()

    def pt_to_px(self, points):
        """Delegate to :function:`pt_to_px` from :mod:`sdgen.lib.helpers`.

        The result may be modified by an antialiasing decorator set on the class.
        """
        return helpers.pt_to_px(points)

    def px_to_pt(self, points):
        """Delegate to :function:`px_to_pt` from :mod:`sdgen.lib.helpers`.

        The result may be modified by an antialiasing decorator set on the class.
        """
        return helpers.px_to_pt(points)
| [
"sdgen.utils.helpers.pt_to_px",
"sdgen.utils.helpers.px_to_pt"
] | [((2216, 2240), 'sdgen.utils.helpers.pt_to_px', 'helpers.pt_to_px', (['points'], {}), '(points)\n', (2232, 2240), False, 'from sdgen.utils import helpers\n'), ((2479, 2503), 'sdgen.utils.helpers.px_to_pt', 'helpers.px_to_pt', (['points'], {}), '(points)\n', (2495, 2503), False, 'from sdgen.utils import helpers\n')] |
import tensorflow as tf
import numpy as np
import os
import pickle
from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell
from utils import functions, regularization, helpers, pretty_print
import argparse
def main(results_dir='results/sho/test', trials=1, learning_rate=1e-2, reg_weight=2e-4, timesteps=25, batch_size=129,
         n_epochs1=2001, n_epochs2=5001, n_epochs3=5001):
    """Train the EQL propagator network on the simulated SHO dataset.

    Three training stages are run inside one TF1 session per trial:
      1. short prediction horizons with reduced regularization,
      2. full horizons with full regularization,
      3. fine-tuning after small weights are masked to zero and frozen.
    Results (weights, loss curves, recovered expressions) are written under
    ``results_dir``.

    :param results_dir: directory in which per-trial results are saved.
    :param trials: number of independent training runs.
    :param learning_rate: base RMSProp learning rate (reduced in later stages).
    :param reg_weight: L1/2 regularization weight (lambda).
    :param timesteps: number of future time steps the propagator predicts.
    :param batch_size: training batch size.
    :param n_epochs1: epochs in stage 1.
    :param n_epochs2: epochs in stage 2.
    :param n_epochs3: epochs in stage 3.
    """
    # Hyperparameters
    summary_step = 500
    timesteps0 = 1
    # Candidate primitive functions the symbolic network chooses from.
    primitive_funcs = [
        *[functions.Constant()] * 2,
        *[functions.Identity()] * 4,
        *[functions.Square()] * 4,
        *[functions.Sin()] * 2,
        *[functions.Exp()] * 2,
        *[functions.Sigmoid()] * 2,
        *[functions.Product(norm=0.1)] * 2,
    ]
    # Import SHO data (x: observed trajectories, y: trajectories to predict).
    data = np.load('dataset/sho.npz')
    x_d = np.asarray(data["x_d"])
    x_v = np.asarray(data["x_v"])
    y_d = np.asarray(data["y_d"])
    y_v = np.asarray(data["y_v"])
    omega2_data = data["omega2"]
    N = data["N"]
    # Prepare data
    x = np.stack((x_d, x_v), axis=2)  # Shape (N, NT, 2)
    y0 = np.stack((y_d[:, 0], y_v[:, 0]), axis=1)  # Initial conditions for prediction y, fed into propagator
    y_data = np.stack((y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1]), axis=2)  # shape(NG, LENGTH, 2)
    # Tensorflow placeholders for x, y0, y
    x_input = tf.placeholder(shape=(None, x.shape[1], x.shape[2]), dtype=tf.float32, name="enc_input")
    y0_input = tf.placeholder(shape=(None, 2), dtype=tf.float32, name="prop_input")  # input is d, v
    y_input = tf.placeholder(shape=(None, timesteps, 2), dtype=tf.float32, name="label_input")
    length_input = tf.placeholder(dtype=tf.int32, shape=())  # prediction horizon, grown during stage 1
    # Dynamics encoder: maps each trajectory to a latent z (expected ~ omega^2)
    encoder = helpers.Encoder()
    training = tf.placeholder_with_default(False, [])
    z = encoder(x_input, training=training)
    z_data = omega2_data[:, np.newaxis]
    # Propagating decoders: symbolic nets for position (d) and velocity (v)
    prop_d = SymbolicNet(2, funcs=primitive_funcs)
    prop_v = SymbolicNet(2, funcs=primitive_funcs)
    prop_d.build(4)
    prop_v.build(4)
    # Building recurrent structure: unroll the symbolic cell over length_input steps
    rnn = tf.keras.layers.RNN(SymbolicCell(prop_d, prop_v), return_sequences=True)
    y0_rnn = tf.concat([tf.expand_dims(y0_input, axis=1), tf.zeros((tf.shape(y0_input)[0], length_input - 1, 2))],
                       axis=1)
    prop_input = tf.concat([y0_rnn, tf.keras.backend.repeat(z, length_input),
                            tf.ones((tf.shape(y0_input)[0], length_input, 1))], axis=2)
    prop_output = rnn(prop_input)
    epoch = tf.placeholder(tf.float32)
    # Regularization weight oscillates with epoch (sin^2 schedule).
    reg_freq = np.pi / (n_epochs1 + n_epochs2) / 1.1
    reg_loss = tf.sin(reg_freq * epoch) ** 2 * regularization.l12_smooth(prop_d.get_weights()) + \
               tf.sin(reg_freq * epoch) ** 2 * regularization.l12_smooth(prop_v.get_weights())
    # reg_loss = regularization.l12_smooth(prop_d.get_weights()) + regularization.l12_smooth(prop_v.get_weights())
    # Training
    learning_rate_ph = tf.placeholder(tf.float32)
    opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate_ph)
    reg_weight_ph = tf.placeholder(tf.float32)
    error = tf.losses.mean_squared_error(labels=y_input[:, :length_input, :], predictions=prop_output)
    loss = error + reg_weight_ph * reg_loss
    train = tf.group([opt.minimize(loss), encoder.bn.updates])
    batch = helpers.batch_generator([x, y_data, y0, z_data], N=N, batch_size=batch_size)
    # Training session
    with tf.Session() as sess:
        for _ in range(trials):
            loss_i = np.nan
            # Restart stages 1+2 from scratch whenever the loss diverges to NaN.
            while np.isnan(loss_i):
                loss_list = []
                error_list = []
                reg_list = []
                sess.run(tf.global_variables_initializer())
                for i in range(n_epochs1 + n_epochs2):
                    if i < n_epochs1:
                        # Stage 1: weak regularization, horizon grows every 500 epochs.
                        reg_weight_i = reg_weight / 5
                        learning_rate_i = learning_rate
                        length_i = min(i // 500 * 2 + timesteps0, timesteps)
                    else:
                        # Stage 2: full regularization, lower LR, full horizon.
                        reg_weight_i = reg_weight
                        learning_rate_i = learning_rate / 5
                        length_i = timesteps
                    x_batch, y_batch, y0_batch, z_batch = next(batch)
                    feed_dict = {x_input: x_batch, y0_input: y0_batch, y_input: y_batch,
                                 epoch: i, learning_rate_ph: learning_rate_i, training: True,
                                 reg_weight_ph: reg_weight_i, length_input: length_i}
                    _ = sess.run(train, feed_dict=feed_dict)
                    if i % summary_step == 0 or i == n_epochs1 - 1:
                        feed_dict[training] = False
                        loss_i, error_i, reg_i = sess.run((loss, error, reg_loss), feed_dict=feed_dict)
                        z_arr = sess.run(z, feed_dict=feed_dict)
                        # Correlation between the true omega^2 and the learned latent z.
                        r = np.corrcoef(z_batch[:, 0], z_arr[:, 0])[1, 0]
                        loss_list.append(loss_i)
                        error_list.append(error_i)
                        reg_list.append(reg_i)
                        print("Epoch %d\tTotal loss: %f\tError: %f\tReg loss: %f\tCorrelation: %f"
                              % (i, loss_i, error_i, reg_i, r))
                        if np.isnan(loss_i):
                            break
            # Setting small weights to 0 and freezing them
            prop_d_masked = MaskedSymbolicNet(sess, prop_d, threshold=0.01)
            prop_v_masked = MaskedSymbolicNet(sess, prop_v, threshold=0.01)
            # Keep track of currently existing variables. When we rebuild the rnn, it makes new variables that we need
            # to initialize. Later, we will use this to figure out what the uninitialized variables are.
            temp = set(tf.global_variables())
            # Rebuilding the decoding propagator. Remove regularization
            rnn = tf.keras.layers.RNN(SymbolicCell(prop_d_masked, prop_v_masked), return_sequences=True)
            prop_output = rnn(prop_input)
            loss = tf.losses.mean_squared_error(labels=y_input[:, :length_input, :], predictions=prop_output)
            train = tf.group([opt.minimize(loss), encoder.bn.updates])
            weights_d = sess.run(prop_d_masked.get_weights())
            expr_d = pretty_print.network(weights_d, primitive_funcs, ["d", "v", "z", 1])
            print(expr_d)
            weights_v = sess.run(prop_v_masked.get_weights())
            expr_v = pretty_print.network(weights_v, primitive_funcs, ["d", "v", "z", 1])
            print(expr_v)
            print("Frozen weights. Next stage of training.")
            # Initialize only the uninitialized variables.
            sess.run(tf.variables_initializer(set(tf.global_variables()) - temp))
            # Stage 3: fine-tune the masked network without regularization.
            for i in range(n_epochs3):
                x_batch, y_batch, y0_batch, z_batch = next(batch)
                feed_dict = {x_input: x_batch, y0_input: y0_batch, y_input: y_batch,
                             epoch: 0, learning_rate_ph: learning_rate / 10, training: True, reg_weight_ph: 0,
                             length_input: length_i}
                _ = sess.run(train, feed_dict=feed_dict)
                if i % summary_step == 0:
                    feed_dict[training] = False
                    loss_i, error_i, reg_i = sess.run((loss, error, reg_loss), feed_dict=feed_dict)
                    z_arr = sess.run(z, feed_dict=feed_dict)
                    r = np.corrcoef(z_batch[:, 0], z_arr[:, 0])[1, 0]
                    loss_list.append(loss_i)
                    error_list.append(error_i)
                    reg_list.append(reg_i)
                    print("Epoch %d\tError: %g\tCorrelation: %f" % (i, error_i, r))
            weights_d = sess.run(prop_d_masked.get_weights())
            expr_d = pretty_print.network(weights_d, primitive_funcs, ["d", "v", "z", 1])
            print(expr_d)
            weights_v = sess.run(prop_v_masked.get_weights())
            expr_v = pretty_print.network(weights_v, primitive_funcs, ["d", "v", "z", 1])
            print(expr_v)
            # Save results
            results = {
                "summary_step": summary_step,
                "learning_rate": learning_rate,
                "n_epochs1": n_epochs1,
                "n_epochs2": n_epochs2,
                "reg_weight": reg_weight,
                "timesteps": timesteps,
                "timesteps0": timesteps0,
                "weights_d": weights_d,
                "weights_v": weights_v,
                "loss_plot": loss_list,
                "error_plot": error_list,
                "reg_plot": reg_list,
                "expr_d": expr_d,
                "expr_v": expr_v
            }
            trial_dir = helpers.get_trial_path(results_dir)  # Get directory in which to save trial results
            tf.saved_model.simple_save(sess, trial_dir,
                                       inputs={"x": x_input, "y0": y0_input, "training": training},
                                       outputs={"z": z, "y": prop_output})
            # Save a summary of the parameters and results
            with open(os.path.join(trial_dir, 'summary.pickle'), "wb+") as f:
                pickle.dump(results, f)
            with open(os.path.join(results_dir, 'eq_summary.txt'), 'a') as f:
                f.write(str(expr_d) + "\n")
                f.write(str(expr_v) + "\n")
                f.write("Error: %f\n\n" % error_list[-1])
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Train the EQL network on simple harmonic oscillator (SHO) task.")
    parser.add_argument("--results-dir", type=str, default='results/sho/test')
    parser.add_argument("--reg-weight", type=float, default=2e-4, help='Regularization weight, lambda')
    parser.add_argument('--learning-rate', type=float, default=1e-2, help='Base learning rate for training')
    parser.add_argument('--batch-size', type=int, default=128)
    parser.add_argument("--n-epochs1", type=int, default=2001, help="Number of epochs to train in 1st stage")
    parser.add_argument("--n-epochs2", type=int, default=5001, help="Number of epochs to train in 2nd stage")
    parser.add_argument("--n-epochs3", type=int, default=5001, help="Number of epochs to train in 3rd stage")
    parser.add_argument("--timesteps", type=int, default=25, help="Number of time steps to predict")
    parser.add_argument('--trials', type=int, default=1, help="Number of trials to train.")
    args = parser.parse_args()
    kwargs = vars(args)
    print(kwargs)

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(kwargs['results_dir'], exist_ok=True)
    # Record the arguments used for this run alongside the results; the context
    # manager guarantees the file is closed even if the write fails.
    import json
    with open(os.path.join(kwargs['results_dir'], 'args.txt'), 'a') as meta:
        meta.write(json.dumps(kwargs))

    main(**kwargs)
| [
"utils.symbolic_network.SymbolicCell",
"tensorflow.shape",
"utils.functions.Square",
"tensorflow.sin",
"utils.functions.Exp",
"os.path.exists",
"argparse.ArgumentParser",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"json.dumps",
"numpy.stack",
"utils.helpers.batch_genera... | [((795, 821), 'numpy.load', 'np.load', (['"""dataset/sho.npz"""'], {}), "('dataset/sho.npz')\n", (802, 821), True, 'import numpy as np\n'), ((832, 855), 'numpy.asarray', 'np.asarray', (["data['x_d']"], {}), "(data['x_d'])\n", (842, 855), True, 'import numpy as np\n'), ((866, 889), 'numpy.asarray', 'np.asarray', (["data['x_v']"], {}), "(data['x_v'])\n", (876, 889), True, 'import numpy as np\n'), ((900, 923), 'numpy.asarray', 'np.asarray', (["data['y_d']"], {}), "(data['y_d'])\n", (910, 923), True, 'import numpy as np\n'), ((934, 957), 'numpy.asarray', 'np.asarray', (["data['y_v']"], {}), "(data['y_v'])\n", (944, 957), True, 'import numpy as np\n'), ((1037, 1065), 'numpy.stack', 'np.stack', (['(x_d, x_v)'], {'axis': '(2)'}), '((x_d, x_v), axis=2)\n', (1045, 1065), True, 'import numpy as np\n'), ((1097, 1137), 'numpy.stack', 'np.stack', (['(y_d[:, 0], y_v[:, 0])'], {'axis': '(1)'}), '((y_d[:, 0], y_v[:, 0]), axis=1)\n', (1105, 1137), True, 'import numpy as np\n'), ((1212, 1280), 'numpy.stack', 'np.stack', (['(y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1])'], {'axis': '(2)'}), '((y_d[:, 1:timesteps + 1], y_v[:, 1:timesteps + 1]), axis=2)\n', (1220, 1280), True, 'import numpy as np\n'), ((1366, 1459), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, x.shape[1], x.shape[2])', 'dtype': 'tf.float32', 'name': '"""enc_input"""'}), "(shape=(None, x.shape[1], x.shape[2]), dtype=tf.float32, name\n ='enc_input')\n", (1380, 1459), True, 'import tensorflow as tf\n'), ((1470, 1538), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, 2)', 'dtype': 'tf.float32', 'name': '"""prop_input"""'}), "(shape=(None, 2), dtype=tf.float32, name='prop_input')\n", (1484, 1538), True, 'import tensorflow as tf\n'), ((1570, 1655), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '(None, timesteps, 2)', 'dtype': 'tf.float32', 'name': '"""label_input"""'}), "(shape=(None, timesteps, 2), dtype=tf.float32, 
name='label_input'\n )\n", (1584, 1655), True, 'import tensorflow as tf\n'), ((1670, 1710), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '()'}), '(dtype=tf.int32, shape=())\n', (1684, 1710), True, 'import tensorflow as tf\n'), ((1749, 1766), 'utils.helpers.Encoder', 'helpers.Encoder', ([], {}), '()\n', (1764, 1766), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((1782, 1820), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(False)', '[]'], {}), '(False, [])\n', (1809, 1820), True, 'import tensorflow as tf\n'), ((1946, 1983), 'utils.symbolic_network.SymbolicNet', 'SymbolicNet', (['(2)'], {'funcs': 'primitive_funcs'}), '(2, funcs=primitive_funcs)\n', (1957, 1983), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((1997, 2034), 'utils.symbolic_network.SymbolicNet', 'SymbolicNet', (['(2)'], {'funcs': 'primitive_funcs'}), '(2, funcs=primitive_funcs)\n', (2008, 2034), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((2552, 2578), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2566, 2578), True, 'import tensorflow as tf\n'), ((2980, 3006), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (2994, 3006), True, 'import tensorflow as tf\n'), ((3017, 3074), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate_ph'}), '(learning_rate=learning_rate_ph)\n', (3042, 3074), True, 'import tensorflow as tf\n'), ((3095, 3121), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (3109, 3121), True, 'import tensorflow as tf\n'), ((3134, 3228), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'y_input[:, :length_input, :]', 'predictions': 'prop_output'}), '(labels=y_input[:, :length_input, :],\n 
predictions=prop_output)\n', (3162, 3228), True, 'import tensorflow as tf\n'), ((3345, 3421), 'utils.helpers.batch_generator', 'helpers.batch_generator', (['[x, y_data, y0, z_data]'], {'N': 'N', 'batch_size': 'batch_size'}), '([x, y_data, y0, z_data], N=N, batch_size=batch_size)\n', (3368, 3421), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((9513, 9620), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train the EQL network on simple harmonic oscillator (SHO) task."""'}), "(description=\n 'Train the EQL network on simple harmonic oscillator (SHO) task.')\n", (9536, 9620), False, 'import argparse\n'), ((2140, 2168), 'utils.symbolic_network.SymbolicCell', 'SymbolicCell', (['prop_d', 'prop_v'], {}), '(prop_d, prop_v)\n', (2152, 2168), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((3455, 3467), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3465, 3467), True, 'import tensorflow as tf\n'), ((10580, 10617), 'os.path.exists', 'os.path.exists', (["kwargs['results_dir']"], {}), "(kwargs['results_dir'])\n", (10594, 10617), False, 'import os\n'), ((10627, 10661), 'os.makedirs', 'os.makedirs', (["kwargs['results_dir']"], {}), "(kwargs['results_dir'])\n", (10638, 10661), False, 'import os\n'), ((10678, 10725), 'os.path.join', 'os.path.join', (["kwargs['results_dir']", '"""args.txt"""'], {}), "(kwargs['results_dir'], 'args.txt')\n", (10690, 10725), False, 'import os\n'), ((10764, 10782), 'json.dumps', 'json.dumps', (['kwargs'], {}), '(kwargs)\n', (10774, 10782), False, 'import json\n'), ((2217, 2249), 'tensorflow.expand_dims', 'tf.expand_dims', (['y0_input'], {'axis': '(1)'}), '(y0_input, axis=1)\n', (2231, 2249), True, 'import tensorflow as tf\n'), ((2375, 2415), 'tensorflow.keras.backend.repeat', 'tf.keras.backend.repeat', (['z', 'length_input'], {}), '(z, length_input)\n', (2398, 2415), True, 'import tensorflow as tf\n'), ((3556, 3572), 'numpy.isnan', 
'np.isnan', (['loss_i'], {}), '(loss_i)\n', (3564, 3572), True, 'import numpy as np\n'), ((5432, 5479), 'utils.symbolic_network.MaskedSymbolicNet', 'MaskedSymbolicNet', (['sess', 'prop_d'], {'threshold': '(0.01)'}), '(sess, prop_d, threshold=0.01)\n', (5449, 5479), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((5508, 5555), 'utils.symbolic_network.MaskedSymbolicNet', 'MaskedSymbolicNet', (['sess', 'prop_v'], {'threshold': '(0.01)'}), '(sess, prop_v, threshold=0.01)\n', (5525, 5555), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((6064, 6158), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'y_input[:, :length_input, :]', 'predictions': 'prop_output'}), '(labels=y_input[:, :length_input, :],\n predictions=prop_output)\n', (6092, 6158), True, 'import tensorflow as tf\n'), ((6310, 6378), 'utils.pretty_print.network', 'pretty_print.network', (['weights_d', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_d, primitive_funcs, ['d', 'v', 'z', 1])\n", (6330, 6378), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((6488, 6556), 'utils.pretty_print.network', 'pretty_print.network', (['weights_v', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_v, primitive_funcs, ['d', 'v', 'z', 1])\n", (6508, 6556), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((7823, 7891), 'utils.pretty_print.network', 'pretty_print.network', (['weights_d', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_d, primitive_funcs, ['d', 'v', 'z', 1])\n", (7843, 7891), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((8001, 8069), 'utils.pretty_print.network', 'pretty_print.network', (['weights_v', 'primitive_funcs', "['d', 'v', 'z', 1]"], {}), "(weights_v, primitive_funcs, ['d', 'v', 'z', 1])\n", (8021, 8069), False, 'from utils import functions, 
regularization, helpers, pretty_print\n'), ((8752, 8787), 'utils.helpers.get_trial_path', 'helpers.get_trial_path', (['results_dir'], {}), '(results_dir)\n', (8774, 8787), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((8849, 8993), 'tensorflow.saved_model.simple_save', 'tf.saved_model.simple_save', (['sess', 'trial_dir'], {'inputs': "{'x': x_input, 'y0': y0_input, 'training': training}", 'outputs': "{'z': z, 'y': prop_output}"}), "(sess, trial_dir, inputs={'x': x_input, 'y0':\n y0_input, 'training': training}, outputs={'z': z, 'y': prop_output})\n", (8875, 8993), True, 'import tensorflow as tf\n'), ((2647, 2671), 'tensorflow.sin', 'tf.sin', (['(reg_freq * epoch)'], {}), '(reg_freq * epoch)\n', (2653, 2671), True, 'import tensorflow as tf\n'), ((2746, 2770), 'tensorflow.sin', 'tf.sin', (['(reg_freq * epoch)'], {}), '(reg_freq * epoch)\n', (2752, 2770), True, 'import tensorflow as tf\n'), ((5803, 5824), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (5822, 5824), True, 'import tensorflow as tf\n'), ((5936, 5978), 'utils.symbolic_network.SymbolicCell', 'SymbolicCell', (['prop_d_masked', 'prop_v_masked'], {}), '(prop_d_masked, prop_v_masked)\n', (5948, 5978), False, 'from utils.symbolic_network import SymbolicNet, MaskedSymbolicNet, SymbolicCell\n'), ((9222, 9245), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (9233, 9245), False, 'import pickle\n'), ((507, 527), 'utils.functions.Constant', 'functions.Constant', ([], {}), '()\n', (525, 527), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((544, 564), 'utils.functions.Identity', 'functions.Identity', ([], {}), '()\n', (562, 564), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((581, 599), 'utils.functions.Square', 'functions.Square', ([], {}), '()\n', (597, 599), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((616, 631), 
'utils.functions.Sin', 'functions.Sin', ([], {}), '()\n', (629, 631), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((648, 663), 'utils.functions.Exp', 'functions.Exp', ([], {}), '()\n', (661, 663), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((680, 699), 'utils.functions.Sigmoid', 'functions.Sigmoid', ([], {}), '()\n', (697, 699), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((716, 743), 'utils.functions.Product', 'functions.Product', ([], {'norm': '(0.1)'}), '(norm=0.1)\n', (733, 743), False, 'from utils import functions, regularization, helpers, pretty_print\n'), ((3693, 3726), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3724, 3726), True, 'import tensorflow as tf\n'), ((9150, 9191), 'os.path.join', 'os.path.join', (['trial_dir', '"""summary.pickle"""'], {}), "(trial_dir, 'summary.pickle')\n", (9162, 9191), False, 'import os\n'), ((9269, 9312), 'os.path.join', 'os.path.join', (['results_dir', '"""eq_summary.txt"""'], {}), "(results_dir, 'eq_summary.txt')\n", (9281, 9312), False, 'import os\n'), ((2261, 2279), 'tensorflow.shape', 'tf.shape', (['y0_input'], {}), '(y0_input)\n', (2269, 2279), True, 'import tensorflow as tf\n'), ((2454, 2472), 'tensorflow.shape', 'tf.shape', (['y0_input'], {}), '(y0_input)\n', (2462, 2472), True, 'import tensorflow as tf\n'), ((5292, 5308), 'numpy.isnan', 'np.isnan', (['loss_i'], {}), '(loss_i)\n', (5300, 5308), True, 'import numpy as np\n'), ((7474, 7513), 'numpy.corrcoef', 'np.corrcoef', (['z_batch[:, 0]', 'z_arr[:, 0]'], {}), '(z_batch[:, 0], z_arr[:, 0])\n', (7485, 7513), True, 'import numpy as np\n'), ((4909, 4948), 'numpy.corrcoef', 'np.corrcoef', (['z_batch[:, 0]', 'z_arr[:, 0]'], {}), '(z_batch[:, 0], z_arr[:, 0])\n', (4920, 4948), True, 'import numpy as np\n'), ((6755, 6776), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (6774, 6776), True, 
'import tensorflow as tf\n')] |
from unittest import TestCase
from urllib.parse import urljoin
import pytest
from osbot_browser.javascript.Javascript_Parser import Javascript_Parser
from osbot_utils.testing.Duration import Duration
from osbot_utils.decorators.methods.cache_on_tmp import cache_on_tmp
from osbot_utils.utils.Http import GET
from osbot_browser.py_query.Py_Query import py_query_from_GET
JS_SIMPLE_CODE_1 = "var answer = 6 * 7;"
class test_Javascript_Parser(TestCase):
    """Tests for Javascript_Parser: JS source -> esprima-style AST -> queryable js_dom."""

    def setUp(self) -> None:
        # Small inline fixture plus one real JS file fetched over HTTP (cached on tmp).
        self.test_js_code_1 = JS_SIMPLE_CODE_1
        self.test_js_code_2 = self.get_test_code_2()
        # Larger fixtures are disabled until stable source URLs exist (see todo below).
        #self.test_js_code_3 = self.get_test_code_3()
        #self.test_js_code_4 = self.get_test_code_4()
        #self.test_js_code_5 = self.get_test_code_5()

    @cache_on_tmp()  # caches the downloaded file on disk so the suite stays fast
    def get_test_code_2(self):
        # Scrape httpbin.org for <script src=...> tags and download the last one.
        base_url = 'https://httpbin.org/'
        py_query = py_query_from_GET('https://httpbin.org/')
        js_srcs = []
        for key in py_query.query('script').indexed_by_attribute('src'):
            js_srcs.append(urljoin(base_url, key))  # resolve relative src values
        js_85k = js_srcs.pop()
        return GET(js_85k)

    # todo find test JS with the sizes below
    # @cache_on_tmp()
    # def get_test_code_3(self):
    #     url_36k = ""
    #     return GET(url_36k)
    #
    # @cache_on_tmp()
    # def get_test_code_4(self):
    #     url_85k = ""
    #     return GET(url_85k)
    #
    # @cache_on_tmp()
    # def get_test_code_5(self):
    #     url_697k = ""
    #     return GET(url_697k)

    def js_parser_for_code(self, js_code):
        # Helper: build a parser and feed it js_code in one step.
        javascript_parser = Javascript_Parser()
        javascript_parser.process_js_code(js_code)
        return javascript_parser

    def test_ast_from_py_js_parser(self):
        js_code = "var answer = 6 * 7;"
        # Expected esprima-style AST: one `var` declaration initialised with 6 * 7,
        # plus an EmptyStatement produced by the trailing semicolon.
        expected_ast = { "type": "Program",
                         "body": [{ "declarations": [ { "type": "VariableDeclarator",
                                                        "id": { "type": "Identifier", "name": "answer" },
                                                        "init": { "type": "BinaryExpression",
                                                                  "operator": "*",
                                                                  "left": { "type": "Literal", "value": 6.0, "raw": "6" },
                                                                  "right": { "type": "Literal", "value": 7.0, "raw": "7" }} }
                                                      ],
                                    "kind": "var",
                                    'type': 'VariableDeclaration' },
                                  {'type': 'EmptyStatement'}]
                       }
        js_ast = Javascript_Parser().ast_from_py_js_parser(js_code)
        assert js_ast == expected_ast

    @pytest.mark.skip("todo: write test for this function (that uses larger js file)")
    def test_ast_to_dom(self):
        javascript_parser = Javascript_Parser()
        js_code = self.test_js_code_3  # disabled fixture - see setUp
        javascript_parser.process_js_code(js_code)
        stats = javascript_parser.all_nodes__stats()
        #pprint(stats)
        #javascript_parser.get_functions()
        #javascript_parser.get_literals()

    def test_ast_to_dom_multiple_files(self):
        # Rough benchmark: parse each fixture and report elapsed time vs. input/output sizes.
        print()
        print()
        def process_file(js_code):
            with Duration(print_result=False) as duration:
                javascript_parser = Javascript_Parser()
                javascript_parser.process_js_code(js_code=js_code)
            js_dom = javascript_parser.js_dom
            print(f"{duration.seconds()} seconds | JS Code: {len(js_code)} | JS_Dom: {len(str(js_dom))}")
        process_file(self.test_js_code_1)
        process_file(self.test_js_code_2)
        #process_file(self.test_js_code_3)
        #process_file(self.test_js_code_4)
        #process_file(self.test_js_code_5)
        # Sample timings from previous runs:
        # 0.0003 seconds | JS Code: 19 | JS_Dom: 241
        # 0.4842 seconds | JS Code: 85578 | JS_Dom: 129
        # 0.1555 seconds | JS Code: 36174 | JS_Dom: 2741
        # 0.2768 seconds | JS Code: 85139 | JS_Dom: 5341
        # 3.6960 seconds | JS Code: 696948 | JS_Dom: 13929

    def test_literal_names(self):
        js_code = self.test_js_code_1
        js_parser = self.js_parser_for_code(js_code)
        # "var answer = 6 * 7;" contains exactly two numeric literals
        assert js_parser.literal_names(min_name_size=0) == ['6', '7']

    @pytest.mark.skip("todo: write test for this function (that uses larger js file)")
    def test_method_names(self):
        js_code = self.test_js_code_3
        js_parser = self.js_parser_for_code(js_code)
        assert len(js_parser.function_names(min_name_size=3)) == 25
def test_var_names(self):
js_code = self.test_js_code_1
js_parser = self.js_parser_for_code(js_code)
assert js_parser.var_names(min_name_size=0) == ['answer'] | [
"osbot_utils.utils.Http.GET",
"osbot_browser.py_query.Py_Query.py_query_from_GET",
"pytest.mark.skip",
"osbot_utils.testing.Duration.Duration",
"urllib.parse.urljoin",
"osbot_utils.decorators.methods.cache_on_tmp.cache_on_tmp",
"osbot_browser.javascript.Javascript_Parser.Javascript_Parser"
] | [((756, 770), 'osbot_utils.decorators.methods.cache_on_tmp.cache_on_tmp', 'cache_on_tmp', ([], {}), '()\n', (768, 770), False, 'from osbot_utils.decorators.methods.cache_on_tmp import cache_on_tmp\n'), ((2791, 2877), 'pytest.mark.skip', 'pytest.mark.skip', (['"""todo: write test for this function (that uses larger js file)"""'], {}), "(\n 'todo: write test for this function (that uses larger js file)')\n", (2807, 2877), False, 'import pytest\n'), ((4371, 4457), 'pytest.mark.skip', 'pytest.mark.skip', (['"""todo: write test for this function (that uses larger js file)"""'], {}), "(\n 'todo: write test for this function (that uses larger js file)')\n", (4387, 4457), False, 'import pytest\n'), ((863, 904), 'osbot_browser.py_query.Py_Query.py_query_from_GET', 'py_query_from_GET', (['"""https://httpbin.org/"""'], {}), "('https://httpbin.org/')\n", (880, 904), False, 'from osbot_browser.py_query.Py_Query import py_query_from_GET\n'), ((1097, 1108), 'osbot_utils.utils.Http.GET', 'GET', (['js_85k'], {}), '(js_85k)\n', (1100, 1108), False, 'from osbot_utils.utils.Http import GET\n'), ((1565, 1584), 'osbot_browser.javascript.Javascript_Parser.Javascript_Parser', 'Javascript_Parser', ([], {}), '()\n', (1582, 1584), False, 'from osbot_browser.javascript.Javascript_Parser import Javascript_Parser\n'), ((2932, 2951), 'osbot_browser.javascript.Javascript_Parser.Javascript_Parser', 'Javascript_Parser', ([], {}), '()\n', (2949, 2951), False, 'from osbot_browser.javascript.Javascript_Parser import Javascript_Parser\n'), ((1026, 1048), 'urllib.parse.urljoin', 'urljoin', (['base_url', 'key'], {}), '(base_url, key)\n', (1033, 1048), False, 'from urllib.parse import urljoin\n'), ((2696, 2715), 'osbot_browser.javascript.Javascript_Parser.Javascript_Parser', 'Javascript_Parser', ([], {}), '()\n', (2713, 2715), False, 'from osbot_browser.javascript.Javascript_Parser import Javascript_Parser\n'), ((3344, 3372), 'osbot_utils.testing.Duration.Duration', 'Duration', ([], {'print_result': 
'(False)'}), '(print_result=False)\n', (3352, 3372), False, 'from osbot_utils.testing.Duration import Duration\n'), ((3422, 3441), 'osbot_browser.javascript.Javascript_Parser.Javascript_Parser', 'Javascript_Parser', ([], {}), '()\n', (3439, 3441), False, 'from osbot_browser.javascript.Javascript_Parser import Javascript_Parser\n')] |
from setuptools import setup, find_packages
from os import path
import re
def read_file(file_name: str) -> str:
    """Return the UTF-8 text of *file_name*, resolved relative to this script."""
    base_dir = path.abspath(path.dirname(__file__))
    full_path = path.join(base_dir, file_name)
    with open(full_path, encoding="utf-8") as handle:
        contents = handle.read()
    return contents
# Long description shown on PyPI comes straight from the README.
long_description = read_file("README.md")
# Strip ALL whitespace (including the trailing newline) from the version file.
# BUGFIX: the pattern must be a raw string -- "\s" in a plain string is an
# invalid escape sequence (DeprecationWarning, SyntaxWarning on Python >= 3.12).
version = re.sub(r"\s+", "", read_file("version.txt"))

setup(
    name="signal-ocean",
    version=version,
    description="Access Signal Ocean Platform data using Python.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Signal Ocean Developers",
    author_email="<EMAIL>",
    license="Apache 2.0",
    url="https://apis.signalocean.com/",
    packages=find_packages(exclude=["tests", "tests.*"]),
    python_requires=">=3.7",
    install_requires=[
        "requests>=2.23.0,<3",
        "python-dateutil>=2.8.1,<3",
        "pandas>=1.0.3,<2",
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: Apache Software License",
    ],
    project_urls={
        "The Signal Group": "https://www.thesignalgroup.com/",
        "Signal Ocean": "https://www.signalocean.com/",
        "The Signal Ocean Platform": "https://app.signalocean.com",
    },
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
] | [((138, 160), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (150, 160), False, 'from os import path\n'), ((707, 750), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'tests.*']"}), "(exclude=['tests', 'tests.*'])\n", (720, 750), False, 'from setuptools import setup, find_packages\n'), ((176, 202), 'os.path.join', 'path.join', (['here', 'file_name'], {}), '(here, file_name)\n', (185, 202), False, 'from os import path\n')] |
from dotenv import load_dotenv
import os
import tweepy
# Load Twitter API credentials from a local .env file (keeps secrets out of source).
load_dotenv()

APIkey = os.getenv('APIkey')
APIsecretkey = os.getenv('APIsecretkey')
AccessToken = os.getenv("AccessToken")
AccessTokenSecret = os.getenv("AccessTokenSecret")

# OAuth 1.0a user-context authentication against the Twitter API.
auth = tweepy.OAuthHandler(APIkey, APIsecretkey)
auth.set_access_token(AccessToken, AccessTokenSecret)
api = tweepy.API(auth)
print(api.me().name) | [
"tweepy.API",
"tweepy.OAuthHandler",
"os.getenv",
"dotenv.load_dotenv"
] | [((56, 69), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (67, 69), False, 'from dotenv import load_dotenv\n'), ((79, 98), 'os.getenv', 'os.getenv', (['"""APIkey"""'], {}), "('APIkey')\n", (88, 98), False, 'import os\n'), ((114, 139), 'os.getenv', 'os.getenv', (['"""APIsecretkey"""'], {}), "('APIsecretkey')\n", (123, 139), False, 'import os\n'), ((154, 178), 'os.getenv', 'os.getenv', (['"""AccessToken"""'], {}), "('AccessToken')\n", (163, 178), False, 'import os\n'), ((199, 229), 'os.getenv', 'os.getenv', (['"""AccessTokenSecret"""'], {}), "('AccessTokenSecret')\n", (208, 229), False, 'import os\n'), ((238, 279), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['APIkey', 'APIsecretkey'], {}), '(APIkey, APIsecretkey)\n', (257, 279), False, 'import tweepy\n'), ((340, 356), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (350, 356), False, 'import tweepy\n')] |
""" make_prefixsums.py
For generating prefix sums dataset for the
DeepThinking project.
<NAME> and <NAME>
July 2021
"""
import collections as col
import torch
def binary(x, bits):
    """Expand integer tensor ``x`` into ``bits`` binary digits (LSB first, 0/1 longs)."""
    powers_of_two = 2 ** torch.arange(bits)
    bit_values = x.unsqueeze(-1) & powers_of_two
    return (bit_values != 0).long()
def get_target(inputs):
    """Prefix-sum parity target: element i is (inputs[0] + ... + inputs[i]) mod 2."""
    running_total = torch.cumsum(inputs, dim=0)
    return torch.remainder(running_total, 2)
if __name__ == "__main__":
    # Change this variable, digits, to make datasets with different numbers of binary digits
    for digits in list(range(16, 65)) + [72, 128, 256, 512]:
        inputs = torch.zeros(10000, digits)
        targets = torch.zeros(10000, digits)
        if digits < 24:
            # Small bit-widths: sample 10k *distinct* integers, then expand each to bits,
            # guaranteeing a repeat-free dataset.
            rand_tensor = torch.arange(2 ** digits)[torch.randperm(2 ** digits)[:10000]]
            rand_tensor = torch.stack([binary(r, digits) for r in rand_tensor])
        else:
            # Large bit-widths: draw random bits directly (collisions are very unlikely).
            rand_tensor = torch.rand(10000, digits) >= 0.5
        for i in range(10000):
            target = get_target(rand_tensor[i])
            inputs[i] = rand_tensor[i]
            targets[i] = target
        torch.save(inputs, f"{digits}_data.pth")
        torch.save(targets, f"{digits}_targets.pth")
        # Check for repeats
        inputs = inputs.numpy()
        t_dict = {}  # NOTE(review): immediately replaced by the defaultdict below
        t_dict = col.defaultdict(lambda: 0)  # t_dict = {*:0}
        for t in inputs:
            t_dict[t.tobytes()] += 1  # t_dict[input] += 1
        repeats = 0
        for i in inputs:
            if t_dict[i.tobytes()] > 1:
                repeats += 1
        print(f"There are {repeats} repeats in the dataset.")
| [
"torch.cumsum",
"torch.randperm",
"torch.rand",
"collections.defaultdict",
"torch.save",
"torch.zeros",
"torch.arange"
] | [((330, 353), 'torch.cumsum', 'torch.cumsum', (['inputs', '(0)'], {}), '(inputs, 0)\n', (342, 353), False, 'import torch\n'), ((214, 232), 'torch.arange', 'torch.arange', (['bits'], {}), '(bits)\n', (226, 232), False, 'import torch\n'), ((599, 625), 'torch.zeros', 'torch.zeros', (['(10000)', 'digits'], {}), '(10000, digits)\n', (610, 625), False, 'import torch\n'), ((644, 670), 'torch.zeros', 'torch.zeros', (['(10000)', 'digits'], {}), '(10000, digits)\n', (655, 670), False, 'import torch\n'), ((1097, 1137), 'torch.save', 'torch.save', (['inputs', 'f"""{digits}_data.pth"""'], {}), "(inputs, f'{digits}_data.pth')\n", (1107, 1137), False, 'import torch\n'), ((1146, 1190), 'torch.save', 'torch.save', (['targets', 'f"""{digits}_targets.pth"""'], {}), "(targets, f'{digits}_targets.pth')\n", (1156, 1190), False, 'import torch\n'), ((1289, 1316), 'collections.defaultdict', 'col.defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (1304, 1316), True, 'import collections as col\n'), ((721, 746), 'torch.arange', 'torch.arange', (['(2 ** digits)'], {}), '(2 ** digits)\n', (733, 746), False, 'import torch\n'), ((904, 929), 'torch.rand', 'torch.rand', (['(10000)', 'digits'], {}), '(10000, digits)\n', (914, 929), False, 'import torch\n'), ((747, 774), 'torch.randperm', 'torch.randperm', (['(2 ** digits)'], {}), '(2 ** digits)\n', (761, 774), False, 'import torch\n')] |
import unittest
import mongomock
from dasbot.db.stats_repo import StatsRepo
class TestStatsRepo(unittest.TestCase):
    """StatsRepo behaviour against in-memory (mongomock) Mongo collections."""

    def setUp(self):
        # Two independent fake collections back the repository under test.
        fake_scores = mongomock.MongoClient().db.collection
        fake_stats = mongomock.MongoClient().db.collection
        self.stats_repo = StatsRepo(fake_scores, fake_stats)

    def test_get_stats_no_data(self):
        # With nothing stored, the stats structure comes back zeroed/empty.
        expected = {'touched': 0, 'mistakes_30days': [], 'mistakes_alltime': []}
        self.assertDictEqual(expected, self.stats_repo.get_stats(1))
if __name__ == '__main__':
    # Allow running this test module directly (python test_stats_repo.py).
    unittest.main()
| [
"unittest.main",
"mongomock.MongoClient",
"dasbot.db.stats_repo.StatsRepo"
] | [((546, 561), 'unittest.main', 'unittest.main', ([], {}), '()\n', (559, 561), False, 'import unittest\n'), ((284, 316), 'dasbot.db.stats_repo.StatsRepo', 'StatsRepo', (['scores_col', 'stats_col'], {}), '(scores_col, stats_col)\n', (293, 316), False, 'from dasbot.db.stats_repo import StatsRepo\n'), ((162, 185), 'mongomock.MongoClient', 'mongomock.MongoClient', ([], {}), '()\n', (183, 185), False, 'import mongomock\n'), ((220, 243), 'mongomock.MongoClient', 'mongomock.MongoClient', ([], {}), '()\n', (241, 243), False, 'import mongomock\n')] |
# FinSim
# Copyright 2018 <NAME>. All Rights Reserved.
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
### ---
# @file resources.py
# @brief endpoints to interact with finsim-trans server
## @list of endpoints
# ListAccounts (frontend)
# GetAccountInfo (frontend)
# ListTransactions (frontend)
# FlagAsFraud
# ProcessCard
# BankWithdrawal
# BankDeposit
# AccountCreation
# UserRegistration
# UserLogin
# TokenRefresh
### ---
from finsim_trans.helper import bnk_login, check_login_list
from flask_restful import Resource, reqparse, marshal_with, fields
from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType
from flask_jwt_extended import ( create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt )
from flask import jsonify
from decimal import Decimal
import requests
# Cached result of check_login_list(); lazily populated the first time
# ProcessCard runs. Index layout is defined in helper.py -- TODO confirm.
current_component = []

#Used to control what exactly gets returned by resources as a response
#account_fields used by ListAccounts and GetAccountInfo resources
account_fields = { 'id': fields.String, 'account_number': fields.String, 'balance': fields.String }

#transaction fields used by ListTransactions resource
transaction_fields = { 'id': fields.String, 'amount': fields.String, 'type': fields.String, 'account_id': fields.String, 'account': fields.String, 'counterpart_name': fields.String, 'counterpart_acct': fields.String }
# These nearly identical parsers pull arguments out of the request and return an error if anything is missing
#parser is used in UserRegistration
parser = reqparse.RequestParser()
parser.add_argument( 'username', help = 'This field cannot be blank', required = True )
parser.add_argument( 'password', help = 'This field cannot be blank', required = True )
parser.add_argument( 'role', help = 'This field cannot be blank', required = True )

#parserLogin is used in UserLogin resource
parserLogin = reqparse.RequestParser()
parserLogin.add_argument( 'username', help = 'This field cannot be blank', required = True )
parserLogin.add_argument( 'password', help = 'This field cannot be blank', required = True )

#accountParser is used in GetAccountInfo, ListTransactions, and AccountCreation resources
accountParser = reqparse.RequestParser()
accountParser.add_argument( 'number', help = 'This field cannot be blank', required = True )

#depositParser is used in BankDeposit resource
depositParser = reqparse.RequestParser()
depositParser.add_argument( 'amount', help = 'This field cannot be blank', required = True )
depositParser.add_argument( 'account', help = 'This field cannot be blank', required = True )
depositParser.add_argument( 'counterpart_name', help = 'This field cannot be blank', required = True )
# NOTE(review): counterpart_acct is optional here (required = False) even though
# the BankDeposit handler reads it -- confirm all callers supply it.
depositParser.add_argument( 'counterpart_acct', help = 'This field cannot be blank', required = False )

#withdrawalParser is used in BankWithdrawal resource
withdrawalParser = reqparse.RequestParser()
withdrawalParser.add_argument( 'amount', help = 'This field cannot be blank', required = True )
withdrawalParser.add_argument( 'account', help = 'This field cannot be blank', required = True )
withdrawalParser.add_argument( 'counterpart_name', help = 'This field cannot be blank', required = True )
withdrawalParser.add_argument( 'counterpart_acct', help = 'This field cannot be blank', required = True )

#cardParser is used in ProcessCard resource
cardParser = reqparse.RequestParser()
cardParser.add_argument( 'source', help = 'This field cannot be blank', required = True )
cardParser.add_argument( 'destination', help = 'This field cannot be blank', required = True )
cardParser.add_argument( 'amount', help = 'This field cannot be blank', required = True )

#fraudParser is used in FlagAsFraud resource
fraudParser = reqparse.RequestParser()
fraudParser.add_argument( 'account', help = 'This field cannot be blank', required = True )
fraudParser.add_argument( 'transaction', help = 'This field cannot be blank', required = True )
## ---
# @brief ListAccounts returns a list of all the accounts that the currently logged in user owns
# Primarily used by finsim-web Angular frontend for bank
# User must be logged into finsim-web/trans in order to be able to get their account information (authenticated via @jwt_required)
# @return list of accounts
## ---
class ListAccounts( Resource ):
    """GET: every bank account owned by the authenticated user (frontend)."""

    @jwt_required
    @marshal_with( account_fields )
    def get( self ):
        # Resolve the JWT identity back to a UserModel row.
        current_user = UserModel.find_by_username( get_jwt_identity()['username'] )
        accounts = AccountModel.find_all_by_id( current_user.id )
        # Debug output preserved from the original implementation.
        for account in accounts:
            print( account.id, account.account_number, account.balance )
        print( type( accounts ) )
        return accounts
## ---
# @brief GetAccountInfo returns the account info for one bank account owned by the logged in user
# Primarily used by finsim-web Angular frontend for bank
# @input number - account number that the user account wants to access
# User must be logged into finsim-web/trans in order to be able to get their account information (authenticated via @jwt_required)
# @return account id, number, and balance for the requested account number IF the user does own the requested account
## ---
class GetAccountInfo( Resource ):
    """GET: details of a single account, provided the caller owns it."""

    @jwt_required
    @marshal_with( account_fields )
    def get( self ):
        args = accountParser.parse_args()
        owner = UserModel.find_by_username( get_jwt_identity()['username'] )
        # Refuse to reveal anything about accounts the caller does not own.
        if AccountModel.account_exists( owner.id, args['number'] ) != True:
            return { 'message': 'Account does not exist or is not owned by current user.' }
        account = AccountModel.find_by_account_number( args['number'] )
        return { 'id': account.id, 'account_number': account.account_number, 'balance': account.balance }
## ---
# @brief ListTransactions returns a list of all the transactions that involves the input account
# Primarily used by finsim-web Angular frontend for bank
# @input number - account number they want the transactions history of
# User must be logged into finsim-web/trans in order to be able to get their account information (authenticated via @jwt_required)
# @return list of transactions
## ---
class ListTransactions( Resource ):
    """GET: full transaction history for one of the caller's accounts."""

    @jwt_required
    @marshal_with( transaction_fields )
    def get( self ):
        args = accountParser.parse_args()
        owner = UserModel.find_by_username( get_jwt_identity()['username'] )
        # Only the owner may list an account's transactions.
        if AccountModel.account_exists( owner.id, args['number'] ) != True:
            return { 'message': 'Account does not exist or is not owned by current user.' }
        account = AccountModel.find_by_account_number( args['number'] )
        transactions = TransactionModel.find_all_by_account_id( account.id )
        # Debug output preserved from the original implementation.
        for txn in transactions:
            print( txn.id, txn.amount, txn.type, txn.account_id )
        return transactions
## ---
# @brief FlagAsFraud flags a transaction as fraud
# @input account, transaction - account number and the transaction the user wants to flag as fraud
# User must be logged into finsim-trans in order to successfully flag a transaction as fraud
# @return message specifiying success or failure with flagging the transaction
## ---
class FlagAsFraud( Resource ):
    """POST: mark one of the caller's own transactions as fraudulent."""

    @jwt_required
    def post( self ):
        args = fraudParser.parse_args()
        owner = UserModel.find_by_username( get_jwt_identity()['username'] )
        # Guard: the account must exist and belong to the caller.
        if AccountModel.account_exists( owner.id, args['account'] ) != True:
            return { 'message': 'Account does not exist or is not owned by current user.' }
        account = AccountModel.find_by_account_number( args['account'] )
        transaction = TransactionModel.find_by_id( account.id, args['transaction'] )
        # Guard: the transaction must belong to that account.
        if transaction == None:
            return { 'message': 'Transaction does not exist or is not made by the specified account.' }
        transaction.fraud_flag = FraudType.FLAG
        transaction.save_to_db()
        print(transaction.fraud_flag)
        return { 'message': 'Selected transaction has been flagged as fraud.' }
## ---
# @brief ProcessCard flags a transaction as fraud
# Primarily used by finsim-cc. finsim-trans responding to this web request is always the bank for the destination account
# If source account is with another bank, this bank will make login and make a web request to the other bank's /bank/withdrawal endpoint
# @input source, destination, amount - source is the user account, destination is the merchant account, amount is what will be transferred from user to destination accounts
# finsim-cc component must be logged into finsim-trans
# @return message specifiying success or failure with processing the transaction
## ---
class ProcessCard( Resource ):
    @jwt_required
    def post( self ):
        """Settle a card payment into an account at this bank.

        Caller must hold the CC role. The destination account must live at
        this bank; the source account may be local or at a peer bank, in
        which case a /bank/withdrawal request is first made to that bank.
        """
        global current_component
        #Always start by checking whether current_component is filled out or not
        # current_component caches this component's login/peer info. Index layout
        # assumed from usage below: [3] ~ credential, [4] ~ list of peer banks,
        # [5] ~ this bank's account-number prefix -- TODO confirm in helper.py.
        if len(current_component) == 0:
            current_component = check_login_list()
            if len(current_component) == 0:
                return { 'message': 'There was a problem with the current component functionality.' }
        data = cardParser.parse_args()
        if get_jwt_identity()['role'] != 'CC': #maybe compare this using the enum instead of a string?
            return { 'message': 'Only a credit card processor can initiate a card processing transaction.' }
        user = UserModel.find_by_username( get_jwt_identity()['username'] )
        destAccount = AccountModel.find_by_account_number( data['destination'] )
        destUser = UserModel.find_by_id( destAccount.owner_id )
        # Credit leg of the transfer; only the last 4 digits of the source are stored.
        depositTrans = TransactionModel( amount = Decimal( data['amount'] ), type = TxType.CREDIT, account_id = destAccount.id, counterpart_acct = data['source'][-4:], fraud_flag = FraudType.NONE )
        # First we have to confirm that the destination account resides at this bank
        # If not, send an error message.
        if not destAccount.account_number.startswith( str(current_component[5]) ):
            return { 'message': 'Cannot process a card transaction that does not terminate at this bank.' }
        # Next, figure out if this transaction takes place within this bank or across different banks
        if not data['source'].startswith( str(current_component[5]) ):
            # For a back-to-bank transaction
            for bank in current_component[4]:
                if len(bank) != 1:
                    if data['source'].startswith( str(bank[3]) ):
                        #cc_login is a helper function in helper.py used to take care of logging into the correct bank
                        token = bnk_login(bank[1], current_component[3], bank[2])
                        header = { "Authorization": "Bearer " + token['access_token'], "Content-Type": "application/json" }
                        # Make the request to the remote bank
                        # NOTE(review): verify=False disables TLS certificate checking
                        # on this bank-to-bank call -- acceptable only in a simulation.
                        bankResponse = requests.post( bank[1] + "/bank/withdrawal", json = { 'amount': data['amount'], 'account': data['source'], 'counterpart_name': destUser.username, 'counterpart_acct': destAccount.account_number[-4:] }, headers = header, verify=False ).json()
                        print( "Withdrawal request response: " )
                        print( bankResponse['message'] )
                        if bankResponse['message'] == "Withdrawal successful.":
                            # Remote debit succeeded -> apply and persist the local credit.
                            depositTrans.counterpart_name = bankResponse['counterpart_name']
                            destAccount.applyTransaction( depositTrans )
                            depositTrans.save_to_db()
                            destAccount.save_to_db()
                            return { 'message': 'Card processed successfully 2.' }
                        else:
                            return { 'message': 'Insufficient funds in source account 2.' }
                        break  # NOTE(review): unreachable -- both branches above return
            # NOTE(review): if no peer bank matches the source prefix, this method
            # falls through and returns None.
        else:
            # For a local transaction
            srcAccount = AccountModel.find_by_account_number( data['source'] )
            withdrawalTrans = TransactionModel( amount = Decimal( data['amount'] ), type = TxType.DEBIT, account_id = srcAccount.id, counterpart_acct = destAccount.account_number[-4:], fraud_flag = FraudType.NONE )
            try:
                withdrawalTrans.counterpart_name = destUser.username
                debitOk = srcAccount.applyTransaction( withdrawalTrans )
                if debitOk:
                    # Debit cleared -> mirror the credit and persist both legs.
                    srcUser = UserModel.find_by_id( srcAccount.owner_id )
                    depositTrans.counterpart_name = srcUser.username
                    destAccount.applyTransaction( depositTrans )
                    depositTrans.save_to_db()
                    withdrawalTrans.save_to_db()
                    srcAccount.save_to_db()
                    destAccount.save_to_db()
                    return { 'message': 'Card processed successfully.' }
                else:
                    return { 'message': 'Insufficient funds in source account.' }
            except Exception as e:
                print( e )
                return { 'message': 'There was a problem processing this card transaction.' }
## ---
# @brief BankWithdrawal withdraws a specified amount from the bank account
# @input amount, account, counterpart_name, counterpart_acct - account is the account that will potentially have the amount taken out of it's balance. Counterpart refers to who initiated the withdrawal
# A BANK user must be logged in in order to initiate a withdrawal. No other users may do so
# @return id, balance, owner, message, counterpart - counterpart indicates who initiated the withdrawal
## ---
class BankWithdrawal( Resource ):
    """POST /bank/withdrawal: debit an account on behalf of a peer bank.

    Only callers with the BANK role may invoke this; used during
    cross-bank card processing (see ProcessCard).
    """

    @jwt_required
    def post( self ):
        data = withdrawalParser.parse_args()
        if get_jwt_identity()['role'] != 'BANK': #maybe compare this using the enum instead of a string?
            return { 'message': 'Only a bank can initiate a withdrawal.' }
        # BUGFIX: get_jwt_identity() returns a dict here (it is subscripted with
        # ['role'] above); look the user up by its 'username' key, matching every
        # other resource in this file.
        user = UserModel.find_by_username( get_jwt_identity()['username'] )
        try:
            account = AccountModel.find_by_account_number( data['account'] )
            #TODO Add logic to prevent account from going negative
            # Good approach would be to do this in the applyTransaction() method. make it return a bool
            # True signifies successful transaction, False means it failed (would go negative) then check
            # the result here and react accordingly.
            trans = TransactionModel( amount = data['amount'], type = TxType.DEBIT, account_id = account.id, counterpart_name = data['counterpart_name'], counterpart_acct = data['counterpart_acct'], fraud_flag = FraudType.NONE )
            acctUser = UserModel.find_by_id( account.owner_id )
            trans.save_to_db()
            account.applyTransaction( trans )
            account.save_to_db()
            return { 'id': account.id, 'balance': str(account.balance), 'owner': account.owner_id, 'message': 'Withdrawal successful.', 'counterpart_name': acctUser.username }
        except Exception as e:
            print( e )
            # BUGFIX: this is the debit path -- the previous message said
            # "credit transaction" (copy-paste from BankDeposit).
            return { 'message': 'There was a problem processing the debit transaction.' }
## ---
# @brief BankDeposit deposits a specified amount into the bank account
# @input amount, account, counterpart_name, counterpart_acct - account is the account that will potentially have the amount added to its balance. Counterpart refers to who initiated the deposit
# A user must be logged in in order to initiate a deposit
# @return id, balance, owner - counterpart indicates who initiated the deposit
## ---
class BankDeposit( Resource ):
    """POST /bank/deposit: credit the given account at this bank."""

    @jwt_required
    def post( self ):
        args = depositParser.parse_args()
        user = UserModel.find_by_username( get_jwt_identity() )
        try:
            account = AccountModel.find_by_account_number( args['account'] )
            credit = TransactionModel( amount = args['amount'],
                                       type = TxType.CREDIT,
                                       account_id = account.id,
                                       counterpart_name = args['counterpart_name'],
                                       counterpart_acct = args['counterpart_acct'],
                                       fraud_flag = FraudType.NONE )
            # Persist the transaction, then apply it to the running balance.
            credit.save_to_db()
            account.applyTransaction( credit )
            account.save_to_db()
            return jsonify( { 'id': account.id, 'balance': str(account.balance), 'owner': account.owner_id } )
        except Exception as e:
            print( e )
            return { 'message': 'There was a problem processing the credit transaction.' }
## ---
# @brief AccountCreation creates a bank account for the logged in user
# @input number - account number to create
# A user must be logged in in order to create an account
# @return message - indicates result of account creation
## ---
class AccountCreation( Resource ):
    """POST: open a new, empty account for the authenticated user."""

    @jwt_required
    def post( self ):
        # TODO maybe restrict this feature to certain roles (admin only)
        args = accountParser.parse_args()
        identity = get_jwt_identity()
        print( "{}".format( identity['role'] ) )
        owner = UserModel.find_by_username( identity['username'] )
        print( "Creating account for {}".format( owner.username ) )
        account = AccountModel( balance = 0.0, owner_id = owner.id, account_number = args['number'] )
        try:
            account.save_to_db()
        except Exception as e:
            print( e )
            return { 'message': 'Could not create account' }
        return { 'message': 'Account created successfully' }
## ---
# @brief UserRegistration is the endpoint used to register users to finsim-trans
# @input username, password, role
# @return message, access_token, refresh_token - message indicates successful registration
# or failure message
## ---
class UserRegistration( Resource ):
    """POST: create a new user and return an initial access/refresh token pair."""

    def post( self ):
        args = parser.parse_args()
        # Usernames are unique; refuse duplicates up front.
        if UserModel.find_by_username( args['username'] ):
            return { 'message': 'User {} already exists.'.format(args['username']) }
        hashed = UserModel.generate_hash( args['password'] )
        created = UserModel( username = args['username'], password = hashed, role = RoleType[args['role']] )
        try:
            created.save_to_db()
            # Log the new user straight in by issuing both token types.
            identity = { 'username': created.username, 'role': created.role.name }
            return { 'message': 'User {} was created.'.format( args['username'] ),
                     'access_token': create_access_token( identity = identity ),
                     'refresh_token': create_refresh_token( identity = identity ) }
        except Exception as e:
            print( e )
            return { 'message': 'Something went wrong.' }, 500
## ---
# @brief UserLogin is used by users to connect to finsim-trans
# @input username, password
# @return message, access_token, refresh_token - message indicates successful login
# or failure message
## ---
class UserLogin( Resource ):
    """POST: authenticate a user and hand out JWT access/refresh tokens."""

    def post( self ):
        args = parserLogin.parse_args()
        account = UserModel.find_by_username( args['username'] )
        if not account:
            return {'message': 'User {} does not exist.'.format(args['username']) }, 401
        # Guard: reject bad passwords before minting any tokens.
        if not UserModel.verify_hash( args['password'], account.password ):
            return {'message': 'Wrong credentials'}, 401
        identity = { 'username': args['username'], 'role': account.role.name }
        return {'message': 'Logged in as {}.'.format( account.username ),
                'access_token': create_access_token( identity = identity ),
                'refresh_token': create_refresh_token( identity = identity )}
## ---
# @brief TokenRefresh endpoint to provide a new access_token
# @return access_token
## ---
class TokenRefresh( Resource ):
    """POST: exchange a valid refresh token for a fresh access token."""

    @jwt_refresh_token_required
    def post( self ):
        identity = get_jwt_identity()
        return { 'access_token': create_access_token( identity = identity ) }
| [
"requests.post",
"flask_restful.marshal_with",
"flask_jwt_extended.get_jwt_identity",
"finsim_trans.models.TransactionModel.find_by_id",
"finsim_trans.models.UserModel.find_by_username",
"flask_restful.reqparse.RequestParser",
"flask_jwt_extended.create_access_token",
"finsim_trans.models.AccountModel... | [((2149, 2173), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (2171, 2173), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((2492, 2516), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (2514, 2516), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((2810, 2834), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (2832, 2834), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((2992, 3016), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (3014, 3016), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((3484, 3508), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (3506, 3508), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((3972, 3996), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (3994, 3996), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((4332, 4356), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (4354, 4356), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((4930, 4958), 'flask_restful.marshal_with', 'marshal_with', (['account_fields'], {}), '(account_fields)\n', (4942, 4958), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((5809, 5837), 'flask_restful.marshal_with', 'marshal_with', (['account_fields'], {}), '(account_fields)\n', (5821, 5837), False, 'from flask_restful import Resource, reqparse, marshal_with, fields\n'), ((6792, 6824), 'flask_restful.marshal_with', 'marshal_with', (['transaction_fields'], {}), '(transaction_fields)\n', (6804, 6824), False, 'from flask_restful import Resource, reqparse, marshal_with, 
fields\n'), ((5077, 5113), 'finsim_trans.models.AccountModel.find_all_by_id', 'AccountModel.find_all_by_id', (['user.id'], {}), '(user.id)\n', (5104, 5113), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((10053, 10109), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['destination']"], {}), "(data['destination'])\n", (10088, 10109), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((10131, 10173), 'finsim_trans.models.UserModel.find_by_id', 'UserModel.find_by_id', (['destAccount.owner_id'], {}), '(destAccount.owner_id)\n', (10151, 10173), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((17452, 17470), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (17468, 17470), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((17535, 17583), 'finsim_trans.models.UserModel.find_by_username', 'UserModel.find_by_username', (["identity['username']"], {}), "(identity['username'])\n", (17561, 17583), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((17674, 17748), 'finsim_trans.models.AccountModel', 'AccountModel', ([], {'balance': '(0.0)', 'owner_id': 'user.id', 'account_number': "data['number']"}), "(balance=0.0, owner_id=user.id, account_number=data['number'])\n", (17686, 17748), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((18346, 18390), 'finsim_trans.models.UserModel.find_by_username', 'UserModel.find_by_username', (["data['username']"], {}), "(data['username'])\n", (18372, 18390), False, 'from finsim_trans.models import UserModel, AccountModel, 
TransactionModel, TxType, FraudType, RoleType\n'), ((19502, 19546), 'finsim_trans.models.UserModel.find_by_username', 'UserModel.find_by_username', (["data['username']"], {}), "(data['username'])\n", (19528, 19546), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((19678, 19740), 'finsim_trans.models.UserModel.verify_hash', 'UserModel.verify_hash', (["data['password']", 'current_user.password'], {}), "(data['password'], current_user.password)\n", (19699, 19740), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((20401, 20419), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (20417, 20419), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((20443, 20485), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'current_user'}), '(identity=current_user)\n', (20462, 20485), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((5990, 6042), 'finsim_trans.models.AccountModel.account_exists', 'AccountModel.account_exists', (['user.id', "data['number']"], {}), "(user.id, data['number'])\n", (6017, 6042), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((6076, 6127), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['number']"], {}), "(data['number'])\n", (6111, 6127), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((6977, 7029), 'finsim_trans.models.AccountModel.account_exists', 'AccountModel.account_exists', (['user.id', "data['number']"], {}), "(user.id, data['number'])\n", (7004, 
7029), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((7063, 7114), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['number']"], {}), "(data['number'])\n", (7098, 7114), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((7144, 7195), 'finsim_trans.models.TransactionModel.find_all_by_account_id', 'TransactionModel.find_all_by_account_id', (['account.id'], {}), '(account.id)\n', (7183, 7195), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((7952, 8005), 'finsim_trans.models.AccountModel.account_exists', 'AccountModel.account_exists', (['user.id', "data['account']"], {}), "(user.id, data['account'])\n", (7979, 8005), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((8039, 8091), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['account']"], {}), "(data['account'])\n", (8074, 8091), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((8120, 8180), 'finsim_trans.models.TransactionModel.find_by_id', 'TransactionModel.find_by_id', (['account.id', "data['transaction']"], {}), "(account.id, data['transaction'])\n", (8147, 8180), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((9536, 9554), 'finsim_trans.helper.check_login_list', 'check_login_list', ([], {}), '()\n', (9552, 9554), False, 'from finsim_trans.helper import bnk_login, check_login_list\n'), ((12513, 12564), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['source']"], {}), "(data['source'])\n", (12548, 12564), False, 'from finsim_trans.models 
import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((14538, 14556), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (14554, 14556), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((14595, 14647), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['account']"], {}), "(data['account'])\n", (14630, 14647), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((15012, 15216), 'finsim_trans.models.TransactionModel', 'TransactionModel', ([], {'amount': "data['amount']", 'type': 'TxType.DEBIT', 'account_id': 'account.id', 'counterpart_name': "data['counterpart_name']", 'counterpart_acct': "data['counterpart_acct']", 'fraud_flag': 'FraudType.NONE'}), "(amount=data['amount'], type=TxType.DEBIT, account_id=\n account.id, counterpart_name=data['counterpart_name'], counterpart_acct\n =data['counterpart_acct'], fraud_flag=FraudType.NONE)\n", (15028, 15216), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((15244, 15282), 'finsim_trans.models.UserModel.find_by_id', 'UserModel.find_by_id', (['account.owner_id'], {}), '(account.owner_id)\n', (15264, 15282), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((16290, 16308), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (16306, 16308), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((16347, 16399), 'finsim_trans.models.AccountModel.find_by_account_number', 'AccountModel.find_by_account_number', (["data['account']"], {}), "(data['account'])\n", (16382, 16399), False, 'from 
finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((16422, 16627), 'finsim_trans.models.TransactionModel', 'TransactionModel', ([], {'amount': "data['amount']", 'type': 'TxType.CREDIT', 'account_id': 'account.id', 'counterpart_name': "data['counterpart_name']", 'counterpart_acct': "data['counterpart_acct']", 'fraud_flag': 'FraudType.NONE'}), "(amount=data['amount'], type=TxType.CREDIT, account_id=\n account.id, counterpart_name=data['counterpart_name'], counterpart_acct\n =data['counterpart_acct'], fraud_flag=FraudType.NONE)\n", (16438, 16627), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((18786, 18824), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'identity'}), '(identity=identity)\n', (18805, 18824), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((18857, 18896), 'flask_jwt_extended.create_refresh_token', 'create_refresh_token', ([], {'identity': 'identity'}), '(identity=identity)\n', (18877, 18896), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((19860, 19898), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'identity'}), '(identity=identity)\n', (19879, 19898), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((19931, 19970), 'flask_jwt_extended.create_refresh_token', 'create_refresh_token', ([], {'identity': 'identity'}), '(identity=identity)\n', (19951, 19970), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((5025, 5043), 
'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (5041, 5043), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((5946, 5964), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (5962, 5964), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((6933, 6951), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (6949, 6951), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((7908, 7926), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (7924, 7926), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((9752, 9770), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (9768, 9770), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((9997, 10015), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (10013, 10015), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), ((10226, 10249), 'decimal.Decimal', 'Decimal', (["data['amount']"], {}), "(data['amount'])\n", (10233, 10249), False, 'from decimal import Decimal\n'), ((14325, 14343), 'flask_jwt_extended.get_jwt_identity', 'get_jwt_identity', ([], {}), '()\n', (14341, 14343), False, 'from flask_jwt_extended import create_access_token, create_refresh_token, jwt_required, jwt_refresh_token_required, get_jwt_identity, get_raw_jwt\n'), 
((18550, 18591), 'finsim_trans.models.UserModel.generate_hash', 'UserModel.generate_hash', (["data['password']"], {}), "(data['password'])\n", (18573, 18591), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((12624, 12647), 'decimal.Decimal', 'Decimal', (["data['amount']"], {}), "(data['amount'])\n", (12631, 12647), False, 'from decimal import Decimal\n'), ((12999, 13040), 'finsim_trans.models.UserModel.find_by_id', 'UserModel.find_by_id', (['srcAccount.owner_id'], {}), '(srcAccount.owner_id)\n', (13019, 13040), False, 'from finsim_trans.models import UserModel, AccountModel, TransactionModel, TxType, FraudType, RoleType\n'), ((11209, 11258), 'finsim_trans.helper.bnk_login', 'bnk_login', (['bank[1]', 'current_component[3]', 'bank[2]'], {}), '(bank[1], current_component[3], bank[2])\n', (11218, 11258), False, 'from finsim_trans.helper import bnk_login, check_login_list\n'), ((11484, 11721), 'requests.post', 'requests.post', (["(bank[1] + '/bank/withdrawal')"], {'json': "{'amount': data['amount'], 'account': data['source'], 'counterpart_name':\n destUser.username, 'counterpart_acct': destAccount.account_number[-4:]}", 'headers': 'header', 'verify': '(False)'}), "(bank[1] + '/bank/withdrawal', json={'amount': data['amount'],\n 'account': data['source'], 'counterpart_name': destUser.username,\n 'counterpart_acct': destAccount.account_number[-4:]}, headers=header,\n verify=False)\n", (11497, 11721), False, 'import requests\n')] |
"""Test `with` tag parsing and rendering."""
# pylint: disable=missing-class-docstring
from dataclasses import dataclass
from dataclasses import field
from typing import Dict
from unittest import TestCase
from liquid import Environment
from liquid_extra.tags import WithTag
@dataclass
class Case:
    """Table driven test helper."""

    description: str  # human-readable label, surfaced via subTest(msg=...)
    template: str  # Liquid template source to render
    expect: str  # exact string the template is expected to render to
    globals: Dict[str, object] = field(default_factory=dict)  # render-time globals
class RenderWithTagTestCase(TestCase):
    def test_render_with_tag(self):
        """Render each table-driven case and compare against its expected output."""
        env = Environment()
        env.add_tag(WithTag)

        cases = [
            Case(
                description="block scoped variable",
                template=r"{{ x }}{% with x: 'foo' %}{{ x }}{% endwith %}{{ x }}",
                expect="foo",
            ),
            Case(
                description="block scoped alias",
                template=(
                    r"{% with p: collection.products.first %}"
                    r"{{ p.title }}"
                    r"{% endwith %}"
                    r"{{ p.title }}"
                    r"{{ collection.products.first.title }}"
                ),
                expect="A ShoeA Shoe",
                globals={"collection": {"products": [{"title": "A Shoe"}]}},
            ),
            Case(
                description="multiple block scoped variables",
                template=(
                    r"{% with a: 1, b: 3.4 %}"
                    r"{{ a }} + {{ b }} = {{ a | plus: b }}"
                    r"{% endwith %}"
                ),
                expect="1 + 3.4 = 4.4",
            ),
        ]

        for tc in cases:
            with self.subTest(msg=tc.description):
                rendered = env.from_string(tc.template, globals=tc.globals).render()
                self.assertEqual(rendered, tc.expect)
| [
"liquid.Environment",
"dataclasses.field"
] | [((429, 456), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (434, 456), False, 'from dataclasses import field\n'), ((1640, 1653), 'liquid.Environment', 'Environment', ([], {}), '()\n', (1651, 1653), False, 'from liquid import Environment\n')] |
from mpkg.common import Soft
from mpkg.load import Load
from mpkg.utils import Search
class Package(Soft):
    """qBittorrent package definition scraped from its SourceForge file listing."""

    ID = 'qbittorrent'

    def _prepare(self):
        """Populate version, date, changelog and per-arch installer links."""
        data = self.data
        data.args = '/S'  # silent-install switch for the NSIS installer
        parser = Load('http/common-zpcc.py', sync=False)[0][0].sourceforge
        url = 'https://sourceforge.net/projects/qbittorrent/files/qbittorrent-win32/'
        data.changelog = 'https://www.qbittorrent.org/news.php'
        # The newest entry carries the versioned folder name and its date.
        ver, data.date = parser(url)[0]
        data.ver = ver.split('qbittorrent-')[1]
        # Resolve every file in the version folder to a direct download link.
        download_links = []
        for item in parser(url + ver):
            download_links.append(parser(url + ver + '/' + item[0] + '/download'))
        for link in download_links:
            if not link.endswith('.exe'):
                continue
            arch_key = '64bit' if link.endswith('_x64_setup.exe') else '32bit'
            data.arch[arch_key] = link
| [
"mpkg.load.Load"
] | [((222, 261), 'mpkg.load.Load', 'Load', (['"""http/common-zpcc.py"""'], {'sync': '(False)'}), "('http/common-zpcc.py', sync=False)\n", (226, 261), False, 'from mpkg.load import Load\n')] |
import sys
def index_equals_value(arr):
    """Binary-search ``arr`` for the lowest index ``i`` with ``arr[i] == i``.

    Assumes ``arr`` is sorted ascending (the classic problem uses strictly
    increasing integers, which makes ``arr[i] - i`` non-decreasing).
    Returns -1 when no such index exists.
    """
    lo, hi = 0, len(arr) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        diff = arr[mid] - mid
        if diff < 0:
            # Value lags the index: any fixed point lies to the right.
            lo = mid + 1
        elif diff > 0:
            # Value leads the index: any fixed point lies to the left.
            hi = mid - 1
        elif lo == hi:
            # Interval collapsed exactly onto a match.
            return mid
        else:
            # Match found; keep tightening toward the leftmost occurrence.
            hi = mid
    return -1
if __name__ == "__main__":
    # The script takes no command-line arguments; anything extra is an error.
    if len(sys.argv) != 1:
        sys.exit(1)
    raw = input('>>> Enter numbers with comma-separated: ')
    numbers = [int(token) for token in raw.split(',')]
    print(index_equals_value(arr=numbers))
| [
"sys.exit"
] | [((527, 538), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (535, 538), False, 'import sys\n')] |
import torch
import torch.utils.data
from torch import nn
from torch.nn import functional as F
from rlkit.pythonplusplus import identity
from rlkit.torch import pytorch_util as ptu
import numpy as np
from rlkit.torch.conv_networks import CNN, DCNN
from rlkit.torch.vae.vae_base import GaussianLatentVAE
# Default encoder / LSTM / decoder hyper-parameters for 48x48 inputs
# (consumed by ConvLSTM2 via its `architecture` argument).
imsize48_default_architecture = dict(
    conv_args=dict(  # conv layers
        kernel_sizes=[5, 3, 3],
        n_channels=[16, 32, 64],
        strides=[3, 2, 2],
        output_size=6,  # flattened feature size fed to the VAE fc heads
    ),
    conv_kwargs=dict(
        hidden_sizes=[],  # linear layers after conv
        batch_norm_conv=False,
        batch_norm_fc=False,
    ),
    LSTM_args=dict(
        input_size=6,  # must equal the VAE representation_size (asserted in ConvLSTM2)
        hidden_size=128,
    ),
    LSTM_kwargs=dict(
        num_layers=2,
    ),
    deconv_args=dict(  # decoder mirrors the encoder back to image space
        hidden_sizes=[],
        deconv_input_width=3,
        deconv_input_height=3,
        deconv_input_channels=64,
        deconv_output_kernel_size=6,
        deconv_output_strides=3,
        deconv_output_channels=3,
        kernel_sizes=[3, 3],
        n_channels=[32, 16],
        strides=[2, 2],
    ),
    deconv_kwargs=dict(
        batch_norm_deconv=False,
        batch_norm_fc=False,
    )
)
class ConvLSTM2(nn.Module):
    """Convolutional VAE whose per-frame latent means drive a multi-layer LSTM.

    Each frame of a sequence is encoded by a CNN into a Gaussian VAE latent;
    the VAE means (optionally detached) are fed through an LSTM plus a linear
    head to produce a second, temporally-aware latent of the same size.
    NOTE(review): the "variance" paired with the LSTM latent in `encode` is a
    constant tensor of ones — only the VAE branch is stochastic.
    """

    def __init__(
            self,
            representation_size,
            architecture,
            encoder_class=CNN,
            decoder_class=DCNN,
            decoder_output_activation=identity,
            decoder_distribution='gaussian_identity_variance',
            input_channels=3,
            imsize=48,
            init_w=1e-3,
            min_variance=1e-3,
            hidden_init=ptu.fanin_init,
            detach_vae_output=True,
    ):
        """Build encoder, VAE heads, LSTM, LSTM head and decoder.

        :param representation_size: size of both VAE and LSTM latents; must
            equal architecture['LSTM_args']['input_size'].
        :param architecture: dict with conv/deconv/LSTM args and kwargs
            (see `imsize48_default_architecture`).
        :param min_variance: lower bound on the VAE variance; None disables
            the clamp.
        :param detach_vae_output: if True, the LSTM sees a detached copy of
            the VAE means so LSTM gradients do not flow into the encoder.
        """
        super(ConvLSTM2, self).__init__()
        self.representation_size = representation_size
        # record the empirical statistics of latents, when not sample from true prior, sample from them.
        self.dist_mu = np.zeros(self.representation_size)
        self.dist_std = np.ones(self.representation_size)
        if min_variance is None:
            self.log_min_variance = None
        else:
            self.log_min_variance = float(np.log(min_variance))
        self.input_channels = input_channels
        self.imsize = imsize
        self.imlength = self.imsize * self.imsize * self.input_channels
        self.detach_vae_output = detach_vae_output
        conv_args, conv_kwargs, deconv_args, deconv_kwargs = \
            architecture['conv_args'], architecture['conv_kwargs'], \
            architecture['deconv_args'], architecture['deconv_kwargs']
        self.encoder = encoder_class(
            **conv_args,
            paddings=np.zeros(len(conv_args['kernel_sizes']), dtype=np.int64),
            input_height=self.imsize,
            input_width=self.imsize,
            input_channels=self.input_channels,
            init_w=init_w,
            hidden_init=hidden_init,
            **conv_kwargs)
        self.lstm_args, self.lstm_kwargs = architecture['LSTM_args'], architecture['LSTM_kwargs']
        self.lstm = nn.LSTM(**self.lstm_args, **self.lstm_kwargs)
        self.lstm_num_layers = self.lstm_kwargs['num_layers']
        self.lstm_hidden_size = self.lstm_args['hidden_size']
        assert representation_size == self.lstm_args['input_size'], "lstm input is vae latent, \
            so lstm input size should be equal to representation_size!"
        # VAE heads: fc1 -> mean, fc2 -> log-variance (both from CNN features).
        self.vae_fc1 = nn.Linear(conv_args['output_size'], representation_size)
        self.vae_fc2 = nn.Linear(conv_args['output_size'], representation_size)
        self.vae_fc1.weight.data.uniform_(-init_w, init_w)
        self.vae_fc1.bias.data.uniform_(-init_w, init_w)
        self.vae_fc2.weight.data.uniform_(-init_w, init_w)
        self.vae_fc2.bias.data.uniform_(-init_w, init_w)
        # Projects the LSTM hidden output back to representation_size.
        self.lstm_fc = nn.Linear(self.lstm_hidden_size, representation_size)
        self.lstm_fc.weight.data.uniform_(-init_w, init_w)
        self.lstm_fc.bias.data.uniform_(-init_w, init_w)
        self.decoder = decoder_class(
            **deconv_args,
            fc_input_size=representation_size,
            init_w=init_w,
            output_activation=decoder_output_activation,
            paddings=np.zeros(len(deconv_args['kernel_sizes']), dtype=np.int64),
            hidden_init=hidden_init,
            **deconv_kwargs)
        self.decoder_distribution = decoder_distribution

    def from_vae_latents_to_lstm_latents(self, latents, lstm_hidden=None):
        """Run a single batch of VAE latents through the LSTM (seq_len == 1).

        :param latents: [batch, representation_size] VAE latents.
        :param lstm_hidden: optional (h0, c0) pair; zeros when omitted.
        :return: [batch, representation_size] LSTM latents.
        NOTE(review): the updated hidden state is discarded here.
        """
        batch_size, feature_size = latents.shape
        # print(latents.shape)
        lstm_input = latents
        lstm_input = lstm_input.view((1, batch_size, -1))
        if lstm_hidden is None:
            lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \
                ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size))
        h, hidden = self.lstm(lstm_input, lstm_hidden) # [seq_len, batch_size, lstm_hidden_size]
        lstm_latent = self.lstm_fc(h)
        lstm_latent = lstm_latent.view((batch_size, -1))
        return lstm_latent

    def encode(self, input, lstm_hidden=None, return_hidden=False, return_vae_latent=False):
        '''
        Encode a sequence of flattened images into LSTM (and optionally VAE)
        latent distribution parameters.

        input: [seq_len x batch x flatten_img_dim] of flattened images
        lstm_hidden: [lstm_layers x batch x lstm_hidden_size]
        return_hidden: also return the LSTM's final (h, c) state
        return_vae_latent: also return the VAE (mu, logvar)
        returns: (lstm_latent, ones[, vae_mu, vae_logvar])[, hidden] — the
        ones tensor is a constant stand-in for an LSTM "variance".
        mark: change depends on how latent distribution parameters are used
        '''
        seq_len, batch_size, feature_size = input.shape
        # print("in lstm encode: ", seq_len, batch_size, feature_size)
        # Fold seq and batch dims so the CNN sees one big batch of frames.
        input = input.reshape((-1, feature_size))
        feature = self.encoder(input) # [seq_len x batch x conv_output_size]
        vae_mu = self.vae_fc1(feature)
        if self.log_min_variance is None:
            vae_logvar = self.vae_fc2(feature)
        else:
            # Clamp the variance from below: logvar >= log(min_variance).
            vae_logvar = self.log_min_variance + torch.abs(self.vae_fc2(feature))
        # lstm_input = self.rsample((vae_mu, vae_logvar))
        # if self.detach_vae_output:
        #     lstm_input = lstm_input.detach()
        # The LSTM consumes the deterministic VAE means, detached so its
        # gradients do not reach the encoder when detach_vae_output is set.
        if self.detach_vae_output:
            lstm_input = vae_mu.detach().clone()
        else:
            lstm_input = vae_mu
        lstm_input = lstm_input.view((seq_len, batch_size, -1))
        # if self.detach_vae_output:
        #     lstm_input = lstm_input.detach()
        if lstm_hidden is None:
            lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \
                ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size))
        h, hidden = self.lstm(lstm_input, lstm_hidden) # [seq_len, batch_size, lstm_hidden_size]
        lstm_latent = self.lstm_fc(h)
        ret = (lstm_latent, ptu.ones_like(lstm_latent))
        if return_vae_latent:
            ret += (vae_mu, vae_logvar)
        if return_hidden:
            return ret, hidden
        return ret #, lstm_input # [seq_len, batch_size, representation_size]

    def forward(self, input, lstm_hidden=None, return_hidden=False):
        """Encode, reparameterize the VAE branch, and decode.

        :param input: [seq_len, batch, flattened image] sequence tensor.
        :return: reconstructed input, obs_distribution_params, latent_distribution_params
        (reconstructions, obs_distribution_params, VAE (mu, logvar),
        LSTM latents[, final LSTM hidden state when return_hidden]).
        mark: change to return the feature latents and the lstm latents
        """
        if return_hidden:
            latent_distribution_params, hidden = self.encode(input, lstm_hidden, return_hidden=True, return_vae_latent=True) # seq_len, batch_size, representation_size
        else:
            latent_distribution_params = self.encode(input, lstm_hidden, return_hidden=False, return_vae_latent=True)
        # Tuple layout from encode: (lstm_latent, ones, vae_mu, vae_logvar).
        vae_latent_distribution_params = latent_distribution_params[2:]
        lstm_latent_encodings = latent_distribution_params[0]
        vae_latents = self.reparameterize(vae_latent_distribution_params)
        reconstructions, obs_distribution_params = self.decode(vae_latents) # [seq_len * batch_size, representation_size]
        if return_hidden:
            return reconstructions, obs_distribution_params, vae_latent_distribution_params, lstm_latent_encodings, hidden
        return reconstructions, obs_distribution_params, vae_latent_distribution_params, lstm_latent_encodings

    def reparameterize(self, latent_distribution_params):
        """Sample via the reparameterization trick in training; mean at eval."""
        if self.training:
            return self.rsample(latent_distribution_params)
        else:
            return latent_distribution_params[0]

    def kl_divergence(self, latent_distribution_params):
        """Mean KL(q(z|x) || N(0, I)) over all frames in the batch/sequence."""
        mu, logvar = latent_distribution_params
        mu = mu.view((-1, self.representation_size)) # fold the possible seq_len dim
        logvar = logvar.view((-1, self.representation_size))
        return - 0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1).mean()

    def get_encoding_from_latent_distribution_params(self, latent_distribution_params):
        # Deterministic encoding = the distribution mean, moved to CPU.
        return latent_distribution_params[0].cpu()

    def rsample(self, latent_distribution_params):
        """Reparameterized sample: mu + std * eps with eps ~ N(0, I)."""
        mu, logvar = latent_distribution_params
        stds = (0.5 * logvar).exp()
        epsilon = ptu.randn(*mu.size())
        latents = epsilon * stds + mu
        return latents

    def decode(self, latents):
        """Decode latents to flattened images.

        :return: (reconstructions, obs_distribution_params) where the params
            list matches `self.decoder_distribution`.
        :raises NotImplementedError: for unsupported distributions.
        """
        decoded = self.decoder(latents).view(-1,
                                             self.imsize * self.imsize * self.input_channels)
        if self.decoder_distribution == 'bernoulli':
            return decoded, [decoded]
        elif self.decoder_distribution == 'gaussian_identity_variance':
            # Pixel values are clamped into [0, 1]; variance is fixed to 1.
            return torch.clamp(decoded, 0, 1), [torch.clamp(decoded, 0, 1),
                                                  torch.ones_like(decoded)]
        else:
            raise NotImplementedError('Distribution {} not supported'.format(
                self.decoder_distribution))

    def logprob(self, inputs, obs_distribution_params):
        """Log-likelihood of `inputs` under the decoder distribution.

        :param inputs: [seq_len, batch, feature] target images (only the
            first `imlength` features of each row are scored).
        :raises NotImplementedError: for unsupported distributions.
        """
        seq_len, batch_size, feature_size = inputs.shape
        inputs = inputs.view((-1, feature_size))
        if self.decoder_distribution == 'bernoulli':
            inputs = inputs.narrow(start=0, length=self.imlength,
                                   dim=1).contiguous().view(-1, self.imlength)
            # obs_distribution_params[0] = obs_distribution_params[0].view((-1, feature_size))
            log_prob = - F.binary_cross_entropy(
                obs_distribution_params[0],
                inputs,
                reduction='elementwise_mean'
            ) * self.imlength
            return log_prob
        if self.decoder_distribution == 'gaussian_identity_variance':
            # obs_distribution_params[0] = obs_distribution_params[0].view((-1, feature_size))
            inputs = inputs.narrow(start=0, length=self.imlength,
                                   dim=1).contiguous().view(-1, self.imlength)
            log_prob = -1 * F.mse_loss(inputs, obs_distribution_params[0],
                                        reduction='elementwise_mean')
            return log_prob
        else:
            raise NotImplementedError('Distribution {} not supported'.format(
                self.decoder_distribution))

    def init_hidden(self, batch_size=1):
        """Return a zeroed (h0, c0) LSTM state for `batch_size` sequences."""
        lstm_hidden = (ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size), \
            ptu.zeros(self.lstm_num_layers, batch_size, self.lstm_hidden_size))
        return lstm_hidden
"torch.ones_like",
"torch.nn.functional.mse_loss",
"numpy.ones",
"torch.nn.LSTM",
"numpy.log",
"torch.nn.functional.binary_cross_entropy",
"numpy.zeros",
"torch.nn.Linear",
"rlkit.torch.pytorch_util.ones_like",
"torch.clamp",
"rlkit.torch.pytorch_util.zeros"
] | [((1868, 1902), 'numpy.zeros', 'np.zeros', (['self.representation_size'], {}), '(self.representation_size)\n', (1876, 1902), True, 'import numpy as np\n'), ((1928, 1961), 'numpy.ones', 'np.ones', (['self.representation_size'], {}), '(self.representation_size)\n', (1935, 1961), True, 'import numpy as np\n'), ((2993, 3038), 'torch.nn.LSTM', 'nn.LSTM', ([], {}), '(**self.lstm_args, **self.lstm_kwargs)\n', (3000, 3038), False, 'from torch import nn\n'), ((3357, 3413), 'torch.nn.Linear', 'nn.Linear', (["conv_args['output_size']", 'representation_size'], {}), "(conv_args['output_size'], representation_size)\n", (3366, 3413), False, 'from torch import nn\n'), ((3437, 3493), 'torch.nn.Linear', 'nn.Linear', (["conv_args['output_size']", 'representation_size'], {}), "(conv_args['output_size'], representation_size)\n", (3446, 3493), False, 'from torch import nn\n'), ((3752, 3805), 'torch.nn.Linear', 'nn.Linear', (['self.lstm_hidden_size', 'representation_size'], {}), '(self.lstm_hidden_size, representation_size)\n', (3761, 3805), False, 'from torch import nn\n'), ((6637, 6663), 'rlkit.torch.pytorch_util.ones_like', 'ptu.ones_like', (['lstm_latent'], {}), '(lstm_latent)\n', (6650, 6663), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((10989, 11055), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (10998, 11055), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((11080, 11146), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (11089, 11146), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((2093, 2113), 'numpy.log', 'np.log', (['min_variance'], {}), '(min_variance)\n', (2099, 2113), True, 'import numpy as np\n'), ((4626, 4692), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', 
(['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (4635, 4692), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((4717, 4783), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (4726, 4783), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((6304, 6370), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (6313, 6370), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((6395, 6461), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['self.lstm_num_layers', 'batch_size', 'self.lstm_hidden_size'], {}), '(self.lstm_num_layers, batch_size, self.lstm_hidden_size)\n', (6404, 6461), True, 'from rlkit.torch import pytorch_util as ptu\n'), ((10644, 10720), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['inputs', 'obs_distribution_params[0]'], {'reduction': '"""elementwise_mean"""'}), "(inputs, obs_distribution_params[0], reduction='elementwise_mean')\n", (10654, 10720), True, 'from torch.nn import functional as F\n'), ((9363, 9389), 'torch.clamp', 'torch.clamp', (['decoded', '(0)', '(1)'], {}), '(decoded, 0, 1)\n', (9374, 9389), False, 'import torch\n'), ((10111, 10204), 'torch.nn.functional.binary_cross_entropy', 'F.binary_cross_entropy', (['obs_distribution_params[0]', 'inputs'], {'reduction': '"""elementwise_mean"""'}), "(obs_distribution_params[0], inputs, reduction=\n 'elementwise_mean')\n", (10133, 10204), True, 'from torch.nn import functional as F\n'), ((9392, 9418), 'torch.clamp', 'torch.clamp', (['decoded', '(0)', '(1)'], {}), '(decoded, 0, 1)\n', (9403, 9418), False, 'import torch\n'), ((9468, 9492), 'torch.ones_like', 'torch.ones_like', (['decoded'], {}), '(decoded)\n', (9483, 9492), False, 'import 
torch\n')] |
from tkinter import Tk
from src.homework.widget.main_frame import MainFrame
class ClockApp(Tk):
    """Root Tk window for the clock application; hosts the main frame."""

    def __init__(self, *args, **kwargs):
        Tk.__init__(self, *args, **kwargs)
        content = MainFrame(self)
        content.pack()
frame.pack()
if __name__ == '__main__':
    # Script entry point: create the root window and block in the Tk event
    # loop until the window is closed.
    app = ClockApp()
    app.mainloop()
| [
"tkinter.Tk.__init__",
"src.homework.widget.main_frame.MainFrame"
] | [((146, 180), 'tkinter.Tk.__init__', 'Tk.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (157, 180), False, 'from tkinter import Tk\n'), ((198, 213), 'src.homework.widget.main_frame.MainFrame', 'MainFrame', (['self'], {}), '(self)\n', (207, 213), False, 'from src.homework.widget.main_frame import MainFrame\n')] |
import pandas as pd
from data_science_pipeline_code.feature_engineering_functions import *
from data_science_layer.pipeline.basic_regressor_pipeline import BasicRegressorPipeline
from data_science_layer.preprocessing.default_scaler import DefaultScaler
import pickle, os
# One (initially empty) hyper-parameter dict per model; presumably filled in
# by later pipeline stages — TODO confirm against the rest of the file.
model_params = [dict(), dict(), dict(), dict()]
import plotly.express as px
def train(dataset):
    """Train one regression pipeline per hour of the day and evaluate each on a hold-out period.

    :param dataset: pandas DataFrame of hourly bike-share records; must contain at least the
        columns 'hr', 'workingday', 'holiday', 'dteday', 'atemp', 'windspeed' and 'cnt'.
    :return: tuple ``(pipelines, test_set_results)`` — one fitted ``BasicRegressorPipeline``
        and one results DataFrame (columns 'predicted' and 'actual') per hour (24 of each).
        Each pipeline is also pickled next to this file as ``pipeline<hour>.pkl``.
    """
    script_dir = os.path.dirname(__file__)
    # Prep Dataset: coerce flag columns to int and derive calendar parts from the date string.
    dataset['hr'] = dataset['hr'].astype(int)
    dataset['workingday'] = dataset['workingday'].astype(int)
    dataset['holiday'] = dataset['holiday'].astype(int)
    dataset['dt'] = pd.to_datetime(dataset['dteday'])
    dataset['month'] = dataset['dt'].dt.month
    dataset['year'] = dataset['dt'].dt.year
    dataset['day'] = dataset['dt'].dt.day
    dataset['previous_day_count'] = 0.0
    # Engineer Features (Previous Day's Hourly Rate for daily model, baseline growth models for long term)
    cutoff_date = pd.to_datetime('2012-06-01')
    # One derived frame per hour of the day, indexed by timestamp.
    datasets_per_hour = [get_previous_days_count(dataset, x) for x in range(24)]
    datasets_per_hour = [x.set_index(['dt']) for x in datasets_per_hour]
    # Training and model selection Data Science Pipeline (start of day model):
    # everything before the cutoff date trains; everything after is the hold-out set.
    train_sets = [x.loc[:cutoff_date] for x in datasets_per_hour]
    test_sets = [x.loc[cutoff_date:] for x in datasets_per_hour]
    pipelines = []
    test_set_results = []
    for idx, x in enumerate(train_sets):
        # NOTE(review): 'previous_week_count' is presumably added by
        # get_previous_days_count — verify against that helper.
        features = ['atemp', 'workingday', 'holiday', 'windspeed', 'previous_week_count']
        y_val = ['cnt']
        # Split data into x/y
        x_train = x[features]
        y_train = x[y_val]
        # copy data
        y_train_scaled = y_train.copy()
        x_train_scaled = x_train.copy()
        # prep hold out test data
        current_test = test_sets[idx]
        x_test = current_test[features]
        x_test_scaled = x_test.copy()
        y_test = current_test[y_val]
        y_test_scaled = y_test.copy()
        # get output results DF started
        output_container = y_test.copy()
        output_container.reset_index()
        # create and fit pipeline (scales the target via DefaultScaler)
        pipeline = BasicRegressorPipeline()
        pipeline.preprocess_y.append(DefaultScaler())
        pipeline.fit(x_train_scaled, y_train_scaled)
        # predict on hold out data
        predictions = pipeline(x_test, y_result=y_test)
        output_container['predicted'] = predictions
        output_container['actual'] = y_test_scaled[y_val].values
        # Write out pipeline to file for later use/deployment
        with open(script_dir + '/pipeline{}.pkl'.format(idx), "wb") as f:
            pickle.dump(pipeline, f)
        pipelines.append(pipeline)
        test_set_results.append(output_container)
    # Return pipelines and results on holdout set
    return pipelines, test_set_results
if __name__ == '__main__':
    # Load in data
    dataset = pd.read_csv('dataset.csv')
    # Train pipelines (one per hour of the day)
    pipelines, test_results = train(dataset)
    # Plot Results: predicted vs. actual counts over the hold-out period
    results = pd.concat(test_results)
    fig = px.scatter(results, x='actual', y='predicted')
    fig.show()
    # You could run a deployment on future data below using pickled pipelines
    # I would refactor the data prep in train into functions, call those then call the pipelines on the new data
| [
"plotly.express.scatter",
"pickle.dump",
"pandas.read_csv",
"data_science_layer.preprocessing.default_scaler.DefaultScaler",
"os.path.dirname",
"data_science_layer.pipeline.basic_regressor_pipeline.BasicRegressorPipeline",
"pandas.concat",
"pandas.to_datetime"
] | [((387, 412), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (402, 412), False, 'import pickle, os\n'), ((618, 651), 'pandas.to_datetime', 'pd.to_datetime', (["dataset['dteday']"], {}), "(dataset['dteday'])\n", (632, 651), True, 'import pandas as pd\n'), ((950, 978), 'pandas.to_datetime', 'pd.to_datetime', (['"""2012-06-01"""'], {}), "('2012-06-01')\n", (964, 978), True, 'import pandas as pd\n'), ((2883, 2909), 'pandas.read_csv', 'pd.read_csv', (['"""dataset.csv"""'], {}), "('dataset.csv')\n", (2894, 2909), True, 'import pandas as pd\n'), ((3010, 3033), 'pandas.concat', 'pd.concat', (['test_results'], {}), '(test_results)\n', (3019, 3033), True, 'import pandas as pd\n'), ((3044, 3090), 'plotly.express.scatter', 'px.scatter', (['results'], {'x': '"""actual"""', 'y': '"""predicted"""'}), "(results, x='actual', y='predicted')\n", (3054, 3090), True, 'import plotly.express as px\n'), ((2132, 2156), 'data_science_layer.pipeline.basic_regressor_pipeline.BasicRegressorPipeline', 'BasicRegressorPipeline', ([], {}), '()\n', (2154, 2156), False, 'from data_science_layer.pipeline.basic_regressor_pipeline import BasicRegressorPipeline\n'), ((2194, 2209), 'data_science_layer.preprocessing.default_scaler.DefaultScaler', 'DefaultScaler', ([], {}), '()\n', (2207, 2209), False, 'from data_science_layer.preprocessing.default_scaler import DefaultScaler\n'), ((2622, 2646), 'pickle.dump', 'pickle.dump', (['pipeline', 'f'], {}), '(pipeline, f)\n', (2633, 2646), False, 'import pickle, os\n')] |
# Generated by Django 3.0.2 on 2020-01-16 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the ``alacode`` app.

    Adjusts the ``code`` model's coding fields: q9-q11 become yes/no
    integer flags (enthusiasm, fear, anger) and q12 a free-text notes field.
    Do not edit the operations by hand; regenerate with ``makemigrations``.
    """

    dependencies = [
        ('alacode', '0002_auto_20200116_1512'),
    ]

    operations = [
        migrations.AlterField(
            model_name='code',
            name='q10',
            field=models.IntegerField(choices=[(0, 'No'), (1, 'Yes')], default=0, help_text='Does the tweet express feelings of anger?', verbose_name='q10'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q11',
            field=models.IntegerField(choices=[(0, 'No'), (1, 'Yes')], default=0, help_text='Does the tweet express feelings of fear?', verbose_name='q11'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q12',
            field=models.CharField(default='no notes', help_text='Notes', max_length=500, verbose_name='q12'),
        ),
        migrations.AlterField(
            model_name='code',
            name='q9',
            field=models.IntegerField(choices=[(0, 'No'), (1, 'Yes')], default=0, help_text='Does the tweet express feelings of enthusiasm?', verbose_name='q9'),
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((331, 474), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'No'), (1, 'Yes')]", 'default': '(0)', 'help_text': '"""Does the tweet express feelings of anger?"""', 'verbose_name': '"""q10"""'}), "(choices=[(0, 'No'), (1, 'Yes')], default=0, help_text=\n 'Does the tweet express feelings of anger?', verbose_name='q10')\n", (350, 474), False, 'from django.db import migrations, models\n'), ((586, 728), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'No'), (1, 'Yes')]", 'default': '(0)', 'help_text': '"""Does the tweet express feelings of fear?"""', 'verbose_name': '"""q11"""'}), "(choices=[(0, 'No'), (1, 'Yes')], default=0, help_text=\n 'Does the tweet express feelings of fear?', verbose_name='q11')\n", (605, 728), False, 'from django.db import migrations, models\n'), ((840, 935), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""no notes"""', 'help_text': '"""Notes"""', 'max_length': '(500)', 'verbose_name': '"""q12"""'}), "(default='no notes', help_text='Notes', max_length=500,\n verbose_name='q12')\n", (856, 935), False, 'from django.db import migrations, models\n'), ((1047, 1194), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(0, 'No'), (1, 'Yes')]", 'default': '(0)', 'help_text': '"""Does the tweet express feelings of enthusiasm?"""', 'verbose_name': '"""q9"""'}), "(choices=[(0, 'No'), (1, 'Yes')], default=0, help_text=\n 'Does the tweet express feelings of enthusiasm?', verbose_name='q9')\n", (1066, 1194), False, 'from django.db import migrations, models\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
# Read the colour triples (one "r,g,b" row per line) from cols.csv.
xs = []
ys = []
zs = []
with open("cols.csv", "r") as data:
    for line in data.readlines():
        cols = [int(c) for c in line.split(',')]
        xs.append(cols[0])
        ys.append(cols[1])
        zs.append(cols[2])

# Raw scatter of the colour points.
fig = plt.figure(1, figsize=(1, 2))
ax = Axes3D(fig)
ax.scatter(xs, ys, zs, c='r', marker='o')

# Cluster the points into 10 groups.
est = KMeans(n_clusters=10)
# BUG FIX: zip() is a lazy iterator in Python 3, so np.array(zip(...))
# produced a 0-d object array and est.fit() failed. Materialise it first
# so NumPy builds a proper (n, 3) numeric array.
data = np.array(list(zip(xs, ys, zs)))
est.fit(data)
labels = est.labels_

# Scatter again, coloured by the assigned cluster label.
fig = plt.figure(2, figsize=(1, 2))
ax = Axes3D(fig)
# BUG FIX: np.float was deprecated and removed in NumPy 1.24 — use the
# builtin float instead.
ax.scatter(data[:, 0], data[:, 1], data[:, 2],
           c=labels.astype(float), edgecolor='k')
plt.show()
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.figure",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show"
] | [((384, 413), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(1, 2)'}), '(1, figsize=(1, 2))\n', (394, 413), True, 'import matplotlib.pyplot as plt\n'), ((425, 436), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (431, 436), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((496, 517), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(10)'}), '(n_clusters=10)\n', (502, 517), False, 'from sklearn.cluster import KMeans\n'), ((603, 632), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(1, 2)'}), '(2, figsize=(1, 2))\n', (613, 632), True, 'import matplotlib.pyplot as plt\n'), ((644, 655), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (650, 655), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((765, 775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (773, 775), True, 'import matplotlib.pyplot as plt\n')] |
"""
game.py
Game class which contains the player, target, and all the walls.
"""
from math import cos, sin
import matplotlib.collections as mc
import pylab as plt
from numpy import asarray, pi
from config import Config
from environment.robot import Robot
from utils.dictionary import *
from utils.myutils import load_pickle, store_pickle
from utils.vec2d import Vec2d
class Game:
    """
    A game environment is built up from the following segments:
        * robot: The player manoeuvring in the environment
        * target: Robot that must be reached by the robot
    """
    # Fixed attribute set: avoids per-instance __dict__ and catches typos
    # when many game instances are created.
    __slots__ = {
        'bot_config', 'done', 'game_config', 'id', 'init_distance', 'noise', 'player', 'player_angle_noise',
        'save_path', 'score', 'silent', 'spawn_function', 'steps_taken', 'stop_if_reached', 'target', 'wall_bound',
        'x_axis', 'y_axis'
    }
    def __init__(self,
                 game_id: int,
                 config: Config,
                 player_noise: float = 0,
                 noise: bool = True,
                 overwrite: bool = False,
                 save_path: str = '',
                 silent: bool = True,
                 spawn_func=None,
                 stop_if_reached: bool = True,
                 wall_bound: bool = True,
                 ):
        """
        Define a new game.

        :param game_id: Game id
        :param config: Configuration file (only needed to pass during creation)
        :param player_noise: The maximum noise added to the player's initial location
        :param noise: Add noise when progressing the game
        :param overwrite: Overwrite pre-existing games
        :param save_path: Save and load the game from different directories
        :param silent: Do not print anything
        :param spawn_func: Function that determines which target-position should spawn
        :param stop_if_reached: Stop the simulation when agent reaches target
        :param wall_bound: Bound the position of the agent to be within the walls of the game
        """
        assert type(game_id) == int
        # Set the game's configuration
        self.bot_config = config.bot
        self.game_config = config.game
        # Environment specific parameters
        self.noise: bool = noise  # Add noise to the game-environment
        self.silent: bool = silent  # True: Do not print out statistics
        self.save_path: str = save_path if save_path else 'environment/games_db/'
        self.wall_bound: bool = wall_bound  # Permit robot to go outside of the boundaries
        self.stop_if_reached: bool = stop_if_reached  # Terminate the simulation ones the target is found
        self.player_angle_noise: float = player_noise  # The noise added to the player's initial orientation
        # Placeholders for parameters
        self.done: bool = False  # Game has finished
        self.id: int = game_id  # Game's ID-number
        self.init_distance: float = 0  # Denotes the initial distance from target
        self.player: Robot = None  # Candidate-robot
        self.score: int = 0  # Denotes the number of targets found
        self.spawn_function = None  # Function determining which targets to spawn
        self.steps_taken: int = 0  # Number of steps taken by the agent
        self.target: Vec2d = None  # Target-robot
        self.x_axis: int = 0  # Width of the game
        self.y_axis: int = 0  # Height of the game
        # Check if game already exists, if not create new game
        # (load() returns False when no pickle exists for this id).
        if overwrite or not self.load():
            assert spawn_func is not None
            self.create_empty_game(spawn_func)
    def __str__(self):
        return f"game_{self.id:05d}"
    # ------------------------------------------------> MAIN METHODS <------------------------------------------------ #
    def close(self):
        """Final state of the agent's statistics (returned as a dictionary)."""
        return {
            D_DIST_TO_TARGET: self.get_distance_to_target(),
            D_DONE: self.done,
            D_GAME_ID: self.id,
            D_POS: self.player.pos,
            D_SCORE: self.score,
            D_TIME_TAKEN: self.steps_taken / self.game_config.fps,
            D_INIT_DIST: self.init_distance,
        }
    def get_observation(self):
        """Get the current observation of the game in the form of a dictionary."""
        return {
            D_DONE: self.done,
            D_SENSOR_LIST: self.player.get_sensor_readings(),
        }
    def randomize(self):
        """Randomize the maze: perturb the player's angle and resample a target."""
        self.player.randomize(max_noise=self.player_angle_noise)
        self.spawn_function.randomize()
        self.sample_target()
    def reset(self):
        """Reset the game and return initial observations."""
        self.done = False
        self.score = 0
        self.steps_taken = 0
        self.spawn_function.reset()
        self.sample_target()
        self.player.reset(noise=self.noise)
        obs = self.get_observation()
        # get_sensor_readings() must run before measuring the distance.
        self.init_distance = self.get_distance_to_target()  # The sensor-values must be read in first!
        return obs
    def step(self, l: float, r: float):
        """
        Progress one step in the game.

        :param l: Left wheel speed [-1..1]
        :param r: Right wheel speed [-1..1]
        :return: Observation (Dictionary), target_reached (Boolean)
        """
        dt = 1.0 / self.game_config.fps
        return self.step_dt(dt=dt, l=l, r=r)
    def step_dt(self, dt: float, l: float, r: float):
        """
        Progress one step in the game based on a predefined delta-time. This method should only be used for debugging or
        visualization purposes.

        :param dt: Delta time
        :param l: Left wheel speed [-1..1]
        :param r: Right wheel speed [-1..1]
        :return: Observation (Dictionary), target_reached (Boolean)
        """
        self.steps_taken += 1
        self.player.drive(dt, lw=l, rw=r)
        # Check if player is not outside of playing-field if the game is wall-bound;
        # if it is, undo the last move instead of clipping the position.
        if self.wall_bound and \
                (not (self.player.radius <= self.player.pos[0] <= self.x_axis - self.player.radius) or
                 not (self.player.radius <= self.player.pos[1] <= self.y_axis - self.player.radius)):
            self.player.set_back()
        # Check if target reached; either terminate or spawn the next target.
        if self.get_distance_to_target() <= self.game_config.target_reached:
            self.score += 1
            if self.stop_if_reached:
                self.done = True
            else:
                self.sample_target()
        # Return the current observations
        return self.get_observation()
    # -----------------------------------------------> HELPER METHODS <----------------------------------------------- #
    def create_empty_game(self, spawn_func):
        """Create an empty game: player centred in the field, facing upwards."""
        self.x_axis = self.game_config.x_axis
        self.y_axis = self.game_config.y_axis
        self.spawn_function = spawn_func
        self.player = Robot(game=self)
        self.set_player_init_angle(a=pi / 2)
        self.set_player_init_pos(p=Vec2d(self.game_config.x_axis / 2, self.game_config.y_axis / 2))
        # Save the new game
        self.save()
        if not self.silent: print(f"New game created under id: {self.id}")
    def get_distance_to_target(self):
        """Get the distance between robot and target."""
        return (self.target - self.player.pos).get_length()
    def sample_target(self):
        """Sample a target from the target_list."""
        self.target = Vec2d().load_tuple(self.spawn_function())
    def set_player_init_angle(self, a: float):
        """Set a new initial angle for the player."""
        self.player.set_init_angle(a=a)
    def set_player_init_pos(self, p: Vec2d):
        """Set a new initial position for the player."""
        self.player.set_init_pos(p=p)
    # ---------------------------------------------> FUNCTIONAL METHODS <--------------------------------------------- #
    def save(self):
        """Pickle the game's persistent state (geometry, player init, spawn function)."""
        persist_dict = dict()
        persist_dict.update({D_X_AXIS: self.x_axis})
        persist_dict.update({D_Y_AXIS: self.y_axis})
        persist_dict.update({D_WALL_BOUND: self.wall_bound})
        persist_dict.update({D_TARGET_REACHED: self.stop_if_reached})
        persist_dict.update({D_ANGLE: self.player.init_angle})  # Initial angle of player
        persist_dict.update({D_ANGLE_NOISE: self.player_angle_noise})  # Noise added to the initial angle of the player
        persist_dict.update({D_POS: tuple(self.player.init_pos)})  # Initial position of player
        persist_dict.update({D_SPAWN_F: self.spawn_function})  # Function deciding on which target to use
        store_pickle(persist_dict, f'{self.save_path}{self}')
    def load(self):
        """Load in a game, specified by its current id and return True if successful."""
        try:
            game = load_pickle(f'{self.save_path}{self}')
            self.x_axis = game.get(D_X_AXIS)
            self.y_axis = game.get(D_Y_AXIS)
            self.wall_bound = game.get(D_WALL_BOUND)
            self.stop_if_reached = game.get(D_TARGET_REACHED)
            self.player = Robot(game=self)  # Create a dummy-player to set values on
            self.set_player_init_angle(game.get(D_ANGLE))
            self.player_angle_noise = game.get(D_ANGLE_NOISE)
            self.set_player_init_pos(Vec2d().load_tuple(game.get(D_POS)))
            self.spawn_function = game.get(D_SPAWN_F)
            self.spawn_function.reset()
            self.sample_target()
            if not self.silent: print(f"Existing game loaded with id: {self.id}")
            return True
        except FileNotFoundError:
            # No pickle for this id yet -> caller creates a fresh game.
            return False
    def get_blueprint(self, ax=None, show_player: bool = False, annotate: bool = True):
        """The blueprint map of the board (matplotlib Figure)."""
        if not ax: fig, ax = plt.subplots()
        # Draw the (implicit) boundary walls
        if self.wall_bound:
            walls = []
            corners = asarray([(0, 0), (0, self.y_axis), (self.x_axis, self.y_axis), (self.x_axis, 0)])
            for c in range(4):
                walls.append([corners[c], corners[(c + 1) % 4]])
            lc = mc.LineCollection(walls, linewidths=5, colors='k')
            ax.add_collection(lc)
        # Add all possible targets to map (only spawn functions exposing 'locations')
        if "locations" in self.spawn_function.__slots__:
            for i, t in enumerate(self.spawn_function.locations):
                plt.plot(t[0], t[1], 'go')
                if annotate and type(self.spawn_function.locations) == list:
                    plt.annotate(str(i + 1), xy=(t[0] + 0.1, t[1] + 0.1))
        # Add player to map if requested (arrow shows the initial heading)
        if show_player:
            x = self.player.init_pos[0]
            y = self.player.init_pos[1]
            dx = cos(self.player.noisy_init_angle)
            dy = sin(self.player.noisy_init_angle)
            plt.arrow(x, y, dx, dy, head_width=0.1, length_includes_head=True)
        # Adjust the boundaries
        plt.xlim(0, self.x_axis)
        plt.ylim(0, self.y_axis)
        # Return the figure in its current state
        return ax
def get_game(i: int, cfg: Config = None, noise: bool = True):
    """Build a silent Game instance for the given game id.

    :param i: Game-ID
    :param cfg: Config object; a fresh default Config is used when not given
    :param noise: Add noise to the game
    :return: Game or GameCy object
    """
    chosen_config = cfg or Config()
    return Game(game_id=i, config=chosen_config, noise=noise, silent=True)
| [
"pylab.ylim",
"environment.robot.Robot",
"pylab.arrow",
"utils.myutils.load_pickle",
"config.Config",
"utils.vec2d.Vec2d",
"numpy.asarray",
"pylab.plot",
"matplotlib.collections.LineCollection",
"math.cos",
"pylab.xlim",
"pylab.subplots",
"math.sin",
"utils.myutils.store_pickle"
] | [((7103, 7119), 'environment.robot.Robot', 'Robot', ([], {'game': 'self'}), '(game=self)\n', (7108, 7119), False, 'from environment.robot import Robot\n'), ((8882, 8935), 'utils.myutils.store_pickle', 'store_pickle', (['persist_dict', 'f"""{self.save_path}{self}"""'], {}), "(persist_dict, f'{self.save_path}{self}')\n", (8894, 8935), False, 'from utils.myutils import load_pickle, store_pickle\n'), ((11259, 11283), 'pylab.xlim', 'plt.xlim', (['(0)', 'self.x_axis'], {}), '(0, self.x_axis)\n', (11267, 11283), True, 'import pylab as plt\n'), ((11292, 11316), 'pylab.ylim', 'plt.ylim', (['(0)', 'self.y_axis'], {}), '(0, self.y_axis)\n', (11300, 11316), True, 'import pylab as plt\n'), ((11660, 11668), 'config.Config', 'Config', ([], {}), '()\n', (11666, 11668), False, 'from config import Config\n'), ((9082, 9120), 'utils.myutils.load_pickle', 'load_pickle', (['f"""{self.save_path}{self}"""'], {}), "(f'{self.save_path}{self}')\n", (9093, 9120), False, 'from utils.myutils import load_pickle, store_pickle\n'), ((9352, 9368), 'environment.robot.Robot', 'Robot', ([], {'game': 'self'}), '(game=self)\n', (9357, 9368), False, 'from environment.robot import Robot\n'), ((10085, 10099), 'pylab.subplots', 'plt.subplots', ([], {}), '()\n', (10097, 10099), True, 'import pylab as plt\n'), ((10227, 10312), 'numpy.asarray', 'asarray', (['[(0, 0), (0, self.y_axis), (self.x_axis, self.y_axis), (self.x_axis, 0)]'], {}), '([(0, 0), (0, self.y_axis), (self.x_axis, self.y_axis), (self.x_axis,\n 0)])\n', (10234, 10312), False, 'from numpy import asarray, pi\n'), ((10422, 10472), 'matplotlib.collections.LineCollection', 'mc.LineCollection', (['walls'], {'linewidths': '(5)', 'colors': '"""k"""'}), "(walls, linewidths=5, colors='k')\n", (10439, 10472), True, 'import matplotlib.collections as mc\n'), ((11046, 11079), 'math.cos', 'cos', (['self.player.noisy_init_angle'], {}), '(self.player.noisy_init_angle)\n', (11049, 11079), False, 'from math import cos, sin\n'), ((11097, 11130), 'math.sin', 
'sin', (['self.player.noisy_init_angle'], {}), '(self.player.noisy_init_angle)\n', (11100, 11130), False, 'from math import cos, sin\n'), ((11143, 11209), 'pylab.arrow', 'plt.arrow', (['x', 'y', 'dx', 'dy'], {'head_width': '(0.1)', 'length_includes_head': '(True)'}), '(x, y, dx, dy, head_width=0.1, length_includes_head=True)\n', (11152, 11209), True, 'import pylab as plt\n'), ((7200, 7263), 'utils.vec2d.Vec2d', 'Vec2d', (['(self.game_config.x_axis / 2)', '(self.game_config.y_axis / 2)'], {}), '(self.game_config.x_axis / 2, self.game_config.y_axis / 2)\n', (7205, 7263), False, 'from utils.vec2d import Vec2d\n'), ((7665, 7672), 'utils.vec2d.Vec2d', 'Vec2d', ([], {}), '()\n', (7670, 7672), False, 'from utils.vec2d import Vec2d\n'), ((10697, 10723), 'pylab.plot', 'plt.plot', (['t[0]', 't[1]', '"""go"""'], {}), "(t[0], t[1], 'go')\n", (10705, 10723), True, 'import pylab as plt\n'), ((9568, 9575), 'utils.vec2d.Vec2d', 'Vec2d', ([], {}), '()\n', (9573, 9575), False, 'from utils.vec2d import Vec2d\n')] |
import numpy as np
import torch as th
import torch.nn as nn
from rls.nn.mlps import MLP
from rls.nn.represent_nets import RepresentationNetwork
class QattenMixer(nn.Module):
    """Multi-head attention mixing network (Qatten).

    Mixes per-agent Q-values into a single Q_tot using attention weights
    conditioned on a learned representation of the global state.
    """

    def __init__(self,
                 n_agents: int,
                 state_spec,
                 rep_net_params,
                 agent_own_state_size: bool,
                 query_hidden_units: int,
                 query_embed_dim: int,
                 key_embed_dim: int,
                 head_hidden_units: int,
                 n_attention_head: int,
                 constrant_hidden_units: int,
                 is_weighted: bool = True):
        # Per-head query networks embed the state; key networks embed each
        # agent's own-state slice. 'constrant' spelling kept from the original.
        super().__init__()
        self.n_agents = n_agents
        self.rep_net = RepresentationNetwork(obs_spec=state_spec,
                                             rep_net_params=rep_net_params)
        self.u_dim = agent_own_state_size  # TODO: implement this
        self.query_embed_dim = query_embed_dim
        self.key_embed_dim = key_embed_dim
        self.n_attention_head = n_attention_head
        self.is_weighted = is_weighted
        self.query_embedding_layers = nn.ModuleList()
        self.key_embedding_layers = nn.ModuleList()
        for i in range(self.n_attention_head):
            self.query_embedding_layers.append(MLP(input_dim=self.rep_net.h_dim, hidden_units=query_hidden_units,
                                                   layer='linear', act_fn='relu', output_shape=query_embed_dim))
            self.key_embedding_layers.append(
                nn.Linear(self.u_dim, self.key_embed_dim))
        # Scaled dot-product denominator (sqrt of the query dimension).
        self.scaled_product_value = np.sqrt(self.query_embed_dim)
        self.head_embedding_layer = MLP(input_dim=self.rep_net.h_dim, hidden_units=head_hidden_units,
                                        layer='linear', act_fn='relu', output_shape=n_attention_head)
        self.constrant_value_layer = MLP(input_dim=self.rep_net.h_dim, hidden_units=constrant_hidden_units,
                                         layer='linear', act_fn='relu', output_shape=1)

    def forward(self, q_values, state, **kwargs):
        """
        params:
            q_values: [T, B, 1, N]
            state: [T, B, *]
        returns:
            q_tot: [T, B, 1]
        """
        time_step = q_values.shape[0]   # T
        batch_size = q_values.shape[1]  # B
        # state: [T, B, *]
        state_feat, _ = self.rep_net(state, **kwargs)   # [T, B, *]
        us = self._get_us(state_feat)   # [T, B, N, *]
        q_lambda_list = []
        for i in range(self.n_attention_head):
            # Scaled dot-product attention of the state query against each
            # agent's key, softmaxed over agents.
            state_embedding = self.query_embedding_layers[i](
                state_feat)     # [T, B, *]
            u_embedding = self.key_embedding_layers[i](us)  # [T, B, N, *]
            state_embedding = state_embedding.unsqueeze(-2)     # [T, B, 1, *]
            u_embedding = u_embedding.swapaxes(-1, -2)  # [T, B, *, N]
            raw_lambda = (state_embedding @ u_embedding) / \
                self.scaled_product_value   # [T, B, 1, N]
            q_lambda = raw_lambda.softmax(dim=-1)   # [T, B, 1, N]
            q_lambda_list.append(q_lambda)  # H * [T, B, 1, N]
        q_lambda_list = th.cat(q_lambda_list, dim=-2)   # [T, B, H, N]
        q_lambda_list = q_lambda_list.swapaxes(-1, -2)  # [T, B, N, H]
        q_h = q_values @ q_lambda_list  # [T, B, 1, H]
        if self.is_weighted:
            # Weighted head combination; |.| keeps the mixing monotonic in q_h.
            # shape: [-1, n_attention_head, 1]
            w_h = th.abs(self.head_embedding_layer(state_feat))   # [T, B, H]
            w_h = w_h.unsqueeze(-1)     # [T, B, H, 1]
            sum_q_h = q_h @ w_h     # [T, B, 1, 1]
            sum_q_h = sum_q_h.view(time_step, batch_size, 1)    # [T, B, 1]
        else:
            sum_q_h = q_h.sum(-1)   # [T, B, 1]
        c = self.constrant_value_layer(state_feat)  # [T, B, 1]
        q_tot = sum_q_h + c     # [T, B, 1]
        return q_tot

    def _get_us(self, state_feat):
        # NOTE(review): assumes the first n_agents * u_dim entries of the state
        # feature are the concatenated per-agent own-state slices — TODO confirm.
        time_step = state_feat.shape[0]     # T
        batch_size = state_feat.shape[1]    # B
        agent_own_state_size = self.u_dim
        with th.no_grad():
            us = state_feat[:, :, :agent_own_state_size * self.n_agents].view(
                time_step, batch_size, self.n_agents, agent_own_state_size)  # [T, B, N, *]
        return us
| [
"numpy.sqrt",
"torch.nn.ModuleList",
"torch.nn.Linear",
"rls.nn.mlps.MLP",
"torch.no_grad",
"torch.cat",
"rls.nn.represent_nets.RepresentationNetwork"
] | [((714, 787), 'rls.nn.represent_nets.RepresentationNetwork', 'RepresentationNetwork', ([], {'obs_spec': 'state_spec', 'rep_net_params': 'rep_net_params'}), '(obs_spec=state_spec, rep_net_params=rep_net_params)\n', (735, 787), False, 'from rls.nn.represent_nets import RepresentationNetwork\n'), ((1117, 1132), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1130, 1132), True, 'import torch.nn as nn\n'), ((1169, 1184), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1182, 1184), True, 'import torch.nn as nn\n'), ((1601, 1630), 'numpy.sqrt', 'np.sqrt', (['self.query_embed_dim'], {}), '(self.query_embed_dim)\n', (1608, 1630), True, 'import numpy as np\n'), ((1668, 1800), 'rls.nn.mlps.MLP', 'MLP', ([], {'input_dim': 'self.rep_net.h_dim', 'hidden_units': 'head_hidden_units', 'layer': '"""linear"""', 'act_fn': '"""relu"""', 'output_shape': 'n_attention_head'}), "(input_dim=self.rep_net.h_dim, hidden_units=head_hidden_units, layer=\n 'linear', act_fn='relu', output_shape=n_attention_head)\n", (1671, 1800), False, 'from rls.nn.mlps import MLP\n'), ((1874, 1995), 'rls.nn.mlps.MLP', 'MLP', ([], {'input_dim': 'self.rep_net.h_dim', 'hidden_units': 'constrant_hidden_units', 'layer': '"""linear"""', 'act_fn': '"""relu"""', 'output_shape': '(1)'}), "(input_dim=self.rep_net.h_dim, hidden_units=constrant_hidden_units,\n layer='linear', act_fn='relu', output_shape=1)\n", (1877, 1995), False, 'from rls.nn.mlps import MLP\n'), ((3110, 3139), 'torch.cat', 'th.cat', (['q_lambda_list'], {'dim': '(-2)'}), '(q_lambda_list, dim=-2)\n', (3116, 3139), True, 'import torch as th\n'), ((3983, 3995), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (3993, 3995), True, 'import torch as th\n'), ((1279, 1411), 'rls.nn.mlps.MLP', 'MLP', ([], {'input_dim': 'self.rep_net.h_dim', 'hidden_units': 'query_hidden_units', 'layer': '"""linear"""', 'act_fn': '"""relu"""', 'output_shape': 'query_embed_dim'}), "(input_dim=self.rep_net.h_dim, hidden_units=query_hidden_units, layer=\n 
'linear', act_fn='relu', output_shape=query_embed_dim)\n", (1282, 1411), False, 'from rls.nn.mlps import MLP\n'), ((1521, 1562), 'torch.nn.Linear', 'nn.Linear', (['self.u_dim', 'self.key_embed_dim'], {}), '(self.u_dim, self.key_embed_dim)\n', (1530, 1562), True, 'import torch.nn as nn\n')] |
import numpy as np
import os
import nibabel as nib
from skimage.transform import resize
from tqdm import tqdm
import matplotlib.pyplot as plt
import SimpleITK as sitk
# Target spacing (z, y, x) per task id (first character of the task folder name).
spacing = {
    0: [1.5, 0.8, 0.8],
    1: [1.5, 0.8, 0.8],
    2: [1.5, 0.8, 0.8],
    3: [1.5, 0.8, 0.8],
    4: [1.5, 0.8, 0.8],
    5: [1.5, 0.8, 0.8],
    6: [1.5, 0.8, 0.8],
}

ori_path = './0123456'
new_path = './0123456_spacing_same'


def _resample_case(img_path, is_label, task_id, save_path, out_name):
    """Resample one volume to the task's target spacing and write it out.

    :param img_path: path of the source NIfTI volume
    :param is_label: True for segmentation masks (nearest-neighbour, keep dtype);
        False for images (cubic interpolation, stored as int32)
    :param task_id: key into the ``spacing`` table
    :param save_path: output directory (created if missing)
    :param out_name: output file name
    """
    print("Processing %s" % out_name)
    imageITK = sitk.ReadImage(img_path)
    image = sitk.GetArrayFromImage(imageITK)
    # SimpleITK reports (x, y, z); reorder to the array's (z, y, x) layout.
    ori_size = np.array(imageITK.GetSize())[[2, 1, 0]]
    ori_spacing = np.array(imageITK.GetSpacing())[[2, 1, 0]]
    ori_origin = imageITK.GetOrigin()
    ori_direction = imageITK.GetDirection()

    target_spacing = np.array(spacing[task_id])
    if ori_spacing[0] < 0 or ori_spacing[1] < 0 or ori_spacing[2] < 0:
        print("error")
    spc_ratio = ori_spacing / target_spacing

    # Labels keep their integer dtype and use nearest-neighbour resampling
    # with edge padding; images use cubic interpolation and int32 output.
    data_type = image.dtype if is_label else np.int32
    order = 0 if is_label else 3
    mode_ = 'edge' if is_label else 'constant'

    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    image = image.astype(float)
    # BUG FIX: the Kidney branch previously computed mode_ but never passed it
    # to resize(); mode=mode_ is now applied consistently for both layouts.
    image_resize = resize(image,
                          (int(ori_size[0] * spc_ratio[0]),
                           int(ori_size[1] * spc_ratio[1]),
                           int(ori_size[2] * spc_ratio[2])),
                          order=order, mode=mode_, cval=0, clip=True,
                          preserve_range=True)
    image_resize = np.round(image_resize).astype(data_type)

    if not os.path.exists(save_path):
        os.makedirs(save_path)
    saveITK = sitk.GetImageFromArray(image_resize)
    saveITK.SetSpacing(target_spacing[[2, 1, 0]])
    saveITK.SetOrigin(ori_origin)
    saveITK.SetDirection(ori_direction)
    sitk.WriteImage(saveITK, os.path.join(save_path, out_name))


for root1, dirs1, _ in os.walk(ori_path):
    for i_dirs1 in tqdm(sorted(dirs1)):  # e.g. 0Liver, 1Kidney, ...
        task_id = int(i_dirs1[0])
        ###########################################################################
        # The Kidney task stores its data under an extra origin/case_xxxxx layer.
        if i_dirs1 == '1Kidney':
            for root2, dirs2, files2 in os.walk(os.path.join(root1, i_dirs1)):
                for root3, dirs3, files3 in os.walk(os.path.join(root2, 'origin')):
                    for i_dirs3 in sorted(dirs3):  # case_00000
                        for root4, dirs4, files4 in os.walk(os.path.join(root3, i_dirs3)):
                            for i_files4 in sorted(files4):
                                _resample_case(
                                    os.path.join(root4, i_files4),
                                    i_files4 == 'segmentation.nii.gz',
                                    task_id,
                                    os.path.join(new_path, i_dirs1, 'origin', i_dirs3),
                                    i_files4)
        #############################################################################
        # Standard layout: <task>/<imagesTr|labelsTr>/<file>.
        for root2, dirs2, files2 in os.walk(os.path.join(root1, i_dirs1)):
            for i_dirs2 in sorted(dirs2):  # imagesTr
                for root3, dirs3, files3 in os.walk(os.path.join(root2, i_dirs2)):
                    for i_files3 in sorted(files3):
                        if i_files3[0] == '.':
                            continue
                        _resample_case(
                            os.path.join(root3, i_files3),
                            i_dirs2 == 'labelsTr',
                            task_id,
                            os.path.join(new_path, i_dirs1, i_dirs2),
                            i_files3)
"os.path.exists",
"SimpleITK.GetImageFromArray",
"os.makedirs",
"numpy.round",
"os.path.join",
"SimpleITK.GetArrayFromImage",
"numpy.array",
"SimpleITK.ReadImage",
"os.walk"
] | [((445, 462), 'os.walk', 'os.walk', (['ori_path'], {}), '(ori_path)\n', (452, 462), False, 'import os\n'), ((3910, 3938), 'os.path.join', 'os.path.join', (['root1', 'i_dirs1'], {}), '(root1, i_dirs1)\n', (3922, 3938), False, 'import os\n'), ((736, 764), 'os.path.join', 'os.path.join', (['root1', 'i_dirs1'], {}), '(root1, i_dirs1)\n', (748, 764), False, 'import os\n'), ((820, 849), 'os.path.join', 'os.path.join', (['root2', '"""origin"""'], {}), "(root2, 'origin')\n", (832, 849), False, 'import os\n'), ((4048, 4076), 'os.path.join', 'os.path.join', (['root2', 'i_dirs2'], {}), '(root2, i_dirs2)\n', (4060, 4076), False, 'import os\n'), ((4345, 4374), 'os.path.join', 'os.path.join', (['root3', 'i_files3'], {}), '(root3, i_files3)\n', (4357, 4374), False, 'import os\n'), ((4410, 4434), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['img_path'], {}), '(img_path)\n', (4424, 4434), True, 'import SimpleITK as sitk\n'), ((4467, 4499), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['imageITK'], {}), '(imageITK)\n', (4489, 4499), True, 'import SimpleITK as sitk\n'), ((4870, 4896), 'numpy.array', 'np.array', (['spacing[task_id]'], {}), '(spacing[task_id])\n', (4878, 4896), True, 'import numpy as np\n'), ((5881, 5921), 'os.path.join', 'os.path.join', (['new_path', 'i_dirs1', 'i_dirs2'], {}), '(new_path, i_dirs1, i_dirs2)\n', (5893, 5921), False, 'import os\n'), ((6065, 6101), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image_resize'], {}), '(image_resize)\n', (6087, 6101), True, 'import SimpleITK as sitk\n'), ((1068, 1096), 'os.path.join', 'os.path.join', (['root3', 'i_dirs3'], {}), '(root3, i_dirs3)\n', (1080, 1096), False, 'import os\n'), ((5953, 5978), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (5967, 5978), False, 'import os\n'), ((6008, 6030), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (6019, 6030), False, 'import os\n'), ((6335, 6368), 'os.path.join', 'os.path.join', (['save_path', 
'i_files3'], {}), '(save_path, i_files3)\n', (6347, 6368), False, 'import os\n'), ((1313, 1342), 'os.path.join', 'os.path.join', (['root4', 'i_files4'], {}), '(root4, i_files4)\n', (1325, 1342), False, 'import os\n'), ((1386, 1410), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['img_path'], {}), '(img_path)\n', (1400, 1410), True, 'import SimpleITK as sitk\n'), ((1451, 1483), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['imageITK'], {}), '(imageITK)\n', (1473, 1483), True, 'import SimpleITK as sitk\n'), ((1902, 1928), 'numpy.array', 'np.array', (['spacing[task_id]'], {}), '(spacing[task_id])\n', (1910, 1928), True, 'import numpy as np\n'), ((3224, 3274), 'os.path.join', 'os.path.join', (['new_path', 'i_dirs1', '"""origin"""', 'i_dirs3'], {}), "(new_path, i_dirs1, 'origin', i_dirs3)\n", (3236, 3274), False, 'import os\n'), ((3442, 3478), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image_resize'], {}), '(image_resize)\n', (3464, 3478), True, 'import SimpleITK as sitk\n'), ((5772, 5794), 'numpy.round', 'np.round', (['image_resize'], {}), '(image_resize)\n', (5780, 5794), True, 'import numpy as np\n'), ((3314, 3339), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (3328, 3339), False, 'import os\n'), ((3377, 3399), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (3388, 3399), False, 'import os\n'), ((3744, 3777), 'os.path.join', 'os.path.join', (['save_path', 'i_files4'], {}), '(save_path, i_files4)\n', (3756, 3777), False, 'import os\n'), ((3099, 3121), 'numpy.round', 'np.round', (['image_resize'], {}), '(image_resize)\n', (3107, 3121), True, 'import numpy as np\n')] |
from PIL import Image
from PIL.ImageDraw import Draw
from svglib.svglib import svg2rlg as svg
from reportlab.graphics.renderPM import drawToFile as render
import os
from fcord import relative as rel
def start():
    """Convert the bundled chess-piece SVGs into black and white PNG sprites.

    Each piece SVG is rendered onto a pure-green background, the green
    pixels are keyed out as transparency, and the remaining piece pixels
    are recoloured twice to produce a "black" and a "white" 512x512
    centred sprite.  The intermediate keyed PNG is deleted afterwards.
    """
    print("Converting images...")
    pieces = ["images/chess-bishop.svg", "images/chess-king.svg", "images/chess-knight.svg", "images/chess-pawn.svg", "images/chess-queen.svg", "images/chess-rook.svg"]
    pieces = [rel("mods/chess/" + p) for p in pieces]

    def shade(colour, amount):
        # Shift every channel by -amount (negative `amount` lightens),
        # clamped to the valid 0..255 range.
        return tuple(min(max(channel - amount, 0), 255) for channel in colour)

    # (fill colour, filename suffix) for the two piece colours.  These are
    # loop-invariant, so they are built once instead of once per piece.
    palettes = [(shade((53, 46, 36, 255), -40), "black"),
                (shade((231, 201, 137, 255), -40), "white")]

    for path in pieces:
        drawing = svg(path)
        base = path[:path.rfind(".")]
        keyed_png = base + ".png"
        # Render on pure green so piece pixels can be keyed out below.
        render(drawing, keyed_png, fmt="PNG", bg=0x00ff00)
        img = Image.open(keyed_png).convert("RGBA")
        px = img.load()
        piece_pixels = []
        for y in range(img.size[1]):
            for x in range(img.size[0]):
                if px[x, y][1] > 10:
                    # Strong green channel -> background; make transparent.
                    px[x, y] = (0, 0, 0, 0)
                else:
                    piece_pixels.append((x, y))
        for fill, suffix in palettes:
            # Recolour the piece pixels in place for this palette entry.
            for x, y in piece_pixels:
                px[x, y] = fill
            canvas = Image.new("RGBA", (512, 512), (0, 0, 0, 0))
            scaled = img.resize((img.size[0] * 3 // 4, img.size[1] * 3 // 4), Image.NEAREST)
            offset = ((512 - scaled.size[0]) // 2, (512 - scaled.size[1]) // 2)
            canvas.paste(scaled, offset, scaled)
            out = base + "_" + suffix + ".png"
            canvas.save(out)
            print("Converted file '" + out + "'!")
        os.remove(keyed_png)
# Allow running the converter directly as a script.
if __name__ == "__main__":
    print("CONVERTING FROM MAIN!")
    start()
| [
"PIL.Image.open",
"PIL.Image.new",
"fcord.relative",
"PIL.ImageDraw.Draw",
"svglib.svglib.svg2rlg",
"reportlab.graphics.renderPM.drawToFile",
"os.remove"
] | [((543, 549), 'svglib.svglib.svg2rlg', 'svg', (['p'], {}), '(p)\n', (546, 549), True, 'from svglib.svglib import svg2rlg as svg\n'), ((610, 643), 'reportlab.graphics.renderPM.drawToFile', 'render', (['s', 'f'], {'fmt': '"""PNG"""', 'bg': '(65280)'}), "(s, f, fmt='PNG', bg=65280)\n", (616, 643), True, 'from reportlab.graphics.renderPM import drawToFile as render\n'), ((659, 672), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (669, 672), False, 'from PIL import Image\n'), ((736, 743), 'PIL.ImageDraw.Draw', 'Draw', (['i'], {}), '(i)\n', (740, 743), False, 'from PIL.ImageDraw import Draw\n'), ((1690, 1702), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1699, 1702), False, 'import os\n'), ((468, 490), 'fcord.relative', 'rel', (["('mods/chess/' + p)"], {}), "('mods/chess/' + p)\n", (471, 490), True, 'from fcord import relative as rel\n'), ((1341, 1384), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(512, 512)', '(0, 0, 0, 0)'], {}), "('RGBA', (512, 512), (0, 0, 0, 0))\n", (1350, 1384), False, 'from PIL import Image\n')] |
# Simple single neuron network to model a regression task
from __future__ import print_function
import numpy as np
#np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
# Generate a synthetic regression dataset: inputs are uniform in
# [-50, 50) and the target is the sum of the two inputs, y = x0 + x1,
# so a single linear neuron can represent it exactly.
X_train = np.random.rand(600,2) * 100.0 - 50.0
Y_train = X_train[:,0] + X_train[:,1]
X_test = np.random.rand(100,2) * 100.0 - 50.0
Y_test = X_test[:,0] + X_test[:,1]
# Keras works in float32 by default; match the input dtype.
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# A single linear neuron (2 inputs -> 1 output) is sufficient to fit
# y = x0 + x1 exactly.
model = Sequential()
model.add(Dense(1,input_shape=(2,),init='uniform', activation='linear'))
model.compile(loss='mean_absolute_error', optimizer='rmsprop') # author notes an 'mse' loss converges faster than MAE here
def GetWeights(chromo):
    """Flatten the network's layer weights into ``chromo.genes``.

    NOTE(review): the hard-coded sizes (401408 = 784*512,
    262144 = 512*512, 5120 = 512*10) and the layer indices 0/3/6 appear
    to target a 784-512-512-10 MLP (an MNIST-style classifier with
    Dropout/Activation layers between the Dense layers), NOT the
    2-input/1-output model defined above -- calling this against the
    current model would fail.  Its only call site (in
    ``Chromosome.__init__``) is commented out, so this function is
    effectively dead code; confirm the intended topology before reuse.
    """
    # Layer 0: 784x512 Dense -- weights then biases, flattened in order.
    [w,b] = model.layers[0].get_weights() #get weights and biases
    w.shape =(401408,)
    b.shape = (512,)
    chromo.genes[0,0:401408] = w
    chromo.genes[0,401408:401408+512] = b
    # Layer 3: 512x512 Dense.
    [w,b] = model.layers[3].get_weights()
    w.shape = (262144,)
    b.shape = (512,)
    chromo.genes[0,401408+512:401408+512+262144] = w
    chromo.genes[0,401408+512+262144:401408+512+262144+512] = b
    # Layer 6: 512x10 Dense (output layer).
    [w,b] = model.layers[6].get_weights()
    w.shape = (5120,)
    b.shape = (10,)
    chromo.genes[0,401408+512+262144+512:401408+512+262144+512+5120] = w
    chromo.genes[0,401408+512+262144+512+5120:401408+512+262144+512+5120+10] = b
def SetWeights(chromo):
    """Copy the chromosome's genome into the single Dense layer.

    Gene layout: ``genes[0, 0:2]`` holds the two input weights and
    ``genes[0, 2]`` the bias of the 2-input / 1-output layer.
    """
    genome = chromo.genes
    weights = genome[0, 0:2].reshape(2, 1)
    bias = genome[0, 2:3].reshape(1)
    model.layers[0].set_weights([weights, bias])
def EvalModel(j):
    """Return the model's loss over the full training set.

    The argument ``j`` (the population index of the chromosome being
    scored) is accepted only for logging hooks and is otherwise unused.
    """
    return model.evaluate(X_train, Y_train, verbose=0)
######################
# Evolutionary Algorithm for optimizing Neural Networks
import numpy as np
import sys
import datetime
import math
# EA Parameters
gene_count = 3          # genome length: 2 weights + 1 bias of the Dense layer
population_size = 100
p_mutation = 0.15       # per-gene mutation probability (alternative: 1.0/gene_count)
p_crossover = 0.5       # per-segment crossover probability (alternative: 0.0001)
loss_delta = 1          # change in average loss between generations (stop criterion)
avg_loss_prev = 0;
# Zero timedelta used as an accumulator for time spent copying genes
# into the network (see ComputeFitness).
total_gene_set_time = datetime.datetime.utcnow() - datetime.datetime.utcnow()
class Chromosome:
    """A candidate genome: a row vector of weights in [-1.0, 1.0).

    Attributes:
        gene_count: number of genes in the genome.
        genes: (1, gene_count) float array of weight values.
        fitness: last evaluated loss; meaningful only while
            is_fitness_invalid is False.
    """

    # Class-level defaults; instances overwrite these as they evolve.
    fitness = 0.0
    is_fitness_invalid = True

    def __init__(self, gene_count):
        self.gene_count = gene_count
        # Uniform random genome in [-1, 1): rand() yields [0, 1).
        self.genes = np.random.rand(1, gene_count) * 2.0 - 1.0
        self.is_fitness_invalid = True
def ComputeFitness(pop,min_loss):
    """Evaluate every stale chromosome and track the best loss.

    Loads each invalidated genome into the network, scores it on the
    training set, and accumulates the wall-clock time spent copying
    weights into ``total_gene_set_time``.

    Returns:
        (average loss over the population,
         lowest loss seen so far,
         index of the chromosome that matched or beat ``min_loss``,
         or -1 if none did)
    """
    global total_gene_set_time
    loss_sum = 0.0
    best_index = -1
    for idx in range(pop.size):
        chromo = pop[0, idx]
        if chromo.is_fitness_invalid:
            # Load the genome into the network and score it on the
            # whole training set; the score is this chromosome's loss.
            started = datetime.datetime.utcnow()
            SetWeights(chromo)
            total_gene_set_time += datetime.datetime.utcnow() - started
            chromo.fitness = EvalModel(idx)
            chromo.is_fitness_invalid = False
        # '>=' so a tie with the current best also refreshes the index.
        if min_loss >= chromo.fitness:
            min_loss = chromo.fitness
            best_index = idx
        loss_sum += chromo.fitness
    return (loss_sum / pop.size, min_loss, best_index)
def MutatePart(winner,loser,p_mutation,p_crossover,begin,end):
    """Cross over and mutate one gene segment of ``loser`` in place.

    With probability ``p_crossover`` the segment [begin, end) of the
    winner's genome overwrites the loser's; every gene in the segment is
    then independently perturbed with probability ``p_mutation``.

    Args:
        winner: better-scoring Chromosome (read only).
        loser: Chromosome whose genes are modified in place.
        p_mutation: per-gene mutation probability.
        p_crossover: probability of copying the winner's segment.
        begin, end: half-open gene index range to operate on.
    """
    count = end - begin
    if np.random.rand() < p_crossover:
        # Whole-segment copy from the winner.  (A crossover site `cs`
        # was computed here previously but never used -- dead code and a
        # wasted RNG draw -- so it has been removed.)
        loser.genes[0, begin:end] = winner.genes[0, begin:end]
    # Each selected gene is shifted by a uniform value in
    # [-mutation_factor/2, +mutation_factor/2).
    mutation_factor = 2.0
    select = np.random.rand(1, count) < p_mutation  # per-gene mutation mask
    deltas = np.random.rand(1, count) * mutation_factor - (mutation_factor / 2)
    loser.genes[0, begin:end] = loser.genes[0, begin:end] + select * deltas
def Mutate(winner,loser,p_mutation,p_crossover):
    """Recombine ``loser`` with ``winner`` segment by segment.

    The 3-gene genome is split into the Dense layer's weight genes
    [0, 2) and its bias gene [2, 3); each segment is crossed over and
    mutated independently.  Marks the loser stale and returns it.
    """
    # (weights, then bias) gene ranges of the single Dense layer.
    for begin, end in ((0, 2), (2, 3)):
        MutatePart(winner, loser, p_mutation, p_crossover, begin, end)
    loser.is_fitness_invalid = True
    return loser
#-------------------------------------------------------------------------------------------------
# Initialize the population: a (1, population_size) object array of
# randomly initialised Chromosomes.
vChromosome = np.vectorize(Chromosome)  # vectorized Chromosome constructor
arg_array = np.full((1, population_size), gene_count, dtype=int)
population = vChromosome(arg_array)

# Evolve via binary tournaments: pairs are drawn at random and the
# loser of each pair is overwritten by a crossed-over/mutated copy.
# The aim is to minimize the training loss.
t1 = datetime.datetime.utcnow()  # wall-clock timer for the whole run
min_loss = float('inf')  # portable replacement for Python-2-only sys.maxint
best_so_far = None
generation_count = 0
while generation_count < 1000:  # fixed generation budget; loss_delta stop disabled
    (avg_loss, min_loss, mi) = ComputeFitness(population, min_loss)
    if mi >= 0:
        # ComputeFitness found a new best chromosome this generation.
        best_so_far = population[0, mi]
    loss_delta = math.fabs(avg_loss - avg_loss_prev)
    avg_loss_prev = avg_loss
    # Random pairing for the binary tournaments.
    idx = np.random.permutation(population_size)
    for kk in range(population_size // 2):  # '//' keeps the bound an int on Python 3
        I1 = idx[2 * kk]
        I2 = idx[2 * kk + 1]
        P1 = population[0, I1]
        P2 = population[0, I2]
        # Minimization: the lower-loss parent wins and replaces the loser.
        if P1.fitness <= P2.fitness:
            population[0, I2] = Mutate(P1, P2, p_mutation, p_crossover)
        else:
            population[0, I1] = Mutate(P2, P1, p_mutation, p_crossover)
    generation_count += 1

print('==========================================')
# Evaluate the best chromosome found on the held-out test set.
SetWeights(best_so_far)
score_ = model.evaluate(X_test, Y_test, verbose=1)
print('Test score:', score_)
print('time taken =', (datetime.datetime.utcnow() - t1))
print('time taken to set gene =', total_gene_set_time)
# Dump the final layer parameters for inspection.
for k, l in enumerate(model.layers):
    weights = l.get_weights()
    print('len weights =', len(weights))
    for n, param in enumerate(weights):
        for p in enumerate(param):
            print('param = ', p)
| [
"numpy.random.rand",
"datetime.datetime.utcnow",
"numpy.random.permutation",
"keras.models.Sequential",
"math.fabs",
"numpy.full",
"numpy.vectorize",
"keras.layers.core.Dense"
] | [((714, 726), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (724, 726), False, 'from keras.models import Sequential\n'), ((5385, 5409), 'numpy.vectorize', 'np.vectorize', (['Chromosome'], {}), '(Chromosome)\n', (5397, 5409), True, 'import numpy as np\n'), ((5455, 5507), 'numpy.full', 'np.full', (['(1, population_size)', 'gene_count'], {'dtype': 'int'}), '((1, population_size), gene_count, dtype=int)\n', (5462, 5507), True, 'import numpy as np\n'), ((5647, 5673), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (5671, 5673), False, 'import datetime\n'), ((737, 800), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'input_shape': '(2,)', 'init': '"""uniform"""', 'activation': '"""linear"""'}), "(1, input_shape=(2,), init='uniform', activation='linear')\n", (742, 800), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((2455, 2481), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2479, 2481), False, 'import datetime\n'), ((2484, 2510), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2508, 2510), False, 'import datetime\n'), ((4219, 4243), 'numpy.random.rand', 'np.random.rand', (['(1)', 'count'], {}), '(1, count)\n', (4233, 4243), True, 'import numpy as np\n'), ((5929, 5964), 'math.fabs', 'math.fabs', (['(avg_loss - avg_loss_prev)'], {}), '(avg_loss - avg_loss_prev)\n', (5938, 5964), False, 'import math\n'), ((6434, 6472), 'numpy.random.permutation', 'np.random.permutation', (['population_size'], {}), '(population_size)\n', (6455, 6472), True, 'import numpy as np\n'), ((397, 419), 'numpy.random.rand', 'np.random.rand', (['(600)', '(2)'], {}), '(600, 2)\n', (411, 419), True, 'import numpy as np\n'), ((482, 504), 'numpy.random.rand', 'np.random.rand', (['(100)', '(2)'], {}), '(100, 2)\n', (496, 504), True, 'import numpy as np\n'), ((3818, 3834), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3832, 3834), True, 'import numpy as np\n'), ((7220, 
7246), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (7244, 7246), False, 'import datetime\n'), ((3229, 3255), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3253, 3255), False, 'import datetime\n'), ((4362, 4386), 'numpy.random.rand', 'np.random.rand', (['(1)', 'count'], {}), '(1, count)\n', (4376, 4386), True, 'import numpy as np\n'), ((2718, 2747), 'numpy.random.rand', 'np.random.rand', (['(1)', 'gene_count'], {}), '(1, gene_count)\n', (2732, 2747), True, 'import numpy as np\n'), ((3306, 3332), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (3330, 3332), False, 'import datetime\n'), ((3895, 3911), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3909, 3911), True, 'import numpy as np\n')] |