code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import numpy as np
train_ratings_path = "./../Data/netflix/TrainingRatings.txt"
test_ratings_path = "./../Data/netflix/TestingRatings.txt"
map_users={}
map_titles={}
data_matrix = np.empty((28978,1821),dtype=np.float32)
data_matrix[:] = np.nan
with open(train_ratings_path,'r') as reader:
counter_titles=0
counter_users = 0
for line in reader:
title,user_id,rating = line.split(',')
if not title in map_titles:
map_titles[title] = counter_titles
counter_titles +=1
if not user_id in map_users:
map_users[user_id]=counter_users
counter_users +=1
data_matrix[map_users[user_id]][map_titles[title]] = rating
del reader
mean_rating = np.nanmean(data_matrix,axis=1)
data_matrix[np.isnan(data_matrix)]=0
deviation = data_matrix - mean_rating[:,np.newaxis]
weights = {}
ratings={}
predicted = {}
squared_dev = (deviation**2).sum(axis=1)
act_ratings=[]
pred_ratings=[]
error_rating=[]
with open(test_ratings_path,'r') as reader:
c=0
for line in reader:
title,user_id,rating = line.split(',')
mapped_user = map_users[user_id]
mapped_title = map_titles[title]
if user_id not in weights:
n_correlation = np.abs((deviation[mapped_user] * deviation).sum(axis=1))
d_correlation = np.sqrt(squared_dev[mapped_user] * squared_dev)
weights[user_id]=n_correlation/d_correlation
normalising_constant = weights[user_id].sum()
weighted_sum = (weights[user_id]*(data_matrix[:,mapped_title] - mean_rating)).sum()
predicted[(mapped_title,user_id)] = mean_rating[mapped_user] + weighted_sum/normalising_constant
act_ratings.append(float(rating.replace("\n", "")))
error_rating.append(float(rating.replace("\n", ""))-predicted[(mapped_title,user_id)])
print(c," Acct : ",float(rating.replace("\n", "")), "Pred : ",predicted[(mapped_title,user_id)])
c+=1
| [
"numpy.nanmean",
"numpy.sqrt",
"numpy.empty",
"numpy.isnan"
] | [((184, 225), 'numpy.empty', 'np.empty', (['(28978, 1821)'], {'dtype': 'np.float32'}), '((28978, 1821), dtype=np.float32)\n', (192, 225), True, 'import numpy as np\n'), ((730, 761), 'numpy.nanmean', 'np.nanmean', (['data_matrix'], {'axis': '(1)'}), '(data_matrix, axis=1)\n', (740, 761), True, 'import numpy as np\n'), ((773, 794), 'numpy.isnan', 'np.isnan', (['data_matrix'], {}), '(data_matrix)\n', (781, 794), True, 'import numpy as np\n'), ((1337, 1384), 'numpy.sqrt', 'np.sqrt', (['(squared_dev[mapped_user] * squared_dev)'], {}), '(squared_dev[mapped_user] * squared_dev)\n', (1344, 1384), True, 'import numpy as np\n')] |
import os.path
import pytest
INPUT_TXT = os.path.join(os.path.dirname(__file__), 'input.txt')
def compute(s: str) -> int:
packet = s.strip()
packet = "".join([hexadecimal_to_binary(c) for c in packet])
versions = []
parse_packet(versions, packet, 0)
return sum(versions)
def parse_packet(versions, packet, index):
if "1" not in packet:
return
version = int(packet[index:index + 3], 2)
versions.append(version)
index += 3
type_id = int(packet[index:index + 3], 2)
index += 3
if type_id == 4:
value = ""
while packet[index] == "1":
value += packet[index + 1:index + 5]
index += 5
value += packet[index + 1:index + 5]
index += 5
return index
else:
length_type_id = int(packet[index:index + 1], 2)
index += 1
if length_type_id == 0:
length = int(packet[index:index + 15], 2)
index += 15
end = index + length
while index < end:
index = parse_packet(versions, packet, index)
else:
count = int(packet[index:index + 11], 2)
index += 11
for _ in range(count):
index = parse_packet(versions, packet, index)
return index
def hexadecimal_to_binary(hexadecimal: str) -> str:
return bin(int(hexadecimal, 16))[2:].zfill(4)
INPUT_S = '''\
A0016C880162017C3686B18A3D4780
'''
EXPECTED = 31
@pytest.mark.parametrize(
('input_s', 'expected'),
(
(INPUT_S, EXPECTED),
),
)
def test(input_s: str, expected: int) -> None:
assert compute(input_s) == expected
def main() -> int:
with open(INPUT_TXT, "r") as f:
print(compute(f.read()))
return 0
if __name__ == '__main__':
raise SystemExit(main())
| [
"pytest.mark.parametrize"
] | [((1481, 1553), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('input_s', 'expected')", '((INPUT_S, EXPECTED),)'], {}), "(('input_s', 'expected'), ((INPUT_S, EXPECTED),))\n", (1504, 1553), False, 'import pytest\n')] |
import random
from precise.skaters.managerutil.managertesting import manager_test_run
from precise.skaters.managers.equalmanagers import equal_daily_long_manager, equal_long_manager
from precise.skaters.managers.equalmanagers import equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager
from precise.skatertools.data.equityhistorical import random_cached_equity_dense
from numpy.testing import assert_array_almost_equal
def test_random_manager():
from precise.skaters.managers.allmanagers import LONG_MANAGERS
mgr = random.choice(LONG_MANAGERS)
manager_test_run(mgr=mgr)
def test_daily_equal():
assert_equal_managing(equal_long_manager, equal_daily_long_manager)
def test_weekly_equal():
assert_equal_managing(equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager)
def assert_equal_managing(mgr1,mgr2):
ys = random_cached_equity_dense(k=1, n_obs=50, n_dim=3, as_frame=False)
s1 = {}
s2 = {}
for y in ys:
w1, s1 = mgr1(y=y, s=s1)
w2, s2 = mgr2(y=y, s=s2)
assert_array_almost_equal(w1,w2, err_msg='managers are not the same')
if __name__=='__main__':
test_daily_equal()
test_weekly_equal() | [
"random.choice",
"precise.skatertools.data.equityhistorical.random_cached_equity_dense",
"numpy.testing.assert_array_almost_equal",
"precise.skaters.managerutil.managertesting.manager_test_run"
] | [((540, 568), 'random.choice', 'random.choice', (['LONG_MANAGERS'], {}), '(LONG_MANAGERS)\n', (553, 568), False, 'import random\n'), ((573, 598), 'precise.skaters.managerutil.managertesting.manager_test_run', 'manager_test_run', ([], {'mgr': 'mgr'}), '(mgr=mgr)\n', (589, 598), False, 'from precise.skaters.managerutil.managertesting import manager_test_run\n'), ((867, 933), 'precise.skatertools.data.equityhistorical.random_cached_equity_dense', 'random_cached_equity_dense', ([], {'k': '(1)', 'n_obs': '(50)', 'n_dim': '(3)', 'as_frame': '(False)'}), '(k=1, n_obs=50, n_dim=3, as_frame=False)\n', (893, 933), False, 'from precise.skatertools.data.equityhistorical import random_cached_equity_dense\n'), ((1049, 1119), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['w1', 'w2'], {'err_msg': '"""managers are not the same"""'}), "(w1, w2, err_msg='managers are not the same')\n", (1074, 1119), False, 'from numpy.testing import assert_array_almost_equal\n')] |
# --------------------------------------------------------
# Visual Detection: State-of-the-Art
# Copyright: <NAME>
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
import torch.nn.init as init
from model.utils.config import cfg
import numpy as np
import pdb
import time
from .bbox_transform_grasp import labels2points, points2labels, \
grasp_encode, grasp_decode,jaccard_overlap
class _GraspTargetLayer(nn.Module):
def __init__(self, feat_stride, ratios, scales, angles):
super(_GraspTargetLayer, self).__init__()
self.BBOX_NORMALIZE_MEANS = torch.FloatTensor(cfg.FCGN.BBOX_NORMALIZE_MEANS)
self.BBOX_NORMALIZE_STDS = torch.FloatTensor(cfg.FCGN.BBOX_NORMALIZE_STDS)
self.negpos_ratio = cfg.TRAIN.FCGN.NEG_POS_RATIO
self._feat_stride = feat_stride
def forward(self, conf, gt, priors, xthresh = None, ythresh = None, angle_thresh = None):
self.BBOX_NORMALIZE_MEANS = self.BBOX_NORMALIZE_MEANS.type_as(gt)
self.BBOX_NORMALIZE_STDS = self.BBOX_NORMALIZE_STDS.type_as(gt)
self.batch_size = gt.size(0)
if xthresh is None:
xthresh = self._feat_stride / 2
if ythresh is None:
ythresh = self._feat_stride / 2
if angle_thresh is None:
angle_thresh = cfg.TRAIN.FCGN.ANGLE_THRESH
if cfg.TRAIN.FCGN.ANGLE_MATCH:
loc_t, conf_t = self._match_gt_prior(priors, gt, xthresh, ythresh, angle_thresh)
else:
loc_t, conf_t = self._match_gt_prior_IoUbased(priors, gt)
iw, ow = self._mine_hard_samples(conf_t, conf)
if cfg.TRAIN.COMMON.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
loc_t = ((loc_t - self.BBOX_NORMALIZE_MEANS.expand_as(loc_t))
/ self.BBOX_NORMALIZE_STDS.expand_as(loc_t))
#if ((conf_t == 0).sum()/(conf_t == 1).sum()).item() != 3:
# pdb.set_trace()
return loc_t, conf_t, iw, ow
def _match_gt_prior(self, priors, gt, xthresh, ythresh, angle_thresh):
"""
:param priors: bs x K x 5
:param gt: bs x N x 5
:param angle_thresh:
:return:
"""
num_priors = priors.size(1)
x_gt = gt[:, :, 0:1].transpose(2,1)
y_gt = gt[:, :, 1:2].transpose(2,1)
ang_gt = gt[:, :, 4:5].transpose(2,1)
mask_gt = (torch.sum(gt==0, 2, keepdim = True) != gt.size(2)).transpose(2,1)
xdiff = torch.abs(priors[:, : ,0:1] - x_gt)
ydiff = torch.abs(priors[:, :, 1:2] - y_gt)
angdiff = torch.abs(priors[:, :, 4:5] - ang_gt)
mask = torch.zeros_like(xdiff) + mask_gt.float()
match_mat = (xdiff <= xthresh) \
& (ydiff <= ythresh) \
& (angdiff <= angle_thresh) \
& (mask != 0)
match_num = torch.sum(match_mat, 2, keepdim = True)
label = torch.zeros(self.batch_size, num_priors).type_as(gt).long()
label[(torch.sum(match_mat, 2) > 0)] = 1
# bs x N x K -> K x bs x N -> K x bs x N x 1
match_mat = match_mat.permute(2,0,1).unsqueeze(3)
# bs x K x 5 -> K x bs x 5 -> K x bs x 1 x 5
gt = gt.permute(1,0,2).unsqueeze(2)
# K x bs x N x 5 -> bs x N x 5
# When a prior matches multi gts, it will use
# the mean of all matched gts as its target.
loc = torch.sum(match_mat.float() * gt, dim = 0) + cfg.EPS
# make all nans zeros
keep = (match_num > 0).squeeze()
loc[keep] /= match_num[keep].float()
loc_encode = grasp_encode(loc, priors)
return loc_encode, label
def _match_gt_prior_IoUbased(self, priors, gt):
"""
:param priors: bs x K x 5
:param gt: bs x N x 5
:param angle_thresh:
:return:
"""
num_priors = priors.size(1)
x_gt = gt[:, :, 0:1].transpose(2,1)
y_gt = gt[:, :, 1:2].transpose(2,1)
#ang_gt = gt[:, :, 4:5].transpose(2, 1)
mask_gt = (torch.sum(gt==0, 2, keepdim = True) != gt.size(2)).transpose(2,1)
xdiff = torch.abs(priors[:, : ,0:1] - x_gt)
ydiff = torch.abs(priors[:, :, 1:2] - y_gt)
#angdiff = torch.abs(priors[:, :, 4:5] - ang_gt)
mask = torch.zeros_like(xdiff) + mask_gt.float()
match_mat = (xdiff <= self._feat_stride / 2) \
& (ydiff <= self._feat_stride / 2) \
& (mask != 0)
iou_ind = torch.nonzero(match_mat).data.cpu()
for i in iou_ind:
rec1 = np.array(priors[i[0].item(),i[1].item(),:])
rec2 = np.array(gt[i[0].item(),i[2].item(),:])
if jaccard_overlap(rec1,rec2) < cfg.TRAIN.FCGN.JACCARD_THRESH:
match_mat[i[0].item(),i[1].item(),i[2].item()] = 0
match_num = torch.sum(match_mat, 2, keepdim = True)
label = torch.zeros(self.batch_size, num_priors).type_as(gt).long()
label[(torch.sum(match_mat, 2) > 0)] = 1
# bs x N x K -> K x bs x N -> K x bs x N x 1
match_mat = match_mat.permute(2,0,1).unsqueeze(3)
# bs x K x 5 -> K x bs x 5 -> K x bs x 1 x 5
gt = gt.permute(1,0,2).unsqueeze(2)
# K x bs x N x 5 -> bs x N x 5
# When a prior matches multi gts, it will use
# the mean of all matched gts as its target.
loc = torch.sum(match_mat.float() * gt, dim = 0) + cfg.EPS
# make all nans zeros
keep = (match_num > 0).squeeze()
loc[keep] /= match_num[keep].float()
loc_encode = grasp_encode(loc, priors)
return loc_encode, label
def _mine_hard_samples(self, conf_t, conf):
"""
:param loc_t: bs x N x 5
:param conf_t: bs x N
:param conf: bs x N x 2
:return:
"""
pos = (conf_t > 0)
batch_conf = conf.data.view(-1, 2)
loss_c = self._log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
loss_c = loss_c.view(self.batch_size, -1)
loss_c[pos] = -1 # filter out pos boxes for now
_, loss_idx = loss_c.sort(1, descending=True)
# To find element indexes that indicate elements which have highest confidence loss
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = self.negpos_ratio * num_pos
neg = (idx_rank < num_neg.expand_as(idx_rank)) & (pos != 1)
conf_t[neg.eq(0) & pos.eq(0)] = -1
iw = pos.gt(0).float() * cfg.TRAIN.FCGN.BBOX_INSIDE_WEIGHTS[0]
iw = iw.unsqueeze(2).expand(conf.size(0), -1, 5)
if cfg.TRAIN.FCGN.BBOX_POSITIVE_WEIGHTS < 0:
ow = (pos + neg).gt(0).float() / ((num_pos + num_neg)|1).float()
ow = ow.unsqueeze(2).expand(conf.size(0), -1, 5)
else:
ow = (pos.gt(0).float() * cfg.TRAIN.FCGN.BBOX_POSITIVE_WEIGHTS \
+ neg.gt(0).float()) / ((num_pos + num_neg)|1).float()
ow = ow.unsqueeze(2).expand(conf.size(0), -1, 5)
if (ow != ow).sum().item() > 0:
pdb.set_trace()
if (neg.gt(0) & pos.gt(0)).sum().item() > 0:
pdb.set_trace()
return iw, ow
def _log_sum_exp(self,x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max, _ = x.data.max(dim = 1, keepdim = True)
return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max | [
"torch.abs",
"torch.exp",
"torch.nonzero",
"torch.sum",
"pdb.set_trace",
"torch.zeros",
"torch.zeros_like",
"torch.FloatTensor"
] | [((777, 825), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.FCGN.BBOX_NORMALIZE_MEANS'], {}), '(cfg.FCGN.BBOX_NORMALIZE_MEANS)\n', (794, 825), False, 'import torch\n'), ((861, 908), 'torch.FloatTensor', 'torch.FloatTensor', (['cfg.FCGN.BBOX_NORMALIZE_STDS'], {}), '(cfg.FCGN.BBOX_NORMALIZE_STDS)\n', (878, 908), False, 'import torch\n'), ((2696, 2731), 'torch.abs', 'torch.abs', (['(priors[:, :, 0:1] - x_gt)'], {}), '(priors[:, :, 0:1] - x_gt)\n', (2705, 2731), False, 'import torch\n'), ((2748, 2783), 'torch.abs', 'torch.abs', (['(priors[:, :, 1:2] - y_gt)'], {}), '(priors[:, :, 1:2] - y_gt)\n', (2757, 2783), False, 'import torch\n'), ((2802, 2839), 'torch.abs', 'torch.abs', (['(priors[:, :, 4:5] - ang_gt)'], {}), '(priors[:, :, 4:5] - ang_gt)\n', (2811, 2839), False, 'import torch\n'), ((3088, 3125), 'torch.sum', 'torch.sum', (['match_mat', '(2)'], {'keepdim': '(True)'}), '(match_mat, 2, keepdim=True)\n', (3097, 3125), False, 'import torch\n'), ((4340, 4375), 'torch.abs', 'torch.abs', (['(priors[:, :, 0:1] - x_gt)'], {}), '(priors[:, :, 0:1] - x_gt)\n', (4349, 4375), False, 'import torch\n'), ((4392, 4427), 'torch.abs', 'torch.abs', (['(priors[:, :, 1:2] - y_gt)'], {}), '(priors[:, :, 1:2] - y_gt)\n', (4401, 4427), False, 'import torch\n'), ((5055, 5092), 'torch.sum', 'torch.sum', (['match_mat', '(2)'], {'keepdim': '(True)'}), '(match_mat, 2, keepdim=True)\n', (5064, 5092), False, 'import torch\n'), ((2856, 2879), 'torch.zeros_like', 'torch.zeros_like', (['xdiff'], {}), '(xdiff)\n', (2872, 2879), False, 'import torch\n'), ((4500, 4523), 'torch.zeros_like', 'torch.zeros_like', (['xdiff'], {}), '(xdiff)\n', (4516, 4523), False, 'import torch\n'), ((7287, 7302), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7300, 7302), False, 'import pdb\n'), ((7369, 7384), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7382, 7384), False, 'import pdb\n'), ((3219, 3242), 'torch.sum', 'torch.sum', (['match_mat', '(2)'], {}), '(match_mat, 2)\n', (3228, 3242), False, 'import torch\n'), ((5186, 5209), 'torch.sum', 'torch.sum', (['match_mat', '(2)'], {}), '(match_mat, 2)\n', (5195, 5209), False, 'import torch\n'), ((2613, 2648), 'torch.sum', 'torch.sum', (['(gt == 0)', '(2)'], {'keepdim': '(True)'}), '(gt == 0, 2, keepdim=True)\n', (2622, 2648), False, 'import torch\n'), ((4257, 4292), 'torch.sum', 'torch.sum', (['(gt == 0)', '(2)'], {'keepdim': '(True)'}), '(gt == 0, 2, keepdim=True)\n', (4266, 4292), False, 'import torch\n'), ((4708, 4732), 'torch.nonzero', 'torch.nonzero', (['match_mat'], {}), '(match_mat)\n', (4721, 4732), False, 'import torch\n'), ((7795, 7815), 'torch.exp', 'torch.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (7804, 7815), False, 'import torch\n'), ((3144, 3184), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'num_priors'], {}), '(self.batch_size, num_priors)\n', (3155, 3184), False, 'import torch\n'), ((5111, 5151), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'num_priors'], {}), '(self.batch_size, num_priors)\n', (5122, 5151), False, 'import torch\n')] |
# author: <NAME>
# code: https://github.com/mahmudahsan/thinkdiff
# blog: http://thinkdiff.net
# http://pythonbangla.com
# MIT License
# --------------------------
# Reporting Logs in text file
# --------------------------
import logging
def set_custom_log_info(filename):
logging.basicConfig(filename=filename, level=logging.INFO)
def report(e:Exception):
logging.exception(str(e))
| [
"logging.basicConfig"
] | [((285, 343), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'filename', 'level': 'logging.INFO'}), '(filename=filename, level=logging.INFO)\n', (304, 343), False, 'import logging\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[28]:
import matplotlib.pyplot as plt
import numpy as np
import csv
# In[32]:
listaHigh = []
listaLow = []
listaClose = []
contador = 0
lineas = len(open('GOOGLPrediccion.csv').readlines())
c = input()
if(int(c)!=0):
c = int(c)
else:
c = lineas
cantidad = lineas - c
with open('GOOGLPrediccion.csv', newline='') as File:
reader = csv.reader(File)
for row in reader:
if(contador>cantidad):
listaHigh.append(float(row[1]))
listaLow.append(float(row[2]))
listaClose.append(float(row[3]))
contador = contador + 1
plt.plot(listaHigh) # Dibuja el gráfico
plt.xlabel("Fila") # Inserta el título del eje X
plt.ylabel("Precio") # Inserta el título del eje Y
plt.ioff() # Desactiva modo interactivo de dibujo
plt.plot(listaLow) # No dibuja datos de lista2
plt.ion() # Activa modo interactivo de dibujo
plt.plot(listaLow) # Dibuja datos de lista2 sin borrar datos de lista1
plt.plot(listaClose) # No dibuja datos de lista2
plt.ion() # Activa modo interactivo de dibujo
plt.plot(listaClose) # Dibuja datos de lista2 sin borrar datos de lista1
plt.plot(listaHigh, label = "High", color="b")
plt.plot(listaLow, label = "Low", color="g")
plt.plot(listaClose, label = "Close", color="r")
plt.legend()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.ion",
"csv.reader",
"matplotlib.pyplot.legend"
] | [((640, 659), 'matplotlib.pyplot.plot', 'plt.plot', (['listaHigh'], {}), '(listaHigh)\n', (648, 659), True, 'import matplotlib.pyplot as plt\n'), ((682, 700), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fila"""'], {}), "('Fila')\n", (692, 700), True, 'import matplotlib.pyplot as plt\n'), ((734, 754), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precio"""'], {}), "('Precio')\n", (744, 754), True, 'import matplotlib.pyplot as plt\n'), ((787, 797), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (795, 797), True, 'import matplotlib.pyplot as plt\n'), ((840, 858), 'matplotlib.pyplot.plot', 'plt.plot', (['listaLow'], {}), '(listaLow)\n', (848, 858), True, 'import matplotlib.pyplot as plt\n'), ((889, 898), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (896, 898), True, 'import matplotlib.pyplot as plt\n'), ((937, 955), 'matplotlib.pyplot.plot', 'plt.plot', (['listaLow'], {}), '(listaLow)\n', (945, 955), True, 'import matplotlib.pyplot as plt\n'), ((1011, 1031), 'matplotlib.pyplot.plot', 'plt.plot', (['listaClose'], {}), '(listaClose)\n', (1019, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1071), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1069, 1071), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1130), 'matplotlib.pyplot.plot', 'plt.plot', (['listaClose'], {}), '(listaClose)\n', (1118, 1130), True, 'import matplotlib.pyplot as plt\n'), ((1186, 1230), 'matplotlib.pyplot.plot', 'plt.plot', (['listaHigh'], {'label': '"""High"""', 'color': '"""b"""'}), "(listaHigh, label='High', color='b')\n", (1194, 1230), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1275), 'matplotlib.pyplot.plot', 'plt.plot', (['listaLow'], {'label': '"""Low"""', 'color': '"""g"""'}), "(listaLow, label='Low', color='g')\n", (1241, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1324), 'matplotlib.pyplot.plot', 'plt.plot', (['listaClose'], {'label': '"""Close"""', 'color': '"""r"""'}), "(listaClose, label='Close', color='r')\n", (1286, 1324), True, 'import matplotlib.pyplot as plt\n'), ((1327, 1339), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1337, 1339), True, 'import matplotlib.pyplot as plt\n'), ((396, 412), 'csv.reader', 'csv.reader', (['File'], {}), '(File)\n', (406, 412), False, 'import csv\n')] |
from __future__ import absolute_import, unicode_literals
import os
from .base import *
DEBUG = False
SECRET_KEY = os.environ.get("SECRET_KEY")
try:
from .local import *
except ImportError:
pass
| [
"os.environ.get"
] | [((118, 146), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (132, 146), False, 'import os\n')] |
import re, os, shutil
import lookml.config as conf
import lkml
import time, copy
from string import Template
from lookml.modules.project import *
import lkml, github
def snakeCase(string):
str1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', str1).lower()
def splice(*args):
return ''.join([arg for arg in args])
def removeSpace(string): # removing special character / [|]<>,.?}{+=~!$%^&*()-
return re.sub('(\s|/|\[|\]|\||\,|<|>|\.|\?|\{|\}|#|=|~|!|\+|\$|\%|\^|\&|\*|\(|\)|\-|\:)+', r'', string)
def tidy(string):
'''
cleans a string to remove multiple linebreaks and spaces (trims excess whitespace)
:return: returns input string, with excess whitespace removed
:rtype: str
'''
return re.sub(r'\s{10,}', r'\n ', string)
# return string
def lookCase(string):
return removeSpace(snakeCase(string))
def sortMe(func):
''' returns all the fields sorted first by alpabetical dimensions/filters, then alphabetical measures '''
return sorted(list(func), key=lambda field: field.identifier)
def stringify(collection,delim=conf.NEWLINEINDENT, prefix=True, postfix=False):
'''
calls string and concatinates each item in a collection
'''
# return delim + delim.join([str(item) for item in collection])
return (delim if prefix else '') + delim.join([str(item) for item in collection]) + (delim if postfix else '')
def parseReferences(inputString):
'''
Uses regular expresssions to preduce an iterator of the lookml references in a string.
result has the shape {'raw':'${exact.field_reference}','field':'exact.field_reference', fully_qualified_reference:True}
'''
for match in re.findall(r'(\$\{([a-z\._0-9]*)\}|\{\%\s{1,3}condition\s([a-z\._0-9]*)\s\%\}|\{\%\s{1,3}parameter\s([a-z\._0-9]*)\s\%\}|\{\{\s{0,10}([a-z\._0-9]*)\s{0,10}\}\}| \_filters\[\s{0,10}\'([a-z\._0-9]*)\'\])',inputString):
#Collapse the results from findall
result = ''.join(match[1:])
#Replace the liquid value references
if result.endswith('._value'):
result = result.replace('._value','')
#Check if a fully qualified reference was used
fq = True if '.' in ''.join(match[1:]) else False
yield {'raw':match[0],'field':result, 'fully_qualified_reference': fq }
class File:
'''
A file object represents a file within a LookML project. It can be several types, can contain views, explores
or other properties such as inlcude or data groups
It can be instantiated with a View, an Explore, a filepath on disk, or content from the Github API
'''
class view_collection:
'''
A container for views which allows us to use .operator syntax
'''
def __init__(self,viewlist):
self.views = {}
for view in viewlist:
self.add(view)
def __getattr__(self,key):
return self.views[key]
def __getitem__(self,key):
return self.__getattr__(key)
def add(self, v):
if isinstance(v,dict):
v = View(v)
self.views.update({v.name:v})
return self
def remove(self, v):
if not isinstance(v,str):
v = v.name
self.views.pop(v)
return self
def __iter__(self):
self.iterPointer = iter(self.views.values())
return self
def __next__(self):
try:
return next(self.iterPointer)
except:
raise StopIteration
class explore_collection:
'''
A container for explores which allows us to use .operator syntax
'''
def __init__(self,explorelist):
self.explores = {}
for explore in explorelist:
self.add(explore)
def __getattr__(self,key):
return self.explores[key]
def __getitem__(self,key):
return self.__getattr__(key)
def add(self, e):
if isinstance(e,dict):
e = Explore(e)
self.explores.update({e.name:e})
return self
def remove(self, e):
if not isinstance(e,str):
e = e.name
self.explores.pop(e)
return self
def __iter__(self):
self.iterPointer = iter(self.explores.values())
return self
def __next__(self):
try:
return next(self.iterPointer)
except:
raise StopIteration
def __init__(self, f):
def githubBootstrap():
#custom initialization for github_api type
#Set Basic Attributes
self.name = f._rawData['name']
self.sha = f._rawData['sha']
self.base_name = self.name.replace(".model.lkml", "").replace(".explore.lkml", "").replace(".view.lkml", "")
self.path = f._rawData['path']
#Parse Step: Github content is returned base64 encoded
data = base64.b64decode(f.content).decode('ascii')
self.json_data = lkml.load(data)
def filepathBootstrap():
#custom initialization for path type
#Set Basic Attributes
self.name = os.path.basename(f)
self.name_components = self.name.split('.')
if len(self.name_components) <= 1:
self.base_name = self.name
elif len(self.name_components) == 2:
self.base_name = self.name_components[0]
else:
self.base_name = '.'.join(self.name_components[:-2])
self.path = os.path.relpath(f)
self.sha = ''
#Parse Step: file is provided
with open(self.path, 'r') as tmp:
self.json_data = lkml.load(tmp)
def viewBootstrap():
#custom initialization for path type
#Set Basic Attributes
self.name = f.name + '.view.lkml'
self.base_name = f.name
self.path = self.name
self.sha = ''
#load as json_Data for compatibility with the rest of the class
#TODO: revist if this is needed to convert back and forth or if another more direct method would be preferable
self.json_data = lkml.load(str(f))
def exploreBootstrap():
#custom initialization for path type
#Set Basic Attributes
self.name = f.name + '.model.lkml' # What about explore filetypes?
self.base_name = f.name
self.path = self.name
self.sha = ''
#load as json_Data for compatibility with the rest of the class
#TODO: revist if this is needed to convert back and forth or if another more direct method would be preferable
self.json_data = lkml.load(str(f))
#Step 1 -- Data Type introspection
if isinstance(f, github.ContentFile.ContentFile):
self.f_type = "github_api"
githubBootstrap()
elif isinstance(f, View):
self.f_type = "view"
viewBootstrap()
elif isinstance(f, Explore):
self.f_type = "explore"
exploreBootstrap()
elif os.path.isfile(f):
self.f_type = "path"
filepathBootstrap()
#Step 2 -- set a lookml "file type" mostly only used for path info
if self.name.endswith('lkml'):
self.filetype = self.name.split('.')[-2]
else:
raise Exception("Unsupported filename " + self.name)
if 'views' in self.json_data.keys():
self.vws = self.view_collection(self.json_data['views'])
self.json_data.pop('views')
else:
self.vws = self.view_collection({})
if 'explores' in self.json_data.keys():
self.exps = self.explore_collection(self.json_data['explores'])
self.json_data.pop('explores')
else:
self.exps = self.explore_collection({})
self.properties = Properties(self.json_data)
self.props = self.properties.props()
def __getattr__(self, key):
if key in self.__dict__.keys():
return self.__dict__[key]
elif key == 'views':
return self.vws
elif key == 'explores':
return self.exps
#TODO: resolve attribute access issues
elif key in ['datagroups', 'map_layers', 'named_value_formats']:
return self.properties[key]
else:
# raise KeyError
return object.__getattr__(key)
def __getitem__(self,key):
if key == 'views':
return self.vws
elif key == 'explores':
return self.exps
def __str__(self):
return splice(
conf.NEWLINE.join([str(p) for p in self.properties.getProperties()])
,conf.NEWLINE
,conf.NEWLINE.join([ str(e) for e in self.explores] ) if self.exps else ''
,conf.NEWLINE
,conf.NEWLINE.join([ str(v) for v in self.views]) if self.vws else ''
)
def setSha(self,sha):
self.sha = sha
return self
def addView(self,v):
self.vws.add(v)
return self
def addExplore(self,e):
self.exps.add(e)
return self
def _bind_lkml(self, lkmldictraw):
lkmldict = copy.deepcopy(lkmldictraw)
if 'views' in lkmldict.keys():
for view in lkmldict['views']:
self.vws.add(View(view))
lkmldict.pop('views')
if 'explores' in lkmldict.keys():
for explore in lkmldict['explores']:
self.exps.add(Explore(explore))
lkmldict.pop('explores')
for k,v in lkmldict.items():
self.setProperty(k,v)
def __add__(self, other):
if isinstance(other, View):
self.addView(other)
elif isinstance(other, Explore):
self.addExplore(other)
else:
self._bind_lkml(lkml.load(other))
def getProperty(self, prop):
''' Get a property from the properties collection '''
return self.properties[prop]
def setProperty(self, name, value):
''' Set a property in the properties collection '''
self.properties.addProperty(name, value)
return self
def setFolder(self,folder):
self.path = folder + self.name if folder.endswith('/') else folder + '/' + self.name
return self
def write(self,overWriteExisting=True):
''' Checks to see if the file exists before writing'''
print("Writing to: %s" % (self.path) )
if overWriteExisting:
with open(self.path, 'w') as opened_file:
try:
opened_file.write(self.__str__())
except:
pass
else:
try:
fh = open(self.path, 'r')
fh.close()
except FileNotFoundError:
with open(self.path, 'w') as opened_file:
opened_file.write(self.__str__())
class base(object):
class _model:
pass
# Put it under a namespace in __dict__?
# Define types of collections for special types. Fields for example should be unique (but lkml itself passes these split out -- how to define uniqueness across 3-4 dictionaries etc)
class _view:
pass
# Bind model to __str__ (should be kept relatively simple)
class _cont:
''' '''
pass
#
#CU (much more at once?
def __add__(self, other):
self._bind_lkml(lkml.load(other))
# def __sub__(self, other): #←- subtract a key from the model?
# pass
# #R
# def __getattr__(self, attr): #← model / property getting
# pass
# # C,U
# def __setattr__(self, attr, val):
# pass
def __init__(self,input):
self.identifier = ''
self.properties = Properties({})
self.message = ''
self.token = ''
self.indentLevel = 1
if isinstance(input,str):
self.setName(input)
elif isinstance(input,dict):
self._bind_lkml(input)
self.templateMap = {}
def _bind_lkml(self, lkmldict):
# self.setName(lkmldict.pop('name'))
if 'name' in lkmldict.keys():
self.setName(lkmldict.pop('name'))
for k,v in lkmldict.items():
self.setProperty(k,v)
def setName(self, name):
'''
sets the name
:param arg1: name
:type arg1: string
:return: returns the overall object
:rtype: self
'''
self.identifier = name
return self
def setLabel(self, label):
''''''
return self.setProperty('label', label)
def hide(self):
''''''
self.properties.addProperty('hidden', 'yes')
return self
def unHide(self):
''''''
self.properties.delProperty('hidden')
return self
def setMessage(self,message):
self.message = message
return self
def getMessage(self):
if self.message:
return splice('#',self.message,conf.NEWLINE)
else:
return ''
def getProperty(self, prop):
''' Get a property from the properties collection '''
return self.properties[prop]
def setProperty(self, name, value):
''' Set a property in the properties collection '''
self.properties.addProperty(name, value)
return self
def unSetProperty(self, name):
''''''
self.properties.__del__(name)
return self
def getProperties(self):
return self.properties.getProperties()
def hasProp(self, property):
return property in self.properties.props()
def props(self):
return self.properties.props()
def rawProp(self,key):
'''
if dict type schema, needs a prop name. If list type schema needs a number index
'''
return self.properties.rawPropValue(key)
def __repr__(self):
return "%s name: %s id: %s" % (self.__class__, self.identifier, hex(id(self)))
def __len__(self):
return len([f for f in self.getProperties()])
def __iter__(self):
self.valueiterator = iter(self.getProperties())
return self
def __next__(self):
try:
return next(self.valueiterator)
except:
raise StopIteration
def __str__(self):
self.templateMap = {
'message': self.getMessage()
,'identifier': self.identifier
# ,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties() if len(self) == 2])
,'props': stringify([ conf.INDENT + str(p) for p in self.getProperties()], prefix=(len(self) > 2))
,'token': self.token
}
return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
class View(base):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
def __init__(self, input):
self._fields = {}
self.primaryKey = ''
self.message = ''
self.children = {}
self.parent = None
super(View, self).__init__(input)
self.token = 'view'
def __str__(self):
self.templateMap = {
'message':self.getMessage()
,'token':self.token
,'identifier':self.identifier
,'props': stringify([str(p) for p in self.getProperties() if p.name != "sets"])
,'parameters':stringify(sortMe(self.parameters()))
,'filters': stringify(sortMe(self.filters()))
,'dimensions': stringify(sortMe(self.dims()))
,'dimensionGroups': stringify(sortMe(self.dimensionGroups()))
,'measures': stringify(sortMe(self.measures()))
,'sets': stringify([str(p) for p in self.getProperties() if p.name == "sets"])
,'children': stringify(self.children.values()) if self.children else ''
}
return tidy(Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap))
def _bind_lkml(self,jsonDict):
t = 'measures'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Measure(field)
jsonDict.pop(t)
else:
pass
t = 'dimensions'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Dimension(field)
jsonDict.pop(t)
else:
pass
t = 'filters'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Filter(field)
jsonDict.pop(t)
else:
pass
t = 'dimension_groups'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + DimensionGroup(field)
jsonDict.pop(t)
else:
pass
t = 'parameters'
if t in jsonDict.keys():
for field in jsonDict[t]:
self + Parameter(field)
jsonDict.pop(t)
else:
pass
super()._bind_lkml(jsonDict)
def getFieldsSorted(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# ''' returns all the fields sorted first by alpabetical dimensions/filters, then alphabetical measures '''
return sorted(self._fields.values(), key=lambda field: ''.join([str(isinstance(field, Measure)), field.identifier]))
def __repr__(self):
return "%s (%r) fields: %s id: %s" % (self.__class__, self.identifier, len(self), hex(id(self)))
def __len__(self):
return len([f for f in self.fields()])
def __add__(self,other):
if isinstance(other, Field):
return self.addField(other)
elif isinstance(other, str):
#TODO: decide if still want to support view + 'id' behavior, and if so check regex first. Maybe a regex string to just ask: is snake str -> dim
if len(other) < 10:
return self.addDimension(dbColumn=other)
else:
self._bind_lkml(lkml.load(other))
else:
raise Exception(str(type(other)) + ' cannot be added to View')
def __radd__(self,other):
return self.__add__(other)
def __sub__(self,other):
if isinstance(other, Field):
return self.removeField(other)
elif isinstance(other, str):
return self.removeField(other)
elif isinstance(other,View):
return self.children.pop(other.identifier,None)
else:
raise Exception(str(type(other)) + ' cannot be subtracted from View')
def __rsub__(self,other):
return self.__sub__(other)
def __invert__(self):
''' hides all dimensions (not measures) '''
for dim in self.dims():
dim.hide()
for dim in self.dimensionGroups():
dim.hide()
for dim in self.parameters():
dim.hide()
for dim in self.filters():
dim.hide()
return self
def __contains__(self,item):
return item in self._fields.keys()
def __getitem__(self,identifier):
return self.field(identifier)
def __getattr__(self, key):
if key in self.__dict__.keys():
return self.__dict__[key]
elif key in self.properties.props():
return self.getProperty(key)
elif key == 'name':
return self.identifier
elif key == 'pk':
return self.getPrimaryKey()
elif key == '__ref__':
return splice('${',self.identifier,'}')
else:
return self.field(key)
def __setattr__(self, name, value):
if name == 'label':
self.setLabel(value)
return self
elif name == 'name':
self.setName(value)
return self
elif name == 'pk':
self.setPrimaryKey(value)
return self
elif name in conf.language_rules.view_props:
self.setProperty(name, value)
else:
object.__setattr__(self, name, value)
def setExtensionRequired(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# ''' Sets the view to be "extension: required" '''
self.properties.addProperty('extension','required')
return self
def getFieldsByTag(self,tag):
'''
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
for field in self.fields():
if tag in field.tags:
yield field
def fields(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# '''Returns all the fields as a generator'''
for field, literal in self._fields.items():
## Does this yeild only return the first instance it is looped?
yield literal
def fieldNames(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
return list(self._fields.keys())
def getFieldsByType(self, t):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
return filter(lambda field: str(field.type) == 'type: '+ t, list(self._fields.values()))
def sumAllNumDimensions(self):
'''
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# '''
# Adds a "total" measure to the view for all number dimensions
# '''
for field in self.getFieldsByType('number'):
tmpFieldName = 'total_' + field.name
if tmpFieldName not in self.fieldNames() and isinstance(field,Dimension):
self + Measure({
'name': tmpFieldName
,'type':'sum'
,'sql':field.__refs__
})
def field(self, f):
'''
get a field (most commonly, will pass in a field name)
:param field: Field to return
:type field: str or Field (or Dimension, Measure...) object
:return: Returns a subtype of Field
:rtype: Dimension, Measure, Filter or Parameter
'''
# ''' retrieve a field, argument can be the name or a field'''
if isinstance(f,str):
try:
return self._fields[f]
except KeyError:
raise KeyError
elif isinstance(f,Field):
return self._fields[f.identifier]
def search(self, prop, pattern):
'''
pass a regex expression and will return the fields whose sql match
:param prop: name of proprty you'd like to search
:param pattern: the regex pattern
:type prop: str
:type patter: a regex search string
:return: a generator / iteratble set of fields who have a member property matching the pattern
:rtype: Field
'''
if isinstance(pattern,list):
pattern = '('+'|'.join(pattern)+')'
searchString = r''.join([r'.*',pattern,r'.*'])
for field in self.fields():
if re.match(searchString,str(field.getProperty(prop))):
yield field
def addField(self, field):
'''
add a field to the view
* if the field is a dimension and primary key it will be set as the view primary key
* the field will have its view set to so that the view may be referenced from the field object
:param arg1: Field
:type arg1: Field (or subtype)
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
# '''Takes a field object as an argument and adds it to the view, if the field is a dimension and primary key it will be set as the view primary key'''
# uses the 'setView' method on field which returns self so that field can fully qualify itself and so that field can be a member of view
self._fields.update({field.identifier: field.setView(self)})
# If a primary key is added it will overwrite the existing primary key....
if isinstance(field, Dimension):
if field.isPrimaryKey():
# field.setPrimaryKey()
self.setPrimaryKey(field.identifier)
return self
def removeField(self,field):
'''
Removes a field from the View
* also unsets primary key
:param arg1: field to remove
:type arg1: Field object or str name of field
:return: returns the removed field
:rtype: Field or None
'''
# '''Removes a field, either by object or by string of identifier, safely checks and de-refs primary key'''
def pk(k):
if k.isPrimaryKey():
self.unSetPrimaryKey()
if isinstance(field,Field):
if isinstance(field,Dimension):
pk(field)
pk(self.field(field.identifier))
return self._fields.pop(field.identifier, None)
elif isinstance(field,str):
dimToDel = self.field(field)
if isinstance(dimToDel,Dimension):
pk(dimToDel)
return self._fields.pop(field, None)
else:
raise Exception('Not a string or Field instance provided')
def addFields(self, fields):
'''
Add multiple fields to a view. An iterable collection of field objects will be passed to the add field function. Helpful for adding many fields at once
:param fields: set or list of fields [field1, field2 ...]
:type fields: type description
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
for field in fields:
self.addField(field)
return self
def setPrimaryKey(self, f, callFromChild=False):
'''
TODO: Complete Desctiption
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
# ''' A string identifier or a field object can be passed, and will be set as the new primary key of the view'''
self.unSetPrimaryKey()
if isinstance(f, Dimension):
if not callFromChild:
f.setPrimaryKey()
self.primaryKey = f.identifier
else:
tmpField = self.field(f)
if isinstance(tmpField, Dimension):
self.primaryKey = tmpField.identifier
if not callFromChild:
tmpField.setPrimaryKey()
# tmpField.setPrimaryKey()
return self
def getPrimaryKey(self):
'''
TODO: Complete Desctiption
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# '''returns the primary key'''
if self.primaryKey:
return self.field(self.primaryKey)
def unSetPrimaryKey(self):
'''
TODO: Complete Desctiption
represents a view onject in LookML
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
# '''Unsets the view primary key returns self'''
# pk = self.field(self.primaryKey)
pk = self.getPrimaryKey()
if isinstance(pk, Dimension):
pk.unSetPrimaryKey()
self.primaryKey = ''
return self
def dims(self):
'''a description of the function
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# '''returns iterable of Dimension Fields'''
return filter(lambda dim: isinstance(dim, Dimension), self._fields.values())
def dimensionGroups(self):
'''a description of the function
:param arg1: description
:param arg2: description
:type arg1: type description
:type arg1: type description
:return: return description
:rtype: the return type description
'''
# '''returns iterable of DimensionGroup Fields'''
return filter(lambda dim: isinstance(dim, DimensionGroup), self._fields.values())
def measures(self):
'''returns iterable of Measure Fields'''
return filter(lambda meas: isinstance(meas, Measure), self._fields.values())
def filters(self):
'''returns iterable of Filter Fields'''
return filter(lambda fil: isinstance(fil, Filter), self._fields.values())
def parameters(self):
'''returns iterable of Paramter Fields'''
return filter(lambda par: isinstance(par, Parameter), self._fields.values())
def addDimension(self,dbColumn, type='string'):
'''
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
dim = Dimension(dbColumn)
dim.setType(type)
self.addField(dim)
return self
def sum(self,f):
''' A Synonym for addSum
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
return self.addSum(f)
# def count(self):
# ''' A Synonym for addCount
# :return: return self (allows call chaining i.e. obj.method().method() )
# :rtype: self
# '''
# return self.addCout()
def countDistinct(self,f):
''' A Synonym for addCountDistinct
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
return self.addCountDistinct(f)
def addCount(self):
'''Add a count measure to the view, returns self
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
measure = Measure( 'count' )
measure.setType('count')
self.addField(measure)
return self
def addCountDistinct(self, f):
'''Add a count distinct to the view based on a field object or field name/identifier. returns self
:return: return self (allows call chaining i.e. obj.method().method() )
:rtype: self
'''
if isinstance(f, Field):
field = f
else:
field = self.field(f)
measure = Measure( 'count_distinct_' + field.identifier)
measure.sql = field.__refs__
measure.setType('count_distinct')
self.addField(measure)
return self
def addSum(self, f):
'''Add a sum to the view based on a field object or field name/identifier. returns self'''
if isinstance(f, Field):
field = f
else:
field = self.field(f)
measure = Measure('total_' + field.identifier)
measure.setType('sum')
self.addField(measure)
return self
def addAverage(self, f):
'''Add a average to the view based on a field object or field name/identifier. returns self'''
if isinstance(f, Field):
field = f
else:
field = self.field(f)
measure = Measure(
identifier=''.join(['average_', field.identifier]), schema={'sql': field.__refs__}
)
measure.setType('average')
self.addField(measure)
return self
def addComparisonPeriod(self,field_to_measure,date, measure_type='count_distinct'):
self.addFields(
[
Filter('reporting_period').setName('reporting_period').setProperty('type','date')
,Filter('comparison_period').setName('comparison_period').setProperty('type','date')
,Measure('reporting_period_measure').setName('reporting_period_measure')
,Measure('comparison_period_measure').setName('comparison_period_measure')
]
)
assert isinstance(field_to_measure,Dimension)
self.reporting_period_measure.setType(measure_type)
self.comparison_period_measure.setType(measure_type)
self.comparison_period.setProperty('sql',
'''
{0}>= {{% date_start comparison_period %}}
AND {0} <= {{% date_end reporting_period %}}
'''.format('${'+date.identifier+'_raw}')
)
self.reporting_period_measure.setProperty(
'sql'
,'''CASE
WHEN {{% condition reporting_period %}} {0} {{% endcondition %}} THEN {1}
ELSE NULL
END
'''.format('${'+date.identifier+'_raw}',field_to_measure.__refs__)
)
self.comparison_period_measure.setProperty('sql',
'''
CASE
WHEN {{% condition comparison_period %}} {0} {{% endcondition %}} THEN {1}
ELSE NULL
END
'''.format('${'+date.identifier+'_raw}',field_to_measure.__refs__)
)
return self
def extend(self, name='', sameFile=True, required=False, *args):
''' Creates an extended view, optionally within the same view file
name (string) -> name of the extended / child view. Will default to the parent + _extended
sameFile (boolean) -> default true, if true will result in the child being printed within the parent's string call / file print
required (boolean) -> default false, if true will result in the parent being set to extension required
returns the child view object
'''
if not name:
if len(args) > 1:
if isinstance(args[0],str):
child = View(args[0])
else:
child = View('_'.join([self.identifier,'extended']))
else:
child = View('_'.join([self.identifier,'extended']))
else:
child = View(name)
if required:
self.setExtensionRequired()
child.properties.addProperty('extends',self.identifier)
child.parent = self
if sameFile:
self.children.update({child.identifier: child})
return child
class Join(base):
''' Instantiates a LookML join object... '''
def __init__(self, input):
self.properties = Properties({})
self.identifier = ''
self._from = ''
self.to = ''
super(Join,self).__init__(input)
self.token = 'join'
def setFrom(self,f):
self._from = f
return self
def setTo(self,t):
if isinstance(t,View):
self.to = t
return self
def on(self,left,operand,right):
statement = splice(left.__ref__ ,operand, right.__ref__)
self.setOn(statement)
return self
def setOn(self,sql_on):
self.properties.addProperty('sql_on', sql_on )
return self
def setSql(self,sql):
self.setProperty('sql', sql)
return self
def setType(self, joinType):
assert joinType in conf.JOIN_TYPES
self.properties.addProperty('type',joinType)
return self
def setRelationship(self,rel):
assert rel in conf.RELATIONSHIPS
self.properties.addProperty('relationship',rel)
return self
def hide(self):
''''''
self.properties.addProperty('view_label', '')
return self
def unHide(self):
''''''
self.properties.delProperty('view_label')
return self
class Explore(base):
''' Represents an explore object in LookML'''
def __init__(self, input):
self.joins = {}
self.base_view = ''
super(Explore, self).__init__(input)
self.token = 'explore'
def _bind_lkml(self,jsonDict):
if 'name' in jsonDict.keys():
self.setName(jsonDict.pop('name'))
if 'joins' in jsonDict.keys():
for join in jsonDict['joins']:
self + Join(join)
jsonDict.pop('joins')
for k,v in jsonDict.items():
self.setProperty(k,v)
def __len__(self):
return len(self.joins)
def __str__(self):
self.templateMap = {
'message': self.getMessage()
,'identifier':self.identifier
,'props': stringify([str(p) for p in self.getProperties()])
,'joins': stringify([str(j) for j in self.getJoins()])
,'token': self.token
}
return Template(getattr(conf.TEMPLATES,self.token)).substitute(**self.templateMap)
def __add__(self,other):
if isinstance(other,View) or isinstance(other,Join):
self.addJoin(other)
elif isinstance(other, str):
self._bind_lkml(lkml.load(other))
else:
raise TypeError
return self
def __radd__(self,other):
return self.__add__(other)
def __getattr__(self, key):
if self.base_view and key == self.base_view.name:
return self.base_view
elif key == 'name':
return self.identifier
elif key in self.joins.keys():
return self.joins[key]
else:
return self.__getitem__(key)
def __setattr__(self, name, value):
if name in self.__dict__.keys():
self.__dict__[name] = value
else:
object.__setattr__(self, name, value)
def __getitem__(self,identifier):
return self.getJoin(identifier)
def createNDT(self,explore_source='', name='',fields=[]):
pass
# TODO: re-impliment
# if name:
# tmpView = View(name)
# else:
# tmpView = View(self.identifier + 'ndt')
# tmpndt = ndt(explore_source)
# for field in fields:
# tmpndt.addColumn(field.__refrs__,field.__refr__)
# tmpView + field.__refrs__
# tmpView.derived_table = tmpndt
# tmpView.tableSource = False
# return tmpView
def setViewName(self,view):
self.properties.addProperty('view_name',view)
def addJoin(self, join):
if isinstance(join,Join):
self.joins.update({join.identifier : join})
return join
elif isinstance(join,View):
tmpjoin = Join(View)
tmpjoin.setName(join.name)
tmpjoin.setTo(join)
self.joins.update({tmpjoin.identifier : tmpjoin})
return tmpjoin
def join(self,join):
return self.addJoin(join)
def getJoins(self):
for field, literal in self.joins.items():
yield literal
def getJoin(self, key):
return self.joins.get(key, {})
class Property(object):
''' A basic property / key value pair.
If the value is a dict it will recusively instantiate properties within itself '''
def __init__(self, name, value):
self.name = name
self.num = 0
if isinstance(value, str):
self.value = value
# lkml.keys.PLURAL_KEYS
# ('view', 'measure', 'dimension', 'dimension_group', 'filter', 'access_filter',
# 'bind_filter', 'map_layer', 'parameter', 'set', 'column', 'derived_column', 'include',
# 'explore', 'link', 'when', 'allowed_value', 'named_value_format', 'join', 'datagroup', 'access_grant',
# 'sql_step', 'action', 'param', 'form_param', 'option', 'user_attribute_param', 'assert', 'test')
elif name in ('links','filters','tags','suggestions',
'actions', 'sets', 'options', 'form_params', 'access_grants','params',
'allowed_values', 'named_value_formats', 'datagroups', 'map_layers', 'columns',
'derived_columns', 'explore_source', 'includes', 'access_filters'):
# elif name+'s' in lkml.keys.PLURAL_KEYS:
self.value = Properties(value, multiValueSpecialHandling=name)
elif isinstance(value, dict) or isinstance(value, list):
self.value = Properties(value)
else:
raise Exception('not a dict, list or string')
def __len__(self):
return len(self.value)
def __add__(self,other):
if isinstance(self.value, str):
raise Exception('`+ and - ` not supported for a single value property, try assigning via the `=` operator')
elif isinstance(self.value, Properties):
self.value.addProperty(self.name,other)
elif isinstance(self.value, list):# and self.multiValueSpecialHandling in ('tags','suggestions'):
self.schema.append(other)
elif self.properties.multiValueSpecialHandling == 'filters':
pass
elif self.properties.multiValueSpecialHandling == 'links':
pass
else:
pass
# def __getattr__(self,key):
# if isinstance(self.value, Properties):
# return self.value[key]
# def __setattr__(self,key, value):
# if isinstance(self.value, Properties):
# return self.value[key]
def __sub__(self,other):
# if isinstance(self.value, Properties) and self.value.multiValueSpecialHandling in ('tags','suggestions'):
if isinstance(self.value, Properties):
self.value.schema.remove(other)
else:
pass
def __iter__(self):
self.num = 0
return self
def __next__(self):
num = self.num
while num <= len(self.value):
return next(self.value)
def __str__(self):
#TODO: multiinstance / plural
#TODO: multivalue / list
#TODO: brackets
#TODO: braces
#TODO: quoted
#TODO: plain
#TODO: SQL / HTML Block ;;
#TODO
def quote_pair():
return splice(self.name, ': "', str(self.value), '"')
def expression_block():
return splice(self.name, ': ', str(self.value), ' ;;')
def brackets():
return splice(self.name, ': [', str(self.value), ']')
def svbrackets():
return splice(self.name, ': [', ''.join(self.value.schema), ']')
def braces():
return splice(self.name, ': {', str(self.value), '}')
def default():
return splice(self.name , ': ' , str(self.value))
def list_member_training_comma():
return splice(str(self.value),',')
def simple():
return str(self.value)
# lkml.keys.PLURAL_KEYS
# ('view', 'measure', 'dimension', 'dimension_group', 'filter', 'access_filter',
# 'bind_filter', 'map_layer', 'parameter', 'set', 'column', 'derived_column', 'include',
# 'explore', 'link', 'when', 'allowed_value', 'named_value_format', 'join', 'datagroup', 'access_grant',
# 'sql_step', 'action', 'param', 'form_param', 'option', 'user_attribute_param', 'assert', 'test')
# lkml.keys.KEYS_WITH_NAME_FIELDS
# ('user_attribute_param', 'param', 'form_param', 'option')
# lkml.keys.QUOTED_LITERAL_KEYS
# ('label', 'view_label', 'group_label', 'group_item_label', 'suggest_persist_for',
# 'default_value', 'direction', 'value_format', 'name', 'url', 'icon_url', 'form_url', 'default', '
# tags', 'value', 'description', 'sortkeys', 'indexes', 'partition_keys', 'connection', 'include',
# 'max_cache_age', 'allowed_values', 'timezone', 'persist_for', 'cluster_keys', 'distribution', 'extents_json_url',
# 'feature_key', 'file', 'property_key', 'property_label_key', 'else')
# lkml.keys.EXPR_BLOCK_KEYS
# ('expression_custom_filter', 'expression', 'html', 'sql_trigger_value', 'sql_table_name', 'sql_distinct_key',
# 'sql_start', 'sql_always_having', 'sql_always_where', 'sql_trigger', 'sql_foreign_key', 'sql_where', 'sql_end',
# 'sql_create', 'sql_latitude', 'sql_longitude', 'sql_step', 'sql_on', 'sql')
# replace with expression block
# if self.name.startswith('sql') or self.name == 'html':
# return splice(self.name, ': ', str(self.value), ' ;;')
if self.name in (
'links','filters','actions','options',
'form_params','sets', 'access_grants',
'params', 'allowed_values', 'named_value_formats',
'datagroups', 'map_layers', 'derived_columns','columns','access_filters'):
return simple()
elif self.name == 'explore_source':
shadow = copy.deepcopy(self.value)
return splice(self.name , ': ' + shadow.schema.pop('name') + ' ', str(shadow))
elif self.name in ('tags'):
return default()
elif self.name in lkml.keys.EXPR_BLOCK_KEYS:
return expression_block()
elif self.name in lkml.keys.QUOTED_LITERAL_KEYS:
return quote_pair()
#single Value brackets
elif self.name in ('extends', 'alias'):
return svbrackets()
elif self.name == "includes":
return splice('include: "',str(self.value),'"')
elif self.name in conf.MULTIVALUE_PROPERTIES:
return default()
elif self.name == ('list_member') and isinstance(self.value,str):
return list_member_training_comma()
elif self.name == 'list_member':
return simple()
elif self.name == 'list_member_quoted':
return simple()
elif self.name == 'field':
return (' '*4 + default())
else:
return default()
class Properties(object):
'''
Treats the collection of properties as a recursive dicitionary
Things that fall outside of uniqueness (special cases):
includes, links, filters, bind_filters
Things that should be their own class:
data_groups, named_value_format, sets
'''
def __init__(self, schema, multiValueSpecialHandling=False):
self.schema = schema
self.num = 0
self.valueiterator = iter(self.schema)
self.multiValueSpecialHandling = multiValueSpecialHandling
def __str__(self):
def process_plural_named_constructs():
singular = self.multiValueSpecialHandling[:-1]
buildString = ""
schemaDeepCopy = copy.deepcopy(self.schema)
for fset in schemaDeepCopy:
buildString += conf.NEWLINEINDENT + conf.INDENT + singular + ': ' + fset.pop('name') + ' '
buildString += str(Property('list_member',fset))
return buildString
def process_plural_unnamed_constructs():
if not self.multiValueSpecialHandling == "filters":
singular = conf.NEWLINE + self.multiValueSpecialHandling[:-1] + ': '
else:
singular = conf.NEWLINE + self.multiValueSpecialHandling + ': '
return splice( singular , singular.join([str(p) for p in self.getProperties()]))
def render(template,delim=' '):
self.templateMap = {
'data': stringify([str(p) for p in self.getProperties()], delim=delim, prefix=False)
}
return Template(getattr(conf.TEMPLATES,template)).substitute(self.templateMap)
if isinstance(self.schema, dict):
return render('array', delim=conf.NEWLINEINDENT)
elif isinstance(self.schema, list) and not self.multiValueSpecialHandling:
return render('_list', delim=' ')
elif isinstance(self.schema, list) and self.multiValueSpecialHandling in ('tags','suggestions'):
return splice(
'[\n ' ,
'\n '.join(['"' + str(p) + '",' for p in self.getProperties()]) ,
'\n ]'
)
elif self.multiValueSpecialHandling in ('filters', 'links', 'actions', 'options', 'form_params','params', "access_filters"):
return process_plural_unnamed_constructs()
elif self.multiValueSpecialHandling in ("access_grants","datagroups","map_layers","named_value_formats","sets", "columns", "derived_columns", "explore_source"):
return process_plural_named_constructs()
elif self.multiValueSpecialHandling == 'allowed_values':
if isinstance(self.schema[0],dict):
return splice('allowed_value: ','\n allowed_value: '.join([str(p) for p in self.getProperties()]))
elif isinstance(self.schema[0],str):
return splice(
'allowed_values: [\n ' ,
'\n '.join(['"' + str(p) + '",' for p in self.getProperties()]) ,
'\n ]'
)
else:
pass
def __getitem__(self, key):
'''
TODO: fix ephemeral properties...
TDOD: Add property subtyping
'''
if isinstance(self.schema, dict):
if key == 'sql':
# return sql_prop(identifier, self.schema.get(identifier, []))
return Property(key, self.schema.get(key, []))
else:
return Property(key, self.schema.get(key, []))
elif isinstance(self.schema, list):
if key == 'sql':
# return sql_prop(identifier, self.schema.get(identifier, []))
return Property(key, self.schema.get(key, []))
else:
return Property(key, self.schema.get(key, []))
def getProperties(self):
if isinstance(self.schema, dict):
for k, v in self.schema.items():
if k in conf.NONUNIQUE_PROPERTIES:
for n in v:
yield Property(k, n)
else:
yield Property(k, v)
elif isinstance(self.schema, list):
for item in self.schema:
if self.multiValueSpecialHandling in ('suggestions','tags','allowed_values'):
yield Property('list_member_quoted',item)
else:
yield Property('list_member',item)
def __iter__(self):
self.valueiterator = iter(self.schema)
return self
def __next__(self):
try:
return next(self.valueiterator)
except:
raise StopIteration
def __add__(self,other):
if isinstance(self.schema, dict):
pass
elif isinstance(self.schema, list) and not self.multiValueSpecialHandling:
pass
elif isinstance(self.schema, list) and self.multiValueSpecialHandling in ('tags','suggestions'):
self.addProperty(self.multiValueSpecialHandling,other)
elif self.multiValueSpecialHandling == 'filters':
pass
elif self.multiValueSpecialHandling == 'links':
pass
else:
pass
def addProperty(self, name, value):
if name in conf.NONUNIQUE_PROPERTIES:
index = self.schema.get(name,[])
index.append(value)
self.schema.update(
{name: index}
)
elif isinstance(self.schema, list):
if value not in self.schema:
self.schema.append(value)
else:
self.schema.update({name: value})
def __delete__(self, identifier):
if isinstance(self.schema,dict):
self.schema.pop(identifier, None)
elif isinstance(self.schema,list):
self.schema.remove(identifier, None)
def isMember(self, property):
if isinstance(self.schema,dict):
return property in self.schema.keys()
elif isinstance(self.schema,list):
return property in self.schema
def props(self):
'''
Returns a list of the property values. Mostly used for membership checking
'''
if isinstance(self.schema, dict):
return self.schema.keys()
elif isinstance(self.schema, list):
return self.schema
def rawPropValue(self,key):
'''
if dict type schema, needs a prop name. If list type schema needs a number index
'''
return self.schema[key]
def __len__(self):
return len(self.schema)
class Field(base):
''' Base class for fields in LookML, only derived/child types should be instantiated '''
def __init__(self, input):
self.db_column = ''
super(Field, self).__init__(input)
self.templateMap = {
}
def children(self):
if self.view:
for dependent in self.view.search('sql',[self.__refsre__,self.__refre__]):
yield dependent
def setName_safe(self, newName):
'''
Change the name of the field and references to it in sql (does not yet perform the same for HTML / Links / Drill Fields / Sets / Actions etc)
'''
#TODO: complete checking all places for dependencies.
old = copy.deepcopy(self.name)
oldrefsre = copy.deepcopy(self.__refsre__)
oldrefre = copy.deepcopy(self.__refre__)
self.setName(newName)
for f in self.view.search('sql',[oldrefsre,oldrefre]):
f.sql = re.sub(oldrefsre, self.__refs__, str(f.sql.value))
f.sql = re.sub(oldrefre, self.__ref__, str(f.sql.value))
self.view.removeField(old)
self.view + self
return self
def __getattr__(self, key):
if key == 'name':
return self.identifier
elif key == 'pk':
return self.getPrimaryKey()
#full reference
elif key == '__ref__':
if self.view:
return splice('${' , self.view.identifier , '.' , self.identifier , '}')
#Short Reference
elif key == '__refs__':
return splice('${' , self.identifier , '}')
#full reference -- regex escaped
elif key == '__refre__':
if self.view:
return splice('\$\{' , self.view.identifier , '\.' , self.identifier , '\}')
#Short reference -- regex escaped
elif key == '__refsre__':
if self.view:
return splice('\$\{' , self.identifier , '\}')
#Raw Reference
elif key == '__refr__':
if self.view:
return splice(self.view.identifier , '.' , self.identifier)
#Raw refence short
elif key == '__refrs__':
if self.view:
return splice(self.identifier)
#Raw Reference regex
elif key == '__refrre__':
if self.view:
return splice(self.view.identifier , '\.' , self.identifier)
else:
return self.getProperty(key)
def __setattr__(self, name, value):
if name == 'label':
self.setLabel(value)
return self
elif name == 'name':
self.setName(value)
return self
# elif name in self.properties.props():
elif name in conf.language_rules.field_props:
return self.setProperty(name,value)
else:
object.__setattr__(self, name, value)
def setDescription(self,value):
return self.setProperty('description', value)
def addTag(self,tag):
if self.properties.isMember('tags'):
if tag not in self.tags:
# self.tags.value.schema['tags'].append(tag)
self.tags.value.schema.append(tag)
#Else it's already a member
else:
self.setProperty('tags',[tag])
def removeTag(self,tag):
if self.properties.isMember('tags'):
self.tags.value.schema.remove(tag)
else:
pass
def setView(self, view):
'''
'''
self.view = view
return self # satisfies a need to linkback (look where setView is called)
def setSql(self, sql):
self.setProperty('sql', sql)
return self
def setType(self, type):
''''''
self.properties.addProperty('type', type)
return self
def setNumber(self):
''''''
return self.setType('number')
def setString(self):
''''''
return self.setType('string')
def setViewLabel(self, viewLabel):
''''''
return self.setProperty('view_label', viewLabel)
def sql_nvl(self,value_if_null):
self.sql = "NVL(" + str(self.sql.value) + "," + value_if_null + ")"
class Dimension(Field):
def __init__(self, input):
super(Dimension, self).__init__(input)
self.token = 'dimension'
def isPrimaryKey(self):
if self.hasProp('primary_key') and self.getProperty('primary_key').value == 'yes':
return True
else:
return False
def setDBColumn(self, dbColumn, changeIdentifier=True):
''''''
self.db_column = dbColumn
self.setProperty('sql', splice('${TABLE}.' , conf.DB_FIELD_DELIMITER_START , self.db_column , conf.DB_FIELD_DELIMITER_END))
if changeIdentifier:
self.identifier =lookCase(self.db_column)
return self
def setAllLabels(self, group: None, item: None, label: None):
if group:
self.setProperty('group_label', group)
if item:
self.setProperty('group_item_label', item)
if label:
self.setProperty('label', label)
return self
def setPrimaryKey(self):
self.setProperty('primary_key', 'yes')
# self.view.setPrimaryKey(self.identifier, callFromChild=True)
return self
def unSetPrimaryKey(self):
self.unSetProperty('primary_key')
return self
def setTier(self, tiers=[]):
if tiers:
self.setProperty('tiers', '[0,5,10,15,20]')
else:
self.setProperty('tiers', '[' + ','.join(tiers) + ']')
return self.setType('tier')
def addLink(self,url,label,icon_url='https://looker.com/favicon.ico'):
self.properties.addProperty('link',{
'url' :url
,'label' :label
,'icon_url':icon_url
})
return self
class DimensionGroup(Field):
def __init__(self, input):
super(DimensionGroup, self).__init__(input)
if not self.properties.isMember('timeframes'):
self.properties.addProperty('timeframes', splice('[','{},'.format(conf.NEWLINEINDENT).join(conf.TIMEFRAMES),']'))
if not self.properties.isMember('type'):
self.properties.addProperty('type', 'time')
# if not self.properties.isMember('sql'):
# self.properties.addProperty('sql', splice('${TABLE}.' , conf.DB_FIELD_DELIMITER_START , self.db_column , conf.DB_FIELD_DELIMITER_END))
self.token = '<PASSWORD>'
def setDBColumn(self, dbColumn, changeIdentifier=True):
''''''
self.db_column = dbColumn
self.setProperty('sql', splice('${TABLE}.' , conf.DB_FIELD_DELIMITER_START , self.db_column , conf.DB_FIELD_DELIMITER_END))
if changeIdentifier:
self.identifier = lookCase(self.db_column)
return self
class Measure(Field):
def __init__(self, input):
super(Measure, self).__init__(input)
self.token = 'measure'
class Filter(Field):
def __init__(self, input):
super(Filter, self).__init__(input)
self.token = 'filter'
class Parameter(Field):
def __init__(self, input):
super(Parameter, self).__init__(input)
self.token = 'parameter'
#next Minor release::
# TODO: set configurations via command line and environment variable
# TODO: make __getatt__ / __setattr__ consistent across classes
# TODO: Implement remaining collections iteration, top level file attributes (data groups, named value format etc)
# TODO: ensure the top level stuff for file works, i.e. accessors for plurals like data groups etc
# Dependency Graphing:
# TODO: Ancenstor functions?
# TODO: Child function support renaming across all properties (html, links, etc)
# TODO: Multi-generation dependency tracing (ancestor / decendangt)
# TODO: cross file / whole project?
# Code Cleanliness / pip:
# TODO: rationally break up the megafile...
# TODO: use the _variable name for all private variables
# TODO: change "identifier" to _name
# Unit Testing:
# TODO: Redesign / modularize test suite
#* Basic parsing loop,
#* network enabled loop, github / shell
# TODO: test iteration behaviors
######### V3+ #########
# TODO: Implement MVC?
# * model -> could eliminate the "phanton property" in that a class instance is only created on get / observation.... (getters and setters should mutate the underlying json at all times to ensure conssistency)
# TODO: Rationalize View rendering
# TODO: elimnate property / properties classes? -> replace with model? Think through getter / setter / render
# TODO: Integrate Tom's script for dependency graphing OO
# TODO: Common Sql Functions added to the SQL paramter
# TODO: Common html Functions added to the html paramter
# TODO: Manifest
# TODO: contants
# TODO: locale
# TODO: slots / performance optimizaiton
# TODO: Interactive CLI
# TODO: Update LKML to support new filters syntax
# TODO: additional documentation
# Finish Documenting every funtion for the autodocs
# Usecase oriented documentation (move to the.rst file):
# loop through all the files in a project make a change and update
# Auto - tune your model
# Looker API Query the database and create a new view file / EAV unnest (superview & multi-model approach)
# BQ Unnest
# Use dependency tracing
# BQML
# DONE: Top N
# Aggregate Awareness Macro (materialization + refinements)
# Calendar Table
# SFDC Waterfall
# Multi Grain period over period
# Drill to vis with constants
# Incremental PDTs? --> This breaks as of Looker 7?
# Negative Intervals Hacking
# Linking macro, Intel linking block?
# Fancy Conditional Formatting examples
# Something with slowly changing dimensions
# lambda / cloud function example?
# TODO: Write a test that would use materialization and refinements
# Meterialization:
# explore: event {
# aggregate_table: monthly_orders {
# materialization: {
# datagroup_trigger: orders_datagroup
# }
# query: {
# dimensions: [orders.created_month]
# measures: [orders.count]
# #filters: [orders.created_date: "1 year", orders.status: "fulfilled"]
# filters: {
# field: orders.created_date
# value: "1 year"
# }
# filters: {
# field: orders.status
# value: "fulfilled"
# }
# timezone: "America/Los_Angeles"
# }
# }
# } | [
"copy.deepcopy",
"os.path.isfile",
"os.path.basename",
"lkml.load",
"re.sub",
"re.findall",
"os.path.relpath"
] | [((201, 246), 're.sub', 're.sub', (['"""(.)([A-Z][a-z]+)"""', '"""\\\\1_\\\\2"""', 'string'], {}), "('(.)([A-Z][a-z]+)', '\\\\1_\\\\2', string)\n", (207, 246), False, 'import re, os, shutil\n'), ((463, 587), 're.sub', 're.sub', (['"""(\\\\s|/|\\\\[|\\\\]|\\\\||\\\\,|<|>|\\\\.|\\\\?|\\\\{|\\\\}|#|=|~|!|\\\\+|\\\\$|\\\\%|\\\\^|\\\\&|\\\\*|\\\\(|\\\\)|\\\\-|\\\\:)+"""', '""""""', 'string'], {}), "(\n '(\\\\s|/|\\\\[|\\\\]|\\\\||\\\\,|<|>|\\\\.|\\\\?|\\\\{|\\\\}|#|=|~|!|\\\\+|\\\\$|\\\\%|\\\\^|\\\\&|\\\\*|\\\\(|\\\\)|\\\\-|\\\\:)+'\n , '', string)\n", (469, 587), False, 'import re, os, shutil\n'), ((776, 811), 're.sub', 're.sub', (['"""\\\\s{10,}"""', '"""\\\\n """', 'string'], {}), "('\\\\s{10,}', '\\\\n ', string)\n", (782, 811), False, 'import re, os, shutil\n'), ((1720, 1979), 're.findall', 're.findall', (['"""(\\\\$\\\\{([a-z\\\\._0-9]*)\\\\}|\\\\{\\\\%\\\\s{1,3}condition\\\\s([a-z\\\\._0-9]*)\\\\s\\\\%\\\\}|\\\\{\\\\%\\\\s{1,3}parameter\\\\s([a-z\\\\._0-9]*)\\\\s\\\\%\\\\}|\\\\{\\\\{\\\\s{0,10}([a-z\\\\._0-9]*)\\\\s{0,10}\\\\}\\\\}| \\\\_filters\\\\[\\\\s{0,10}\\\\\'([a-z\\\\._0-9]*)\\\\\'\\\\])"""', 'inputString'], {}), '(\n "(\\\\$\\\\{([a-z\\\\._0-9]*)\\\\}|\\\\{\\\\%\\\\s{1,3}condition\\\\s([a-z\\\\._0-9]*)\\\\s\\\\%\\\\}|\\\\{\\\\%\\\\s{1,3}parameter\\\\s([a-z\\\\._0-9]*)\\\\s\\\\%\\\\}|\\\\{\\\\{\\\\s{0,10}([a-z\\\\._0-9]*)\\\\s{0,10}\\\\}\\\\}| \\\\_filters\\\\[\\\\s{0,10}\\\\\'([a-z\\\\._0-9]*)\\\\\'\\\\])"\n , inputString)\n', (1730, 1979), False, 'import re, os, shutil\n'), ((9474, 9500), 'copy.deepcopy', 'copy.deepcopy', (['lkmldictraw'], {}), '(lkmldictraw)\n', (9487, 9500), False, 'import time, copy\n'), ((55408, 55432), 'copy.deepcopy', 'copy.deepcopy', (['self.name'], {}), '(self.name)\n', (55421, 55432), False, 'import time, copy\n'), ((55453, 55483), 'copy.deepcopy', 'copy.deepcopy', (['self.__refsre__'], {}), '(self.__refsre__)\n', (55466, 55483), False, 'import time, copy\n'), ((55503, 55532), 'copy.deepcopy', 'copy.deepcopy', (['self.__refre__'], {}), '(self.__refre__)\n', (55516, 55532), False, 'import time, copy\n'), ((257, 301), 're.sub', 're.sub', (['"""([a-z0-9])([A-Z])"""', '"""\\\\1_\\\\2"""', 'str1'], {}), "('([a-z0-9])([A-Z])', '\\\\1_\\\\2', str1)\n", (263, 301), False, 'import re, os, shutil\n'), ((5182, 5197), 'lkml.load', 'lkml.load', (['data'], {}), '(data)\n', (5191, 5197), False, 'import lkml, github\n'), ((5339, 5358), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (5355, 5358), False, 'import re, os, shutil\n'), ((5722, 5740), 'os.path.relpath', 'os.path.relpath', (['f'], {}), '(f)\n', (5737, 5740), False, 'import re, os, shutil\n'), ((11711, 11727), 'lkml.load', 'lkml.load', (['other'], {}), '(other)\n', (11720, 11727), False, 'import lkml, github\n'), ((48664, 48690), 'copy.deepcopy', 'copy.deepcopy', (['self.schema'], {}), '(self.schema)\n', (48677, 48690), False, 'import time, copy\n'), ((5889, 5903), 'lkml.load', 'lkml.load', (['tmp'], {}), '(tmp)\n', (5898, 5903), False, 'import lkml, github\n'), ((46893, 46918), 'copy.deepcopy', 'copy.deepcopy', (['self.value'], {}), '(self.value)\n', (46906, 46918), False, 'import time, copy\n'), ((7325, 7342), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (7339, 7342), False, 'import re, os, shutil\n'), ((10125, 10141), 'lkml.load', 'lkml.load', (['other'], {}), '(other)\n', (10134, 10141), False, 'import lkml, github\n'), ((39228, 39244), 'lkml.load', 'lkml.load', (['other'], {}), '(other)\n', (39237, 39244), False, 'import lkml, github\n'), ((18821, 18837), 'lkml.load', 'lkml.load', (['other'], {}), '(other)\n', (18830, 18837), False, 'import lkml, github\n')] |
import functools
import numpy as np
import pandas as pd
from NN_base import load_network, save_network, create_network
from tic_tac_toe import TicTacToeGameSpec, play_game
from TD_lambda import TD_train
NETWORK_FILE_PATH = None
NUMBER_OF_GAMES_TO_RUN = 500
NUMBER_OF_TEST = 200
NUMBER_OF_ROUNDS = 100
EPSILON = 0.1
TAU = 0.8
LAMBDA = 0.3
DECAY_RATE = 0.95
DECAY_STEP = 1000
ALPHA = 0.04 ## starting learning rate
game_spec = TicTacToeGameSpec()
create_network_func = functools.partial(create_network, input_nodes=9, hidden_nodes=(20,30), output_nodes=1, output_softmax=False)
results = TD_train(game_spec,
create_network_func,
network_file_path = None,
opp_func = None,
number_of_games = NUMBER_OF_GAMES_TO_RUN,
number_of_test = NUMBER_OF_TEST,
number_of_rounds = NUMBER_OF_ROUNDS,
epsilon = EPSILON,
tau = TAU,
lamda = LAMBDA,
decay_rate = DECAY_RATE,
decay_steps = DECAY_STEP,
alpha_start = ALPHA)
| [
"tic_tac_toe.TicTacToeGameSpec",
"functools.partial",
"TD_lambda.TD_train"
] | [((429, 448), 'tic_tac_toe.TicTacToeGameSpec', 'TicTacToeGameSpec', ([], {}), '()\n', (446, 448), False, 'from tic_tac_toe import TicTacToeGameSpec, play_game\n'), ((471, 584), 'functools.partial', 'functools.partial', (['create_network'], {'input_nodes': '(9)', 'hidden_nodes': '(20, 30)', 'output_nodes': '(1)', 'output_softmax': '(False)'}), '(create_network, input_nodes=9, hidden_nodes=(20, 30),\n output_nodes=1, output_softmax=False)\n', (488, 584), False, 'import functools\n'), ((591, 901), 'TD_lambda.TD_train', 'TD_train', (['game_spec', 'create_network_func'], {'network_file_path': 'None', 'opp_func': 'None', 'number_of_games': 'NUMBER_OF_GAMES_TO_RUN', 'number_of_test': 'NUMBER_OF_TEST', 'number_of_rounds': 'NUMBER_OF_ROUNDS', 'epsilon': 'EPSILON', 'tau': 'TAU', 'lamda': 'LAMBDA', 'decay_rate': 'DECAY_RATE', 'decay_steps': 'DECAY_STEP', 'alpha_start': 'ALPHA'}), '(game_spec, create_network_func, network_file_path=None, opp_func=\n None, number_of_games=NUMBER_OF_GAMES_TO_RUN, number_of_test=\n NUMBER_OF_TEST, number_of_rounds=NUMBER_OF_ROUNDS, epsilon=EPSILON, tau\n =TAU, lamda=LAMBDA, decay_rate=DECAY_RATE, decay_steps=DECAY_STEP,\n alpha_start=ALPHA)\n', (599, 901), False, 'from TD_lambda import TD_train\n')] |
import torch.nn as nn
import torch
from function import normal
from function import calc_mean_std
decoder = nn.Sequential(
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 256, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 128, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 64, (3, 3)),
nn.ReLU(),
nn.Upsample(scale_factor=2, mode='nearest'),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 3, (3, 3)),
)
vgg = nn.Sequential(
nn.Conv2d(3, 3, (1, 1)),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, (3, 3)),
nn.ReLU(), # relu1-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, (3, 3)),
nn.ReLU(), # relu1-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, (3, 3)),
nn.ReLU(), # relu2-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, (3, 3)),
nn.ReLU(), # relu2-2
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, (3, 3)),
nn.ReLU(), # relu3-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, (3, 3)),
nn.ReLU(), # relu3-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, (3, 3)),
nn.ReLU(), # relu4-1, this is the last layer used
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu4-4
nn.MaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-1
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-2
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU(), # relu5-3
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, (3, 3)),
nn.ReLU() # relu5-4
)
class SANet(nn.Module):
def __init__(self, in_dim):
super(SANet, self).__init__()
self.f = nn.Conv2d(in_dim , in_dim , (1,1))
self.g = nn.Conv2d(in_dim , in_dim , (1,1))
self.h = nn.Conv2d(in_dim , in_dim , (1,1))
self.softmax = nn.Softmax(dim=-1)
self.out_conv = nn.Conv2d(in_dim, in_dim, (1, 1))
def forward(self,content_feat,style_feat):
B,C,H,W = content_feat.size()
F_Fc_norm = self.f(normal(content_feat)).view(B,-1,H*W).permute(0,2,1)
B,C,H,W = style_feat.size()
G_Fs_norm = self.g(normal(style_feat)).view(B,-1,H*W)
energy = torch.bmm(F_Fc_norm,G_Fs_norm)
attention = self.softmax(energy)
H_Fs = self.h(style_feat).view(B,-1,H*W)
out = torch.bmm(H_Fs,attention.permute(0,2,1) )
B,C,H,W = content_feat.size()
out = out.view(B,C,H,W)
out = self.out_conv(out)
out += content_feat
return out
class Self_Attention_Module(nn.Module):
def __init__(self, in_dim):
super(Self_Attention_Module, self).__init__()
self.SAN1=SANet(in_dim)
self.SAN2=SANet(in_dim)
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.merge_conv_pad = nn.ReflectionPad2d((1, 1, 1, 1))
self.merge_conv = nn.Conv2d(in_dim, in_dim, (3, 3))
def forward(self, content_feats, style_feats):
Fcsc_5 = self.SAN1(content_feats[-1], style_feats[-1])
Fcsc_5_up=self.upsample(Fcsc_5)
Fcsc_4 = self.SAN2(content_feats[-2], style_feats[-2])
Fcsc_4_plus_5=Fcsc_4+Fcsc_5_up
Fcsc_4_plus_5=self.merge_conv_pad(Fcsc_4_plus_5)
Fcsc_m=self.merge_conv(Fcsc_4_plus_5)
return Fcsc_m
class Net(nn.Module):
def __init__(self, encoder, decoder):
super(Net, self).__init__()
enc_layers = list(encoder.children())
self.enc_1 = nn.Sequential(*enc_layers[:4]) # input -> relu1_1
self.enc_2 = nn.Sequential(*enc_layers[4:11]) # relu1_1 -> relu2_1
self.enc_3 = nn.Sequential(*enc_layers[11:18]) # relu2_1 -> relu3_1
self.enc_4 = nn.Sequential(*enc_layers[18:31]) # relu3_1 -> relu4_1
self.enc_5 = nn.Sequential(*enc_layers[31:44]) # relu4_1 -> relu5_1
#transform
self.sa_module = Self_Attention_Module(512)
self.decoder = decoder
self.mse_loss = nn.MSELoss()
# fix the encoder
for name in ['enc_1', 'enc_2', 'enc_3', 'enc_4', 'enc_5']:
for param in getattr(self, name).parameters():
param.requires_grad = False
# extract relu1_1, relu2_1, relu3_1, relu4_1, relu5_1 from input image
def encode_with_intermediate(self, input):
results = [input]
for i in range(5):
func = getattr(self, 'enc_{:d}'.format(i + 1))
results.append(func(results[-1]))
return results[1:]
def calc_content_loss(self, input, target):
assert (input.size() == target.size())
assert (target.requires_grad is False)
return self.mse_loss(input, target)
def calc_style_loss(self, input, target):
assert (input.size() == target.size())
assert (target.requires_grad is False)
input_mean, input_std = calc_mean_std(input)
target_mean, target_std = calc_mean_std(target)
return self.mse_loss(input_mean, target_mean) + \
self.mse_loss(input_std, target_std)
def forward(self, content, style):
style_feats = self.encode_with_intermediate(style)
content_feats = self.encode_with_intermediate(content)
Ics = self.decoder(self.sa_module(content_feats, style_feats))
Ics_feats = self.encode_with_intermediate(Ics)
# Content loss
loss_c = self.calc_content_loss(normal(Ics_feats[-1]), normal(content_feats[-1]))+self.calc_content_loss(normal(Ics_feats[-2]), normal(content_feats[-2]))
# Style loss
loss_s = self.calc_style_loss(Ics_feats[0], style_feats[0])
for i in range(1, 5):
loss_s += self.calc_style_loss(Ics_feats[i], style_feats[i])
#Identity losses lambda 1
Icc = self.decoder(self.sa_module(content_feats, content_feats))
Iss = self.decoder(self.sa_module(style_feats, style_feats))
loss_lambda1 = self.calc_content_loss(Icc,content)+self.calc_content_loss(Iss,style)
#Identity losses lambda 2
Icc_feats=self.encode_with_intermediate(Icc)
Iss_feats=self.encode_with_intermediate(Iss)
loss_lambda2 = self.calc_content_loss(Icc_feats[0], content_feats[0])+self.calc_content_loss(Iss_feats[0], style_feats[0])
for i in range(1, 5):
loss_lambda2 += self.calc_content_loss(Icc_feats[i], content_feats[i])+self.calc_content_loss(Iss_feats[i], style_feats[i])
return loss_c, loss_s, loss_lambda1, loss_lambda2
| [
"torch.nn.ReLU",
"torch.nn.Softmax",
"function.normal",
"torch.nn.Sequential",
"torch.nn.ReflectionPad2d",
"torch.nn.Conv2d",
"torch.nn.MSELoss",
"torch.nn.MaxPool2d",
"torch.nn.Upsample",
"torch.bmm",
"function.calc_mean_std"
] | [((128, 160), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (146, 160), True, 'import torch.nn as nn\n'), ((166, 193), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)', '(3, 3)'], {}), '(512, 256, (3, 3))\n', (175, 193), True, 'import torch.nn as nn\n'), ((199, 208), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (206, 208), True, 'import torch.nn as nn\n'), ((214, 257), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (225, 257), True, 'import torch.nn as nn\n'), ((263, 295), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (281, 295), True, 'import torch.nn as nn\n'), ((301, 328), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {}), '(256, 256, (3, 3))\n', (310, 328), True, 'import torch.nn as nn\n'), ((334, 343), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (341, 343), True, 'import torch.nn as nn\n'), ((349, 381), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (367, 381), True, 'import torch.nn as nn\n'), ((387, 414), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {}), '(256, 256, (3, 3))\n', (396, 414), True, 'import torch.nn as nn\n'), ((420, 429), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (427, 429), True, 'import torch.nn as nn\n'), ((435, 467), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (453, 467), True, 'import torch.nn as nn\n'), ((473, 500), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {}), '(256, 256, (3, 3))\n', (482, 500), True, 'import torch.nn as nn\n'), ((506, 515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (513, 515), True, 'import torch.nn as nn\n'), ((521, 553), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (539, 553), True, 'import torch.nn as nn\n'), ((559, 586), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3, 3)'], {}), '(256, 128, (3, 3))\n', (568, 586), True, 'import torch.nn as nn\n'), ((592, 601), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (599, 601), True, 'import torch.nn as nn\n'), ((607, 650), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (618, 650), True, 'import torch.nn as nn\n'), ((656, 688), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (674, 688), True, 'import torch.nn as nn\n'), ((694, 721), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3, 3)'], {}), '(128, 128, (3, 3))\n', (703, 721), True, 'import torch.nn as nn\n'), ((727, 736), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (734, 736), True, 'import torch.nn as nn\n'), ((742, 774), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (760, 774), True, 'import torch.nn as nn\n'), ((780, 806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3, 3)'], {}), '(128, 64, (3, 3))\n', (789, 806), True, 'import torch.nn as nn\n'), ((812, 821), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (819, 821), True, 'import torch.nn as nn\n'), ((827, 870), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (838, 870), True, 'import torch.nn as nn\n'), ((876, 908), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (894, 908), True, 'import torch.nn as nn\n'), ((914, 939), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3, 3)'], {}), '(64, 64, (3, 3))\n', (923, 939), True, 'import torch.nn as nn\n'), ((945, 954), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (952, 954), True, 'import torch.nn as nn\n'), ((960, 992), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (978, 992), True, 'import torch.nn as nn\n'), ((998, 1022), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)', '(3, 3)'], {}), '(64, 3, (3, 3))\n', (1007, 1022), True, 'import torch.nn as nn\n'), ((1052, 1075), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)', '(1, 1)'], {}), '(3, 3, (1, 1))\n', (1061, 1075), True, 'import torch.nn as nn\n'), ((1081, 1113), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1099, 1113), True, 'import torch.nn as nn\n'), ((1119, 1143), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3, 3)'], {}), '(3, 64, (3, 3))\n', (1128, 1143), True, 'import torch.nn as nn\n'), ((1149, 1158), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1156, 1158), True, 'import torch.nn as nn\n'), ((1175, 1207), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1193, 1207), True, 'import torch.nn as nn\n'), ((1213, 1238), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3, 3)'], {}), '(64, 64, (3, 3))\n', (1222, 1238), True, 'import torch.nn as nn\n'), ((1244, 1253), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1251, 1253), True, 'import torch.nn as nn\n'), ((1270, 1322), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2, 2)', '(0, 0)'], {'ceil_mode': '(True)'}), '((2, 2), (2, 2), (0, 0), ceil_mode=True)\n', (1282, 1322), True, 'import torch.nn as nn\n'), ((1328, 1360), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1346, 1360), True, 'import torch.nn as nn\n'), ((1366, 1392), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3, 3)'], {}), '(64, 128, (3, 3))\n', (1375, 1392), True, 'import torch.nn as nn\n'), ((1398, 1407), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1405, 1407), True, 'import torch.nn as nn\n'), ((1424, 1456), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1442, 1456), True, 'import torch.nn as nn\n'), ((1462, 1489), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3, 3)'], {}), '(128, 128, (3, 3))\n', (1471, 1489), True, 'import torch.nn as nn\n'), ((1495, 1504), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1502, 1504), True, 'import torch.nn as nn\n'), ((1521, 1573), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2, 2)', '(0, 0)'], {'ceil_mode': '(True)'}), '((2, 2), (2, 2), (0, 0), ceil_mode=True)\n', (1533, 1573), True, 'import torch.nn as nn\n'), ((1579, 1611), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1597, 1611), True, 'import torch.nn as nn\n'), ((1617, 1644), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3, 3)'], {}), '(128, 256, (3, 3))\n', (1626, 1644), True, 'import torch.nn as nn\n'), ((1650, 1659), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1657, 1659), True, 'import torch.nn as nn\n'), ((1676, 1708), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1694, 1708), True, 'import torch.nn as nn\n'), ((1714, 1741), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {}), '(256, 256, (3, 3))\n', (1723, 1741), True, 'import torch.nn as nn\n'), ((1747, 1756), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1754, 1756), True, 'import torch.nn as nn\n'), ((1773, 1805), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1791, 1805), True, 'import torch.nn as nn\n'), ((1811, 1838), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {}), '(256, 256, (3, 3))\n', (1820, 1838), True, 'import torch.nn as nn\n'), ((1844, 1853), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1851, 1853), True, 'import torch.nn as nn\n'), ((1870, 1902), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (1888, 1902), True, 'import torch.nn as nn\n'), ((1908, 1935), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3, 3)'], {}), '(256, 256, (3, 3))\n', (1917, 1935), True, 'import torch.nn as nn\n'), ((1941, 1950), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1948, 1950), True, 'import torch.nn as nn\n'), ((1967, 2019), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2, 2)', '(0, 0)'], {'ceil_mode': '(True)'}), '((2, 2), (2, 2), (0, 0), ceil_mode=True)\n', (1979, 2019), True, 'import torch.nn as nn\n'), ((2025, 2057), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2043, 2057), True, 'import torch.nn as nn\n'), ((2063, 2090), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)', '(3, 3)'], {}), '(256, 512, (3, 3))\n', (2072, 2090), True, 'import torch.nn as nn\n'), ((2096, 2105), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2103, 2105), True, 'import torch.nn as nn\n'), ((2151, 2183), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2169, 2183), True, 'import torch.nn as nn\n'), ((2189, 2216), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2198, 2216), True, 'import torch.nn as nn\n'), ((2222, 2231), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2229, 2231), True, 'import torch.nn as nn\n'), ((2248, 2280), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2266, 2280), True, 'import torch.nn as nn\n'), ((2286, 2313), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2295, 2313), True, 'import torch.nn as nn\n'), ((2319, 2328), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2326, 2328), True, 'import torch.nn as nn\n'), ((2345, 2377), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2363, 2377), True, 'import torch.nn as nn\n'), ((2383, 2410), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2392, 2410), True, 'import torch.nn as nn\n'), ((2416, 2425), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2423, 2425), True, 'import torch.nn as nn\n'), ((2442, 2494), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2, 2)', '(2, 2)', '(0, 0)'], {'ceil_mode': '(True)'}), '((2, 2), (2, 2), (0, 0), ceil_mode=True)\n', (2454, 2494), True, 'import torch.nn as nn\n'), ((2500, 2532), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2518, 2532), True, 'import torch.nn as nn\n'), ((2538, 2565), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2547, 2565), True, 'import torch.nn as nn\n'), ((2571, 2580), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2578, 2580), True, 'import torch.nn as nn\n'), ((2597, 2629), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2615, 2629), True, 'import torch.nn as nn\n'), ((2635, 2662), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2644, 2662), True, 'import torch.nn as nn\n'), ((2668, 2677), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2675, 2677), True, 'import torch.nn as nn\n'), ((2694, 2726), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2712, 2726), True, 'import torch.nn as nn\n'), ((2732, 2759), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2741, 2759), True, 'import torch.nn as nn\n'), ((2765, 2774), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2772, 2774), True, 'import torch.nn as nn\n'), ((2791, 2823), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (2809, 2823), True, 'import torch.nn as nn\n'), ((2829, 2856), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)', '(3, 3)'], {}), '(512, 512, (3, 3))\n', (2838, 2856), True, 'import torch.nn as nn\n'), ((2862, 2871), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2869, 2871), True, 'import torch.nn as nn\n'), ((2998, 3031), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', 'in_dim', '(1, 1)'], {}), '(in_dim, in_dim, (1, 1))\n', (3007, 3031), True, 'import torch.nn as nn\n'), ((3050, 3083), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', 'in_dim', '(1, 1)'], {}), '(in_dim, in_dim, (1, 1))\n', (3059, 3083), True, 'import torch.nn as nn\n'), ((3102, 3135), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', 'in_dim', '(1, 1)'], {}), '(in_dim, in_dim, (1, 1))\n', (3111, 3135), True, 'import torch.nn as nn\n'), ((3161, 3179), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (3171, 3179), True, 'import torch.nn as nn\n'), ((3204, 3237), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', 'in_dim', '(1, 1)'], {}), '(in_dim, in_dim, (1, 1))\n', (3213, 3237), True, 'import torch.nn as nn\n'), ((3526, 3557), 'torch.bmm', 'torch.bmm', (['F_Fc_norm', 'G_Fs_norm'], {}), '(F_Fc_norm, G_Fs_norm)\n', (3535, 3557), False, 'import torch\n'), ((4075, 4118), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (4086, 4118), True, 'import torch.nn as nn\n'), ((4149, 4181), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1, 1, 1, 1)'], {}), '((1, 1, 1, 1))\n', (4167, 4181), True, 'import torch.nn as nn\n'), ((4208, 4241), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', 'in_dim', '(3, 3)'], {}), '(in_dim, in_dim, (3, 3))\n', (4217, 4241), True, 'import torch.nn as nn\n'), ((4791, 4821), 'torch.nn.Sequential', 'nn.Sequential', (['*enc_layers[:4]'], {}), '(*enc_layers[:4])\n', (4804, 4821), True, 'import torch.nn as nn\n'), ((4863, 4895), 'torch.nn.Sequential', 'nn.Sequential', (['*enc_layers[4:11]'], {}), '(*enc_layers[4:11])\n', (4876, 4895), True, 'import torch.nn as nn\n'), ((4939, 4972), 'torch.nn.Sequential', 'nn.Sequential', (['*enc_layers[11:18]'], {}), '(*enc_layers[11:18])\n', (4952, 4972), True, 'import torch.nn as nn\n'), ((5016, 5049), 'torch.nn.Sequential', 'nn.Sequential', (['*enc_layers[18:31]'], {}), '(*enc_layers[18:31])\n', (5029, 5049), True, 'import torch.nn as nn\n'), ((5093, 5126), 'torch.nn.Sequential', 'nn.Sequential', (['*enc_layers[31:44]'], {}), '(*enc_layers[31:44])\n', (5106, 5126), True, 'import torch.nn as nn\n'), ((5275, 5287), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5285, 5287), True, 'import torch.nn as nn\n'), ((6146, 6166), 'function.calc_mean_std', 'calc_mean_std', (['input'], {}), '(input)\n', (6159, 6166), False, 'from function import calc_mean_std\n'), ((6201, 6222), 'function.calc_mean_std', 'calc_mean_std', (['target'], {}), '(target)\n', (6214, 6222), False, 'from function import calc_mean_std\n'), ((6688, 6709), 'function.normal', 'normal', (['Ics_feats[-1]'], {}), '(Ics_feats[-1])\n', (6694, 6709), False, 'from function import normal\n'), ((6711, 6736), 'function.normal', 'normal', (['content_feats[-1]'], {}), '(content_feats[-1])\n', (6717, 6736), False, 'from function import normal\n'), ((6761, 6782), 'function.normal', 'normal', (['Ics_feats[-2]'], {}), '(Ics_feats[-2])\n', (6767, 6782), False, 'from function import normal\n'), ((6784, 6809), 'function.normal', 'normal', (['content_feats[-2]'], {}), '(content_feats[-2])\n', (6790, 6809), False, 'from function import normal\n'), ((3472, 3490), 'function.normal', 'normal', (['style_feat'], {}), '(style_feat)\n', (3478, 3490), False, 'from function import normal\n'), ((3356, 3376), 'function.normal', 'normal', (['content_feat'], {}), '(content_feat)\n', (3362, 3376), False, 'from function import normal\n')] |
from __future__ import absolute_import, division, print_function
from ..core import Expr
from datashape import dshape
from .boolean import BooleanInterface
from .numbers import NumberInterface
class ScalarSymbol(NumberInterface, BooleanInterface):
__slots__ = '_name', 'dtype'
def __init__(self, name, dtype='real'):
self._name = name
self.dtype = dshape(dtype)
@property
def name(self):
return self._name
@property
def dshape(self):
return dshape(self.dtype)
def __str__(self):
return str(self._name)
__hash__ = Expr.__hash__
| [
"datashape.dshape"
] | [((376, 389), 'datashape.dshape', 'dshape', (['dtype'], {}), '(dtype)\n', (382, 389), False, 'from datashape import dshape\n'), ((503, 521), 'datashape.dshape', 'dshape', (['self.dtype'], {}), '(self.dtype)\n', (509, 521), False, 'from datashape import dshape\n')] |
from dotenv import load_dotenv
import tweepy
from os import getenv
from typing import BinaryIO
BASE_BIO = "Inspired by @screenshakes. Powered by PyBoy: http://github.com/Baekalfen/PyBoy\n"
load_dotenv()
auth = tweepy.OAuthHandler(getenv('TWITTER_KEY'), getenv('TWITTER_SECRET'))
auth.set_access_token(getenv('TWITTER_ACCESS'), getenv('TWITTER_ACCESS_TOKEN'))
api = tweepy.API(auth)
def get_replies_from_latest():
"""Gathers replies from latest Tweet in order of popularity."""
latest_status = api.user_timeline(count=1, exclude_replies=True)[0]
return tweepy.Cursor(api.search, q="to:TextOnlyGameBoy", since_id=latest_status.id, result_type="recent").items()
def update(tweet_image: BinaryIO, profile_image: BinaryIO, text: str = "Image", bio: str = ""):
"""Send a Tweet with an image and optionally update the bio."""
screenshot = api.media_upload("screenshot.jpg", file=tweet_image)
api.update_profile_image("screenshot.jpg", file_=profile_image)
api.update_status(text, media_ids=[screenshot.media_id])
if bio:
api.update_profile(description=BASE_BIO + bio)
| [
"tweepy.Cursor",
"tweepy.API",
"os.getenv",
"dotenv.load_dotenv"
] | [((191, 204), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (202, 204), False, 'from dotenv import load_dotenv\n'), ((369, 385), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (379, 385), False, 'import tweepy\n'), ((233, 254), 'os.getenv', 'getenv', (['"""TWITTER_KEY"""'], {}), "('TWITTER_KEY')\n", (239, 254), False, 'from os import getenv\n'), ((256, 280), 'os.getenv', 'getenv', (['"""TWITTER_SECRET"""'], {}), "('TWITTER_SECRET')\n", (262, 280), False, 'from os import getenv\n'), ((304, 328), 'os.getenv', 'getenv', (['"""TWITTER_ACCESS"""'], {}), "('TWITTER_ACCESS')\n", (310, 328), False, 'from os import getenv\n'), ((330, 360), 'os.getenv', 'getenv', (['"""TWITTER_ACCESS_TOKEN"""'], {}), "('TWITTER_ACCESS_TOKEN')\n", (336, 360), False, 'from os import getenv\n'), ((569, 671), 'tweepy.Cursor', 'tweepy.Cursor', (['api.search'], {'q': '"""to:TextOnlyGameBoy"""', 'since_id': 'latest_status.id', 'result_type': '"""recent"""'}), "(api.search, q='to:TextOnlyGameBoy', since_id=latest_status.id,\n result_type='recent')\n", (582, 671), False, 'import tweepy\n')] |
from django.shortcuts import render
from getpage import *
from fbapp.models import Search, Clap
from django.http import Http404, HttpResponse
import json
# Create your views here.
def clap(request):
if request.is_ajax():
keyword = request.GET['keyword']
clap = Clap.objects.all()
if(len(clap) == 0):
clap = Clap(clap = int(keyword))
clap.save()
clapCount = int(keyword)
else:
clap[0].clap = int(keyword)
clap[0].save()
return HttpResponse({}, content_type = "application/json")
else:
raise Http404
def getclap(request):
if request.is_ajax():
clap = Clap.objects.all()
if(len(clap) == 0):
clap = 0
else:
clap = clap[0].clap
data = json.dumps(clap)
print(data)
return HttpResponse(data, content_type = "application/json")
else:
raise Http404
def home(request):
return render(request, 'home.html', {})
def getuser(request):
if request.is_ajax():
keyword = request.GET['keyword']
try:
user = getFacebookUser(keyword)
except:
user = 'no'
if user != 'no':
searchModel = Search.objects.filter(user = keyword)
if(not searchModel):
searchModel = Search(user = keyword)
searchModel.save()
data = json.dumps(user)
return HttpResponse(data, content_type = "application/json")
else:
raise Http404
def getpost(request):
if request.is_ajax():
userid = request.GET['keyword']
message, image, postid = getFacebookPost(userid)
data = json.dumps([message, image, postid])
return HttpResponse(data, content_type = "application/json")
else:
raise Http404
def getcomments(request):
if request.is_ajax():
postid = request.GET['keyword']
number, summary = getFacebookComments(postid)
data = json.dumps([number, summary])
return HttpResponse(data, content_type = "application/json")
else:
raise Http404
def getreplies(request):
if request.is_ajax():
commentid = request.GET['keyword']
replies, likes, users = getFacebookReplies(commentid)
data = json.dumps([replies, likes, users])
return HttpResponse(data, content_type = "application/json")
else:
raise Http404
def suggest(request):
if request.is_ajax():
temp = []
searchModel = Search.objects.all()
for i in searchModel.values():
temp.append(i['user'])
temp = list(set(temp))
print(temp)
data = json.dumps(temp)
return HttpResponse(data, content_type = "application/json")
else:
raise Http404
| [
"django.shortcuts.render",
"django.http.HttpResponse",
"json.dumps",
"fbapp.models.Search",
"fbapp.models.Search.objects.all",
"fbapp.models.Search.objects.filter",
"fbapp.models.Clap.objects.all"
] | [((825, 857), 'django.shortcuts.render', 'render', (['request', '"""home.html"""', '{}'], {}), "(request, 'home.html', {})\n", (831, 857), False, 'from django.shortcuts import render\n'), ((268, 286), 'fbapp.models.Clap.objects.all', 'Clap.objects.all', ([], {}), '()\n', (284, 286), False, 'from fbapp.models import Search, Clap\n'), ((454, 503), 'django.http.HttpResponse', 'HttpResponse', (['{}'], {'content_type': '"""application/json"""'}), "({}, content_type='application/json')\n", (466, 503), False, 'from django.http import Http404, HttpResponse\n'), ((584, 602), 'fbapp.models.Clap.objects.all', 'Clap.objects.all', ([], {}), '()\n', (600, 602), False, 'from fbapp.models import Search, Clap\n'), ((677, 693), 'json.dumps', 'json.dumps', (['clap'], {}), '(clap)\n', (687, 693), False, 'import json\n'), ((717, 768), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""'}), "(data, content_type='application/json')\n", (729, 768), False, 'from django.http import Http404, HttpResponse\n'), ((1252, 1268), 'json.dumps', 'json.dumps', (['user'], {}), '(user)\n', (1262, 1268), False, 'import json\n'), ((1284, 1335), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""'}), "(data, content_type='application/json')\n", (1296, 1335), False, 'from django.http import Http404, HttpResponse\n'), ((1531, 1567), 'json.dumps', 'json.dumps', (['[message, image, postid]'], {}), '([message, image, postid])\n', (1541, 1567), False, 'import json\n'), ((1583, 1634), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""'}), "(data, content_type='application/json')\n", (1595, 1634), False, 'from django.http import Http404, HttpResponse\n'), ((1831, 1860), 'json.dumps', 'json.dumps', (['[number, summary]'], {}), '([number, summary])\n', (1841, 1860), False, 'import json\n'), ((1876, 1927), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""'}), "(data, content_type='application/json')\n", (1888, 1927), False, 'from django.http import Http404, HttpResponse\n'), ((2134, 2169), 'json.dumps', 'json.dumps', (['[replies, likes, users]'], {}), '([replies, likes, users])\n', (2144, 2169), False, 'import json\n'), ((2185, 2236), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""'}), "(data, content_type='application/json')\n", (2197, 2236), False, 'from django.http import Http404, HttpResponse\n'), ((2361, 2381), 'fbapp.models.Search.objects.all', 'Search.objects.all', ([], {}), '()\n', (2379, 2381), False, 'from fbapp.models import Search, Clap\n'), ((2519, 2535), 'json.dumps', 'json.dumps', (['temp'], {}), '(temp)\n', (2529, 2535), False, 'import json\n'), ((2551, 2602), 'django.http.HttpResponse', 'HttpResponse', (['data'], {'content_type': '"""application/json"""'}), "(data, content_type='application/json')\n", (2563, 2602), False, 'from django.http import Http404, HttpResponse\n'), ((1093, 1128), 'fbapp.models.Search.objects.filter', 'Search.objects.filter', ([], {'user': 'keyword'}), '(user=keyword)\n', (1114, 1128), False, 'from fbapp.models import Search, Clap\n'), ((1185, 1205), 'fbapp.models.Search', 'Search', ([], {'user': 'keyword'}), '(user=keyword)\n', (1191, 1205), False, 'from fbapp.models import Search, Clap\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter
class CRNN:
def __init__(self, alphabet=None):
self.alphabet = alphabet
def load_weights(self, path):
ocrPath = path
ocrPathtxt = path.replace('.pb', '.pbtxt')
self.model = cv2.dnn.readNetFromTensorflow(ocrPath, ocrPathtxt)
def predict(self, image):
image = resizeNormalize(image, 32)
image = image.astype(np.float32)
image = np.array([[image]])
self.model.setInput(image)
preds = self.model.forward()
preds = preds.transpose(0, 2, 3, 1)
preds = preds[0]
preds = np.argmax(preds, axis=2).reshape((-1,))
raw = strLabelConverter(preds, self.alphabet)
return raw
def predict_job(self, boxes):
n = len(boxes)
for i in range(n):
boxes[i]['text'] = self.predict(boxes[i]['img'])
return boxes
| [
"cv2.dnn.readNetFromTensorflow",
"numpy.argmax",
"numpy.array",
"Text_B_ocr_crnn_model_file.crnn.util.strLabelConverter",
"Text_B_ocr_crnn_model_file.crnn.util.resizeNormalize"
] | [((378, 428), 'cv2.dnn.readNetFromTensorflow', 'cv2.dnn.readNetFromTensorflow', (['ocrPath', 'ocrPathtxt'], {}), '(ocrPath, ocrPathtxt)\n', (407, 428), False, 'import cv2\n'), ((476, 502), 'Text_B_ocr_crnn_model_file.crnn.util.resizeNormalize', 'resizeNormalize', (['image', '(32)'], {}), '(image, 32)\n', (491, 502), False, 'from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter\n'), ((560, 579), 'numpy.array', 'np.array', (['[[image]]'], {}), '([[image]])\n', (568, 579), True, 'import numpy as np\n'), ((791, 830), 'Text_B_ocr_crnn_model_file.crnn.util.strLabelConverter', 'strLabelConverter', (['preds', 'self.alphabet'], {}), '(preds, self.alphabet)\n', (808, 830), False, 'from Text_B_ocr_crnn_model_file.crnn.util import resizeNormalize, strLabelConverter\n'), ((737, 761), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(2)'}), '(preds, axis=2)\n', (746, 761), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import textwrap
import pytest
from dkbuild_apacheconf.dotdict import dotdict
def test_add_depth1():
dd = dotdict()
dd['hello'] = 42
print(dd)
assert dd.ctx == { 'hello': 42 }
def test_add_depth2():
dd = dotdict()
dd['hello.world'] = 42
print(dd)
assert dd.ctx == {
'hello': {
'world': 42
}
}
def test_add_depth3():
dd = dotdict()
dd['hello.beautiful.world'] = 42
dd['hello.beautiful.moon'] = 43
assert dd.ctx == {
'hello': {
'beautiful': {
'world': 42,
'moon': 43
}
}
}
def test_add_err():
dd = dotdict()
with pytest.raises(TypeError):
dd[42] = 'hello world'
def test_get():
dd = dotdict()
dd['hello.world'] = 42
assert dd['hello.world'] == 42
assert dd.get('hello.world') == 42
def test_get_default():
dd = dotdict()
assert dd.get('hello.world', 42) == 42
assert dd.get('hello.world') is None
def test_serialization():
dd = dotdict()
dd['hello.world'] = 42
assert str(dd) == textwrap.dedent("""\
<dotdict {
"hello": {
"world": 42
}
}>""")
assert repr(dd) == textwrap.dedent("""\
<dotdict {
"hello.world": 42
}>""")
| [
"textwrap.dedent",
"dkbuild_apacheconf.dotdict.dotdict",
"pytest.raises"
] | [((137, 146), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (144, 146), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((253, 262), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (260, 262), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((420, 429), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (427, 429), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((689, 698), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (696, 698), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((792, 801), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (799, 801), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((938, 947), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (945, 947), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((1069, 1078), 'dkbuild_apacheconf.dotdict.dotdict', 'dotdict', ([], {}), '()\n', (1076, 1078), False, 'from dkbuild_apacheconf.dotdict import dotdict\n'), ((708, 732), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (721, 732), False, 'import pytest\n'), ((1128, 1235), 'textwrap.dedent', 'textwrap.dedent', (['""" <dotdict {\n "hello": {\n "world": 42\n }\n }>"""'], {}), '(\n """ <dotdict {\n "hello": {\n "world": 42\n }\n }>"""\n )\n', (1143, 1235), False, 'import textwrap\n'), ((1251, 1321), 'textwrap.dedent', 'textwrap.dedent', (['""" <dotdict {\n "hello.world": 42\n }>"""'], {}), '(""" <dotdict {\n "hello.world": 42\n }>""")\n', (1266, 1321), False, 'import textwrap\n')] |
"""
Simulated device classes
"""
from ophyd.device import Device, Component
from .signal import FakeSignal
class SimDevice(Device):
"""
Class to house components and methods common to all simulated devices.
"""
sim_x = Component(FakeSignal, value=0)
sim_y = Component(FakeSignal, value=0)
sim_z = Component(FakeSignal, value=0)
| [
"ophyd.device.Component"
] | [((238, 268), 'ophyd.device.Component', 'Component', (['FakeSignal'], {'value': '(0)'}), '(FakeSignal, value=0)\n', (247, 268), False, 'from ophyd.device import Device, Component\n'), ((281, 311), 'ophyd.device.Component', 'Component', (['FakeSignal'], {'value': '(0)'}), '(FakeSignal, value=0)\n', (290, 311), False, 'from ophyd.device import Device, Component\n'), ((324, 354), 'ophyd.device.Component', 'Component', (['FakeSignal'], {'value': '(0)'}), '(FakeSignal, value=0)\n', (333, 354), False, 'from ophyd.device import Device, Component\n')] |
from suds.client import Client
import suds
import time
import helplib as hl
#be aware that you need a chemspider_token.txt in the directory for the app to work
#the chemspider_token.txt should only contain the token (available online for free)
class ChemicalObject():
def __init__(self, name = '', cas = '', inchi = '', inchikey = '', csid = ''):
#first define the SOAP service for searching
searchurl = 'http://www.chemspider.com/Search.asmx?WSDL'
try:
self.searchclient = Client(searchurl)
except Exception as e:
print(e)
#define the soap service for inchi-conversion
inchiurl = 'http://www.chemspider.com/InChI.asmx?WSDL'
try:
self.inchiclient = Client(inchiurl)
except Exception as e:
print(e)
#set all properties to the ones from initiating call
self.cas = cas
self.inchi = inchi
self.inchikey = inchikey
self.name = name
self.csid = csid
#no transaction id for now
self.transaction_id = ''
#how quickly should we ask for results? in seconds
self.timetick = 0.2
self.maxtime = 15
#read chemspider token from config file 'chemspider_token'
try:
f = hl.openfile('chemspider_token.txt')
except IOError:
raise IOError
self.token = f.readline()
def complete(self):
"""Fills all other properties of an instance"""
#first retrieve the Chemspider ID
self.retrieve_csid()#
#fill up the other fields
self.fill_forms_with_csid()
def status(self):
#if we don't have a transaction id, we are free
if self.transaction_id != '':
return 'busy'
else:
return 'free'
def retrieve_csid(self):
#for what should we search?
if self.inchi != '':
searchterm = self.inchi
else:
searchterm = self.name
#it's a good idea to only search for ascii:
searchterm = searchterm.decode('utf8', 'replace').encode('ascii', 'replace')
#try connecting
try:
self.transaction_id = self.searchclient.service.AsyncSimpleSearch(searchterm, self.token)
except suds.WebFault as detail:
self.errorstatus = detail
self.transaction_id = ''
#don't run too long in the following loop
i = 0
#if successful we can check whether the results are already available
if self.transaction_id != '':
while self.searchclient.service.GetAsyncSearchStatus(self.transaction_id, self.token) != 'ResultReady':
#wait a little
time.sleep(self.timetick)
i = i + 1
if i > (self.maxtime / self.timetick):
print('No result, aborting')
break
#ready! the [0] is because it basically gives a list and we use the first one
result = self.searchclient.service.GetAsyncSearchResult(self.transaction_id, self.token)
if result != '':
self.csid = result[0][0]
#transaction over. set transaction id to empty for proper status displays
self.transaction_id = ''
def fill_forms_with_csid(self):
"""Retrieve all data from Chemspider service using a CS ID"""
if self.csid != '':
try:
tmp = self.searchclient.service.GetCompoundInfo(self.csid, self.token)
except suds.WebFault as detail:
print(detail)
self.inchi = tmp[1]
self.inchikey = tmp[2]
| [
"suds.client.Client",
"helplib.openfile",
"time.sleep"
] | [((519, 536), 'suds.client.Client', 'Client', (['searchurl'], {}), '(searchurl)\n', (525, 536), False, 'from suds.client import Client\n'), ((752, 768), 'suds.client.Client', 'Client', (['inchiurl'], {}), '(inchiurl)\n', (758, 768), False, 'from suds.client import Client\n'), ((1303, 1338), 'helplib.openfile', 'hl.openfile', (['"""chemspider_token.txt"""'], {}), "('chemspider_token.txt')\n", (1314, 1338), True, 'import helplib as hl\n'), ((2776, 2801), 'time.sleep', 'time.sleep', (['self.timetick'], {}), '(self.timetick)\n', (2786, 2801), False, 'import time\n')] |
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#############################################################
# Interface
#############################################################
from collections import namedtuple
# Command/Argument definition
Cmd = namedtuple('Command', 'desc cb args cmds', defaults=(None,None,None,None,))
Arg = namedtuple('Arg', 'name flags short desc default exmpl convert', defaults=(None,None,None,None,None,))
#Flags
OPTION = 0 # simple 'flag' argument; example: '--foo' or '-f'
VALUE = 1 # value argument; example: '--foo=value' '-f=value'
UNNAMED = 2 # unamed argument; example; 'foo'
REQUIRED = 4 # required argument; omitting argument will print the help text
# print help
def print_help(cmd_name, cmd):
_print_help(cmd, [cmd_name])
# execute command based on arguments
def exec_command(cmd_name, cmd, argv):
return _execute_command(cmd, argv[1:], [cmd_name])
#############################################################
# Implementation
#############################################################
_PrintCmd = namedtuple('PrintCmd', 'name text desc cmds args mla_name mla_text mla_short')
_PrintArg = namedtuple('PrintArg', 'name text short desc')
_PRE_UNAMED = 0
_PRE_SHORT = 1
_PRE_NAME = 2
def _execute_command(cmd, argv, commands):
help_args = ['help', '-help', '--help', '?']
if (len(argv) == 0 and not cmd.cb) or (len(argv) > 0 and argv[0] in help_args):
_print_help(cmd, commands)
if len(argv) == 0:
print('Error: Please specify Command!')
print('')
return -1
return 0
if cmd.cb:
args = {}
if cmd.args:
for x in range(0, len(argv)):
arg_name = argv[x]
pre = _PRE_UNAMED
if arg_name.find('--') == 0:
pre = _PRE_NAME
elif arg_name.find('-') == 0:
pre = _PRE_SHORT
found = False
for arg in cmd.args:
cc = arg_name[pre:].split('=')
if (pre == _PRE_NAME and arg.name == cc[0]) or (pre == _PRE_SHORT and arg.short == cc[0]) or (pre == _PRE_UNAMED and arg.flags & UNNAMED and arg.name not in args):
found = True
if arg.flags & VALUE or pre == _PRE_UNAMED:
idx = 0 if pre == _PRE_UNAMED else 1
val = ''.join(cc[idx:]) if len(cc) > idx else ''
if val == '':
_print_help(cmd, commands)
print('Error: Argument \'{}\': Expects to have a value!'.format(arg.name))
if arg.flags & UNNAMED:
print(' Example: {} <{}>'.format(' '.join(commands), arg.name))
else:
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl if arg.exmpl else 'foo'))
print('')
return -1
v_str = val.strip('\'')
if arg.convert:
try:
args[arg.name] = arg.convert(v_str)
except:
_print_help(cmd, commands)
print('Error: Argument \'{}\': Value not expected type!'.format(arg.name))
if arg.exmpl:
if arg.flags & UNNAMED:
print(' Example: {} <{}>'.format(' '.join(commands), arg.exmpl))
else:
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl))
print('')
return -1
else:
args[arg.name] = v_str
else:
args[arg.name] = True
break
if not found:
_print_help(cmd, commands)
print('Error: Argument \'{}\': Unknown Argument!'.format(arg_name))
print('')
return -1
for arg in cmd.args:
if not arg.name in args:
if arg.default is not None:
args[arg.name] = arg.default
elif arg.flags & REQUIRED:
_print_help(cmd, commands)
if arg.flags & UNNAMED:
print('Error: Argument \'{}\': Required Argument not set!'.format(arg.name))
print(' Example: {} <{}>'.format(' '.join(commands), arg.exmpl if arg.exmpl else arg.name))
print('')
else:
print('Error: Argument \'{}\': Required Argument not set!'.format(arg.name))
print(' Example: {} --{}=<{}>'.format(' '.join(commands), arg.name, arg.exmpl if arg.exmpl else 'foo'))
print('')
return -1
else:
args[arg.name] = None
res = cmd.cb(args)
return res if res else 0
if cmd.cmds:
if not argv[0] in cmd.cmds:
_print_help(cmd, commands)
print(' Error: Command \'{}\': Not a valid command!'.format(argv[0]))
print('')
return -1
commands.append(argv[0])
return _execute_command(cmd.cmds[argv[0]], argv[1:], commands)
return -2
def _print_help(cmd, commands, pre_len=0, post_len=0):
lines = []
n = _collect_help(cmd, commands, 0, 0, lines, 0)
for l in lines:
print('{}{}'.format(l[0].ljust(n), ' : {}'.format(l[1]) if l[1] else ''))
def _collect_help(cmd, commands, pre_len, post_len, lines, n):
if pre_len == 0:
prefix = ' '
else:
prefix = ''.ljust(pre_len)
names_args = []
unamed_args = []
arg_name_maxlen = 0
arg_text_maxlen = 0
arg_short_maxlen = 0
if cmd.cb:
if cmd.args:
for arg in cmd.args:
if arg.short:
arg_short = ' (-{})'.format(arg.short)
else:
arg_short = ''
if arg.flags & UNNAMED:
arg_text = '<{}>'.format(arg.name)
else:
arg_text = '--{}{}'.format(arg.name, '=<{}>'.format(arg.exmpl if arg.exmpl else 'foo') if arg.flags & VALUE else '')
if arg.default is not None:
arg_desc = '{} (default: {})'.format(arg.desc, arg.default)
elif arg.flags & REQUIRED:
arg_desc = arg.desc
else:
arg_desc = '{} (optional)'.format(arg.desc)
l = len(arg_text)
if l > arg_text_maxlen:
arg_text_maxlen = l
l = len(arg_short)
if l > arg_short_maxlen:
arg_short_maxlen = l
l = len(arg.name)
if l > arg_name_maxlen:
arg_name_maxlen = l
pa = _PrintArg(
name=arg.name,
text=arg_text,
short=arg_short,
desc=arg_desc)
if arg.flags & UNNAMED:
unamed_args.append(pa)
else:
names_args.append(pa)
cmd_text_maxlen = 0
cmdlist = []
if cmd.cmds:
for cmd_name in cmd.cmds:
cmdlist.append(cmd_name)
l = len(cmd_name)
if l > cmd_text_maxlen:
cmd_text_maxlen = l
if pre_len == 0:
cmd_name = ' '.join(commands).ljust(post_len)
#cmd_list_str = ' {{{}}}'.format('|'.join(cmdlist)) if cmd.cmds else ''
else:
cmd_name = commands[len(commands)-1].ljust(post_len)
#cmd_list_str = ' <Command>' if cmd.cmds else ''
cmd_text = '{}{}{}'.format(
#cmd_list_str,
' <Command>' if cmd.cmds else '',
' <Arguments>' if len(unamed_args) > 0 else '',
' [Options]' if len(names_args) > 0 else '')
cmd_desc = cmd.desc if cmd.desc else commands[len(commands)-1]
if pre_len == 0:
n = _add_line(lines, 'Usage:', None, n)
n = _add_line(lines, '{}{}{}'.format(
prefix,
cmd_name,
cmd_text),
cmd_desc, n)
if len(unamed_args) > 0 and pre_len == 0:
n = _add_line(lines, '', None, n)
n = _add_line(lines, 'Arguments:', None, n)
for arg in unamed_args:
n = _add_line(lines, '{}{}{}{}'.format(
prefix,
''.ljust(post_len + 1),
'{}'.format(arg.text).ljust(arg_text_maxlen),
'{}'.format(arg.short).ljust(arg_short_maxlen)),
arg.desc if arg.desc else arg.name, n)
if len(names_args) > 0 and pre_len == 0:
n = _add_line(lines, '', None, n)
n = _add_line(lines, 'Options:', None, n)
names_args = sorted(names_args, key=lambda x: x.name)
for arg in names_args:
n = _add_line(lines, '{}{}{}{}'.format(
prefix,
''.ljust(post_len + 1),
'{}'.format(arg.text).ljust(arg_text_maxlen),
'{}'.format(arg.short).ljust(arg_short_maxlen)),
arg.desc if arg.desc else arg.name, n)
if cmd.cmds:
if len(cmd.cmds) > 0 and pre_len == 0:
pre_len = 3
n = _add_line(lines, '', None, n)
n = _add_line(lines, 'Commands:', None, n)
else:
pre_len = pre_len + len(cmd_name) + 1
for cmd_name, cmd in cmd.cmds.items():
n = _collect_help(cmd, commands + [cmd_name], pre_len, cmd_text_maxlen, lines, n)
n = _add_line(lines, '', None, n)
elif pre_len == 0:
n = _add_line(lines, '', None, n)
return n
def _add_line(lines, ll, lr, n):
lines.append([ll, lr])
return max(n, len(ll))
| [
"collections.namedtuple"
] | [((1297, 1374), 'collections.namedtuple', 'namedtuple', (['"""Command"""', '"""desc cb args cmds"""'], {'defaults': '(None, None, None, None)'}), "('Command', 'desc cb args cmds', defaults=(None, None, None, None))\n", (1307, 1374), False, 'from collections import namedtuple\n'), ((1379, 1489), 'collections.namedtuple', 'namedtuple', (['"""Arg"""', '"""name flags short desc default exmpl convert"""'], {'defaults': '(None, None, None, None, None)'}), "('Arg', 'name flags short desc default exmpl convert', defaults=(\n None, None, None, None, None))\n", (1389, 1489), False, 'from collections import namedtuple\n'), ((2126, 2204), 'collections.namedtuple', 'namedtuple', (['"""PrintCmd"""', '"""name text desc cmds args mla_name mla_text mla_short"""'], {}), "('PrintCmd', 'name text desc cmds args mla_name mla_text mla_short')\n", (2136, 2204), False, 'from collections import namedtuple\n'), ((2217, 2263), 'collections.namedtuple', 'namedtuple', (['"""PrintArg"""', '"""name text short desc"""'], {}), "('PrintArg', 'name text short desc')\n", (2227, 2263), False, 'from collections import namedtuple\n')] |
## @package teetool
# This module contains the Visual_2d class
#
# See Visual_2d class for more details
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
import teetool as tt
## Visual_2d class generates the 2d output using Matplotlib
#
# Even 3-dimensional trajectories can be output in 2d (sliced)
class Visual_2d(object):
## Constructor for Visual_2d
# @param self object pointer
# @param thisWorld World object, filled with trajectory data and models
# @param kwargs additional parameters for plt.figure()
def __init__(self, thisWorld, **kwargs):
"""
<description>
"""
## figure object
self._fig = plt.figure(facecolor="white", **kwargs)
## axis object
self._ax = self._fig.gca()
# set colour of axis
#self._ax.set_axis_bgcolor('white')
#self._ax.set_facecolor('white')
## World object
self._world = thisWorld
## Labels of plots
self._labels = []
## Plot mean of trajectories
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotMean(self, list_icluster=None, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
Y = this_cluster["model"].getMean()
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[list_icluster[i]],
**kwargs)
## Plot trajectories of cluster
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectories(self,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract data
clusters = self._world.getCluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot trajectories of cluster
# @param self object pointer
# @param x1 point from [0,1] to visualise
# @param list_icluster list of clusters to plot
# @param ntraj maximum number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotTrajectoriesPoints(self,
x1,
list_icluster=None,
ntraj=50,
colour=None,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# obtain points
clustersP = self._world.getClusterPoints(x1, list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, A) in enumerate(clustersP):
# pass clusters
for itraj, a in enumerate(A):
a_line, = self._ax.plot(a[0],
a[1],
color=colours[i],
**kwargs)
# limit number of trajectories
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot time-series of trajectories
# @param self object pointer
# @param icluster select cluster to plot
# @param idim select dimension to plot
# @param ntraj maximum number of trajectories
# @param colour specificy colour of trajectories
# @param kwargs additional parameters for plotting
def plotTimeSeries(self, icluster=0, idim=0, ntraj=50,
colour='k', **kwargs):
# number of subplots, 2 or 3
ndim = self._world._ndim
# subplot
#f, axarr = plt.subplots(ndim, sharex=True)
# check validity
[icluster] = self._world._check_list_icluster([icluster])
# extract data
clusters = self._world.getCluster([icluster])
for (i, this_cluster) in enumerate(clusters):
# pass clusters
for itraj, (x, Y) in enumerate(this_cluster["data"]):
#for d in range(ndim):
x_norm = (x - x.min()) / (x.max() - x.min())
a_line, = self._ax.plot(x_norm,
Y[:,idim],
color=colour, **kwargs)
if itraj > ntraj:
break
self._labels.append((a_line, "data"))
## Plot a box based on two coordinates
# @param self object pointer
# @param coord_lowerleft lower-left coordinate (x,y)
# @param coord_upperright upper-right coordinate (x,y)
# @param kwargs additional parameters for plotting
def plotBox(self, coord_lowerleft, coord_upperright, **kwargs):
x_lo = coord_lowerleft[0]
x_hi = coord_upperright[0]
y_lo = coord_lowerleft[1]
y_hi = coord_upperright[1]
coords = np.array([[x_lo, y_lo],
[x_hi, y_lo],
[x_hi, y_hi],
[x_lo, y_hi],
[x_lo, y_lo]])
coords_x = coords[:,0]
coords_y = coords[:,1]
self._ax.plot(coords_x, coords_y, **kwargs)
## standard plotting function for Matplotlib
# @param self object pointer
# @param args additional arguments for plotting
# @param kwargs additional labeled parameters for plotting
def plot(self, *args, **kwargs):
# plot
self._ax.plot(*args, **kwargs)
## Plot samples of model
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param ntraj number of trajectories
# @param colour if specified, overwrites distinct colours
# @param kwargs additional parameters for plotting
def plotSamples(self, list_icluster=None, ntraj=50, colour=None, **kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# unique colours
colours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for (i, icluster) in enumerate(list_icluster):
these_samples = self._world.getSamples(icluster,
nsamples=ntraj)
for (x, Y) in these_samples:
a_line, = self._ax.plot(Y[:, 0],
Y[:, 1],
color=colours[i],
linestyle=":",
**kwargs)
self._labels.append((a_line, "samples"))
## Add legend to plot
# @param self object pointer
def plotLegend(self):
list_lines = []
list_label = []
for (a_line, a_label) in self._labels:
list_lines.append(a_line)
list_label.append(a_label)
plt.legend(handles=list_lines, labels=list_label)
## Plots a confidence region of variance sigma
# @param self object pointer
# @param list_icluster list of clusters to plot
# @param sdwidth variance to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution sets resolution for which to calculate the tube, can be a single integer, or an actual measurement [dim1 dim2] (2d) [dim1 dim2 dim3] (3d)
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTube(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth,
z=z,
resolution=resolution)
# unique colours
lcolours = tt.helpers.getDistinctColours(len(self._world._clusters),
colour)
for i, ss1 in enumerate(ss_list):
#plt.contourf(xx, yy, 1.*ss1, levels=[-np.inf, 1., np.inf], colors=(lcolours[i],), alpha=alpha, **kwargs)
# plot an iso surface line
plt.contour(xx,
yy,
ss1,
levels=[.5],
colors=(lcolours[list_icluster[i]], 'w'),
**kwargs)
## Plots the difference confidence region of variance sigma for two models
# @param self object pointer
# @param list_icluster list of 2 clusters to compare
# @param sdwidth variance to evaluate
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
# @param colour if specified, overwrites distinct colours
# @param alpha opacity for the confidence region
# @param kwargs additional parameters for plotting
def plotTubeDifference(self,
list_icluster=None,
sdwidth=1,
z=None,
resolution=None,
colour=None,
alpha=.1,
**kwargs):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
# extract first two only!
list_icluster = list_icluster[:2]
# extract
(ss_list, [xx, yy, zz]) = self._world.getTube(list_icluster,
sdwidth, z=z,
resolution=resolution)
# to plot
ss_plot = - np.inf * np.ones_like(ss_list[0])
# 1 :: blocks added
ss_added = ((ss_list[0] - ss_list[1])==-1)
# 2 :: blocks removed
ss_removed = ((ss_list[0] - ss_list[1])==1)
# 3 :: present in both
ss_neutral = ((ss_list[0] + ss_list[1])==2)
ss_plot[ss_added] = 1.
ss_plot[ss_removed] = -1.
ss_plot[ss_neutral] = 0.
#plt.contourf(xx, yy, ss_plot, levels=[-np.inf, -1., 0., 1., np.inf], colors='none', hatches=['//', '.', '/'], **kwargs)
plt.contourf(xx,
yy,
ss_plot,
levels=[-np.inf, -1., 0., 1., np.inf],
colors=('r','b','g'),
alpha=alpha,
**kwargs)
for i in [1, 2, 3]:
if i == 1:
ss1 = 1.*ss_removed
color = 'r'
elif i == 2:
ss1 = 1.*ss_added
color = 'g'
elif i == 3:
ss1 = 1.*ss_neutral
color = 'b'
# plot an iso surface
plt.contour(xx, yy, ss1, levels=[0.5], colors=color)
## Plot the log-likehood of confidence regions -- which can be related to traffic complexity in the future
# @param self object pointer
# @param list_icluster list of clusters to compare
# @param pmin minimum value on a normalised scale
# @param pmax maximum value on a normalised scale
# @param z if specified, it evaluates the confidence region at a constant altitude for 3D trajectories
# @param resolution specify resolution of region
def plotLogLikelihood(self,
list_icluster=None,
pmin=0, pmax=1,
z=None,
resolution=None):
# check validity
list_icluster = self._world._check_list_icluster(list_icluster)
(ss_list, [xx, yy, zz]) = self._world.getLogLikelihood(list_icluster,
resolution,
z)
ss = ss_list[0] # initialise
for ss1 in ss_list:
# find those greater
mask = np.greater(ss1, ss)
# replace
ss[mask] = ss1[mask]
# normalise
ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
# plot contours
self._ax.pcolor(xx,
yy,
ss_norm,
cmap="viridis",
vmin=pmin,
vmax=pmax)
def plotComplexityMap(self,
list_icluster=None,
complexity=1,
pmin=0, pmax=1,
z=None,
resolution=None, cmap1="Reds"):
ss, xx, yy, zz = self._world.getComplexityMap(list_icluster,
complexity,
resolution,
z)
# normalise
ss_norm = (ss - np.min(ss)) / (np.max(ss) - np.min(ss))
# plot contours
cax = self._ax.pcolor(xx,
yy,
ss_norm,
cmap=cmap1,
vmin=pmin,
vmax=pmax)
return cax
## add colorbar
def plotColourBar(self, *args, **kwargs):
cbar = self._fig.colorbar(*args, **kwargs)
# horizontal colorbar
# cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])
return cbar
## Plots the title or worldname
# @param self object pointer
def _plotTitle(self):
# add title
world_name = self._world.getName()
if not (world_name == None):
plt.title(world_name)
## saves the figure to a file in the output folder
# @param self object pointer
# @param add additional identifier for file
def save(self, add=None):
if (add==None):
saveas = self._world.getName()
else:
saveas = "{0}_{1}".format(self._world.getName(), add)
plt.savefig("output/2d_{0}.png".format(saveas))
## shows the figure (pop-up or inside notebook)
# @param self object pointer
def show(self):
plt.show()
## closes all figures
# @param self object pointer
def close(self):
plt.close("all")
| [
"matplotlib.pyplot.contourf",
"numpy.ones_like",
"numpy.greater",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"numpy.min",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((710, 749), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white', **kwargs)\n", (720, 749), True, 'import matplotlib.pyplot as plt\n'), ((6461, 6546), 'numpy.array', 'np.array', (['[[x_lo, y_lo], [x_hi, y_lo], [x_hi, y_hi], [x_lo, y_hi], [x_lo, y_lo]]'], {}), '([[x_lo, y_lo], [x_hi, y_lo], [x_hi, y_hi], [x_lo, y_hi], [x_lo, y_lo]]\n )\n', (6469, 6546), True, 'import numpy as np\n'), ((8466, 8515), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'list_lines', 'labels': 'list_label'}), '(handles=list_lines, labels=list_label)\n', (8476, 8515), True, 'import matplotlib.pyplot as plt\n'), ((12151, 12273), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'ss_plot'], {'levels': '[-np.inf, -1.0, 0.0, 1.0, np.inf]', 'colors': "('r', 'b', 'g')", 'alpha': 'alpha'}), "(xx, yy, ss_plot, levels=[-np.inf, -1.0, 0.0, 1.0, np.inf],\n colors=('r', 'b', 'g'), alpha=alpha, **kwargs)\n", (12163, 12273), True, 'import matplotlib.pyplot as plt\n'), ((16092, 16102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16100, 16102), True, 'import matplotlib.pyplot as plt\n'), ((16192, 16208), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16201, 16208), True, 'import matplotlib.pyplot as plt\n'), ((10122, 10216), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'ss1'], {'levels': '[0.5]', 'colors': "(lcolours[list_icluster[i]], 'w')"}), "(xx, yy, ss1, levels=[0.5], colors=(lcolours[list_icluster[i]],\n 'w'), **kwargs)\n", (10133, 10216), True, 'import matplotlib.pyplot as plt\n'), ((11641, 11665), 'numpy.ones_like', 'np.ones_like', (['ss_list[0]'], {}), '(ss_list[0])\n', (11653, 11665), True, 'import numpy as np\n'), ((12729, 12781), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'ss1'], {'levels': '[0.5]', 'colors': 'color'}), '(xx, yy, ss1, levels=[0.5], colors=color)\n', (12740, 12781), True, 'import matplotlib.pyplot as plt\n'), ((13887, 13906), 'numpy.greater', 'np.greater', (['ss1', 'ss'], {}), '(ss1, ss)\n', (13897, 13906), True, 'import numpy as np\n'), ((15585, 15606), 'matplotlib.pyplot.title', 'plt.title', (['world_name'], {}), '(world_name)\n', (15594, 15606), True, 'import matplotlib.pyplot as plt\n'), ((14007, 14017), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14013, 14017), True, 'import numpy as np\n'), ((14022, 14032), 'numpy.max', 'np.max', (['ss'], {}), '(ss)\n', (14028, 14032), True, 'import numpy as np\n'), ((14035, 14045), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14041, 14045), True, 'import numpy as np\n'), ((14828, 14838), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14834, 14838), True, 'import numpy as np\n'), ((14843, 14853), 'numpy.max', 'np.max', (['ss'], {}), '(ss)\n', (14849, 14853), True, 'import numpy as np\n'), ((14856, 14866), 'numpy.min', 'np.min', (['ss'], {}), '(ss)\n', (14862, 14866), True, 'import numpy as np\n')] |
import kmeans
import json
import numpy as np
NUM_GAUSSIANS = 32
DO_KMEANS = False
DEBUG = True
mixture_weights = [1.0/NUM_GAUSSIANS] * NUM_GAUSSIANS
if DEBUG:
print ("mixture_weights: ", mixture_weights)
print("Loading parsed data...")
traindata_processed_file = open("parsed_data/data1.universalenrollparsed", "r")
data = json.loads(traindata_processed_file.read())
traindata_processed_file.close()
print("Done loading parsed data!")
means = []
if DO_KMEANS:
means = kmeans.do_kmeans(data, 32)
else:
print("Loading centroids...")
traindata_processed_file = open("parsed_data/data1.kmeanspartialcentroids",
"r")
means = json.loads(traindata_processed_file.read())
traindata_processed_file.close()
print("Done loading centroids!")
data_np = np.array(data)
variances_np = np.var(data_np, axis=0)
if DEBUG:
print ("variances_np: ", variances_np)
variances = [variances_np.tolist()] * NUM_GAUSSIANS
initial_params = {
'mixture_weights': mixture_weights,
'means': means,
'variances': variances
}
print("writing inital parameters to file...")
traindata_processed_file = open("parsed_data/data1.initialparameters", "w")
traindata_processed_file.write(json.dumps(initial_params))
traindata_processed_file.close()
print("Done writing inital parameters to file")
| [
"numpy.array",
"json.dumps",
"kmeans.do_kmeans",
"numpy.var"
] | [((811, 825), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (819, 825), True, 'import numpy as np\n'), ((842, 865), 'numpy.var', 'np.var', (['data_np'], {'axis': '(0)'}), '(data_np, axis=0)\n', (848, 865), True, 'import numpy as np\n'), ((481, 507), 'kmeans.do_kmeans', 'kmeans.do_kmeans', (['data', '(32)'], {}), '(data, 32)\n', (497, 507), False, 'import kmeans\n'), ((1308, 1334), 'json.dumps', 'json.dumps', (['initial_params'], {}), '(initial_params)\n', (1318, 1334), False, 'import json\n')] |
#! /usr/bin/env python
import sys
import optparse
import netgpib
# Usage text
usage = """usage: %prog [options] CMD
Issue a command or query from a network-connected GPIB device.
example:
%prog -i 192.168.113.105 -d AG4395A -a 10 'POIN?'"""
# Parse options
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--address",
dest="gpibAddress", type="int", default=10,
help="GPIB device address (default: 10)")
parser.add_option("-i", "--ip",
dest="ipAddress", default="gpib01",
help="IP address/Host name (default: gpib01)")
parser.add_option("-l", "--log",
dest="log", action="store_true",
help="Log GPIB commands")
(options, args) = parser.parse_args()
if not args:
print('Must supply command argument.', file=sys.stderr)
sys.exit(1)
##################################################
# Create/connect to netGPIB object
#print >>sys.stderr, 'Connecting to %s...' % (options.ipAddress),
gpibObj = netgpib.netGPIB(options.ipAddress,
options.gpibAddress,
'\004', 0,
log=options.log)
#print >>sys.stderr, ' done.'
for cmd_string in args[0].split('\n'):
if not cmd_string:
continue
cmd = cmd_string.split(' ')
if cmd[0].find('?') > 0:
print(gpibObj.query(cmd_string).strip())
elif cmd == 'refresh':
gpibObj.refresh()
else:
gpibObj.command(cmd_string)
gpibObj.close()
| [
"netgpib.netGPIB",
"optparse.OptionParser",
"sys.exit"
] | [((271, 305), 'optparse.OptionParser', 'optparse.OptionParser', ([], {'usage': 'usage'}), '(usage=usage)\n', (292, 305), False, 'import optparse\n'), ((1037, 1125), 'netgpib.netGPIB', 'netgpib.netGPIB', (['options.ipAddress', 'options.gpibAddress', '"""\x04"""', '(0)'], {'log': 'options.log'}), "(options.ipAddress, options.gpibAddress, '\\x04', 0, log=\n options.log)\n", (1052, 1125), False, 'import netgpib\n'), ((861, 872), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (869, 872), False, 'import sys\n')] |
"""
PIC NVM implementation
"""
import os
import sys
from pyedbglib.util import binary
from pymcuprog import utils
from pymcuprog.nvm import NvmAccessProviderCmsisDapTool
from pymcuprog.pymcuprog_errors import PymcuprogNotSupportedError
from pymcuprog.deviceinfo.memorynames import MemoryNames
from pymcuprog.deviceinfo.deviceinfokeys import DeviceMemoryInfoKeys, DeviceInfoKeys
class NvmAccessProviderCmsisDapPic(NvmAccessProviderCmsisDapTool):
"""
NVM access the PIC way
"""
def __init__(self, transport, device_info, packpath, options=""):
"""
:raises ImportError: if packpath is None
"""
self.pic = None
NvmAccessProviderCmsisDapTool.__init__(self, device_info)
self.options = {}
if packpath is None:
raise ImportError("No path to pack repo provided!")
# Each part pack ships its own version of the full script stack, including pyedbglib.
# pyedbglib, and other libraries, can be installed in the local python site-packages
# This path hack puts the part pack path at the front of the python import path
system_path = sys.path
sys.path = [os.path.normpath(packpath)] + sys.path
sys.path = [os.path.normpath(packpath + "//common")] + sys.path
# Create driver for scripted debuggers
self.options['skip_blank_pages'] = True
self.options['overlapped_usb_access'] = False
# This imports the debugger model from the provided packpath so the import must be late
from common.debugprovider import provide_debugger_model # pylint: disable=import-outside-toplevel, import-error
devicename = device_info[DeviceInfoKeys.NAME]
self.pic = provide_debugger_model(devicename)
# Start immediately
self.pic.setup_session(transport, self.options)
self.device_info = device_info
# Start the programming session
if 'pic24' in devicename.lower():
if 'no_pe' in options:
# Only PIC24 devices support Programming Executives
try:
# Force no Programming Executive usage by setting program_pe flag but not configure a
# PE (i.e. not calling set_program_exec)
self.pic.start_programming_operation(program_pe=options['no_pe'])
except TypeError:
# start_programming_operation does not have program_pe argument (i.e. old
# devicesupportscripts without PE support)
self.pic.start_programming_operation()
else:
self.pic.start_programming_operation(program_pe=False)
else:
self.pic.start_programming_operation()
# The stack has been built, revert path hacks
sys.path = system_path
def read(self, memory_info, offset, numbytes):
"""
Read the memory
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
:param offset: relative offset in the memory type
:param numbytes: number of bytes to read
:return: array of bytes read
"""
mem_name = memory_info[DeviceInfoKeys.NAME]
offset += memory_info[DeviceMemoryInfoKeys.ADDRESS]
if mem_name in [MemoryNames.FLASH, MemoryNames.USER_ID, MemoryNames.ICD]:
mem = self.pic.read_flash_memory(offset, numbytes)
return mem
if mem_name == MemoryNames.CONFIG_WORD:
mem = self.pic.read_config_memory(offset, numbytes)
return mem
if mem_name == MemoryNames.EEPROM:
mem = self.pic.read_eeprom_memory(offset, numbytes)
return mem
self.logger.error("Unsupported memtype!")
return []
def write(self, memory_info, offset, data):
"""
Write the memory with data
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
:param offset: relative offset within the memory type
:param data: the data to program
"""
# Make sure the data is aligned to a memory page
chunk, address = utils.pagealign(data,
offset,
memory_info[DeviceMemoryInfoKeys.PAGE_SIZE],
memory_info[DeviceMemoryInfoKeys.WRITE_SIZE])
mem_name = memory_info[DeviceInfoKeys.NAME]
address += memory_info[DeviceMemoryInfoKeys.ADDRESS]
if mem_name == MemoryNames.FLASH:
self.pic.write_flash_memory(address, chunk)
elif mem_name == MemoryNames.CONFIG_WORD:
self.pic.write_config_memory(address, chunk)
elif mem_name == MemoryNames.USER_ID:
self.pic.write_user_id_memory(address, chunk)
elif mem_name == MemoryNames.EEPROM:
self.pic.write_eeprom_memory(address, chunk)
elif mem_name == MemoryNames.ICD:
try:
self.pic.write_de_memory(address, chunk)
except AttributeError:
# Some PIC devices don't have the write_de_memory but instead a _write_de_block function
self.pic._write_de_block(address, chunk) # pylint: disable=protected-access
else:
raise PymcuprogNotSupportedError("Unsupported memtype: {}!".format(mem_name))
def erase(self, memory_info=None, address=None):
"""
Erase the device or parts of it.
:param memory_info: dictionary for the memory as provided by the DeviceMemoryInfo class
If memory_info is None the default bulk erase will be run
:param address: address info for erase (optional)
"""
if address is None:
if memory_info is None:
self.pic.erase()
else:
if memory_info[DeviceInfoKeys.NAME] == MemoryNames.ICD:
self.pic.erase_de_memory(memory_info[DeviceMemoryInfoKeys.ADDRESS],
memory_info[DeviceMemoryInfoKeys.SIZE])
else:
if DeviceMemoryInfoKeys.ERASE_ADDRESS in memory_info:
self.pic.erase(memory_info[DeviceMemoryInfoKeys.ERASE_ADDRESS])
else:
raise ValueError("Missing erase address for {}".format(memory_info[DeviceInfoKeys.NAME]))
else:
self.pic.erase(address)
def read_device_id(self):
"""
Get the device info from the device
:returns: Device ID raw bytes (little endian)
"""
pic_id = self.pic.read_id()
id_array = binary.pack_le16(pic_id)
self.logger.info("Device ID read out: '%04X'", pic_id)
return id_array
def hold_in_reset(self):
"""
Hold the device in reset
"""
self.pic.hold_in_reset()
def release_from_reset(self):
"""
Release the device from reset
"""
self.pic.release_from_reset()
def stop(self):
"""
Stop programming session
"""
if self.pic is not None:
self.pic.end_of_operations()
| [
"pymcuprog.nvm.NvmAccessProviderCmsisDapTool.__init__",
"pymcuprog.utils.pagealign",
"os.path.normpath",
"common.debugprovider.provide_debugger_model",
"pyedbglib.util.binary.pack_le16"
] | [((668, 725), 'pymcuprog.nvm.NvmAccessProviderCmsisDapTool.__init__', 'NvmAccessProviderCmsisDapTool.__init__', (['self', 'device_info'], {}), '(self, device_info)\n', (706, 725), False, 'from pymcuprog.nvm import NvmAccessProviderCmsisDapTool\n'), ((1725, 1759), 'common.debugprovider.provide_debugger_model', 'provide_debugger_model', (['devicename'], {}), '(devicename)\n', (1747, 1759), False, 'from common.debugprovider import provide_debugger_model\n'), ((4176, 4300), 'pymcuprog.utils.pagealign', 'utils.pagealign', (['data', 'offset', 'memory_info[DeviceMemoryInfoKeys.PAGE_SIZE]', 'memory_info[DeviceMemoryInfoKeys.WRITE_SIZE]'], {}), '(data, offset, memory_info[DeviceMemoryInfoKeys.PAGE_SIZE],\n memory_info[DeviceMemoryInfoKeys.WRITE_SIZE])\n', (4191, 4300), False, 'from pymcuprog import utils\n'), ((6684, 6708), 'pyedbglib.util.binary.pack_le16', 'binary.pack_le16', (['pic_id'], {}), '(pic_id)\n', (6700, 6708), False, 'from pyedbglib.util import binary\n'), ((1173, 1199), 'os.path.normpath', 'os.path.normpath', (['packpath'], {}), '(packpath)\n', (1189, 1199), False, 'import os\n'), ((1232, 1271), 'os.path.normpath', 'os.path.normpath', (["(packpath + '//common')"], {}), "(packpath + '//common')\n", (1248, 1271), False, 'import os\n')] |
import tensorflow as tf
import numpy as np
from gpflow.params import DataHolder, Minibatch
from gpflow import autoflow, params_as_tensors, ParamList
from gpflow.models.model import Model
from gpflow.mean_functions import Identity, Linear
from gpflow.mean_functions import Zero
from gpflow.quadrature import mvhermgauss
from gpflow import settings
float_type = settings.float_type
from doubly_stochastic_dgp.layers import SVGP_Layer
def init_layers_linear(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
layers = []
X_running, Z_running = X.copy(), Z.copy()
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim
print(dim_in, dim_out)
if dim_in == dim_out:
mf = Identity()
else:
if dim_in > dim_out: # stepping down, use the pca projection
_, _, V = np.linalg.svd(X_running, full_matrices=False)
W = V[:dim_out, :].T
else: # stepping up, use identity + padding
W = np.concatenate([np.eye(dim_in), np.zeros((dim_in, dim_out - dim_in))], 1)
mf = Linear(W)
mf.set_trainable(False)
layers.append(Layer(kern_in, Z_running, dim_out, mf, white=white))
if dim_in != dim_out:
Z_running = Z_running.dot(W)
X_running = X_running.dot(W)
# final layer
layers.append(Layer(kernels[-1], Z_running, num_outputs, mean_function, white=white))
return layers
def init_layers_input_prop(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
D = X.shape[1]
M = Z.shape[0]
layers = []
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim - D
std_in = kern_in.variance.read_value()**0.5
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kern_in, Z_padded, dim_out, Zero(), white=white, input_prop_dim=D))
dim_in = kernels[-1].input_dim
std_in = kernels[-2].variance.read_value()**0.5 if dim_in > D else 1.
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kernels[-1], Z_padded, num_outputs, mean_function, white=white))
return layers
| [
"numpy.eye",
"gpflow.mean_functions.Zero",
"gpflow.mean_functions.Linear",
"numpy.zeros",
"numpy.concatenate",
"numpy.linalg.svd",
"numpy.random.randn",
"gpflow.mean_functions.Identity"
] | [((555, 561), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (559, 561), False, 'from gpflow.mean_functions import Zero\n'), ((1833, 1839), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (1837, 1839), False, 'from gpflow.mean_functions import Zero\n'), ((2598, 2625), 'numpy.concatenate', 'np.concatenate', (['[Z, pad]', '(1)'], {}), '([Z, pad], 1)\n', (2612, 2625), True, 'import numpy as np\n'), ((2294, 2321), 'numpy.concatenate', 'np.concatenate', (['[Z, pad]', '(1)'], {}), '([Z, pad], 1)\n', (2308, 2321), True, 'import numpy as np\n'), ((960, 970), 'gpflow.mean_functions.Identity', 'Identity', ([], {}), '()\n', (968, 970), False, 'from gpflow.mean_functions import Identity, Linear\n'), ((1338, 1347), 'gpflow.mean_functions.Linear', 'Linear', (['W'], {}), '(W)\n', (1344, 1347), False, 'from gpflow.mean_functions import Identity, Linear\n'), ((2538, 2568), 'numpy.random.randn', 'np.random.randn', (['M', '(dim_in - D)'], {}), '(M, dim_in - D)\n', (2553, 2568), True, 'import numpy as np\n'), ((1086, 1131), 'numpy.linalg.svd', 'np.linalg.svd', (['X_running'], {'full_matrices': '(False)'}), '(X_running, full_matrices=False)\n', (1099, 1131), True, 'import numpy as np\n'), ((2230, 2260), 'numpy.random.randn', 'np.random.randn', (['M', '(dim_in - D)'], {}), '(M, dim_in - D)\n', (2245, 2260), True, 'import numpy as np\n'), ((2378, 2384), 'gpflow.mean_functions.Zero', 'Zero', ([], {}), '()\n', (2382, 2384), False, 'from gpflow.mean_functions import Zero\n'), ((1262, 1276), 'numpy.eye', 'np.eye', (['dim_in'], {}), '(dim_in)\n', (1268, 1276), True, 'import numpy as np\n'), ((1278, 1314), 'numpy.zeros', 'np.zeros', (['(dim_in, dim_out - dim_in)'], {}), '((dim_in, dim_out - dim_in))\n', (1286, 1314), True, 'import numpy as np\n')] |
#2020-04-20 <NAME> created.
#serializer는 모두 ModelSerializer로 간단히 처리함
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from .models import Profile
#Sign Up 회원가입
class UserSerializer(serializers.ModelSerializer):
class meta:
model = User
fields = ('id', 'username', 'email')
class CreateUserSerializer(serializers.ModelSerializer) :
# def create(self, validated_data):
# username = validated_data['username']
# email = validated_data['email']
# password = validated_data['password']
# user_obj = User(
# username = username,
# email = email
# )
# user_obj.set_password(password)
# user_obj.save()
# return validated_data
# class Meta:
# model = User
# fields = [
# 'username',
# 'password',
# 'email',
# 'is_superuser',
# ]
class Meta:
model = User
fields = ("id", "username", "password", "email")
extra_kwargs = {"password": {"write_<PASSWORD>": True}}
def create(self, validated_data):
user = User.objects.create_user(
validated_data["username"], None, validated_data["password"]
)
return user
#Check Valid Access on Server 접속 유지중인지 확인
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("id", "username")
#Login 로그인
#연결되는 모델이 없기 때문에 serializer로 작성
class LoginUserSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("아이디 혹은 비밀번호가 잘못 되었습니다.")
class ProfileSerializer(serializers.Serializer):
class Meta:
model = Profile
#exclude = ("user_pk", "likelion_number", "email")
fields = '__all__'
#read_only = True | [
"django.contrib.auth.authenticate",
"rest_framework.serializers.CharField",
"django.contrib.auth.models.User.objects.create_user",
"rest_framework.serializers.ValidationError"
] | [((1639, 1662), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (1660, 1662), False, 'from rest_framework import serializers\n'), ((1678, 1701), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (1699, 1701), False, 'from rest_framework import serializers\n'), ((1222, 1313), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', (["validated_data['username']", 'None', "validated_data['password']"], {}), "(validated_data['username'], None, validated_data[\n 'password'])\n", (1246, 1313), False, 'from django.contrib.auth.models import User\n'), ((1748, 1768), 'django.contrib.auth.authenticate', 'authenticate', ([], {}), '(**data)\n', (1760, 1768), False, 'from django.contrib.auth import authenticate\n'), ((1843, 1896), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""아이디 혹은 비밀번호가 잘못 되었습니다."""'], {}), "('아이디 혹은 비밀번호가 잘못 되었습니다.')\n", (1870, 1896), False, 'from rest_framework import serializers\n')] |
# ----------------------------------------------------------------------
# |
# | All.py
# |
# | <NAME> <<EMAIL>>
# | 2018-04-23 10:05:42
# |
# ----------------------------------------------------------------------
# |
# | Copyright <NAME> 2018-22.
# | Distributed under the Boost Software License, Version 1.0.
# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# |
# ----------------------------------------------------------------------
"""All items from this module."""
import os
import sys
import CommonEnvironment
from CommonEnvironment.TypeInfo.FundamentalTypes.BoolTypeInfo import BoolTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DateTimeTypeInfo import DateTimeTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DateTypeInfo import DateTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DirectoryTypeInfo import DirectoryTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.DurationTypeInfo import DurationTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.EnumTypeInfo import EnumTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.FilenameTypeInfo import FilenameTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.FloatTypeInfo import FloatTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.GuidTypeInfo import GuidTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.IntTypeInfo import IntTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo import StringTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.TimeTypeInfo import TimeTypeInfo
from CommonEnvironment.TypeInfo.FundamentalTypes.UriTypeInfo import Uri, UriTypeInfo
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# |
# | Public Types
# |
# ----------------------------------------------------------------------
ALL_FUNDAMENTAL_TYPES = [ BoolTypeInfo,
DateTimeTypeInfo,
DateTypeInfo,
DirectoryTypeInfo,
DurationTypeInfo,
EnumTypeInfo,
FilenameTypeInfo,
FloatTypeInfo,
GuidTypeInfo,
IntTypeInfo,
StringTypeInfo,
TimeTypeInfo,
UriTypeInfo,
]
# ----------------------------------------------------------------------
# |
# | Public Methods
# |
# ----------------------------------------------------------------------
def CreateFromPythonType(typ, **kwargs):
"""
Creates a TypeInfo object based on the provided type.
Examples:
CreateFromPythonType(int)
CreateFromPythonType(string)
"""
if sys.version_info[0] == 2:
if typ in [ str, unicode, basestring, ]: # <Undefined variable> pylint: disable = E0602
return StringTypeInfo(**kwargs)
else:
if typ == str:
return StringTypeInfo(**kwargs)
for potential_type_info in [ BoolTypeInfo,
DateTimeTypeInfo,
DateTypeInfo,
# Ambiguous: DirectoryTypeInfo
DurationTypeInfo,
# Abmiguous: EnumTypeInfo
# Ambiguous: FilenameTypeInfo
FloatTypeInfo,
GuidTypeInfo,
IntTypeInfo,
# Defined above: StringTypeInfo
TimeTypeInfo,
UriTypeInfo,
]:
if potential_type_info.ExpectedType == typ:
return potential_type_info(**kwargs)
raise Exception("'{}' is not a recognized type".format(typ))
| [
"CommonEnvironment.ThisFullpath",
"CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo.StringTypeInfo",
"os.path.split"
] | [((1817, 1849), 'CommonEnvironment.ThisFullpath', 'CommonEnvironment.ThisFullpath', ([], {}), '()\n', (1847, 1849), False, 'import CommonEnvironment\n'), ((1879, 1910), 'os.path.split', 'os.path.split', (['_script_fullpath'], {}), '(_script_fullpath)\n', (1892, 1910), False, 'import os\n'), ((3608, 3632), 'CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo.StringTypeInfo', 'StringTypeInfo', ([], {}), '(**kwargs)\n', (3622, 3632), False, 'from CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo import StringTypeInfo\n'), ((3688, 3712), 'CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo.StringTypeInfo', 'StringTypeInfo', ([], {}), '(**kwargs)\n', (3702, 3712), False, 'from CommonEnvironment.TypeInfo.FundamentalTypes.StringTypeInfo import StringTypeInfo\n')] |
import os
import matplotlib.pyplot as plt
from keras import applications
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras import optimizers
from keras.models import Sequential, Model, load_model
from keras.layers import Dropout, Flatten, Dense, MaxPooling2D
from keras.regularizers import l2
from keras.callbacks import ModelCheckpoint
import sys
import argparse
import efficientnet
# Starter Code for Image Classification
def parse_arguments(argv):
parser = argparse.ArgumentParser(description='person classification training code')
# model name
parser.add_argument('--model_name', default='xception', type=str, help='', choices=['xception', 'efficientnet'])
# input
'''
db/
train/
positive/
skt_t_p00001.jpg
skt_t_p00002.jpg
...
negative/
skt_t_n00001.jpg
skt_t_n00001.jpg
...
validation/
positive/
skt_v_p00001.jpg
skt_v_p00002.jpg
...
negative/
skt_v_n00001.jpg
skt_v_n00002.jpg
...
'''
parser.add_argument('--train_data_dir', default='./db/train', type=str, help='root folder path for training (contaning at least two image folders)')
parser.add_argument('--val_data_dir', default='./db/validation', type=str, help='root folder path for validation (contaning at least two image folders)')
parser.add_argument('--number_of_classes', default=2, type=int, help='')
# hyper parameter
parser.add_argument('--init_lr', default=1e-4, type=float, help='')
parser.add_argument('--image_size', default=299, type=int, help='')
parser.add_argument('--train_epoch', default=20, type=int, help='')
parser.add_argument('--freeze_layer', default=-30, type=int, help='')
parser.add_argument('--dense_units', default=2048, type=int, help='')
parser.add_argument('--dropout_rate', default=0.2, type=float, help='')
# change batch_size according to your GPU memory for speed up
parser.add_argument('--train_batch_size', default=16, type=int, help='')
parser.add_argument('--val_batch_size', default=100, type=int, help='')
return parser.parse_args(argv)
def train(args):
if 'efficientnet' in args.model_name:
pretrained_model = efficientnet.EfficientNetB5(weights='imagenet', include_top=False, input_shape=(args.image_size, args.image_size, 3), pooling='avg')
else:
pretrained_model = applications.xception.Xception(weights='imagenet', include_top=False, input_shape=(args.image_size, args.image_size, 3), pooling='avg')
# Freeze the layers except the last N layers
for layer in pretrained_model.layers[:args.freeze_layer]:
layer.trainable = False
# Create the model
model = Sequential()
# Add the transper learning base model
model.add(pretrained_model)
# Add new layers
#model.add(Flatten())
model.add(Dense(args.dense_units, activation='relu', kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01)))
model.add(Dropout(args.dropout_rate))
model.add(Dense(args.number_of_classes, activation='softmax'))
# Show a summary of the model. Check the number of trainable parameters
model.summary()
# Save the checkpoint with model name
model_file_path="%s_base.h5" % args.model_name
# Keep only a single checkpoint, the best over test accuracy.
checkpoint = ModelCheckpoint(model_file_path,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=90,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255)
# Change the batch_size according to your system RAM
train_generator = train_datagen.flow_from_directory(
args.train_data_dir,
target_size=(args.image_size, args.image_size),
batch_size=args.train_batch_size,
class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
args.val_data_dir,
target_size=(args.image_size, args.image_size),
batch_size=args.val_batch_size,
class_mode='categorical',
shuffle=False)
# Compile the model
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.Adam(lr=args.init_lr),
metrics=['acc'])
# Train the model
history = model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples/train_generator.batch_size ,
epochs=args.train_epoch,
validation_data=validation_generator,
validation_steps=validation_generator.samples/validation_generator.batch_size,
verbose=1,
callbacks=[checkpoint])
return history
def view(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Loss')
plt.legend()
plt.figure()
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Accuracy')
plt.legend()
plt.show()
def count_dirs(folder_path):
dirs = [o for o in os.listdir(folder_path) if os.path.isdir(os.path.join(folder_path,o))]
return len(dirs)
def check_input_params(args):
valid = True
number_of_train_folders = count_dirs(args.train_data_dir)
number_of_validation_folders = count_dirs(args.val_data_dir)
if args.number_of_classes != number_of_train_folders:
print('plz, check [%s] (# of classes:%d) != (# of folders:%d)' % (args.train_data_dir, args.number_of_classes, number_of_train_folders))
valid = False
if args.number_of_classes != number_of_validation_folders:
print('plz, check [%s] (# of classes:%d) != (# of folders:%d)' % (args.val_data_dir, args.number_of_classes, number_of_validation_folders))
valid = False
return valid
def main(args):
if check_input_params(args):
history = train(args)
view(history)
if __name__ == "__main__":
main(parse_arguments(sys.argv[1:]))
| [
"keras.optimizers.Adam",
"os.listdir",
"keras.callbacks.ModelCheckpoint",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"efficientnet.EfficientNetB5",
"keras.applications.xception.Xception",
"keras.preprocessing.image.ImageDataGenerator",
"keras.models.Sequential",
"os.path.join",
"matplo... | [((498, 572), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""person classification training code"""'}), "(description='person classification training code')\n", (521, 572), False, 'import argparse\n'), ((2902, 2914), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2912, 2914), False, 'from keras.models import Sequential, Model, load_model\n'), ((3553, 3652), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_file_path'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(model_file_path, monitor='val_acc', verbose=1,\n save_best_only=True, mode='max')\n", (3568, 3652), False, 'from keras.callbacks import ModelCheckpoint\n'), ((3799, 3987), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'rotation_range': '(90)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.1)', 'zoom_range': '(0.1)', 'horizontal_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rescale=1.0 / 255, rotation_range=90, width_shift_range=\n 0.2, height_shift_range=0.2, shear_range=0.1, zoom_range=0.1,\n horizontal_flip=True, fill_mode='nearest')\n", (3817, 3987), False, 'from keras.preprocessing.image import ImageDataGenerator, load_img\n'), ((4071, 4108), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (4089, 4108), False, 'from keras.preprocessing.image import ImageDataGenerator, load_img\n'), ((5461, 5511), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""b"""'], {'label': '"""Training loss"""'}), "(epochs, loss, 'b', label='Training loss')\n", (5469, 5511), True, 'import matplotlib.pyplot as plt\n'), ((5516, 5572), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""r"""'], {'label': '"""Validation loss"""'}), "(epochs, val_loss, 'r', label='Validation loss')\n", (5524, 5572), True, 'import matplotlib.pyplot as plt\n'), ((5577, 5594), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (5586, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5611), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5609, 5611), True, 'import matplotlib.pyplot as plt\n'), ((5621, 5633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5631, 5633), True, 'import matplotlib.pyplot as plt\n'), ((5643, 5691), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""b"""'], {'label': '"""Training acc"""'}), "(epochs, acc, 'b', label='Training acc')\n", (5651, 5691), True, 'import matplotlib.pyplot as plt\n'), ((5696, 5750), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""r"""'], {'label': '"""Validation acc"""'}), "(epochs, val_acc, 'r', label='Validation acc')\n", (5704, 5750), True, 'import matplotlib.pyplot as plt\n'), ((5755, 5776), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (5764, 5776), True, 'import matplotlib.pyplot as plt\n'), ((5781, 5793), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5791, 5793), True, 'import matplotlib.pyplot as plt\n'), ((5804, 5814), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5812, 5814), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2548), 'efficientnet.EfficientNetB5', 'efficientnet.EfficientNetB5', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(args.image_size, args.image_size, 3)', 'pooling': '"""avg"""'}), "(weights='imagenet', include_top=False,\n input_shape=(args.image_size, args.image_size, 3), pooling='avg')\n", (2439, 2548), False, 'import efficientnet\n'), ((2582, 2721), 'keras.applications.xception.Xception', 'applications.xception.Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(args.image_size, args.image_size, 3)', 'pooling': '"""avg"""'}), "(weights='imagenet', include_top=False,\n input_shape=(args.image_size, args.image_size, 3), pooling='avg')\n", (2612, 2721), False, 'from keras import applications\n'), ((3175, 3201), 'keras.layers.Dropout', 'Dropout', (['args.dropout_rate'], {}), '(args.dropout_rate)\n', (3182, 3201), False, 'from keras.layers import Dropout, Flatten, Dense, MaxPooling2D\n'), ((3217, 3268), 'keras.layers.Dense', 'Dense', (['args.number_of_classes'], {'activation': '"""softmax"""'}), "(args.number_of_classes, activation='softmax')\n", (3222, 3268), False, 'from keras.layers import Dropout, Flatten, Dense, MaxPooling2D\n'), ((4781, 4813), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'args.init_lr'}), '(lr=args.init_lr)\n', (4796, 4813), False, 'from keras import optimizers\n'), ((5868, 5891), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (5878, 5891), False, 'import os\n'), ((3123, 3131), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (3125, 3131), False, 'from keras.regularizers import l2\n'), ((3150, 3158), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (3152, 3158), False, 'from keras.regularizers import l2\n'), ((5909, 5937), 'os.path.join', 'os.path.join', (['folder_path', 'o'], {}), '(folder_path, o)\n', (5921, 5937), False, 'import os\n')] |
import sys
sys.path.append('../')
import config
import pymysql.cursors
import pandas as pd
import numpy as np
from scipy import io as scipyio
from tempfile import SpooledTemporaryFile
from scipy.sparse import vstack as vstack_sparse_matrices
# Function to reassemble the p matrix from the vectors
def reconstitute_vector(bytesblob):
f = SpooledTemporaryFile(max_size=1000000000)
f.write(bytesblob)
f.seek(0)
return scipyio.mmread(f)
def youtubelink(vidid):
return ('https://www.youtube.com/watch?v=' + vidid)
connection = pymysql.connect(host='localhost',
user='root',
password=config.MYSQL_SERVER_PASSWORD,
db='youtubeProjectDB',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
# https://stackoverflow.com/questions/612231/how-can-i-select-rows-with-maxcolumn-value-distinct-by-another-column-in-sql?rq=1
# Note - this is a very interesting query! never seen it before..
sql = """SELECT * FROM
(SELECT DISTINCT(videoId) AS v, videoTitle FROM search_api) A
INNER JOIN
(SELECT * FROM captions c
INNER JOIN(SELECT videoId AS InnerVideoId,
MAX(wordCount) AS MaxWordCount,
MAX(id) AS MaxId
FROM captions
WHERE tfidfVector IS NOT NULL
GROUP BY videoId) grouped_c
ON c.videoId = grouped_c.InnerVideoId
AND c.wordCount = grouped_c.MaxWordCount
AND c.id = grouped_c.MaxId) B
ON A.v = B.videoId;"""
cursor.execute(sql)
manyCaptions = cursor.fetchall()
videos_df = pd.read_sql(sql, connection)
connection.close()
# note that the other program which put the vectors there only did it on captions WHERE language like '%en%'
# for that reason this query does not contain language. It has instead WHERE tfidfVector IS NOT NULL
videos_df = videos_df.drop('v', 1)
videos_df['tfidfVector_NP'] = videos_df['tfidfVector'].apply(reconstitute_vector)
listOfSparseVectors = list(videos_df['tfidfVector_NP'].values.flatten())
p = vstack_sparse_matrices(listOfSparseVectors)
video_titles = list(videos_df['videoTitle'].values.flatten())
video_ids = list(videos_df['videoId'].values.flatten())
# Apply the transformation to the term document matrix to compute similarity between all pairs
pairwise_similarity = (p * p.T).A # In Scipy, .A transforms a sparse matrix to a dense one
# df9 = pd.DataFrame(pairwise_similarity, columns=video_ids, index=video_ids)
# s = pd.Series(video_titles, index=df9.index)
# df9 = pd.concat((s.rename('videoTitles'), df9), axis=1)
def nth_similar_tuple(n, ps):
title = (np.array(video_titles))[((-ps).argsort()[n])]
vid_id = (np.array(video_ids))[((-ps).argsort()[n])]
return (title, vid_id)
d = []
for a,b,c in zip(video_titles, video_ids, pairwise_similarity):
d.append({'a':(a,b),
'b': nth_similar_tuple(1,c),
'c': nth_similar_tuple(2,c),
'd': nth_similar_tuple(3,c)})
# takes about a minute to run through the 7000 unique rows.
similarity_df = pd.DataFrame(d)
similarity_df.columns = ['original', 'first_similar', 'second_similar', 'third_similar']
# split the tuples into two-level columns.
similarity_df = pd.concat(
[pd.DataFrame(x, columns=['video_title','youtube_id']) for x in similarity_df.values.T.tolist()],
axis=1,
keys=similarity_df.columns)
print ("Finished running, the Pandas DataFrame variable similarity_df should now be in scope.") | [
"scipy.io.mmread",
"tempfile.SpooledTemporaryFile",
"numpy.array",
"pandas.DataFrame",
"scipy.sparse.vstack",
"pandas.read_sql",
"sys.path.append"
] | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((2329, 2372), 'scipy.sparse.vstack', 'vstack_sparse_matrices', (['listOfSparseVectors'], {}), '(listOfSparseVectors)\n', (2351, 2372), True, 'from scipy.sparse import vstack as vstack_sparse_matrices\n'), ((3355, 3370), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (3367, 3370), True, 'import pandas as pd\n'), ((343, 384), 'tempfile.SpooledTemporaryFile', 'SpooledTemporaryFile', ([], {'max_size': '(1000000000)'}), '(max_size=1000000000)\n', (363, 384), False, 'from tempfile import SpooledTemporaryFile\n'), ((433, 450), 'scipy.io.mmread', 'scipyio.mmread', (['f'], {}), '(f)\n', (447, 450), True, 'from scipy import io as scipyio\n'), ((1848, 1876), 'pandas.read_sql', 'pd.read_sql', (['sql', 'connection'], {}), '(sql, connection)\n', (1859, 1876), True, 'import pandas as pd\n'), ((2914, 2936), 'numpy.array', 'np.array', (['video_titles'], {}), '(video_titles)\n', (2922, 2936), True, 'import numpy as np\n'), ((2974, 2993), 'numpy.array', 'np.array', (['video_ids'], {}), '(video_ids)\n', (2982, 2993), True, 'import numpy as np\n'), ((3535, 3589), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['video_title', 'youtube_id']"}), "(x, columns=['video_title', 'youtube_id'])\n", (3547, 3589), True, 'import pandas as pd\n')] |
from django.shortcuts import render, redirect , HttpResponseRedirect, get_object_or_404
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.forms import UserCreationForm, PasswordChangeForm
from django.views.generic import View, TemplateView, CreateView, UpdateView
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from core.forms import ClienteForm, EditaContaClienteForm
from core.models import Produto
from core.models import Categoria
def index(request):
contexto = {
"produtos":Produto.objects.all()
}
return render(request, "index.html", contexto)
def produto(request): #, slug):
#contexto = {
# 'produto': get_object_or_404(Produto, slug=slug) #verifica se a url existe, caso nao exista ele retorna erro 404
#}
template_name = 'produto.html'
return render(request, template_name)
def lista_produto(request):
pass
def categoria(request, slug):
categoria = Categoria.objects.get(slug=slug)
contexto = {
'categoria': categoria,
'produtos': Produto.objects.filter(categoria=categoria),
}
return render(request,'categoria.html', contexto)
def contato(request):
pass
def festa(request):
return render(request,"festa.html")
#Autenticação login
def login_cliente(request):
return render(request,"login.html")
def contato(request):
return render(request,"contato.html")
#Auntenticação Usuario
@login_required(login_url="entrar")
def page_user(request):
return render(request,'index.html')
# -----------------------------------------------//---------------------------------#
# pagina de cadastro
def registrar(request):
# Se dados forem passados via POST
if request.POST:
form = ClienteForm(request.POST)
if form.is_valid(): # se o formulario for valido
form.save() # cria um novo usuario a partir dos dados enviados
form.cleaner
else:
form = ClienteForm()
contexto = {
"form":form
}
return render(request, "registrar.html", contexto)
# -----------------------------------------------//---------------------------------#
#funcao para alterar conta
@login_required
def editarConta(request):
template_name = 'editarConta.html'
contexto = {}
if request.method == 'POST':
form = EditaContaClienteForm(request.POST, instance=request.user)
if form.is_valid():
form.save()
form = EditaContaClienteForm(instance=request.user)
contexto['success'] = True
else:
form = EditaContaClienteForm(instance=request.user)
contexto['form'] = form
return render(request, template_name, contexto)
# -----------------------------------------------//---------------------------------#
#funcao para alterar senha
@login_required
def editarSenha(request):
template_name = 'editarSenha.html'
context = {}
if request.method == 'POST':
form = PasswordChangeForm(data=request.POST, user=request.user)
if form.is_valid():
form.save()
context['success'] = True
else:
form = PasswordChangeForm(user=request.user)
context['form'] = form
return render(request, template_name, context)
# -----------------------------------------------//---------------------------------# | [
"django.shortcuts.render",
"core.forms.EditaContaClienteForm",
"django.contrib.auth.forms.PasswordChangeForm",
"core.forms.ClienteForm",
"core.models.Produto.objects.filter",
"core.models.Produto.objects.all",
"django.contrib.auth.decorators.login_required",
"core.models.Categoria.objects.get"
] | [((1530, 1564), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""entrar"""'}), "(login_url='entrar')\n", (1544, 1564), False, 'from django.contrib.auth.decorators import login_required, user_passes_test\n'), ((653, 692), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'contexto'], {}), "(request, 'index.html', contexto)\n", (659, 692), False, 'from django.shortcuts import render\n'), ((920, 950), 'django.shortcuts.render', 'render', (['request', 'template_name'], {}), '(request, template_name)\n', (926, 950), False, 'from django.shortcuts import render\n'), ((1038, 1070), 'core.models.Categoria.objects.get', 'Categoria.objects.get', ([], {'slug': 'slug'}), '(slug=slug)\n', (1059, 1070), False, 'from core.models import Categoria\n'), ((1213, 1256), 'django.shortcuts.render', 'render', (['request', '"""categoria.html"""', 'contexto'], {}), "(request, 'categoria.html', contexto)\n", (1219, 1256), False, 'from django.shortcuts import render\n'), ((1322, 1351), 'django.shortcuts.render', 'render', (['request', '"""festa.html"""'], {}), "(request, 'festa.html')\n", (1328, 1351), False, 'from django.shortcuts import render\n'), ((1412, 1441), 'django.shortcuts.render', 'render', (['request', '"""login.html"""'], {}), "(request, 'login.html')\n", (1418, 1441), False, 'from django.shortcuts import render\n'), ((1472, 1503), 'django.shortcuts.render', 'render', (['request', '"""contato.html"""'], {}), "(request, 'contato.html')\n", (1478, 1503), False, 'from django.shortcuts import render\n'), ((1597, 1626), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (1603, 1626), False, 'from django.shortcuts import render\n'), ((2117, 2160), 'django.shortcuts.render', 'render', (['request', '"""registrar.html"""', 'contexto'], {}), "(request, 'registrar.html', contexto)\n", (2123, 2160), False, 'from django.shortcuts import render\n'), ((2747, 2787), 'django.shortcuts.render', 'render', (['request', 'template_name', 'contexto'], {}), '(request, template_name, contexto)\n', (2753, 2787), False, 'from django.shortcuts import render\n'), ((3297, 3336), 'django.shortcuts.render', 'render', (['request', 'template_name', 'context'], {}), '(request, template_name, context)\n', (3303, 3336), False, 'from django.shortcuts import render\n'), ((620, 641), 'core.models.Produto.objects.all', 'Produto.objects.all', ([], {}), '()\n', (639, 641), False, 'from core.models import Produto\n'), ((1143, 1186), 'core.models.Produto.objects.filter', 'Produto.objects.filter', ([], {'categoria': 'categoria'}), '(categoria=categoria)\n', (1165, 1186), False, 'from core.models import Produto\n'), ((1841, 1866), 'core.forms.ClienteForm', 'ClienteForm', (['request.POST'], {}), '(request.POST)\n', (1852, 1866), False, 'from core.forms import ClienteForm, EditaContaClienteForm\n'), ((2049, 2062), 'core.forms.ClienteForm', 'ClienteForm', ([], {}), '()\n', (2060, 2062), False, 'from core.forms import ClienteForm, EditaContaClienteForm\n'), ((2424, 2482), 'core.forms.EditaContaClienteForm', 'EditaContaClienteForm', (['request.POST'], {'instance': 'request.user'}), '(request.POST, instance=request.user)\n', (2445, 2482), False, 'from core.forms import ClienteForm, EditaContaClienteForm\n'), ((2663, 2707), 'core.forms.EditaContaClienteForm', 'EditaContaClienteForm', ([], {'instance': 'request.user'}), '(instance=request.user)\n', (2684, 2707), False, 'from core.forms import ClienteForm, EditaContaClienteForm\n'), ((3049, 3105), 'django.contrib.auth.forms.PasswordChangeForm', 'PasswordChangeForm', ([], {'data': 'request.POST', 'user': 'request.user'}), '(data=request.POST, user=request.user)\n', (3067, 3105), False, 'from django.contrib.auth.forms import UserCreationForm, PasswordChangeForm\n'), ((3221, 3258), 'django.contrib.auth.forms.PasswordChangeForm', 'PasswordChangeForm', ([], {'user': 'request.user'}), '(user=request.user)\n', (3239, 3258), False, 'from django.contrib.auth.forms import UserCreationForm, PasswordChangeForm\n'), ((2554, 2598), 'core.forms.EditaContaClienteForm', 'EditaContaClienteForm', ([], {'instance': 'request.user'}), '(instance=request.user)\n', (2575, 2598), False, 'from core.forms import ClienteForm, EditaContaClienteForm\n')] |
from django.contrib.auth.models import User
from django.test import TestCase
from adminlte_log.models import AdminlteLogType, AdminlteLog
class AdminlteLogTest(TestCase):
def setUp(self):
AdminlteLogType.objects.create(name='test', code='test')
self.user = User.objects.create_user(username='bohan')
def test_log(self):
log = AdminlteLog.info('test', user=self.user, sort_desc='This is a log', foo='bar')
self.assertEqual(log.id, 1)
| [
"adminlte_log.models.AdminlteLogType.objects.create",
"django.contrib.auth.models.User.objects.create_user",
"adminlte_log.models.AdminlteLog.info"
] | [((204, 260), 'adminlte_log.models.AdminlteLogType.objects.create', 'AdminlteLogType.objects.create', ([], {'name': '"""test"""', 'code': '"""test"""'}), "(name='test', code='test')\n", (234, 260), False, 'from adminlte_log.models import AdminlteLogType, AdminlteLog\n'), ((281, 323), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""bohan"""'}), "(username='bohan')\n", (305, 323), False, 'from django.contrib.auth.models import User\n'), ((363, 441), 'adminlte_log.models.AdminlteLog.info', 'AdminlteLog.info', (['"""test"""'], {'user': 'self.user', 'sort_desc': '"""This is a log"""', 'foo': '"""bar"""'}), "('test', user=self.user, sort_desc='This is a log', foo='bar')\n", (379, 441), False, 'from adminlte_log.models import AdminlteLogType, AdminlteLog\n')] |
from __future__ import print_function
import math
import tensorflow as tf
from sklearn.manifold import TSNE
from word2vec_input import *
from word2vec_plot import *
dataset_path = 'dataset/'
dataset = 'text8.zip'
vocabulary_size = 50000
batch_size = 128
embedding_size = 128
skip_window = 1
num_skips = 2
num_sampled = 64
num_steps = 100001
num_points = 400
def run(param):
# Building my graph
graph = tf.Graph()
with graph.as_default():
# Input data
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# Variables
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0/math.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Model
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
# Loss
loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed, train_labels, num_sampled, vocabulary_size))
# Optimizer
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Normalizing the final embeddings
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
# Creating saver to write embeddings
saver = tf.train.Saver()
# Getting dataset
words = read_data(dataset_path, dataset)
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
if param == 'training':
# Training word embeddings
with tf.Session(graph=graph) as sess:
# Initializing all variables
init = tf.initialize_all_variables()
sess.run(init)
print('Graph Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_data, batch_labels = generate_batch(data, batch_size, num_skips, skip_window)
feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
_, l = sess.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
print('Average loss at step %d: %f' % (step, average_loss))
saver.save(sess, 'dataset/embeddings')
else:
# Visualizing word embeddings
with tf.Session(graph=graph) as sess:
saver.restore(sess, 'dataset/embeddings')
print('Embeddings restored')
final_embeddings = sess.run(normalized_embeddings)
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
two_d_embeddings = tsne.fit_transform(final_embeddings[1:num_points+1, :])
words = [reverse_dictionary[i] for i in xrange(1, num_points+1)]
plot(two_d_embeddings, words)
plt.show()
if __name__ == '__main__':
run('training')
run('visualization')
| [
"tensorflow.Graph",
"tensorflow.nn.embedding_lookup",
"tensorflow.initialize_all_variables",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"tensorflow.Session",
"math.sqrt",
"sklearn.manifold.TSNE",
"tensorflow.random_uniform",
"tensorflow.train.AdagradOptimizer",
"tensorflow.square",
"t... | [((407, 417), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (415, 417), True, 'import tensorflow as tf\n'), ((478, 522), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[batch_size]'}), '(tf.int32, shape=[batch_size])\n', (492, 522), True, 'import tensorflow as tf\n'), ((540, 587), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[batch_size, 1]'}), '(tf.int32, shape=[batch_size, 1])\n', (554, 587), True, 'import tensorflow as tf\n'), ((902, 951), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'train_dataset'], {}), '(embeddings, train_dataset)\n', (924, 951), True, 'import tensorflow as tf\n'), ((1379, 1395), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1393, 1395), True, 'import tensorflow as tf\n'), ((630, 693), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocabulary_size, embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocabulary_size, embedding_size], -1.0, 1.0)\n', (647, 693), True, 'import tensorflow as tf\n'), ((852, 879), 'tensorflow.zeros', 'tf.zeros', (['[vocabulary_size]'], {}), '([vocabulary_size])\n', (860, 879), True, 'import tensorflow as tf\n'), ((986, 1100), 'tensorflow.nn.sampled_softmax_loss', 'tf.nn.sampled_softmax_loss', (['softmax_weights', 'softmax_biases', 'embed', 'train_labels', 'num_sampled', 'vocabulary_size'], {}), '(softmax_weights, softmax_biases, embed,\n train_labels, num_sampled, vocabulary_size)\n', (1012, 1100), True, 'import tensorflow as tf\n'), ((1605, 1628), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (1615, 1628), True, 'import tensorflow as tf\n'), ((1680, 1709), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1707, 1709), True, 'import tensorflow as tf\n'), ((2285, 2308), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (2295, 2308), True, 'import tensorflow as tf\n'), ((2461, 2521), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(30)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(5000)'}), "(perplexity=30, n_components=2, init='pca', n_iter=5000)\n", (2465, 2521), False, 'from sklearn.manifold import TSNE\n'), ((1127, 1157), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['(1.0)'], {}), '(1.0)\n', (1152, 1157), True, 'import tensorflow as tf\n'), ((1242, 1263), 'tensorflow.square', 'tf.square', (['embeddings'], {}), '(embeddings)\n', (1251, 1263), True, 'import tensorflow as tf\n'), ((793, 818), 'math.sqrt', 'math.sqrt', (['embedding_size'], {}), '(embedding_size)\n', (802, 818), False, 'import math\n')] |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
print("Python Version:", torch.__version__)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4*4*50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4*4*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(model, device, train_dataloader, optimizer, epoch):
model.train()
for idx, (data, target) in enumerate(train_dataloader):
data, target = data.to(device), target.to(device)
pred = model(data)
loss = F.nll_loss(pred, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 100 == 0:
print("Train Epoch: {}, iteration: {}, Loss: {}".format(
epoch, idx, loss.item()))
def test(model, device, test_dataloader):
model.eval()
total_loss = 0.
correct = 0.
with torch.no_grad():
for idx, (data, target) in enumerate(test_dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
total_loss += F.nll_loss(output, target, reduction="sum").item()
pred = output.argmax(dim=1)
correct += pred.eq(target.view_as(pred)).sum().item()
total_loss /= len(test_dataloader.dataset)
acc = correct / len(test_dataloader.dataset) * 100
print("Test loss: {}, Accuracy: {}".format(total_loss, acc))
mnist_data = datasets.MNIST("./mnist_data", train=True, download=True,
transform = transforms.Compose([
transforms.ToTensor(),
]))
# print(mnist_data)
# print(mnist_data[233][0].shape)
data = [d[0].data.cpu().numpy() for d in mnist_data]
np.mean(data)
np.std(data)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch_size = 32
train_dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True
)
test_dataloader = torch.utils.data.DataLoader(
datasets.FashionMNIST("./fashion_mnist_data", train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, num_workers=1, pin_memory=True
)
lr = 0.01
momentum = 0.5
model = Net().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
num_epochs = 2
for epoch in range(num_epochs):
train(model, device, train_dataloader, optimizer, epoch)
test(model, device, test_dataloader)
torch.save(model.state_dict(), "fashion_mnist_cnn.pt")
| [
"numpy.mean",
"torch.nn.functional.nll_loss",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.Linear",
"numpy.std",
"torch.nn.functional.log_softmax",
"torch.no_grad",
"torch.nn.functional.max_pool2d",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] | [((2190, 2203), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2197, 2203), True, 'import numpy as np\n'), ((2204, 2216), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2210, 2216), True, 'import numpy as np\n'), ((308, 330), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(20)', '(5)', '(1)'], {}), '(1, 20, 5, 1)\n', (317, 330), True, 'import torch.nn as nn\n'), ((352, 375), 'torch.nn.Conv2d', 'nn.Conv2d', (['(20)', '(50)', '(5)', '(1)'], {}), '(20, 50, 5, 1)\n', (361, 375), True, 'import torch.nn as nn\n'), ((395, 421), 'torch.nn.Linear', 'nn.Linear', (['(4 * 4 * 50)', '(500)'], {}), '(4 * 4 * 50, 500)\n', (404, 421), True, 'import torch.nn as nn\n'), ((437, 455), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(10)'], {}), '(500, 10)\n', (446, 455), True, 'import torch.nn as nn\n'), ((529, 550), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (541, 550), True, 'import torch.nn.functional as F\n'), ((597, 618), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x', '(2)', '(2)'], {}), '(x, 2, 2)\n', (609, 618), True, 'import torch.nn.functional as F\n'), ((721, 744), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (734, 744), True, 'import torch.nn.functional as F\n'), ((987, 1011), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['pred', 'target'], {}), '(pred, target)\n', (997, 1011), True, 'import torch.nn.functional as F\n'), ((1336, 1351), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1349, 1351), False, 'import torch\n'), ((2250, 2275), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2273, 2275), False, 'import torch\n'), ((2027, 2048), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2046, 2048), False, 'from torchvision import datasets, transforms\n'), ((1537, 1580), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (1547, 1580), True, 'import torch.nn.functional as F\n'), ((2477, 2498), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2496, 2498), False, 'from torchvision import datasets, transforms\n'), ((2508, 2550), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2528, 2550), False, 'from torchvision import datasets, transforms\n'), ((2810, 2831), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2829, 2831), False, 'from torchvision import datasets, transforms\n'), ((2841, 2883), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (2861, 2883), False, 'from torchvision import datasets, transforms\n')] |
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
This is Allie's modeling API to help build classification or regression models.
All you need to do is run the model.py script and you will be guided through the
modeling process.
Usage: python3 model.py
Alternative CLI Usage: python3 model.py audio 2 c gender males females
- audio = audio file type
- 2 = 2 classes
- c = classification (r for regression)
- gender = common name of model
- male = first class
- female = second class [via N number of classes]
For addditional documentation, check out
https://github.com/jim-schwoebel/allie/tree/master/training
'''
###############################################################
## IMPORT STATEMENTS ##
###############################################################
import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform
from pyfiglet import Figlet
f=Figlet(font='doh')
print(f.renderText('Allie'))
f=Figlet(font='doom')
import pandas as pd
import matplotlib.pyplot as plt
###############################################################
## CREATE HELPER FUNCTIONS ##
###############################################################
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def get_folders(listdir):
folders=list()
for i in range(len(listdir)):
if listdir[i].find('.') < 0:
folders.append(listdir[i])
return folders
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
counts={'audio': filetypes.count('audio'),
'image': filetypes.count('image'),
'text': filetypes.count('text'),
'video': filetypes.count('video'),
'csv': filetypes.count('csv')}
# get back the type of folder (main file type)
countlist=list(counts)
countvalues=list(counts.values())
maxvalue=max(countvalues)
maxind=countvalues.index(maxvalue)
return countlist[maxind]
def pull_element(mylist, element):
pull_=list()
for i in range(len(mylist)):
pull_.append(mylist[i][element])
return pull_
def convert_csv(X_train, y_train, labels, mtype, classes):
'''
Take in a array of features and labels and output a
pandas DataFrame format for easy .CSV expor and for model training.
This is important to make sure all machine learning training sessions
use the same dataset (so they can be benchmarked appropriately).
'''
# from pandas merging guide https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
feature_list=labels
data=list()
for i in tqdm(range(len(X_train)), desc='converting csv...'):
newlist=list()
for j in range(len(X_train[i])):
newlist.append([X_train[i][j]])
temp=pd.DataFrame(dict(zip(feature_list,newlist)), index=[i])
# print(temp)
data.append(temp)
data = pd.concat(data)
if mtype == 'c':
data['class_']=y_train
elif mtype == 'r':
if len(classes) == 1:
data[classes[0]]=y_train
else:
for j in range(len(classes)):
newy=pull_element(y_train, j)
data[classes[j]]=newy
data=pd.DataFrame(data, columns = list(data))
# print this because in pretty much every case you will write the .CSV file afterwards
print('writing csv file...')
return data
def device_info():
cpu_data={'memory':psutil.virtual_memory(),
'cpu percent':psutil.cpu_percent(),
'cpu times':psutil.cpu_times(),
'cpu count':psutil.cpu_count(),
'cpu stats':psutil.cpu_stats(),
'cpu swap':psutil.swap_memory(),
'partitions':psutil.disk_partitions(),
'disk usage':psutil.disk_usage('/'),
'disk io counters':psutil.disk_io_counters(),
'battery':psutil.sensors_battery(),
'boot time':psutil.boot_time(),
}
data={'time':datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
'timezone':time.tzname,
'operating system': platform.system(),
'os release':platform.release(),
'os version':platform.version(),
'cpu data':cpu_data,
'space left': list(psutil.disk_usage('/'))[2]/1000000000}
return data
def get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time):
'''
get the metrics associated iwth a classification and regression problem
and output a .JSON file with the training session.
'''
metrics_=dict()
y_true=y_test
if default_training_script not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(X_test)
elif default_training_script=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for k in range(len(listdir)):
if listdir[k].startswith('predictions'):
csvfile=listdir[k]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif default_training_script == 'autogluon':
from autogluon import TabularPrediction as task
test_data=test_data.drop(labels=['class'],axis=1)
y_pred=clf.predict(test_data)
elif default_training_script == 'autokeras':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'autopytorch':
y_pred=clf.predict(X_test).flatten()
elif default_training_script == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
y_pred = clf.predict(data)
os.chdir(curdir)
elif default_training_script == 'ludwig':
data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
pred=clf.predict(data)['class__predictions']
y_pred=np.array(list(pred), dtype=np.int64)
elif default_training_script == 'devol':
X_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
y_pred=clf.predict_classes(X_test).flatten()
elif default_training_script=='keras':
if mtype == 'c':
y_pred=clf.predict_classes(X_test).flatten()
elif mtype == 'r':
y_pred=clf.predict(X_test).flatten()
elif default_training_script=='neuraxle':
y_pred=clf.transform(X_test)
elif default_training_script=='safe':
# have to make into a pandas dataframe
test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
y_pred=clf.predict(test_data)
print(y_pred)
# get classification or regression metrics
if mtype in ['c', 'classification']:
# now get all classification metrics
mtype='classification'
metrics_['accuracy']=metrics.accuracy_score(y_true, y_pred)
metrics_['balanced_accuracy']=metrics.balanced_accuracy_score(y_true, y_pred)
try:
metrics_['precision']=metrics.precision_score(y_true, y_pred)
except:
metrics_['precision']='n/a'
try:
metrics_['recall']=metrics.recall_score(y_true, y_pred)
except:
metrics_['recall']='n/a'
try:
metrics_['f1_score']=metrics.f1_score (y_true, y_pred, pos_label=1)
except:
metrics_['f1_score']='n/a'
try:
metrics_['f1_micro']=metrics.f1_score(y_true, y_pred, average='micro')
except:
metrics_['f1_micro']='n/a'
try:
metrics_['f1_macro']=metrics.f1_score(y_true, y_pred, average='macro')
except:
metrics_['f1_macro']='n/a'
try:
metrics_['roc_auc']=metrics.roc_auc_score(y_true, y_pred)
except:
metrics_['roc_auc']='n/a'
try:
metrics_['roc_auc_micro']=metrics.roc_auc_score(y_true, y_pred, average='micro')
except:
metrics_['roc_auc_micro']='n/a'
try:
metrics_['roc_auc_macro']=metrics.roc_auc_score(y_true, y_pred, average='macro')
except:
metrics_['roc_auc_micro']='n/a'
metrics_['confusion_matrix']=metrics.confusion_matrix(y_true, y_pred).tolist()
metrics_['classification_report']=metrics.classification_report(y_true, y_pred, target_names=classes)
plot_confusion_matrix(np.array(metrics_['confusion_matrix']), classes)
try:
# predict_proba only works for or log loss and modified Huber loss.
# https://stackoverflow.com/questions/47788981/sgdclassifier-with-predict-proba
try:
y_probas = clf.predict_proba(X_test)[:, 1]
except:
try:
y_probas = clf.decision_function(X_test)[:, 1]
except:
print('error making y_probas')
plot_roc_curve(y_test, [y_probas], [default_training_script])
except:
print('error plotting ROC curve')
print('predict_proba only works for or log loss and modified Huber loss.')
elif mtype in ['r', 'regression']:
# now get all regression metrics
mtype='regression'
metrics_['mean_absolute_error'] = metrics.mean_absolute_error(y_true, y_pred)
metrics_['mean_squared_error'] = metrics.mean_squared_error(y_true, y_pred)
metrics_['median_absolute_error'] = metrics.median_absolute_error(y_true, y_pred)
metrics_['r2_score'] = metrics.r2_score(y_true, y_pred)
plot_regressor(clf, classes, X_test, y_test)
data={'sample type': problemtype,
'training time': time.time()-model_start_time,
'created date': str(datetime.datetime.now()),
'device info': device_info(),
'session id': model_session,
'classes': classes,
'problem type': mtype,
'model name': modelname,
'model type': default_training_script,
'metrics': metrics_,
'settings': settings,
'transformer name': transformer_name,
'training data': created_csv_files,
'sample X_test': X_test[0].tolist(),
'sample y_test': y_test[0].tolist()}
if modelname.endswith('.pickle'):
jsonfilename=modelname[0:-7]+'.json'
elif modelname.endswith('.h5'):
jsonfilename=modelname[0:-3]+'.json'
else:
jsonfilename=modelname+'.json'
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
# also output requirements.txt for reproducibilty purposes
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir)
os.system('pip3 freeze -> requirements.txt')
# FUTURE - add in optional copy of cleaning, augmentation, and feature libraries contextually
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/features', basedir+'/features')
# except:
# print('error copying features')
# try:
# shutil.copytree(prev_dir(prev_dir(basedir))+'/cleaning', basedir+'/cleaning')
# except:
# print('error copying cleaning techniques')
# shutil.copytree(prev_dir(prev_dir(basedir))+'/augmentation', basedir+'/augmentation')
# except:
# print('error copying augmentation techniques')
os.chdir(curdir)
def plot_roc_curve(y_test, probs, clf_names):
'''
This function plots an ROC curve with the appropriate
list of classifiers.
'''
cycol = itertools.cycle('bgrcmyk')
for i in range(len(probs)):
print(y_test)
print(probs[i])
try:
fper, tper, thresholds = roc_curve(y_test, probs[i])
plt.plot(fper, tper, color=next(cycol), label=clf_names[i]+' = %s'%(str(round(metrics.auc(fper, tper), 3))))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
except:
print('passing %s'%(clf_names[i]))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.tight_layout()
plt.savefig('roc_curve.png')
plt.close()
def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("\nNormalized confusion matrix")
else:
print('\nConfusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig('confusion_matrix.png')
plt.close()
def plot_regressor(regressor, classes, X_test, y_test):
'''
plot regression models with a bar chart.
'''
try:
y_pred = regressor.predict(X_test)
# plot the first 25 records
if len(classes) == 2:
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
# plot a straight line on the data
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.tight_layout()
plt.savefig('straight_line_predictions.png')
plt.close()
else:
# multi-dimensional generalization
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df1 = df.head(25)
df1.plot(kind='bar',figsize=(10,8))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.tight_layout()
plt.savefig('bar_graph_predictions.png')
plt.close()
except:
print('error plotting regressor')
def pursue_modeling(mtype, model_dir, problemtype, default_training_script,common_name_model):
'''
simple script to decide whether or not to continue modeling the data.
'''
try:
model_listdir=os.listdir(model_dir+'/'+problemtype+'_models')
except:
model_listdir=list()
# note that these are tpot definitions
model_exists=False
if default_training_script == 'tpot':
if common_name_model + '_classifier' in model_listdir and mtype == 'c':
model_exists=True
elif common_name_model +'_regression' in model_listdir and mtype == 'r':
model_exists=True
else:
# only look for naming conflicts with TPOT for now, can expand into the future.
model_exists=False
return model_exists, model_listdir
def get_csvfiles(listdir):
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
return csvfiles
###############################################################
## LOADING SETTINGS ##
###############################################################
# load the default feature set
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
sys.path.append(prevdir+'/train_dir')
settings=json.load(open(prevdir+'/settings.json'))
# get all the default feature arrays
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
create_csv=settings['create_csv']
# prepare training and testing data (should have been already featurized) - # of classes/folders
os.chdir(prevdir+'/train_dir')
data_dir=os.getcwd()
listdir=os.listdir()
folders=get_folders(listdir)
csvfiles=get_csvfiles(listdir)
# now assess folders by content type
data=dict()
for i in range(len(folders)):
os.chdir(folders[i])
listdir=os.listdir()
filetype=classifyfolder(listdir)
data[folders[i]]=filetype
os.chdir(data_dir)
###############################################################
## INITIALIZE CLASSES ##
###############################################################
# get all information from sys.argv, and if not,
# go through asking user for the proper parameters
try:
problemtype=sys.argv[1]
mtype=sys.argv[3]
if mtype == 'c':
classnum=sys.argv[2]
common_name=sys.argv[4]
classes=list()
for i in range(int(classnum)):
classes.append(sys.argv[i+5])
else:
classnum=1
problemtype='csv'
mtype=sys.argv[1]
csvfile=sys.argv[2]
classes=[sys.argv[3]]
common_name=csvfile[0:-4]
except:
# now ask user what type of problem they are trying to solve
mtype=input('is this a classification (c) or regression (r) problem? \n')
while mtype not in ['c','r']:
print('input not recognized...')
mtype=input('is this a classification (c) or regression (r) problem? \n')
if mtype == 'c':
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype='csv'
if problemtype != 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
count=0
availableclasses=list()
for i in range(len(folders)):
if data[folders[i]]==problemtype:
availableclasses.append(folders[i])
count=count+1
classnum=input('how many classes would you like to model? (%s available) \n'%(str(count)))
print('these are the available classes: ')
print(availableclasses)
# get all if all (good for many classes)
classes=list()
if classnum=='all':
for i in range(len(availableclasses)):
classes.append(availableclasses[i])
else:
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
elif problemtype == 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
print('csv file options are: %s \n'%(csvfiles))
csvfile=input('which csvfile would you like to use for classification? \n')
g=pd.read_csv(csvfile)
columns=list(g)
print('potential targets include: %s'%(columns))
target=input('what target would you like to use? \n')
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
elif mtype =='r':
# for regression problems we need a target column to predict / classes from a .CSV
problemtype='csv'
# assumes the .CSV file is in the train dir
os.chdir(prevdir+'/train_dir')
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
while csvfile not in csvfiles:
print('answer not recognized...')
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
# the available classes are only the numeric columns from the spreadsheet
data = pd.read_csv(csvfile)
columns = list(data)
availableclasses=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
# if less than 5 unique answers then we can interpret this as text input
coltype='typedtext'
if coltype == 'numerical':
availableclasses.append(columns[i])
if len(availableclasses) > 0:
classnum=input('how many classes would you like to model? (%s available) \n'%(str(len(availableclasses))))
print('these are the available classes: %s'%(str(availableclasses)))
classes=list()
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
else:
print('no classes available... ending session')
sys.exit()
common_name=input('what is the 1-word common name for the problem you are working on? (e.g. gender for male/female classification) \n')
###############################################################
## UPGRADE MODULES / LOAD MODULES ##
###############################################################
print('-----------------------------------')
print(' LOADING MODULES ')
print('-----------------------------------')
# upgrade to have the proper scikit-learn version later
os.chdir(cur_dir)
os.system('python3 upgrade.py')
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
###############################################################
## CLEAN THE DATA ##
###############################################################
clean_data=settings['clean_data']
clean_dir=prevdir+'/cleaning'
if clean_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('CLEANING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# clean audio via default_audio_cleaners
os.chdir(clean_dir+'/audio_cleaning')
elif problemtype == 'text':
# clean text via default_text_cleaners
os.chdir(clean_dir+'/text_cleaning')
elif problemtype == 'image':
# clean images via default_image_cleaners
os.chdir(clean_dir+'/image_cleaning')
elif problemtype == 'video':
# clean video via default_video_cleaners
os.chdir(clean_dir+'/video_cleaning')
elif problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
elif clean_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## AUGMENT THE DATA ##
###############################################################
augment_data=settings['augment_data']
augment_dir=prevdir+'/augmentation'
if augment_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('AUGMENTING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# augment audio via default_audio_augmenters
os.chdir(augment_dir+'/audio_augmentation')
elif problemtype == 'text':
# augment text via default_text_augmenters
os.chdir(augment_dir+'/text_augmentation')
elif problemtype == 'image':
# augment images via default_image_augmenters
os.chdir(augment_dir+'/image_augmentation')
elif problemtype == 'video':
# augment video via default_video_augmenters
os.chdir(augment_dir+'/video_augmentation')
elif problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
elif augment_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# featurize .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## FEATURIZE FILES ##
###############################################################
# now featurize each class (in proper folder)
if mtype == 'c':
data={}
print('-----------------------------------')
print(f.renderText('FEATURIZING DATA'))
print('-----------------------------------')
if problemtype == 'csv':
# csv features should have already been defined
# need to separate into number of unique classes
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for i in range(len(csv_features)):
if csv_labels[i] == class_type:
feature_list.append(list(csv_features.iloc[i,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
else:
#
for i in range(len(classes)):
class_type=classes[i]
if problemtype == 'audio':
# featurize audio
os.chdir(prevdir+'/features/audio_features')
default_features=default_audio_features
elif problemtype == 'text':
# featurize text
os.chdir(prevdir+'/features/text_features')
default_features=default_text_features
elif problemtype == 'image':
# featurize images
os.chdir(prevdir+'/features/image_features')
default_features=default_image_features
elif problemtype == 'video':
# featurize video
os.chdir(prevdir+'/features/video_features')
default_features=default_video_features
print('-----------------------------------')
print(' FEATURIZING %s'%(classes[i].upper()))
print('-----------------------------------')
os.system('python3 featurize.py "%s"'%(data_dir+'/'+classes[i]))
os.chdir(data_dir+'/'+classes[i])
# load audio features
listdir=os.listdir()
feature_list=list()
label_list=list()
for j in range(len(listdir)):
if listdir[j][-5:]=='.json':
try:
g=json.load(open(listdir[j]))
# consolidate all features into one array (if featurizing with multiple featurizers)
default_feature=list()
default_label=list()
for k in range(len(default_features)):
default_feature=default_feature+g['features'][problemtype][default_features[k]]['features']
default_label=default_label+g['features'][problemtype][default_features[k]]['labels']
feature_list.append(default_feature)
label_list.append(default_label)
except:
print('ERROR - skipping ' + listdir[j])
data[class_type]=feature_list
elif mtype == 'r':
# featurize .CSV
os.chdir(prevdir+'/features/csv_features')
output_file=str(uuid.uuid1())+'.csv'
os.system('python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"'%(prevdir+'/train_dir/'+csvfile, prevdir+'/train_dir/'+output_file, classes[0]))
csvfile=output_file
default_features=['csv_regression']
###############################################################
## GENERATE TRAINING DATA ##
###############################################################
print('-----------------------------------')
print(f.renderText('CREATING TRAINING DATA'))
print('-----------------------------------')
# perform class balance such that both classes have the same number
# of members (true by default, but can also be false)
os.chdir(prevdir+'/training/')
model_dir=prevdir+'/models'
balance=settings['balance_data']
remove_outliers=settings['remove_outliers']
outlier_types=settings['default_outlier_detector']
if mtype == 'c':
if problemtype != 'csv':
jsonfile=''
for i in range(len(classes)):
if i==0:
jsonfile=classes[i]
else:
jsonfile=jsonfile+'_'+classes[i]
jsonfile=jsonfile+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_dir=prevdir+'/train_dir/'+classes[0]
os.chdir(labels_dir)
listdir=os.listdir()
features_file=''
for i in range(len(listdir)):
if listdir[i].endswith('.json'):
features_file=listdir[i]
labels_=list()
for i in range(len(default_features)):
tlabel=json.load(open(features_file))['features'][problemtype][default_features[i]]['labels']
labels_=labels_+tlabel
elif problemtype == 'csv':
# format data appropriately
jsonfile=target+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# load features file and get feature labels by loading in classes
labels_=csv_feature_labels
elif mtype == 'r':
regression_data=pd.read_csv(prevdir+'/train_dir/'+csvfile)
print(csvfile)
# get features and labels
features_=regression_data.drop(columns=classes, axis=1)
labels_=list(features_)
labels_csv=regression_data.drop(columns=list(features_), axis=1)
# iterate through each column and make into proper features and labels
features=list()
labels=list()
# testing
# print(len(features_))
# print(len(labels_))
# print(features_)
# print(labels_)
# print(features_.iloc[0,:])
# print(labels_.iloc[0,:])
# get features and labels
for i in range(len(features_)):
features.append(list(features_.iloc[i,:]))
labels.append(list(labels_csv.iloc[i,:]))
# convert to name alldata just to be consistent
alldata=features
# print(alldata[0])
# print(labels[0])
# print(labels_)
os.chdir(model_dir)
# get the split from the settings.json
try:
test_size=settings['test_size']
except:
test_size=0.25
# error checking around lengths of arrays and deleting as necessary
lengths=list()
for i in range(len(alldata)):
lengths.append(len(alldata[i]))
# CLEAN IF DIMENSIONS DO NOT MATCH!!
maxval=max(lengths)
minval=min(lengths)
delete_ind=list()
inds=list()
alldata=np.array(alldata)
labels=np.array(labels)
if maxval != minval:
if lengths.count(maxval) > lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been removed
if lengths[i] == minval:
delete_ind.append(i)
elif lengths.count(maxval) < lengths.count(minval):
for i in range(len(lengths)):
# this means that additional column has been added
if lengths[i] == maxval:
delete_ind.append(i)
print('DELETING THESE INDICES: %s'%(str(delete_ind)))
print(alldata.shape)
print(labels.shape)
alldata=np.delete(alldata, tuple(delete_ind), axis=0)
labels=np.delete(labels, tuple(delete_ind))
print(alldata.shape)
print(labels.shape)
# # now see if any element in the array is a NaN and do not include if so in alldata or labels
# for i in range(len(alldata)):
# try:
# array_has_nan = list(np.isnan(np.array(alldata[i]))).count(True)
# array_has_string=list(np.char.isnumeric(np.array(alldata[i]))).count(False)
# except:
# array_has_string=1
# if array_has_nan > 0 or array_has_string > 0:
# inds.append(i)
# print(alldata[i])
# if len(inds) > 0:
# print('DELETING THESE INDICES: %s'%(str(inds)))
# alldata=np.delete(alldata, tuple(inds))
# labels=np.delete(labels, tuple(inds))
# REMOVE OUTLIERS IF SETTING IS TRUE
alldata=np.array(alldata)
labels=np.array(labels)
if remove_outliers==True:
print('-----------------------------------')
print(' REMOVING OUTLIERS')
print('-----------------------------------')
for i in range(len(outlier_types)):
outlier_type=outlier_types[i]
if outlier_type =='isolationforest':
from sklearn.ensemble import IsolationForest
clf = IsolationForest(random_state=0).fit(alldata)
y_pred = clf.predict(alldata)
inlier_ind=list(np.where(y_pred==1))
outlier_ind=list(np.where(y_pred==-1))
y_pred = y_pred.tolist()
print(type(y_pred))
print(type(y_pred[0]))
n_inliers = y_pred.count(1)
n_outliers = y_pred.count(-1)
print(n_inliers)
print(n_outliers)
# shape before
print(alldata.shape)
print(labels.shape)
# delete outliers
alldata=np.delete(alldata, tuple(outlier_ind), axis=0)
labels=np.delete(labels, tuple(outlier_ind))
print(alldata.shape)
print(labels.shape)
elif outlier_type=='zscore':
os.system('pip3 install statsmodels==0.11.1')
from scipy import stats
from statsmodels.formula.api import ols
# https://towardsdatascience.com/ways-to-detect-and-remove-the-outliers-404d16608dba
z = np.abs(stats.zscore(alldata))
# print(z)
threshold = 3
inds=list(set(np.where(z>threshold)[0]))
print(len(inds))
print(tuple(inds))
print(alldata.shape)
print('-->')
alldata = np.delete(alldata, tuple(inds), axis=0)
print(alldata.shape)
labels = np.delete(labels, tuple(inds))
print(len(alldata))
print(len(labels))
# rebalance data to all be the same length
newlabels=list(labels)
outlier_class=list()
for i in range(len(classes)):
outlier_class.append(newlabels.count(i))
lengths=np.array(outlier_class)
minlength=np.amin(outlier_class)
# now load all the classes
for i in range(len(classes)):
# only balance if specified in settings
if balance==True:
count2=newlabels.count(i)
while count2 > minlength:
count2=newlabels.count(i)
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(count2-minlength)))
ind=list(labels).index(i)
alldata=np.delete(alldata, tuple([ind]), axis=0)
labels=np.delete(labels, tuple([ind]))
newlabels=list(labels)
alldata=list(alldata)
labels=list(labels)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert everything to numpy arrays (for testing later)
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# create list of created csv files
created_csv_files=list()
# create training and testing datasets and save to a .CSV file for archive purposes
# this ensures that all machine learning training methods use the same training data
basefile=common_name
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all.csv'.upper())
if basefile+'_all.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all.csv',index=False)
created_csv_files.append(basefile+'_all.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all.csv'))
try:
print(basefile+'_train.csv'.upper())
if basefile+'_train.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train.csv',index=False)
created_csv_files.append(basefile+'_train.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train.csv'))
try:
print(basefile+'_test.csv'.upper())
if basefile+'_test.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test.csv',index=False)
created_csv_files.append(basefile+'_test.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test.csv'))
############################################################
## DATA TRANSFORMATION ##
############################################################
'''
Scale features via scalers, dimensionality reduction techniques,
and feature selection strategies per the settings.json document.
'''
preprocess_dir=prevdir+'/preprocessing'
os.chdir(preprocess_dir)
# get all the important settings for the transformations
scale_features=settings['scale_features']
reduce_dimensions=settings['reduce_dimensions']
select_features=settings['select_features']
default_scalers=settings['default_scaler']
default_reducers=settings['default_dimensionality_reducer']
default_selectors=settings['default_feature_selector']
# get command for terminal
transform_command=''
if problemtype == 'csv' and mtype == 'c':
transform_command=transform_command+' "'+'Class'+'"'
else:
for i in range(len(classes)):
transform_command=transform_command+' "'+classes[i]+'"'
# get filename / create a unique file name
if mtype=='r':
t_filename='r_'+common_name
elif mtype=='c':
t_filename='c_'+common_name
# only add names in if True
if scale_features == True:
for i in range(len(default_scalers)):
t_filename=t_filename+'_'+default_scalers[i]
if reduce_dimensions == True:
for i in range(len(default_reducers)):
t_filename=t_filename+'_'+default_reducers[i]
if select_features == True:
for i in range(len(default_selectors)):
t_filename=t_filename+'_'+default_selectors[i]
transform_file=t_filename+'.pickle'
if scale_features == True or reduce_dimensions == True or select_features == True:
print('----------------------------------')
print(f.renderText('TRANSFORMING DATA'))
print('----------------------------------')
# go to proper transformer directory
try:
os.chdir(problemtype+'_transformer')
except:
os.mkdir(problemtype+'_transformer')
os.chdir(problemtype+'_transformer')
# train transformer if it doesn't already exist
os.system('pip3 install scikit-learn==0.22.2.post1')
if transform_file in os.listdir():
# remove file if in listdir to avoid conflicts with naming
os.remove(transform_file)
print('making transformer...')
alldata=np.asarray(alldata)
labels=np.asarray(labels)
os.chdir(preprocess_dir)
if mtype == 'c':
print('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.system('python3 transform.py "%s" "%s" "%s" %s'%(problemtype, 'c', common_name, transform_command))
os.chdir(problemtype+'_transformer')
print(transform_file)
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(np.array(alldata))
elif mtype == 'r':
command='python3 transform.py "%s" "%s" "%s" "%s" "%s" "%s"'%('csv', 'r', classes[0], csvfile, prevdir+'/train_dir/', common_name)
print(command)
os.system(command)
os.chdir(problemtype+'_transformer')
transform_model=pickle.load(open(transform_file,'rb'))
alldata=transform_model.transform(alldata)
os.chdir(preprocess_dir)
os.system('python3 load_transformer.py "%s" "%s"'%(problemtype, transform_file))
# now make new files as .CSV
os.chdir(model_dir)
# split the data
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size)
# convert to numpy arrays
X_train=np.array(X_train)
X_test=np.array(X_test)
y_train=np.array(y_train)
y_test=np.array(y_test)
# get new labels_ array
labels_=list()
for i in range(len(alldata[0].tolist())):
labels_.append('transformed_feature_%s'%(str(i)))
# now create transformed excel sheets
temp_listdir=os.listdir()
if create_csv == True:
try:
print(basefile+'_all_transformed.csv'.upper())
if basefile+'_all_transformed.csv' not in temp_listdir:
all_data = convert_csv(alldata, labels, labels_, mtype, classes)
all_data.to_csv(basefile+'_all_transformed.csv',index=False)
created_csv_files.append(basefile+'_all_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_all_transformed.csv'))
try:
print(basefile+'_train_transformed.csv'.upper())
if basefile+'_train_transformed.csv' not in temp_listdir:
train_data= convert_csv(X_train, y_train, labels_, mtype, classes)
train_data.to_csv(basefile+'_train_transformed.csv',index=False)
created_csv_files.append(basefile+'_train_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_train_transformed.csv'))
try:
print(basefile+'_test_transformed.csv'.upper())
if basefile+'_test_transformed.csv' not in temp_listdir:
test_data= convert_csv(X_test, y_test, labels_, mtype, classes)
test_data.to_csv(basefile+'_test_transformed.csv',index=False)
created_csv_files.append(basefile+'_test_transformed.csv')
except:
print('error exporting data into excel sheet %s'%(basefile+'_test_transformed.csv'))
else:
# make a transform model == '' so that later during model training this can be skipped
transform_model=''
############################################################
## VISUALIZE DATA ##
############################################################
visualize_data=settings['visualize_data']
visual_dir=prevdir+'/visualize'
model_session=str(uuid.uuid1())
os.chdir(visual_dir)
if visualize_data == True and mtype == 'c':
print('----------------------------------')
print(f.renderText('VISUALIZING DATA'))
print('----------------------------------')
command='python3 visualize.py %s'%(problemtype)
for i in range(len(classes)):
command=command+' "'+classes[i]+'"'
os.system(command)
# restructure the visualization directory
os.chdir(visual_dir+'/visualization_session')
os.mkdir('visualizations')
vizdir=os.getcwd()
# move directories so that visualization is separate from main model directory
shutil.move(vizdir+'/clustering', vizdir+'/visualizations/clustering')
shutil.move(vizdir+'/feature_ranking', vizdir+'/visualizations/feature_ranking')
shutil.move(vizdir+'/model_selection', vizdir+'/visualizations/model_selection')
# go back to main direcotry
os.chdir(visual_dir)
# now copy over the visualization directory to
try:
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
except:
shutil.rmtree(model_dir+'/'+model_session)
shutil.copytree(visual_dir+'/visualization_session', model_dir+'/'+model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json',model_dir+'/%s/settings.json'%(model_session))
else:
# make a model session for next section if it doesn't exist from visualization directory
os.chdir(model_dir)
try:
os.mkdir(model_session)
except:
shutil.rmtree(model_session)
os.mkdir(model_session)
# copy over settings.json
shutil.copy(prevdir+'/settings.json', model_dir+'/%s/settings.json'%(model_session))
############################################################
## TRAIN THE MODEL ##
############################################################
'''
Now we can train the machine learning model via the default_training script.
Note you can specify multiple training scripts and it will consecutively model the
files appropriately.
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# Here is what all the variables below mean:
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^#
# alldata = list of features in an array for model training
# [[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875...],
...
[39.0, 112.15384615384616, 70.98195453650514, 248.0, 14.0, 103.0, 143.5546875,...]]
# labels = list of labels in an array for model training
# ['males','females',...,'males','females']
# mtype = classification or regression problem?
# 'c' --> classification
# 'r' --> regression
# jsonfile = filename of the .JSON document seprating classes
# males_females.json
# problemtype = type of problem selected
# 'audio' --> audio files
# 'image' --> images files
# 'text' --> text files
# 'video' --> video files
# 'csv' --> csv files
# default_featurenames = default feature array(s) to use for modeling
# ['librosa_features']
# settings = overall settings currenty used for model training
# output of the settings.json document
-----
# transform_model = transformer model if applicable
# useful for data transformation as part of the model initialization process (if pickle file)
# uses scikit-learn pipeline
# X_train, X_test, y_train, y_test
# training datasets used in the .CSV documents
# also can use pandas dataframe if applicable (loading in the model dir)
'''
print('----------------------------------')
print(f.renderText('MODELING DATA'))
print('----------------------------------')
# get defaults
default_training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
default_featurenames=''
if problemtype != 'csv' and mtype == 'c':
for i in range(len(default_features)):
if i ==0:
default_featurenames=default_features[i]
else:
default_featurenames=default_featurenames+'_|_'+default_features[i]
else:
default_featurenames='csv_classification'
# just move all created .csv files into model_session directory
os.chdir(model_dir)
os.chdir(model_session)
os.mkdir('data')
for i in range(len(created_csv_files)):
shutil.move(model_dir+'/'+created_csv_files[i], os.getcwd()+'/data/'+created_csv_files[i])
# initialize i (for tqdm) and go through all model training scripts
i=0
for i in tqdm(range(len(default_training_scripts)), desc=default_training_scripts[i]):
try:
model_start_time=time.time()
# go to model directory
os.chdir(model_dir)
# get common name and default training script to select proper model trainer
default_training_script=default_training_scripts[i]
common_name_model=common_name+'_'+default_training_script
model_exists, model_listdir = pursue_modeling(mtype, model_dir, problemtype, default_training_script, common_name_model)
if model_exists == False:
print('----------------------------------')
print(' .... training %s '%(default_training_script.upper()))
print('----------------------------------')
if default_training_script=='adanet':
print('Adanet training is coming soon! Please use a different model setting for now.')
# import train_adanet as ta
# ta.train_adanet(mtype, classes, jsonfile, alldata, labels, feature_labels, problemtype, default_featurenames)
elif default_training_script=='alphapy':
import train_alphapy as talpy
modelname, modeldir, files=talpy.train_alphapy(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='atm':
import train_atm as tatm
modelname, modeldir, files=tatm.train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autobazaar':
import train_autobazaar as autobzr
modelname, modeldir, files=autobzr.train_autobazaar(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogbt':
import train_autogbt as tautogbt
modelname, modeldir, files=tautogbt.train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autogluon':
import train_autogluon as tautg
modelname, modeldir, files, test_data=tautg.train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokaggle':
import train_autokaggle as autokag
modelname, modeldir, files=autokag.train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autokeras':
import train_autokeras as autokeras_
modelname, modeldir, files=autokeras_.train_autokeras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='automl':
import train_automl as auto_ml
modelname, modeldir, files=auto_ml.train_automl(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='autosklearn':
print('Autosklearn training is unstable! Please use a different model setting for now.')
# import train_autosklearn as taskl
# taskl.train_autosklearn(alldata, labels, mtype, jsonfile, problemtype, default_featurenames)
elif default_training_script=='autopytorch':
import train_autopytorch as autotorch_
modelname, modeldir, files=autotorch_.train_autopytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='btb':
import train_btb as tbtb
modelname, modeldir, files=tbtb.train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='cvopt':
import train_cvopt as tcvopt
modelname, modeldir, files = tcvopt.train_cvopt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='devol':
import train_devol as td
modelname, modeldir, files=td.train_devol(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gama':
import train_gama as tgama
modelname, modeldir, files=tgama.train_gama(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='gentun':
import train_gentun as tgentun
modelname, modeldir, files=tgentun.train_gentun(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hyperband':
import train_hyperband as thband
modelname, modeldir, files = thband.train_hyperband(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hypsklearn':
import train_hypsklearn as th
modelname, modeldir, files=th.train_hypsklearn(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='hungabunga':
import train_hungabunga as thung
modelname, modeldir, files=thung.train_hungabunga(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='imbalance':
import train_imbalance as timb
modelname, modeldir, files=timb.train_imbalance(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='keras':
import train_keras as tk
modelname, modeldir, files=tk.train_keras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='ludwig':
import train_ludwig as tl
modelname, modeldir, files=tl.train_ludwig(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlblocks':
import train_mlblocks as mlb
modelname, modeldir, files=mlb.train_mlblocks(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='mlbox':
import train_mlbox as mlbox_
modelname, modeldir, files=mlbox_.train_mlbox(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='neuraxle':
if mtype=='c':
print('Neuraxle does not support classification at this time. Please use a different model training script')
break
else:
import train_neuraxle as tneuraxle
modelname, modeldir, files=tneuraxle.train_neuraxle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='plda':
print('PLDA training is unstable! Please use a different model setting for now.')
# import train_pLDA as tp
# tp.train_pLDA(alldata,labels)
elif default_training_script=='pytorch':
import train_pytorch as t_pytorch
modelname, modeldir, files = t_pytorch.train_pytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='safe':
import train_safe as tsafe
modelname, modeldir, files=tsafe.train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
elif default_training_script=='scsr':
import train_scsr as scsr
if mtype == 'c':
modelname, modeldir, files=scsr.train_sc(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,minlength)
elif mtype == 'r':
modelname, modeldir, files=scsr.train_sr(X_train,X_test,y_train,y_test,common_name_model,problemtype,classes,default_featurenames,transform_model,model_dir,settings)
elif default_training_script=='tpot':
import train_TPOT as tt
modelname, modeldir, files=tt.train_TPOT(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session)
############################################################
## CALCULATE METRICS / PLOT ROC CURVE ##
############################################################
if modelname.endswith('.pickle'):
foldername=modelname[0:-7]
elif modelname.endswith('.h5'):
foldername=modelname[0:-3]
else:
foldername=common_name_model
# copy the folder in case there are multiple models being trained
try:
shutil.copytree(model_session, foldername)
except:
shutil.rmtree(foldername)
shutil.copytree(model_session, foldername)
cur_dir2=os.getcwd()
os.chdir(foldername)
os.mkdir('model')
os.chdir('model')
model_dir_temp=os.getcwd()
# dump transform model to the models directory if necessary
if transform_model == '':
transformer_name=''
else:
# dump the tranform model into the current working directory
transformer_name=modelname.split('.')[0]+'_transform.pickle'
tmodel=open(transformer_name,'wb')
pickle.dump(transform_model, tmodel)
tmodel.close()
# move all supplementary files into model folder
for j in range(len(files)):
shutil.move(modeldir+'/'+files[j], model_dir_temp+'/'+files[j])
# load model for getting metrics
if default_training_script not in ['alphapy', 'atm', 'autokeras', 'autopytorch', 'ludwig', 'keras', 'devol']:
loadmodel=open(modelname, 'rb')
clf=pickle.load(loadmodel)
loadmodel.close()
elif default_training_script == 'atm':
from atm import Model
clf=Model.load(modelname)
elif default_training_script == 'autokeras':
import tensorflow as tf
import autokeras as ak
clf = pickle.load(open(modelname, 'rb'))
elif default_training_script=='autopytorch':
import torch
clf=torch.load(modelname)
elif default_training_script == 'ludwig':
from ludwig.api import LudwigModel
clf=LudwigModel.load('ludwig_files/experiment_run/model/')
elif default_training_script in ['devol', 'keras']:
from keras.models import load_model
clf = load_model(modelname)
else:
clf=''
# create test_data variable for anything other than autogluon
if default_training_script != 'autogluon':
test_data=''
# now make main .JSON file for the session summary with metrics
get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time)
# now move to the proper models directory
os.chdir(model_dir)
os.system('python3 create_readme.py "%s"'%(os.getcwd()+'/'+foldername))
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
shutil.move(model_dir+'/'+foldername, os.getcwd()+'/'+foldername)
############################################################
## COMPRESS MODELS ##
############################################################
if model_compress == True:
print(f.renderText('COMPRESSING MODEL'))
# now compress the model according to model type
if default_training_script in ['hypsklearn', 'scsr', 'tpot']:
# all .pickle files and can compress via scikit-small-ensemble
from sklearn.externals import joblib
# open up model
loadmodel=open(modelname, 'rb')
model = pickle.load(loadmodel)
loadmodel.close()
# compress - from 0 to 9. Higher value means more compression, but also slower read and write times.
# Using a value of 3 is often a good compromise.
joblib.dump(model, modelname[0:-7]+'_compressed.joblib',compress=3)
# can now load compressed models as such
# thenewmodel=joblib.load(modelname[0:-7]+'_compressed.joblib')
# leads to up to 10x reduction in model size and .72 sec - 0.23 secoon (3-4x faster loading model)
# note may note work in sklearn and python versions are different from saving and loading environments.
elif default_training_script in ['devol', 'keras']:
# can compress with keras_compressor
import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
try:
print('compressing model!!')
model = load_model(modelname)
model = compress(model, 7e-1)
model.save(modelname[0:-3]+'_compressed.h5')
except:
print('error compressing model!!')
else:
# for everything else, we can compress pocketflow models in the future.
print('We cannot currently compress %s models. We are working on this!! \n\n The model will remain uncompressed for now'%(default_training_script))
else:
if mtype == 'r':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_regression', problemtype+'_models', str(model_listdir)))
elif mtype == 'c':
print('SKIPPING MODELTYPE - %s already exists in the %s folder: %s'%(common_name_model+'_classifier', problemtype+'_models', str(model_listdir)))
############################################################
## PRODUCTIONIZING MODELS ##
############################################################
# TO BE COMPLETED IN THE FUTURE!
except:
print('ERROR - error in modeling session')
| [
"pandas.read_csv",
"sklearn.metrics.auc",
"sys.exit",
"train_cvopt.train_cvopt",
"ludwig.api.LudwigModel.load",
"train_mlblocks.train_mlblocks",
"pyfiglet.Figlet",
"matplotlib.pyplot.xlabel",
"platform.system",
"os.mkdir",
"train_neuraxle.train_neuraxle",
"sklearn.metrics.mean_absolute_error",... | [((2550, 2568), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""doh"""'}), "(font='doh')\n", (2556, 2568), False, 'from pyfiglet import Figlet\n'), ((2600, 2619), 'pyfiglet.Figlet', 'Figlet', ([], {'font': '"""doom"""'}), "(font='doom')\n", (2606, 2619), False, 'from pyfiglet import Figlet\n'), ((16731, 16742), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (16740, 16742), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((16770, 16809), 'sys.path.append', 'sys.path.append', (["(prevdir + '/train_dir')"], {}), "(prevdir + '/train_dir')\n", (16785, 16809), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17314, 17346), 'os.chdir', 'os.chdir', (["(prevdir + '/train_dir')"], {}), "(prevdir + '/train_dir')\n", (17322, 17346), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17355, 17366), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (17364, 17366), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17375, 17387), 'os.listdir', 'os.listdir', ([], {}), '()\n', (17385, 17387), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((24545, 24562), 'os.chdir', 'os.chdir', (['cur_dir'], {}), '(cur_dir)\n', (24553, 24562), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((24563, 24594), 'os.system', 'os.system', (['"""python3 upgrade.py"""'], {}), "('python3 upgrade.py')\n", (24572, 24594), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((31171, 31203), 'os.chdir', 'os.chdir', (["(prevdir + '/training/')"], {}), "(prevdir + '/training/')\n", (31179, 31203), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((34477, 34496), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (34485, 34496), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((34862, 34879), 'numpy.array', 'np.array', (['alldata'], {}), '(alldata)\n', (34870, 34879), True, 'import numpy as np\n'), ((34887, 34903), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (34895, 34903), True, 'import numpy as np\n'), ((36165, 36182), 'numpy.array', 'np.array', (['alldata'], {}), '(alldata)\n', (36173, 36182), True, 'import numpy as np\n'), ((36190, 36206), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (36198, 36206), True, 'import numpy as np\n'), ((38574, 38628), 'sklearn.model_selection.train_test_split', 'train_test_split', (['alldata', 'labels'], {'test_size': 'test_size'}), '(alldata, labels, test_size=test_size)\n', (38590, 38628), False, 'from sklearn.model_selection import train_test_split\n'), ((38695, 38712), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (38703, 38712), True, 'import numpy as np\n'), ((38720, 38736), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (38728, 38736), True, 'import numpy as np\n'), ((38745, 38762), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (38753, 38762), True, 'import numpy as np\n'), ((38770, 38786), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (38778, 38786), True, 'import numpy as np\n'), ((39052, 39064), 'os.listdir', 'os.listdir', ([], {}), '()\n', (39062, 39064), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((40476, 40500), 'os.chdir', 'os.chdir', (['preprocess_dir'], {}), '(preprocess_dir)\n', (40484, 40500), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45360, 45380), 'os.chdir', 'os.chdir', (['visual_dir'], {}), '(visual_dir)\n', (45368, 45380), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49270, 49289), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (49278, 49289), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49290, 49313), 'os.chdir', 'os.chdir', (['model_session'], {}), '(model_session)\n', (49298, 49313), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49314, 49330), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (49322, 49330), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((4994, 5009), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (5003, 5009), True, 'import pandas as pd\n'), ((11861, 11886), 'json.dump', 'json.dump', (['data', 'jsonfile'], {}), '(data, jsonfile)\n', (11870, 11886), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((11973, 11984), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11982, 11984), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12012, 12029), 'os.chdir', 'os.chdir', (['basedir'], {}), '(basedir)\n', (12020, 12029), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12031, 12075), 'os.system', 'os.system', (['"""pip3 freeze -> requirements.txt"""'], {}), "('pip3 freeze -> requirements.txt')\n", (12040, 12075), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12611, 12627), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (12619, 12627), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12774, 12800), 'itertools.cycle', 'itertools.cycle', (['"""bgrcmyk"""'], {}), "('bgrcmyk')\n", (12789, 12800), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((13153, 13186), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (13163, 13186), True, 'import matplotlib.pyplot as plt\n'), ((13188, 13220), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (13198, 13220), True, 'import matplotlib.pyplot as plt\n'), ((13222, 13280), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic (ROC) Curve"""'], {}), "('Receiver Operating Characteristic (ROC) Curve')\n", (13231, 13280), True, 'import matplotlib.pyplot as plt\n'), ((13282, 13294), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13292, 13294), True, 'import matplotlib.pyplot as plt\n'), ((13296, 13314), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13312, 13314), True, 'import matplotlib.pyplot as plt\n'), ((13316, 13344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""roc_curve.png"""'], {}), "('roc_curve.png')\n", (13327, 13344), True, 'import matplotlib.pyplot as plt\n'), ((13346, 13357), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13355, 13357), True, 'import matplotlib.pyplot as plt\n'), ((13759, 13809), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (13769, 13809), True, 'import matplotlib.pyplot as plt\n'), ((13811, 13827), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (13820, 13827), True, 'import matplotlib.pyplot as plt\n'), ((13829, 13843), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13841, 13843), True, 'import matplotlib.pyplot as plt\n'), ((13883, 13927), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (13893, 13927), True, 'import matplotlib.pyplot as plt\n'), ((13929, 13960), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (13939, 13960), True, 'import matplotlib.pyplot as plt\n'), ((14224, 14242), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14240, 14242), True, 'import matplotlib.pyplot as plt\n'), ((14244, 14268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (14254, 14268), True, 'import matplotlib.pyplot as plt\n'), ((14270, 14299), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (14280, 14299), True, 'import matplotlib.pyplot as plt\n'), ((14301, 14319), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14317, 14319), True, 'import matplotlib.pyplot as plt\n'), ((14321, 14356), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""confusion_matrix.png"""'], {}), "('confusion_matrix.png')\n", (14332, 14356), True, 'import matplotlib.pyplot as plt\n'), ((14358, 14369), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14367, 14369), True, 'import matplotlib.pyplot as plt\n'), ((17530, 17550), 'os.chdir', 'os.chdir', (['folders[i]'], {}), '(folders[i])\n', (17538, 17550), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17560, 17572), 'os.listdir', 'os.listdir', ([], {}), '()\n', (17570, 17572), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((17636, 17654), 'os.chdir', 'os.chdir', (['data_dir'], {}), '(data_dir)\n', (17644, 17654), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((37940, 37963), 'numpy.array', 'np.array', (['outlier_class'], {}), '(outlier_class)\n', (37948, 37963), True, 'import numpy as np\n'), ((37975, 37997), 'numpy.amin', 'np.amin', (['outlier_class'], {}), '(outlier_class)\n', (37982, 37997), True, 'import numpy as np\n'), ((42078, 42130), 'os.system', 'os.system', (['"""pip3 install scikit-learn==0.22.2.post1"""'], {}), "('pip3 install scikit-learn==0.22.2.post1')\n", (42087, 42130), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42298, 42317), 'numpy.asarray', 'np.asarray', (['alldata'], {}), '(alldata)\n', (42308, 42317), True, 'import numpy as np\n'), ((42326, 42344), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (42336, 42344), True, 'import numpy as np\n'), ((42346, 42370), 'os.chdir', 'os.chdir', (['preprocess_dir'], {}), '(preprocess_dir)\n', (42354, 42370), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43105, 43129), 'os.chdir', 'os.chdir', (['preprocess_dir'], {}), '(preprocess_dir)\n', (43113, 43129), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43131, 43217), 'os.system', 'os.system', (['(\'python3 load_transformer.py "%s" "%s"\' % (problemtype, transform_file))'], {}), '(\'python3 load_transformer.py "%s" "%s"\' % (problemtype,\n transform_file))\n', (43140, 43217), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43244, 43263), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (43252, 43263), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((43320, 43374), 'sklearn.model_selection.train_test_split', 'train_test_split', (['alldata', 'labels'], {'test_size': 'test_size'}), '(alldata, labels, test_size=test_size)\n', (43336, 43374), False, 'from sklearn.model_selection import train_test_split\n'), ((43413, 43430), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (43421, 43430), True, 'import numpy as np\n'), ((43439, 43455), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (43447, 43455), True, 'import numpy as np\n'), ((43465, 43482), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (43473, 43482), True, 'import numpy as np\n'), ((43491, 43507), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (43499, 43507), True, 'import numpy as np\n'), ((43701, 43713), 'os.listdir', 'os.listdir', ([], {}), '()\n', (43711, 43713), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45346, 45358), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (45356, 45358), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45677, 45695), 'os.system', 'os.system', (['command'], {}), '(command)\n', (45686, 45695), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45742, 45789), 'os.chdir', 'os.chdir', (["(visual_dir + '/visualization_session')"], {}), "(visual_dir + '/visualization_session')\n", (45750, 45789), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45789, 45815), 'os.mkdir', 'os.mkdir', (['"""visualizations"""'], {}), "('visualizations')\n", (45797, 45815), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45824, 45835), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45833, 45835), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45919, 45993), 'shutil.move', 'shutil.move', (["(vizdir + '/clustering')", "(vizdir + '/visualizations/clustering')"], {}), "(vizdir + '/clustering', vizdir + '/visualizations/clustering')\n", (45930, 45993), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((45991, 46079), 'shutil.move', 'shutil.move', (["(vizdir + '/feature_ranking')", "(vizdir + '/visualizations/feature_ranking')"], {}), "(vizdir + '/feature_ranking', vizdir +\n '/visualizations/feature_ranking')\n", (46002, 46079), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46073, 46161), 'shutil.move', 'shutil.move', (["(vizdir + '/model_selection')", "(vizdir + '/visualizations/model_selection')"], {}), "(vizdir + '/model_selection', vizdir +\n '/visualizations/model_selection')\n", (46084, 46161), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46186, 46206), 'os.chdir', 'os.chdir', (['visual_dir'], {}), '(visual_dir)\n', (46194, 46206), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46514, 46606), 'shutil.copy', 'shutil.copy', (["(prevdir + '/settings.json')", "(model_dir + '/%s/settings.json' % model_session)"], {}), "(prevdir + '/settings.json', model_dir + '/%s/settings.json' %\n model_session)\n", (46525, 46606), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46696, 46715), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (46704, 46715), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46842, 46934), 'shutil.copy', 'shutil.copy', (["(prevdir + '/settings.json')", "(model_dir + '/%s/settings.json' % model_session)"], {}), "(prevdir + '/settings.json', model_dir + '/%s/settings.json' %\n model_session)\n", (46853, 46934), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5447, 5470), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (5468, 5470), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5492, 5512), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (5510, 5512), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5532, 5550), 'psutil.cpu_times', 'psutil.cpu_times', ([], {}), '()\n', (5548, 5550), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5570, 5588), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (5586, 5588), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5608, 5626), 'psutil.cpu_stats', 'psutil.cpu_stats', ([], {}), '()\n', (5624, 5626), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5645, 5665), 'psutil.swap_memory', 'psutil.swap_memory', ([], {}), '()\n', (5663, 5665), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5686, 5710), 'psutil.disk_partitions', 'psutil.disk_partitions', ([], {}), '()\n', (5708, 5710), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5731, 5753), 'psutil.disk_usage', 'psutil.disk_usage', (['"""/"""'], {}), "('/')\n", (5748, 5753), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5780, 5805), 'psutil.disk_io_counters', 'psutil.disk_io_counters', ([], {}), '()\n', (5803, 5805), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5823, 5847), 'psutil.sensors_battery', 'psutil.sensors_battery', ([], {}), '()\n', (5845, 5847), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5867, 5885), 'psutil.boot_time', 'psutil.boot_time', ([], {}), '()\n', (5883, 5885), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6013, 6030), 'platform.system', 'platform.system', ([], {}), '()\n', (6028, 6030), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6049, 6067), 'platform.release', 'platform.release', ([], {}), '()\n', (6065, 6067), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6086, 6104), 'platform.version', 'platform.version', ([], {}), '()\n', (6102, 6104), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((8761, 8799), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8783, 8799), False, 'from sklearn import metrics\n'), ((8832, 8879), 'sklearn.metrics.balanced_accuracy_score', 'metrics.balanced_accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8863, 8879), False, 'from sklearn import metrics\n'), ((9955, 10022), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_true', 'y_pred'], {'target_names': 'classes'}), '(y_true, y_pred, target_names=classes)\n', (9984, 10022), False, 'from sklearn import metrics\n'), ((15817, 15870), 'os.listdir', 'os.listdir', (["(model_dir + '/' + problemtype + '_models')"], {}), "(model_dir + '/' + problemtype + '_models')\n", (15827, 15870), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((25901, 25967), 'os.system', 'os.system', (['(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (25910, 25967), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27392, 27460), 'os.system', 'os.system', (['(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (27401, 27460), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((30426, 30470), 'os.chdir', 'os.chdir', (["(prevdir + '/features/csv_features')"], {}), "(prevdir + '/features/csv_features')\n", (30434, 30470), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((30508, 30678), 'os.system', 'os.system', (['(\'python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"\' % (prevdir +\n \'/train_dir/\' + csvfile, prevdir + \'/train_dir/\' + output_file, classes[0])\n )'], {}), '(\'python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"\' % (\n prevdir + \'/train_dir/\' + csvfile, prevdir + \'/train_dir/\' +\n output_file, classes[0]))\n', (30517, 30678), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((31797, 31814), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (31805, 31814), True, 'import numpy as np\n'), ((31827, 31843), 'numpy.amin', 'np.amin', (['lengths'], {}), '(lengths)\n', (31834, 31843), True, 'import numpy as np\n'), ((32409, 32429), 'os.chdir', 'os.chdir', (['labels_dir'], {}), '(labels_dir)\n', (32417, 32429), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((32440, 32452), 'os.listdir', 'os.listdir', ([], {}), '()\n', (32450, 32452), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((33705, 33751), 'pandas.read_csv', 'pd.read_csv', (["(prevdir + '/train_dir/' + csvfile)"], {}), "(prevdir + '/train_dir/' + csvfile)\n", (33716, 33751), True, 'import pandas as pd\n'), ((41904, 41942), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (41912, 41942), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42153, 42165), 'os.listdir', 'os.listdir', ([], {}), '()\n', (42163, 42165), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42230, 42255), 'os.remove', 'os.remove', (['transform_file'], {}), '(transform_file)\n', (42239, 42255), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42492, 42600), 'os.system', 'os.system', (['(\'python3 transform.py "%s" "%s" "%s" %s\' % (problemtype, \'c\', common_name,\n transform_command))'], {}), '(\'python3 transform.py "%s" "%s" "%s" %s\' % (problemtype, \'c\',\n common_name, transform_command))\n', (42501, 42600), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42597, 42635), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (42605, 42635), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46265, 46356), 'shutil.copytree', 'shutil.copytree', (["(visual_dir + '/visualization_session')", "(model_dir + '/' + model_session)"], {}), "(visual_dir + '/visualization_session', model_dir + '/' +\n model_session)\n", (46280, 46356), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46724, 46747), 'os.mkdir', 'os.mkdir', (['model_session'], {}), '(model_session)\n', (46732, 46747), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49651, 49662), 'time.time', 'time.time', ([], {}), '()\n', (49660, 49662), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49692, 49711), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (49700, 49711), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6835, 6846), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6844, 6846), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6871, 6913), 'os.chdir', 'os.chdir', (["(common_name + '_alphapy_session')"], {}), "(common_name + '_alphapy_session')\n", (6879, 6913), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6926, 6937), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6935, 6937), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6940, 6957), 'os.chdir', 'os.chdir', (['"""input"""'], {}), "('input')\n", (6948, 6957), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6960, 6996), 'os.rename', 'os.rename', (['"""test.csv"""', '"""predict.csv"""'], {}), "('test.csv', 'predict.csv')\n", (6969, 6996), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6999, 7020), 'os.chdir', 'os.chdir', (['alphapy_dir'], {}), '(alphapy_dir)\n', (7007, 7020), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7023, 7053), 'os.system', 'os.system', (['"""alphapy --predict"""'], {}), "('alphapy --predict')\n", (7032, 7053), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7056, 7074), 'os.chdir', 'os.chdir', (['"""output"""'], {}), "('output')\n", (7064, 7074), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7085, 7097), 'os.listdir', 'os.listdir', ([], {}), '()\n', (7095, 7097), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7243, 7259), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7251, 7259), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((8912, 8951), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (8935, 8951), False, 'from sklearn import metrics\n'), ((9022, 9058), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9042, 9058), False, 'from sklearn import metrics\n'), ((9128, 9173), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (9144, 9173), False, 'from sklearn import metrics\n'), ((9246, 9295), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (9262, 9295), False, 'from sklearn import metrics\n'), ((9367, 9416), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (9383, 9416), False, 'from sklearn import metrics\n'), ((9487, 9524), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9508, 9524), False, 'from sklearn import metrics\n'), ((9600, 9654), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {'average': '"""micro"""'}), "(y_true, y_pred, average='micro')\n", (9621, 9654), False, 'from sklearn import metrics\n'), ((9736, 9790), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (9757, 9790), False, 'from sklearn import metrics\n'), ((10048, 10086), 'numpy.array', 'np.array', (["metrics_['confusion_matrix']"], {}), "(metrics_['confusion_matrix'])\n", (10056, 10086), True, 'import numpy as np\n'), ((10757, 10800), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10784, 10800), False, 'from sklearn import metrics\n'), ((10836, 10878), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10862, 10878), False, 'from sklearn import metrics\n'), ((10917, 10962), 'sklearn.metrics.median_absolute_error', 'metrics.median_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10946, 10962), False, 'from sklearn import metrics\n'), ((10988, 11020), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (11004, 11020), False, 'from sklearn import metrics\n'), ((11126, 11137), 'time.time', 'time.time', ([], {}), '()\n', (11135, 11137), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((11180, 11203), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11201, 11203), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((12900, 12927), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'probs[i]'], {}), '(y_test, probs[i])\n', (12909, 12927), False, 'from sklearn.metrics import roc_curve\n'), ((13044, 13102), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""darkblue"""', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='darkblue', linestyle='--')\n", (13052, 13102), True, 'import matplotlib.pyplot as plt\n'), ((14724, 14794), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'linestyle': '"""-"""', 'linewidth': '"""0.5"""', 'color': '"""green"""'}), "(which='major', linestyle='-', linewidth='0.5', color='green')\n", (14732, 14794), True, 'import matplotlib.pyplot as plt\n'), ((14798, 14868), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'linestyle': '""":"""', 'linewidth': '"""0.5"""', 'color': '"""black"""'}), "(which='minor', linestyle=':', linewidth='0.5', color='black')\n", (14806, 14868), True, 'import matplotlib.pyplot as plt\n'), ((14872, 14890), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14888, 14890), True, 'import matplotlib.pyplot as plt\n'), ((14894, 14934), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar_graph_predictions.png"""'], {}), "('bar_graph_predictions.png')\n", (14905, 14934), True, 'import matplotlib.pyplot as plt\n'), ((14938, 14949), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14947, 14949), True, 'import matplotlib.pyplot as plt\n'), ((14992, 15033), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_test', 'y_test'], {'color': '"""gray"""'}), "(X_test, y_test, color='gray')\n", (15003, 15033), True, 'import matplotlib.pyplot as plt\n'), ((15038, 15088), 'matplotlib.pyplot.plot', 'plt.plot', (['X_test', 'y_pred'], {'color': '"""red"""', 'linewidth': '(2)'}), "(X_test, y_pred, color='red', linewidth=2)\n", (15046, 15088), True, 'import matplotlib.pyplot as plt\n'), ((15092, 15110), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15108, 15110), True, 'import matplotlib.pyplot as plt\n'), ((15114, 15158), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""straight_line_predictions.png"""'], {}), "('straight_line_predictions.png')\n", (15125, 15158), True, 'import matplotlib.pyplot as plt\n'), ((15162, 15173), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15171, 15173), True, 'import matplotlib.pyplot as plt\n'), ((15229, 15282), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual': y_test, 'Predicted': y_pred}"], {}), "({'Actual': y_test, 'Predicted': y_pred})\n", (15241, 15282), True, 'import pandas as pd\n'), ((15347, 15417), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""major"""', 'linestyle': '"""-"""', 'linewidth': '"""0.5"""', 'color': '"""green"""'}), "(which='major', linestyle='-', linewidth='0.5', color='green')\n", (15355, 15417), True, 'import matplotlib.pyplot as plt\n'), ((15421, 15491), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""', 'linestyle': '""":"""', 'linewidth': '"""0.5"""', 'color': '"""black"""'}), "(which='minor', linestyle=':', linewidth='0.5', color='black')\n", (15429, 15491), True, 'import matplotlib.pyplot as plt\n'), ((15495, 15513), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15511, 15513), True, 'import matplotlib.pyplot as plt\n'), ((15517, 15557), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar_graph_predictions.png"""'], {}), "('bar_graph_predictions.png')\n", (15528, 15557), True, 'import matplotlib.pyplot as plt\n'), ((15561, 15572), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15570, 15572), True, 'import matplotlib.pyplot as plt\n'), ((25403, 25442), 'os.chdir', 'os.chdir', (["(clean_dir + '/audio_cleaning')"], {}), "(clean_dir + '/audio_cleaning')\n", (25411, 25442), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26146, 26212), 'os.system', 'os.system', (['(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 clean.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (26155, 26212), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26852, 26897), 'os.chdir', 'os.chdir', (["(augment_dir + '/audio_augmentation')"], {}), "(augment_dir + '/audio_augmentation')\n", (26860, 26897), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27652, 27720), 'os.system', 'os.system', (['(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 augment.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (27661, 27720), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29514, 29584), 'os.system', 'os.system', (['(\'python3 featurize.py "%s"\' % (data_dir + \'/\' + classes[i]))'], {}), '(\'python3 featurize.py "%s"\' % (data_dir + \'/\' + classes[i]))\n', (29523, 29584), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29582, 29619), 'os.chdir', 'os.chdir', (["(data_dir + '/' + classes[i])"], {}), "(data_dir + '/' + classes[i])\n", (29590, 29619), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29653, 29665), 'os.listdir', 'os.listdir', ([], {}), '()\n', (29663, 29665), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((31933, 31955), 'random.shuffle', 'random.shuffle', (['class_'], {}), '(class_)\n', (31947, 31955), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((33075, 33092), 'numpy.array', 'np.array', (['lengths'], {}), '(lengths)\n', (33083, 33092), True, 'import numpy as np\n'), ((33105, 33121), 'numpy.amin', 'np.amin', (['lengths'], {}), '(lengths)\n', (33112, 33121), True, 'import numpy as np\n'), ((41952, 41990), 'os.mkdir', 'os.mkdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (41960, 41990), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((41991, 42029), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (41999, 42029), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42751, 42768), 'numpy.array', 'np.array', (['alldata'], {}), '(alldata)\n', (42759, 42768), True, 'import numpy as np\n'), ((42943, 42961), 'os.system', 'os.system', (['command'], {}), '(command)\n', (42952, 42961), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((42964, 43002), 'os.chdir', 'os.chdir', (["(problemtype + '_transformer')"], {}), "(problemtype + '_transformer')\n", (42972, 43002), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46358, 46404), 'shutil.rmtree', 'shutil.rmtree', (["(model_dir + '/' + model_session)"], {}), "(model_dir + '/' + model_session)\n", (46371, 46404), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46403, 46494), 'shutil.copytree', 'shutil.copytree', (["(visual_dir + '/visualization_session')", "(model_dir + '/' + model_session)"], {}), "(visual_dir + '/visualization_session', model_dir + '/' +\n model_session)\n", (46418, 46494), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46759, 46787), 'shutil.rmtree', 'shutil.rmtree', (['model_session'], {}), '(model_session)\n', (46772, 46787), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((46790, 46813), 'os.mkdir', 'os.mkdir', (['model_session'], {}), '(model_session)\n', (46798, 46813), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59108, 59119), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (59117, 59119), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59123, 59143), 'os.chdir', 'os.chdir', (['foldername'], {}), '(foldername)\n', (59131, 59143), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59147, 59164), 'os.mkdir', 'os.mkdir', (['"""model"""'], {}), "('model')\n", (59155, 59164), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59168, 59185), 'os.chdir', 'os.chdir', (['"""model"""'], {}), "('model')\n", (59176, 59185), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59204, 59215), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (59213, 59215), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61056, 61075), 'os.chdir', 'os.chdir', (['model_dir'], {}), '(model_dir)\n', (61064, 61075), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((5909, 5932), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5930, 5932), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6855, 6867), 'os.listdir', 'os.listdir', ([], {}), '()\n', (6865, 6867), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7206, 7226), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (7217, 7226), True, 'import pandas as pd\n'), ((9869, 9909), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (9893, 9909), False, 'from sklearn import metrics\n'), ((21219, 21251), 'os.chdir', 'os.chdir', (["(prevdir + '/train_dir')"], {}), "(prevdir + '/train_dir')\n", (21227, 21251), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((21260, 21272), 'os.listdir', 'os.listdir', ([], {}), '()\n', (21270, 21272), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((21812, 21832), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (21823, 21832), True, 'import pandas as pd\n'), ((25517, 25555), 'os.chdir', 'os.chdir', (["(clean_dir + '/text_cleaning')"], {}), "(clean_dir + '/text_cleaning')\n", (25525, 25555), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26108, 26145), 'os.chdir', 'os.chdir', (["(clean_dir + '/csv_cleaning')"], {}), "(clean_dir + '/csv_cleaning')\n", (26116, 26145), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((26975, 27019), 'os.chdir', 'os.chdir', (["(augment_dir + '/text_augmentation')"], {}), "(augment_dir + '/text_augmentation')\n", (26983, 27019), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27608, 27651), 'os.chdir', 'os.chdir', (["(augment_dir + '/csv_augmentation')"], {}), "(augment_dir + '/csv_augmentation')\n", (27616, 27651), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((28823, 28869), 'os.chdir', 'os.chdir', (["(prevdir + '/features/audio_features')"], {}), "(prevdir + '/features/audio_features')\n", (28831, 28869), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((30486, 30498), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (30496, 30498), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((33211, 33233), 'random.shuffle', 'random.shuffle', (['class_'], {}), '(class_)\n', (33225, 33233), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((36632, 36653), 'numpy.where', 'np.where', (['(y_pred == 1)'], {}), '(y_pred == 1)\n', (36640, 36653), True, 'import numpy as np\n'), ((36676, 36698), 'numpy.where', 'np.where', (['(y_pred == -1)'], {}), '(y_pred == -1)\n', (36684, 36698), True, 'import numpy as np\n'), ((37202, 37247), 'os.system', 'os.system', (['"""pip3 install statsmodels==0.11.1"""'], {}), "('pip3 install statsmodels==0.11.1')\n", (37211, 37247), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((49420, 49431), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (49429, 49431), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((58960, 59002), 'shutil.copytree', 'shutil.copytree', (['model_session', 'foldername'], {}), '(model_session, foldername)\n', (58975, 59002), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59515, 59551), 'pickle.dump', 'pickle.dump', (['transform_model', 'tmodel'], {}), '(transform_model, tmodel)\n', (59526, 59551), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59659, 59730), 'shutil.move', 'shutil.move', (["(modeldir + '/' + files[j])", "(model_dir_temp + '/' + files[j])"], {}), "(modeldir + '/' + files[j], model_dir_temp + '/' + files[j])\n", (59670, 59730), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59920, 59942), 'pickle.load', 'pickle.load', (['loadmodel'], {}), '(loadmodel)\n', (59931, 59942), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61164, 61197), 'os.chdir', 'os.chdir', (["(problemtype + '_models')"], {}), "(problemtype + '_models')\n", (61172, 61197), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((6154, 6176), 'psutil.disk_usage', 'psutil.disk_usage', (['"""/"""'], {}), "('/')\n", (6171, 6176), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((20828, 20848), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (20839, 20848), True, 'import pandas as pd\n'), ((24014, 24024), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24022, 24024), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((25634, 25673), 'os.chdir', 'os.chdir', (["(clean_dir + '/image_cleaning')"], {}), "(clean_dir + '/image_cleaning')\n", (25642, 25673), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27101, 27146), 'os.chdir', 'os.chdir', (["(augment_dir + '/image_augmentation')"], {}), "(augment_dir + '/image_augmentation')\n", (27109, 27146), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((28968, 29013), 'os.chdir', 'os.chdir', (["(prevdir + '/features/text_features')"], {}), "(prevdir + '/features/text_features')\n", (28976, 29013), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((36528, 36559), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'random_state': '(0)'}), '(random_state=0)\n', (36543, 36559), False, 'from sklearn.ensemble import IsolationForest\n'), ((37420, 37441), 'scipy.stats.zscore', 'stats.zscore', (['alldata'], {}), '(alldata)\n', (37432, 37441), False, 'from scipy import stats\n'), ((50626, 50799), 'train_alphapy.train_alphapy', 'talpy.train_alphapy', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (50645, 50799), True, 'import train_alphapy as talpy\n'), ((59018, 59043), 'shutil.rmtree', 'shutil.rmtree', (['foldername'], {}), '(foldername)\n', (59031, 59043), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((59048, 59090), 'shutil.copytree', 'shutil.copytree', (['model_session', 'foldername'], {}), '(model_session, foldername)\n', (59063, 59090), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((60041, 60062), 'atm.Model.load', 'Model.load', (['modelname'], {}), '(modelname)\n', (60051, 60062), False, 'from atm import Model\n'), ((61211, 61244), 'os.mkdir', 'os.mkdir', (["(problemtype + '_models')"], {}), "(problemtype + '_models')\n", (61219, 61244), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61247, 61280), 'os.chdir', 'os.chdir', (["(problemtype + '_models')"], {}), "(problemtype + '_models')\n", (61255, 61280), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((61894, 61916), 'pickle.load', 'pickle.load', (['loadmodel'], {}), '(loadmodel)\n', (61905, 61916), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((62107, 62177), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model', "(modelname[0:-7] + '_compressed.joblib')"], {'compress': '(3)'}), "(model, modelname[0:-7] + '_compressed.joblib', compress=3)\n", (62118, 62177), False, 'from sklearn.externals import joblib\n'), ((25751, 25790), 'os.chdir', 'os.chdir', (["(clean_dir + '/video_cleaning')"], {}), "(clean_dir + '/video_cleaning')\n", (25759, 25790), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27227, 27272), 'os.chdir', 'os.chdir', (["(augment_dir + '/video_augmentation')"], {}), "(augment_dir + '/video_augmentation')\n", (27235, 27272), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29114, 29160), 'os.chdir', 'os.chdir', (["(prevdir + '/features/image_features')"], {}), "(prevdir + '/features/image_features')\n", (29122, 29160), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((50881, 51049), 'train_atm.train_atm', 'tatm.train_atm', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (50895, 51049), True, 'import train_atm as tatm\n'), ((61321, 61332), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (61330, 61332), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((62727, 62766), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (62746, 62766), False, 'import logging\n'), ((7661, 7672), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7670, 7672), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7675, 7695), 'os.chdir', 'os.chdir', (['"""atm_temp"""'], {}), "('atm_temp')\n", (7683, 7695), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((7792, 7808), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (7800, 7808), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((25863, 25900), 'os.chdir', 'os.chdir', (["(clean_dir + '/csv_cleaning')"], {}), "(clean_dir + '/csv_cleaning')\n", (25871, 25900), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((27348, 27391), 'os.chdir', 'os.chdir', (["(augment_dir + '/csv_augmentation')"], {}), "(augment_dir + '/csv_augmentation')\n", (27356, 27391), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((29262, 29308), 'os.chdir', 'os.chdir', (["(prevdir + '/features/video_features')"], {}), "(prevdir + '/features/video_features')\n", (29270, 29308), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((37491, 37514), 'numpy.where', 'np.where', (['(z > threshold)'], {}), '(z > threshold)\n', (37499, 37514), True, 'import numpy as np\n'), ((51148, 51326), 'train_autobazaar.train_autobazaar', 'autobzr.train_autobazaar', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (51172, 51326), True, 'import train_autobazaar as autobzr\n'), ((60284, 60305), 'torch.load', 'torch.load', (['modelname'], {}), '(modelname)\n', (60294, 60305), False, 'import torch\n'), ((61122, 61133), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (61131, 61133), False, 'import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform\n'), ((62841, 62862), 'keras.models.load_model', 'load_model', (['modelname'], {}), '(modelname)\n', (62851, 62862), False, 'from keras.models import load_model\n'), ((62877, 62897), 'keras_compressor.compressor.compress', 'compress', (['model', '(0.7)'], {}), '(model, 0.7)\n', (62885, 62897), False, 'from keras_compressor.compressor import compress\n'), ((51420, 51596), 'train_autogbt.train_autogbt', 'tautogbt.train_autogbt', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (51442, 51596), True, 'import train_autogbt as tautogbt\n'), ((60398, 60452), 'ludwig.api.LudwigModel.load', 'LudwigModel.load', (['"""ludwig_files/experiment_run/model/"""'], {}), "('ludwig_files/experiment_run/model/')\n", (60414, 60452), False, 'from ludwig.api import LudwigModel\n'), ((7705, 7728), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (7716, 7728), True, 'import pandas as pd\n'), ((13010, 13033), 'sklearn.metrics.auc', 'metrics.auc', (['fper', 'tper'], {}), '(fper, tper)\n', (13021, 13033), False, 'from sklearn import metrics\n'), ((51702, 51877), 'train_autogluon.train_autogluon', 'tautg.train_autogluon', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (51723, 51877), True, 'import train_autogluon as tautg\n'), ((60559, 60580), 'keras.models.load_model', 'load_model', (['modelname'], {}), '(modelname)\n', (60569, 60580), False, 'from keras.models import load_model\n'), ((7859, 7882), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (7870, 7882), True, 'import pandas as pd\n'), ((51976, 52154), 'train_autokaggle.train_autokaggle', 'autokag.train_autokaggle', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (52000, 52154), True, 'import train_autokaggle as autokag\n'), ((52254, 52434), 'train_autokeras.train_autokeras', 'autokeras_.train_autokeras', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (52280, 52434), True, 'import train_autokeras as autokeras_\n'), ((52525, 52699), 'train_automl.train_automl', 'auto_ml.train_automl', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (52545, 52699), True, 'import train_automl as auto_ml\n'), ((8484, 8507), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (8495, 8507), True, 'import pandas as pd\n'), ((53084, 53266), 'train_autopytorch.train_autopytorch', 'autotorch_.train_autopytorch', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (53112, 53266), True, 'import train_autopytorch as autotorch_\n'), ((53348, 53516), 'train_btb.train_btb', 'tbtb.train_btb', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (53362, 53516), True, 'import train_btb as tbtb\n'), ((53606, 53778), 'train_cvopt.train_cvopt', 'tcvopt.train_cvopt', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (53624, 53778), True, 'import train_cvopt as tcvopt\n'), ((53863, 54031), 'train_devol.train_devol', 'td.train_devol', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (53877, 54031), True, 'import train_devol as td\n'), ((54116, 54286), 'train_gama.train_gama', 'tgama.train_gama', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (54132, 54286), True, 'import train_gama as tgama\n'), ((54378, 54552), 'train_gentun.train_gentun', 'tgentun.train_gentun', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (54398, 54552), True, 'import train_gentun as tgentun\n'), ((54650, 54826), 'train_hyperband.train_hyperband', 'thband.train_hyperband', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (54672, 54826), True, 'import train_hyperband as thband\n'), ((54921, 55094), 'train_hypsklearn.train_hypsklearn', 'th.train_hypsklearn', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (54940, 55094), True, 'import train_hypsklearn as th\n'), ((55191, 55367), 'train_hungabunga.train_hungabunga', 'thung.train_hungabunga', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (55213, 55367), True, 'import train_hungabunga as thung\n'), ((55461, 55635), 'train_imbalance.train_imbalance', 'timb.train_imbalance', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (55481, 55635), True, 'import train_imbalance as timb\n'), ((55719, 55887), 'train_keras.train_keras', 'tk.train_keras', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (55733, 55887), True, 'import train_keras as tk\n'), ((55973, 56142), 'train_ludwig.train_ludwig', 'tl.train_ludwig', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (55988, 56142), True, 'import train_ludwig as tl\n'), ((56233, 56405), 'train_mlblocks.train_mlblocks', 'mlb.train_mlblocks', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (56251, 56405), True, 'import train_mlblocks as mlb\n'), ((56493, 56665), 'train_mlbox.train_mlbox', 'mlbox_.train_mlbox', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (56511, 56665), True, 'import train_mlbox as mlbox_\n'), ((56918, 57096), 'train_neuraxle.train_neuraxle', 'tneuraxle.train_neuraxle', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (56942, 57096), True, 'import train_neuraxle as tneuraxle\n'), ((57387, 57564), 'train_pytorch.train_pytorch', 't_pytorch.train_pytorch', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype,\n common_name_model, problemtype, classes, default_featurenames,\n transform_model, settings, model_session)\n', (57410, 57564), True, 'import train_pytorch as t_pytorch\n'), ((57649, 57819), 'train_safe.train_safe', 'tsafe.train_safe', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (57665, 57819), True, 'import train_safe as tsafe\n'), ((57925, 58088), 'train_scsr.train_sc', 'scsr.train_sc', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'minlength'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n minlength)\n', (57938, 58088), True, 'import train_scsr as scsr\n'), ((58364, 58531), 'train_TPOT.train_TPOT', 'tt.train_TPOT', (['X_train', 'X_test', 'y_train', 'y_test', 'mtype', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'settings', 'model_session'], {}), '(X_train, X_test, y_train, y_test, mtype, common_name_model,\n problemtype, classes, default_featurenames, transform_model, settings,\n model_session)\n', (58377, 58531), True, 'import train_TPOT as tt\n'), ((58125, 58281), 'train_scsr.train_sr', 'scsr.train_sr', (['X_train', 'X_test', 'y_train', 'y_test', 'common_name_model', 'problemtype', 'classes', 'default_featurenames', 'transform_model', 'model_dir', 'settings'], {}), '(X_train, X_test, y_train, y_test, common_name_model,\n problemtype, classes, default_featurenames, transform_model, model_dir,\n settings)\n', (58138, 58281), True, 'import train_scsr as scsr\n')] |
"""empty message
Revision ID: <PASSWORD>
Revises: None
Create Date: 2016-04-27 16:54:34.185442
"""
# revision identifiers, used by Alembic.
revision = '<PASSWORD>'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('role',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.Column('description', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_role')),
sa.UniqueConstraint('name', name=op.f('uq_role_name'))
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(), nullable=True),
sa.Column('full_name', sa.String(), nullable=True),
sa.Column('inbox_email', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_user')),
sa.UniqueConstraint('email', name=op.f('uq_user_email')),
sa.UniqueConstraint('inbox_email', name=op.f('uq_user_inbox_email'))
)
op.create_table('note',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.Column('updated', sa.DateTime(), nullable=True),
sa.Column('is_email', sa.Boolean(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_note_user_id_user')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_note'))
)
op.create_table('user_roles',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['role.id'], name=op.f('fk_user_roles_role_id_role')),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_user_roles_user_id_user'))
)
op.create_table('note_history',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('note_id', sa.Integer(), nullable=True),
sa.Column('version', sa.Integer(), nullable=True),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('created', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['note_id'], ['note.id'], name=op.f('fk_note_history_note_id_note')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_note_history'))
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('note_history')
op.drop_table('user_roles')
op.drop_table('note')
op.drop_table('user')
op.drop_table('role')
### end Alembic commands ###
| [
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"sqlalchemy.Boolean",
"sqlalchemy.Text",
"alembic.op.f",
"sqlalchemy.Integer",
"sqlalchemy.String"
] | [((2719, 2748), 'alembic.op.drop_table', 'op.drop_table', (['"""note_history"""'], {}), "('note_history')\n", (2732, 2748), False, 'from alembic import op\n'), ((2753, 2780), 'alembic.op.drop_table', 'op.drop_table', (['"""user_roles"""'], {}), "('user_roles')\n", (2766, 2780), False, 'from alembic import op\n'), ((2785, 2806), 'alembic.op.drop_table', 'op.drop_table', (['"""note"""'], {}), "('note')\n", (2798, 2806), False, 'from alembic import op\n'), ((2811, 2832), 'alembic.op.drop_table', 'op.drop_table', (['"""user"""'], {}), "('user')\n", (2824, 2832), False, 'from alembic import op\n'), ((2837, 2858), 'alembic.op.drop_table', 'op.drop_table', (['"""role"""'], {}), "('role')\n", (2850, 2858), False, 'from alembic import op\n'), ((365, 377), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (375, 377), True, 'import sqlalchemy as sa\n'), ((418, 438), 'sqlalchemy.String', 'sa.String', ([], {'length': '(50)'}), '(length=50)\n', (427, 438), True, 'import sqlalchemy as sa\n'), ((485, 506), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (494, 506), True, 'import sqlalchemy as sa\n'), ((694, 706), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (704, 706), True, 'import sqlalchemy as sa\n'), ((748, 769), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (757, 769), True, 'import sqlalchemy as sa\n'), ((813, 824), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (822, 824), True, 'import sqlalchemy as sa\n'), ((869, 880), 'sqlalchemy.String', 'sa.String', ([], {}), '()\n', (878, 880), True, 'import sqlalchemy as sa\n'), ((927, 948), 'sqlalchemy.String', 'sa.String', ([], {'length': '(255)'}), '(length=255)\n', (936, 948), True, 'import sqlalchemy as sa\n'), ((990, 1002), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1000, 1002), True, 'import sqlalchemy as sa\n'), ((1050, 1063), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1061, 1063), True, 'import sqlalchemy as sa\n'), ((1327, 1339), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1337, 1339), True, 'import sqlalchemy as sa\n'), ((1383, 1392), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (1390, 1392), True, 'import sqlalchemy as sa\n'), ((1435, 1448), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1446, 1448), True, 'import sqlalchemy as sa\n'), ((1491, 1504), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1502, 1504), True, 'import sqlalchemy as sa\n'), ((1548, 1560), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1558, 1560), True, 'import sqlalchemy as sa\n'), ((1603, 1615), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1613, 1615), True, 'import sqlalchemy as sa\n'), ((1844, 1856), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1854, 1856), True, 'import sqlalchemy as sa\n'), ((1899, 1911), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1909, 1911), True, 'import sqlalchemy as sa\n'), ((2182, 2194), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2192, 2194), True, 'import sqlalchemy as sa\n'), ((2238, 2250), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2248, 2250), True, 'import sqlalchemy as sa\n'), ((2293, 2305), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (2303, 2305), True, 'import sqlalchemy as sa\n'), ((2348, 2357), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (2355, 2357), True, 'import sqlalchemy as sa\n'), ((2400, 2413), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (2411, 2413), True, 'import sqlalchemy as sa\n'), ((563, 578), 'alembic.op.f', 'op.f', (['"""pk_role"""'], {}), "('pk_role')\n", (567, 578), False, 'from alembic import op\n'), ((618, 638), 'alembic.op.f', 'op.f', (['"""uq_role_name"""'], {}), "('uq_role_name')\n", (622, 638), False, 'from alembic import op\n'), ((1120, 1135), 'alembic.op.f', 'op.f', (['"""pk_user"""'], {}), "('pk_user')\n", (1124, 1135), False, 'from alembic import op\n'), ((1176, 1197), 'alembic.op.f', 'op.f', (['"""uq_user_email"""'], {}), "('uq_user_email')\n", (1180, 1197), False, 'from alembic import op\n'), ((1244, 1271), 'alembic.op.f', 'op.f', (['"""uq_user_inbox_email"""'], {}), "('uq_user_inbox_email')\n", (1248, 1271), False, 'from alembic import op\n'), ((1692, 1720), 'alembic.op.f', 'op.f', (['"""fk_note_user_id_user"""'], {}), "('fk_note_user_id_user')\n", (1696, 1720), False, 'from alembic import op\n'), ((1762, 1777), 'alembic.op.f', 'op.f', (['"""pk_note"""'], {}), "('pk_note')\n", (1766, 1777), False, 'from alembic import op\n'), ((1988, 2022), 'alembic.op.f', 'op.f', (['"""fk_user_roles_role_id_role"""'], {}), "('fk_user_roles_role_id_role')\n", (1992, 2022), False, 'from alembic import op\n'), ((2084, 2118), 'alembic.op.f', 'op.f', (['"""fk_user_roles_user_id_user"""'], {}), "('fk_user_roles_user_id_user')\n", (2088, 2118), False, 'from alembic import op\n'), ((2490, 2526), 'alembic.op.f', 'op.f', (['"""fk_note_history_note_id_note"""'], {}), "('fk_note_history_note_id_note')\n", (2494, 2526), False, 'from alembic import op\n'), ((2568, 2591), 'alembic.op.f', 'op.f', (['"""pk_note_history"""'], {}), "('pk_note_history')\n", (2572, 2591), False, 'from alembic import op\n')] |
from libraries import *
from text import *
from game import *
from reception import recep
# DISPLAY HELP TEXT
def help_text():
clear_screen()
print_tab("Help text will go here!")
# DISPLAY ABOUT TEXT
def cred_text():
clear_screen()
print_tab(pr_colour("l_green","-- CREDITS --"))
print_tab("Intro Story Reviewers - <NAME>, <NAME>, <NAME> ")
print_tab("Receptionsist Name - <NAME>")
print_tab("Alpha Testers - <NAME>, <NAME>, <NAME>")
print_tab("Beta Testers - <NAME>, <NAME>")
print_tab("User Testers - <NAME>, <NAME>")
# DISPLAY ASCII ART
def game_intro():
clear_screen()
# ascii_del_dil()
print(pr_colour("l_blue","\n\tWelcome to Delviery Dilemma"))
s_pause()
# DISPLAYS AME OVER ASCII ART
def game_over():
ascii_game_over()
# GAME FUNCTION
def new_game():
clear_screen()
game = N_game()
game.enter_name()
game.set_courier()
game.create_char()
pc = game.get_character()
cour = game.get_courier()
pause()
act_1_intro(cour, pc)
recep(game)
game_over()
def menu():
ext = False
while not ext:
clear_screen()
print("")
print_tab(pr_colour("l_blue","-- MAIN MENU --") + "\n")
print_tab("[1] Start\n")
print_tab("[2] Help\n")
print_tab("[3] Credits\n")
print_tab("[4] Exit\n")
try:
main_op = int(input("\tEnter Option: "))
except:
main_op = 10
if main_op == 1:
new_game()
elif main_op == 2:
help_text()
pause()
elif main_op == 3:
cred_text()
pause()
elif main_op == 4:
print("")
print_tab(pr_colour("l_orange","Bye Bye\n"))
ext = True
else:
print_tab("Select a Number from 1-4")
pause()
# MAIN FUNCTION
def main():
game_intro()
menu()
if __name__ == "__main__":
main()
| [
"reception.recep"
] | [((1074, 1085), 'reception.recep', 'recep', (['game'], {}), '(game)\n', (1079, 1085), False, 'from reception import recep\n')] |
from crossed_wires import FuelManagementSystem
import pytest
class Test1:
@pytest.fixture
def fms(self):
return FuelManagementSystem("R8,U5,L5,D3", "U7,R6,D4,L4")
def test_steps_combined_min(self, fms):
assert fms.steps_combined_min() == 30
class Test2:
@pytest.fixture
def fms(self):
return FuelManagementSystem(
"R75,D30,R83,U83,L12,D49,R71,U7,L72", "U62,R66,U55,R34,D71,R55,D58,R83"
)
def test_steps_combined_min(self, fms):
assert fms.steps_combined_min() == 610
class Test3:
@pytest.fixture
def fms(self):
return FuelManagementSystem(
"R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51",
"U98,R91,D20,R16,D67,R40,U7,R15,U6,R7",
)
def test_steps_combined_min(self, fms):
assert fms.steps_combined_min() == 410
| [
"crossed_wires.FuelManagementSystem"
] | [((130, 180), 'crossed_wires.FuelManagementSystem', 'FuelManagementSystem', (['"""R8,U5,L5,D3"""', '"""U7,R6,D4,L4"""'], {}), "('R8,U5,L5,D3', 'U7,R6,D4,L4')\n", (150, 180), False, 'from crossed_wires import FuelManagementSystem\n'), ((341, 438), 'crossed_wires.FuelManagementSystem', 'FuelManagementSystem', (['"""R75,D30,R83,U83,L12,D49,R71,U7,L72"""', '"""U62,R66,U55,R34,D71,R55,D58,R83"""'], {}), "('R75,D30,R83,U83,L12,D49,R71,U7,L72',\n 'U62,R66,U55,R34,D71,R55,D58,R83')\n", (361, 438), False, 'from crossed_wires import FuelManagementSystem\n'), ((618, 729), 'crossed_wires.FuelManagementSystem', 'FuelManagementSystem', (['"""R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51"""', '"""U98,R91,D20,R16,D67,R40,U7,R15,U6,R7"""'], {}), "('R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51',\n 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7')\n", (638, 729), False, 'from crossed_wires import FuelManagementSystem\n')] |
import numpy as np
import cvxpy as cvx
import util
def set_contains_array(S, a):
"""
:param S: list of np.ndarray
:param a: np.ndarray
:return: contains, 0 or 1
"""
contains = 0
for b in S:
if not (a - b).any(): # if a contained in S
contains = 1
return contains
def set_sum_two(A, B):
"""
:param A: list of np.ndarray
:param B: list of np.ndarray
:return: list of np.ndarray
"""
C = []
for a in A:
for b in B:
if not set_contains_array(C, a + b):
C.append(a + b)
return C
def set_sum_list(Omega):
"""
Set sum of multiple set of np.ndarray
:param Omega: list of list of np.ndarray
:return: list of np.ndarray
"""
S = Omega[0]
# print 'len(Omega) =', len(Omega)
# print 0, 'S =', S
for i in range(1, len(Omega)):
# print i, 'Omega[i] =',Omega[i]
S = set_sum_two(S, Omega[i])
# print i, 'S =', S
return S
def pointwise_dominate(w, U):
"""
Test if w is point-wise dominated by all u in U
:param w: np.ndarray
:param U: list of np.ndarray
:return:
"""
for u in U:
if np.all(w < u):
return True
return False
def lp_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With LP in White & Clark
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
# print("LP dominate")
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
constraints = [b.T*(w-u) >= d for u in U] + [np.sum(b) == 1]
prob = cvx.Problem(objective, constraints)
result = prob.solve()
# print("d =", d.value)
if d.value >= 0:
return np.ravel(b.value)
else:
return None
def dec_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With Bender's decomposition (Walraven & Spaan, 2017)
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
constraints = [np.sum(b) == 1]
b_ = np.random.random(S)
b_ = b_ / np.sum(b_)
U_ = []
while 1:
_b = b_
u_ = U[np.argmin([np.dot((w - U[i]), _b) for i in range(len(U))])]
constraints += [d <= b.T*(w-u_)]
U_.append(u_)
prob = cvx.Problem(objective, constraints)
_ = prob.solve()
b_ = np.ravel(b.value)
if not (b_ - _b).any():
break
if d.value >= 0:
return _b
else:
return None
def lex_less(u, w):
if w is None:
return False
for i in range(len(u)):
if u[i] > w[i]:
return False
return True
def best_point(b, U):
# print("Find best")
_max = -np.inf
w = None
for i in range(len(U)):
u = U[i]
# print("b", b)
# print("u", u)
x = np.dot(b, u)
# print("x", x)
if x > _max or (x == _max and lex_less(u, U[w])):
w = i
_max = x
# print("max", _max)
return w
def prune(W, A=None):
# print("prune", W)
D, E = [], []
while len(W) > 0:
w = W[-1]
if pointwise_dominate(w, D):
W.pop()
else:
# b = lp_dominate(w, D)
b = dec_dominate(w, D)
if b is None:
W.pop()
else:
i = best_point(b, W)
D.append(W[i])
if A is not None:
E.append(A[i])
W.pop(i)
if A is not None:
return D, E
else:
return D
def set_union(V):
V_ = []
for v in V:
V_ += v
return V_
class POMDP:
def __init__(self, P=None, Z=None, R=None, g=None, alpha=1.0):
self.P = P # m x n x n: a(t)->s(t)->s(t+1)
self.Z = Z # m x n x k: a(t)->s(t+1)->o(t+1)
self.R = R # m x n x n: a(t)->s(t+1)->s(t+1)
self.g = g # n x 1: s(T)
self.alpha = alpha # discount factor
self.nActions = self.Z.shape[0] # m
self.nStates = self.Z.shape[1] # n
self.nLevels = self.Z.shape[2] # k
if g is None:
self.g = np.zeros(self.nStates)
# print self.nActions, self.nStates, self.nLevels
def update_belief(self, b, a, o):
p = self.Z[a, :, o] * self.P[a].T.dot(b)
return p / p.sum()
def monahan_enumeration(self, V):
"""construct the set of Omega
:param V: input list of alpha vectors
"""
V_, A_ = [], []
for a in range(self.nActions):
# print("Action", a)
Va = []
_r = np.sum(self.P[a] * self.R[a], axis=1) / self.nLevels
# print("_r:", _r)
for z in range(self.nLevels):
# print("Obs", z)
Vaz = [_r + self.alpha * (self.Z[a,:,z] * v).dot(self.P[a]) for v in V]
# print("Vaz", Vaz)
if len(Va) > 0:
Va = prune(set_sum_two(Va, Vaz)) # incremental pruning
else:
Va = Vaz
A_ += [a for _ in Va]
V_ += Va
V_, A_ = prune(V_, A_)
return V_, A_
def transition(self, a, s):
return np.random.choice(self.nStates, p=self.P[a, s])
def emmission(self, a, s):
return np.random.choice(self.nStates, p=self.Z[a, s])
@staticmethod
def optimal_action(b, V, A):
assert len(V) == len(A)
values = [np.dot(b, v) for v in V]
opt_idx = np.argmax(values)
return A[opt_idx], V[opt_idx]
def solve(self, T):
V = self.g
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T-1-t] = V
Actions[T-1-t] = A
return Values, Actions
def plan(self, T, initial_belief=None, perform=False):
V = self.g
if initial_belief is None:
initial_belief = np.ones(self.nStates) / self.nStates
b = initial_belief
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T - 1 - t] = V
Actions[T - 1 - t] = A
a0, v0 = self.optimal_action(b, Values[0], Actions[0])
if not perform:
return a0, v0
s = np.random.choice(self.nStates, p=b)
actions, states, observations, reward = [], [], [], 0.0
for t in range(T):
a, v = self.optimal_action(b, Values[t], Actions[t])
# print('a', a)
# print('v', v)
_s = s
s = self.transition(a, s)
o = self.transition(a, s)
b = self.update_belief(b, a, o)
states.append(_s)
actions.append(s)
observations.append(o)
reward += self.R[a, _s, s] * self.alpha ** t
return a0, v0, actions, states, observations, reward
def test_pomdp(nActions, nStates, nLevels, alpha):
# P = np.array([
# [[0.25, 0.75], [0.6 , 0.4 ]],
# [[0.5 , 0.5 ], [0.7 , 0.3 ]]])
# Z = np.array([
# [[0.55, 0.45], [0.3 , 0.7 ]],
# [[0.65, 0.35], [0.25, 0.75]]])
# R = np.array([
# [[2., 2. ], [ 0., 0.]],
# [[3., 3. ], [-1., -1.]]])
# g = np.array([2., -1.])
P = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
Z = util.normalize(np.random.random(size=(nActions, nStates, nLevels)), axis=2)
R = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
g = util.normalize(np.random.random(size=(nStates)), axis=0)
pomdp = POMDP(P, Z, R, g, alpha)
T = 10
V = pomdp.g
a0, v0 = pomdp.plan(T, initial_belief=None, perform=False)
# a0, v0, actions, states, observations, reward = pomdp.plan(T, initial_belief=None, perform=True)
# print('a0 =', a0, 'v0 =', v0)
# print('actions:', actions)
# print('states:', states)
# print('observations:', observations)
# print('reward:', reward)
# for t in range(T):
# print("Iteration", t+1)
# V, A = pomdp.monahan_enumeration(V)
# for v, a in zip(V, A):
# print(v, a)
if __name__ == "__main__":
# import timeit
# print(timeit.timeit("main()"))
import time
for s in range(123, 133):
start_time = time.time()
np.random.seed(s)
print("===== SEED %d =====" %(s))
test_pomdp(nActions=2, nStates=3, nLevels=3, alpha=0.9975)
end_time = time.time()
print(end_time - start_time)
| [
"cvxpy.Variable",
"cvxpy.Problem",
"numpy.ones",
"numpy.random.random",
"numpy.random.choice",
"numpy.argmax",
"numpy.sum",
"numpy.dot",
"numpy.zeros",
"numpy.random.seed",
"numpy.ravel",
"numpy.all",
"time.time",
"cvxpy.Maximize"
] | [((1555, 1569), 'cvxpy.Variable', 'cvx.Variable', ([], {}), '()\n', (1567, 1569), True, 'import cvxpy as cvx\n'), ((1578, 1593), 'cvxpy.Variable', 'cvx.Variable', (['S'], {}), '(S)\n', (1590, 1593), True, 'import cvxpy as cvx\n'), ((1610, 1625), 'cvxpy.Maximize', 'cvx.Maximize', (['d'], {}), '(d)\n', (1622, 1625), True, 'import cvxpy as cvx\n'), ((1722, 1757), 'cvxpy.Problem', 'cvx.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (1733, 1757), True, 'import cvxpy as cvx\n'), ((2205, 2219), 'cvxpy.Variable', 'cvx.Variable', ([], {}), '()\n', (2217, 2219), True, 'import cvxpy as cvx\n'), ((2228, 2243), 'cvxpy.Variable', 'cvx.Variable', (['S'], {}), '(S)\n', (2240, 2243), True, 'import cvxpy as cvx\n'), ((2260, 2275), 'cvxpy.Maximize', 'cvx.Maximize', (['d'], {}), '(d)\n', (2272, 2275), True, 'import cvxpy as cvx\n'), ((2340, 2359), 'numpy.random.random', 'np.random.random', (['S'], {}), '(S)\n', (2356, 2359), True, 'import numpy as np\n'), ((1193, 1206), 'numpy.all', 'np.all', (['(w < u)'], {}), '(w < u)\n', (1199, 1206), True, 'import numpy as np\n'), ((1848, 1865), 'numpy.ravel', 'np.ravel', (['b.value'], {}), '(b.value)\n', (1856, 1865), True, 'import numpy as np\n'), ((2374, 2384), 'numpy.sum', 'np.sum', (['b_'], {}), '(b_)\n', (2380, 2384), True, 'import numpy as np\n'), ((2579, 2614), 'cvxpy.Problem', 'cvx.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (2590, 2614), True, 'import cvxpy as cvx\n'), ((2653, 2670), 'numpy.ravel', 'np.ravel', (['b.value'], {}), '(b.value)\n', (2661, 2670), True, 'import numpy as np\n'), ((3130, 3142), 'numpy.dot', 'np.dot', (['b', 'u'], {}), '(b, u)\n', (3136, 3142), True, 'import numpy as np\n'), ((5504, 5550), 'numpy.random.choice', 'np.random.choice', (['self.nStates'], {'p': 'self.P[a, s]'}), '(self.nStates, p=self.P[a, s])\n', (5520, 5550), True, 'import numpy as np\n'), ((5598, 5644), 'numpy.random.choice', 'np.random.choice', (['self.nStates'], {'p': 'self.Z[a, s]'}), '(self.nStates, p=self.Z[a, s])\n', (5614, 5644), True, 'import numpy as np\n'), ((5790, 5807), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (5799, 5807), True, 'import numpy as np\n'), ((6728, 6763), 'numpy.random.choice', 'np.random.choice', (['self.nStates'], {'p': 'b'}), '(self.nStates, p=b)\n', (6744, 6763), True, 'import numpy as np\n'), ((7729, 7780), 'numpy.random.random', 'np.random.random', ([], {'size': '(nActions, nStates, nStates)'}), '(size=(nActions, nStates, nStates))\n', (7745, 7780), True, 'import numpy as np\n'), ((7813, 7864), 'numpy.random.random', 'np.random.random', ([], {'size': '(nActions, nStates, nLevels)'}), '(size=(nActions, nStates, nLevels))\n', (7829, 7864), True, 'import numpy as np\n'), ((7897, 7948), 'numpy.random.random', 'np.random.random', ([], {'size': '(nActions, nStates, nStates)'}), '(size=(nActions, nStates, nStates))\n', (7913, 7948), True, 'import numpy as np\n'), ((7981, 8011), 'numpy.random.random', 'np.random.random', ([], {'size': 'nStates'}), '(size=nStates)\n', (7997, 8011), True, 'import numpy as np\n'), ((8743, 8754), 'time.time', 'time.time', ([], {}), '()\n', (8752, 8754), False, 'import time\n'), ((8763, 8780), 'numpy.random.seed', 'np.random.seed', (['s'], {}), '(s)\n', (8777, 8780), True, 'import numpy as np\n'), ((8909, 8920), 'time.time', 'time.time', ([], {}), '()\n', (8918, 8920), False, 'import time\n'), ((2315, 2324), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (2321, 2324), True, 'import numpy as np\n'), ((4441, 4463), 'numpy.zeros', 'np.zeros', (['self.nStates'], {}), '(self.nStates)\n', (4449, 4463), True, 'import numpy as np\n'), ((5747, 5759), 'numpy.dot', 'np.dot', (['b', 'v'], {}), '(b, v)\n', (5753, 5759), True, 'import numpy as np\n'), ((1695, 1704), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (1701, 1704), True, 'import numpy as np\n'), ((4905, 4942), 'numpy.sum', 'np.sum', (['(self.P[a] * self.R[a])'], {'axis': '(1)'}), '(self.P[a] * self.R[a], axis=1)\n', (4911, 4942), True, 'import numpy as np\n'), ((6298, 6319), 'numpy.ones', 'np.ones', (['self.nStates'], {}), '(self.nStates)\n', (6305, 6319), True, 'import numpy as np\n'), ((2452, 2472), 'numpy.dot', 'np.dot', (['(w - U[i])', '_b'], {}), '(w - U[i], _b)\n', (2458, 2472), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding=utf-8
import os as os
import sys as sys
import traceback as trb
import argparse as argp
import csv as csv
import functools as fnt
import collections as col
import multiprocessing as mp
import numpy as np
import pandas as pd
import intervaltree as ivt
def parse_command_line():
"""
:return:
"""
parser = argp.ArgumentParser()
parser.add_argument('--input', '-i', type=str, dest='inputfile')
parser.add_argument('--cons', '-c', type=str, dest='conservation')
parser.add_argument('--direction', '-d', type=str, choices=['target', 'query'], dest='direction')
parser.add_argument('--chromosomes', '-chr', type=str, dest='chromosomes')
parser.add_argument('--workers', '-w', type=int, default=4, dest='workers')
parser.add_argument('--output', '-o', type=str, dest='outputfile')
args = parser.parse_args()
return args
def compute_weights(cons, enh):
"""
:param enh:
:param cons:
:return:
"""
s, e = enh['start'], enh['end']
enh['ftcons_enh_abs_min'] = np.round(cons[s:e].min(), 2)
enh['ftcons_enh_abs_max'] = np.round(cons[s:e].max(), 2)
enh['ftcons_enh_abs_mean'] = np.round(cons[s:e].mean(), 2)
enh['ftcons_enh_abs_median'] = np.round(cons[s:e].median(), 2)
total_score = sum([float(s) for s in enh['assoc_score'].split(',')])
enh['weight'] = 1000 - np.round(float(enh['enhancer_score']) * total_score, 2)
return enh
def process_mapped_enhancer(params):
"""
:param params:
:return:
"""
inputfile, consfile, chrom = params
with pd.HDFStore(consfile, 'r') as hdf:
cons_scores = hdf[chrom]
comp_wt = fnt.partial(compute_weights, cons_scores)
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite', 'cluster_id',
'name', 'symbol', 'assoc_score', 'enh_gene_dist']
regions = []
with open(inputfile, 'r', newline='') as infile:
rows = csv.DictReader(infile, delimiter='\t', fieldnames=header)
for r in rows:
if r['chrom'] == chrom:
r['start'] = int(r['start'])
r['end'] = int(r['end'])
l = r['end'] - r['start']
if l < 2:
continue
r = comp_wt(r)
regions.append(comp_wt(r))
ivtree = ivt.IntervalTree()
for r in regions:
ivtree[r['start']:r['end']] = r['GHid'], r['ftcons_enh_abs_mean'], r['weight']
regions = sorted(regions, key=lambda d: d['ftcons_enh_abs_mean'])
blacklist = set()
whitelist = set()
for item in regions:
if item['GHid'] in blacklist:
continue
overlaps = ivtree[item['start']:item['end']]
if len(overlaps) == 1:
whitelist.add(overlaps.pop().data[0])
elif len(overlaps) > 1:
overlaps = [o for o in sorted(overlaps, key=lambda i: (i.data[1], i.data[2])) if o.data[0] not in blacklist]
whitelist.add(overlaps[0].data[0])
[blacklist.add(o.data[0]) for o in overlaps[1:]]
else:
raise AssertionError('No self-overlap in tree: {}'.format(item))
regions = sorted([r for r in regions if r['GHid'] in whitelist], key=lambda x: (x['start'], x['end']))
return regions
def process_target_enhancer(args):
"""
:param args:
:return:
"""
with pd.HDFStore(args.conservation, 'r') as hdf:
chroms = [k.strip('/') for k in hdf.keys()]
params = [(args.inputfile, args.conservation, c) for c in chroms]
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite',
'name', 'symbol', 'assoc_score', 'ftcons_enh_abs_mean',
'ftcons_enh_abs_median', 'ftcons_enh_abs_min', 'ftcons_enh_abs_max']
with mp.Pool(args.workers) as pool:
res = pool.imap_unordered(process_mapped_enhancer, params)
outbuffer = []
for regions in res:
outbuffer.extend(regions)
outbuffer = sorted(outbuffer, key=lambda d: (d['chrom'], d['start'], d['end']))
with open(args.outputfile, 'w') as out:
_ = out.write('#')
writer = csv.DictWriter(out, fieldnames=header, delimiter='\t', extrasaction='ignore')
writer.writeheader()
writer.writerows(outbuffer)
return
def process_annotated_enhancer(params):
"""
:param params:
:return:
"""
enh_file, chrom = params
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite',
'ftcons_enh_abs_mean', 'ftcons_enh_abs_median',
'ftcons_enh_abs_min', 'ftcons_enh_abs_max']
enh_collect = col.defaultdict(list)
with open(enh_file, 'r') as infile:
rows = csv.DictReader(infile, delimiter='\t', fieldnames=header)
for row in rows:
if row['chrom'] == chrom:
row['start'] = int(row['start'])
row['end'] = int(row['end'])
row['enhancer_score'] = float(row['enhancer_score'])
row['ftcons_enh_abs_mean'] = float(row['ftcons_enh_abs_mean'])
enh_collect[row['GHid']].append(row)
enh_collect = merge_split_enhancers(enh_collect)
ivtree = ivt.IntervalTree()
for r in enh_collect:
ivtree[r['start']:r['end']] = r['GHid'], r['ftcons_enh_abs_mean'], r['ftcons_enh_abs_min']
enh_collect = sorted(enh_collect, key=lambda d: d['ftcons_enh_abs_mean'])
blacklist = set()
whitelist = set()
for item in enh_collect:
ghid = item['GHid']
if ghid in blacklist or ghid in whitelist:
continue
overlaps = ivtree[item['start']:item['end']]
if len(overlaps) == 1:
# that is: only self overlap
whitelist.add(ghid)
continue
elif len(overlaps) > 1:
if any([o.data[0] in whitelist for o in overlaps if o.data[0] != ghid]):
# region overlaps with a whitelist region -> blacklist
blacklist.add(ghid)
continue
overlaps = [o for o in sorted(overlaps, key=lambda i: (i.data[1], i.data[2])) if o.data[0] not in blacklist]
if overlaps[0].data[0] == ghid:
# the query region has highest conservation
# others can safely be blacklisted
whitelist.add(ghid)
[blacklist.add(o.data[0]) for o in overlaps[1:]]
else:
# another region is selected; could be that among
# the remaining regions, others might also be feasible
blacklist.add(ghid)
whitelist.add(overlaps[0].data[0])
else:
raise AssertionError('No self-overlap in tree: {}'.format(item))
enh_collect = sorted([r for r in enh_collect if r['GHid'] in whitelist], key=lambda x: (x['start'], x['end']))
return enh_collect
def merge_split_enhancers(collector):
"""
:param collector:
:return:
"""
mrg_collect = []
for ghid, splits in collector.items():
if len(splits) == 1:
mrg_collect.append(splits[0])
continue
c = 1
splits = sorted(splits, key=lambda d: (d['start'], d['end']))
s, e = splits[0]['start'], splits[1]['end']
for idx, entry in enumerate(splits[:-1]):
if splits[idx+1]['start'] <= e + 100:
s = min(s, splits[idx+1]['start'])
e = max(e, splits[idx+1]['end'])
else:
new_enh = dict(splits[0])
new_enh['GHid'] = new_enh['GHid'] + '-{}-{}'.format(new_enh['chrom'].strip('chr'), c)
new_enh['start'] = s
new_enh['end'] = e
mrg_collect.append(new_enh)
c += 1
s, e = splits[idx+1]['start'], splits[idx+1]['end']
new_enh = dict(splits[0])
new_enh['GHid'] = new_enh['GHid'] + '-{}-{}'.format(new_enh['chrom'].strip('chr'), c)
new_enh['start'] = s
new_enh['end'] = e
mrg_collect.append(new_enh)
mrg_collect = [m for m in mrg_collect if m['end'] - m['start'] > 49]
return mrg_collect
def process_query_enhancer(args):
"""
:param args:
:return:
"""
with open(args.chromosomes, 'r') as infile:
chroms = [l.split()[0].strip() for l in infile.readlines()]
header = ['chrom', 'start', 'end', 'GHid', 'enhancer_score', 'is_elite',
'ftcons_enh_abs_mean', 'ftcons_enh_abs_median',
'ftcons_enh_abs_min', 'ftcons_enh_abs_max']
params = [(args.inputfile, c) for c in chroms]
with mp.Pool(args.workers) as pool:
res = pool.imap_unordered(process_annotated_enhancer, params)
outbuffer = []
for regions in res:
outbuffer.extend(regions)
outbuffer = sorted(outbuffer, key=lambda d: (d['chrom'], d['start'], d['end']))
with open(args.outputfile, 'w') as out:
_ = out.write('#')
writer = csv.DictWriter(out, fieldnames=header, delimiter='\t', extrasaction='ignore')
writer.writeheader()
writer.writerows(outbuffer)
return
if __name__ == '__main__':
try:
args = parse_command_line()
if args.direction == 'target':
process_target_enhancer(args)
else:
process_query_enhancer(args)
except Exception as err:
trb.print_exc()
raise err
else:
sys.exit(0)
| [
"intervaltree.IntervalTree",
"csv.DictWriter",
"csv.DictReader",
"argparse.ArgumentParser",
"traceback.print_exc",
"collections.defaultdict",
"functools.partial",
"multiprocessing.Pool",
"sys.exit",
"pandas.HDFStore"
] | [((354, 375), 'argparse.ArgumentParser', 'argp.ArgumentParser', ([], {}), '()\n', (373, 375), True, 'import argparse as argp\n'), ((1669, 1710), 'functools.partial', 'fnt.partial', (['compute_weights', 'cons_scores'], {}), '(compute_weights, cons_scores)\n', (1680, 1710), True, 'import functools as fnt\n'), ((2338, 2356), 'intervaltree.IntervalTree', 'ivt.IntervalTree', ([], {}), '()\n', (2354, 2356), True, 'import intervaltree as ivt\n'), ((4644, 4665), 'collections.defaultdict', 'col.defaultdict', (['list'], {}), '(list)\n', (4659, 4665), True, 'import collections as col\n'), ((5204, 5222), 'intervaltree.IntervalTree', 'ivt.IntervalTree', ([], {}), '()\n', (5220, 5222), True, 'import intervaltree as ivt\n'), ((1587, 1613), 'pandas.HDFStore', 'pd.HDFStore', (['consfile', '"""r"""'], {}), "(consfile, 'r')\n", (1598, 1613), True, 'import pandas as pd\n'), ((1951, 2008), 'csv.DictReader', 'csv.DictReader', (['infile'], {'delimiter': '"""\t"""', 'fieldnames': 'header'}), "(infile, delimiter='\\t', fieldnames=header)\n", (1965, 2008), True, 'import csv as csv\n'), ((3368, 3403), 'pandas.HDFStore', 'pd.HDFStore', (['args.conservation', '"""r"""'], {}), "(args.conservation, 'r')\n", (3379, 3403), True, 'import pandas as pd\n'), ((3773, 3794), 'multiprocessing.Pool', 'mp.Pool', (['args.workers'], {}), '(args.workers)\n', (3780, 3794), True, 'import multiprocessing as mp\n'), ((4721, 4778), 'csv.DictReader', 'csv.DictReader', (['infile'], {'delimiter': '"""\t"""', 'fieldnames': 'header'}), "(infile, delimiter='\\t', fieldnames=header)\n", (4735, 4778), True, 'import csv as csv\n'), ((8597, 8618), 'multiprocessing.Pool', 'mp.Pool', (['args.workers'], {}), '(args.workers)\n', (8604, 8618), True, 'import multiprocessing as mp\n'), ((9436, 9447), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9444, 9447), True, 'import sys as sys\n'), ((4148, 4225), 'csv.DictWriter', 'csv.DictWriter', (['out'], {'fieldnames': 'header', 'delimiter': '"""\t"""', 'extrasaction': '"""ignore"""'}), "(out, fieldnames=header, delimiter='\\t', extrasaction='ignore')\n", (4162, 4225), True, 'import csv as csv\n'), ((8975, 9052), 'csv.DictWriter', 'csv.DictWriter', (['out'], {'fieldnames': 'header', 'delimiter': '"""\t"""', 'extrasaction': '"""ignore"""'}), "(out, fieldnames=header, delimiter='\\t', extrasaction='ignore')\n", (8989, 9052), True, 'import csv as csv\n'), ((9384, 9399), 'traceback.print_exc', 'trb.print_exc', ([], {}), '()\n', (9397, 9399), True, 'import traceback as trb\n')] |
#!/usr/bin/env python3
# BSD 3-Clause "New" or "Revised" License
# Copyright (c) 2021, Masanori-Suzu1024 RyuichiUeda
# All rights reserved.
# Genshin is a copyrighted work of miHoYo co., Ltd
import rospy
from std_msgs.msg import Int32
n = 0
def cb(message):
global n
n = message.data
if __name__== '__main__':
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Int32, cb)
pub = rospy.Publisher('twice', Int32, queue_size=10)
rate = rospy.Rate(1)
a = 0
while not rospy.is_shutdown():
a = a + n
pub.publish(a)
rate.sleep()
| [
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.Rate",
"rospy.Publisher"
] | [((330, 354), 'rospy.init_node', 'rospy.init_node', (['"""twice"""'], {}), "('twice')\n", (345, 354), False, 'import rospy\n'), ((365, 404), 'rospy.Subscriber', 'rospy.Subscriber', (['"""count_up"""', 'Int32', 'cb'], {}), "('count_up', Int32, cb)\n", (381, 404), False, 'import rospy\n'), ((415, 461), 'rospy.Publisher', 'rospy.Publisher', (['"""twice"""', 'Int32'], {'queue_size': '(10)'}), "('twice', Int32, queue_size=10)\n", (430, 461), False, 'import rospy\n'), ((473, 486), 'rospy.Rate', 'rospy.Rate', (['(1)'], {}), '(1)\n', (483, 486), False, 'import rospy\n'), ((511, 530), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (528, 530), False, 'import rospy\n')] |
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect
from campy.gui.events.mouse import onmouseclicked
import random
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 400
ZONE_WIDTH = 100
ZONE_HEIGHT = 100
BALL_RADIUS = 15
MAX_SPEED = 6
MIN_Y_SPEED = 2
class ZoneGraphics:
def __init__(self, window_width=WINDOW_WIDTH, window_height=WINDOW_HEIGHT,
zone_width=ZONE_WIDTH, zone_height=ZONE_HEIGHT, ball_radius=BALL_RADIUS):
# Create window
self.window = GWindow(window_width, window_height, title='Zone Game')
# Create zone
self.zone = GRect(zone_width, zone_height, x=(window_width - zone_width) / 2,
y=(window_height - zone_height) / 2)
self.zone.color = 'blue'
self.window.add(self.zone)
# Create ball and initialize velocity/position
self.ball = GOval(2 * ball_radius, 2 * ball_radius)
self.ball.filled = True
self.ball.fill_color = 'salmon'
self.dx = 0
self.dy = 0
self.reset_ball()
# Initialize mouse listeners
onmouseclicked(self.handle_click)
# Set ball position at random inside the window
def set_ball_position(self):
self.ball.x = random.randint(0, self.window.width - self.ball.width)
self.ball.y = random.randint(0, self.window.height - self.ball.height)
def set_ball_velocity(self):
self.dx = random.randint(0, MAX_SPEED)
if random.random() > 0.5:
self.dx = -self.dx
self.dy = random.randint(MIN_Y_SPEED, MAX_SPEED)
if random.random() > 0.5:
self.dy = -self.dy
def reset_ball(self):
self.set_ball_position()
while self.ball_in_zone():
self.set_ball_position()
self.set_ball_velocity()
self.window.add(self.ball)
def move_ball(self):
self.ball.move(self.dx, self.dy)
def handle_wall_collisions(self):
if self.ball.x + self.ball.width >= self.window.width or self.ball.x <= 0:
self.dx = -self.dx
if self.ball.y + self.ball.height >= self.window.height or self.ball.y <= 0:
self.dy = -self.dy
def ball_in_zone(self):
zone_left_side = self.zone.x
zone_right_side = self.zone.x + self.zone.width
ball_x_in_zone = zone_left_side <= self.ball.x <= zone_right_side - self.ball.width
zone_top_side = self.zone.y
zone_bottom_side = self.zone.y + self.zone.height
ball_y_in_zone = zone_top_side <= self.ball.y <= zone_bottom_side - self.ball.height
return ball_x_in_zone and ball_y_in_zone
def handle_click(self, event):
obj = self.window.get_object_at(event.x, event.y)
if self.ball == obj:
self.reset_ball()
| [
"campy.graphics.gobjects.GRect",
"campy.graphics.gwindow.GWindow",
"campy.graphics.gobjects.GOval",
"random.random",
"random.randint",
"campy.gui.events.mouse.onmouseclicked"
] | [((517, 572), 'campy.graphics.gwindow.GWindow', 'GWindow', (['window_width', 'window_height'], {'title': '"""Zone Game"""'}), "(window_width, window_height, title='Zone Game')\n", (524, 572), False, 'from campy.graphics.gwindow import GWindow\n'), ((616, 723), 'campy.graphics.gobjects.GRect', 'GRect', (['zone_width', 'zone_height'], {'x': '((window_width - zone_width) / 2)', 'y': '((window_height - zone_height) / 2)'}), '(zone_width, zone_height, x=(window_width - zone_width) / 2, y=(\n window_height - zone_height) / 2)\n', (621, 723), False, 'from campy.graphics.gobjects import GOval, GRect\n'), ((889, 928), 'campy.graphics.gobjects.GOval', 'GOval', (['(2 * ball_radius)', '(2 * ball_radius)'], {}), '(2 * ball_radius, 2 * ball_radius)\n', (894, 928), False, 'from campy.graphics.gobjects import GOval, GRect\n'), ((1115, 1148), 'campy.gui.events.mouse.onmouseclicked', 'onmouseclicked', (['self.handle_click'], {}), '(self.handle_click)\n', (1129, 1148), False, 'from campy.gui.events.mouse import onmouseclicked\n'), ((1262, 1316), 'random.randint', 'random.randint', (['(0)', '(self.window.width - self.ball.width)'], {}), '(0, self.window.width - self.ball.width)\n', (1276, 1316), False, 'import random\n'), ((1339, 1395), 'random.randint', 'random.randint', (['(0)', '(self.window.height - self.ball.height)'], {}), '(0, self.window.height - self.ball.height)\n', (1353, 1395), False, 'import random\n'), ((1448, 1476), 'random.randint', 'random.randint', (['(0)', 'MAX_SPEED'], {}), '(0, MAX_SPEED)\n', (1462, 1476), False, 'import random\n'), ((1560, 1598), 'random.randint', 'random.randint', (['MIN_Y_SPEED', 'MAX_SPEED'], {}), '(MIN_Y_SPEED, MAX_SPEED)\n', (1574, 1598), False, 'import random\n'), ((1488, 1503), 'random.random', 'random.random', ([], {}), '()\n', (1501, 1503), False, 'import random\n'), ((1610, 1625), 'random.random', 'random.random', ([], {}), '()\n', (1623, 1625), False, 'import random\n')] |
# -*- coding: utf-8 -*-
# @author : wanglei
# @date : 2021/2/19 1:47 PM
# @description :
import numpy as np
"""
感应器对象
"""
class Perceptron(object):
"""
该方法为感应器的初始化方法
eta:学习速率
n_iter:学习次数(迭代次数)
"""
def __init__(self, eta=0.01, n_iter=10):
self.eta = eta
self.n_iter = n_iter
"""
该方法为模型训练的方法
shape[0]返回该矩阵有几行
shape[1]返回该矩阵有几列
在这个例子中X.shape[1]=2
np.zeros(1 + X.shape[1])是一个1行3列的元素都为零的列表
"""
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1]) # 初始化一个权重和阈值的列表,初始值为0
self.errors_ = [] # 用来记录每一次迭代全样本的错误预测次数
for _ in range(self.n_iter): # 进行多次预测样本
errors = 0 # 用来记录本次预测的全样本的错误次数
for xi, target in zip(X, y): # 遍历这个样本集和实际结果集
update = self.eta * (
target - self.predict(xi)) # 用实际结果值减掉预测结果值如果该值为0,表示预测正确,如果不为0则乘上学习速率,获取的值就是本次权重、阈值需要更新的值
self.w_[1:] += update * xi # 如果预测正确,则update为0,那么权重本次就无需改变,否则,增加
self.w_[0] += update # 如果预测正确,则update为0,那么阈值本次就无需改变,否则,增加
errors += int(update != 0.0) # 预测错误就记录一次错误数
self.errors_.append(errors) # 将所有的样本数据预测完成后,将本次的预测错误的次数放到error_这个列表中
return self
"""
该方法为将一个样本的属性值进行处理的方法
X=array([[1,2,3,4],[5,6,7,8],...])
self.w_[1:]=array([0,0,0,0])
根据api:dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
np.dot(X,self.w_[1:])=array([[0],[0],...])【将每一个属性乘上权重再将每一个样本的每个属性值进行求和】
self.w_[0]=array([[0]])获取阈值
"""
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
"""
该方法为一个样本的预测结果输出方法
numpy.where(condition[, x, y])
就是一个三目运算,满足条件就输出x,否则输出y
"""
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
import pandas as pd
"""
读取数据源
"""
df = pd.read_csv("/Users/a1/Downloads/iris.data", header=None)
print(df.tail()) # 打印后几行
y = df.iloc[0:100, 4].values # 取前100行数据的第4列,类标这一列,前100行就两类
print(y)
y = np.where(y == 'Iris-setosa', -1, 1) # 将类标这一列的文本表示替换成数字表示,就分了两类
X = df.iloc[0:100, [0, 2]].values # 获取前100行的第0列和第2列,即花瓣宽度和花萼宽度
print(X)
"""
对模型进行训练,查看训练时每次迭代的错误数量
"""
ppn= Perceptron(eta=0.1, n_iter=10)
ppn.fit(X,y) | [
"numpy.where",
"numpy.dot",
"numpy.zeros",
"pandas.read_csv"
] | [((1812, 1869), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/a1/Downloads/iris.data"""'], {'header': 'None'}), "('/Users/a1/Downloads/iris.data', header=None)\n", (1823, 1869), True, 'import pandas as pd\n'), ((1969, 2004), 'numpy.where', 'np.where', (["(y == 'Iris-setosa')", '(-1)', '(1)'], {}), "(y == 'Iris-setosa', -1, 1)\n", (1977, 2004), True, 'import numpy as np\n'), ((505, 529), 'numpy.zeros', 'np.zeros', (['(1 + X.shape[1])'], {}), '(1 + X.shape[1])\n', (513, 529), True, 'import numpy as np\n'), ((1550, 1572), 'numpy.dot', 'np.dot', (['X', 'self.w_[1:]'], {}), '(X, self.w_[1:])\n', (1556, 1572), True, 'import numpy as np\n')] |
# SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
"""
ARM 32-bit support
"""
import os
import re
from .arch import Architecture
class ARM(Architecture):
"""
ARMv7 (or earlier) target information - 32-bit little-endian
"""
_desc = 'ARM 32-bit, little-endian'
_alignment = 4
_word_size = 4
_phys_size = 4
_word_mask = 0xffffffff
_endianness = 'little'
_supports_64bit_data = False
# ident values used by RETURN_REGISTER payload
_regs = {
'r0': {'ident': 0x61},
'r1': {'ident': 0x62},
'r2': {'ident': 0x63},
'r3': {'ident': 0x64},
'r4': {'ident': 0x65},
'r5': {'ident': 0x66},
'r6': {'ident': 0x67},
'r7': {'ident': 0x68},
'r8': {'ident': 0x69},
'r9': {'ident': 0x6a, 'gd': True, 'alias': 'sb'},
'r10': {'ident': 0x6b},
'r11': {'ident': 0x6c, 'alias': 'fp'},
'r12': {'ident': 0x6d, 'alias': 'ip'},
'r13': {'ident': 0x6e, 'alias': 'sp'},
'r14': {'ident': 0x6f, 'alias': 'lr'},
'r15': {'ident': 0x70, 'alias': 'pc'},
}
_DA_ENTRY = re.compile(r"""
(?P<name>[a-zA-Z][a-zA-Z0-9]+)
\s?:\s?
(\[<)?
(?P<value>[0-9a-fA-F]{8})
(>\])?
""", re.VERBOSE)
@classmethod
def parse_data_abort(cls, text: str) -> dict:
"""
Parse ARM data abort output formatted as follows and return each field in a dict.
00000001:data abort
pc : [<8f7d8858>] lr : [<8f7d8801>]
reloc pc : [<17835858>] lr : [<17835801>]
sp : 8ed99718 ip : 00000000 fp : 00000001
r10: 00000001 r9 : 8eda2ea8 r8 : 00000001
r7 : 00000000 r6 : 00000004 r5 : 00000004 r4 : 00000001
r3 : 8ed9972c r2 : 020200b4 r1 : 8ed994ec r0 : 00000009
Flags: nZCv IRQs off FIQs off Mode SVC_32
Code: 2800f915 f04fd0cf e7ce30ff d10a2d04 (2000f8d8)
"""
ret = {}
for line in text.splitlines():
line = line.strip()
if line.startswith('Flags:'):
ret['flags'] = {}
for field in line.split(' '):
name, value = field.split(' ')
name = name.replace('Flags:', 'Asserted')
ret['flags'][name] = value
continue
elif line.startswith('Code:'):
code = line.split()
instructions = []
for instruction in code[1:]:
try:
instruction = instruction.replace('(', '').replace(')', '').strip()
instruction = int(instruction, 16)
instruction = instruction.to_bytes(cls.word_size, byteorder=cls.endianness)
instructions.append(instruction)
except ValueError as e:
msg = 'Invalid instruction or parse error: ' + str(e)
raise ValueError(msg)
ret['code'] = instructions
else:
if line.startswith('reloc '):
pfx = 'reloc '
line = line[len(pfx):]
else:
pfx = ''
for match in cls._DA_ENTRY.finditer(line):
regname, _ = cls.register(match.group('name'))
name = pfx + regname
value = match.group('value')
regs = ret.get('registers', {})
try:
regs[name] = int(value, 16)
except ValueError:
regs[name] = value
ret['registers'] = regs
if not ret:
msg = 'No data abort content found in the following text:' + os.linesep
msg += text
raise ValueError(msg)
return ret
| [
"re.compile"
] | [((1173, 1341), 're.compile', 're.compile', (['"""\n (?P<name>[a-zA-Z][a-zA-Z0-9]+)\n \\\\s?:\\\\s?\n (\\\\[<)?\n (?P<value>[0-9a-fA-F]{8})\n (>\\\\])?\n """', 're.VERBOSE'], {}), '(\n """\n (?P<name>[a-zA-Z][a-zA-Z0-9]+)\n \\\\s?:\\\\s?\n (\\\\[<)?\n (?P<value>[0-9a-fA-F]{8})\n (>\\\\])?\n """\n , re.VERBOSE)\n', (1183, 1341), False, 'import re\n')] |
from collections import defaultdict
import copy
def get_next(current, d, finish):
flag=False
if len(d[current])==1:
if d[current][0]==finish and len(d.keys())==1:
flag= True
else:
new_d = copy.deepcopy(d)
new_current = d[current][0]
new_d.pop(current)
flag=get_next(new_current, new_d, finish)
elif len(d[current])>1:
for index, c in enumerate(d[current]):
new_d = copy.deepcopy(d)
new_d[current].pop(index)
new_current = c
flag=get_next(new_current, new_d, finish)
return flag
def can_get_chained(input):
words = [(word[0], word[-1]) for word in input]
d = defaultdict(list)
for k, v in words:
d[k].append(v)
#Start with any word
start = list(d.items())[0][0]
return get_next(start, d, start)
def main():
can_get_chained(['eggs', 'karat', 'apple', 'snack', 'tuna'])
if __name__== "__main__":
main() | [
"collections.defaultdict",
"copy.deepcopy"
] | [((743, 760), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (754, 760), False, 'from collections import defaultdict\n'), ((238, 254), 'copy.deepcopy', 'copy.deepcopy', (['d'], {}), '(d)\n', (251, 254), False, 'import copy\n'), ((483, 499), 'copy.deepcopy', 'copy.deepcopy', (['d'], {}), '(d)\n', (496, 499), False, 'import copy\n')] |
import model
def izpis_igre(igra):
return (
f"Igraš igro vislic:\n" +
f"Narobe ugibane črke so: {igra.nepravilni_ugibi()}\n" +
f"Trenutno stanje besede: {igra.pravilni_del_gesla()}\n"
)
def izpis_poraza(igra):
return (
f"Izgubil si. Več sreče prihodnjič.\n" +
f"Narobe si uganil: {igra.nepravilni_ugibi()}\n" +
f"Pravilno si uganil: {igra.pravilni_del_gesla()}\n"
f"Pravilno geslo je bilo: {igra.geslo}\n"
)
def izpis_zmage(igra):
return (
f"Zmagal si. Bravo!\n" +
f"Narobe si uganil: {igra.nepravilni_ugibi()}\n" +
f"Pravilno si uganil: {igra.pravilni_del_gesla()}\n"
f"Pravilno geslo je bilo: {igra.geslo}\n"
)
def se_enkrat():
vnos = input("vnesi X, če želiš igrati še enkrat, in Y, če ne. ")
if vnos == "X":
return True
elif vnos == "Y":
return False
else:
print("Niste vnesli ne X ne Y. Vnesite še enkrat :) ")
return se_enkrat()
def pozeni_vmesnik():
igra = model.nova_igra(model.bazen_besed)
while True:
if igra.zmaga():
print(izpis_zmage(igra))
elif igra.poraz():
print(izpis_poraza(igra))
else:
print(izpis_igre(igra))
vnos = input("Vnesi novo črko: ")
igra.ugibaj(vnos)
se_enkrat_bool = se_enkrat()
if se_enkrat_bool:
pozeni_vmesnik()
pozeni_vmesnik() | [
"model.nova_igra"
] | [((1035, 1069), 'model.nova_igra', 'model.nova_igra', (['model.bazen_besed'], {}), '(model.bazen_besed)\n', (1050, 1069), False, 'import model\n')] |
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Neighborhood(models.Model):
name = models.CharField(max_length = 50)
location = models.ForeignKey('Location',on_delete = models.CASCADE,null = True)
admin = models.ForeignKey(User,on_delete = models.CASCADE)
occupants = models.IntegerField(null=True)
def __str__(self):
return self.name
def create_neighborhood(self):
self.save()
def delete_neighborhood(self):
self.delete()
@classmethod
def find_neighborhood(cls,neigborhood_id):
neighborhood = cls.objects.get(id = neigborhood_id)
return neighborhood
def update_neighborhood(self):
self.save()
def update_occupants(self):
self.occupants += 1
self.save()
class UserProfile(models.Model):
user = models.ForeignKey(User,on_delete = models.CASCADE,related_name = 'profile')
first_name = models.CharField(max_length = 50,null=True)
last_name = models.CharField(max_length = 50,null=True)
bio = models.TextField(null=True)
neighborhood = models.ForeignKey(Neighborhood,on_delete = models.CASCADE,null=True)
email = models.EmailField(max_length = 60,null=True)
profile_pic = CloudinaryField('profile/')
pub_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username
class Business(models.Model):
name = models.CharField(max_length = 60)
user = models.ForeignKey(User,on_delete = models.CASCADE,related_name = 'business_user')
description = models.CharField(max_length = 150,null=True)
neighborhood = models.ForeignKey(Neighborhood,on_delete = models.CASCADE,related_name = 'business_neighbourhood')
category = models.ForeignKey('Category',on_delete = models.CASCADE,null=True)
email = models.EmailField(max_length = 60)
def __str__(self):
return self.name
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def find_business(cls,business_id):
business = Business.objects.get(id = business_id)
return business
def update_business(self):
self.save()
class Post(models.Model):
title = models.CharField(max_length = 50)
content = models.TextField()
user = models.ForeignKey(User,on_delete = models.CASCADE)
neighborhood = models.ForeignKey(Neighborhood,on_delete = models.CASCADE)
type = models.CharField(max_length = 50,null=True)
pub_date = models.DateTimeField(auto_now_add=True,null=True)
def __str__(self):
return self.title
class Comment(models.Model):
comment = models.CharField(max_length = 300)
posted_on = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def save_comment(self):
self.save()
def delete_comment(self):
self.delete()
class Location(models.Model):
name = models.CharField(max_length = 40)
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length = 40)
def __str__(self):
return self.name | [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.DateTimeField",
"cloudinary.models.CloudinaryField",
"django.db.models.CharField"
] | [((192, 223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (208, 223), False, 'from django.db import models\n'), ((241, 307), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Location"""'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), "('Location', on_delete=models.CASCADE, null=True)\n", (258, 307), False, 'from django.db import models\n'), ((322, 371), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (339, 371), False, 'from django.db import models\n'), ((389, 419), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (408, 419), False, 'from django.db import models\n'), ((919, 992), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""profile"""'}), "(User, on_delete=models.CASCADE, related_name='profile')\n", (936, 992), False, 'from django.db import models\n'), ((1012, 1054), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)'}), '(max_length=50, null=True)\n', (1028, 1054), False, 'from django.db import models\n'), ((1072, 1114), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)'}), '(max_length=50, null=True)\n', (1088, 1114), False, 'from django.db import models\n'), ((1126, 1153), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (1142, 1153), False, 'from django.db import models\n'), ((1173, 1241), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Neighborhood'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Neighborhood, on_delete=models.CASCADE, null=True)\n', (1190, 1241), False, 'from django.db import models\n'), ((1254, 1297), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(60)', 'null': '(True)'}), '(max_length=60, null=True)\n', (1271, 1297), False, 'from django.db import models\n'), ((1317, 1344), 'cloudinary.models.CloudinaryField', 'CloudinaryField', (['"""profile/"""'], {}), "('profile/')\n", (1332, 1344), False, 'from cloudinary.models import CloudinaryField\n'), ((1360, 1399), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1380, 1399), False, 'from django.db import models\n'), ((1505, 1536), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (1521, 1536), False, 'from django.db import models\n'), ((1550, 1629), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""business_user"""'}), "(User, on_delete=models.CASCADE, related_name='business_user')\n", (1567, 1629), False, 'from django.db import models\n'), ((1650, 1693), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'null': '(True)'}), '(max_length=150, null=True)\n', (1666, 1693), False, 'from django.db import models\n'), ((1714, 1815), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Neighborhood'], {'on_delete': 'models.CASCADE', 'related_name': '"""business_neighbourhood"""'}), "(Neighborhood, on_delete=models.CASCADE, related_name=\n 'business_neighbourhood')\n", (1731, 1815), False, 'from django.db import models\n'), ((1828, 1894), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Category"""'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), "('Category', on_delete=models.CASCADE, null=True)\n", (1845, 1894), False, 'from django.db import models\n'), ((1907, 1939), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (1924, 1939), False, 'from django.db import models\n'), ((2328, 2359), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2344, 2359), False, 'from django.db import models\n'), ((2376, 2394), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2392, 2394), False, 'from django.db import models\n'), ((2406, 2455), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (2423, 2455), False, 'from django.db import models\n'), ((2476, 2533), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Neighborhood'], {'on_delete': 'models.CASCADE'}), '(Neighborhood, on_delete=models.CASCADE)\n', (2493, 2533), False, 'from django.db import models\n'), ((2546, 2588), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)'}), '(max_length=50, null=True)\n', (2562, 2588), False, 'from django.db import models\n'), ((2605, 2655), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (2625, 2655), False, 'from django.db import models\n'), ((2749, 2781), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (2765, 2781), False, 'from django.db import models\n'), ((2800, 2835), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2820, 2835), False, 'from django.db import models\n'), ((2847, 2896), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (2864, 2896), False, 'from django.db import models\n'), ((3041, 3072), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (3057, 3072), False, 'from django.db import models\n'), ((3166, 3197), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (3182, 3197), False, 'from django.db import models\n')] |
from abc import ABC, abstractmethod
from typing import Protocol, Callable
from aws_lambda_powertools import Tracer
tracer = Tracer()
class UpdateTable(Protocol):
update_item: Callable
class UpdateAdapter(ABC):
@abstractmethod
def update(self, path: str) -> int:
"""return hitCount for the given path"""
class DdbUpdateAdapter(UpdateAdapter):
def __init__(self, table: UpdateTable):
self.table = table
@tracer.capture_method
def update(self, path: str) -> int:
resp = self.table.update_item(
Key={ 'PK': path },
UpdateExpression='ADD hitCount :v',
ExpressionAttributeValues={
':v': 1
},
ReturnValues='UPDATED_NEW',
)
return int(resp['Attributes']['hitCount']) | [
"aws_lambda_powertools.Tracer"
] | [((125, 133), 'aws_lambda_powertools.Tracer', 'Tracer', ([], {}), '()\n', (131, 133), False, 'from aws_lambda_powertools import Tracer\n')] |
########################################################################
# Copyright 2021, UChicago Argonne, LLC
#
# Licensed under the BSD-3 License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a
# copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
"""
date: 2021-08-12
author: matz
Cheng-Todreas correlation for flow split (1986)
"""
########################################################################
import numpy as np
from . import friction_ctd as ctd
applicability = ctd.applicability
########################################################################
# MODULE-WIDE CONSTANTS
_GAMMA = 1 / 3.0
_M = ctd._m
_EXP1 = {}
_EXP2 = {}
for regime in ctd._m.keys():
_EXP1[regime] = (1 + ctd._m[regime]) / (2 - ctd._m[regime])
_EXP2[regime] = 1 / (2 - ctd._m[regime])
########################################################################
def calculate_flow_split(asm_obj, regime=None, beta=1.0):
"""Calculate the flow split into the different types of
subchannels based on the Cheng-Todreas model
Parameters
----------
asm_obj : DASSH Assembly object
Contains the geometric description of the assembly
regime : str or NoneType
Indicate flow regime for which to calculate flow split
{'turbulent', 'laminar', None}; default = None
beta : float
Beta is a factor used to combine the laminar and turbulent
flowpslit terms in the transition region. It comes from
Cheng's 1984 thesis in which he recommends a value of
0.05. There, Figure 4.19 shows the edge flowsplit assuming
beta=0.05. However, in reality beta=0.05 gives weird results
and beta=1.0 matches what's shown in the figure. Therefore,
it'set to 1.0 here by default.
Returns
-------
numpy.ndarray
Flow split between interior, edge, and corner coolant
subchannels
"""
try:
Re_bnds = asm_obj.corr_constants['fs']['Re_bnds']
except (KeyError, AttributeError):
Re_bnds = ctd.calculate_Re_bounds(asm_obj)
try:
Cf = asm_obj.corr_constants['fs']['Cf_sc']
except (KeyError, AttributeError):
Cf = ctd.calculate_subchannel_friction_factor_const(asm_obj)
if regime is not None:
return _calculate_flow_split(asm_obj, Cf, regime, Re_bnds, beta=beta)
elif asm_obj.coolant_int_params['Re'] <= Re_bnds[0]:
return _calculate_flow_split(asm_obj, Cf, 'laminar')
elif asm_obj.coolant_int_params['Re'] >= Re_bnds[1]:
return _calculate_flow_split(asm_obj, Cf, 'turbulent')
else:
return _calculate_flow_split(asm_obj, Cf, 'transition', Re_bnds, beta)
def _calculate_flow_split(asm_obj, Cf_dict, regime, Re_bnds=None, beta=1.0):
"""Worker function to calculate the flow split into the
different types of subchannels based on the Cheng-Todreas
model.
Parameters
----------
asm_obj : DASSH Assembly object
Contains the geometric description of the assembly
Cf_dict : dict
Dictionary containing subchannel friction factor constants;
keys: ['laminar', 'turbulent']
regime : str {'laminar', 'turbulent', 'transition'}
Flow regime with which to evaluate flow split ratios
Re_bnds : list (optional)
Reynolds number flow regime boundaries for calculating
intermittency factor in transition regime
beta : float
Beta is a factor used to combine the laminar and turbulent
flowpslit terms in the transition region. It comes from
Cheng's 1984 thesis in which he recommends a value of
0.05. There, Figure 4.19 shows the edge flowsplit assuming
beta=0.05. However, in reality beta=0.05 gives weird results
and beta=1.0 matches what's shown in the figure. Therefore,
it'set to 1.0 here by default.
Returns
-------
numpy.ndarray
Flow split between interior, edge, and corner coolant
subchannels
Notes
-----
This method is imported by the flow split model in the
Upgraded Cheng-Todreas correlation (flowsplit_uctd)
"""
if regime == 'transition':
try:
na = asm_obj.corr_constants['fs']['na']
except (KeyError, AttributeError):
na = [asm_obj.subchannel.n_sc['coolant']['interior']
* asm_obj.params['area'][0],
asm_obj.subchannel.n_sc['coolant']['edge']
* asm_obj.params['area'][1],
asm_obj.subchannel.n_sc['coolant']['corner']
* asm_obj.params['area'][2]]
flow_split = np.zeros(3)
intf_b = ctd.calc_intermittency_factor(
asm_obj, Re_bnds[0], Re_bnds[1])
xratio_t = asm_obj.corr_constants['fs']['xr']['transition'].copy()
xratio_t[0] = (xratio_t[0]
* (1 - intf_b)**_GAMMA
/ asm_obj.coolant_int_params['Re'])
xratio_t[1] = (xratio_t[1]
* intf_b**_GAMMA
/ asm_obj.coolant_int_params['Re']**_M['turbulent']
)**_EXP2['turbulent']
# xratio = xratio_t1 + beta * xratio_t2
xratio = xratio_t[0] + beta * xratio_t[1]
x1x2 = xratio[1] / xratio[0] # Equation 4.51 in Cheng 1984
x3x2 = xratio[1] / xratio[2] # Equation 4.51 in Cheng 1984
flow_split[1] = (asm_obj.bundle_params['area']
/ (na[1] + x1x2 * na[0] + x3x2 * na[2]))
flow_split[0] = x1x2 * flow_split[1]
flow_split[2] = x3x2 * flow_split[1]
else:
flow_split = asm_obj.corr_constants['fs']['fs'][regime]
# x1x2 = asm_obj.corr_constants['fs']['xr'][regime][0]
# x3x2 = asm_obj.corr_constants['fs']['xr'][regime][1]
#
# # Flow split to subchannel type 2
# flow_split[1] = (asm_obj.bundle_params['area']
# / (na[1] + x1x2 * na[0] + x3x2 * na[2]))
# flow_split[0] = x1x2 * flow_split[1]
# flow_split[2] = x3x2 * flow_split[1]
return flow_split
def calc_constants(asm_obj):
"""Calculate constants needed by the CTD flowsplit calculation"""
const = ctd.calc_constants(asm_obj)
del const['Cf_b']
# Total subchannel area for each subchannel type
const['na'] = [asm_obj.subchannel.n_sc['coolant']['interior']
* asm_obj.params['area'][0],
asm_obj.subchannel.n_sc['coolant']['edge']
* asm_obj.params['area'][1],
asm_obj.subchannel.n_sc['coolant']['corner']
* asm_obj.params['area'][2]]
# REGIME RATIO CONSTANTS
const['xr'] = _calc_regime_ratio_constants(asm_obj, const['Cf_sc'])
# # Transition regime
# const['xr'] = {}
# const['xr']['transition'] = np.array([
# (const['Cf_sc']['laminar']
# * asm_obj.bundle_params['de']
# / asm_obj.params['de']**2),
# (const['Cf_sc']['turbulent']
# * asm_obj.bundle_params['de']**_M['turbulent']
# / asm_obj.params['de']**(_M['turbulent'] + 1))
# ])
#
# # Laminar/turbulent regime
# for k in ['laminar', 'turbulent']:
# const['xr'][k] = np.array([
# ((asm_obj.params['de'][0] / asm_obj.params['de'][1])**_EXP1[k]
# * (const['Cf_sc'][k][1] / const['Cf_sc'][k][0])**_EXP2[k]),
# ((asm_obj.params['de'][2] / asm_obj.params['de'][1])**_EXP1[k]
# * (const['Cf_sc'][k][1] / const['Cf_sc'][k][2])**_EXP2[k])
# ])
# Laminar/turbulent: constant flow split!
const['fs'] = _calc_constant_flowsplits(asm_obj, const)
# const['fs'] = {}
# for k in ['laminar', 'turbulent']:
# const['fs'][k] = np.zeros(3)
# const['fs'][k][1] = (asm_obj.bundle_params['area']
# / (const['na'][1]
# + const['xr'][k][0] * const['na'][0]
# + const['xr'][k][1] * const['na'][2]))
# const['fs'][k][0] = const['xr'][k][0] * const['fs'][k][1]
# const['fs'][k][2] = const['xr'][k][1] * const['fs'][k][1]
return const
def _calc_regime_ratio_constants(asm_obj, Cf_sc):
"""Constant ratios for laminar, turbulent, and transition regimes"""
xr = {}
xr['transition'] = np.array([
(Cf_sc['laminar']
* asm_obj.bundle_params['de']
/ asm_obj.params['de']**2),
(Cf_sc['turbulent']
* asm_obj.bundle_params['de']**_M['turbulent']
/ asm_obj.params['de']**(_M['turbulent'] + 1))
])
# Laminar/turbulent regime
for k in ['laminar', 'turbulent']:
xr[k] = np.array([
((asm_obj.params['de'][0] / asm_obj.params['de'][1])**_EXP1[k]
* (Cf_sc[k][1] / Cf_sc[k][0])**_EXP2[k]),
((asm_obj.params['de'][2] / asm_obj.params['de'][1])**_EXP1[k]
* (Cf_sc[k][1] / Cf_sc[k][2])**_EXP2[k])
])
return xr
def _calc_constant_flowsplits(asm_obj, const):
"""Laminar and turbulent flowsplits are constant"""
fs = {}
for k in ['laminar', 'turbulent']:
fs[k] = np.zeros(3)
fs[k][1] = (asm_obj.bundle_params['area']
/ (const['na'][1]
+ const['xr'][k][0] * const['na'][0]
+ const['xr'][k][1] * const['na'][2]))
fs[k][0] = const['xr'][k][0] * fs[k][1]
fs[k][2] = const['xr'][k][1] * fs[k][1]
return fs
| [
"numpy.array",
"numpy.zeros"
] | [((8731, 8944), 'numpy.array', 'np.array', (["[Cf_sc['laminar'] * asm_obj.bundle_params['de'] / asm_obj.params['de'] ** 2,\n Cf_sc['turbulent'] * asm_obj.bundle_params['de'] ** _M['turbulent'] / \n asm_obj.params['de'] ** (_M['turbulent'] + 1)]"], {}), "([Cf_sc['laminar'] * asm_obj.bundle_params['de'] / asm_obj.params[\n 'de'] ** 2, Cf_sc['turbulent'] * asm_obj.bundle_params['de'] ** _M[\n 'turbulent'] / asm_obj.params['de'] ** (_M['turbulent'] + 1)])\n", (8739, 8944), True, 'import numpy as np\n'), ((5058, 5069), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5066, 5069), True, 'import numpy as np\n'), ((9077, 9313), 'numpy.array', 'np.array', (["[(asm_obj.params['de'][0] / asm_obj.params['de'][1]) ** _EXP1[k] * (Cf_sc[k\n ][1] / Cf_sc[k][0]) ** _EXP2[k], (asm_obj.params['de'][2] / asm_obj.\n params['de'][1]) ** _EXP1[k] * (Cf_sc[k][1] / Cf_sc[k][2]) ** _EXP2[k]]"], {}), "([(asm_obj.params['de'][0] / asm_obj.params['de'][1]) ** _EXP1[k] *\n (Cf_sc[k][1] / Cf_sc[k][0]) ** _EXP2[k], (asm_obj.params['de'][2] /\n asm_obj.params['de'][1]) ** _EXP1[k] * (Cf_sc[k][1] / Cf_sc[k][2]) **\n _EXP2[k]])\n", (9085, 9313), True, 'import numpy as np\n'), ((9544, 9555), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9552, 9555), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import sys
import re
import fnmatch
import subprocess
import tempfile
import Utils
def GetFiles(dir, filePattern):
paths = []
for root, dirs, files in os.walk(dir):
for file in files:
if fnmatch.fnmatch(os.path.join(root, file), filePattern):
paths.append(os.path.join(root, file))
return paths
def ParseArgs(args):
filePattern = None
strPattern = None
ignoreCase = False
i = 0
count = len(args)
while i < count:
arg = args[i]
if Utils.IsSwitch(arg) and arg[1:].lower().startswith('f') and i + 1 < count:
filePattern = args[i + 1]
i += 1
elif Utils.IsSwitch(arg) and arg[1:].lower().startswith('s') and i + 1 < count:
strPattern = args[i + 1]
i += 1
elif Utils.IsSwitch(arg) and arg[1:].lower().startswith('i'):
ignoreCase = True
i += 1
if 'VS90COMNTOOLS' not in os.environ:
print('ERROR: VS90COMNTOOLS not defined in the environment!')
sys.exit(1)
if not filePattern or not strPattern:
print('ERROR: Specify a file pattern and a search string with -f and -s!')
sys.exit(1)
return filePattern, strPattern, ignoreCase
def DumpBin(filename):
filehandle, tmpfilename = tempfile.mkstemp()
os.close(filehandle)
vcvars = os.path.normpath( os.path.join( os.environ['VS90COMNTOOLS'], '..', '..', 'vc', 'bin', 'vcvars32.bat' ) )
command = '"%s" && dumpbin.exe /all /out:%s %s' % (vcvars, tmpfilename, filename)
print(command)
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p.wait()
return tmpfilename
if __name__ == '__main__':
filePattern, strPattern, ignoreCase = ParseArgs(sys.argv)
for filename in GetFiles(os.getcwd(), filePattern):
output_file_name = DumpBin(filename)
printHeader = False
with open(output_file_name) as file:
for line in file.readlines():
match = re.search(strPattern, line, re.I if ignoreCase else 0)
if not match:
continue
if not printHeader:
printHeader = True
print(('%s:' % filename))
print(('\t%s' % line.strip('\n')))
os.remove(output_file_name)
| [
"re.search",
"os.close",
"subprocess.Popen",
"os.path.join",
"os.getcwd",
"sys.exit",
"tempfile.mkstemp",
"Utils.IsSwitch",
"os.walk",
"os.remove"
] | [((205, 217), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (212, 217), False, 'import os\n'), ((1406, 1424), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (1422, 1424), False, 'import tempfile\n'), ((1430, 1450), 'os.close', 'os.close', (['filehandle'], {}), '(filehandle)\n', (1438, 1450), False, 'import os\n'), ((1688, 1780), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.STDOUT)\n', (1704, 1780), False, 'import subprocess\n'), ((1119, 1130), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1127, 1130), False, 'import sys\n'), ((1277, 1288), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1285, 1288), False, 'import sys\n'), ((1485, 1571), 'os.path.join', 'os.path.join', (["os.environ['VS90COMNTOOLS']", '""".."""', '""".."""', '"""vc"""', '"""bin"""', '"""vcvars32.bat"""'], {}), "(os.environ['VS90COMNTOOLS'], '..', '..', 'vc', 'bin',\n 'vcvars32.bat')\n", (1497, 1571), False, 'import os\n'), ((1947, 1958), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1956, 1958), False, 'import os\n'), ((2464, 2491), 'os.remove', 'os.remove', (['output_file_name'], {}), '(output_file_name)\n', (2473, 2491), False, 'import os\n'), ((591, 610), 'Utils.IsSwitch', 'Utils.IsSwitch', (['arg'], {}), '(arg)\n', (605, 610), False, 'import Utils\n'), ((279, 303), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (291, 303), False, 'import os\n'), ((739, 758), 'Utils.IsSwitch', 'Utils.IsSwitch', (['arg'], {}), '(arg)\n', (753, 758), False, 'import Utils\n'), ((2163, 2217), 're.search', 're.search', (['strPattern', 'line', '(re.I if ignoreCase else 0)'], {}), '(strPattern, line, re.I if ignoreCase else 0)\n', (2172, 2217), False, 'import re\n'), ((349, 373), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (361, 373), False, 'import os\n'), ((886, 905), 'Utils.IsSwitch', 'Utils.IsSwitch', (['arg'], {}), '(arg)\n', (900, 905), False, 'import Utils\n')] |
"""
Expands a bash-style brace expression, and outputs each expansion.
Licensed under MIT
Copyright (c) 2018 - 2020 <NAME> <<EMAIL>>
Copyright (c) 2021 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import argparse
import bracex
def main(argv=None):
"""Accept command line arguments and output brace expansion to stdout."""
parser = argparse.ArgumentParser(
prog='python -m bracex',
description='Expands a bash-style brace expression, and outputs each expansion.',
allow_abbrev=False,
)
parser.add_argument(
'expression',
help="Brace expression to expand",
)
terminators = parser.add_mutually_exclusive_group()
terminators.add_argument(
'--terminator', '-t',
default='\n',
metavar='STR',
help="Terminate each expansion with string STR (default: \\n)",
)
terminators.add_argument(
'-0',
action='store_const',
const='\0',
dest='terminator',
help="Terminate each expansion with a NUL character",
)
parser.add_argument(
'--version',
action='version',
version=bracex.__version__,
)
args = parser.parse_args(argv)
for expansion in bracex.iexpand(args.expression, limit=0):
print(expansion, end=args.terminator)
raise SystemExit(0)
if __name__ == '__main__':
main() # pragma: no cover
| [
"bracex.iexpand",
"argparse.ArgumentParser"
] | [((1342, 1501), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""python -m bracex"""', 'description': '"""Expands a bash-style brace expression, and outputs each expansion."""', 'allow_abbrev': '(False)'}), "(prog='python -m bracex', description=\n 'Expands a bash-style brace expression, and outputs each expansion.',\n allow_abbrev=False)\n", (1365, 1501), False, 'import argparse\n'), ((2220, 2260), 'bracex.iexpand', 'bracex.iexpand', (['args.expression'], {'limit': '(0)'}), '(args.expression, limit=0)\n', (2234, 2260), False, 'import bracex\n')] |
#!/usr/bin/env python3
import csv
import os
import requests
from bs4 import BeautifulSoup
from dateutil.parser import parse
def print_details(url, csv_filename, years):
"""
Parsing the scientific publications from the web site and
export the list in a CSV file
"""
item_year = item_journal = item_doi = item_title = ""
with open(csv_filename, "w", newline="") as csvfile:
# Header of the CSV file
fieldnames = ["Author(s)", "Year", "Title", "Journal", "DOI"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
index = 0
rn = requests.get(url)
soup = BeautifulSoup(rn.text, "lxml")
gdp_table = soup.find("table", {"class": "views-table"})
if gdp_table.findAll("tr") is not None:
for row in gdp_table.findAll("tr"):
for col in row.findAll("td"):
# Getting the title of the publication
item_title = (
col.text[0 : col.text.find("Article reference")]
).strip()
for li in row.findAll("li"):
if li.b is not None:
# Getting the Journal of the publication
if "Article reference:" in li.text:
item = li.text.split()
item_journal = item[2] + " " + item[3] + " " + item[4]
# Getting the year of the publication
if "Publication date:" in li.text:
item_year = li.text.split()[-1]
item_year = parse(item_year).year
if item_year in years:
# print(item_title)
# print(item_year)
# print(item_journal)
# print(item_doi)
writer.writerow(
{
"Author(s)": "ALICE Collaboration",
"Year": item_year,
"Title": item_title,
"Journal": item_journal,
"DOI": item_doi,
}
)
index = index + 1
def main():
print("- Parsing publications in progress...", end="")
url = (
"https://alice-publications.web.cern.ch/publications"
"?title=&field_draft_pub_date_value%5Bmin%5D="
"&field_draft_pub_date_value%5Bmax%5D=&items_per_page=100"
)
csv_filename = "publications.csv"
years = [2016, 2017, 2018, 2019]
print_details(url, csv_filename, years)
if os.stat(csv_filename).st_size > 34:
print("[OK]")
else:
print("[WARNING]")
print("No publications found in the list!")
if __name__ == "__main__":
main()
| [
"csv.DictWriter",
"dateutil.parser.parse",
"requests.get",
"bs4.BeautifulSoup",
"os.stat"
] | [((522, 568), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (536, 568), False, 'import csv\n'), ((630, 647), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (642, 647), False, 'import requests\n'), ((663, 693), 'bs4.BeautifulSoup', 'BeautifulSoup', (['rn.text', '"""lxml"""'], {}), "(rn.text, 'lxml')\n", (676, 693), False, 'from bs4 import BeautifulSoup\n'), ((2787, 2808), 'os.stat', 'os.stat', (['csv_filename'], {}), '(csv_filename)\n', (2794, 2808), False, 'import os\n'), ((1708, 1724), 'dateutil.parser.parse', 'parse', (['item_year'], {}), '(item_year)\n', (1713, 1724), False, 'from dateutil.parser import parse\n')] |
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from rl.dataset import ReplayBuffer, RandomSampler
from rl.base_agent import BaseAgent
from rl.policies.mlp_actor_critic import MlpActor, MlpCritic
from util.logger import logger
from util.mpi import mpi_average
from util.pytorch import optimizer_cuda, count_parameters, \
compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, \
obs2tensor, to_tensor
from env.action_spec import ActionSpec
class MetaPPOAgent(BaseAgent):
""" Meta policy class. """
def __init__(self, config, ob_space):
super().__init__(config, ob_space)
if config.meta is None:
logger.warn('Creating a dummy meta policy.')
return
# parse body parts and skills
if config.subdiv:
# subdiv = ob1,ob2-ac1/ob3,ob4-ac2/...
clusters = config.subdiv.split('/')
clusters = [cluster.split('-')[1].split(',') for cluster in clusters]
else:
clusters = [ob_space.keys()]
if config.subdiv_skills:
subdiv_skills = config.subdiv_skills.split('/')
subdiv_skills = [skills.split(',') for skills in subdiv_skills]
else:
subdiv_skills = [['primitive']] * len(clusters)
self.subdiv_skills = subdiv_skills
assert len(subdiv_skills) == len(clusters), \
'subdiv_skills and clusters have different # subdivisions'
if config.meta == 'hard':
ac_space = ActionSpec(size=0)
for cluster, skills in zip(clusters, subdiv_skills):
ac_space.add(','.join(cluster), 'discrete', len(skills), 0, 1)
self.ac_space = ac_space
if config.diayn:
ob_clusters = config.subdiv.split('/')
ob_clusters = [cluster.split('-')[0].split(',') for cluster in ob_clusters]
for cluster, skills in zip(ob_clusters, subdiv_skills):
self.ac_space.add(','.join(cluster) + '_diayn', 'continuous', config.z_dim, 0, 1)
# build up networks
self._actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._old_actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._critic = MlpCritic(config, ob_space)
self._network_cuda(config.device)
self._actor_optim = optim.Adam(self._actor.parameters(), lr=config.lr_actor)
self._critic_optim = optim.Adam(self._critic.parameters(), lr=config.lr_critic)
sampler = RandomSampler()
self._buffer = ReplayBuffer(['ob', 'ac', 'done', 'rew', 'ret', 'adv',
'ac_before_activation', 'log_prob'],
config.buffer_size,
sampler.sample_func)
if config.is_chef:
logger.warn('Creating a meta PPO agent')
logger.info('The actor has %d parameters', count_parameters(self._actor))
logger.info('The critic has %d parameters', count_parameters(self._critic))
def store_episode(self, rollouts):
""" Stores @rollouts to replay buffer. """
self._compute_gae(rollouts)
self._buffer.store_episode(rollouts)
def _compute_gae(self, rollouts):
""" Computes GAE from @rollouts. """
T = len(rollouts['done'])
ob = rollouts['ob']
ob = self.normalize(ob)
ob = obs2tensor(ob, self._config.device)
vpred = self._critic(ob).detach().cpu().numpy()[:,0]
assert len(vpred) == T + 1
done = rollouts['done']
rew = rollouts['rew']
adv = np.empty((T, ) , 'float32')
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1 - done[t]
delta = rew[t] + self._config.discount_factor * vpred[t + 1] * nonterminal - vpred[t]
adv[t] = lastgaelam = delta + self._config.discount_factor * self._config.gae_lambda * nonterminal * lastgaelam
ret = adv + vpred[:-1]
assert np.isfinite(adv).all()
assert np.isfinite(ret).all()
# update rollouts
if adv.std() == 0:
rollouts['adv'] = (adv * 0).tolist()
else:
rollouts['adv'] = ((adv - adv.mean()) / adv.std()).tolist()
rollouts['ret'] = ret.tolist()
def state_dict(self):
if self._config.meta is None:
return {}
return {
'actor_state_dict': self._actor.state_dict(),
'critic_state_dict': self._critic.state_dict(),
'actor_optim_state_dict': self._actor_optim.state_dict(),
'critic_optim_state_dict': self._critic_optim.state_dict(),
'ob_norm_state_dict': self._ob_norm.state_dict(),
}
def load_state_dict(self, ckpt):
if self._config.meta is None:
return
self._actor.load_state_dict(ckpt['actor_state_dict'])
self._critic.load_state_dict(ckpt['critic_state_dict'])
self._ob_norm.load_state_dict(ckpt['ob_norm_state_dict'])
self._network_cuda(self._config.device)
self._actor_optim.load_state_dict(ckpt['actor_optim_state_dict'])
self._critic_optim.load_state_dict(ckpt['critic_optim_state_dict'])
optimizer_cuda(self._actor_optim, self._config.device)
optimizer_cuda(self._critic_optim, self._config.device)
def _network_cuda(self, device):
self._actor.to(device)
self._old_actor.to(device)
self._critic.to(device)
def sync_networks(self):
sync_networks(self._actor)
sync_networks(self._critic)
def train(self):
self._copy_target_network(self._old_actor, self._actor)
for _ in range(self._config.num_batches):
transitions = self._buffer.sample(self._config.batch_size)
train_info = self._update_network(transitions)
self._buffer.clear()
train_info.update({
'actor_grad_norm': compute_gradient_norm(self._actor),
'actor_weight_norm': compute_weight_norm(self._actor),
'critic_grad_norm': compute_gradient_norm(self._critic),
'critic_weight_norm': compute_weight_norm(self._critic),
})
return train_info
def _update_network(self, transitions):
info = {}
# pre-process observations
o = transitions['ob']
o = self.normalize(o)
bs = len(transitions['done'])
_to_tensor = lambda x: to_tensor(x, self._config.device)
o = _to_tensor(o)
ac = _to_tensor(transitions['ac'])
z = _to_tensor(transitions['ac_before_activation'])
ret = _to_tensor(transitions['ret']).reshape(bs, 1)
adv = _to_tensor(transitions['adv']).reshape(bs, 1)
old_log_pi = _to_tensor(transitions['log_prob']).reshape(bs, 1)
log_pi, ent = self._actor.act_log(o, z)
if (log_pi - old_log_pi).max() > 20:
print('(log_pi - old_log_pi) is too large', (log_pi - old_log_pi).max())
import ipdb; ipdb.set_trace()
# the actor loss
entropy_loss = self._config.entropy_loss_coeff * ent.mean()
ratio = torch.exp(torch.clamp(log_pi - old_log_pi, -20, 20))
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - self._config.clip_param,
1.0 + self._config.clip_param) * adv
actor_loss = -torch.min(surr1, surr2).mean()
if not np.isfinite(ratio.cpu().detach()).all() or not np.isfinite(adv.cpu().detach()).all():
import ipdb; ipdb.set_trace()
info['entropy_loss'] = entropy_loss.cpu().item()
info['actor_loss'] = actor_loss.cpu().item()
actor_loss += entropy_loss
discriminator_loss = self._actor.discriminator_loss()
if discriminator_loss is not None:
actor_loss += discriminator_loss * self._config.discriminator_loss_weight
info['discriminator_loss'] = discriminator_loss.cpu().item()
# the q loss
value_pred = self._critic(o)
value_loss = self._config.value_loss_coeff * (ret - value_pred).pow(2).mean()
info['value_target'] = ret.mean().cpu().item()
info['value_predicted'] = value_pred.mean().cpu().item()
info['value_loss'] = value_loss.cpu().item()
# update the actor
self._actor_optim.zero_grad()
actor_loss.backward()
sync_grads(self._actor)
self._actor_optim.step()
# update the critic
self._critic_optim.zero_grad()
value_loss.backward()
sync_grads(self._critic)
self._critic_optim.step()
# include info from policy
info.update(self._actor.info)
return mpi_average(info)
def act(self, ob, is_train=True):
"""
Returns a set of actions and the actors' activations given an observation @ob.
"""
if self._config.meta:
ob = self.normalize(ob)
return self._actor.act(ob, is_train, return_log_prob=True)
else:
return [0], None, None
| [
"util.pytorch.compute_gradient_norm",
"rl.dataset.ReplayBuffer",
"util.pytorch.obs2tensor",
"env.action_spec.ActionSpec",
"torch.min",
"rl.policies.mlp_actor_critic.MlpActor",
"numpy.isfinite",
"util.pytorch.sync_networks",
"rl.policies.mlp_actor_critic.MlpCritic",
"util.pytorch.count_parameters",... | [((2111, 2166), 'rl.policies.mlp_actor_critic.MlpActor', 'MlpActor', (['config', 'ob_space', 'ac_space'], {'tanh_policy': '(False)'}), '(config, ob_space, ac_space, tanh_policy=False)\n', (2119, 2166), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2193, 2248), 'rl.policies.mlp_actor_critic.MlpActor', 'MlpActor', (['config', 'ob_space', 'ac_space'], {'tanh_policy': '(False)'}), '(config, ob_space, ac_space, tanh_policy=False)\n', (2201, 2248), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2272, 2299), 'rl.policies.mlp_actor_critic.MlpCritic', 'MlpCritic', (['config', 'ob_space'], {}), '(config, ob_space)\n', (2281, 2299), False, 'from rl.policies.mlp_actor_critic import MlpActor, MlpCritic\n'), ((2535, 2550), 'rl.dataset.RandomSampler', 'RandomSampler', ([], {}), '()\n', (2548, 2550), False, 'from rl.dataset import ReplayBuffer, RandomSampler\n'), ((2574, 2715), 'rl.dataset.ReplayBuffer', 'ReplayBuffer', (["['ob', 'ac', 'done', 'rew', 'ret', 'adv', 'ac_before_activation', 'log_prob']", 'config.buffer_size', 'sampler.sample_func'], {}), "(['ob', 'ac', 'done', 'rew', 'ret', 'adv',\n 'ac_before_activation', 'log_prob'], config.buffer_size, sampler.\n sample_func)\n", (2586, 2715), False, 'from rl.dataset import ReplayBuffer, RandomSampler\n'), ((3434, 3469), 'util.pytorch.obs2tensor', 'obs2tensor', (['ob', 'self._config.device'], {}), '(ob, self._config.device)\n', (3444, 3469), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((3643, 3668), 'numpy.empty', 'np.empty', (['(T,)', '"""float32"""'], {}), "((T,), 'float32')\n", (3651, 3668), True, 'import numpy as np\n'), ((5260, 5314), 'util.pytorch.optimizer_cuda', 'optimizer_cuda', (['self._actor_optim', 'self._config.device'], {}), '(self._actor_optim, self._config.device)\n', (5274, 5314), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((5323, 5378), 'util.pytorch.optimizer_cuda', 'optimizer_cuda', (['self._critic_optim', 'self._config.device'], {}), '(self._critic_optim, self._config.device)\n', (5337, 5378), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((5553, 5579), 'util.pytorch.sync_networks', 'sync_networks', (['self._actor'], {}), '(self._actor)\n', (5566, 5579), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((5588, 5615), 'util.pytorch.sync_networks', 'sync_networks', (['self._critic'], {}), '(self._critic)\n', (5601, 5615), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((8410, 8433), 'util.pytorch.sync_grads', 'sync_grads', (['self._actor'], {}), '(self._actor)\n', (8420, 8433), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((8573, 8597), 'util.pytorch.sync_grads', 'sync_grads', (['self._critic'], {}), '(self._critic)\n', (8583, 8597), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((8722, 8739), 'util.mpi.mpi_average', 'mpi_average', (['info'], {}), '(info)\n', (8733, 8739), False, 'from util.mpi import mpi_average\n'), ((693, 737), 'util.logger.logger.warn', 'logger.warn', (['"""Creating a dummy meta policy."""'], {}), "('Creating a dummy meta policy.')\n", (704, 737), False, 'from util.logger import logger\n'), ((1529, 1547), 'env.action_spec.ActionSpec', 'ActionSpec', ([], {'size': '(0)'}), '(size=0)\n', (1539, 1547), False, 'from env.action_spec import ActionSpec\n'), ((2856, 2896), 'util.logger.logger.warn', 'logger.warn', (['"""Creating a meta PPO agent"""'], {}), "('Creating a meta PPO agent')\n", (2867, 2896), False, 'from util.logger import logger\n'), ((6480, 6513), 'util.pytorch.to_tensor', 'to_tensor', (['x', 'self._config.device'], {}), '(x, self._config.device)\n', (6489, 6513), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((7040, 7056), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7054, 7056), False, 'import ipdb\n'), ((7177, 7218), 'torch.clamp', 'torch.clamp', (['(log_pi - old_log_pi)', '(-20)', '(20)'], {}), '(log_pi - old_log_pi, -20, 20)\n', (7188, 7218), False, 'import torch\n'), ((7264, 7349), 'torch.clamp', 'torch.clamp', (['ratio', '(1.0 - self._config.clip_param)', '(1.0 + self._config.clip_param)'], {}), '(ratio, 1.0 - self._config.clip_param, 1.0 + self._config.clip_param\n )\n', (7275, 7349), False, 'import torch\n'), ((7559, 7575), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (7573, 7575), False, 'import ipdb\n'), ((2952, 2981), 'util.pytorch.count_parameters', 'count_parameters', (['self._actor'], {}), '(self._actor)\n', (2968, 2981), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((3039, 3069), 'util.pytorch.count_parameters', 'count_parameters', (['self._critic'], {}), '(self._critic)\n', (3055, 3069), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((4039, 4055), 'numpy.isfinite', 'np.isfinite', (['adv'], {}), '(adv)\n', (4050, 4055), True, 'import numpy as np\n'), ((4077, 4093), 'numpy.isfinite', 'np.isfinite', (['ret'], {}), '(ret)\n', (4088, 4093), True, 'import numpy as np\n'), ((5973, 6007), 'util.pytorch.compute_gradient_norm', 'compute_gradient_norm', (['self._actor'], {}), '(self._actor)\n', (5994, 6007), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((6042, 6074), 'util.pytorch.compute_weight_norm', 'compute_weight_norm', (['self._actor'], {}), '(self._actor)\n', (6061, 6074), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((6108, 6143), 'util.pytorch.compute_gradient_norm', 'compute_gradient_norm', (['self._critic'], {}), '(self._critic)\n', (6129, 6143), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((6179, 6212), 'util.pytorch.compute_weight_norm', 'compute_weight_norm', (['self._critic'], {}), '(self._critic)\n', (6198, 6212), False, 'from util.pytorch import optimizer_cuda, count_parameters, compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, obs2tensor, to_tensor\n'), ((7401, 7424), 'torch.min', 'torch.min', (['surr1', 'surr2'], {}), '(surr1, surr2)\n', (7410, 7424), False, 'import torch\n')] |
# Create your tests here.
from article.db_manager.article_manager import create_article_db
from utils.api.tests import APIClient, APITestCase
from utils.constants import ArticleTypeChoice
from utils.shortcuts import rand_str
def mock_create_article(title=None, content=None, art_type=None, owner_id=None):
title = title or rand_str(type='str')
content = content or rand_str(type='str')
art_type = art_type or ArticleTypeChoice[0][1]
owner_id = owner_id or 1
return create_article_db(title, content, art_type, owner_id)
class ArticleViewTest(APITestCase):
def setUp(self):
self.client = APIClient()
def test_create_article_view(self):
self.create_user('maxin', 'password', login=True)
article = mock_create_article()
result = self.client.get('/api/article/', data={'article_id': 1}).json()
self.assertEqual(result['result'], 'successful')
self.assertEqual(result['data']['title'], article.title)
| [
"utils.api.tests.APIClient",
"article.db_manager.article_manager.create_article_db",
"utils.shortcuts.rand_str"
] | [((488, 541), 'article.db_manager.article_manager.create_article_db', 'create_article_db', (['title', 'content', 'art_type', 'owner_id'], {}), '(title, content, art_type, owner_id)\n', (505, 541), False, 'from article.db_manager.article_manager import create_article_db\n'), ((330, 350), 'utils.shortcuts.rand_str', 'rand_str', ([], {'type': '"""str"""'}), "(type='str')\n", (338, 350), False, 'from utils.shortcuts import rand_str\n'), ((376, 396), 'utils.shortcuts.rand_str', 'rand_str', ([], {'type': '"""str"""'}), "(type='str')\n", (384, 396), False, 'from utils.shortcuts import rand_str\n'), ((624, 635), 'utils.api.tests.APIClient', 'APIClient', ([], {}), '()\n', (633, 635), False, 'from utils.api.tests import APIClient, APITestCase\n')] |
from django.db import models
from django.contrib.auth.models import BaseUserManager, AbstractBaseUser
from event.models import Event
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
user = self.create_user(
email,
password=password,
)
user.is_admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
email = models.EmailField(
verbose_name='email address',
max_length=255,
unique=True,
)
first_name = models.CharField(max_length=255, blank=True, null=True)
last_name = models.CharField(max_length=255, blank=True, null=True)
cell_phone = models.CharField(max_length=30, blank=True, null=True)
date_of_birth = models.DateField(blank=True, null=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
def get_full_name(self):
if self.first_name and self.last_name:
return "{} {}".format(self.first_name, self.last_name)
else:
return self.email
def get_short_name(self):
return self.first_name
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.is_admin
| [
"django.db.models.EmailField",
"django.db.models.DateField",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((753, 829), 'django.db.models.EmailField', 'models.EmailField', ([], {'verbose_name': '"""email address"""', 'max_length': '(255)', 'unique': '(True)'}), "(verbose_name='email address', max_length=255, unique=True)\n", (770, 829), False, 'from django.db import models\n'), ((878, 933), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (894, 933), False, 'from django.db import models\n'), ((950, 1005), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (966, 1005), False, 'from django.db import models\n'), ((1023, 1077), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(True)', 'null': '(True)'}), '(max_length=30, blank=True, null=True)\n', (1039, 1077), False, 'from django.db import models\n'), ((1098, 1137), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1114, 1137), False, 'from django.db import models\n'), ((1155, 1188), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1174, 1188), False, 'from django.db import models\n'), ((1204, 1238), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1223, 1238), False, 'from django.db import models\n')] |
"""MINUIT from Python - Fitting like a boss
Basic usage example::
from iminuit import Minuit
def f(x, y, z):
return (x - 2) ** 2 + (y - 3) ** 2 + (z - 4) ** 2
m = Minuit(f)
m.migrad()
print(m.values) # {'x': 2,'y': 3,'z': 4}
print(m.errors) # {'x': 1,'y': 1,'z': 1}
Further information:
* Code: https://github.com/iminuit/iminuit
* Docs: https://iminuit.readthedocs.io
"""
__all__ = [
'Minuit',
'minimize',
'describe',
'Struct',
'__version__',
'test',
]
from ._libiminuit import Minuit
from ._minimize import minimize
from .util import describe, Struct
from .info import __version__
def test(args=None):
"""Execute the iminuit tests.
Requires pytest.
From the command line:
python -c 'import iminuit; iminuit.test()
"""
# http://pytest.org/latest/usage.html#calling-pytest-from-python-code
import pytest
args = ['-v', '--pyargs', 'iminuit']
pytest.main(args)
| [
"pytest.main"
] | [((950, 967), 'pytest.main', 'pytest.main', (['args'], {}), '(args)\n', (961, 967), False, 'import pytest\n')] |
import pandas as pd
from backtrader import TimeFrame, date2num
from sqlalchemy import create_engine, inspect
from tqdm import tqdm
from koapy.backtrader.SQLiteData import SQLiteData
from koapy.utils.data.KrxHistoricalDailyPriceDataForBacktestLoader import (
KrxHistoricalDailyPriceDataForBacktestLoader,
)
class KrxHistoricalDailyPriceDataFromSQLite(SQLiteData):
# pylint: disable=no-member
params = (
("engine", None),
("symbol", None),
("name", None),
("fromdate", None),
("todate", None),
("compression", 1),
("timeframe", TimeFrame.Days),
("calendar", None),
("timestampcolumn", 0),
("timestampcolumntimezone", None),
("lazy", False),
)
lines = (
"amount",
"marketcap",
"shares",
)
def __init__(self):
assert self.p.timeframe == TimeFrame.Days
assert self.p.compression == 1
self.p.tablename = self.p.tablename or self.p.symbol or None
self.p.name = self.p.name or self.p.symbol or self.p.tablename or ""
super().__init__()
def _load(self):
if self._cursor is None:
return False
try:
date, open_, high, low, close, volume, amount, marcap, shares = next(
self._cursor
)
except StopIteration:
return False
else:
dt = pd.Timestamp(date)
self.lines.datetime[0] = date2num(dt)
self.lines.open[0] = open_
self.lines.high[0] = high
self.lines.low[0] = low
self.lines.close[0] = close
self.lines.volume[0] = volume
self.lines.openinterest[0] = 0.0
self.lines.amount[0] = amount
self.lines.marketcap[0] = marcap
self.lines.shares[0] = shares
return True
@classmethod
def dump_from_store(
cls,
source_filename,
dest_filename,
symbols=None,
fromdate=None,
todate=None,
progress_bar=True,
):
loader = KrxHistoricalDailyPriceDataForBacktestLoader(source_filename)
if symbols is None:
symbols = loader.get_symbols()
engine = create_engine("sqlite:///" + dest_filename)
progress = tqdm(symbols, disable=not progress_bar)
for symbol in progress:
progress.set_description("Dumping Symbol [%s]" % symbol)
data = loader.load(symbol, start_time=fromdate, end_time=todate)
data.to_sql(symbol, engine, if_exists="replace")
@classmethod
def adddata_fromfile(
cls,
cerebro,
filename,
symbols=None,
fromdate=None,
todate=None,
progress_bar=True,
):
engine = create_engine("sqlite:///" + filename)
inspector = inspect(engine)
if symbols is None:
symbols = inspector.get_table_names()
progress = tqdm(symbols, disable=not progress_bar)
for symbol in progress:
progress.set_description("Adding Symbol [%s]" % symbol)
# pylint: disable=unexpected-keyword-arg
data = cls(
engine=engine,
tablename=symbol,
fromdate=fromdate,
todate=todate,
symbol=symbol,
name=symbol,
)
cerebro.adddata(data, name=data.p.name)
| [
"backtrader.date2num",
"sqlalchemy.create_engine",
"tqdm.tqdm",
"koapy.utils.data.KrxHistoricalDailyPriceDataForBacktestLoader.KrxHistoricalDailyPriceDataForBacktestLoader",
"sqlalchemy.inspect",
"pandas.Timestamp"
] | [((2109, 2170), 'koapy.utils.data.KrxHistoricalDailyPriceDataForBacktestLoader.KrxHistoricalDailyPriceDataForBacktestLoader', 'KrxHistoricalDailyPriceDataForBacktestLoader', (['source_filename'], {}), '(source_filename)\n', (2153, 2170), False, 'from koapy.utils.data.KrxHistoricalDailyPriceDataForBacktestLoader import KrxHistoricalDailyPriceDataForBacktestLoader\n'), ((2259, 2302), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + dest_filename)"], {}), "('sqlite:///' + dest_filename)\n", (2272, 2302), False, 'from sqlalchemy import create_engine, inspect\n'), ((2322, 2361), 'tqdm.tqdm', 'tqdm', (['symbols'], {'disable': '(not progress_bar)'}), '(symbols, disable=not progress_bar)\n', (2326, 2361), False, 'from tqdm import tqdm\n'), ((2810, 2848), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + filename)"], {}), "('sqlite:///' + filename)\n", (2823, 2848), False, 'from sqlalchemy import create_engine, inspect\n'), ((2869, 2884), 'sqlalchemy.inspect', 'inspect', (['engine'], {}), '(engine)\n', (2876, 2884), False, 'from sqlalchemy import create_engine, inspect\n'), ((2982, 3021), 'tqdm.tqdm', 'tqdm', (['symbols'], {'disable': '(not progress_bar)'}), '(symbols, disable=not progress_bar)\n', (2986, 3021), False, 'from tqdm import tqdm\n'), ((1423, 1441), 'pandas.Timestamp', 'pd.Timestamp', (['date'], {}), '(date)\n', (1435, 1441), True, 'import pandas as pd\n'), ((1480, 1492), 'backtrader.date2num', 'date2num', (['dt'], {}), '(dt)\n', (1488, 1492), False, 'from backtrader import TimeFrame, date2num\n')] |
from textx.exceptions import TextXSemanticError
from cid.parser.model import ParameterCliValue, BoolWithPositivePattern
from cid.common.utils import get_cli_pattern_count, is_iterable, element_type
# ------------------------------- HELPER FUNCTIONS -------------------------------
def contains_duplicate_names(lst):
defined = [e.name for e in lst if not hasattr(e, 'imported') and not hasattr(e, 'local')]
local = [e.local for e in lst if hasattr(e, 'local') and e.local]
imported = [e.imported for e in lst if hasattr(e, 'imported') and e.imported]
return len(defined) != len(set(defined)) or len(local) != len(set(local)) or len(imported) != len(set(imported))
def split_import_path(import_path):
return './' + ('/'.join(import_path.elements[:-1])) + '.cid', import_path.elements[-1]
def import_reference_path(ref):
return '/' + '/'.join(ref.elements)
# ------------------------------- PRE PROCESSING -------------------------------
def process_script(script):
# check for duplicate free parameter names
script.free_parameters = [parameter for parameter in script.elements if element_type(parameter) == 'Parameter']
if contains_duplicate_names(script.free_parameters):
raise TextXSemanticError("Found duplicate free parameter names.")
# check for duplicate free command names
script.free_commands = [command for command in script.elements if element_type(command) == 'Command']
if contains_duplicate_names(script.free_commands):
raise TextXSemanticError("Found duplicate free command names.")
# check for duplicate import paths
if len(script.imports) != len(set([imp.path for imp in script.imports])):
raise TextXSemanticError("Found duplicate import paths.")
# check for duplicate import aliases
if len(script.imports) != len(set([imp.alias for imp in script.imports])):
raise TextXSemanticError("Found duplicate import aliases.")
# -------------------------------
def process_import_statement(import_statement):
if not import_statement.alias:
import_statement.alias = import_statement.path
import_statement.alias = import_reference_path(import_statement.alias)
import_statement.file_path, import_statement.element_name = split_import_path(import_statement.path)
# -------------------------------
def process_import_reference(import_reference):
if import_reference.imported:
import_reference.imported = import_reference_path(import_reference.imported)
# -------------------------------
def process_command(command):
"""
Model structure changes:
del command.usage
"""
# command.usages = all usages
if command.usages:
command.usages = [usage.body for usage in command.usages]
elif command.usage:
command.usages = [command.usage]
del command.usage
command.description = ' '.join(command.description.split()) # reduce excess white space
command.help = ' '.join(command.help.split()) # reduce excess white space
# defaults --------------
if not command.title:
command.title = command.name.replace('_', ' ').replace('-', ' ').strip().title()
if not command.cli_command:
command.cli_command = command.name
# additional checks --------------
if contains_duplicate_names(command.parameters):
raise TextXSemanticError("Found parameters with duplicate names in command: '{}'".format(command.name))
if contains_duplicate_names(command.sub_commands):
raise TextXSemanticError("Found sub commands with duplicate names in command: '{}'".format(command.name))
# -------------------------------
def process_parameter(parameter):
"""
Model structure changes:
add parameter.nonpositional
fix parameter.default
add parameter.all_patterns
add parameter.cli_pattern_vars
add parameter.cli_pattern_count
del parameter.empty_str_disallowed
add parameter.none_allowed
del parameter.default_is_none
Checks performed: TODO
Model changes: TODO
"""
# set default bool cli pattern
if parameter.type == 'Bool' and not parameter.cli:
parameter.cli = ParameterCliValue(BoolWithPositivePattern('--{name}'.format(name=parameter.name)))
# set parameter.nonpositional
parameter.nonpositional = parameter.cli and parameter.cli.cli_pattern
# fix parameter.default model structure
if len(parameter.default) == 0:
parameter.default = None
elif len(parameter.default) == 1:
parameter.default = parameter.default[0]
if parameter.nonpositional:
# set parameter.all_patterns
parameter.cli.cli_pattern.parent = parameter
parameter.all_patterns = [parameter.cli.cli_pattern] + parameter.cli_aliases
# set parameter.cli_pattern_count
parameter.cli_pattern_count = get_cli_pattern_count(parameter.all_patterns[0])
# all_patterns
for pattern in parameter.all_patterns:
if hasattr(pattern, 'vars') and pattern.vars:
# transform vars into a list of strings
pattern.vars = [v.value for v in pattern.vars]
# set pattern.count
pattern.count = len(pattern.vars)
# set parameter.cli_pattern_vars
if not hasattr(parameter, 'cli_pattern_vars'):
parameter.cli_pattern_vars = pattern.vars
else:
if not (len(parameter.cli_pattern_vars) == len(pattern.vars) and
all([parameter.cli_pattern_vars[i] == pattern.vars[i] for i in range(0, len(pattern.vars))])):
raise TextXSemanticError("Different argument names found for patterns in parameter: '{}'".format(parameter.name))
# StringParamPattern checks
if element_type(pattern) == "StringParamPattern":
if parameter.type == "Bool":
raise TextXSemanticError("Non boolean cli pattern in Bool type parameter: '{}'.".format(parameter.name))
if pattern.count_char and not parameter.type == "Num":
raise TextXSemanticError("Counter pattern in non Num type parameter: '{}'.".format(parameter.name))
if parameter.cli_pattern_count != get_cli_pattern_count(pattern):
raise TextXSemanticError("Different parameter count values encountered in cli patterns for parameter: '{}'".format(parameter.name))
elif element_type(pattern) in ['BoolWithPositivePattern', 'BoolNegativeOnlyPattern'] and not parameter.type == "Bool":
raise TextXSemanticError("Boolean cli pattern in non Bool type parameter: '{}'.".format(parameter.name))
else:
parameter.cli_pattern_count = 1
# empty_str_allowed
if (parameter.empty_str_allowed or parameter.empty_str_disallowed) and parameter.type != 'Str':
raise TextXSemanticError("Found empty_str_allowed or empty_str_disallowed in non Str parameter: '{}'".format(parameter.name))
if parameter.default == '' and parameter.empty_str_disallowed:
raise TextXSemanticError("Found empty_str_disallowed and default value is an empty string for parameter: '{}'.".format(parameter.name))
del parameter.empty_str_disallowed
# title
if not parameter.title:
parameter.title = parameter.name.replace('_', ' ').replace('-', ' ').strip().title()
# multiplicity
if not parameter.multiplicity:
parameter.multiplicity = 1
if parameter.multiplicity != '*' and parameter.multiplicity <= 0:
raise TextXSemanticError("Multiplicity must be greater than zero for: '{}'.".format(parameter.name))
if not parameter.nonpositional and parameter.multiplicity not in [1, '*']:
raise TextXSemanticError("Multiplicity for positional parameters must be either 1 or '*': '{}'.".format(parameter.name))
if not parameter.multiplicity == 1 and parameter.type == "Bool":
raise TextXSemanticError("Multiplicity for Bool type parameters must be 1: '{}'.".format(parameter.name))
# help
parameter.help = ' '.join(parameter.help.split()) # reduce excess white space
# description
parameter.description = ' '.join(parameter.description.split()) # reduce excess white space
if not parameter.description:
parameter.description = '{default_desc}'
# default
if parameter.default_is_none:
if parameter.type == 'Bool':
raise TextXSemanticError("Found default_is_none and parameter type is 'Bool': '{}'".format(parameter.name))
if parameter.default:
raise TextXSemanticError("Found default_is_none and parameter has a default defined: '{}'.".format(parameter.name))
if not parameter.default:
if parameter.default_is_none:
parameter.default = None
else:
if parameter.type == 'Bool':
# if parameter doesnt contain both positive and negative patterns
if not ([p for p in parameter.all_patterns if p.positive] and [p for p in parameter.all_patterns if p.negative]):
# set to False by default
parameter.default = 'False'
# else: leave None (for a case where neither positive nor negative arg is provided)
del parameter.default_is_none
if parameter.default:
if parameter.cli_pattern_count not in [1, '*']:
if not is_iterable(parameter.default) or len(parameter.default) != parameter.cli_pattern_count:
raise TextXSemanticError("Parameter '{}' with {} values must have that many default values defined.".format(parameter.name, parameter.cli_pattern_count))
else:
if is_iterable(parameter.default):
raise TextXSemanticError("Parameter '{}' should only have a single default value.".format(parameter.name))
if parameter.default == '':
parameter.empty_str_allowed = True
if parameter.nonpositional and parameter.default is not None:
if parameter.cli_pattern_count not in [1, '*']:
if not isinstance(parameter.default, list):
parameter.default = [parameter.default] * parameter.cli_pattern_count
elif len(parameter.default) != parameter.cli_pattern_count:
raise TextXSemanticError("Parameter pattern count and default values count do not match: '{}'.".format(parameter.name))
if parameter.type == 'Bool':
if parameter.default and parameter.default.lower() not in ['true', 'false']:
raise TextXSemanticError("Default value is not true or false and parameter type is 'Bool': '{}'".format(parameter.name))
# add parameter.none_allowed
parameter.none_allowed = parameter.default is None or [p for p in parameter.all_patterns if p.positive] and [p for p in parameter.all_patterns if p.negative]
# date_format
if not parameter.date_format and parameter.type == 'Date':
parameter.date_format = "dd.MM.yyyy"
# choices
if parameter.choices and not parameter.type == 'Choice':
raise TextXSemanticError("Choices found in non 'Choice' parameter: '{}'.".format(parameter.name))
if parameter.type == 'Choice' and not parameter.choices:
raise TextXSemanticError("Choices are required in 'Choice' parameter: '{}'".format(parameter.name))
# constraints
for constraint in parameter.constraints:
supported_constraints = {
'Str': ['LengthConstraint', 'StringFlagConstraint', 'RegexConstraint', 'CodeConstraint'],
'Choice': ['CodeConstraint'],
'Num': ['NumericValueConstraint', 'NumberFlagConstraint', 'CodeConstraint'],
'Bool': ['CodeConstraint'],
'Date': ['DateConstraint', 'CodeConstraint'],
'File': ['FileFlagConstraint', 'CodeConstraint', 'RegexConstraint'],
}[parameter.type]
if element_type(constraint) not in supported_constraints:
raise TextXSemanticError("Constraint type '{}' is unsupported for parameter type '{}': '{}'.".format(element_type(constraint), parameter.type, parameter.name))
# -------------------------------
def process_cli_or_group(or_group):
# transform the tree structure into a list
or_group.elements = [or_group.lhs]
if element_type(or_group.rhs) == 'CliOrGroup':
or_group.elements += or_group.rhs.elements
for el in or_group.rhs.elements:
el.parent = or_group
else:
or_group.elements.append(or_group.rhs)
del or_group.lhs
del or_group.rhs
# check for CliOptionalGroup in CliOrGroup
for element in or_group.elements:
if element_type(element) == 'CliOptionalGroup':
print('warning: CliOptionalGroup in CliOrGroup')
# -------------------------------
object_processors = {
'Script': process_script,
'ImportStatement': process_import_statement,
'ParameterReference': process_import_reference,
'CommandReference': process_import_reference,
'Command': process_command,
'Parameter': process_parameter,
'CliOrGroup': process_cli_or_group,
}
| [
"cid.common.utils.get_cli_pattern_count",
"cid.common.utils.element_type",
"cid.common.utils.is_iterable",
"textx.exceptions.TextXSemanticError"
] | [((1234, 1293), 'textx.exceptions.TextXSemanticError', 'TextXSemanticError', (['"""Found duplicate free parameter names."""'], {}), "('Found duplicate free parameter names.')\n", (1252, 1293), False, 'from textx.exceptions import TextXSemanticError\n'), ((1515, 1572), 'textx.exceptions.TextXSemanticError', 'TextXSemanticError', (['"""Found duplicate free command names."""'], {}), "('Found duplicate free command names.')\n", (1533, 1572), False, 'from textx.exceptions import TextXSemanticError\n'), ((1705, 1756), 'textx.exceptions.TextXSemanticError', 'TextXSemanticError', (['"""Found duplicate import paths."""'], {}), "('Found duplicate import paths.')\n", (1723, 1756), False, 'from textx.exceptions import TextXSemanticError\n'), ((1892, 1945), 'textx.exceptions.TextXSemanticError', 'TextXSemanticError', (['"""Found duplicate import aliases."""'], {}), "('Found duplicate import aliases.')\n", (1910, 1945), False, 'from textx.exceptions import TextXSemanticError\n'), ((4887, 4935), 'cid.common.utils.get_cli_pattern_count', 'get_cli_pattern_count', (['parameter.all_patterns[0]'], {}), '(parameter.all_patterns[0])\n', (4908, 4935), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((12386, 12412), 'cid.common.utils.element_type', 'element_type', (['or_group.rhs'], {}), '(or_group.rhs)\n', (12398, 12412), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((9806, 9836), 'cid.common.utils.is_iterable', 'is_iterable', (['parameter.default'], {}), '(parameter.default)\n', (9817, 9836), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((11992, 12016), 'cid.common.utils.element_type', 'element_type', (['constraint'], {}), '(constraint)\n', (12004, 12016), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((12751, 12772), 'cid.common.utils.element_type', 'element_type', (['element'], {}), '(element)\n', (12763, 12772), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((1123, 1146), 'cid.common.utils.element_type', 'element_type', (['parameter'], {}), '(parameter)\n', (1135, 1146), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((1410, 1431), 'cid.common.utils.element_type', 'element_type', (['command'], {}), '(command)\n', (1422, 1431), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((5875, 5896), 'cid.common.utils.element_type', 'element_type', (['pattern'], {}), '(pattern)\n', (5887, 5896), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((6333, 6363), 'cid.common.utils.get_cli_pattern_count', 'get_cli_pattern_count', (['pattern'], {}), '(pattern)\n', (6354, 6363), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((9518, 9548), 'cid.common.utils.is_iterable', 'is_iterable', (['parameter.default'], {}), '(parameter.default)\n', (9529, 9548), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((12160, 12184), 'cid.common.utils.element_type', 'element_type', (['constraint'], {}), '(constraint)\n', (12172, 12184), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n'), ((6534, 6555), 'cid.common.utils.element_type', 'element_type', (['pattern'], {}), '(pattern)\n', (6546, 6555), False, 'from cid.common.utils import get_cli_pattern_count, is_iterable, element_type\n')] |
from django.db.models import fields
from rest_framework import serializers
from facegram.profiles.models import Profile
from facegram.users.api.serializers import UserSerializer
class RetrieveUserProfileSerializerV1(serializers.ModelSerializer):
user = UserSerializer(read_only=True)
class Meta:
model = Profile
exclude = ("followers","following", "up_votes", "down_votes")
read_only_fields = ('id', 'follower_count', 'following_count')
class UpdateProfileSerializerV1(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('profile_pic', 'bio', 'location', 'interests', 'skills')
read_only_fields = ('id', 'follower_count', 'following_count', "up_votes", "down_votes")
depth = 1 | [
"facegram.users.api.serializers.UserSerializer"
] | [((259, 289), 'facegram.users.api.serializers.UserSerializer', 'UserSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (273, 289), False, 'from facegram.users.api.serializers import UserSerializer\n')] |
from rest_framework import generics, permissions
from rest_framework import filters as filters_rf
from django_filters import rest_framework as filters
from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken
from .serializers import SocialAppSerializer, SocialAppExtendedSerializer, SocialAccountSerializer, \
SocialAccountExtendedSerializer, SocialTokenSerializer, SocialTokenExtendedSerializer
class SocialAppListApi(generics.ListAPIView):
"""Список всех SocialApp"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.all()
serializer_class = SocialAppExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'provider', 'sites')
search_fields = ['name', 'client_id', 'id']
ordering = ['id']
class SocialAppRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления приложения соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.all()
lookup_field = 'id'
serializer_class = SocialAppSerializer
class SocialAppCreateApi(generics.CreateAPIView):
"""Добавление приложения соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.none()
serializer_class = SocialAppSerializer
class SocialAccountListApi(generics.ListAPIView):
"""Список всех аккаунтов соц. сетей"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.all()
serializer_class = SocialAccountExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'user', 'provider')
search_fields = ['user__username']
ordering = ['id']
class SocialAccountRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления аккаунта в соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.all()
serializer_class = SocialAccountSerializer
lookup_field = 'id'
class SocialAccountCreateApi(generics.CreateAPIView):
"""Добавление аккаунта соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.none()
serializer_class = SocialAccountSerializer
class SocialTokenListApi(generics.ListAPIView):
"""Список токенов"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.all()
serializer_class = SocialTokenExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'app', 'account')
search_fields = ['account__user__username', 'token', 'id']
ordering = ['id']
class SocialTokenRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления токенов"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.all()
serializer_class = SocialTokenSerializer
lookup_field = 'id'
class SocialTokenCreateApi(generics.CreateAPIView):
"""Добавление токена"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.none()
serializer_class = SocialTokenSerializer
| [
"allauth.socialaccount.models.SocialAccount.objects.all",
"allauth.socialaccount.models.SocialAccount.objects.none",
"allauth.socialaccount.models.SocialToken.objects.none",
"allauth.socialaccount.models.SocialApp.objects.none",
"allauth.socialaccount.models.SocialApp.objects.all",
"allauth.socialaccount.... | [((580, 603), 'allauth.socialaccount.models.SocialApp.objects.all', 'SocialApp.objects.all', ([], {}), '()\n', (601, 603), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((1143, 1166), 'allauth.socialaccount.models.SocialApp.objects.all', 'SocialApp.objects.all', ([], {}), '()\n', (1164, 1166), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((1405, 1429), 'allauth.socialaccount.models.SocialApp.objects.none', 'SocialApp.objects.none', ([], {}), '()\n', (1427, 1429), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((1645, 1672), 'allauth.socialaccount.models.SocialAccount.objects.all', 'SocialAccount.objects.all', ([], {}), '()\n', (1670, 1672), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((2210, 2237), 'allauth.socialaccount.models.SocialAccount.objects.all', 'SocialAccount.objects.all', ([], {}), '()\n', (2235, 2237), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((2482, 2510), 'allauth.socialaccount.models.SocialAccount.objects.none', 'SocialAccount.objects.none', ([], {}), '()\n', (2508, 2510), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((2710, 2735), 'allauth.socialaccount.models.SocialToken.objects.all', 'SocialToken.objects.all', ([], {}), '()\n', (2733, 2735), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((3278, 3303), 'allauth.socialaccount.models.SocialToken.objects.all', 'SocialToken.objects.all', ([], {}), '()\n', (3301, 3303), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n'), ((3532, 3558), 'allauth.socialaccount.models.SocialToken.objects.none', 'SocialToken.objects.none', ([], {}), '()\n', (3556, 3558), False, 'from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken\n')] |
import numpy as np
import xarray as xr
import exceptions
from dakota_file import DakotaFile
my_netcdf = DakotaFile()
filename = 'DAKOTA.nc'
my_netcdf.read(filename)
variable_dict1 = my_netcdf.get_variable_as_dict('test_scan1')
variable_dict2 = my_netcdf.get_variable_as_dict('test_scan2')
variable_dict3 = my_netcdf.get_variable_as_dict('test_scan3')
file_out = open('DAKOTA_OUTPUT.dat','w')
file_out.write('test_scan1:\n')
values = variable_dict1['values']
file_out.write( str(values[0])+' '+str(values[1])+'\n' )
values = variable_dict2['values']
file_out.write( str(values)+'\n' )
values = variable_dict3['values']
file_out.write( str(values)+'\n' )
file_out.close()
| [
"dakota_file.DakotaFile"
] | [((105, 117), 'dakota_file.DakotaFile', 'DakotaFile', ([], {}), '()\n', (115, 117), False, 'from dakota_file import DakotaFile\n')] |
from collections import defaultdict
from jvd.normalizer.syntax import get_definition
import sys
from jvd.utils import AttrDict
class DataUnit:
def __init__(self, json_obj, file_path):
super().__init__()
with open(file_path, "rb") as f:
self.fbytes = f.read()
self.obj = AttrDict.from_nested_dict(json_obj)
if not 'data' in self.obj.bin:
self.obj.bin.data = {}
if not 'strings' in self.obj.bin:
self.obj.bin.strings = {}
self.map_b = defaultdict(list)
for b in self.obj.blocks:
self.map_b[b.addr_f].append(b)
# flattened to nested
self.map_f = {}
self.map_f_xcall = defaultdict(list)
for f in self.obj.functions:
f.unit = self
f.blocks = self.map_b.get(f.addr_start, [])
self.map_f[f.addr_start] = f
if not hasattr(f, 'calls'):
f.calls = []
for c in f.calls:
self.map_f_xcall[c].append(f)
self.map_b = {}
for b in self.obj.blocks:
self.map_b[b.addr_start] = b
self.ins_dat_ref = {}
for b in self.obj.blocks:
if not hasattr(b, 'calls'):
b.calls = []
for i in b.ins:
if not hasattr(i, 'dr'):
i.dr = []
if not hasattr(i, 'cr'):
i.cr = []
if not hasattr(i, 'oprs'):
i.oprs = []
if len(i.dr) > 0:
self.ins_dat_ref[i.ea] = i.dr
# print('##', self.obj.bin.architecture)
self.syntax = get_definition(self.obj.bin.architecture)
self.import_names = None # self.obj.bin.import_functions
self.seg_addr = sorted(
[int(k) for k in self.obj.bin.seg.keys()]) + [sys.maxsize]
self.find_seg = lambda v: next(
x[0] for x in enumerate(self.seg_addr) if x[1] > v)
| [
"jvd.normalizer.syntax.get_definition",
"jvd.utils.AttrDict.from_nested_dict",
"collections.defaultdict"
] | [((313, 348), 'jvd.utils.AttrDict.from_nested_dict', 'AttrDict.from_nested_dict', (['json_obj'], {}), '(json_obj)\n', (338, 348), False, 'from jvd.utils import AttrDict\n'), ((524, 541), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (535, 541), False, 'from collections import defaultdict\n'), ((701, 718), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (712, 718), False, 'from collections import defaultdict\n'), ((1658, 1699), 'jvd.normalizer.syntax.get_definition', 'get_definition', (['self.obj.bin.architecture'], {}), '(self.obj.bin.architecture)\n', (1672, 1699), False, 'from jvd.normalizer.syntax import get_definition\n')] |
#!/usr/bin/env python
"""
configuration for faps
Provides the Options class that will transparently handle the different option
sources through the .get() method. Pulls in defaults, site and job options plus
command line customisation. Instantiating Options will set up the logging for
the particular job.
"""
__all__ = ['Options']
# Python 3 fix
try:
import configparser
except ImportError:
import ConfigParser as configparser
import copy
import logging
import os
import re
import sys
import textwrap
# Python 3 fix
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from optparse import OptionParser
from logging import debug, error, info
import __main__
class Options(object):
"""
Transparent options handling.
A single unified way of dealing with input files and command line options
delivering sensible defaults for unspecified values. Access options with
the .get() method, or the method that specifies the expected type. It is
recommended to replace with a new instance each time the script is run,
otherwise commandline options or changed input files will not be picked up.
"""
def __init__(self, job_name=None):
"""Initialize options from all .ini files and the commandline."""
# use .get{type}() to read attributes, only access args directly
self.job_dir = ''
self.script_dir = ''
self.job_name = job_name
self.args = []
self.options = {}
self.cmdopts = {}
self._used_options = set()
self.defaults = configparser.SafeConfigParser()
self.site_ini = configparser.SafeConfigParser()
self.job_ini = configparser.SafeConfigParser()
# populate options
self._init_paths()
self.commandline()
self._init_logging()
self.load_defaults()
self.load_site_defaults()
self.load_job_defaults()
if self.options.job_type:
self.job_type_ini = configparser.SafeConfigParser()
self.load_job_type(self.options.job_type)
else:
self.job_type_ini = NullConfigParser()
def get(self, item):
"""Map values from different sources based on priorities."""
# report default options differently
option_source = 'D'
if item in self.__dict__:
# Instance attributes, such as job_name and job_dir
debug("an attribute: %s" % item)
option_source = 'A'
value = object.__getattribute__(self, item)
elif self.options.__dict__.get(item) is not None:
# Commandline options from optparse where option is set
debug("an option: %s" % item)
option_source = 'C'
value = self.options.__dict__[item]
elif item in self.cmdopts:
# Commandline -o custom key=value options
debug("a custom -o option: %s" % item)
option_source = 'O'
value = self.cmdopts[item]
elif self.job_ini.has_option('job_config', item):
# jobname.fap per-job setings
debug("a job option: %s" % item)
option_source = 'F'
value = self.job_ini.get('job_config', item)
elif self.job_type_ini.has_option('job_type', item):
debug("a job_type option: %s" % item)
option_source = 'J'
value = self.job_type_ini.get('job_type', item)
elif self.site_ini.has_option('site_config', item):
debug("a site option: %s" % item)
value = self.site_ini.get('site_config', item)
elif self.defaults.has_option('defaults', item):
debug("a default: %s" % item)
value = self.defaults.get('defaults', item)
else:
# Most things have a default, but not always. Error properly.
debug("unspecified option: %s" % item)
raise AttributeError(item)
# Show what options are used the first time they are accessed
# for the traceability
if item not in self._used_options:
if option_source == 'D':
debug("Default: %s = %s" % (item, value))
else:
info("Option (%s): %s = %s" % (option_source, item, value))
self._used_options.add(item)
# we output the raw value here and pass to caller for
return value
def getbool(self, item):
"""
Parse option and if the value of item is not already a bool return
True for "1", "yes", "true" and "on" and False for "0", "no", "false"
and "off". Case-insensitive.
"""
value = self.get(item)
if isinstance(value, bool):
return value
# Can't use isinstance with basestring to be 2.x and 3.x compatible
# fudge it by assuming strings can be lowered
elif hasattr(value, 'lower'):
if value.lower() in ["1", "yes", "true", "on"]:
return True
elif value.lower() in ["0", "no", "false", "off"]:
return False
else:
# Not a valid bool
raise ValueError(value)
else:
return bool(item)
def getint(self, item):
"""Return item's value as an integer."""
value = self.get(item)
return int(value)
def getfloat(self, item):
"""Return item's value as a float."""
value = self.get(item)
return float(value)
def gettuple(self, item, dtype=None):
"""Return item's value interpreted as a tuple of 'dtype' [strings]."""
value = self.get(item)
# Regex strips bracketing so can't nest, but safer than eval
value = [x for x in re.split('[\s,\(\)\[\]]*', value) if x]
if dtype is not None:
return tuple([dtype(x) for x in value])
else:
return tuple(value)
def _init_paths(self):
"""Find the script directory and set up working directory"""
# Where the script is has the config defaults.
if __name__ != '__main__':
self.script_dir = os.path.dirname(__file__)
else:
self.script_dir = os.path.abspath(sys.path[0])
# Where we run the job.
self.job_dir = os.getcwd()
def _init_logging(self):
"""
Setup the logging to terminal and .flog file, with levels as required.
Must run before any logging calls so we need to access attributes
rather than using self.get()!
"""
# Quiet always overrides verbose; always at least INFO in .flog
if self.options.silent:
stdout_level = logging.CRITICAL
file_level = logging.INFO
elif self.options.quiet:
stdout_level = logging.ERROR
file_level = logging.INFO
elif self.options.verbose:
stdout_level = logging.DEBUG
file_level = logging.DEBUG
else:
stdout_level = logging.INFO
file_level = logging.INFO
# Easier to do simple file configuration then add the stdout
logging.basicConfig(level=file_level,
format='[%(asctime)s] %(levelname)s %(message)s',
datefmt='%Y%m%d %H:%M:%S',
filename=self.job_name + '.flog',
filemode='a')
# Make these uniform widths
logging.addLevelName(10, '--')
logging.addLevelName(20, '>>')
logging.addLevelName(30, '**')
logging.addLevelName(40, '!!')
logging.addLevelName(50, 'XX')
if self.options.plain:
console = logging.StreamHandler(sys.stdout)
else:
# Use nice coloured console output
console = ColouredConsoleHandler(sys.stdout)
console.setLevel(stdout_level)
formatter = logging.Formatter('%(levelname)s %(message)s')
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def commandline(self):
"""Specified options, highest priority."""
usage = "usage: %prog [options] [COMMAND] JOB_NAME"
# use description for the script, not for this module
parser = OptionParser(usage=usage, version="%prog 0.1",
description=__main__.__doc__)
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose",
help="output extra debugging information")
parser.add_option("-q", "--quiet", action="store_true",
dest="quiet", help="only output warnings and errors")
parser.add_option("-s", "--silent", action="store_true",
dest="silent", help="no terminal output")
parser.add_option("-p", "--plain", action="store_true",
dest="plain", help="do not colourise or wrap output")
parser.add_option("-o", "--option", action="append", dest="cmdopts",
help="set custom options as key=value pairs")
parser.add_option("-i", "--interactive", action="store_true",
dest="interactive", help="enter interactive mode")
parser.add_option("-m", "--import", action="store_true",
dest="import", help="try and import old data")
parser.add_option("-n", "--no-submit", action="store_true",
dest="no_submit",
help="create input files only, do not run any jobs")
parser.add_option("-j", "--job-type", dest="job_type",
help="user preconfigured job settings")
parser.add_option("-d", "--daemon", action="store_true", dest="daemon",
help="run [lube] as a server and await input")
(local_options, local_args) = parser.parse_args()
# job_name may or may not be passed or set initially
if self.job_name:
if self.job_name in local_args:
local_args.remove(self.job_name)
elif len(local_args) == 0:
parser.error("No arguments given (try %prog --help)")
else:
# Take the last argument as the job name
self.job_name = local_args.pop()
# key value options from the command line
if local_options.cmdopts is not None:
for pair in local_options.cmdopts:
if '=' in pair:
pair = pair.split('=', 1) # maximum of one split
self.cmdopts[pair[0]] = pair[1]
else:
self.cmdopts[pair] = True
self.options = local_options
# Args are only the COMMANDS for the run
self.args = [arg.lower() for arg in local_args]
def load_defaults(self):
"""Load program defaults."""
# ConfigParser requires header sections so we add them to a StringIO
# of the file if they are missing. 2to3 should also deal with the
# renamed modules.
default_ini_path = os.path.join(self.script_dir, 'defaults.ini')
try:
filetemp = open(default_ini_path, 'r')
default_ini = filetemp.read()
filetemp.close()
if not '[defaults]' in default_ini.lower():
default_ini = '[defaults]\n' + default_ini
default_ini = StringIO(default_ini)
except IOError:
# file does not exist so we just use a blank string
debug('Default options not found! Something is very wrong.')
default_ini = StringIO('[defaults]\n')
self.defaults.readfp(default_ini)
def load_site_defaults(self):
"""Find where the script is and load defaults"""
site_ini_path = os.path.join(self.script_dir, 'site.ini')
try:
filetemp = open(site_ini_path, 'r')
site_ini = filetemp.read()
filetemp.close()
if not '[site_config]' in site_ini.lower():
site_ini = '[site_config]\n' + site_ini
site_ini = StringIO(site_ini)
except IOError:
# file does not exist so we just use a blank string
debug("No site options found; using defaults")
site_ini = StringIO('[site_config]\n')
self.site_ini.readfp(site_ini)
def load_job_defaults(self):
"""Find where the job is running and load defaults"""
job_ini_path = os.path.join(self.job_dir, self.job_name + '.fap')
try:
filetemp = open(job_ini_path, 'r')
job_ini = filetemp.read()
filetemp.close()
if not '[job_config]' in job_ini.lower():
job_ini = '[job_config]\n' + job_ini
job_ini = StringIO(job_ini)
debug("Job options read from %s" % job_ini_path)
except IOError:
# file does not exist so we just use a blank string
debug("No job options found; using defaults")
job_ini = StringIO('[job_config]\n')
self.job_ini.readfp(job_ini)
def load_job_type(self, job_type):
"""Find where the job is running and load defaults"""
home_dir = os.path.expanduser('~')
job_type_ini_path = os.path.join(home_dir, '.faps', job_type + '.fap')
try:
filetemp = open(job_type_ini_path, 'r')
job_type_ini = filetemp.read()
filetemp.close()
if not '[job_type]' in job_type_ini.lower():
job_type_ini = '[job_type]\n' + job_type_ini
job_type_ini = StringIO(job_type_ini)
debug("Job type options read from %s" % job_type_ini_path)
except IOError:
# file does not exist so we just use a blank string
error("Job type '%s' specified but options file '%s' not found" %
(job_type, job_type_ini_path))
job_type_ini = StringIO('[job_config]\n')
self.job_type_ini.readfp(job_type_ini)
def options_test():
"""Try and read a few options from different sources."""
testopts = Options()
print(testopts.get('job_name'))
print(testopts.get('cmdopts'))
print(testopts.get('args'))
print(testopts.get('verbose'))
print(testopts.get('script_dir'))
print(testopts.getbool('interactive'))
for arg in testopts.get('args'):
print('%s: %s' % (arg, testopts.get(arg)))
try:
print(testopts.getbool(arg))
except ValueError:
print('%s is not a bool' % arg)
try:
print(testopts.getint(arg))
except ValueError:
print('%s is not an int' % arg)
try:
print(testopts.getfloat(arg))
except ValueError:
print('%s is not a float' % arg)
try:
print(testopts.gettuple(arg))
except ValueError:
print('%s is not a tuple' % arg)
print(testopts.get('not an option'))
class ColouredConsoleHandler(logging.StreamHandler):
"""Makes colourised and wrapped output for the console."""
def emit(self, record):
"""Colourise and emit a record."""
# Need to make a actual copy of the record
# to prevent altering the message for other loggers
myrecord = copy.copy(record)
levelno = myrecord.levelno
if levelno >= 50: # CRITICAL / FATAL
front = '\033[30;41m' # black/red
elif levelno >= 40: # ERROR
front = '\033[30;41m' # black/red
elif levelno >= 30: # WARNING
front = '\033[30;43m' # black/yellow
elif levelno >= 20: # INFO
front = '\033[30;42m' # black/green
elif levelno >= 10: # DEBUG
front = '\033[30;46m' # black/cyan
else: # NOTSET and anything else
front = '\033[0m' # normal
myrecord.levelname = '%s%s\033[0m' % (front, myrecord.levelname)
logging.StreamHandler.emit(self, myrecord)
class NullConfigParser(object):
"""Use in place of a blank ConfigParser that has no options."""
def __init__(self, *args, **kwargs):
"""This is empty, so do nothing."""
pass
def has_option(*args, **kwargs):
"""Always return Fasle as there are no options."""
return False
if __name__ == '__main__':
options_test()
| [
"logging.getLogger",
"logging.StreamHandler",
"logging.debug",
"logging.StreamHandler.emit",
"copy.copy",
"logging.info",
"logging.error",
"re.split",
"logging.addLevelName",
"io.StringIO",
"os.path.expanduser",
"ConfigParser.SafeConfigParser",
"os.path.dirname",
"logging.basicConfig",
"... | [((1580, 1611), 'ConfigParser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (1609, 1611), True, 'import ConfigParser as configparser\n'), ((1636, 1667), 'ConfigParser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (1665, 1667), True, 'import ConfigParser as configparser\n'), ((1691, 1722), 'ConfigParser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (1720, 1722), True, 'import ConfigParser as configparser\n'), ((6280, 6291), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6289, 6291), False, 'import os\n'), ((7122, 7293), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'file_level', 'format': '"""[%(asctime)s] %(levelname)s %(message)s"""', 'datefmt': '"""%Y%m%d %H:%M:%S"""', 'filename': "(self.job_name + '.flog')", 'filemode': '"""a"""'}), "(level=file_level, format=\n '[%(asctime)s] %(levelname)s %(message)s', datefmt='%Y%m%d %H:%M:%S',\n filename=self.job_name + '.flog', filemode='a')\n", (7141, 7293), False, 'import logging\n'), ((7442, 7472), 'logging.addLevelName', 'logging.addLevelName', (['(10)', '"""--"""'], {}), "(10, '--')\n", (7462, 7472), False, 'import logging\n'), ((7481, 7511), 'logging.addLevelName', 'logging.addLevelName', (['(20)', '""">>"""'], {}), "(20, '>>')\n", (7501, 7511), False, 'import logging\n'), ((7520, 7550), 'logging.addLevelName', 'logging.addLevelName', (['(30)', '"""**"""'], {}), "(30, '**')\n", (7540, 7550), False, 'import logging\n'), ((7559, 7589), 'logging.addLevelName', 'logging.addLevelName', (['(40)', '"""!!"""'], {}), "(40, '!!')\n", (7579, 7589), False, 'import logging\n'), ((7598, 7628), 'logging.addLevelName', 'logging.addLevelName', (['(50)', '"""XX"""'], {}), "(50, 'XX')\n", (7618, 7628), False, 'import logging\n'), ((7894, 7940), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s %(message)s"""'], {}), "('%(levelname)s %(message)s')\n", (7911, 7940), False, 'import logging\n'), ((8294, 8370), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '"""%prog 0.1"""', 'description': '__main__.__doc__'}), "(usage=usage, version='%prog 0.1', description=__main__.__doc__)\n", (8306, 8370), False, 'from optparse import OptionParser\n'), ((11139, 11184), 'os.path.join', 'os.path.join', (['self.script_dir', '"""defaults.ini"""'], {}), "(self.script_dir, 'defaults.ini')\n", (11151, 11184), False, 'import os\n'), ((11853, 11894), 'os.path.join', 'os.path.join', (['self.script_dir', '"""site.ini"""'], {}), "(self.script_dir, 'site.ini')\n", (11865, 11894), False, 'import os\n'), ((12534, 12584), 'os.path.join', 'os.path.join', (['self.job_dir', "(self.job_name + '.fap')"], {}), "(self.job_dir, self.job_name + '.fap')\n", (12546, 12584), False, 'import os\n'), ((13273, 13296), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (13291, 13296), False, 'import os\n'), ((13325, 13375), 'os.path.join', 'os.path.join', (['home_dir', '""".faps"""', "(job_type + '.fap')"], {}), "(home_dir, '.faps', job_type + '.fap')\n", (13337, 13375), False, 'import os\n'), ((15345, 15362), 'copy.copy', 'copy.copy', (['record'], {}), '(record)\n', (15354, 15362), False, 'import copy\n'), ((15998, 16040), 'logging.StreamHandler.emit', 'logging.StreamHandler.emit', (['self', 'myrecord'], {}), '(self, myrecord)\n', (16024, 16040), False, 'import logging\n'), ((1995, 2026), 'ConfigParser.SafeConfigParser', 'configparser.SafeConfigParser', ([], {}), '()\n', (2024, 2026), True, 'import ConfigParser as configparser\n'), ((2424, 2456), 'logging.debug', 'debug', (["('an attribute: %s' % item)"], {}), "('an attribute: %s' % item)\n", (2429, 2456), False, 'from logging import debug, error, info\n'), ((6126, 6151), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6141, 6151), False, 'import os\n'), ((6196, 6224), 'os.path.abspath', 'os.path.abspath', (['sys.path[0]'], {}), '(sys.path[0])\n', (6211, 6224), False, 'import os\n'), ((7683, 7716), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (7704, 7716), False, 'import logging\n'), ((11461, 11482), 'io.StringIO', 'StringIO', (['default_ini'], {}), '(default_ini)\n', (11469, 11482), False, 'from io import StringIO\n'), ((12159, 12177), 'io.StringIO', 'StringIO', (['site_ini'], {}), '(site_ini)\n', (12167, 12177), False, 'from io import StringIO\n'), ((12841, 12858), 'io.StringIO', 'StringIO', (['job_ini'], {}), '(job_ini)\n', (12849, 12858), False, 'from io import StringIO\n'), ((12871, 12919), 'logging.debug', 'debug', (["('Job options read from %s' % job_ini_path)"], {}), "('Job options read from %s' % job_ini_path)\n", (12876, 12919), False, 'from logging import debug, error, info\n'), ((13658, 13680), 'io.StringIO', 'StringIO', (['job_type_ini'], {}), '(job_type_ini)\n', (13666, 13680), False, 'from io import StringIO\n'), ((13693, 13751), 'logging.debug', 'debug', (["('Job type options read from %s' % job_type_ini_path)"], {}), "('Job type options read from %s' % job_type_ini_path)\n", (13698, 13751), False, 'from logging import debug, error, info\n'), ((2683, 2712), 'logging.debug', 'debug', (["('an option: %s' % item)"], {}), "('an option: %s' % item)\n", (2688, 2712), False, 'from logging import debug, error, info\n'), ((4137, 4178), 'logging.debug', 'debug', (["('Default: %s = %s' % (item, value))"], {}), "('Default: %s = %s' % (item, value))\n", (4142, 4178), False, 'from logging import debug, error, info\n'), ((4213, 4272), 'logging.info', 'info', (["('Option (%s): %s = %s' % (option_source, item, value))"], {}), "('Option (%s): %s = %s' % (option_source, item, value))\n", (4217, 4272), False, 'from logging import debug, error, info\n'), ((5741, 5779), 're.split', 're.split', (['"""[\\\\s,\\\\(\\\\)\\\\[\\\\]]*"""', 'value'], {}), "('[\\\\s,\\\\(\\\\)\\\\[\\\\]]*', value)\n", (5749, 5779), False, 'import re\n'), ((8034, 8055), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (8051, 8055), False, 'import logging\n'), ((11583, 11643), 'logging.debug', 'debug', (['"""Default options not found! Something is very wrong."""'], {}), "('Default options not found! Something is very wrong.')\n", (11588, 11643), False, 'from logging import debug, error, info\n'), ((11670, 11694), 'io.StringIO', 'StringIO', (['"""[defaults]\n"""'], {}), "('[defaults]\\n')\n", (11678, 11694), False, 'from io import StringIO\n'), ((12278, 12324), 'logging.debug', 'debug', (['"""No site options found; using defaults"""'], {}), "('No site options found; using defaults')\n", (12283, 12324), False, 'from logging import debug, error, info\n'), ((12348, 12375), 'io.StringIO', 'StringIO', (['"""[site_config]\n"""'], {}), "('[site_config]\\n')\n", (12356, 12375), False, 'from io import StringIO\n'), ((13020, 13065), 'logging.debug', 'debug', (['"""No job options found; using defaults"""'], {}), "('No job options found; using defaults')\n", (13025, 13065), False, 'from logging import debug, error, info\n'), ((13088, 13114), 'io.StringIO', 'StringIO', (['"""[job_config]\n"""'], {}), "('[job_config]\\n')\n", (13096, 13114), False, 'from io import StringIO\n'), ((13852, 13952), 'logging.error', 'error', (['("Job type \'%s\' specified but options file \'%s\' not found" % (job_type,\n job_type_ini_path))'], {}), '("Job type \'%s\' specified but options file \'%s\' not found" % (job_type,\n job_type_ini_path))\n', (13857, 13952), False, 'from logging import debug, error, info\n'), ((13994, 14020), 'io.StringIO', 'StringIO', (['"""[job_config]\n"""'], {}), "('[job_config]\\n')\n", (14002, 14020), False, 'from io import StringIO\n'), ((2894, 2932), 'logging.debug', 'debug', (["('a custom -o option: %s' % item)"], {}), "('a custom -o option: %s' % item)\n", (2899, 2932), False, 'from logging import debug, error, info\n'), ((3116, 3148), 'logging.debug', 'debug', (["('a job option: %s' % item)"], {}), "('a job option: %s' % item)\n", (3121, 3148), False, 'from logging import debug, error, info\n'), ((3311, 3348), 'logging.debug', 'debug', (["('a job_type option: %s' % item)"], {}), "('a job_type option: %s' % item)\n", (3316, 3348), False, 'from logging import debug, error, info\n'), ((3513, 3546), 'logging.debug', 'debug', (["('a site option: %s' % item)"], {}), "('a site option: %s' % item)\n", (3518, 3546), False, 'from logging import debug, error, info\n'), ((3675, 3704), 'logging.debug', 'debug', (["('a default: %s' % item)"], {}), "('a default: %s' % item)\n", (3680, 3704), False, 'from logging import debug, error, info\n'), ((3861, 3899), 'logging.debug', 'debug', (["('unspecified option: %s' % item)"], {}), "('unspecified option: %s' % item)\n", (3866, 3899), False, 'from logging import debug, error, info\n')] |
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 <NAME>, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import time
import re
# you may use urllib to encode data appropriately
import urllib.parse
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
self.socket = None
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return None
def general_parser(self, data):
parseData = data.replace("/r","")
parseData = data.split("\n")
return parseData
def get_code(self, data):
statusCode = int(data[0].split(" ")[1]) #returns status code of response.
return statusCode
def get_headers(self,data, urlPath):
htmlTagIndex = data.find("\r\n\r\n")
if(htmlTagIndex == -1):
htmlTagIndex = 0
header = data[:htmlTagIndex]
header += "\nLocation: "+urlPath
return header
def get_body(self, data):
body = ""
htmlTagIndex = data.find("\r\n\r\n")
body = data[htmlTagIndex:]
return body
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self):
buffer = bytearray()
done = False
while not done:
part = self.socket.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
decodedBody = buffer.decode('utf-8')
return decodedBody
def GET(self, url, args=None):
domainName, urlPath, urlQuery, port = self.parseURL(url)
self.connect(domainName, port)
fakeUserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
header = "GET "+urlPath+urlQuery+" HTTP/1.1\r\nHost: "+domainName+"\r\nAccept: */*\nUser-Agent: "+fakeUserAgent+"\r\n\r\n"
self.sendall(header)
#print("###GET DATA SENT###\nDomain: {}\nPath: {}\nQuery: {}\nPort: {}\nHeader: {}\n".format(domainName, urlPath, urlQuery, port, header))
print("###GET DATA SENT###\n"+header)
returnData = self.recvall()
parseData = self.general_parser(returnData)
statusCode = self.get_code(parseData)
htmlBody = self.get_body(returnData)
htmlHeader = self.get_headers(returnData, urlPath)
print("###GET DATA RECIEVED###\n"+htmlBody)
self.close()
return HTTPResponse(statusCode, htmlBody)
def parseURL(self, url):
domain = url
path = ""
query = ""
slashIndex = url.find("//")
if(slashIndex != -1):
domain = url[slashIndex+2:]
pathStartIndex = domain.find("/")
if(pathStartIndex != -1):
path = domain[pathStartIndex:]
if(path == ""):
path = "/"
queryIndex = path.find("?")
if(queryIndex != -1):
query = path[queryIndex:]
path = path[:queryIndex]
if(pathStartIndex != -1):
domain = domain[:pathStartIndex]
try:
port = int(domain.split(":")[1])
domain = domain.split(":")[0]
except:
port = 80
return domain, path, query, port
def parsePostArgs(self, args):
postBody = ""
if(args == None):
postBody = ""
else:
for key in args.keys():
postBody += "{}={}&".format(key, args[key])
return postBody, len(postBody)
def POST(self, url, args=None):
#start_time = time.time()
postBody, postBodyLen = self.parsePostArgs(args)
domainName, urlPath, urlQuery, port = self.parseURL(url)
self.connect(domainName, port)
fakeUserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
header = "POST {} HTTP/1.1\nHost: {}\nConnection: keep-alive\nAccept: */*\nOrigin: {}\nUser-Agent: {}\nAccept-Encoding: gzip, deflate\nAccept-Language: en-US;q=0.9\nContent-Type: application/x-www-form-urlencoded; charset=UTF-8\nContent-Length: {}\r\n\r\n{}".format(urlPath+urlQuery,domainName,url,fakeUserAgent,postBodyLen,postBody)
self.sendall(header)
returnData = self.recvall()
print("###POST DATA SENT###\n"+header)
#print("#####SENT DATA#####: \n"+header)
parseData = self.general_parser(returnData)
statusCode = self.get_code(parseData)
htmlBody = self.get_body(returnData)
htmlHeader = self.get_headers(returnData, urlPath)
print("###POST DATA RECIEVED###: \n"+htmlBody)
self.close()
return HTTPResponse(statusCode, htmlBody)
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
| [
"socket.socket",
"sys.exit"
] | [((1286, 1335), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1299, 1335), False, 'import socket\n'), ((6115, 6126), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6123, 6126), False, 'import sys\n')] |
import ROOT,sys
from larlite import larlite as fmwk1
from larcv import larcv as fmwk2
from ROOT import handshake
io1=fmwk1.storage_manager(fmwk1.storage_manager.kBOTH)
io1.add_in_filename(sys.argv[1])
io1.set_out_filename('boke.root')
io1.open()
io2=fmwk2.IOManager(fmwk2.IOManager.kREAD)
io2.add_in_file(sys.argv[2])
io2.initialize()
hs=handshake.HandShaker()
ctr=0
while io1.next_event() and io2.read_entry(ctr):
ev_pfpart = io1.get_data(fmwk1.data.kPFParticle, "dl")
ev_vertex = io1.get_data(fmwk1.data.kVertex, "dl")
ev_shower = io1.get_data(fmwk1.data.kShower, "dl")
ev_track = io1.get_data(fmwk1.data.kTrack, "dl")
ev_cluster = io1.get_data(fmwk1.data.kCluster, "dl")
ev_hit = io1.get_data(fmwk1.data.kHit, "dl")
ev_ass = io1.get_data(fmwk1.data.kAssociation,"dl")
ev_hit_in = io1.get_data(fmwk1.data.kHit, "gaushit")
ev_pgraph = io2.get_data(fmwk2.kProductPGraph,'test')
ev_pixel2d = io2.get_data(fmwk2.kProductPixel2D,'test_ctor')
hs.pixel_distance_threshold(1.)
hs.set_larlite_pointers(ev_pfpart, ev_vertex,
ev_shower, ev_track,
ev_cluster, ev_hit,
ev_ass)
hs.construct(ev_pgraph, ev_pixel2d, ev_hit_in)
io1.set_id(io1.run_id(), io1.subrun_id(), io1.event_id())
#io1.next_event()
#io1.go_to()
#io2.read_entry()
#io1.save_entry()
ctr+=1
io1.close()
io2.finalize()
| [
"ROOT.handshake.HandShaker",
"larcv.larcv.IOManager",
"larlite.larlite.storage_manager"
] | [((118, 168), 'larlite.larlite.storage_manager', 'fmwk1.storage_manager', (['fmwk1.storage_manager.kBOTH'], {}), '(fmwk1.storage_manager.kBOTH)\n', (139, 168), True, 'from larlite import larlite as fmwk1\n'), ((252, 290), 'larcv.larcv.IOManager', 'fmwk2.IOManager', (['fmwk2.IOManager.kREAD'], {}), '(fmwk2.IOManager.kREAD)\n', (267, 290), True, 'from larcv import larcv as fmwk2\n'), ((341, 363), 'ROOT.handshake.HandShaker', 'handshake.HandShaker', ([], {}), '()\n', (361, 363), False, 'from ROOT import handshake\n')] |
import RPi.GPIO as gpio
from enum import Enum
import time
from GpioMode import GpioMode
from UltrasonicSensor import UltrasonicSensor
class UltrasonicSensorSet:
def __init__(self, *args:UltrasonicSensor):
"""
:param args: UltrasonicSensor objects
"""
self.ussSet = args
def getDistances(self):
"""
:return: list of distances of all UltrasonicSensors in order of how passed in constructor
"""
distances = []
for uss in self.ussSet:
distances.append(uss.getDistance())
return distances
def cleanup(self):
gpio.cleanup()
print("GPIO cleaned up") | [
"RPi.GPIO.cleanup"
] | [((619, 633), 'RPi.GPIO.cleanup', 'gpio.cleanup', ([], {}), '()\n', (631, 633), True, 'import RPi.GPIO as gpio\n')] |
# Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import sh
from string import Template
class CassandraClientInitializer(object):
"""
Create a new initializer
Param user The user login for the git repo
"""
def __init__(self, system):
self.template_dir = None
self.template_repo = None
self.container_data_dir = CassandraClientConfig.data_directory
self.container_log_dir = CassandraClientConfig.log_directory
"""
Generate a new hostname
"""
def new_host_name(self, instance_id):
return 'cassandra_client' + str(instance_id)
"""
Start the service on the containers.
"""
def _execute_service(self, containers, entry_point, fabric, cmd):
return fabric.cmd(containers,
'/service/sbin/startnode %s %s' % (cmd, entry_point['cassandra_url']))
def start_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "start")
def restart_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "restart")
def stop_service(self, containers, entry_point, fabric):
return self._execute_service(containers, entry_point, fabric, "stop")
def _generate_config_dir(self, uuid):
return 'cassandra_client' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
return []
def get_total_instances(self, num_instances, layers):
"""
Get total number of instances.
"""
instances = []
for i in range(num_instances):
instances.append('cassandra-client')
return instances
"""
Generate a new configuration
"""
def generate(self, num):
return CassandraClientConfig(num)
def _apply_cassandra(self, host_dir, entry_point, config, container):
yaml_in_file = open(self.template_dir + '/cassandra.yaml.template', 'r')
yaml_out_file = open(host_dir + '/cassandra.yaml', 'w+')
# Now make the changes to the template file.
changes = { "LOCAL_ADDRESS":container['data_ip'],
"DATA_DIR":config.data_directory,
"CACHE_DIR":config.cache_directory,
"COMMIT_DIR":config.commit_directory,
"SEEDS":entry_point['cassandra_url']}
for line in yaml_in_file:
s = Template(line).substitute(changes)
yaml_out_file.write(s)
yaml_out_file.close()
yaml_in_file.close()
def _apply_titan(self, host_dir, storage_entry, container):
in_file = open(self.template_dir + '/titan.properties', 'r')
out_file = open(host_dir + '/titan.properties', 'w+')
changes = { "BACKEND":"cassandrathrift",
"DB":container['args']['db'],
"IP":storage_entry['seed']}
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
out_file.close()
in_file.close()
def _find_cassandra_storage(self, containers):
"""
Find a Cassandra compatible storage entry.
"""
for c in containers:
for s in c['storage']:
if s['type'] == 'cassandra':
return s
"""
Apply the configuration to the instances
"""
def apply(self, config, containers):
entry_point = { 'type' : 'cassandra-client' }
entry_point['ip'] = containers[0]['manage_ip']
# Get the storage information.
storage_entry = self._find_cassandra_storage(containers)
if not storage_entry:
# The Cassandra client is currently only compatible with a
# Cassandra backend. So just return an error.
return None, None
# Otherwise record the storage type and get the seed node.
entry_point['cassandra_url'] = storage_entry['seed']
# Create a new configuration directory, and place
# into the template directory.
config_dirs = []
try:
host_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', host_dir)
except:
sys.stderr.write('could not create config dir ' + host_dir)
self._apply_cassandra(host_dir, entry_point, config, containers[0])
# See if we need to apply
if 'titan' in storage_entry:
self._apply_titan(host_dir, storage_entry, containers[0])
out_file = open(host_dir + '/servers', 'w+')
out_file.write("%s %s" % (storage_entry['titan']['ip'], 'rexserver'))
out_file.close
# The config dirs specifies what to transfer over. We want to
# transfer over specific files into a directory.
for c in containers:
config_dirs.append([c['container'],
host_dir + '/*',
config.config_directory])
except IOError as err:
sys.stderr.write('' + str(err))
return config_dirs, entry_point
class CassandraClientConfig(object):
data_directory = '/service/data/main/'
log_directory = '/service/data/logs/'
commit_directory = '/service/data/commits/'
cache_directory = '/service/data/cache/'
config_directory = '/service/conf/cassandra/'
def __init__(self, num):
self.num = num
self.data_directory = CassandraClientConfig.data_directory
self.commit_directory = CassandraClientConfig.commit_directory
self.cache_directory = CassandraClientConfig.cache_directory
self.log_directory = CassandraClientConfig.log_directory
self.config_directory = CassandraClientConfig.config_directory
| [
"sys.stderr.write",
"string.Template",
"sh.mkdir"
] | [((5170, 5194), 'sh.mkdir', 'sh.mkdir', (['"""-p"""', 'host_dir'], {}), "('-p', host_dir)\n", (5178, 5194), False, 'import sh\n'), ((3390, 3404), 'string.Template', 'Template', (['line'], {}), '(line)\n', (3398, 3404), False, 'from string import Template\n'), ((3909, 3923), 'string.Template', 'Template', (['line'], {}), '(line)\n', (3917, 3923), False, 'from string import Template\n'), ((5231, 5290), 'sys.stderr.write', 'sys.stderr.write', (["('could not create config dir ' + host_dir)"], {}), "('could not create config dir ' + host_dir)\n", (5247, 5290), False, 'import sys\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
from setuptools import setup, find_packages
setup(
name='fn_mcafee_esm',
version='1.0.2',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description="Resilient Circuits Components for 'fn_mcafee_esm'",
long_description="""The McAfee ESM integration with the Resilient platform allows for the escalation and enrichment
of cases between McAfee and the Resilient platform. The integration includes a poller and 6 functions. The
returned results can be used to make customized updates to the Resilient platform, such as updating incidents,
data tables and so on. The integration can also be used to make updates to McAfee ESM cases.""",
install_requires=[
'resilient_circuits>=30.0.0',
'resilient-lib'
],
packages=find_packages(),
include_package_data=True,
platforms='any',
classifiers=[
'Programming Language :: Python',
],
entry_points={
"resilient.circuits.components": [
"McafeeEsmGetCaseDetailFunctionComponent = fn_mcafee_esm.components.mcafee_esm_get_case_detail:FunctionComponent",
"McafeeEsmGetListOfCasesFunctionComponent = fn_mcafee_esm.components.mcafee_esm_get_list_of_cases:FunctionComponent",
"McafeeEsmGetCaseEvenstsDetailFunctionComponent = fn_mcafee_esm.components.mcafee_esm_get_case_events_detail:FunctionComponent",
"McafeeEsmEditCaseFunctionComponent = fn_mcafee_esm.components.mcafee_esm_edit_case:FunctionComponent",
"McafeeEsmGetTriggeredAlarms = fn_mcafee_esm.components.mcafee_esm_get_triggered_alarms:FunctionComponent",
"McafeeEsmQueryLogs = fn_mcafee_esm.components.mcafee_esm_query:FunctionComponent",
"McafeeEsmCasePolling = fn_mcafee_esm.components.mcafee_esm_case_polling:ESM_CasePolling"
],
"resilient.circuits.configsection": ["gen_config = fn_mcafee_esm.util.config:config_section_data"],
"resilient.circuits.customize": ["customize = fn_mcafee_esm.util.customize:customization_data"],
"resilient.circuits.selftest": ["selftest = fn_mcafee_esm.util.selftest:selftest_function"]
}
) | [
"setuptools.find_packages"
] | [((908, 923), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (921, 923), False, 'from setuptools import setup, find_packages\n')] |
from collections import deque
from enum import Enum
import logging
from .constants.codes import CatCode
from .constants.parameters import param_to_instr
from .constants.specials import special_to_instr
from .constants.instructions import (Instructions, if_instructions,
unexpanded_cs_instructions)
from .constants import control_sequences
from .tokens import InstructionToken, BaseToken
from .utils import get_unique_id, LogicError
from .lexer import (Lexer,
control_sequence_lex_type, char_cat_lex_type)
from .macro import parse_replacement_text, parse_parameter_text
logger = logging.getLogger(__name__)
short_hand_def_type_to_token_instr = {
Instructions.char_def.value: Instructions.char_def_token,
Instructions.math_char_def.value: Instructions.math_char_def_token,
Instructions.count_def.value: Instructions.count_def_token,
Instructions.dimen_def.value: Instructions.dimen_def_token,
Instructions.skip_def.value: Instructions.skip_def_token,
Instructions.mu_skip_def.value: Instructions.mu_skip_def_token,
Instructions.toks_def.value: Instructions.toks_def_token,
Instructions.font.value: Instructions.font_def_token,
}
literals_map = {
('<', CatCode.other): Instructions.less_than,
('>', CatCode.other): Instructions.greater_than,
('=', CatCode.other): Instructions.equals,
('+', CatCode.other): Instructions.plus_sign,
('-', CatCode.other): Instructions.minus_sign,
('0', CatCode.other): Instructions.zero,
('1', CatCode.other): Instructions.one,
('2', CatCode.other): Instructions.two,
('3', CatCode.other): Instructions.three,
('4', CatCode.other): Instructions.four,
('5', CatCode.other): Instructions.five,
('6', CatCode.other): Instructions.six,
('7', CatCode.other): Instructions.seven,
('8', CatCode.other): Instructions.eight,
('9', CatCode.other): Instructions.nine,
('\'', CatCode.other): Instructions.single_quote,
('"', CatCode.other): Instructions.double_quote,
('`', CatCode.other): Instructions.backtick,
('.', CatCode.other): Instructions.point,
(',', CatCode.other): Instructions.comma,
('A', CatCode.other): Instructions.a,
('B', CatCode.other): Instructions.b,
('C', CatCode.other): Instructions.c,
('D', CatCode.other): Instructions.d,
('E', CatCode.other): Instructions.e,
('F', CatCode.other): Instructions.f,
('A', CatCode.letter): Instructions.a,
('B', CatCode.letter): Instructions.b,
('C', CatCode.letter): Instructions.c,
('D', CatCode.letter): Instructions.d,
('E', CatCode.letter): Instructions.e,
('F', CatCode.letter): Instructions.f,
}
non_active_letters_map = {
'a': Instructions.non_active_uncased_a,
'b': Instructions.non_active_uncased_b,
'c': Instructions.non_active_uncased_c,
'd': Instructions.non_active_uncased_d,
'e': Instructions.non_active_uncased_e,
'f': Instructions.non_active_uncased_f,
'g': Instructions.non_active_uncased_g,
'h': Instructions.non_active_uncased_h,
'i': Instructions.non_active_uncased_i,
'j': Instructions.non_active_uncased_j,
'k': Instructions.non_active_uncased_k,
'l': Instructions.non_active_uncased_l,
'm': Instructions.non_active_uncased_m,
'n': Instructions.non_active_uncased_n,
'o': Instructions.non_active_uncased_o,
'p': Instructions.non_active_uncased_p,
'q': Instructions.non_active_uncased_q,
'r': Instructions.non_active_uncased_r,
's': Instructions.non_active_uncased_s,
't': Instructions.non_active_uncased_t,
'u': Instructions.non_active_uncased_u,
'v': Instructions.non_active_uncased_v,
'w': Instructions.non_active_uncased_w,
'x': Instructions.non_active_uncased_x,
'y': Instructions.non_active_uncased_y,
'z': Instructions.non_active_uncased_z,
'A': Instructions.non_active_uncased_a,
'B': Instructions.non_active_uncased_b,
'C': Instructions.non_active_uncased_c,
'D': Instructions.non_active_uncased_d,
'E': Instructions.non_active_uncased_e,
'F': Instructions.non_active_uncased_f,
'G': Instructions.non_active_uncased_g,
'H': Instructions.non_active_uncased_h,
'I': Instructions.non_active_uncased_i,
'J': Instructions.non_active_uncased_j,
'K': Instructions.non_active_uncased_k,
'L': Instructions.non_active_uncased_l,
'M': Instructions.non_active_uncased_m,
'N': Instructions.non_active_uncased_n,
'O': Instructions.non_active_uncased_o,
'P': Instructions.non_active_uncased_p,
'Q': Instructions.non_active_uncased_q,
'R': Instructions.non_active_uncased_r,
'S': Instructions.non_active_uncased_s,
'T': Instructions.non_active_uncased_t,
'U': Instructions.non_active_uncased_u,
'V': Instructions.non_active_uncased_v,
'W': Instructions.non_active_uncased_w,
'X': Instructions.non_active_uncased_x,
'Y': Instructions.non_active_uncased_y,
'Z': Instructions.non_active_uncased_z,
}
category_map = {
CatCode.space: Instructions.space,
CatCode.begin_group: Instructions.left_brace,
CatCode.end_group: Instructions.right_brace,
CatCode.active: Instructions.active_character,
CatCode.parameter: Instructions.parameter,
CatCode.math_shift: Instructions.math_shift,
CatCode.align_tab: Instructions.align_tab,
CatCode.superscript: Instructions.superscript,
CatCode.subscript: Instructions.subscript,
}
def get_char_cat_pair_instruction(char, cat):
if cat in (CatCode.letter, CatCode.other) and (char, cat) in literals_map:
return literals_map[(char, cat)]
elif cat != CatCode.active and char in non_active_letters_map:
return non_active_letters_map[char]
elif cat in (CatCode.letter, CatCode.other):
return Instructions.misc_char_cat_pair
elif cat in category_map:
return category_map[cat]
else:
raise ValueError(f'Confused by char-cat pair: ({char}, {cat})')
def make_char_cat_pair_instruction_token_direct(char, cat, *args, **kwargs):
"""Make a char-cat instruction token straight from a pair.
"""
instruction = get_char_cat_pair_instruction(char, cat)
value = {'char': char, 'cat': cat, 'lex_type': char_cat_lex_type}
token = InstructionToken(
instruction,
value=value,
*args, **kwargs,
)
return token
def make_char_cat_pair_instruction_token(char_cat_lex_token):
v = char_cat_lex_token.value
return make_char_cat_pair_instruction_token_direct(
v['char'], v['cat'],
parents=[char_cat_lex_token]
)
def make_parameter_control_sequence_instruction(name, parameter, instruction):
instr_tok = make_primitive_control_sequence_instruction(name, instruction)
# This is what is used to look up the parameter value. The 'name' just
# records the name of the control sequence used to refer to this parameter.
instr_tok.value['parameter'] = parameter
return instr_tok
def make_special_control_sequence_instruction(name, special, instruction):
instr_tok = make_primitive_control_sequence_instruction(name, instruction)
# This is what is used to look up the special value. The 'name' just
# records the name of the control sequence used to refer to this special.
instr_tok.value['special'] = special
return instr_tok
def make_primitive_control_sequence_instruction(name, instruction):
return InstructionToken(
instruction,
value={'name': name, 'lex_type': control_sequence_lex_type},
parents=[],
)
def make_unexpanded_control_sequence_instruction(name, parents):
if len(name) == 1:
instruction = Instructions.unexpanded_control_symbol
else:
instruction = Instructions.unexpanded_control_word
return InstructionToken(
instruction,
value={'name': name, 'lex_type': control_sequence_lex_type},
parents=parents,
)
def lex_token_to_instruction_token(lex_token):
# If we have a char-cat pair, we must type it to its terminal version,
if lex_token.type == char_cat_lex_type:
return make_char_cat_pair_instruction_token(lex_token)
elif lex_token.type == control_sequence_lex_type:
return make_unexpanded_control_sequence_instruction(
lex_token.value, parents=[lex_token])
# Aren't any other types of lexed tokens.
else:
raise LogicError(f"Unknown lex token type: '{lex_token}'")
def make_macro_token(name, replacement_text, parameter_text,
parents,
def_type=None, prefixes=None):
if prefixes is None:
prefixes = set()
return InstructionToken(
Instructions.macro,
value={'name': name,
'prefixes': prefixes,
'replacement_text': parse_replacement_text(replacement_text),
'parameter_text': parse_parameter_text(parameter_text),
'def_type': def_type,
'lex_type': control_sequence_lex_type},
parents=parents,
)
class NoSuchControlSequence(Exception):
def __init__(self, name):
self.name = name
class ControlSequenceType(Enum):
macro = 1
let_character = 2
parameter = 3
primitive = 4
font = 5
special = 6
class RouteToken(BaseToken):
def __init__(self, type_, value):
if type_ not in ControlSequenceType:
raise ValueError('Route token {type_} not a ControlSequenceType')
super().__init__(type_, value)
class CSRouter:
def __init__(self,
param_control_sequences,
special_control_sequences,
primitive_control_sequences,
enclosing_scope=None):
self.control_sequences = {}
self.macros = {}
self.let_chars = {}
self.parameters = {}
self.specials = {}
self.primitives = {}
self.font_ids = {}
self.enclosing_scope = enclosing_scope
for name, tpl in param_control_sequences.items():
parameter, instr = tpl
self._set_parameter(name, parameter, instr)
for name, tpl in special_control_sequences.items():
special, instr = tpl
self._set_special(name, special, instr)
for name, instruction in primitive_control_sequences.items():
self._set_primitive(name, instruction)
@classmethod
def default_initial(cls):
# Router needs a map from a control sequence name, to the parameter and
# the instruction type of the parameter (integer, dimen and so on).
params = {
n: (p, param_to_instr[p])
for n, p in control_sequences.param_control_sequences.items()
}
specials = {
n: (p, special_to_instr[p])
for n, p in control_sequences.special_control_sequences.items()
}
primitives = control_sequences.primitive_control_sequences
return cls(
param_control_sequences=params,
special_control_sequences=specials,
primitive_control_sequences=primitives,
enclosing_scope=None)
@classmethod
def default_local(cls, enclosing_scope):
return cls(param_control_sequences={},
special_control_sequences={},
primitive_control_sequences={},
enclosing_scope=enclosing_scope)
def _name_means_instruction(self, name, instructions):
try:
tok = self.lookup_control_sequence(name, parents=None)
except NoSuchControlSequence:
return False
if isinstance(tok, InstructionToken):
return tok.instruction in instructions
else:
return False
def name_means_delimit_condition(self, name):
"""Test if a control sequence corresponds to an instruction to split
blocks of conditional text. Concretely, this means a control sequence
is '\else' or '\or'."""
return self._name_means_instruction(name, (Instructions.else_,
Instructions.or_))
def name_means_end_condition(self, name):
"""Test if a control sequence corresponds to an instruction to split
blocks of conditional text. Concretely, this means a control sequence
is '\fi'."""
return self._name_means_instruction(name, (Instructions.end_if,))
def name_means_start_condition(self, name):
"""Test if a control sequence corresponds to an instruction to split
blocks of conditional text. Concretely, this means a control sequence
is one of '\ifnum', '\ifcase' and so on."""
return self._name_means_instruction(name, if_instructions)
def lookup_canonical_control_sequence(self, name):
route_token = self._lookup_route_token(name)
return self._resolve_route_token_to_raw_value(route_token)
def lookup_control_sequence(self, name, parents):
canon_token = self.lookup_canonical_control_sequence(name)
token = canon_token.copy(parents=parents)
# Amend token to give it the proper control sequence name.
if isinstance(token.value, dict) and 'name' in token.value:
token.value['name'] = name
return token
def set_macro(self, name, replacement_text, parameter_text,
def_type, prefixes,
parents):
if prefixes is None:
prefixes = set()
route_id = self._set_route_token(name, ControlSequenceType.macro)
macro_token = make_macro_token(name,
replacement_text=replacement_text,
parameter_text=parameter_text,
def_type=def_type, prefixes=prefixes,
parents=parents)
self.macros[route_id] = macro_token
def do_short_hand_definition(self, name, def_type, code,
target_parents, cmd_parents):
def_token_instr = short_hand_def_type_to_token_instr[def_type]
instr_token = InstructionToken(
def_token_instr,
value=code,
parents=target_parents,
)
self.set_macro(name, replacement_text=[instr_token],
parameter_text=[], def_type='sdef', prefixes=None,
parents=cmd_parents)
def define_new_font_control_sequence(self, name, font_id,
cmd_parents, target_parents):
# Note, this token just records the font id; the information
# is stored in the global font state, because it has internal
# state that might be modified later; we need to know where to get
# at it.
self.do_short_hand_definition(
name=name,
def_type=Instructions.font.value,
code=font_id,
cmd_parents=cmd_parents,
target_parents=target_parents,
)
def do_let_assignment(self, new_name, target_token):
if target_token.value['lex_type'] == control_sequence_lex_type:
target_name = target_token.value['name']
self._copy_control_sequence(target_name, new_name)
elif target_token.value['lex_type'] == char_cat_lex_type:
self._set_let_character(new_name, target_token)
else:
raise ValueError(f'Let target does not look like a token: '
f'{target_token}')
def _set_primitive(self, name, instruction):
# Get a route from the name to a primitive.
route_id = self._set_route_token(name, ControlSequenceType.primitive)
# Make that route resolve to the instruction token.
token = make_primitive_control_sequence_instruction(
name=name, instruction=instruction)
self.primitives[route_id] = token
def _set_parameter(self, name, parameter, instr):
# Get a route from the name to a parameter.
route_id = self._set_route_token(name, ControlSequenceType.parameter)
# Make that route resolve to the parameter token.
token = make_parameter_control_sequence_instruction(
name=name, parameter=parameter, instruction=instr)
self.parameters[route_id] = token
def _set_special(self, name, special, instr):
# Get a route from the name to a special.
route_id = self._set_route_token(name, ControlSequenceType.special)
# Make that route resolve to the special token.
token = make_special_control_sequence_instruction(
name=name, special=special, instruction=instr)
self.specials[route_id] = token
def _copy_control_sequence(self, target_name, new_name):
# Make a new control sequence that is routed to the same spot as the
# current one.
target_route_token = self._lookup_route_token(target_name)
self.control_sequences[new_name] = target_route_token
def _set_let_character(self, name, char_cat_token):
route_id = self._set_route_token(name,
ControlSequenceType.let_character)
self.let_chars[route_id] = char_cat_token
def _set_route_token(self, name, cs_type):
route_id = get_unique_id()
route_token = RouteToken(cs_type, route_id)
self.control_sequences[name] = route_token
return route_id
def _lookup_route_token(self, name):
# If the route token exists in this scope, return it.
if name in self.control_sequences:
route_token = self.control_sequences[name]
# Otherwise, if there's an enclosing scope, ask it for it.
elif self.enclosing_scope is not None:
route_token = self.enclosing_scope._lookup_route_token(name)
# If we are the outermost scope, the control sequence is unknown.
else:
raise NoSuchControlSequence(name)
return route_token
def _resolve_route_token_to_raw_value(self, r):
type_ = r.type
route_id = r.value
value_maps_map = {
ControlSequenceType.parameter: self.parameters,
ControlSequenceType.special: self.specials,
ControlSequenceType.primitive: self.primitives,
ControlSequenceType.macro: self.macros,
ControlSequenceType.let_character: self.let_chars,
ControlSequenceType.font: self.font_ids,
}
value_map = value_maps_map[type_]
try:
v = value_map[route_id]
except KeyError:
v = self.enclosing_scope._resolve_route_token_to_raw_value(r)
return v
class Instructioner:
def __init__(self, lexer, resolve_cs_func):
self.lexer = lexer
self.resolve_control_sequence = resolve_cs_func
# TODO: Use GetBuffer.
self.output_buffer = deque()
@classmethod
def from_string(cls, resolve_cs_func, *args, **kwargs):
lexer = Lexer.from_string(*args, **kwargs)
return cls(lexer, resolve_cs_func=resolve_cs_func)
def replace_tokens_on_input(self, tokens):
if logger.isEnabledFor(logging.DEBUG):
if len(tokens) == 1:
s = tokens[0]
elif len(tokens) > 3:
s = f'[{tokens[0]} … {tokens[-1]}]'
else:
s = tokens
logger.debug(f'Replacing "{s}" on input instruction queue')
self.output_buffer.extendleft(reversed(tokens))
def iter_unexpanded(self):
while True:
yield self.next_unexpanded()
def next_unexpanded(self):
retrieving = self.output_buffer
if retrieving:
t = self.output_buffer.popleft()
else:
new_lex_token = next(self.lexer)
t = lex_token_to_instruction_token(new_lex_token)
# if t.char_nr is not None and logger.isEnabledFor(logging.INFO):
# source = 'Retrieved' if retrieving else 'Read'
# if self.lexer.reader.current_buffer.name != 'plain.tex':
# logger.info(f'{source}: {t.get_position_str(self.lexer.reader)}')
return t
def next_expanded(self):
instr_tok = self.next_unexpanded()
# If the token is an unexpanded control sequence call, and expansion is
# not suppressed, then we must resolve the call:
# - A user control sequence will become a macro instruction token.
# - A \let character will become its character instruction token.
# - A primitive control sequence will become its instruction token.
# NOTE: I've made this mistake twice now: we can't make this resolution
# into a two-call process, where we resolve the token, put the resolved
# token on the input, then handle it in the next call. This is because,
# for example, \expandafter expects a single call to the banisher to
# both resolve *and* expand a macro. Basically this method must do a
# certain amount to a token in each call.
if instr_tok.instruction in unexpanded_cs_instructions:
name = instr_tok.value['name']
try:
instr_tok = self.resolve_control_sequence(name,
parents=[instr_tok])
except NoSuchControlSequence:
# Might be that we are parsing too far in a chunk, and just
# need to execute a command before this can be understood. Put
# the token back on the input, potentially to read again.
self.replace_tokens_on_input([instr_tok])
raise
return instr_tok
def advance_to_end(self, expand=True):
while True:
try:
if expand:
yield self.next_expanded()
else:
yield self.next_unexpanded()
except EOFError:
return
| [
"logging.getLogger",
"collections.deque"
] | [((638, 665), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (655, 665), False, 'import logging\n'), ((18927, 18934), 'collections.deque', 'deque', ([], {}), '()\n', (18932, 18934), False, 'from collections import deque\n')] |
import pathlib
from kronos_executor.execution_context import ExecutionContext
run_script = pathlib.Path(__file__).parent / "trivial_run.sh"
class TrivialExecutionContext(ExecutionContext):
scheduler_directive_start = ""
scheduler_directive_params = {}
scheduler_use_params = []
scheduler_cancel_head = "#!/bin/bash\nkill "
scheduler_cancel_entry = "{sequence_id} "
launcher_command = "mpirun"
launcher_params = {"num_procs": "-np "}
launcher_use_params = ["num_procs"]
def env_setup(self, job_config):
return "module load openmpi"
def submit_command(self, job_config, job_script_path, deps=[]):
return [str(run_script),
job_config['job_output_file'],
job_config['job_error_file'],
job_script_path]
Context = TrivialExecutionContext
| [
"pathlib.Path"
] | [((94, 116), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (106, 116), False, 'import pathlib\n')] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config[
'SQLALCHEMY_DATABASE_URI'] = 'postgres://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class UserData(db.Model):
__tablename__ = 'UserData'
Id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String(64))
Description = db.Column(db.String(256))
CreateDate = db.Column(db.DateTime)
def __init__(self
, Name
, Description
, CreateDate
):
self.Name = Name
self.Description = Description
self.CreateDate = CreateDate
if __name__ == '__main__':
manager.run()
| [
"flask_sqlalchemy.SQLAlchemy",
"flask_script.Manager",
"flask_migrate.Migrate",
"flask.Flask"
] | [((154, 169), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (159, 169), False, 'from flask import Flask\n'), ((270, 285), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (280, 285), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((296, 312), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {}), '(app, db)\n', (303, 312), False, 'from flask_migrate import Migrate, MigrateCommand\n'), ((324, 336), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (331, 336), False, 'from flask_script import Manager\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from absl import app
from absl import flags
import pandas as pd
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', None, 'A path to the dataset.')
flags.DEFINE_float('test_fraction', 0.2, 'A split fraction between [0.0, 1.0]')
def main(unused_args):
df = pd.read_csv(FLAGS.dataset, sep=',', dtype=object,
quoting=csv.QUOTE_NONE)
# Step 1. Shuffle the dataset.
df = df.sample(frac=1.0).reset_index(drop=True)
split_point = int(len(df) * FLAGS.test_fraction)
# Step 2. Split the dataset.
df_test = df.iloc[:split_point, :]
df_train = df.iloc[split_point:, :]
# Step 3. Materialize the datasets.
df_train.to_csv('train.csv', sep=',', quoting=csv.QUOTE_NONE,
index=False)
df_test.to_csv('test.csv', sep=',', quoting=csv.QUOTE_NONE,
index=False)
if __name__ == '__main__':
app.run(main)
| [
"absl.app.run",
"absl.flags.DEFINE_string",
"absl.flags.DEFINE_float",
"pandas.read_csv"
] | [((211, 273), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""', 'None', '"""A path to the dataset."""'], {}), "('dataset', None, 'A path to the dataset.')\n", (230, 273), False, 'from absl import flags\n'), ((274, 353), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""test_fraction"""', '(0.2)', '"""A split fraction between [0.0, 1.0]"""'], {}), "('test_fraction', 0.2, 'A split fraction between [0.0, 1.0]')\n", (292, 353), False, 'from absl import flags\n'), ((388, 461), 'pandas.read_csv', 'pd.read_csv', (['FLAGS.dataset'], {'sep': '""","""', 'dtype': 'object', 'quoting': 'csv.QUOTE_NONE'}), "(FLAGS.dataset, sep=',', dtype=object, quoting=csv.QUOTE_NONE)\n", (399, 461), True, 'import pandas as pd\n'), ((1007, 1020), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1014, 1020), False, 'from absl import app\n')] |
#
# Copyright 2022 DMetaSoul
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, Union, List, Tuple
from collections import OrderedDict
import torch
from torch import nn, Tensor
from transformers import AutoTokenizer, AutoModel, AutoConfig
from sentence_transformers import SentenceTransformer
class TextTransformerEncoder(nn.Sequential):
def __init__(self, model_name_or_path, device=None, max_seq_len=None):
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
sbert = SentenceTransformer(model_name_or_path, device=device)
if max_seq_len is not None:
sbert._first_module().max_seq_length = max_seq_len
super().__init__(sbert._modules)
self.to(device) # to device
self._device = device
self._max_len = sbert._first_module().max_seq_length
self._do_lower_case = sbert._first_module().do_lower_case
self._tokenizer = sbert.tokenizer
self._input_names = self._tokenizer.model_input_names
#self._tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
self._base_config = AutoConfig.from_pretrained(model_name_or_path)
#self._base_model = AutoModel.from_pretrained(model_name_or_path)
@property
def max_seq_len(self):
return self._max_len
@property
def do_lower_case(self):
return self._do_lower_case
@property
def config(self):
return self._base_config
@property
def input_names(self):
return self._input_names
@property
def output_names(self):
return ['sentence_embedding', 'token_embeddings']
@property
def input_axes(self):
dynamic_axes = {}
for name in self.input_names:
dynamic_axes[name] = {0: 'batch_size', 1: 'max_seq_len'}
return dynamic_axes
@property
def output_axes(self):
dynamic_axes = {}
dynamic_axes['sentence_embedding'] = {0: 'batch_size'}
dynamic_axes['token_embeddings'] = {0: 'batch_size', 1: 'max_seq_len'}
return dynamic_axes
def save(self, save_path):
self._tokenizer.save_pretrained(save_path)
self._base_config.save_pretrained(save_path)
def get_dummy_inputs(self, dummy=None, batch_size=1, device='cpu', return_tensors="pt"):
text = dummy if dummy is not None else (" ".join([self._tokenizer.unk_token]) * 128)
dummy_input = [text] * batch_size
features = self.tokenize(dummy_input)
inputs = {}
for name in self.input_names:
if return_tensors == "pt":
inputs[name] = features[name].to(device)
else:
inputs[name] = features[name].cpu().numpy()
return inputs
def tokenize(self, texts: List[str]):
if self._do_lower_case:
texts = [s.lower() for s in texts]
return self._tokenizer(texts, padding=True, truncation=True, return_tensors="pt", max_length=self._max_len)
def forward(self, input_ids: Tensor=None, token_type_ids: Tensor=None, attention_mask: Tensor=None, positions_ids: Tensor=None, *args, **kwargs):
inputs = {}
if 'input_ids' in self.input_names:
inputs['input_ids'] = input_ids
if 'attention_mask' in self.input_names:
inputs['attention_mask'] = attention_mask
if 'token_type_ids' in self.input_names:
inputs['token_type_ids'] = token_type_ids
if 'positions_ids' in self.input_names:
inputs['positions_ids'] = positions_ids
for module in self:
inputs = module(inputs)
ret = OrderedDict()
for name in self.output_names:
ret[name] = inputs[name].detach()
# normalize the sentence embedding
ret['sentence_embedding'] = torch.nn.functional.normalize(ret['sentence_embedding'], p=2, dim=1)
return ret
def encode(self, texts: List[str]):
if isinstance(texts, str):
texts = [texts]
features = self.tokenize(texts)
features = {k:v.to(self._device) for k,v in features.items()}
return self.forward(**features)
if __name__ == '__main__':
encoder = TextTransformerEncoder('bert-base-chinese', device='cuda:0')
embs = encoder.encode('hello world!')['sentence_embedding']
norm_embs = torch.nn.functional.normalize(embs, p=2, dim=1)
print(embs.size())
print(embs, norm_embs)
| [
"collections.OrderedDict",
"sentence_transformers.SentenceTransformer",
"transformers.AutoConfig.from_pretrained",
"torch.nn.functional.normalize",
"torch.cuda.is_available"
] | [((4858, 4905), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['embs'], {'p': '(2)', 'dim': '(1)'}), '(embs, p=2, dim=1)\n', (4887, 4905), False, 'import torch\n'), ((1054, 1108), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_name_or_path'], {'device': 'device'}), '(model_name_or_path, device=device)\n', (1073, 1108), False, 'from sentence_transformers import SentenceTransformer\n'), ((1652, 1698), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['model_name_or_path'], {}), '(model_name_or_path)\n', (1678, 1698), False, 'from transformers import AutoTokenizer, AutoModel, AutoConfig\n'), ((4155, 4168), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4166, 4168), False, 'from collections import OrderedDict\n'), ((4333, 4401), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (["ret['sentence_embedding']"], {'p': '(2)', 'dim': '(1)'}), "(ret['sentence_embedding'], p=2, dim=1)\n", (4362, 4401), False, 'import torch\n'), ((1001, 1026), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1024, 1026), False, 'import torch\n')] |
import requests
from urllib.parse import urlparse
from os.path import join
#TODO: Break into separate standard settings module
ROOT_URL = 'http://progdisc.club/~lethargilistic/proxy'
HEADERS = {'User-Agent': 'dcapi-wrap (https://github.com/lethargilistic/dcapi-wrap)'}
def set_url(url):
if urlparse(url):
ROOT_URL = url
else:
raise ValueError('The URL was not a URL')
def character(search):
search_url = join(ROOT_URL, 'character', str(search))
response = requests.get(search_url, headers=HEADERS)
if response.status_code != 200:
raise ConnectionError('API endpoint returned status '
+ str(response.status_code))
return response.json()
if __name__ == '__main__':
print(character(0))
print(character('Ai Haibara'))
| [
"urllib.parse.urlparse",
"requests.get"
] | [((296, 309), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (304, 309), False, 'from urllib.parse import urlparse\n'), ((491, 532), 'requests.get', 'requests.get', (['search_url'], {'headers': 'HEADERS'}), '(search_url, headers=HEADERS)\n', (503, 532), False, 'import requests\n')] |
from django.conf import settings
from django.conf.urls import url
from django.urls import LocalePrefixPattern, URLResolver, get_resolver, path
from TWLight.i18n.views import set_language
# Direct rip from django.conf.urls.i18n, but imports our local set_language
# from GitHub
def i18n_patterns(*urls, prefix_default_language=True):
"""
Add the language code prefix to every URL pattern within this function.
This may only be used in the root URLconf, not in an included URLconf.
"""
if not settings.USE_I18N:
return list(urls)
return [
URLResolver(
LocalePrefixPattern(prefix_default_language=prefix_default_language),
list(urls),
)
]
urlpatterns = [path("setlang/", set_language, name="set_language")]
| [
"django.urls.path",
"django.urls.LocalePrefixPattern"
] | [((732, 783), 'django.urls.path', 'path', (['"""setlang/"""', 'set_language'], {'name': '"""set_language"""'}), "('setlang/', set_language, name='set_language')\n", (736, 783), False, 'from django.urls import LocalePrefixPattern, URLResolver, get_resolver, path\n'), ((605, 673), 'django.urls.LocalePrefixPattern', 'LocalePrefixPattern', ([], {'prefix_default_language': 'prefix_default_language'}), '(prefix_default_language=prefix_default_language)\n', (624, 673), False, 'from django.urls import LocalePrefixPattern, URLResolver, get_resolver, path\n')] |
import random
from fq.agent.base import Agent
from fq.four_board import Move
from fq.four_types import Point
class RandomBot(Agent):
def select_move(self, game_state):
'''
Choose a random valid move
'''
candidates = []
for c in range(1, game_state.board.num_cols +1):
candidate = Point(row=len(game_state.board._grid_v2[c])+1,col=c)
if game_state.is_valid_move(Move.play(candidate),game_state.next_player) \
and game_state.board.is_legal_move(candidate):
candidates.append(candidate)
if len(candidates) == 0:
exit()
return Move.play(random.choice(candidates))
| [
"random.choice",
"fq.four_board.Move.play"
] | [((659, 684), 'random.choice', 'random.choice', (['candidates'], {}), '(candidates)\n', (672, 684), False, 'import random\n'), ((431, 451), 'fq.four_board.Move.play', 'Move.play', (['candidate'], {}), '(candidate)\n', (440, 451), False, 'from fq.four_board import Move\n')] |
import math
import numpy as np
import cv2
import json
import argparse
def augment_homogeneous(V, augment):
""" Augment a 3xN array of vectors into a 4xN array of homogeneous coordinates
Args:
v (np.array 3xN): Array of vectors
augment (float): The value to fill in for the W coordinate
Returns:
(np.array 4xN): New array of augmented vectors
"""
Vh = np.zeros((4, V.shape[1]))
Vh[0:3, :] = V[0:3, :]
Vh[3, :] = augment
return Vh
def batch_normalize_3d(V, w):
""" Normalize a 4xN array of vectors in their first three dimensions
Args:
V (np.array 4xN): Array of homogeneous coordinates
w (float): Value to fill in for w coordinate after normalization
Returns:
(np.array 4xN): New array of normalized vectors
"""
norms = np.linalg.norm(V[0:3, :], axis=0)
#norms = np.sqrt(np.sum(V[0:3,:]**2.0, 0))
N = np.copy(V)
for i in range(3):
N[i, :] /= norms
N[3, :] = w
return N
def batch_sphere_interior_intersect(P, V):
""" Compute intersections of a batch of rays against the unit sphere
In case of multiple intersections, the *last* intersection is returned
Args:
P (np.array 4xN): Array of ray origins
V (np.array 4xN): Array of ray directions
Returns:
(np.array N, np.array 4xN, np.array 4xN): Valid, intersections, normals
"""
P3 = P[0:3, :]
V3 = V[0:3, :]
# Parametrize ray as a function of t so that ray(t) = p + v*t
# Then solve for t' such that ||ray(t')||^2 = 1
# This resolves to a quadratic in t that can be solved w/ quadratic eq
A = np.sum(V3 * V3, 0) # = vx^2 + vy^2 + vz^2
B = 2.0 * np.sum(P3 * V3, 0) # = 2 * (x*vx + y*vy + z*vz)
C = np.sum(P3 * P3, 0) - 1.0 # = x^2 + y^2 + z^2 - 1
discriminant = B**2.0 - 4.0*A*C
valid_pts = discriminant >= 0.0
safe_discriminant = np.maximum(discriminant, 0.0)
# Use latest (largest t) intersection
t = (-B + np.sqrt(safe_discriminant)) / (2.0*A)
# t1 = (-B - np.sqrt(safe_discriminant)) / (2.0*A)
# t = np.maximum(t0, t1)
t[valid_pts == False] = 0.0
P_intersect = P + t*V
# sphere normals are just normalized intersection locations
N = batch_normalize_3d(P_intersect, 0.0)
return valid_pts, P_intersect, N
def batch_plane_intersect(P, V):
""" Compute intersections of a batch of rays against the XY plane
Args:
P (np.array 4xN): Array of ray origins
V (np.array 4xN): Array of ray directions
Returns:
(np.array N, np.array 4xN, np.array 4xN): Valid, intersections, normals
"""
valid_pts = np.ones(P.shape[1]).astype(np.bool)
# ray(t) = p + vt, solve for t' s.t. ray(t').z = 0
# 0 = p.z + v.z * t --> t = -p.z / v.z
t = -(P[2,:] / V[2,:])
P_intersect = P + V * t
# plane normals are just z = 1
N = np.zeros(P.shape)
N[2,:] = 1.0
return valid_pts, P_intersect, N
def batch_reflect(V, N):
""" Reflect a batch of vectors by a batch of normals
Args:
V (np.array 4xN): Array of vectors
N (np.array 4xN): Array of normals
Returns:
(np.array 4xN): Array of reflected vectors
"""
v_dot_n = np.sum(V[i, :] * N[i, :] for i in range(3))
# N(V⋅N) gives the component of the vector aligned with the normal
# V = (V - N(V⋅N)) + (N(V⋅N))
# parallel part perpendicular part
# To reflect, we negate the perpendicular part
# V_ref = (V - N(V⋅N)) - (N(V⋅N))
# V_ref = V - 2N(V⋅N)
return V - (2.0 * N * v_dot_n)
def batch_transformed_intersect(T, P, V, intersect_func):
""" Compute transformed ray intersections in batch (vectorized)
Args:
T (np.array 4x4): Transform
P (np.array 4xN): Ray origins
V (np.array 4xN): Ray directions
intersect_func (function): Untransformed intersection function
Returns:
(np.array N, np.array 4xN, np.array 4xN): valid, positions, local positions, normals
"""
T_inv = np.linalg.inv(T)
P_loc = T_inv @ P
V_loc = T_inv @ V
valid, P_i_loc, N_loc = intersect_func(P_loc, V_loc)
P_intersect = T @ P_i_loc
# Normals are pseudo-vectors, so we transform them by the inverse transpose
N = batch_normalize_3d(T_inv.T @ N_loc, 0.0)
return valid, P_intersect, P_i_loc, N
def forward_trace(T_ellipse, T_plane, P, V):
""" Trace rays to UV positions on the display plane in a Northstar configuration
Args:
T_ellipse (np.array 4x4): Reflector ellipse as transform of unit sphere
T_plane (np.array 4x4): Display plane as transform of unit XY planar patch
P (np.array 4xN): Ray origins
V (np.array 4xN): Ray directions
Returns:
(np.array N, np.array 2xN): valid, UVs
"""
P = augment_homogeneous(P, 1.0)
V = augment_homogeneous(V, 0.0)
valid, P_i_e, _, N_e = batch_transformed_intersect(T_ellipse, P, V, batch_sphere_interior_intersect)
V_ref = batch_reflect(V, N_e)
valid_p, _, UV, _ = batch_transformed_intersect(T_plane, P_i_e, V_ref, batch_plane_intersect)
## cleanup: scale UVs [-1,1] -> [0,1]; mark out-of-range UVs as invalid
UV = (UV * 0.5) + 0.5
valid = np.logical_and(valid, valid_p)
for i in range(2):
valid[UV[i, :] < 0.0] = False
valid[UV[i, :] > 1.0] = False
return valid, UV[0:2, :]
def rand_circular(n_samples):
""" Sample random points in a unit circle.
Args:
n_samples (int): Number of points to sample.
Returns:
(np.array 2xN): Array of samples.
"""
length = np.random.uniform(0.0, 1.0, (n_samples))
angle = np.pi * np.random.uniform(0.0, 2.0, (n_samples))
ret = np.zeros((2, n_samples))
ret[0, :] = np.sqrt(length) * np.cos(angle)
ret[1, :] = np.sqrt(length) * np.sin(angle)
return ret
def forward_perspective_trace(T_ellipse, T_plane, fov, resolution, jitter=0.0):
""" Trace UVs for a perspective camera located at the origin.
Args:
T_ellipse (np.array 4x4): Reflector ellipse as transform of unit sphere
T_plane (np.array 4x4): Display plane as transform of unit XY planar patch
fov (float): Field of view (square aspect ratio) in radians
resolution (int): Output resolution (square aspect ratio) in pixels
jitter (float): Amount to randomly jitter each sample point origin XY
Returns:
(np.array NxN, np.array NxN, np.array NxN): valid, U, V
"""
view_limit = math.tan(fov / 2.0)
spts = np.linspace(-view_limit, view_limit, resolution)
X, Y = np.meshgrid(spts, -spts)
P = np.zeros((3, X.size))
if jitter > 0.0:
P[0:2, :] += rand_circular(P.shape[1]) * jitter
V = np.zeros((3, X.size))
V[0, :] = X.reshape(-1)
V[1, :] = Y.reshape(-1)
V[2, :] = -1.0
valid_pts, UV = forward_trace(T_ellipse, T_plane, P, V)
U = UV[0, :].reshape(X.shape)
V = UV[1, :].reshape(X.shape)
valid_mask = valid_pts.reshape(X.shape)
U[valid_mask == False] = 0.0
V[valid_mask == False] = 0.0
return valid_mask, U, V
def invert_map(x_vals, y_vals, target_vals, dest_size):
import scipy
import scipy.interpolate
interpolator = scipy.interpolate.interp2d(x_vals, y_vals, target_vals, kind='cubic')
# The interpolater returned by interp2d only accepts monotonically
# increasing inputs, so we will need to flip vertically later to
# account for our UV convention of lower-left origin
x_vals = np.linspace(0.0, 1.0, dest_size)
y_vals = np.linspace(0.0, 1.0, dest_size)
inv_map = interpolator(x_vals, y_vals)
inv_map = np.maximum(0.0, np.minimum(1.0, inv_map))
return inv_map
def compute_inverse_maps(valid, u_map, v_map, dest_size):
idim = u_map.shape[0]
src_u, src_v = np.meshgrid(np.linspace(0.0, 1.0, idim),
np.linspace(1.0, 0.0, idim))
inv_u = invert_map(u_map[valid], v_map[valid], src_u[valid], dest_size)
inv_v = invert_map(u_map[valid], v_map[valid], src_v[valid], dest_size)
# Flip V map to account for lower-left origin UVs
inv_v = np.flip(inv_v, 0)
return inv_u, inv_v
def map_image(u_map, v_map, im):
u_pixel = (u_map * im.shape[1]).astype(np.float32)
v_pixel = ((1.0 - v_map) * im.shape[0]).astype(np.float32)
im_mapped = cv2.remap(im, u_pixel, v_pixel, cv2.INTER_CUBIC)
return im_mapped
def main():
parser = argparse.ArgumentParser(description='Compute Northstar forward/inverse distortion maps.')
parser.add_argument('configfile',
help='Configuration .json to use')
parser.add_argument('--quality', type=int, default=64,
help='Intermediate interpolation resolution (>128 will be very slow)')
parser.add_argument('--testimage', default='uvgrid.png',
help='Image to use for testing projections.')
parser.add_argument('--outformat', default='exr',
help='Output format (exr/png16/png8)')
args = parser.parse_args()
#rendering
view_fov = math.pi / 2.0 # 90 degrees fov
compute_res = 64
forward_res = 1024
dest_size = 1024
# ellipse parameters
e_a = 0.665 #2.5
e_b = 0.528 #2.0
e_f = math.sqrt(e_a**2.0 - e_b**2.0) # focus
ellipse_tf = np.array([[e_a, 0.0, 0.0, -e_f],
[0.0, e_b, 0.0, 0.0],
[0.0, 0.0, e_b, 0.0],
[0.0, 0.0, 0.0, 1.0]])
psize = 0.3
plane_tf = np.array([[psize, 0.0, 0.0, 0.0],
[0.0, psize, 0.0, 0.0],
[0.0, 0.0, psize, 0.0],
[0.0, 0.0, 0.0, 1.0]])
th = -1.0 + math.pi
rotation_mat = np.array([[math.cos(th), 0.0, math.sin(th), 0.0],
[0.0, 1.0, 0.0, 0.0],
[-math.sin(th), 0.0, math.cos(th), 0.0],
[0.0, 0.0, 0.0, 1.0]])
plane_tf = rotation_mat @ plane_tf
plane_tf[0:3, 3] = np.array([-0.2, 0.0, -0.25])
valid, f_u, f_v = forward_perspective_trace(ellipse_tf, plane_tf,
view_fov,
compute_res)
print("Computing inverse maps")
inv_u, inv_v = compute_inverse_maps(valid, f_u, f_v, dest_size)
print("Generating test images")
valid, f_u, f_v = forward_perspective_trace(ellipse_tf, plane_tf,
view_fov,
forward_res)
uv_im = cv2.imread("uv.png")
forward_im = map_image(f_u, f_v, uv_im)
cv2.imwrite("forward_test.png", forward_im)
inv_im = map_image(inv_u, inv_v, uv_im)
cv2.imwrite("inv_test.png", inv_im)
round_trip_im = map_image(f_u, f_v, inv_im)
cv2.imwrite("round_trip_test.png", round_trip_im)
print("Generating miscalibrated IPD image")
ellipse_tf_ipd = np.array([[e_a, 0.0, 0.0, -e_f + 0.01],
[0.0, e_b, 0.0, 0.0],
[0.0, 0.0, e_b, 0.0],
[0.0, 0.0, 0.0, 1.0]])
valid, f_u, f_v = forward_perspective_trace(ellipse_tf_ipd, plane_tf,
view_fov,
forward_res)
round_trip_im = map_image(f_u, f_v, inv_im)
cv2.imwrite("round_trip_test_incorrect_ipd.png", round_trip_im)
print("Generating focus image.")
n_samples = 100
accum_image = np.zeros((f_u.shape[0], f_u.shape[1], 3))
for i in range(n_samples):
valid, f_u, f_v = forward_perspective_trace(ellipse_tf, plane_tf,
view_fov,
forward_res, 0.01)
accum_image += map_image(f_u, f_v, uv_im)
cv2.imwrite("focus_test.png", (accum_image / n_samples).astype(np.uint8))
print("Done")
if __name__ == '__main__':
main() | [
"numpy.sqrt",
"cv2.remap",
"math.sqrt",
"math.cos",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"scipy.interpolate.interp2d",
"numpy.flip",
"math.tan",
"argparse.ArgumentParser",
"numpy.linspace",
"numpy.meshgrid",
"numpy.maximum",
"numpy.ones",
"numpy.cos",
"cv2.imread",
"num... | [((397, 422), 'numpy.zeros', 'np.zeros', (['(4, V.shape[1])'], {}), '((4, V.shape[1]))\n', (405, 422), True, 'import numpy as np\n'), ((823, 856), 'numpy.linalg.norm', 'np.linalg.norm', (['V[0:3, :]'], {'axis': '(0)'}), '(V[0:3, :], axis=0)\n', (837, 856), True, 'import numpy as np\n'), ((912, 922), 'numpy.copy', 'np.copy', (['V'], {}), '(V)\n', (919, 922), True, 'import numpy as np\n'), ((1640, 1658), 'numpy.sum', 'np.sum', (['(V3 * V3)', '(0)'], {}), '(V3 * V3, 0)\n', (1646, 1658), True, 'import numpy as np\n'), ((1906, 1935), 'numpy.maximum', 'np.maximum', (['discriminant', '(0.0)'], {}), '(discriminant, 0.0)\n', (1916, 1935), True, 'import numpy as np\n'), ((2884, 2901), 'numpy.zeros', 'np.zeros', (['P.shape'], {}), '(P.shape)\n', (2892, 2901), True, 'import numpy as np\n'), ((4020, 4036), 'numpy.linalg.inv', 'np.linalg.inv', (['T'], {}), '(T)\n', (4033, 4036), True, 'import numpy as np\n'), ((5226, 5256), 'numpy.logical_and', 'np.logical_and', (['valid', 'valid_p'], {}), '(valid, valid_p)\n', (5240, 5256), True, 'import numpy as np\n'), ((5603, 5641), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 'n_samples'], {}), '(0.0, 1.0, n_samples)\n', (5620, 5641), True, 'import numpy as np\n'), ((5715, 5739), 'numpy.zeros', 'np.zeros', (['(2, n_samples)'], {}), '((2, n_samples))\n', (5723, 5739), True, 'import numpy as np\n'), ((6497, 6516), 'math.tan', 'math.tan', (['(fov / 2.0)'], {}), '(fov / 2.0)\n', (6505, 6516), False, 'import math\n'), ((6528, 6576), 'numpy.linspace', 'np.linspace', (['(-view_limit)', 'view_limit', 'resolution'], {}), '(-view_limit, view_limit, resolution)\n', (6539, 6576), True, 'import numpy as np\n'), ((6588, 6612), 'numpy.meshgrid', 'np.meshgrid', (['spts', '(-spts)'], {}), '(spts, -spts)\n', (6599, 6612), True, 'import numpy as np\n'), ((6621, 6642), 'numpy.zeros', 'np.zeros', (['(3, X.size)'], {}), '((3, X.size))\n', (6629, 6642), True, 'import numpy as np\n'), ((6728, 6749), 'numpy.zeros', 'np.zeros', (['(3, X.size)'], {}), '((3, X.size))\n', (6736, 6749), True, 'import numpy as np\n'), ((7213, 7282), 'scipy.interpolate.interp2d', 'scipy.interpolate.interp2d', (['x_vals', 'y_vals', 'target_vals'], {'kind': '"""cubic"""'}), "(x_vals, y_vals, target_vals, kind='cubic')\n", (7239, 7282), False, 'import scipy\n'), ((7493, 7525), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'dest_size'], {}), '(0.0, 1.0, dest_size)\n', (7504, 7525), True, 'import numpy as np\n'), ((7539, 7571), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'dest_size'], {}), '(0.0, 1.0, dest_size)\n', (7550, 7571), True, 'import numpy as np\n'), ((8114, 8131), 'numpy.flip', 'np.flip', (['inv_v', '(0)'], {}), '(inv_v, 0)\n', (8121, 8131), True, 'import numpy as np\n'), ((8324, 8372), 'cv2.remap', 'cv2.remap', (['im', 'u_pixel', 'v_pixel', 'cv2.INTER_CUBIC'], {}), '(im, u_pixel, v_pixel, cv2.INTER_CUBIC)\n', (8333, 8372), False, 'import cv2\n'), ((8420, 8514), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute Northstar forward/inverse distortion maps."""'}), "(description=\n 'Compute Northstar forward/inverse distortion maps.')\n", (8443, 8514), False, 'import argparse\n'), ((9247, 9281), 'math.sqrt', 'math.sqrt', (['(e_a ** 2.0 - e_b ** 2.0)'], {}), '(e_a ** 2.0 - e_b ** 2.0)\n', (9256, 9281), False, 'import math\n'), ((9304, 9407), 'numpy.array', 'np.array', (['[[e_a, 0.0, 0.0, -e_f], [0.0, e_b, 0.0, 0.0], [0.0, 0.0, e_b, 0.0], [0.0, \n 0.0, 0.0, 1.0]]'], {}), '([[e_a, 0.0, 0.0, -e_f], [0.0, e_b, 0.0, 0.0], [0.0, 0.0, e_b, 0.0],\n [0.0, 0.0, 0.0, 1.0]])\n', (9312, 9407), True, 'import numpy as np\n'), ((9484, 9592), 'numpy.array', 'np.array', (['[[psize, 0.0, 0.0, 0.0], [0.0, psize, 0.0, 0.0], [0.0, 0.0, psize, 0.0], [\n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[psize, 0.0, 0.0, 0.0], [0.0, psize, 0.0, 0.0], [0.0, 0.0, psize,\n 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (9492, 9592), True, 'import numpy as np\n'), ((9926, 9954), 'numpy.array', 'np.array', (['[-0.2, 0.0, -0.25]'], {}), '([-0.2, 0.0, -0.25])\n', (9934, 9954), True, 'import numpy as np\n'), ((10555, 10575), 'cv2.imread', 'cv2.imread', (['"""uv.png"""'], {}), "('uv.png')\n", (10565, 10575), False, 'import cv2\n'), ((10624, 10667), 'cv2.imwrite', 'cv2.imwrite', (['"""forward_test.png"""', 'forward_im'], {}), "('forward_test.png', forward_im)\n", (10635, 10667), False, 'import cv2\n'), ((10716, 10751), 'cv2.imwrite', 'cv2.imwrite', (['"""inv_test.png"""', 'inv_im'], {}), "('inv_test.png', inv_im)\n", (10727, 10751), False, 'import cv2\n'), ((10804, 10853), 'cv2.imwrite', 'cv2.imwrite', (['"""round_trip_test.png"""', 'round_trip_im'], {}), "('round_trip_test.png', round_trip_im)\n", (10815, 10853), False, 'import cv2\n'), ((10924, 11034), 'numpy.array', 'np.array', (['[[e_a, 0.0, 0.0, -e_f + 0.01], [0.0, e_b, 0.0, 0.0], [0.0, 0.0, e_b, 0.0],\n [0.0, 0.0, 0.0, 1.0]]'], {}), '([[e_a, 0.0, 0.0, -e_f + 0.01], [0.0, e_b, 0.0, 0.0], [0.0, 0.0,\n e_b, 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (10932, 11034), True, 'import numpy as np\n'), ((11408, 11471), 'cv2.imwrite', 'cv2.imwrite', (['"""round_trip_test_incorrect_ipd.png"""', 'round_trip_im'], {}), "('round_trip_test_incorrect_ipd.png', round_trip_im)\n", (11419, 11471), False, 'import cv2\n'), ((11548, 11589), 'numpy.zeros', 'np.zeros', (['(f_u.shape[0], f_u.shape[1], 3)'], {}), '((f_u.shape[0], f_u.shape[1], 3))\n', (11556, 11589), True, 'import numpy as np\n'), ((1703, 1721), 'numpy.sum', 'np.sum', (['(P3 * V3)', '(0)'], {}), '(P3 * V3, 0)\n', (1709, 1721), True, 'import numpy as np\n'), ((1760, 1778), 'numpy.sum', 'np.sum', (['(P3 * P3)', '(0)'], {}), '(P3 * P3, 0)\n', (1766, 1778), True, 'import numpy as np\n'), ((5664, 5702), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2.0)', 'n_samples'], {}), '(0.0, 2.0, n_samples)\n', (5681, 5702), True, 'import numpy as np\n'), ((5756, 5771), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (5763, 5771), True, 'import numpy as np\n'), ((5774, 5787), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5780, 5787), True, 'import numpy as np\n'), ((5804, 5819), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (5811, 5819), True, 'import numpy as np\n'), ((5822, 5835), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5828, 5835), True, 'import numpy as np\n'), ((7645, 7669), 'numpy.minimum', 'np.minimum', (['(1.0)', 'inv_map'], {}), '(1.0, inv_map)\n', (7655, 7669), True, 'import numpy as np\n'), ((7806, 7833), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'idim'], {}), '(0.0, 1.0, idim)\n', (7817, 7833), True, 'import numpy as np\n'), ((7866, 7893), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0.0)', 'idim'], {}), '(1.0, 0.0, idim)\n', (7877, 7893), True, 'import numpy as np\n'), ((1992, 2018), 'numpy.sqrt', 'np.sqrt', (['safe_discriminant'], {}), '(safe_discriminant)\n', (1999, 2018), True, 'import numpy as np\n'), ((2647, 2666), 'numpy.ones', 'np.ones', (['P.shape[1]'], {}), '(P.shape[1])\n', (2654, 2666), True, 'import numpy as np\n'), ((9691, 9703), 'math.cos', 'math.cos', (['th'], {}), '(th)\n', (9699, 9703), False, 'import math\n'), ((9710, 9722), 'math.sin', 'math.sin', (['th'], {}), '(th)\n', (9718, 9722), False, 'import math\n'), ((9805, 9817), 'math.cos', 'math.cos', (['th'], {}), '(th)\n', (9813, 9817), False, 'import math\n'), ((9786, 9798), 'math.sin', 'math.sin', (['th'], {}), '(th)\n', (9794, 9798), False, 'import math\n')] |
from __future__ import absolute_import
import chainer
import chainer.functions as F
from .convolution import ConvolutionND
def _pair(x, ndim=2):
if hasattr(x, '__getitem__'):
return x
return [x]*ndim
class PixelShuffleUpsamplerND(chainer.Chain):
"""Pixel Shuffler for the super resolution.
This upsampler is effective upsampling method compared with the deconvolution.
The deconvolution has a problem of the checkerboard artifact.
A detail of this problem shows the following.
http://distill.pub/2016/deconv-checkerboard/
See also:
https://arxiv.org/abs/1609.05158
"""
def __init__(self, ndim, in_channels, out_channels, resolution,
ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False,
initialW=None, initial_bias=None):
super(PixelShuffleUpsamplerND, self).__init__()
self.ndim = ndim
self.resolution = resolution
self.in_channels = in_channels
self.out_channels = out_channels
self.pad = _pair(pad, self.ndim)
self.pad_mode = pad_mode
with self.init_scope():
m = self.resolution ** self.ndim
self.conv = ConvolutionND(
ndim, in_channels, out_channels * m,
ksize, stride, self.pad, self.pad_mode, nobias,
initialW, initial_bias)
def __call__(self, x):
r = self.resolution
out = self.conv(x)
batchsize = out.shape[0]
in_channels = out.shape[1]
out_channels = self.out_channels
in_shape = out.shape[2:]
out_shape = tuple(s * r for s in in_shape)
r_tuple = tuple(self.resolution for _ in range(self.ndim))
out = F.reshape(out, (batchsize, out_channels,) + r_tuple + in_shape)
out = F.transpose(out, self.make_transpose_indices())
out = F.reshape(out, (batchsize, out_channels, ) + out_shape)
return out
def make_transpose_indices(self):
si = [0, 1]
si.extend([2 * (i + 1) + 1 for i in range(self.ndim)])
si.extend([2 * (i + 1) for i in range(self.ndim)])
return si
class PixelShuffleUpsampler2D(PixelShuffleUpsamplerND):
def __init__(self, in_channels, out_channels, resolution,
ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False,
initialW=None, initial_bias=None):
super(PixelShuffleUpsampler2D, self).__init__(
2, in_channels, out_channels, resolution,
ksize, stride, pad, pad_mode, nobias,
initialW, initial_bias)
class PixelShuffleUpsampler3D(PixelShuffleUpsamplerND):
def __init__(self, in_channels, out_channels, resolution,
ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False,
initialW=None, initial_bias=None):
super(PixelShuffleUpsampler3D, self).__init__(
3, in_channels, out_channels, resolution,
ksize, stride, pad, pad_mode, nobias,
initialW, initial_bias)
| [
"chainer.functions.reshape"
] | [((1770, 1832), 'chainer.functions.reshape', 'F.reshape', (['out', '((batchsize, out_channels) + r_tuple + in_shape)'], {}), '(out, (batchsize, out_channels) + r_tuple + in_shape)\n', (1779, 1832), True, 'import chainer.functions as F\n'), ((1910, 1963), 'chainer.functions.reshape', 'F.reshape', (['out', '((batchsize, out_channels) + out_shape)'], {}), '(out, (batchsize, out_channels) + out_shape)\n', (1919, 1963), True, 'import chainer.functions as F\n')] |
"""A compact GUI application for optical distortion calibration of endoscopes.
See:
https://github.com/gift-surg/endocal
"""
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
doc_dir = path.abspath(path.join(path.dirname(__file__), 'doc'))
# Get the summary
summary = 'A cross-platform, compact GUI application for the optical' +\
' distortion calibration of fluid-immersed endoscopes.'
# Get the long description
with open(path.join(doc_dir, 'description.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='endocal',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='18.02.13',
description=summary,
long_description=long_description,
# The project's main homepage.
url='https://github.com/gift-surg/endocal',
# Author details
author='<NAME>',
author_email='<EMAIL>',
# Choose your license
license='BSD-3-Clause',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Intended Audience :: Healthcare Industry',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Graphics',
'Topic :: Multimedia :: Video :: Capture',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
],
# What does your project relate to?
keywords='optical distortion calibration, endoscope, endoscopy, medical imaging,'
'image processing, biomedical engineering, medical physics,'
'image-guided interventions',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['endocal', 'cad'],
# As recommended in
# https://docs.python.org/2/distutils/setupscript.html#installing-package-data
package_dir={'endocal': 'endocal', 'cad': 'cad'},
py_modules=['endocal.calibration', 'cad.dxf'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['PyYAML', 'numpy'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={'endocal': ['data/sample_001/*', 'data/sample_002/*'],
'cad': ['data/dxf/header.dxf', 'data/dxf/footer.dxf',
'data/dxf/polyline.dxf', 'data/dxf/seqend.dxf']},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'endocal=endocal:main',
'endocal-test=endocal:test',
'dxf=cad:generate_dxf'
],
},
)
| [
"os.path.join",
"os.path.dirname",
"setuptools.setup"
] | [((589, 2032), 'setuptools.setup', 'setup', ([], {'name': '"""endocal"""', 'version': '"""18.02.13"""', 'description': 'summary', 'long_description': 'long_description', 'url': '"""https://github.com/gift-surg/endocal"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""BSD-3-Clause"""', 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Multimedia :: Graphics',\n 'Topic :: Multimedia :: Video :: Capture',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python', 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows']", 'keywords': '"""optical distortion calibration, endoscope, endoscopy, medical imaging,image processing, biomedical engineering, medical physics,image-guided interventions"""', 'packages': "['endocal', 'cad']", 'package_dir': "{'endocal': 'endocal', 'cad': 'cad'}", 'py_modules': "['endocal.calibration', 'cad.dxf']", 'install_requires': "['PyYAML', 'numpy']", 'package_data': "{'endocal': ['data/sample_001/*', 'data/sample_002/*'], 'cad': [\n 'data/dxf/header.dxf', 'data/dxf/footer.dxf', 'data/dxf/polyline.dxf',\n 'data/dxf/seqend.dxf']}", 'entry_points': "{'console_scripts': ['endocal=endocal:main', 'endocal-test=endocal:test',\n 'dxf=cad:generate_dxf']}"}), "(name='endocal', version='18.02.13', description=summary,\n long_description=long_description, url=\n 'https://github.com/gift-surg/endocal', author='<NAME>', author_email=\n '<EMAIL>', license='BSD-3-Clause', classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Multimedia :: Graphics',\n 'Topic :: Multimedia :: Video :: Capture',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python', 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows'], keywords=\n 'optical distortion calibration, endoscope, endoscopy, medical imaging,image processing, biomedical engineering, medical physics,image-guided interventions'\n , packages=['endocal', 'cad'], package_dir={'endocal': 'endocal', 'cad':\n 'cad'}, py_modules=['endocal.calibration', 'cad.dxf'], install_requires\n =['PyYAML', 'numpy'], package_data={'endocal': ['data/sample_001/*',\n 'data/sample_002/*'], 'cad': ['data/dxf/header.dxf',\n 'data/dxf/footer.dxf', 'data/dxf/polyline.dxf', 'data/dxf/seqend.dxf']},\n entry_points={'console_scripts': ['endocal=endocal:main',\n 'endocal-test=endocal:test', 'dxf=cad:generate_dxf']})\n", (594, 2032), False, 'from setuptools import setup\n'), ((265, 287), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'from os import path\n'), ((493, 530), 'os.path.join', 'path.join', (['doc_dir', '"""description.rst"""'], {}), "(doc_dir, 'description.rst')\n", (502, 530), False, 'from os import path\n')] |
import sys
from unittest.mock import MagicMock, patch # noqa F401
import mock
import pytest # noqa F401
# TODO: Simplify the mocking of private (unavailable) dataiku lib.
# Current mocking is ugly and complex.
if 'dataiku.Dataset' in sys.modules:
del sys.modules['dataiku.Dataset']
if 'dataiku' in sys.modules:
del sys.modules['dataiku']
if 'dataiku.Dataset' in sys.modules:
del sys.modules['dataiku.Dataset']
if 'dataikuapi' in sys.modules:
del sys.modules['dataikuapi']
if 'dataikuapi.dss.project.DSSProject' in sys.modules:
del sys.modules['dataikuapi.dss.project.DSSProject']
dataiku_mock = mock.MagicMock()
sys.modules['dataiku'] = dataiku_mock
spark_mock = mock.MagicMock()
sys.modules['dataiku.spark'] = spark_mock
ds_mock = mock.MagicMock()
sys.modules['dataiku.Dataset'] = ds_mock
dataikuapi_mock = mock.MagicMock()
sys.modules['dataikuapi'] = dataikuapi_mock
project_mock = mock.MagicMock()
sys.modules['dataikuapi.dss.project.DSSProject'] = project_mock
project_obj_mock = mock.MagicMock()
project_mock.return_value = project_obj_mock
dapi_dataset_mock = mock.MagicMock()
project_obj_mock.get_dataset.return_value = dapi_dataset_mock
import dataikuapi.dss.project.DSSProject # noqa l202
dataikuapi.dss.project.DSSProject.return_value = project_obj_mock
from birgitta.dataframesource.sources.dataikusource import DataikuSource # noqa F401
from birgitta.dataframe import dataframe, dfdiff # noqa l202
from birgitta.dataiku import schema as dkuschema # noqa E402
from birgitta.fields import Catalog # noqa E402
from birgitta.schema.schema import Schema # noqa E402
def is_current_platform():
return True
@mock.patch("birgitta.dataiku.platform.is_current_platform",
is_current_platform)
def test_write_without_set_schema():
dataiku_source = DataikuSource()
dataset_name = "fixtures"
s3_dir = "s3://birgittatestbucket/sourcetests"
fixtures_mock = MagicMock()
catalog = Catalog()
catalog.add_field('fooint', description='Foo int', example=39)
schema = Schema([['fooint', 'bigint']], catalog)
dataframe.write(fixtures_mock,
dataset_name,
prefix=s3_dir,
schema=schema,
skip_cast=True,
set_schema=False,
dataframe_source=dataiku_source)
dapi_dataset_mock.set_schema.assert_not_called()
def is_current_platform():
return True
@mock.patch("birgitta.dataiku.platform.is_current_platform",
is_current_platform)
def test_write():
# dapi_dataset_mock = mock.MagicMock()
# project_obj_mock.get_dataset.return_value = dapi_dataset_mock
dataiku_source = DataikuSource()
dataset_name = "fixtures"
s3_dir = "s3://birgittatestbucket/sourcetests"
fixtures_mock = MagicMock()
catalog = Catalog()
catalog.add_field('fooint', description='Foo int', example=39)
schema = Schema([['fooint', 'bigint']], catalog)
dataframe.write(fixtures_mock,
dataset_name,
prefix=s3_dir,
schema=schema,
skip_cast=True,
dataframe_source=dataiku_source)
dataiku_schema = dkuschema.to_dataiku(schema)
dapi_dataset_mock.set_schema.assert_called_once_with(dataiku_schema)
# dso_mock.set_schema.assert_called_once_with(dataiku_schema)
| [
"mock.patch",
"birgitta.schema.schema.Schema",
"birgitta.fields.Catalog",
"unittest.mock.MagicMock",
"birgitta.dataframe.dataframe.write",
"mock.MagicMock",
"birgitta.dataframesource.sources.dataikusource.DataikuSource",
"birgitta.dataiku.schema.to_dataiku"
] | [((620, 636), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (634, 636), False, 'import mock\n'), ((688, 704), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (702, 704), False, 'import mock\n'), ((757, 773), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (771, 773), False, 'import mock\n'), ((833, 849), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (847, 849), False, 'import mock\n'), ((909, 925), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (923, 925), False, 'import mock\n'), ((1009, 1025), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1023, 1025), False, 'import mock\n'), ((1091, 1107), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1105, 1107), False, 'import mock\n'), ((1654, 1739), 'mock.patch', 'mock.patch', (['"""birgitta.dataiku.platform.is_current_platform"""', 'is_current_platform'], {}), "('birgitta.dataiku.platform.is_current_platform', is_current_platform\n )\n", (1664, 1739), False, 'import mock\n'), ((2445, 2530), 'mock.patch', 'mock.patch', (['"""birgitta.dataiku.platform.is_current_platform"""', 'is_current_platform'], {}), "('birgitta.dataiku.platform.is_current_platform', is_current_platform\n )\n", (2455, 2530), False, 'import mock\n'), ((1805, 1820), 'birgitta.dataframesource.sources.dataikusource.DataikuSource', 'DataikuSource', ([], {}), '()\n', (1818, 1820), False, 'from birgitta.dataframesource.sources.dataikusource import DataikuSource\n'), ((1922, 1933), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (1931, 1933), False, 'from unittest.mock import MagicMock, patch\n'), ((1948, 1957), 'birgitta.fields.Catalog', 'Catalog', ([], {}), '()\n', (1955, 1957), False, 'from birgitta.fields import Catalog\n'), ((2038, 2077), 'birgitta.schema.schema.Schema', 'Schema', (["[['fooint', 'bigint']]", 'catalog'], {}), "([['fooint', 'bigint']], catalog)\n", (2044, 2077), False, 'from birgitta.schema.schema import Schema\n'), ((2082, 2227), 'birgitta.dataframe.dataframe.write', 'dataframe.write', (['fixtures_mock', 'dataset_name'], {'prefix': 's3_dir', 'schema': 'schema', 'skip_cast': '(True)', 'set_schema': '(False)', 'dataframe_source': 'dataiku_source'}), '(fixtures_mock, dataset_name, prefix=s3_dir, schema=schema,\n skip_cast=True, set_schema=False, dataframe_source=dataiku_source)\n', (2097, 2227), False, 'from birgitta.dataframe import dataframe, dfdiff\n'), ((2688, 2703), 'birgitta.dataframesource.sources.dataikusource.DataikuSource', 'DataikuSource', ([], {}), '()\n', (2701, 2703), False, 'from birgitta.dataframesource.sources.dataikusource import DataikuSource\n'), ((2805, 2816), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2814, 2816), False, 'from unittest.mock import MagicMock, patch\n'), ((2831, 2840), 'birgitta.fields.Catalog', 'Catalog', ([], {}), '()\n', (2838, 2840), False, 'from birgitta.fields import Catalog\n'), ((2921, 2960), 'birgitta.schema.schema.Schema', 'Schema', (["[['fooint', 'bigint']]", 'catalog'], {}), "([['fooint', 'bigint']], catalog)\n", (2927, 2960), False, 'from birgitta.schema.schema import Schema\n'), ((2965, 3092), 'birgitta.dataframe.dataframe.write', 'dataframe.write', (['fixtures_mock', 'dataset_name'], {'prefix': 's3_dir', 'schema': 'schema', 'skip_cast': '(True)', 'dataframe_source': 'dataiku_source'}), '(fixtures_mock, dataset_name, prefix=s3_dir, schema=schema,\n skip_cast=True, dataframe_source=dataiku_source)\n', (2980, 3092), False, 'from birgitta.dataframe import dataframe, dfdiff\n'), ((3210, 3238), 'birgitta.dataiku.schema.to_dataiku', 'dkuschema.to_dataiku', (['schema'], {}), '(schema)\n', (3230, 3238), True, 'from birgitta.dataiku import schema as dkuschema\n')] |
#! /usr/bin/env python
"""
.. module:: bug0
:platform: Unix
:synopsis: Python module for implementing the bug0 path planning algorithm
.. moduleauthor:: <NAME> <EMAIL>
This node implements the bug0 path planning algorithm for moving a robot from its current
position to some target position.
Subscribes to:
/odom topic where the simulator publishes the robot position
/laser_scan topic where the robot publishes the laser scan readings
Publishes to:
/cmd_vel publishes velocity command to this topic
Service:
/go_to_point_switch sends a goal request to the go to point server
/wall_follower_switch sends a request to the wall follower server
/bug_switch accepts a request and sends a response to the bug switch client
"""
import rospy
import time
# import ros message
from geometry_msgs.msg import Point, Pose
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from tf import transformations
# import ros service
from std_srvs.srv import *
from geometry_msgs.msg import Twist
from final_assignment.srv import MoveBaseResult, MoveBaseResultResponse
import math
pub = None
srv_client_go_to_point_ = None
srv_client_wall_follower_ = None
yaw_ = 0
yaw_error_allowed_ = 5 * (math.pi / 180) # 5 degrees
position_ = Point()
desired_position_ = Point()
desired_position_.x = 0
desired_position_.y = 0
desired_position_.z = 0
regions_ = None
state_desc_ = ["Go to point", "wall following", "target reached"]
state_ = 0
# 0 - go to point
# 1 - wall following
# callbacks
def clbk_odom(msg):
"""
The pose callback function that takes the position and posture of
the robot from the argument "msg" and set it to
two global variables containing the x, y coordinates pose and yaw angle.
Args:
pose_message (Odom): an object containing all the values
of the current position and posture of the robot
"""
global position_, yaw_
# position
position_ = msg.position
# yaw
quaternion = (
msg.orientation.x,
msg.orientation.y,
msg.orientation.z,
msg.orientation.w,
)
euler = transformations.euler_from_quaternion(quaternion)
yaw_ = euler[2]
def clbk_laser(msg):
"""A callback function that takes in the Laser scan reading from the
argument "msg" and then interpret it by disecting it into regions
Args:
msg (LaserScan): an object containing the laser scan values
coming from the laser sensor of the robot.
"""
global regions_
regions_ = {
"left": min(min(msg.ranges[0:53]), 10),
"fleft": min(min(msg.ranges[54:107]), 10),
"front": min(min(msg.ranges[108:161]), 10),
"fright": min(min(msg.ranges[162:215]), 10),
"right": min(min(msg.ranges[216:270]), 10),
}
def handle_result(mes):
"""This is a callback function that handles the request from a client service
to start the bug0 node by changing the state to 0 which makes the node active
Args:
mes (string): a request message sent by the client calling the service.
Returns:
[MoveBaseResultResponse]: returns a response message "Target Reached" when the
target has been reached.
"""
global state_
time.sleep(1)
change_state(0)
count = 0
while state_ != 4 and count != 30:
time.sleep(2)
count += 1
if state_ == 4:
res = "Target Reached"
print(res)
elif count == 300:
res = "Target could not be reached"
change_state(2)
print(res)
return MoveBaseResultResponse(res)
def change_state(state):
global state_, state_desc_
global srv_client_wall_follower_, srv_client_go_to_point_
state_ = state
log = "state changed: %s" % state_desc_[state]
rospy.loginfo(log)
if state_ == 0:
resp = srv_client_go_to_point_(True)
resp = srv_client_wall_follower_(False)
if state_ == 1:
resp = srv_client_go_to_point_(False)
resp = srv_client_wall_follower_(True)
if state_ == 2:
resp = srv_client_go_to_point_(False)
resp = srv_client_wall_follower_(False)
twist_msg = Twist()
twist_msg.linear.x = 0
twist_msg.angular.z = 0
pub.publish(twist_msg)
state_ = 4
def normalize_angle(angle):
if math.fabs(angle) > math.pi:
angle = angle - (2 * math.pi * angle) / (math.fabs(angle))
return angle
def main():
time.sleep(2)
global regions_, position_, desired_position_, state_, yaw_, yaw_error_allowed_
global srv_client_go_to_point_, srv_client_wall_follower_, pub
rospy.init_node("bug0")
sub_laser = rospy.Subscriber("/laser_scan", LaserScan, clbk_laser)
sub_odom = rospy.Subscriber("/odometry_frame", Pose, clbk_odom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
srv_client_go_to_point_ = rospy.ServiceProxy("/go_to_point_switch", SetBool)
srv_client_wall_follower_ = rospy.ServiceProxy("/wall_follower_switch", SetBool)
t = rospy.Service("bug_switch", MoveBaseResult, handle_result)
# initialize going to the point
change_state(2)
rate = rospy.Rate(20)
while not rospy.is_shutdown():
if regions_ == None:
continue
if state_ == 4:
continue
if state_ == 0:
err_pos = math.sqrt(
pow(desired_position_.y - position_.y, 2)
+ pow(desired_position_.x - position_.x, 2)
)
if err_pos < 0.3:
change_state(2)
elif regions_["front"] < 0.5:
change_state(1)
elif state_ == 1:
desired_yaw = math.atan2(
desired_position_.y - position_.y, desired_position_.x - position_.x
)
err_yaw = normalize_angle(desired_yaw - yaw_)
err_pos = math.sqrt(
pow(desired_position_.y - position_.y, 2)
+ pow(desired_position_.x - position_.x, 2)
)
if err_pos < 0.3:
change_state(2)
if regions_["front"] > 1 and math.fabs(err_yaw) < 0.05:
change_state(0)
elif state_ == 2:
desired_position_.x = rospy.get_param("des_pos_x")
desired_position_.y = rospy.get_param("des_pos_y")
err_pos = math.sqrt(
pow(desired_position_.y - position_.y, 2)
+ pow(desired_position_.x - position_.x, 2)
)
if err_pos > 0.35:
change_state(0)
rate.sleep()
if __name__ == "__main__":
main()
| [
"tf.transformations.euler_from_quaternion",
"rospy.Publisher",
"rospy.is_shutdown",
"geometry_msgs.msg.Twist",
"rospy.init_node",
"rospy.get_param",
"rospy.ServiceProxy",
"rospy.Service",
"time.sleep",
"geometry_msgs.msg.Point",
"rospy.Rate",
"final_assignment.srv.MoveBaseResultResponse",
"m... | [((1295, 1302), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (1300, 1302), False, 'from geometry_msgs.msg import Point, Pose\n'), ((1323, 1330), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (1328, 1330), False, 'from geometry_msgs.msg import Point, Pose\n'), ((2147, 2196), 'tf.transformations.euler_from_quaternion', 'transformations.euler_from_quaternion', (['quaternion'], {}), '(quaternion)\n', (2184, 2196), False, 'from tf import transformations\n'), ((3266, 3279), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3276, 3279), False, 'import time\n'), ((3586, 3613), 'final_assignment.srv.MoveBaseResultResponse', 'MoveBaseResultResponse', (['res'], {}), '(res)\n', (3608, 3613), False, 'from final_assignment.srv import MoveBaseResult, MoveBaseResultResponse\n'), ((3808, 3826), 'rospy.loginfo', 'rospy.loginfo', (['log'], {}), '(log)\n', (3821, 3826), False, 'import rospy\n'), ((4475, 4488), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4485, 4488), False, 'import time\n'), ((4645, 4668), 'rospy.init_node', 'rospy.init_node', (['"""bug0"""'], {}), "('bug0')\n", (4660, 4668), False, 'import rospy\n'), ((4686, 4740), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/laser_scan"""', 'LaserScan', 'clbk_laser'], {}), "('/laser_scan', LaserScan, clbk_laser)\n", (4702, 4740), False, 'import rospy\n'), ((4756, 4808), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/odometry_frame"""', 'Pose', 'clbk_odom'], {}), "('/odometry_frame', Pose, clbk_odom)\n", (4772, 4808), False, 'import rospy\n'), ((4819, 4867), 'rospy.Publisher', 'rospy.Publisher', (['"""/cmd_vel"""', 'Twist'], {'queue_size': '(1)'}), "('/cmd_vel', Twist, queue_size=1)\n", (4834, 4867), False, 'import rospy\n'), ((4899, 4949), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/go_to_point_switch"""', 'SetBool'], {}), "('/go_to_point_switch', SetBool)\n", (4917, 4949), False, 'import rospy\n'), ((4982, 5034), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/wall_follower_switch"""', 'SetBool'], {}), "('/wall_follower_switch', SetBool)\n", (5000, 5034), False, 'import rospy\n'), ((5043, 5101), 'rospy.Service', 'rospy.Service', (['"""bug_switch"""', 'MoveBaseResult', 'handle_result'], {}), "('bug_switch', MoveBaseResult, handle_result)\n", (5056, 5101), False, 'import rospy\n'), ((5171, 5185), 'rospy.Rate', 'rospy.Rate', (['(20)'], {}), '(20)\n', (5181, 5185), False, 'import rospy\n'), ((3361, 3374), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (3371, 3374), False, 'import time\n'), ((4187, 4194), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (4192, 4194), False, 'from geometry_msgs.msg import Twist\n'), ((4345, 4361), 'math.fabs', 'math.fabs', (['angle'], {}), '(angle)\n', (4354, 4361), False, 'import math\n'), ((5200, 5219), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5217, 5219), False, 'import rospy\n'), ((4422, 4438), 'math.fabs', 'math.fabs', (['angle'], {}), '(angle)\n', (4431, 4438), False, 'import math\n'), ((5695, 5780), 'math.atan2', 'math.atan2', (['(desired_position_.y - position_.y)', '(desired_position_.x - position_.x)'], {}), '(desired_position_.y - position_.y, desired_position_.x - position_.x\n )\n', (5705, 5780), False, 'import math\n'), ((6253, 6281), 'rospy.get_param', 'rospy.get_param', (['"""des_pos_x"""'], {}), "('des_pos_x')\n", (6268, 6281), False, 'import rospy\n'), ((6316, 6344), 'rospy.get_param', 'rospy.get_param', (['"""des_pos_y"""'], {}), "('des_pos_y')\n", (6331, 6344), False, 'import rospy\n'), ((6133, 6151), 'math.fabs', 'math.fabs', (['err_yaw'], {}), '(err_yaw)\n', (6142, 6151), False, 'import math\n')] |
# Copyright 2014-present PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Monitor a directory for newly created files for processing
"""
import os
from time import sleep
from asyncio import Queue
from watchgod import awatch
from typing import Dict, Optional
from stoq import Payload, PayloadMeta
from stoq.plugins import ProviderPlugin
from stoq.helpers import StoqConfigParser
from stoq.exceptions import StoqPluginException
class DirmonPlugin(ProviderPlugin):
def __init__(self, config: StoqConfigParser) -> None:
super().__init__(config)
self.source_dir = config.get('options', 'source_dir', fallback=None)
if not self.source_dir or not os.path.exists(self.source_dir):
raise StoqPluginException(
f"Source directory not defined or doesn't exist: '{self.source_dir}'"
)
self.source_dir = os.path.abspath(self.source_dir)
async def ingest(self, queue: Queue) -> None:
"""
Monitor a directory for newly created files for ingest
"""
self.log.info(f'Monitoring {self.source_dir} for newly created files...')
async for changes in awatch(self.source_dir):
for change in list(changes):
event = change[0]
src_path = os.path.abspath(change[1])
# Only handle Change.added
if event != 1:
continue
meta = PayloadMeta(
extra_data={
'filename': os.path.basename(src_path),
'source_dir': os.path.dirname(src_path),
}
)
with open(src_path, 'rb') as f:
payload = Payload(f.read(), meta)
await queue.put(payload)
| [
"os.path.exists",
"stoq.exceptions.StoqPluginException",
"watchgod.awatch",
"os.path.dirname",
"os.path.basename",
"os.path.abspath"
] | [((1441, 1473), 'os.path.abspath', 'os.path.abspath', (['self.source_dir'], {}), '(self.source_dir)\n', (1456, 1473), False, 'import os\n'), ((1725, 1748), 'watchgod.awatch', 'awatch', (['self.source_dir'], {}), '(self.source_dir)\n', (1731, 1748), False, 'from watchgod import awatch\n'), ((1294, 1389), 'stoq.exceptions.StoqPluginException', 'StoqPluginException', (['f"""Source directory not defined or doesn\'t exist: \'{self.source_dir}\'"""'], {}), '(\n f"Source directory not defined or doesn\'t exist: \'{self.source_dir}\'")\n', (1313, 1389), False, 'from stoq.exceptions import StoqPluginException\n'), ((1243, 1274), 'os.path.exists', 'os.path.exists', (['self.source_dir'], {}), '(self.source_dir)\n', (1257, 1274), False, 'import os\n'), ((1852, 1878), 'os.path.abspath', 'os.path.abspath', (['change[1]'], {}), '(change[1])\n', (1867, 1878), False, 'import os\n'), ((2087, 2113), 'os.path.basename', 'os.path.basename', (['src_path'], {}), '(src_path)\n', (2103, 2113), False, 'import os\n'), ((2153, 2178), 'os.path.dirname', 'os.path.dirname', (['src_path'], {}), '(src_path)\n', (2168, 2178), False, 'import os\n')] |
import tensorflow as tf
from tensorflow.keras.layers import *
assert tf.__version__>="2.0.0", f"Expect TF>=2.0.0 but get {tf.__version__}"
class PositionalSinEmbedding(tf.keras.layers.Layer):
"""
Positional Sinusoidal Embedding layer as described in "Attention is All You Need".
|
| Parameters:
| | input_dim: parameter for embedding layer
| | output_dim: parameter for embedding layer
|
| Inputs: two dimensional tensor to be embedded
|
| Outputs:
| | inputs_embed: tensor of shape (batch_size, n_step, output_dim)
| | inputs_positional_encoding: tensor of shape (n_step, output_dim)
"""
def __init__(self, input_dim, output_dim, **kwargs):
super(PositionalSinEmbedding, self).__init__(**kwargs)
self._input_dim = input_dim
self._output_dim = output_dim
self._embedding_layer = Embedding(input_dim=input_dim, output_dim=output_dim)
@tf.function
def call(self, inputs, training=None):
inputs_embed = self._embedding_layer(inputs)
inputs_positional_encoding = self.get_positional_encoding(tf.shape(inputs_embed)[-2], tf.shape(inputs_embed)[-1])
return inputs_embed, inputs_positional_encoding
def get_positional_encoding(self, n_step, n_embed):
"""
Formula:
| PE(pos,2i) = sin(pos/10000**(2*i/d_model))
| PE(pos,2i+1) = cos(pos/10000**(2*i/d_model))
Return:
| Tensor of shape (n_step, n_embed)
"""
n_step = tf.cast(n_step, tf.float32)
n_embed = tf.cast(n_embed,tf.float32)
step_dim = tf.cast(tf.reshape(tf.range(n_step), shape=(-1,1)), tf.float32)
embed_dim = tf.cast(tf.range(n_embed), tf.float32)
positional_encoding = step_dim * (1.0/tf.math.pow(10000.0, (2.0*(embed_dim//2.0))/n_embed))
positional_encoding = tf.where(tf.cast(tf.range(n_embed)%2.0, tf.bool), tf.math.cos(positional_encoding), tf.math.sin(positional_encoding))
return positional_encoding
def get_config(self):
"""
Get configuration for current layer
"""
config = super(PositionalSinEmbedding, self).get_config()
cur_config = {"input_dim":self._input_dim, "output_dim":self._output_dim}
config.update(cur_config)
return config
class MultiHeadAttention(tf.keras.layers.Layer):
"""
Multi-Head Attention layer as described in "Attention is All You Need".
|
| Parameters:
| | n_head: number of heads to use
| | d_model: last dimension of attention vector
|
| Inputs:
| | Q: query tensor of shape (batch_size, n_query, _)
| | K: key tensor of shape (batch_size, n_key, _)
| | V: value tensor of shape (batch_size, n_key, _)
|
| Outputs:
| | attn_vector: vector of shape (batch_size, n_query, d_model)
|
| Notes:
| | All assertations in call function are depreciated as it's not well supported in Autograph yet.
"""
def __init__(self, n_head=8, d_model=512, **kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
# Parameter setting: d_k = d_v = d_model/h, in original paper, d_model=512, h=8, d_k=d_v=64
# The dimension of each head is reduced to reduce computational cost
assert d_model%n_head==0, f"Illegal parameter n_head:{n_head}, d_model:{d_model}"
self._n_head = n_head
self._d_model = d_model
self._d_k = d_model//n_head
self._d_v = d_model//n_head
self._Wq = Dense(d_model)
self._Wk = Dense(d_model)
self._Wv = Dense(d_model)
self._concat_layer = Concatenate()
self._Wo = Dense(self._d_model)
@tf.function
def call(self, Q, K, V, training=None, mask=None):
"""
Input:
| Q: Tensor of shape (batch_size, n_query, _)
| K: Tensor of shape (batch_size, n_key, _)
| V: Tensor of shape (batch_size, n_key, _)
| mask: Tensor of shape (batch_size, n_query, n_key)
"""
# assert tf.shape(K)[-2] == tf.shape(V)[-2], f"K has shape {tf.shape(K)} while V has shape {tf.shape(V)}"
batch_size = tf.shape(Q)[0]
Q = self._Wq(Q) # (batch_size, n_query, d_model)
K = self._Wq(K) # (batch_size, n_key, d_model)
V = self._Wq(V) # (batch_size, n_key, d_model)
Q = self.tensor_split(Q, batch_size) # (batch_size, n_head, n_query, d_k)
K = self.tensor_split(K, batch_size) # (batch_size, n_head, n_query, d_k)
V = self.tensor_split(V, batch_size) # (batch_size, n_head, n_query, d_k)
scaled_attention = self.get_scaled_dot_product_attention(Q, K, V, mask=mask) # (batch_size, n_head, n_query, d_k)
scaled_attention = tf.transpose(scaled_attention, perm=[0,2,1,3]) # (batch_size, n_qeury, n_head, d_k)
scaled_attention = tf.reshape(scaled_attention, shape=[batch_size, -1, self._d_model]) # (batch_size, n_query, d_model)
multi_head_attn = self._Wo(scaled_attention) # (batch_size, n_query, d_model)
return multi_head_attn
def tensor_split(self, tensor, batch_size):
tensor = tf.reshape(tensor, shape=(batch_size, -1, self._n_head, self._d_k)) # (batch_size, n_step, n_head, d_k)
tensor = tf.transpose(tensor, perm=[0,2,1,3]) # (batch_size, n_head, n_step, d_k)
return tensor
def get_scaled_dot_product_attention(self, Q, K, V, mask=None):
"""
Input:
| Q: Tensor of shape (..., n_query, d_k)
| K: Tensor of shape (..., n_key, d_k)
| V: Tensor of shape (..., n_key, d_v)
| mask: Tensor of shape (..., n_query, n_key)
Formula:
| attn = softmax(Q.dot(K.T)/sqrt(d_k))V
Output:
| attn_vector: Tensor of shape (..., n_query, d_v)
"""
# assert tf.shape(Q)[-1] == tf.shape(K)[-1], f"Q has shape {tf.shape(Q)} while K has shape {tf.shape(K)}"
# assert tf.shape(K)[-2] == tf.shape(V)[-2], f"K has shape {tf.shape(K)} while V has shape {tf.shape(V)}"
QK = tf.matmul(Q, K, transpose_b=True) # (..., n_query, n_key)
d_k = tf.cast(tf.shape(K)[-1], tf.float32)
QK_scale = QK/tf.math.sqrt(d_k) # (..., n_query, n_key)
if mask is not None:
QK_scale += (mask * -1e9) # (..., n_query, n_key)
attn_weight = tf.nn.softmax(QK_scale, axis=-1) # (..., n_query, n_key)
attn_vector = tf.matmul(attn_weight, V) # (..., n_query, d_v)
return attn_vector
def get_config(self):
"""
Get configuration for current layer
"""
config = super(MultiHeadAttention, self).get_config()
cur_config = {"n_head":self._n_head, "d_model":self._d_model}
config.update(cur_config)
return config
class PointWiseFeedForwardNetwork(tf.keras.layers.Layer):
"""
Point wise feed forward network as described in "Attention is All You Need".
|
| Parameters:
| | d_model: output dimension
| | d_ff: middle dimension
"""
def __init__(self, d_model, d_ff, **kwargs):
super(PointWiseFeedForwardNetwork, self).__init__(**kwargs)
self._d_model = d_model
self._d_ff = d_ff
self._dense_layer_1 = Dense(d_ff, activation="relu")
self._dense_layer_2 = Dense(d_model)
@tf.function
def call(self, inputs, training=None):
outputs = self._dense_layer_1(inputs)
outputs = self._dense_layer_2(outputs)
return outputs
def get_config(self):
"""
Get configuration for current layer
"""
config = super(PointWiseFeedForwardNetwork, self).get_config()
cur_config = {"d_model":self._d_model, "d_ff":self._d_ff}
config.update(cur_config)
return config
class TransformerEncoderLayer(tf.keras.layers.Layer):
"""
Encoder layer as described in "Attention is All You Need".
|
| Parameters:
| | attn_n_head: number of heads to use
| | d_model: last dimension of attention vector
| | d_ff: inner dimension for point wise feed forward network
| | dropout_rate: drop out rate
|
| Inputs:
| | Inputs: query tensor of shape (batch_size, n_step, d_model)
|
| Outputs:
| | Outputs: vector of shape (batch_size, n_step, d_model)
|
| Notes:
| | 1. As described in the original paper, to use residual connection the input and output data should share the same dimension.
| | 2. Multi-head attention in encoder layer only uses padding mask.
"""
def __init__(self, attn_n_head, d_model, d_ff, dropout_rate=0.1, **kwargs):
super(TransformerEncoderLayer, self).__init__(**kwargs)
self._attn_n_head = attn_n_head
self._d_model = d_model
self._d_ff = d_ff
self._dropout_rate = dropout_rate
self._multi_head_attn_layer = MultiHeadAttention(attn_n_head, d_model)
self._layer_norm_1 = LayerNormalization(epsilon=1e-6)
self._dropout_layer_1 = Dropout(dropout_rate)
self._ffnn_layer = PointWiseFeedForwardNetwork(d_model=d_model, d_ff=d_ff)
self._layer_norm_2 = LayerNormalization(epsilon=1e-6)
self._dropout_layer_2 = Dropout(dropout_rate)
@tf.function
def call(self, inputs, training=None, mask=None):
mh_attn = self._multi_head_attn_layer(inputs, inputs, inputs, mask=mask)
mh_attn = self._dropout_layer_1(mh_attn, training=training)
ffnn_input = self._layer_norm_1(mh_attn+inputs)
ffnn_output = self._ffnn_layer(ffnn_input)
ffnn_output = self._dropout_layer_2(ffnn_output, training=training)
outputs = self._layer_norm_2(ffnn_input+ffnn_output)
return outputs
def get_config(self):
"""
Get configuration for current layer
"""
config = super(TransformerEncoderLayer, self).get_config()
cur_config = {"attn_n_head":self._attn_n_head, "d_model":self._d_model, "d_ff":self._d_ff, "dropout_rate":self._dropout_rate}
config.update(cur_config)
return config
class TransformerEncoder(tf.keras.layers.Layer):
"""
Encoder as described in "Attention is All You Need".
|
| Parameters:
| | input_vocab_size: parameter for embedding layer
| | n_layer: number of encoder layer to stack
| | attn_n_head: number of heads to use
| | d_model: last dimension of attention vector
| | d_ff: inner dimension for point wise feed forward network
| | dropout_rate: drop out rate
|
| Inputs:
| | Inputs: query tensor of shape (batch_size, n_step, d_model)
|
| Outputs:
| | Outputs: vector of shape (batch_size, n_step, d_model)
|
| Notes:
| | 1. As described in the original paper, to use residual connection the input and output data should share the same dimension.
| | 2. Multi-head attention in encoder layer only uses padding mask.
"""
def __init__(self, input_vocab_size, n_layer, d_model, attn_n_head, d_ff, dropout_rate=0.1, **kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self._input_vocab_size = input_vocab_size
self._n_layer = n_layer
self._d_model = d_model
self._attn_n_head = attn_n_head
self._d_ff = d_ff,
self._dropout_rate = dropout_rate
self._positional_embedding_layer = PositionalSinEmbedding(input_vocab_size, d_model)
self._enc_layer_list = [TransformerEncoderLayer(attn_n_head, d_model, d_ff, dropout_rate=dropout_rate) for _ in range(n_layer)]
self._dropout_layer = Dropout(dropout_rate)
@tf.function
def call(self, inputs, training=None, mask=None):
inputs_embed, inputs_positional_encoding = self._positional_embedding_layer(inputs)
# Make up for the scaled attention
inputs_embed *= tf.math.sqrt(tf.cast(self._d_model, tf.float32))
inputs_pos_embed = inputs_embed + inputs_positional_encoding
outputs = self._dropout_layer(inputs_pos_embed, training=training)
for i in range(self._n_layer):
outputs = self._enc_layer_list[i](outputs, training=training, mask=mask)
return outputs
def get_config(self):
"""
Get configuration for current layer
"""
config = super(TransformerEncoder, self).get_config()
cur_config = {"input_vocab_size":self._input_vocab_size, "n_layer":self._n_layer, "attn_n_head":self._attn_n_head, "d_model":self._d_model, "d_ff":self._d_ff, "dropout_rate":self._dropout_rate}
config.update(cur_config)
return config
class TransformerDecoderLayer(tf.keras.layers.Layer):
"""
Encoder layer as described in "Attention is All You Need".
|
| Parameters:
| | attn_n_head: number of heads to use
| | d_model: last dimension of attention vector
| | d_ff: inner dimension for point wise feed forward network
| | dropout_rate: drop out rate
|
| Inputs:
| | Inputs: query tensor of shape (batch_size, n_step, d_model)
|
| Outputs:
| | Outputs: vector of shape (batch_size, n_step, d_model)
|
| Notes:
| | As described in the original paper, to use residual connection the input and output data should share the same dimension.
"""
def __init__(self, attn_n_head, d_model, d_ff, dropout_rate=0.1, **kwargs):
super(TransformerDecoderLayer, self).__init__(**kwargs)
self._attn_n_head = attn_n_head
self._d_model = d_model
self._d_ff = d_ff
self._dropout_rate = dropout_rate
self._multi_head_attn_layer_1 = MultiHeadAttention(attn_n_head, d_model)
self._multi_head_attn_layer_2 = MultiHeadAttention(attn_n_head, d_model)
self._layer_norm_1 = LayerNormalization(epsilon=1e-6)
self._layer_norm_2 = LayerNormalization(epsilon=1e-6)
self._layer_norm_3 = LayerNormalization(epsilon=1e-6)
self._dropout_layer_1 = Dropout(dropout_rate)
self._dropout_layer_2 = Dropout(dropout_rate)
self._dropout_layer_3 = Dropout(dropout_rate)
self._point_wise_feed_forward_network_layer = PointWiseFeedForwardNetwork(d_model, d_ff)
def call(self, inputs, enc_inputs, training=None, comb_mask=None, padding_mask=None):
mh_attn_1 = self._multi_head_attn_layer_1(inputs, inputs, inputs, mask=comb_mask)
mh_attn_1 = self._dropout_layer_1(mh_attn_1, training=training)
inputs_1 = self._layer_norm_1(mh_attn_1 + inputs)
mh_attn_2 = self._multi_head_attn_layer_2(inputs_1, enc_inputs, enc_inputs, mask=padding_mask)
mh_attn_2 = self._dropout_layer_2(mh_attn_2, training=training)
inputs_2 = self._layer_norm_2(mh_attn_2 + inputs_1)
ffnn_output = self._point_wise_feed_forward_network_layer(inputs_2)
ffnn_output = self._dropout_layer_3(ffnn_output, training=training)
outputs = self._layer_norm_3(inputs_2 + ffnn_output)
return outputs
def get_config(self):
"""
Get configuration for current layer
"""
config = super(TransformerDecoderLayer, self).get_config()
cur_config = {"attn_n_head":self._attn_n_head, "d_model":self._d_model, "d_ff":self._d_ff, "dropout_rate":self._dropout_rate}
config.update(cur_config)
return config
class TransformerDecoder(tf.keras.layers.Layer):
"""
Decoder as described in "Attention is All You Need".
|
| Parameters:
| | target_vocab_size: parameter for embedding layer
| | n_layer: number of decoder layer to stack
| | attn_n_head: number of heads to use
| | d_model: last dimension of attention vector
| | d_ff: inner dimension for point wise feed forward network
| | dropout_rate: drop out rate
|
| Inputs:
| | inputs: tensor of shape (batch_size, n_step, d_model)
| | enc_inputs: tensor of shape (batch_size, n_step, d_model)
|
| Outputs:
| | outputs: vector of shape (batch_size, n_step, d_model)
|
| Notes:
| | 1. As described in the original paper, to use residual connection the input and output data should share the same dimension.
| | 2. Multi-head attention in encoder layer only uses padding mask.
"""
def __init__(self, target_vocab_size, n_layer, d_model, attn_n_head, d_ff, dropout_rate=0.1, **kwargs):
super(TransformerDecoder, self).__init__(**kwargs)
self._target_vocab_size = target_vocab_size
self._n_layer = n_layer
self._d_model = d_model
self._attn_n_head = attn_n_head
self._d_ff = d_ff,
self._dropout_rate = dropout_rate
self._positional_embedding_layer = PositionalSinEmbedding(target_vocab_size, d_model)
self._dec_layer_list = [TransformerDecoderLayer(attn_n_head, d_model, d_ff, dropout_rate=dropout_rate) for _ in range(n_layer)]
self._dropout_layer = Dropout(dropout_rate)
@tf.function
def call(self, inputs, enc_inputs, training=None, comb_mask=None, padding_mask=None):
inputs_embed, inputs_positional_encoding = self._positional_embedding_layer(inputs)
# Make up for the scaled attention
inputs_embed *= tf.math.sqrt(tf.cast(self._d_model, tf.float32))
inputs_pos_embed = inputs_embed + inputs_positional_encoding
outputs = self._dropout_layer(inputs_pos_embed, training=training)
for i in range(self._n_layer):
outputs = self._dec_layer_list[i](outputs, enc_inputs, training=training, comb_mask=comb_mask, padding_mask=padding_mask)
return outputs
def get_config(self):
"""
Get configuration for current layer
"""
config = super(TransformerDecoder, self).get_config()
cur_config = {"target_vocab_size":self._target_vocab_size, "n_layer":self._n_layer, "attn_n_head":self._attn_n_head, "d_model":self._d_model, "d_ff":self._d_ff, "dropout_rate":self._dropout_rate}
config.update(cur_config)
return config
| [
"tensorflow.math.pow",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.math.cos",
"tensorflow.math.sqrt",
"tensorflow.math.sin",
"tensorflow.range",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.cast"
] | [((1359, 1386), 'tensorflow.cast', 'tf.cast', (['n_step', 'tf.float32'], {}), '(n_step, tf.float32)\n', (1366, 1386), True, 'import tensorflow as tf\n'), ((1399, 1427), 'tensorflow.cast', 'tf.cast', (['n_embed', 'tf.float32'], {}), '(n_embed, tf.float32)\n', (1406, 1427), True, 'import tensorflow as tf\n'), ((4243, 4292), 'tensorflow.transpose', 'tf.transpose', (['scaled_attention'], {'perm': '[0, 2, 1, 3]'}), '(scaled_attention, perm=[0, 2, 1, 3])\n', (4255, 4292), True, 'import tensorflow as tf\n'), ((4348, 4415), 'tensorflow.reshape', 'tf.reshape', (['scaled_attention'], {'shape': '[batch_size, -1, self._d_model]'}), '(scaled_attention, shape=[batch_size, -1, self._d_model])\n', (4358, 4415), True, 'import tensorflow as tf\n'), ((4613, 4680), 'tensorflow.reshape', 'tf.reshape', (['tensor'], {'shape': '(batch_size, -1, self._n_head, self._d_k)'}), '(tensor, shape=(batch_size, -1, self._n_head, self._d_k))\n', (4623, 4680), True, 'import tensorflow as tf\n'), ((4728, 4767), 'tensorflow.transpose', 'tf.transpose', (['tensor'], {'perm': '[0, 2, 1, 3]'}), '(tensor, perm=[0, 2, 1, 3])\n', (4740, 4767), True, 'import tensorflow as tf\n'), ((5417, 5450), 'tensorflow.matmul', 'tf.matmul', (['Q', 'K'], {'transpose_b': '(True)'}), '(Q, K, transpose_b=True)\n', (5426, 5450), True, 'import tensorflow as tf\n'), ((5672, 5704), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['QK_scale'], {'axis': '(-1)'}), '(QK_scale, axis=-1)\n', (5685, 5704), True, 'import tensorflow as tf\n'), ((5745, 5770), 'tensorflow.matmul', 'tf.matmul', (['attn_weight', 'V'], {}), '(attn_weight, V)\n', (5754, 5770), True, 'import tensorflow as tf\n'), ((1527, 1544), 'tensorflow.range', 'tf.range', (['n_embed'], {}), '(n_embed)\n', (1535, 1544), True, 'import tensorflow as tf\n'), ((1726, 1758), 'tensorflow.math.cos', 'tf.math.cos', (['positional_encoding'], {}), '(positional_encoding)\n', (1737, 1758), True, 'import tensorflow as tf\n'), ((1760, 1792), 'tensorflow.math.sin', 'tf.math.sin', (['positional_encoding'], {}), '(positional_encoding)\n', (1771, 1792), True, 'import tensorflow as tf\n'), ((3711, 3722), 'tensorflow.shape', 'tf.shape', (['Q'], {}), '(Q)\n', (3719, 3722), True, 'import tensorflow as tf\n'), ((5536, 5553), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['d_k'], {}), '(d_k)\n', (5548, 5553), True, 'import tensorflow as tf\n'), ((10591, 10625), 'tensorflow.cast', 'tf.cast', (['self._d_model', 'tf.float32'], {}), '(self._d_model, tf.float32)\n', (10598, 10625), True, 'import tensorflow as tf\n'), ((15428, 15462), 'tensorflow.cast', 'tf.cast', (['self._d_model', 'tf.float32'], {}), '(self._d_model, tf.float32)\n', (15435, 15462), True, 'import tensorflow as tf\n'), ((1021, 1043), 'tensorflow.shape', 'tf.shape', (['inputs_embed'], {}), '(inputs_embed)\n', (1029, 1043), True, 'import tensorflow as tf\n'), ((1049, 1071), 'tensorflow.shape', 'tf.shape', (['inputs_embed'], {}), '(inputs_embed)\n', (1057, 1071), True, 'import tensorflow as tf\n'), ((1460, 1476), 'tensorflow.range', 'tf.range', (['n_step'], {}), '(n_step)\n', (1468, 1476), True, 'import tensorflow as tf\n'), ((1598, 1654), 'tensorflow.math.pow', 'tf.math.pow', (['(10000.0)', '(2.0 * (embed_dim // 2.0) / n_embed)'], {}), '(10000.0, 2.0 * (embed_dim // 2.0) / n_embed)\n', (1609, 1654), True, 'import tensorflow as tf\n'), ((5491, 5502), 'tensorflow.shape', 'tf.shape', (['K'], {}), '(K)\n', (5499, 5502), True, 'import tensorflow as tf\n'), ((1693, 1710), 'tensorflow.range', 'tf.range', (['n_embed'], {}), '(n_embed)\n', (1701, 1710), True, 'import tensorflow as tf\n')] |
import sys
sys.path.insert(1, '../')
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import random
from random import randint
from functions import config, wait
class Bot:
def __init__(self):
self.creds, self.accs, self.comments = config()
self.browser = webdriver.Chrome(executable_path='/usr/local/bin/chromedriver')
#TODO make own ActionChains Manager Class
self.browser.get("https://www.instagram.com/accounts/login/?source=auth_switcher")
wait(2,5)
self.login()
self.actions = ActionChains(self.browser)
def login(self):
self.insert(self.browser.find_element_by_name("username"), self.creds[0])
wait(2,5)
pw = self.browser.find_element_by_name("password")
pw.click()
self.insert(pw, self.creds[1])
wait(2,5)
pw.send_keys(Keys.ENTER)
self.browser.maximize_window()
wait(5,8)
# no Save Login Info
self.browser.find_element_by_class_name('sqdOP.L3NKy.y3zKF').click()
wait(5,8)
# no notification push
self.browser.find_element_by_class_name('aOOlW.HoLwm ').click()
wait(8,10)
def insert(self, textField, _input):
for char in _input:
wait(0.15,0.35)
textField.send_keys(char)
def searchUser(self, user):
search = self.browser.find_element_by_class_name("XTCLo.x3qfX")
wait(2,5)
self.insert(search, user)
wait(10,12)
try:
# sometimes the exact user literal does not find the wanted user
if (self.browser.find_element_by_class_name("Ap253").text != user):
self.browser.get('https://www.instagram.com/' + user)
wait(7,12)
return True
self.browser.find_element_by_class_name("Ap253").click()
wait(10,14)
return True
except:
wait(5,8)
self.browser.find_element_by_class_name("aIYm8.coreSpriteSearchClear").click()
wait(10,15)
return False
def likePost(self):
for post in self.browser.find_element_by_class_name('fr66n').find_elements_by_css_selector("*"):
if(post.get_attribute("class") == '_8-yf5 '):
if(post.get_attribute("fill") == '#ed4956'):
break
else:
self.browser.find_element_by_class_name('fr66n').click() # like
break
wait(4,8)
def insertComment(self, user):
comment = random.choice(self.comments) + ' '
if (randint(0,2)==0):
comment = comment + '@' + user + ' '
commentField = self.browser.find_element_by_class_name('Ypffh')
commentField.click()
commentField = self.browser.find_element_by_class_name('Ypffh')
for char in comment:
wait(0.2, 0.6)
commentField.send_keys(char)
wait(5,8)
self.actions.send_keys(Keys.ENTER).perform()
wait(5,8)
self.actions.send_keys(Keys.ESCAPE).perform()
wait(5,8)
self.actions.reset_actions()
def tryFollow(self):
wait(3,5)
for button in self.browser.find_elements_by_tag_name("button"):
if (type(button.text) is not str):
print('not string')
continue
if (button.text == 'Requested' or button.text == 'Following'):
print('already following')
break
if (button.text == 'Follow' or button.text == 'Follow Back'):
button.click()
break
wait(5,8)
| [
"sys.path.insert",
"random.choice",
"random.randint",
"selenium.webdriver.Chrome",
"selenium.webdriver.common.action_chains.ActionChains",
"functions.wait",
"functions.config"
] | [((11, 36), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../"""'], {}), "(1, '../')\n", (26, 36), False, 'import sys\n'), ((350, 358), 'functions.config', 'config', ([], {}), '()\n', (356, 358), False, 'from functions import config, wait\n'), ((388, 451), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': '"""/usr/local/bin/chromedriver"""'}), "(executable_path='/usr/local/bin/chromedriver')\n", (404, 451), False, 'from selenium import webdriver\n'), ((610, 620), 'functions.wait', 'wait', (['(2)', '(5)'], {}), '(2, 5)\n', (614, 620), False, 'from functions import config, wait\n'), ((664, 690), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['self.browser'], {}), '(self.browser)\n', (676, 690), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((824, 834), 'functions.wait', 'wait', (['(2)', '(5)'], {}), '(2, 5)\n', (828, 834), False, 'from functions import config, wait\n'), ((972, 982), 'functions.wait', 'wait', (['(2)', '(5)'], {}), '(2, 5)\n', (976, 982), False, 'from functions import config, wait\n'), ((1079, 1089), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (1083, 1089), False, 'from functions import config, wait\n'), ((1212, 1222), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (1216, 1222), False, 'from functions import config, wait\n'), ((1342, 1353), 'functions.wait', 'wait', (['(8)', '(10)'], {}), '(8, 10)\n', (1346, 1353), False, 'from functions import config, wait\n'), ((1622, 1632), 'functions.wait', 'wait', (['(2)', '(5)'], {}), '(2, 5)\n', (1626, 1632), False, 'from functions import config, wait\n'), ((1674, 1686), 'functions.wait', 'wait', (['(10)', '(12)'], {}), '(10, 12)\n', (1678, 1686), False, 'from functions import config, wait\n'), ((2757, 2767), 'functions.wait', 'wait', (['(4)', '(8)'], {}), '(4, 8)\n', (2761, 2767), False, 'from functions import config, wait\n'), ((3347, 3357), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (3351, 3357), False, 'from functions import config, wait\n'), ((3477, 3487), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (3481, 3487), False, 'from functions import config, wait\n'), ((3552, 3562), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (3556, 3562), False, 'from functions import config, wait\n'), ((3651, 3661), 'functions.wait', 'wait', (['(3)', '(5)'], {}), '(3, 5)\n', (3655, 3661), False, 'from functions import config, wait\n'), ((4120, 4130), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (4124, 4130), False, 'from functions import config, wait\n'), ((1436, 1452), 'functions.wait', 'wait', (['(0.15)', '(0.35)'], {}), '(0.15, 0.35)\n', (1440, 1452), False, 'from functions import config, wait\n'), ((2062, 2074), 'functions.wait', 'wait', (['(10)', '(14)'], {}), '(10, 14)\n', (2066, 2074), False, 'from functions import config, wait\n'), ((2862, 2890), 'random.choice', 'random.choice', (['self.comments'], {}), '(self.comments)\n', (2875, 2890), False, 'import random\n'), ((2917, 2930), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2924, 2930), False, 'from random import randint\n'), ((3252, 3266), 'functions.wait', 'wait', (['(0.2)', '(0.6)'], {}), '(0.2, 0.6)\n', (3256, 3266), False, 'from functions import config, wait\n'), ((1942, 1953), 'functions.wait', 'wait', (['(7)', '(12)'], {}), '(7, 12)\n', (1946, 1953), False, 'from functions import config, wait\n'), ((2126, 2136), 'functions.wait', 'wait', (['(5)', '(8)'], {}), '(5, 8)\n', (2130, 2136), False, 'from functions import config, wait\n'), ((2239, 2251), 'functions.wait', 'wait', (['(10)', '(15)'], {}), '(10, 15)\n', (2243, 2251), False, 'from functions import config, wait\n')] |
# Generated by Django 3.1.2 on 2021-01-27 18:43
from django.db import migrations
class Migration(migrations.Migration):
def add_file_data_providers(apps, schema_editor):
DataProviderType = apps.get_model("jobs", "DataProviderType")
ExportFormat = apps.get_model("jobs", "ExportFormat")
# Create the DataProvider objects if they don't exist.
DataProviderType.objects.get_or_create(type_name="vector-file")
DataProviderType.objects.get_or_create(type_name="raster-file")
# Currently available Provider Types.
vector_data_provider_types = ["vector-file"]
raster_data_provider_types = ["raster-file"]
# Currently available Export Formats.
nitf = ExportFormat.objects.get(slug="nitf")
gtiff = ExportFormat.objects.get(slug="gtiff")
kml = ExportFormat.objects.get(slug="kml")
shp = ExportFormat.objects.get(slug="shp")
gpkg = ExportFormat.objects.get(slug="gpkg")
# Set the known supported export formats per provider type.
for provider_type in DataProviderType.objects.all():
if provider_type.type_name in vector_data_provider_types:
provider_type.supported_formats.add(gpkg, shp, kml)
if provider_type.type_name in raster_data_provider_types:
provider_type.supported_formats.add(gpkg, gtiff, nitf)
dependencies = [
("jobs", "0010_dataprovider_data_type"),
]
operations = [migrations.RunPython(add_file_data_providers)]
| [
"django.db.migrations.RunPython"
] | [((1485, 1530), 'django.db.migrations.RunPython', 'migrations.RunPython', (['add_file_data_providers'], {}), '(add_file_data_providers)\n', (1505, 1530), False, 'from django.db import migrations\n')] |
# -*- coding: utf-8; -*-
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
import os
from setuptools import setup, find_packages
import json
package_json = json.loads(read("package.json"))
if package_json:
version = package_json.get('version')
else:
raise RuntimeError('Unable to find version string')
def get_versions():
return version
def read(path: str) -> str:
p = Path(os.path.dirname(__file__)) / path
with open(p.resolve(), "r", encoding="utf-8") as fp:
return fp.read()
setup(name='crate-admin',
version=version,
description='Crate Admin UI',
long_description='Crate Admin UI',
classifiers=[
"Programming Language :: JavaScript",
],
author='CRATE Technology',
author_email='<EMAIL>',
url='https://github.com/crate/crate-admin',
keywords='crate admin ui',
license='apache license 2.0',
packages=find_packages(),
namespace_packages=[],
include_package_data=True,
zip_safe=False,
test_suite="",
)
| [
"os.path.dirname",
"setuptools.find_packages"
] | [((1862, 1877), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1875, 1877), False, 'from setuptools import setup, find_packages\n'), ((1344, 1369), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1359, 1369), False, 'import os\n')] |
#!/usr/bin/python
import sys
import csv
def mapper():
reader = csv.reader(sys.stdin, delimiter='\t')
writer = csv.writer(sys.stdout, delimiter='\t')
tagFrequency = {}
for line in reader:
nodeType = line[5]
if not nodeType == "question":
continue
tags = line[2].split()
for tag in tags:
if tag not in tagFrequency:
tagFrequency[tag] = 1
else:
tagFrequency[tag] += 1
for tag in tagFrequency:
writer.writerow([tag, tagFrequency[tag]])
def main():
mapper()
if __name__ == "__main__":
main()
| [
"csv.writer",
"csv.reader"
] | [((68, 105), 'csv.reader', 'csv.reader', (['sys.stdin'], {'delimiter': '"""\t"""'}), "(sys.stdin, delimiter='\\t')\n", (78, 105), False, 'import csv\n'), ((119, 157), 'csv.writer', 'csv.writer', (['sys.stdout'], {'delimiter': '"""\t"""'}), "(sys.stdout, delimiter='\\t')\n", (129, 157), False, 'import csv\n')] |
# Copyright 2019, FBPIC contributors
# Authors: <NAME>, <NAME>
# License: 3-Clause-BSD-LBNL
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It defines the picmi Simulation interface
"""
import numpy as np
from scipy.constants import c, e, m_e
from .particle_charge_and_mass import particle_charge, particle_mass
# Import relevant fbpic object
from fbpic.main import Simulation as FBPICSimulation
from fbpic.fields.smoothing import BinomialSmoother
from fbpic.lpa_utils.laser import add_laser_pulse, GaussianLaser
from fbpic.lpa_utils.bunch import add_particle_bunch_gaussian
from fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic
# Import picmi base class
from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid
from picmistandard import PICMI_AnalyticDistribution, PICMI_UniformDistribution, PICMI_GriddedLayout
from picmistandard import PICMI_PseudoRandomLayout, PICMI_GaussianBunchDistribution
from picmistandard import PICMI_LaserAntenna, PICMI_GaussianLaser
from picmistandard import PICMI_Species, PICMI_MultiSpecies
from picmistandard import PICMI_FieldDiagnostic, PICMI_ParticleDiagnostic
# Define a new simulation object for picmi, that derives from PICMI_Simulation
class Simulation( PICMI_Simulation ):
# Redefine the `init` method, as required by the picmi `_ClassWithInit`
def init(self, kw):
# Get the grid
grid = self.solver.grid
if not type(grid) == PICMI_CylindricalGrid:
raise ValueError('When using fbpic with PICMI, '
'the grid needs to be a CylindricalGrid object.')
# Check rmin and boundary conditions
assert grid.rmin == 0.
assert grid.bc_zmin == grid.bc_zmax
assert grid.bc_zmax in ['periodic', 'open']
assert grid.bc_rmax in ['reflective', 'open']
# Determine timestep
if self.solver.cfl is not None:
dz = (grid.zmax-grid.zmin)/grid.nz
dt = self.solver.cfl * dz / c
elif self.time_step_size is not None:
dt = self.time_step_size
else:
raise ValueError(
'You need to either set the `cfl` of the solver\n'
'or the `timestep_size` of the `Simulation`.')
# Convert API for the smoother
if self.solver.source_smoother is None:
smoother = BinomialSmoother()
else:
smoother = BinomialSmoother(
n_passes=self.solver.source_smoother.n_pass,
compensator=self.solver.source_smoother.compensation )
# Order of the stencil for z derivatives in the Maxwell solver
if self.solver.stencil_order is None:
n_order = -1
else:
n_order = self.solver.stencil_order[-1]
# Initialize and store the FBPIC simulation object
self.fbpic_sim = FBPICSimulation(
Nz=int(grid.nz), zmin=grid.zmin, zmax=grid.zmax,
Nr=int(grid.nr), rmax=grid.rmax, Nm=grid.n_azimuthal_modes,
dt=dt, use_cuda=True, smoother=smoother, n_order=n_order,
boundaries={'z':grid.bc_zmax, 'r':grid.bc_rmax} )
# Set the moving window
if grid.moving_window_zvelocity is not None:
self.fbpic_sim.set_moving_window(grid.moving_window_zvelocity)
# Redefine the method `add_laser` from the PICMI Simulation class
def add_laser( self, laser, injection_method ):
# Call method of parent class
PICMI_Simulation.add_laser( self, laser, injection_method )
# Handle injection method
assert type(injection_method) == PICMI_LaserAntenna
# Handle laser profile method
if type(laser) == PICMI_GaussianLaser:
assert laser.propagation_direction[0] == 0.
assert laser.propagation_direction[1] == 0.
assert (laser.zeta is None) or (laser.zeta == 0)
assert (laser.beta is None) or (laser.beta == 0)
phi2_chirp = laser.phi2
if phi2_chirp is None:
phi2_chirp = 0
polarization_angle = np.arctan2(laser.polarization_direction[1],
laser.polarization_direction[0])
laser_profile = GaussianLaser( a0=laser.a0, waist=laser.waist,
z0=laser.centroid_position[-1], zf=laser.focal_position[-1],
tau=laser.duration, theta_pol=polarization_angle,
phi2_chirp=phi2_chirp )
else:
raise ValueError('Unknown laser profile: %s' %type(injection_method))
# Inject the laser
add_laser_pulse( self.fbpic_sim, laser_profile, method='antenna',
z0_antenna=injection_method.position[-1] )
# Redefine the method `add_species` from the PICMI Simulation class
def add_species( self, species, layout, initialize_self_field=False ):
# Call method of parent class
PICMI_Simulation.add_species( self, species, layout,
initialize_self_field )
# Extract list of species
if type(species) == PICMI_Species:
species_instances_list = [species]
elif type(species) == PICMI_MultiSpecies:
species_instances_list = species.species_instances_list
else:
raise ValueError('Unknown type: %s' %type(species))
# Loop over species and create FBPIC species
for s in species_instances_list:
# Get their charge and mass
if s.particle_type is not None:
s.charge = particle_charge[s.particle_type]
s.mass = particle_mass[s.particle_type]
# If `charge_state` is set, redefine the charge and mass
if s.charge_state is not None:
s.charge = s.charge_state*e
s.mass -= s.charge_state*m_e
# Add the species to the FBPIC simulation
fbpic_species = self._create_new_fbpic_species(s,
layout, initialize_self_field)
# Register a pointer to the FBPIC species in the PICMI species itself
# (Useful for particle diagnostics later on)
s.fbpic_species = fbpic_species
# Loop over species and handle ionization
for s in species_instances_list:
for interaction in s.interactions:
assert interaction[0] == 'ionization'
assert interaction[1] == 'ADK'
picmi_target = interaction[2]
if not hasattr( picmi_target, 'fbpic_species' ):
raise RuntimeError('For ionization with PICMI+FBPIC:\n'
'You need to add the target species to the simulation,'
' before the other species.')
fbpic_target = picmi_target.fbpic_species
fbpic_source = s.fbpic_species
fbpic_source.make_ionizable( element=s.particle_type,
level_start=s.charge_state,
target_species=fbpic_target )
def _create_new_fbpic_species(self, s, layout, initialize_self_field):
# - For the case of a plasma defined in a gridded layout
if type(layout) == PICMI_GriddedLayout:
assert initialize_self_field == False
# - Uniform distribution
if type(s.initial_distribution)==PICMI_UniformDistribution:
n0 = s.initial_distribution.density
dens_func = None
# - Analytic distribution
elif type(s.initial_distribution)==PICMI_AnalyticDistribution:
import numexpr
density_expression = s.initial_distribution.density_expression
if s.density_scale is not None:
n0 = s.density_scale
else:
n0 = 1.
def dens_func(z, r):
n = numexpr.evaluate(density_expression)
return n
else:
raise ValueError('Unknown combination of layout and distribution')
p_nr = layout.n_macroparticle_per_cell[0]
p_nt = layout.n_macroparticle_per_cell[1]
p_nz = layout.n_macroparticle_per_cell[2]
fbpic_species = self.fbpic_sim.add_new_species(
q=s.charge, m=s.mass, n=n0,
dens_func=dens_func, p_nz=p_nz, p_nr=p_nr, p_nt=p_nt,
p_zmin=s.initial_distribution.lower_bound[-1],
p_zmax=s.initial_distribution.upper_bound[-1],
continuous_injection=s.initial_distribution.fill_in )
# - For the case of a Gaussian beam
elif (type(s.initial_distribution)==PICMI_GaussianBunchDistribution) \
and (type(layout) == PICMI_PseudoRandomLayout):
dist = s.initial_distribution
gamma0_beta0 = dist.centroid_velocity[-1]/c
gamma0 = ( 1 + gamma0_beta0**2 )**.5
sig_r = dist.rms_bunch_size[0]
sig_z = dist.rms_bunch_size[-1]
sig_gamma = dist.rms_velocity[-1]/c
sig_vr = dist.rms_velocity[0] / gamma0
if sig_vr != 0:
tf = - sig_r**2/sig_vr**2 * dist.velocity_divergence[0]
else:
tf = 0.
zf = dist.centroid_position[-1] + \
dist.centroid_velocity[-1]/gamma0 * tf
# Calculate size at focus and emittance
sig_r0 = (sig_r**2 - (sig_vr*tf)**2)**0.5
n_emit = gamma0 * sig_r0 * sig_vr/c
# Get the number of physical particles
n_physical_particles = dist.n_physical_particles
if s.density_scale is not None:
n_physical_particles *= s.density_scale
fbpic_species = add_particle_bunch_gaussian( self.fbpic_sim,
q=s.charge, m=s.mass,
gamma0=gamma0, sig_gamma=sig_gamma,
sig_r=sig_r0, sig_z=sig_z, n_emit=n_emit,
n_physical_particles=n_physical_particles,
n_macroparticles=layout.n_macroparticles,
zf=zf, tf=tf,
initialize_self_field=initialize_self_field )
# - For the case of an empty species
elif (s.initial_distribution is None) and (layout is None):
fbpic_species = self.fbpic_sim.add_new_species(q=s.charge, m=s.mass)
else:
raise ValueError('Unknown combination of layout and distribution')
return fbpic_species
# Redefine the method `add_diagnostic` of the parent class
def add_diagnostic(self, diagnostic):
# Call method of parent class
PICMI_Simulation.add_diagnostic( self, diagnostic )
# Handle diagnostic
if diagnostic.step_min is None:
iteration_min = 0
else:
iteration_min = diagnostic.step_min
if diagnostic.step_max is None:
iteration_max = np.inf
else:
iteration_max = diagnostic.step_max
# Register field diagnostic
if type(diagnostic) == PICMI_FieldDiagnostic:
diag = FieldDiagnostic(
period=diagnostic.period,
fldobject=self.fbpic_sim.fld,
comm=self.fbpic_sim.comm,
fieldtypes=diagnostic.data_list,
write_dir=diagnostic.write_dir,
iteration_min=iteration_min,
iteration_max=iteration_max)
# Register particle diagnostic
elif type(diagnostic) == PICMI_ParticleDiagnostic:
species_dict = {}
for s in diagnostic.species:
if s.name is None:
raise ValueError('When using a species in a diagnostic, '
'its name must be set.')
species_dict[s.name] = s.fbpic_species
diag = ParticleDiagnostic(
period=diagnostic.period,
species=species_dict,
comm=self.fbpic_sim.comm,
particle_data=diagnostic.data_list,
write_dir=diagnostic.write_dir,
iteration_min=iteration_min,
iteration_max=iteration_max)
# Add it to the FBPIC simulation
self.fbpic_sim.diags.append( diag )
# Redefine the method `step` of the parent class
def step(self, nsteps=None):
if nsteps is None:
nsteps = self.max_steps
self.fbpic_sim.step( nsteps )
| [
"picmistandard.PICMI_Simulation.add_laser",
"picmistandard.PICMI_Simulation.add_diagnostic",
"fbpic.openpmd_diag.FieldDiagnostic",
"fbpic.lpa_utils.bunch.add_particle_bunch_gaussian",
"fbpic.lpa_utils.laser.GaussianLaser",
"fbpic.lpa_utils.laser.add_laser_pulse",
"fbpic.openpmd_diag.ParticleDiagnostic",... | [((3469, 3526), 'picmistandard.PICMI_Simulation.add_laser', 'PICMI_Simulation.add_laser', (['self', 'laser', 'injection_method'], {}), '(self, laser, injection_method)\n', (3495, 3526), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((4589, 4700), 'fbpic.lpa_utils.laser.add_laser_pulse', 'add_laser_pulse', (['self.fbpic_sim', 'laser_profile'], {'method': '"""antenna"""', 'z0_antenna': 'injection_method.position[-1]'}), "(self.fbpic_sim, laser_profile, method='antenna', z0_antenna\n =injection_method.position[-1])\n", (4604, 4700), False, 'from fbpic.lpa_utils.laser import add_laser_pulse, GaussianLaser\n'), ((4905, 4979), 'picmistandard.PICMI_Simulation.add_species', 'PICMI_Simulation.add_species', (['self', 'species', 'layout', 'initialize_self_field'], {}), '(self, species, layout, initialize_self_field)\n', (4933, 4979), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((10795, 10844), 'picmistandard.PICMI_Simulation.add_diagnostic', 'PICMI_Simulation.add_diagnostic', (['self', 'diagnostic'], {}), '(self, diagnostic)\n', (10826, 10844), False, 'from picmistandard import PICMI_Simulation, PICMI_CylindricalGrid\n'), ((2356, 2374), 'fbpic.fields.smoothing.BinomialSmoother', 'BinomialSmoother', ([], {}), '()\n', (2372, 2374), False, 'from fbpic.fields.smoothing import BinomialSmoother\n'), ((2412, 2532), 'fbpic.fields.smoothing.BinomialSmoother', 'BinomialSmoother', ([], {'n_passes': 'self.solver.source_smoother.n_pass', 'compensator': 'self.solver.source_smoother.compensation'}), '(n_passes=self.solver.source_smoother.n_pass, compensator=\n self.solver.source_smoother.compensation)\n', (2428, 2532), False, 'from fbpic.fields.smoothing import BinomialSmoother\n'), ((4078, 4154), 'numpy.arctan2', 'np.arctan2', (['laser.polarization_direction[1]', 'laser.polarization_direction[0]'], {}), '(laser.polarization_direction[1], laser.polarization_direction[0])\n', (4088, 4154), True, 'import numpy as np\n'), ((4227, 4416), 'fbpic.lpa_utils.laser.GaussianLaser', 'GaussianLaser', ([], {'a0': 'laser.a0', 'waist': 'laser.waist', 'z0': 'laser.centroid_position[-1]', 'zf': 'laser.focal_position[-1]', 'tau': 'laser.duration', 'theta_pol': 'polarization_angle', 'phi2_chirp': 'phi2_chirp'}), '(a0=laser.a0, waist=laser.waist, z0=laser.centroid_position[-1\n ], zf=laser.focal_position[-1], tau=laser.duration, theta_pol=\n polarization_angle, phi2_chirp=phi2_chirp)\n', (4240, 4416), False, 'from fbpic.lpa_utils.laser import add_laser_pulse, GaussianLaser\n'), ((11254, 11488), 'fbpic.openpmd_diag.FieldDiagnostic', 'FieldDiagnostic', ([], {'period': 'diagnostic.period', 'fldobject': 'self.fbpic_sim.fld', 'comm': 'self.fbpic_sim.comm', 'fieldtypes': 'diagnostic.data_list', 'write_dir': 'diagnostic.write_dir', 'iteration_min': 'iteration_min', 'iteration_max': 'iteration_max'}), '(period=diagnostic.period, fldobject=self.fbpic_sim.fld,\n comm=self.fbpic_sim.comm, fieldtypes=diagnostic.data_list, write_dir=\n diagnostic.write_dir, iteration_min=iteration_min, iteration_max=\n iteration_max)\n', (11269, 11488), False, 'from fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic\n'), ((9809, 10115), 'fbpic.lpa_utils.bunch.add_particle_bunch_gaussian', 'add_particle_bunch_gaussian', (['self.fbpic_sim'], {'q': 's.charge', 'm': 's.mass', 'gamma0': 'gamma0', 'sig_gamma': 'sig_gamma', 'sig_r': 'sig_r0', 'sig_z': 'sig_z', 'n_emit': 'n_emit', 'n_physical_particles': 'n_physical_particles', 'n_macroparticles': 'layout.n_macroparticles', 'zf': 'zf', 'tf': 'tf', 'initialize_self_field': 'initialize_self_field'}), '(self.fbpic_sim, q=s.charge, m=s.mass, gamma0=\n gamma0, sig_gamma=sig_gamma, sig_r=sig_r0, sig_z=sig_z, n_emit=n_emit,\n n_physical_particles=n_physical_particles, n_macroparticles=layout.\n n_macroparticles, zf=zf, tf=tf, initialize_self_field=initialize_self_field\n )\n', (9836, 10115), False, 'from fbpic.lpa_utils.bunch import add_particle_bunch_gaussian\n'), ((12035, 12268), 'fbpic.openpmd_diag.ParticleDiagnostic', 'ParticleDiagnostic', ([], {'period': 'diagnostic.period', 'species': 'species_dict', 'comm': 'self.fbpic_sim.comm', 'particle_data': 'diagnostic.data_list', 'write_dir': 'diagnostic.write_dir', 'iteration_min': 'iteration_min', 'iteration_max': 'iteration_max'}), '(period=diagnostic.period, species=species_dict, comm=\n self.fbpic_sim.comm, particle_data=diagnostic.data_list, write_dir=\n diagnostic.write_dir, iteration_min=iteration_min, iteration_max=\n iteration_max)\n', (12053, 12268), False, 'from fbpic.openpmd_diag import FieldDiagnostic, ParticleDiagnostic\n'), ((7952, 7988), 'numexpr.evaluate', 'numexpr.evaluate', (['density_expression'], {}), '(density_expression)\n', (7968, 7988), False, 'import numexpr\n')] |
from .base import Algorithm
import random
from copy import deepcopy
from models import Tour
from typing import List, Tuple
n_population = 100
CXPB = 0.95
MUTPB = 0.1
class GeneticAlgorithm(Algorithm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.population: List[Tour] = []
self.init_population()
def init_population(self):
cities = [idx for idx in range(len(self.cities))]
for _ in range(n_population):
random.shuffle(cities)
tour = Tour(len(cities), self.cost_map)
tour[:] = cities
self.population.append(tour)
self.population = sorted(self.population, key=lambda x: x.cost)
self.best = deepcopy(self.population[0])
def iterate(self):
random.shuffle(self.population)
offsprings = list()
for i, (ind1, ind2) in enumerate(zip(self.population[::2], self.population[1::2])):
if random.random() > CXPB:
continue
offspring1, offspring2 = self.crossover(ind1, ind2)
offsprings.append(offspring1)
offsprings.append(offspring2)
for i, mutant in enumerate(offsprings):
if random.random() > MUTPB:
continue
offsprings[i] = self._mutate(mutant)
new_population = self.population + offsprings
new_population = sorted(new_population, key=lambda x: x.cost)
self.population[:n_population//2] = new_population[:n_population//2]
self.population[n_population//2:] = random.choices(
new_population[n_population//2:],
k=n_population-n_population//2)
assert self.best.cost >= self.population[0].cost, 'Tien hoa khong thanh cong'
self.best = deepcopy(self.population[0])
def _mutate(self, individual: Tour):
individual = deepcopy(individual)
idx1 = random.randint(0, len(self.cities) - 1)
individual[idx1:] = individual[idx1:][::-1]
# idx2 = random.randint(0, len(self.cities) - 1)
# individual[idx1], individual[idx2] = individual[idx2], individual[idx1]
return individual
def select(self, population):
offspring = list()
offspring.append(population[0].clone())
for _ in range(1, len(population)):
idx = random.randint(0, len(population) - 1)
offspring.append(population[idx].clone())
return offspring
def crossover(self, ind1: Tour, ind2: Tour) -> Tuple[Tour, Tour]:
child1 = Tour(len(ind1), ind1.cost_map)
child2 = Tour(len(ind1), ind1.cost_map)
idx = random.randint(0, len(ind1) - 1)
for i in range(idx):
child1.add(ind1[i])
child2.add(ind2[i])
for i in range(idx, len(ind1)):
if ind2[i] not in child1:
child1.add(ind2[i])
if ind1[i] not in child2:
child2.add(ind1[i])
for i in range(len(ind1)):
if ind1[i] not in child1:
child1.add(ind1[i])
if ind2[i] not in child2:
child2.add(ind2[i])
return child1, child2
| [
"random.random",
"random.choices",
"random.shuffle",
"copy.deepcopy"
] | [((742, 770), 'copy.deepcopy', 'deepcopy', (['self.population[0]'], {}), '(self.population[0])\n', (750, 770), False, 'from copy import deepcopy\n'), ((803, 834), 'random.shuffle', 'random.shuffle', (['self.population'], {}), '(self.population)\n', (817, 834), False, 'import random\n'), ((1577, 1668), 'random.choices', 'random.choices', (['new_population[n_population // 2:]'], {'k': '(n_population - n_population // 2)'}), '(new_population[n_population // 2:], k=n_population - \n n_population // 2)\n', (1591, 1668), False, 'import random\n'), ((1790, 1818), 'copy.deepcopy', 'deepcopy', (['self.population[0]'], {}), '(self.population[0])\n', (1798, 1818), False, 'from copy import deepcopy\n'), ((1882, 1902), 'copy.deepcopy', 'deepcopy', (['individual'], {}), '(individual)\n', (1890, 1902), False, 'from copy import deepcopy\n'), ((504, 526), 'random.shuffle', 'random.shuffle', (['cities'], {}), '(cities)\n', (518, 526), False, 'import random\n'), ((971, 986), 'random.random', 'random.random', ([], {}), '()\n', (984, 986), False, 'import random\n'), ((1232, 1247), 'random.random', 'random.random', ([], {}), '()\n', (1245, 1247), False, 'import random\n')] |
import random, pygame
import tkinter as tk
from tkinter import messagebox
pygame.init()
def text_format(message, textFont, textSize, textColor):
newFont=pygame.font.Font(textFont, textSize)
newText=newFont.render(message, 0, textColor)
return newText
font = "Retro.ttf"
class cube(object):
rows = 50
w = 500
def __init__(self,start,dirnx=1,dirny=0,color=(255,0,0)):
self.pos = start
self.dirnx = 1
self.dirny = 0
self.color = color
def move(self, dirnx, dirny):
self.dirnx = dirnx
self.dirny = dirny
self.pos = (self.pos[0] + self.dirnx, self.pos[1] + self.dirny)
def draw(self, surface):
dis = self.w // self.rows
i = self.pos[0]
j = self.pos[1]
pygame.draw.rect(surface, self.color, (i*dis+1,j*dis+1, dis-2, dis-2))
class snake(object):
body = []
turns = {}
def __init__(self, color, pos):
self.color = color
self.head = cube(pos)
self.body.append(self.head)
self.dirnx = 0
self.dirny = 1
def move(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
keys = pygame.key.get_pressed()
for key in keys:
if keys[pygame.K_LEFT]:
self.dirnx = -1
self.dirny = 0
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_RIGHT]:
self.dirnx = 1
self.dirny = 0
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_UP]:
self.dirnx = 0
self.dirny = -1
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
elif keys[pygame.K_DOWN]:
self.dirnx = 0
self.dirny = 1
self.turns[self.head.pos[:]] = [self.dirnx, self.dirny]
for i, c in enumerate(self.body):
p = c.pos[:]
if p in self.turns:
turn = self.turns[p]
c.move(turn[0],turn[1])
if i == len(self.body)-1:
self.turns.pop(p)
else:
if c.dirnx == -1 and c.pos[0] <= 0: c.pos = (c.rows-1, c.pos[1])
elif c.dirnx == 1 and c.pos[0] >= c.rows-1: c.pos = (0,c.pos[1])
elif c.dirny == 1 and c.pos[1] >= c.rows-1: c.pos = (c.pos[0], 0)
elif c.dirny == -1 and c.pos[1] <= 0: c.pos = (c.pos[0],c.rows-1)
else: c.move(c.dirnx,c.dirny)
def reset(self, pos):
self.head = cube(pos)
self.body = []
self.body.append(self.head)
self.turns = {}
self.dirnx = 0
self.dirny = 1
def addCube(self):
tail = self.body[-1]
dx, dy = tail.dirnx, tail.dirny
if dx == 1 and dy == 0:
self.body.append(cube((tail.pos[0]-1,tail.pos[1])))
elif dx == -1 and dy == 0:
self.body.append(cube((tail.pos[0]+1,tail.pos[1])))
elif dx == 0 and dy == 1:
self.body.append(cube((tail.pos[0],tail.pos[1]-1)))
elif dx == 0 and dy == -1:
self.body.append(cube((tail.pos[0],tail.pos[1]+1)))
self.body[-1].dirnx = dx
self.body[-1].dirny = dy
def draw(self, surface):
for i, c in enumerate(self.body):
if i ==0:
c.draw(surface)
else:
c.draw(surface)
def redrawWindow(surface):
global width, rows, s, food, extra, zoom
surface.fill((0,0,0))
s.draw(surface)
food.draw(surface)
extra.draw(surface)
zoom.draw(surface)
pygame.display.update()
def randomSnack(rows, item):
positions = item.body
while True:
x = random.randrange(rows)
y = random.randrange(rows)
if len(list(filter(lambda z:z.pos == (x,y), positions))) > 0:
continue
else:
break
return (x,y)
def message_box(subject, content):
root = tk.Tk()
root.attributes("-topmost", True)
root.withdraw()
messagebox.showinfo(subject, content)
try:
root.destroy()
except:
pass
def game():
global width, rows, s, food, extra, zoom
width = 500
rows = 50
win = pygame.display.set_mode((width, width))
pygame.display.set_caption("Project Hydra | Singleplayer")
s = snake((255,0,0), (10,10))
food = cube(randomSnack(rows, s), color=(0,255,0))
extra = cube((-1,-1), color=(0,0,255))
zoom = cube((-1,-1), color=(255,255,255))
flag = True
speedtime = 0
clock = pygame.time.Clock()
speed = 10
while flag:
pygame.time.delay(50)
clock.tick(speed)
speed = 10
s.move()
if s.body[0].pos == food.pos:
s.addCube()
food = cube(randomSnack(rows, s), color=(0,255,0))
if random.randint(1,6) == 6:
extra = cube(randomSnack(rows, s), color=(0,0,255))
if random. randint(1,10) == 10:
zoom = cube(randomSnack(rows, s), color=(255,255,255))
if s.body[0].pos == extra.pos:
s.addCube()
s.addCube()
s.addCube()
extra = cube((-1,-1), color=(0,0,255))
if s.body[0].pos == zoom.pos:
speedtime = 50
zoom = cube((-1,-1), color=(255,255,255))
if speedtime > 0:
speed = 50
speedtime -= 1
redrawWindow(win)
for x in range(len(s.body)):
if s.body[x].pos in list(map(lambda z:z.pos,s.body[x+1:])):
print('Score: ', len(s.body))
message_box('You died!', 'Back to main menu.')
flag = False
s.body.clear()
main_menu()
def main_menu():
global font
width = 500
screen = pygame.display.set_mode((width, width))
menu=True
selected="single"
clock = pygame.time.Clock()
while menu:
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
quit()
if event.type==pygame.KEYDOWN:
if event.key==pygame.K_UP and (selected == "multi" or selected == "single"):
selected = "single"
elif event.key==pygame.K_DOWN and selected == "single":
selected = "multi"
elif event.key==pygame.K_DOWN and selected == "multi":
selected = "quit"
elif event.key==pygame.K_UP and selected == "quit":
selected = "multi"
if event.key==pygame.K_RETURN:
if selected == "single":
game()
if selected == "multi":
print("Not available yet")
if selected == "quit":
pygame.quit()
quit()
# Main Menu UI
screen.fill((0,0,0))
title=text_format("Project Hydra", font, 90, (255,255,0))
if selected =="single":
text_0 = text_format("Singleplayer", font, 75, (255,255,255))
else:
text_0 = text_format("Singleplayer", font, 75, (120,120,120))
if selected == "multi":
text_1 = text_format("Multiplayer", font, 75, (255,255,255))
else:
text_1 = text_format("Multiplayer", font, 75, (120,120,120))
if selected =="quit":
text_2 = text_format("QUIT", font, 75, (255,255,255))
else:
text_2 = text_format("QUIT", font, 75, (120,120,120))
title_rect = title.get_rect()
single_rect = text_0.get_rect()
multi_rect = text_1.get_rect()
quit_rect = text_2.get_rect()
# Main Menu Text
screen.blit(title, (width/2 - (title_rect[2]/2), 80))
screen.blit(text_0, (width/2 - (single_rect[2]/2), 300))
screen.blit(text_1, (width/2 - (multi_rect[2]/2), 360))
screen.blit(text_2, (width/2 - (quit_rect[2]/2), 420))
pygame.display.update()
clock.tick(10)
pygame.display.set_caption("Project Hydra | Main Menu")
main_menu()
pygame.quit()
quit()
| [
"tkinter.messagebox.showinfo",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"random.randrange",
"pygame.display.set_mode",
"pygame.time.delay",
"pygame.time.Clock",
"pygame.key.get_pressed",
"tkinter.Tk",
"pygame.draw.rect",
"pygame.display.set_caption",
"pygame.font.Font",
"pygame.di... | [((75, 88), 'pygame.init', 'pygame.init', ([], {}), '()\n', (86, 88), False, 'import random, pygame\n'), ((8384, 8397), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (8395, 8397), False, 'import random, pygame\n'), ((159, 195), 'pygame.font.Font', 'pygame.font.Font', (['textFont', 'textSize'], {}), '(textFont, textSize)\n', (175, 195), False, 'import random, pygame\n'), ((3832, 3855), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3853, 3855), False, 'import random, pygame\n'), ((4198, 4205), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (4203, 4205), True, 'import tkinter as tk\n'), ((4268, 4305), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['subject', 'content'], {}), '(subject, content)\n', (4287, 4305), False, 'from tkinter import messagebox\n'), ((4462, 4501), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, width)'], {}), '((width, width))\n', (4485, 4501), False, 'import random, pygame\n'), ((4506, 4564), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Project Hydra | Singleplayer"""'], {}), "('Project Hydra | Singleplayer')\n", (4532, 4564), False, 'import random, pygame\n'), ((4796, 4815), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (4813, 4815), False, 'import random, pygame\n'), ((6021, 6060), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, width)'], {}), '((width, width))\n', (6044, 6060), False, 'import random, pygame\n'), ((6111, 6130), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (6128, 6130), False, 'import random, pygame\n'), ((785, 873), 'pygame.draw.rect', 'pygame.draw.rect', (['surface', 'self.color', '(i * dis + 1, j * dis + 1, dis - 2, dis - 2)'], {}), '(surface, self.color, (i * dis + 1, j * dis + 1, dis - 2, \n dis - 2))\n', (801, 873), False, 'import random, pygame\n'), ((1135, 1153), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1151, 1153), False, 'import random, pygame\n'), ((3943, 3965), 'random.randrange', 'random.randrange', (['rows'], {}), '(rows)\n', (3959, 3965), False, 'import random, pygame\n'), ((3978, 4000), 'random.randrange', 'random.randrange', (['rows'], {}), '(rows)\n', (3994, 4000), False, 'import random, pygame\n'), ((4860, 4881), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (4877, 4881), False, 'import random, pygame\n'), ((6168, 6186), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6184, 6186), False, 'import random, pygame\n'), ((8260, 8283), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8281, 8283), False, 'import random, pygame\n'), ((8315, 8370), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Project Hydra | Main Menu"""'], {}), "('Project Hydra | Main Menu')\n", (8341, 8370), False, 'import random, pygame\n'), ((1247, 1271), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (1269, 1271), False, 'import random, pygame\n'), ((1213, 1226), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1224, 1226), False, 'import random, pygame\n'), ((5084, 5104), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (5098, 5104), False, 'import random, pygame\n'), ((5193, 5214), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (5207, 5214), False, 'import random, pygame\n'), ((6244, 6257), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (6255, 6257), False, 'import random, pygame\n'), ((7069, 7082), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7080, 7082), False, 'import random, pygame\n')] |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: <EMAIL>
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This module defines the following classes:
- QShellDialog
- QShell
QShell is based on ideas and code from PyCute developed by <NAME>.
Used with the author's permission.
More information on PyCute, visit:
http://gerard.vermeulen.free.fr/html/pycute-intro.html
"""
from PyQt4 import QtGui, QtCore
import sys
from vistrails.core.bundles import py_import
from vistrails.core.interpreter.default import get_default_interpreter
from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface
################################################################################
_shell_dialog = None
def get_shell_dialog():
global _shell_dialog
if _shell_dialog is not None:
return _shell_dialog
try:
deps = {'pip': 'ipython>=1.0',
'linux-ubuntu': 'ipython-qtconsole',
'linux-debian': 'ipython-qtconsole'}
IPython = py_import('IPython.qt.console.rich_ipython_widget', deps)
RichIPythonWidget = \
IPython.qt.console.rich_ipython_widget.RichIPythonWidget
py_import('IPython.qt.inprocess', deps)
QtInProcessKernelManager = \
IPython.qt.inprocess.QtInProcessKernelManager
except ImportError:
return None
km = QtInProcessKernelManager()
km.start_kernel()
kernel = km.kernel
kernel.gui = 'qt4'
kernel_client = km.client()
kernel_client.start_channels()
class IPythonDialog(RichIPythonWidget, QVistrailsPaletteInterface):
"""This class incorporates an IPython shell into a dockable widget for use in the
VisTrails environment"""
def __init__(self, parent=None):
RichIPythonWidget.__init__(self, parent)
self.old_streams = None
self.running_workflow = False
self.kernel_manager = km
self.kernel_client = kernel_client
self.exit_requested.connect(self.stop)
self.setWindowTitle("Console")
self.vistrails_interpreter = get_default_interpreter()
def visibility_changed(self, visible):
QVistrailsPaletteInterface.visibility_changed(self, visible)
if visible:
self.show()
else:
self.hide()
def stop(self):
kernel_client.stop_channels()
km.shutdown_kernel()
def hide(self):
"""suspend() -> None
Called when hiding the parent window in order to recover the previous
state.
"""
#recovering the state
if self.old_streams is not None:
sys.stdout, sys.stderr, sys.stdin = self.old_streams
self.old_streams = None
RichIPythonWidget.hide(self)
def show(self):
"""show() -> None
Store previous state and starts capturing all interactive input and
output.
"""
# capture all interactive input/output
if self.old_streams is None:
self.old_streams = sys.stdout, sys.stderr, sys.stdin
sys.stdout = self
sys.stderr = self
sys.stdin = self
RichIPythonWidget.show(self)
def showEvent(self, e):
"""showEvent(e) -> None
Event handler called when the dialog acquires focus
"""
self.show()
def flush(self):
"""flush() -> None.
Simulate stdin, stdout, and stderr.
"""
pass
def isatty(self):
"""isatty() -> int
Simulate stdin, stdout, and stderr.
"""
return 1
def readline(self):
"""readline() -> str
Simulate stdin, stdout, and stderr.
"""
return ""
def write(self, text):
"""write(text: str) -> None
Simulate stdin, stdout, and stderr.
"""
self.input_buffer = ''
if not self.running_workflow:
self.running_workflow = True
# make text blue
self._append_plain_text("\n\x1b[34m<STANDARD OUTPUT>\x1b[0m\n", True)
self._append_plain_text(text, True)
self._prompt_pos = self._get_end_cursor().position()
self._control.ensureCursorVisible()
self._control.moveCursor(QtGui.QTextCursor.End)
def eventFilter(self, obj, event):
""" Reimplemented to ensure a console-like behavior in the underlying
text widgets.
"""
etype = event.type()
if etype == QtCore.QEvent.KeyPress:
self.running_workflow = False
return RichIPythonWidget.eventFilter(self, obj, event)
_shell_dialog = IPythonDialog
return IPythonDialog
| [
"vistrails.gui.vistrails_palette.QVistrailsPaletteInterface.visibility_changed",
"vistrails.core.bundles.py_import",
"vistrails.core.interpreter.default.get_default_interpreter"
] | [((2766, 2823), 'vistrails.core.bundles.py_import', 'py_import', (['"""IPython.qt.console.rich_ipython_widget"""', 'deps'], {}), "('IPython.qt.console.rich_ipython_widget', deps)\n", (2775, 2823), False, 'from vistrails.core.bundles import py_import\n'), ((2935, 2974), 'vistrails.core.bundles.py_import', 'py_import', (['"""IPython.qt.inprocess"""', 'deps'], {}), "('IPython.qt.inprocess', deps)\n", (2944, 2974), False, 'from vistrails.core.bundles import py_import\n'), ((3879, 3904), 'vistrails.core.interpreter.default.get_default_interpreter', 'get_default_interpreter', ([], {}), '()\n', (3902, 3904), False, 'from vistrails.core.interpreter.default import get_default_interpreter\n'), ((3965, 4025), 'vistrails.gui.vistrails_palette.QVistrailsPaletteInterface.visibility_changed', 'QVistrailsPaletteInterface.visibility_changed', (['self', 'visible'], {}), '(self, visible)\n', (4010, 4025), False, 'from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface\n')] |
"""empty message
Revision ID: 4a7d74b38564
Revises: <PASSWORD>
Create Date: 2017-02-16 16:09:46.859183
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4a<PASSWORD>b<PASSWORD>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('token', sa.Column('token', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('token', 'token')
# ### end Alembic commands ###
| [
"sqlalchemy.String",
"alembic.op.drop_column"
] | [((595, 627), 'alembic.op.drop_column', 'op.drop_column', (['"""token"""', '"""token"""'], {}), "('token', 'token')\n", (609, 627), False, 'from alembic import op\n'), ((433, 453), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (442, 453), True, 'import sqlalchemy as sa\n')] |
#importing dependencies
import sqlite3
#creating a connection and also the database or connecting to the database if it exists already
conn = sqlite3.connect('student.db')
#Creating the cursor
c = conn.cursor()
#creating a table called students
c.execute("""CREATE TABLE students(
first_name text,
last_name text,
email text,
course text
)""")
#commiting the connection/ commit command
conn.commit()
#closing the connection
conn.close() | [
"sqlite3.connect"
] | [((147, 176), 'sqlite3.connect', 'sqlite3.connect', (['"""student.db"""'], {}), "('student.db')\n", (162, 176), False, 'import sqlite3\n')] |
from django.contrib import admin
from .models import Alumni
# Register your models here.
admin.site.register(Alumni)
| [
"django.contrib.admin.site.register"
] | [((89, 116), 'django.contrib.admin.site.register', 'admin.site.register', (['Alumni'], {}), '(Alumni)\n', (108, 116), False, 'from django.contrib import admin\n')] |
import random
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from .model import Walker, ShapeExample
def agent_draw(agent):
portrayal = None
if agent is None:
# Actually this if part is unnecessary, but still keeping it for
# aesthetics
pass
elif isinstance(agent, Walker):
print("Uid: {0}, Heading: {1}".format(agent.unique_id, agent.heading))
portrayal = {"Shape": "arrowHead",
"Filled": "true",
"Layer": 2,
"Color": ["#00FF00", "#99FF99"],
"stroke_color": "#666666",
"Filled": "true",
"heading_x": agent.heading[0],
"heading_y": agent.heading[1],
"text": agent.unique_id,
"text_color": "white",
"scale": 0.8,
}
return portrayal
def launch_shape_model():
width = 15
height = 10
num_agents = 2
pixel_ratio = 50
grid = CanvasGrid(agent_draw, width, height,
width * pixel_ratio, height * pixel_ratio)
server = ModularServer(ShapeExample, [grid], "Shape Model Example",
{"N": num_agents, "width": width, "height": height})
server.max_steps = 0
server.port = 8521
server.launch()
if __name__ == "__main__":
random.seed(3)
launch_shape_model()
| [
"random.seed",
"mesa.visualization.modules.CanvasGrid",
"mesa.visualization.ModularVisualization.ModularServer"
] | [((1101, 1186), 'mesa.visualization.modules.CanvasGrid', 'CanvasGrid', (['agent_draw', 'width', 'height', '(width * pixel_ratio)', '(height * pixel_ratio)'], {}), '(agent_draw, width, height, width * pixel_ratio, height * pixel_ratio\n )\n', (1111, 1186), False, 'from mesa.visualization.modules import CanvasGrid\n'), ((1217, 1332), 'mesa.visualization.ModularVisualization.ModularServer', 'ModularServer', (['ShapeExample', '[grid]', '"""Shape Model Example"""', "{'N': num_agents, 'width': width, 'height': height}"], {}), "(ShapeExample, [grid], 'Shape Model Example', {'N': num_agents,\n 'width': width, 'height': height})\n", (1230, 1332), False, 'from mesa.visualization.ModularVisualization import ModularServer\n'), ((1457, 1471), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (1468, 1471), False, 'import random\n')] |
#!/usr/bin/env python
from redbot.message import headers
from redbot.syntax import rfc7231
from redbot.type import AddNoteMethodType
class date(headers.HttpHeader):
canonical_name = "Date"
description = """\
The `Date` header represents the time when the message was generated, regardless of caching that
happened since.
It is used by caches as input to expiration calculations, and to detect clock drift."""
reference = "%s#header.date" % rfc7231.SPEC_URL
syntax = False # rfc7231.Date
list_header = False
deprecated = False
valid_in_requests = True
valid_in_responses = True
def parse(self, field_value: str, add_note: AddNoteMethodType) -> int:
try:
date_value = headers.parse_date(field_value, add_note)
except ValueError:
raise
return date_value
class BasicDateTest(headers.HeaderTest):
name = 'Date'
inputs = [b'Mon, 04 Jul 2011 09:08:06 GMT']
expected_out = 1309770486
expected_err = [] # type: ignore
class BadDateTest(headers.HeaderTest):
name = 'Date'
inputs = [b'0']
expected_out = None # type: ignore
expected_err = [headers.BAD_DATE_SYNTAX]
class BlankDateTest(headers.HeaderTest):
name = 'Date'
inputs = [b'']
expected_out = None # type: ignore
expected_err = [headers.BAD_DATE_SYNTAX]
| [
"redbot.message.headers.parse_date"
] | [((727, 768), 'redbot.message.headers.parse_date', 'headers.parse_date', (['field_value', 'add_note'], {}), '(field_value, add_note)\n', (745, 768), False, 'from redbot.message import headers\n')] |
import abc
import itertools
from oslo_utils import reflection
import six
from padre import exceptions as excp
from padre import utils
@six.add_metaclass(abc.ABCMeta)
class auth_base(object):
"""Base of all authorizers."""
def __and__(self, other):
return all_must_pass(self, other)
def __or__(self, other):
return any_must_pass(self, other)
@abc.abstractmethod
def __call__(self, bot, message, args=None):
pass
def pformat(self, bot):
return 'auth_base()'
class no_auth(auth_base):
"""Lets any message through."""
def pformat(self, bot):
return 'no_auth()'
def __call__(self, bot, message, args=None):
pass
class args_key_is_empty_or_allowed(auth_base):
"""Denies if args key is non-empty and not allowed."""
def __init__(self, args_key, allowed_extractor_func):
self.args_key = args_key
self.allowed_extractor_func = allowed_extractor_func
def __call__(self, bot, message, args=None):
if args is None:
raise excp.NotAuthorized(
"Message lacks a (non-empty)"
" 'args' keyword argument, unable to auth against"
" unknown arguments", message)
else:
v = args.get(self.args_key)
if v:
allowed_extractor_func = self.allowed_extractor_func
allowed = allowed_extractor_func(message)
if v not in allowed:
raise excp.NotAuthorized(
"Action can not be triggered"
" please check that the argument '%s' value is"
" allowed or that argument '%s' is"
" empty" % (self.args_key, self.args_key))
def pformat(self, bot):
base = 'args_key_is_empty_or_allowed'
func_name = reflection.get_callable_name(self.allowed_extractor_func)
return '%s(%r, %s)' % (base, self.args_key, func_name)
class user_in_ldap_groups(auth_base):
"""Denies if sending user is not in **config** driven ldap groups."""
def __init__(self, config_key, *more_config_keys):
self.config_keys = (config_key,) + more_config_keys
def pformat(self, bot):
groups = self._fetch_ok_groups(bot)
return 'user_in_ldap_groups(%s)' % (utils.quote_join(groups))
def _fetch_ok_groups(self, bot):
groups = []
for k in self.config_keys:
try:
val = utils.dict_or_munch_extract(bot.config, k)
except KeyError:
pass
else:
if isinstance(val, six.string_types):
groups.append(val)
elif isinstance(val, (tuple, list, set)):
groups.extend(val)
else:
raise TypeError("Unexpected ldap group"
" configuration value type"
" '%s' corresponding to lookup"
" key: %s" % (type(val), k))
return groups
def __call__(self, bot, message, args=None):
ldap_client = bot.clients.get("ldap_client")
if not ldap_client:
raise excp.NotFound("Ldap client not found; required to perform"
" authorization checks")
try:
user_name = message.body.user_name
except AttributeError:
user_name = None
if not user_name:
raise excp.NotAuthorized(
"Message lacks a (non-empty)"
" user name, unable to auth against"
" unknown users", message)
else:
if not ldap_client.is_allowed(user_name,
self._fetch_ok_groups(bot)):
raise excp.NotAuthorized(
"Action can not be triggered"
" please check that the sender is in the correct"
" ldap group(s)", message)
class message_from_channels(auth_base):
"""Denies messages not from certain channel name(s)."""
def __init__(self, channels):
self.channels = tuple(channels)
def pformat(self, bot):
return 'message_from_channels(%s)' % (utils.quote_join(self.channels))
def __call__(self, bot, message, args=None):
try:
channel_name = message.body.channel_name
except AttributeError:
channel_name = None
if not channel_name:
raise excp.NotAuthorized(
"Message lacks a (non-empty)"
" channel name, unable to trigger against"
" unknown channels", message)
if channel_name not in self.channels:
raise excp.NotAuthorized(
"Action can not be triggered in provided"
" channel '%s', please make sure that the sender"
" is in the correct channel(s)" % channel_name, message)
class any_must_pass(auth_base):
"""Combines one or more authorizer (any must pass)."""
def __init__(self, authorizer, *more_authorizers):
self.authorizers = tuple(
itertools.chain([authorizer], more_authorizers))
def pformat(self, bot):
others = ", ".join(a.pformat(bot) for a in self.authorizers)
return 'any_must_pass(%s)' % (others)
def __call__(self, bot, message, args=None):
fails = []
any_passed = False
for authorizer in self.authorizers:
try:
authorizer(bot, message, args=args)
except excp.NotAuthorized as e:
fails.append(e)
else:
any_passed = True
break
if not any_passed and fails:
# TODO: maybe make a multiple not authorized exception???
what = " or ".join('(%s)' % e for e in fails)
raise excp.NotAuthorized(what, message)
class all_must_pass(auth_base):
"""Combines one or more authorizer (all must pass)."""
def __init__(self, authorizer, *more_authorizers):
self.authorizers = tuple(
itertools.chain([authorizer], more_authorizers))
def pformat(self, bot):
others = ", ".join(a.pformat(bot) for a in self.authorizers)
return 'all_must_pass(%s)' % (others)
def __call__(self, bot, message, args=None):
for authorizer in self.authorizers:
authorizer(bot, message, args=args)
| [
"itertools.chain",
"padre.exceptions.NotFound",
"six.add_metaclass",
"padre.utils.dict_or_munch_extract",
"padre.exceptions.NotAuthorized",
"padre.utils.quote_join",
"oslo_utils.reflection.get_callable_name"
] | [((139, 169), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (156, 169), False, 'import six\n'), ((1867, 1924), 'oslo_utils.reflection.get_callable_name', 'reflection.get_callable_name', (['self.allowed_extractor_func'], {}), '(self.allowed_extractor_func)\n', (1895, 1924), False, 'from oslo_utils import reflection\n'), ((1057, 1191), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['"""Message lacks a (non-empty) \'args\' keyword argument, unable to auth against unknown arguments"""', 'message'], {}), '(\n "Message lacks a (non-empty) \'args\' keyword argument, unable to auth against unknown arguments"\n , message)\n', (1075, 1191), True, 'from padre import exceptions as excp\n'), ((2335, 2359), 'padre.utils.quote_join', 'utils.quote_join', (['groups'], {}), '(groups)\n', (2351, 2359), False, 'from padre import utils\n'), ((3244, 3329), 'padre.exceptions.NotFound', 'excp.NotFound', (['"""Ldap client not found; required to perform authorization checks"""'], {}), "('Ldap client not found; required to perform authorization checks'\n )\n", (3257, 3329), True, 'from padre import exceptions as excp\n'), ((3524, 3640), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['"""Message lacks a (non-empty) user name, unable to auth against unknown users"""', 'message'], {}), "(\n 'Message lacks a (non-empty) user name, unable to auth against unknown users'\n , message)\n", (3542, 3640), True, 'from padre import exceptions as excp\n'), ((4285, 4316), 'padre.utils.quote_join', 'utils.quote_join', (['self.channels'], {}), '(self.channels)\n', (4301, 4316), False, 'from padre import utils\n'), ((4544, 4669), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['"""Message lacks a (non-empty) channel name, unable to trigger against unknown channels"""', 'message'], {}), "(\n 'Message lacks a (non-empty) channel name, unable to trigger against unknown channels'\n , message)\n", (4562, 4669), True, 'from padre import exceptions as excp\n'), ((4779, 4950), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['("Action can not be triggered in provided channel \'%s\', please make sure that the sender is in the correct channel(s)"\n % channel_name)', 'message'], {}), '(\n "Action can not be triggered in provided channel \'%s\', please make sure that the sender is in the correct channel(s)"\n % channel_name, message)\n', (4797, 4950), True, 'from padre import exceptions as excp\n'), ((5191, 5238), 'itertools.chain', 'itertools.chain', (['[authorizer]', 'more_authorizers'], {}), '([authorizer], more_authorizers)\n', (5206, 5238), False, 'import itertools\n'), ((5926, 5959), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['what', 'message'], {}), '(what, message)\n', (5944, 5959), True, 'from padre import exceptions as excp\n'), ((6155, 6202), 'itertools.chain', 'itertools.chain', (['[authorizer]', 'more_authorizers'], {}), '([authorizer], more_authorizers)\n', (6170, 6202), False, 'import itertools\n'), ((2493, 2535), 'padre.utils.dict_or_munch_extract', 'utils.dict_or_munch_extract', (['bot.config', 'k'], {}), '(bot.config, k)\n', (2520, 2535), False, 'from padre import utils\n'), ((3846, 3975), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['"""Action can not be triggered please check that the sender is in the correct ldap group(s)"""', 'message'], {}), "(\n 'Action can not be triggered please check that the sender is in the correct ldap group(s)'\n , message)\n", (3864, 3975), True, 'from padre import exceptions as excp\n'), ((1499, 1675), 'padre.exceptions.NotAuthorized', 'excp.NotAuthorized', (['("Action can not be triggered please check that the argument \'%s\' value is allowed or that argument \'%s\' is empty"\n % (self.args_key, self.args_key))'], {}), '(\n "Action can not be triggered please check that the argument \'%s\' value is allowed or that argument \'%s\' is empty"\n % (self.args_key, self.args_key))\n', (1517, 1675), True, 'from padre import exceptions as excp\n')] |
from django.views.generic import CreateView
from django.contrib.auth import authenticate, login
from django.contrib.auth.views import LoginView
from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
class SignUpView(CreateView):
template_name = 'users/signup.html'
form_class = UserCreationForm
success_url = reverse_lazy('photo:list')
def form_valid(self, form):
to_return = super().form_valid(form)
user = authenticate(
username=form.cleaned_data["username"],
password=form.cleaned_data["<PASSWORD>"],
)
login(self.request, user)
return to_return
class CustomLoginView(LoginView):
template_name = 'users/login.html' | [
"django.contrib.auth.authenticate",
"django.contrib.auth.login",
"django.urls.reverse_lazy"
] | [((370, 396), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""photo:list"""'], {}), "('photo:list')\n", (382, 396), False, 'from django.urls import reverse_lazy\n'), ((491, 590), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': "form.cleaned_data['username']", 'password': "form.cleaned_data['<PASSWORD>']"}), "(username=form.cleaned_data['username'], password=form.\n cleaned_data['<PASSWORD>'])\n", (503, 590), False, 'from django.contrib.auth import authenticate, login\n'), ((630, 655), 'django.contrib.auth.login', 'login', (['self.request', 'user'], {}), '(self.request, user)\n', (635, 655), False, 'from django.contrib.auth import authenticate, login\n')] |
import re
from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, \
_is_email_init, _is_footnote, _is_image, _is_table
from _cleaning_options.strip_headers import _strip_headers
def simple_cleaner(book: str) -> str:
"""
Just removes lines that are part of the Project Gutenberg header or footer.
Doesnt go deeply in the text to remove other things like titles or footnotes or etc.
:rtype: str
:param book: str of a gutenberg's book
:return: str of the book without the lines that are part of the Project Gutenberg header and footer.
"""
return _strip_headers(book)
def super_cleaner(book: str, min_token: int = 5, max_token: int = 600, mark_deletions: bool = True) -> str:
"""
Super clean the book (titles, footnotes, images, book information, etc.). may delete some good lines too.
^_^ Do you have a comment to make it better? make an issue here: https://github.com/kiasar/gutenberg_cleaner ^_^.
IMPORTANT: if you don't want the text to be tokenize, just put min_token = -1.
:rtype: str
:param book: str of a gutenberg's book.
:param min_token: The minimum tokens of a paragraph that is not "dialog" or "quote",
-1 means don't tokenize the txt (so it will be faster).
:param max_token: The maximum tokens of a paragraph.
:return: str of the book with paragraphs that have been deleted are shown with "[deleted]" in it.
you can split the book to paragraphs by "\n\n".
"""
headless_book = _strip_headers(book)
paragraphs = headless_book.split("\n\n") # split the book to paragraphs.
paragraphs_after_cleaning = []
for par in paragraphs:
if _is_image(par) or _is_footnote(par) or _is_email_init(par) or \
_is_books_copy(par) or _is_table(par) or _is_title_or_etc(par, min_token, max_token):
if mark_deletions:
paragraphs_after_cleaning.append("[deleted]") # if the paragraph is not good, replace it with [deleted]
else:
par = re.sub("(\\n)+", " ", par).replace("_", "")
paragraphs_after_cleaning.append(par)
cleaned_book = "\n\n".join(paragraphs_after_cleaning) # joining the list of paragraphs into one string
return cleaned_book
| [
"_cleaning_options.strip_headers._strip_headers",
"_cleaning_options.cleaning_options._is_table",
"_cleaning_options.cleaning_options._is_image",
"_cleaning_options.cleaning_options._is_title_or_etc",
"_cleaning_options.cleaning_options._is_email_init",
"_cleaning_options.cleaning_options._is_books_copy",... | [((607, 627), '_cleaning_options.strip_headers._strip_headers', '_strip_headers', (['book'], {}), '(book)\n', (621, 627), False, 'from _cleaning_options.strip_headers import _strip_headers\n'), ((1506, 1526), '_cleaning_options.strip_headers._strip_headers', '_strip_headers', (['book'], {}), '(book)\n', (1520, 1526), False, 'from _cleaning_options.strip_headers import _strip_headers\n'), ((1679, 1693), '_cleaning_options.cleaning_options._is_image', '_is_image', (['par'], {}), '(par)\n', (1688, 1693), False, 'from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, _is_email_init, _is_footnote, _is_image, _is_table\n'), ((1697, 1714), '_cleaning_options.cleaning_options._is_footnote', '_is_footnote', (['par'], {}), '(par)\n', (1709, 1714), False, 'from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, _is_email_init, _is_footnote, _is_image, _is_table\n'), ((1718, 1737), '_cleaning_options.cleaning_options._is_email_init', '_is_email_init', (['par'], {}), '(par)\n', (1732, 1737), False, 'from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, _is_email_init, _is_footnote, _is_image, _is_table\n'), ((1759, 1778), '_cleaning_options.cleaning_options._is_books_copy', '_is_books_copy', (['par'], {}), '(par)\n', (1773, 1778), False, 'from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, _is_email_init, _is_footnote, _is_image, _is_table\n'), ((1782, 1796), '_cleaning_options.cleaning_options._is_table', '_is_table', (['par'], {}), '(par)\n', (1791, 1796), False, 'from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, _is_email_init, _is_footnote, _is_image, _is_table\n'), ((1800, 1843), '_cleaning_options.cleaning_options._is_title_or_etc', '_is_title_or_etc', (['par', 'min_token', 'max_token'], {}), '(par, min_token, max_token)\n', (1816, 1843), False, 'from _cleaning_options.cleaning_options import _is_title_or_etc, _is_books_copy, _is_email_init, _is_footnote, _is_image, _is_table\n'), ((2029, 2055), 're.sub', 're.sub', (['"""(\\\\n)+"""', '""" """', 'par'], {}), "('(\\\\n)+', ' ', par)\n", (2035, 2055), False, 'import re\n')] |
"""REST API for accessing log files."""
from django.core.exceptions import ObjectDoesNotExist
from django.utils.decorators import method_decorator
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import generics, status
from rest_framework.decorators import (
api_view,
parser_classes,
permission_classes,
)
from rest_framework.exceptions import (
PermissionDenied,
NotFound,
ValidationError,
)
from rest_framework.parsers import FileUploadParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from crashreports.response_descriptions import default_desc
from crashreports.serializers import LogFileSerializer
from crashreports.models import Crashreport, LogFile
from crashreports.permissions import (
HasRightsOrIsDeviceOwnerDeviceCreation,
user_owns_uuid,
user_is_hiccup_staff,
SWAGGER_SECURITY_REQUIREMENTS_ALL,
)
@method_decorator(
name="get",
decorator=swagger_auto_schema(
operation_description="List log files",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
),
)
class ListView(generics.ListAPIView):
"""Endpoint for listing log files."""
queryset = LogFile.objects.all()
permission_classes = (HasRightsOrIsDeviceOwnerDeviceCreation,)
serializer_class = LogFileSerializer
@method_decorator(
name="get",
decorator=swagger_auto_schema(
operation_description="Get a log file",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound)]),
),
)
@method_decorator(
name="put",
decorator=swagger_auto_schema(
operation_description="Update a log file",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound), default_desc(ValidationError)]),
),
)
@method_decorator(
name="patch",
decorator=swagger_auto_schema(
operation_description="Partially update a log file",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound), default_desc(ValidationError)]),
),
)
@method_decorator(
name="delete",
decorator=swagger_auto_schema(
operation_description="Delete a log file",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
responses=dict([default_desc(NotFound)]),
),
)
class RetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
"""Endpoint for retrieving, updating and deleting log files."""
# pylint: disable=too-many-ancestors
queryset = LogFile.objects.all()
permission_classes = (HasRightsOrIsDeviceOwnerDeviceCreation,)
serializer_class = LogFileSerializer
@swagger_auto_schema(
method="post",
security=SWAGGER_SECURITY_REQUIREMENTS_ALL,
request_body=LogFileSerializer,
responses=dict(
[
default_desc(ValidationError),
(
status.HTTP_404_NOT_FOUND,
openapi.Response("Crashreport does not exist."),
),
(status.HTTP_201_CREATED, openapi.Response("Created")),
]
),
)
@api_view(http_method_names=["POST"])
@parser_classes([FileUploadParser])
@permission_classes([IsAuthenticated])
def logfile_put(request, uuid, device_local_id, filename):
"""Upload a log file for a crash report."""
try:
crashreport = Crashreport.objects.get(
device__uuid=uuid, device_local_id=device_local_id
)
except ObjectDoesNotExist:
raise NotFound(detail="Crashreport does not exist.")
if not (
user_owns_uuid(request.user, crashreport.device.uuid)
or user_is_hiccup_staff(request.user)
):
raise PermissionDenied(detail="Not allowed.")
file = request.data["file"]
logfile = LogFile(crashreport=crashreport, logfile=file)
logfile.save()
return Response(status=201)
| [
"rest_framework.decorators.permission_classes",
"drf_yasg.openapi.Response",
"crashreports.models.Crashreport.objects.get",
"crashreports.permissions.user_is_hiccup_staff",
"crashreports.response_descriptions.default_desc",
"drf_yasg.utils.swagger_auto_schema",
"crashreports.models.LogFile",
"rest_fra... | [((3122, 3158), 'rest_framework.decorators.api_view', 'api_view', ([], {'http_method_names': "['POST']"}), "(http_method_names=['POST'])\n", (3130, 3158), False, 'from rest_framework.decorators import api_view, parser_classes, permission_classes\n'), ((3160, 3194), 'rest_framework.decorators.parser_classes', 'parser_classes', (['[FileUploadParser]'], {}), '([FileUploadParser])\n', (3174, 3194), False, 'from rest_framework.decorators import api_view, parser_classes, permission_classes\n'), ((3196, 3233), 'rest_framework.decorators.permission_classes', 'permission_classes', (['[IsAuthenticated]'], {}), '([IsAuthenticated])\n', (3214, 3233), False, 'from rest_framework.decorators import api_view, parser_classes, permission_classes\n'), ((1235, 1256), 'crashreports.models.LogFile.objects.all', 'LogFile.objects.all', ([], {}), '()\n', (1254, 1256), False, 'from crashreports.models import Crashreport, LogFile\n'), ((2567, 2588), 'crashreports.models.LogFile.objects.all', 'LogFile.objects.all', ([], {}), '()\n', (2586, 2588), False, 'from crashreports.models import Crashreport, LogFile\n'), ((3791, 3837), 'crashreports.models.LogFile', 'LogFile', ([], {'crashreport': 'crashreport', 'logfile': 'file'}), '(crashreport=crashreport, logfile=file)\n', (3798, 3837), False, 'from crashreports.models import Crashreport, LogFile\n'), ((3868, 3888), 'rest_framework.response.Response', 'Response', ([], {'status': '(201)'}), '(status=201)\n', (3876, 3888), False, 'from rest_framework.response import Response\n'), ((1009, 1117), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'operation_description': '"""List log files"""', 'security': 'SWAGGER_SECURITY_REQUIREMENTS_ALL'}), "(operation_description='List log files', security=\n SWAGGER_SECURITY_REQUIREMENTS_ALL)\n", (1028, 1117), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((3372, 3447), 'crashreports.models.Crashreport.objects.get', 'Crashreport.objects.get', ([], {'device__uuid': 'uuid', 'device_local_id': 'device_local_id'}), '(device__uuid=uuid, device_local_id=device_local_id)\n', (3395, 3447), False, 'from crashreports.models import Crashreport, LogFile\n'), ((3705, 3744), 'rest_framework.exceptions.PermissionDenied', 'PermissionDenied', ([], {'detail': '"""Not allowed."""'}), "(detail='Not allowed.')\n", (3721, 3744), False, 'from rest_framework.exceptions import PermissionDenied, NotFound, ValidationError\n'), ((3515, 3561), 'rest_framework.exceptions.NotFound', 'NotFound', ([], {'detail': '"""Crashreport does not exist."""'}), "(detail='Crashreport does not exist.')\n", (3523, 3561), False, 'from rest_framework.exceptions import PermissionDenied, NotFound, ValidationError\n'), ((3584, 3637), 'crashreports.permissions.user_owns_uuid', 'user_owns_uuid', (['request.user', 'crashreport.device.uuid'], {}), '(request.user, crashreport.device.uuid)\n', (3598, 3637), False, 'from crashreports.permissions import HasRightsOrIsDeviceOwnerDeviceCreation, user_owns_uuid, user_is_hiccup_staff, SWAGGER_SECURITY_REQUIREMENTS_ALL\n'), ((3649, 3683), 'crashreports.permissions.user_is_hiccup_staff', 'user_is_hiccup_staff', (['request.user'], {}), '(request.user)\n', (3669, 3683), False, 'from crashreports.permissions import HasRightsOrIsDeviceOwnerDeviceCreation, user_owns_uuid, user_is_hiccup_staff, SWAGGER_SECURITY_REQUIREMENTS_ALL\n'), ((2866, 2895), 'crashreports.response_descriptions.default_desc', 'default_desc', (['ValidationError'], {}), '(ValidationError)\n', (2878, 2895), False, 'from crashreports.response_descriptions import default_desc\n'), ((2970, 3017), 'drf_yasg.openapi.Response', 'openapi.Response', (['"""Crashreport does not exist."""'], {}), "('Crashreport does not exist.')\n", (2986, 3017), False, 'from drf_yasg import openapi\n'), ((3072, 3099), 'drf_yasg.openapi.Response', 'openapi.Response', (['"""Created"""'], {}), "('Created')\n", (3088, 3099), False, 'from drf_yasg import openapi\n'), ((1561, 1583), 'crashreports.response_descriptions.default_desc', 'default_desc', (['NotFound'], {}), '(NotFound)\n', (1573, 1583), False, 'from crashreports.response_descriptions import default_desc\n'), ((1793, 1815), 'crashreports.response_descriptions.default_desc', 'default_desc', (['NotFound'], {}), '(NotFound)\n', (1805, 1815), False, 'from crashreports.response_descriptions import default_desc\n'), ((1817, 1846), 'crashreports.response_descriptions.default_desc', 'default_desc', (['ValidationError'], {}), '(ValidationError)\n', (1829, 1846), False, 'from crashreports.response_descriptions import default_desc\n'), ((2068, 2090), 'crashreports.response_descriptions.default_desc', 'default_desc', (['NotFound'], {}), '(NotFound)\n', (2080, 2090), False, 'from crashreports.response_descriptions import default_desc\n'), ((2092, 2121), 'crashreports.response_descriptions.default_desc', 'default_desc', (['ValidationError'], {}), '(ValidationError)\n', (2104, 2121), False, 'from crashreports.response_descriptions import default_desc\n'), ((2334, 2356), 'crashreports.response_descriptions.default_desc', 'default_desc', (['NotFound'], {}), '(NotFound)\n', (2346, 2356), False, 'from crashreports.response_descriptions import default_desc\n')] |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'système'"""
from primaires.interpreteur.commande.commande import Commande
from secondaires.systeme.contextes.systeme import Systeme
class CmdSysteme(Commande):
"""Commande 'système'.
"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "systeme", "system")
self.groupe = "administrateur"
self.schema = ""
self.aide_courte = "intègre une console interactive Python"
self.aide_longue = \
"Cette commande ouvre une console virtuelle Python. " \
"Elle permet d'entrer du code directement, comme dans " \
"un interpréteur Python. |att|Soyez excessivement prudent " \
"quant aux manipulations effectuées et aux informations " \
"que vous envoyez. Souvenez-vous qu'elles transitent " \
"par un protocole non sécurisé.|ff| N'utilisez cette " \
"commande qu'à des fins de debug."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
# On récupère la configuration des droits du module système
cfg_droits = type(self).importeur.systeme.cfg_droits
if not cfg_droits.cmd_systeme:
personnage << "|err|Cette commande a été désactivée.|ff|"
return
adresse_ip = list(cfg_droits.cmd_systeme_ip)
adresse_ip.insert(0, "127.0.0.1")
if not personnage.instance_connexion.adresse_ip in adresse_ip:
personnage << "|err|Cette adresse IP n'est pas autorisée.|ff|"
return
contexte = Systeme(personnage.instance_connexion)
personnage.contexte_actuel.migrer_contexte(contexte)
| [
"primaires.interpreteur.commande.commande.Commande.__init__",
"secondaires.systeme.contextes.systeme.Systeme"
] | [((1881, 1925), 'primaires.interpreteur.commande.commande.Commande.__init__', 'Commande.__init__', (['self', '"""systeme"""', '"""system"""'], {}), "(self, 'systeme', 'system')\n", (1898, 1925), False, 'from primaires.interpreteur.commande.commande import Commande\n'), ((3218, 3256), 'secondaires.systeme.contextes.systeme.Systeme', 'Systeme', (['personnage.instance_connexion'], {}), '(personnage.instance_connexion)\n', (3225, 3256), False, 'from secondaires.systeme.contextes.systeme import Systeme\n')] |
import math
import numpy as np
import os
import pickle
from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from .constants import MAX_MILLER, COVALENT_MATERIALS_MPIDS
class Bulk():
'''
This class handles all things with the bulk.
It also provides possible surfaces, later used to create a Surface object.
Attributes
----------
precomputed_structures : str
root dir of precomputed structures
bulk_atoms : Atoms
actual atoms of the bulk
mpid : str
mpid of the bulk
bulk_sampling_str : str
string capturing the bulk index and number of possible bulks
index_of_bulk_atoms : int
index of bulk in the db
n_elems : int
number of elements of the bulk
elem_sampling_str : str
string capturing n_elems and the max possible elements
Public methods
--------------
get_possible_surfaces()
returns a list of possible surfaces for this bulk instance
'''
def __init__(self, bulk_database, precomputed_structures=None, bulk_index=None, max_elems=3):
'''
Initializes the object by choosing or sampling from the bulk database
Args:
bulk_database: either a list of dict of bulks
precomputed_structures: Root directory of precomputed structures for
surface enumeration
bulk_index: index of bulk to select if not doing a random sample
max_elems: max number of elements for any bulk
'''
self.precomputed_structures = precomputed_structures
self.choose_bulk_pkl(bulk_database, bulk_index, max_elems)
def choose_bulk_pkl(self, bulk_db, bulk_index, max_elems):
'''
Chooses a bulk from our pkl file at random as long as the bulk contains
the specified number of elements in any composition.
Args:
bulk_db Unpickled dict or list of bulks
bulk_index Index of which bulk to select. If None, randomly sample one.
max_elems Max elems for any bulk structure. Currently it is 3 by default.
Sets as class attributes:
bulk_atoms `ase.Atoms` of the chosen bulk structure.
mpid A string indicating which MPID the bulk is
bulk_sampling_str A string to enumerate the sampled structure
index_of_bulk_atoms Index of the chosen bulk in the array (should match
bulk_index if provided)
'''
try:
if bulk_index is not None:
assert len(bulk_db) > max_elems, f'Bulk db only has {len(bulk_db)} entries. Did you pass in the correct bulk database?'
assert isinstance(bulk_db[bulk_index], tuple)
self.bulk_atoms, self.mpid, self.bulk_sampling_str, self.index_of_bulk_atoms = bulk_db[bulk_index]
self.n_elems = len(set(self.bulk_atoms.symbols)) # 1, 2, or 3
self.elem_sampling_str = f'{self.n_elems}/{max_elems}'
else:
self.sample_n_elems()
assert isinstance(bulk_db, dict), 'Did you pass in the correct bulk database?'
assert self.n_elems in bulk_db.keys(), f'Bulk db does not have bulks of {self.n_elems} elements'
assert isinstance(bulk_db[self.n_elems], list), 'Did you pass in the correct bulk database?'
total_elements_for_key = len(bulk_db[self.n_elems])
row_bulk_index = np.random.choice(total_elements_for_key)
self.bulk_atoms, self.mpid, self.bulk_sampling_str, self.index_of_bulk_atoms = bulk_db[self.n_elems][row_bulk_index]
except IndexError:
raise ValueError('Randomly chose to look for a %i-component material, '
'but no such materials exist. Please add one '
'to the database or change the weights to exclude '
'this number of components.'
% self.n_elems)
def sample_n_elems(self, n_cat_elems_weights={1: 0.05, 2: 0.65, 3: 0.3}):
'''
Chooses the number of species we should look for in this sample.
Arg:
n_cat_elems_weights A dictionary whose keys are integers containing the
number of species you want to consider and whose
values are the probabilities of selecting this
number. The probabilities must sum to 1.
Sets:
n_elems An integer showing how many species have been chosen.
elem_sampling_str Enum string of [chosen n_elems]/[total number of choices]
'''
possible_n_elems = list(n_cat_elems_weights.keys())
weights = list(n_cat_elems_weights.values())
assert math.isclose(sum(weights), 1)
self.n_elems = np.random.choice(possible_n_elems, p=weights)
self.elem_sampling_str = str(self.n_elems) + "/" + str(len(possible_n_elems))
def get_possible_surfaces(self):
'''
Returns a list of possible surfaces for this bulk instance.
This can be later used to iterate through all surfaces,
or select one at random, to make a Surface object.
'''
if self.precomputed_structures:
surfaces_info = self.read_from_precomputed_enumerations(self.index_of_bulk_atoms)
else:
surfaces_info = self.enumerate_surfaces()
return surfaces_info
def read_from_precomputed_enumerations(self, index):
'''
Loads relevant pickle of precomputed surfaces.
Args:
index: bulk index
Returns:
surfaces_info: a list of surface_info tuples (atoms, miller, shift, top)
'''
with open(os.path.join(self.precomputed_structures, str(index) + ".pkl"), "rb") as f:
surfaces_info = pickle.load(f)
return surfaces_info
def enumerate_surfaces(self, max_miller=MAX_MILLER):
'''
Enumerate all the symmetrically distinct surfaces of a bulk structure. It
will not enumerate surfaces with Miller indices above the `max_miller`
argument. Note that we also look at the bottoms of surfaces if they are
distinct from the top. If they are distinct, we flip the surface so the bottom
is pointing upwards.
Args:
bulk_atoms `ase.Atoms` object of the bulk you want to enumerate
surfaces from.
max_miller An integer indicating the maximum Miller index of the surfaces
you are willing to enumerate. Increasing this argument will
increase the number of surfaces, but the surfaces will
generally become larger.
Returns:
all_slabs_info A list of 4-tuples containing: `pymatgen.Structure`
objects for surfaces we have enumerated, the Miller
indices, floats for the shifts, and Booleans for "top".
'''
bulk_struct = self.standardize_bulk(self.bulk_atoms)
all_slabs_info = []
for millers in get_symmetrically_distinct_miller_indices(bulk_struct, MAX_MILLER):
slab_gen = SlabGenerator(initial_structure=bulk_struct,
miller_index=millers,
min_slab_size=7.,
min_vacuum_size=20.,
lll_reduce=False,
center_slab=True,
primitive=True,
max_normal_search=1)
slabs = slab_gen.get_slabs(tol=0.3,
bonds=None,
max_broken_bonds=0,
symmetrize=False)
# Additional filtering for the 2D materials' slabs
if self.mpid in COVALENT_MATERIALS_MPIDS:
slabs = [slab for slab in slabs if is_2D_slab_reasonsable(slab) is True]
# If the bottoms of the slabs are different than the tops, then we want
# to consider them, too
if len(slabs) != 0:
flipped_slabs_info = [(self.flip_struct(slab), millers, slab.shift, False)
for slab in slabs if self.is_structure_invertible(slab) is False]
# Concatenate all the results together
slabs_info = [(slab, millers, slab.shift, True) for slab in slabs]
all_slabs_info.extend(slabs_info + flipped_slabs_info)
return all_slabs_info
def is_2D_slab_reasonsable(self, struct):
'''
There are 400+ 2D bulk materials whose slabs generated by pymaten require
additional filtering: some slabs are cleaved where one or more surface atoms
have no bonds with other atoms on the slab.
Arg:
struct `pymatgen.Structure` object of a slab
Returns:
A boolean indicating whether or not the slab is
reasonable.
'''
for site in struct:
if len(struct.get_neighbors(site, 3)) == 0:
return False
return True
def standardize_bulk(self, atoms):
'''
There are many ways to define a bulk unit cell. If you change the unit cell
itself but also change the locations of the atoms within the unit cell, you
can get effectively the same bulk structure. To address this, there is a
standardization method used to reduce the degrees of freedom such that each
unit cell only has one "true" configuration. This function will align a
unit cell you give it to fit within this standardization.
Args:
atoms: `ase.Atoms` object of the bulk you want to standardize
Returns:
standardized_struct: `pymatgen.Structure` of the standardized bulk
'''
struct = AseAtomsAdaptor.get_structure(atoms)
sga = SpacegroupAnalyzer(struct, symprec=0.1)
standardized_struct = sga.get_conventional_standard_structure()
return standardized_struct
def flip_struct(self, struct):
'''
Flips an atoms object upside down. Normally used to flip surfaces.
Arg:
struct `pymatgen.Structure` object
Returns:
flipped_struct: The same `ase.Atoms` object that was fed as an
argument, but flipped upside down.
'''
atoms = AseAtomsAdaptor.get_atoms(struct)
# This is black magic wizardry to me. Good look figuring it out.
atoms.wrap()
atoms.rotate(180, 'x', rotate_cell=True, center='COM')
if atoms.cell[2][2] < 0.:
atoms.cell[2] = -atoms.cell[2]
if np.cross(atoms.cell[0], atoms.cell[1])[2] < 0.0:
atoms.cell[1] = -atoms.cell[1]
atoms.center()
atoms.wrap()
flipped_struct = AseAtomsAdaptor.get_structure(atoms)
return flipped_struct
def is_structure_invertible(self, structure):
'''
This function figures out whether or not an `pymatgen.Structure` object has
symmetricity. In this function, the affine matrix is a rotation matrix that
is multiplied with the XYZ positions of the crystal. If the z,z component
of that is negative, it means symmetry operation exist, it could be a
mirror operation, or one that involves multiple rotations/etc. Regardless,
it means that the top becomes the bottom and vice-versa, and the structure
is the symmetric. i.e. structure_XYZ = structure_XYZ*M.
In short: If this function returns `False`, then the input structure can
be flipped in the z-direction to create a new structure.
Arg:
structure: A `pymatgen.Structure` object.
Returns
A boolean indicating whether or not your `ase.Atoms` object is
symmetric in z-direction (i.e. symmetric with respect to x-y plane).
'''
# If any of the operations involve a transformation in the z-direction,
# then the structure is invertible.
sga = SpacegroupAnalyzer(structure, symprec=0.1)
for operation in sga.get_symmetry_operations():
xform_matrix = operation.affine_matrix
z_xform = xform_matrix[2, 2]
if z_xform == -1:
return True
return False
| [
"pymatgen.io.ase.AseAtomsAdaptor.get_atoms",
"numpy.cross",
"numpy.random.choice",
"pickle.load",
"pymatgen.core.surface.SlabGenerator",
"pymatgen.core.surface.get_symmetrically_distinct_miller_indices",
"pymatgen.symmetry.analyzer.SpacegroupAnalyzer",
"pymatgen.io.ase.AseAtomsAdaptor.get_structure"
] | [((5132, 5177), 'numpy.random.choice', 'np.random.choice', (['possible_n_elems'], {'p': 'weights'}), '(possible_n_elems, p=weights)\n', (5148, 5177), True, 'import numpy as np\n'), ((7442, 7508), 'pymatgen.core.surface.get_symmetrically_distinct_miller_indices', 'get_symmetrically_distinct_miller_indices', (['bulk_struct', 'MAX_MILLER'], {}), '(bulk_struct, MAX_MILLER)\n', (7483, 7508), False, 'from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices\n'), ((10323, 10359), 'pymatgen.io.ase.AseAtomsAdaptor.get_structure', 'AseAtomsAdaptor.get_structure', (['atoms'], {}), '(atoms)\n', (10352, 10359), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((10374, 10413), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['struct'], {'symprec': '(0.1)'}), '(struct, symprec=0.1)\n', (10392, 10413), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((10890, 10923), 'pymatgen.io.ase.AseAtomsAdaptor.get_atoms', 'AseAtomsAdaptor.get_atoms', (['struct'], {}), '(struct)\n', (10915, 10923), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((11332, 11368), 'pymatgen.io.ase.AseAtomsAdaptor.get_structure', 'AseAtomsAdaptor.get_structure', (['atoms'], {}), '(atoms)\n', (11361, 11368), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((12558, 12600), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {'symprec': '(0.1)'}), '(structure, symprec=0.1)\n', (12576, 12600), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((6154, 6168), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6165, 6168), False, 'import pickle\n'), ((7533, 7722), 'pymatgen.core.surface.SlabGenerator', 'SlabGenerator', ([], {'initial_structure': 'bulk_struct', 'miller_index': 'millers', 'min_slab_size': '(7.0)', 'min_vacuum_size': '(20.0)', 'lll_reduce': '(False)', 'center_slab': '(True)', 'primitive': '(True)', 'max_normal_search': '(1)'}), '(initial_structure=bulk_struct, miller_index=millers,\n min_slab_size=7.0, min_vacuum_size=20.0, lll_reduce=False, center_slab=\n True, primitive=True, max_normal_search=1)\n', (7546, 7722), False, 'from pymatgen.core.surface import SlabGenerator, get_symmetrically_distinct_miller_indices\n'), ((3706, 3746), 'numpy.random.choice', 'np.random.choice', (['total_elements_for_key'], {}), '(total_elements_for_key)\n', (3722, 3746), True, 'import numpy as np\n'), ((11170, 11208), 'numpy.cross', 'np.cross', (['atoms.cell[0]', 'atoms.cell[1]'], {}), '(atoms.cell[0], atoms.cell[1])\n', (11178, 11208), True, 'import numpy as np\n')] |
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from core.models import Person, OfficeLocation, OrgGroup
import random
class Command(BaseCommand):
args = '<number_of_users>'
help = 'Creates random users for local testing'
def handle(self, *args, **options):
last_names = [
'Sherman', 'Maddox', 'Montgomery', 'Small', 'Larsen', 'Marsh', 'Gardner', 'White', 'Gill', 'Pennington', 'Stein', 'Kirby', 'Jennings', 'French', 'Glass', 'Velasquez', 'Doyle', 'York', 'Fisher', 'Strong', 'Henson', 'Harmon', 'Higgins', 'James', 'Hancock', 'Drake', 'Eaton', 'Gordon', 'Harrington', 'Blevins', 'Avila', 'Solis', 'Richmond', 'Stark', 'Haynes', 'Durham', 'Montoya', 'Barrett', 'Chase', 'Mckay', 'Little', 'Perry', 'Howard', 'Caldwell', 'West', 'Fox', 'Long', 'Wright', 'Foster', 'Sloan', 'Frazier', 'Lowe', 'Cabrera', 'Barron', 'Ayala', 'Frank', 'Hammond', 'Orr', 'Holloway', 'King', 'Rush', 'Wiley', 'Neal', 'Davis', 'Fulton', 'Webb', 'Sanchez', 'Strickland', 'Clark', 'Middleton', 'Moody', 'Owens', 'Graham', 'Cotton', 'Shaffer', 'Hawkins',
'Cooper', 'Justice', 'Clarke', 'Mcconnell', 'Mccarthy', 'Macdonald', 'Castillo', 'Gilbert', 'Horton', 'Finley', 'Beard', 'Sanders', 'Levy', 'Richard', 'Bowen', 'Grant', 'Wilkins', 'Ramsey', 'Lynch', 'Koch', 'Mercado', 'Roach', 'Bond', 'Lane', 'Tanner', 'Byers', 'Humphrey', 'Austin', 'Carney', 'Golden', 'Pope', 'Kramer', 'Ellison', 'Jefferson', 'Duffy', 'Gross', 'Mcmahon', 'Hudson', 'Mckee', 'Atkinson', 'Bush', 'Thompson', 'Faulkner', 'Christian', 'Ingram', 'Cannon', 'Gay', 'Nieves', 'Hodges', 'Langley', 'Watson', 'Woods', 'Gallagher', 'Delacruz', 'Stafford', 'Knight', 'Kerr', 'Chapman', 'Roman', 'Christensen', 'Robles', 'Mathews', 'Waller', 'Buckley', 'Myers', 'Powers', 'Lindsay', 'Gates', 'Miller', 'Johns', 'Morin', 'Fleming', 'Bishop', 'Clements']
first_names = [
'Sarah', 'Ian', 'Upton', 'Uriah', 'Hayden', 'Zia', 'Lila', 'Benjamin', 'Addison', 'Vivian', 'Kirby', 'Oscar', 'Demetrius', 'Hashim', 'Michelle', 'Odessa', 'Phillip', 'Michael', 'Dante', 'Omar', 'Dominic', 'Wing', 'Joshua', 'Charlotte', 'Thomas', 'Aquila', 'Rana', 'Jolene', 'Felix', 'Cailin', 'Tatiana', 'Oprah', 'Belle', 'Sydnee', 'Kuame', 'Fleur', 'Matthew', 'Sylvia', 'Mary', 'Deborah', 'Ross', 'Hyacinth', 'Jacqueline', 'Jessica', 'Callie', 'Ariana', 'Leo', 'Desiree', 'Lunea', 'Chava', 'Jorden', 'Rudyard', 'Cally', 'Knox', 'Arthur', 'Dana', 'Rebekah', 'Yen', 'Hadassah', 'Duncan', 'Ginger', 'Valentine', 'Ivana', 'Iona', 'Jemima', 'Dorothy', 'Joan', 'Timothy', 'Amity', 'Uriel', 'Skyler', 'Phelan', 'Alma', 'Hadley',
'Quemby', 'Sonya', 'Axel', 'Slade', 'Riley', 'Rajah', 'Giselle', 'Selma', 'Nadine', 'Pascale', 'Carol', 'Steel', 'Lane', 'Emi', 'Trevor', 'Wyatt', 'Claire', 'Harlan', 'Liberty', 'Alexandra', 'Avram', 'Barbara', 'Rashad', 'Holmes', 'Kenneth', 'Preston', 'Patience', 'Adele', 'Alfonso', 'Harrison', 'Julian', 'Jena', 'Peter', 'Kessie', 'Katell', 'Denton', 'Piper', 'Jerry', 'Teegan', 'Chandler', 'Walter', 'Cheryl', 'Desirae', 'Tasha', 'Hunter', 'Logan', 'Tatyana', 'Gail', 'Galvin', 'Dara', 'Athena', 'Kay', 'Dustin', 'Faith', 'Mariam', 'Leroy', 'Edan', 'Alexis', 'Nissim', 'Octavia', 'Kareem', 'Heidi', 'Aspen', 'Gregory', 'Garrison', 'Jolie', 'Gloria', 'Alec', 'Asher', 'Julie', 'Ayanna', 'Gavin', 'Germane', 'Bertha', 'Quinn', 'Tarik']
office_location = OfficeLocation.objects.get(pk='DC123')
for x in xrange(int(args[0])):
while True:
first_name = first_names[int(random.random() *
len(first_names))]
last_name = last_names[int(random.random() * len(last_names))]
username = last_name + first_name[0]
email = username + '@exmaple.com'
if not get_user_model().objects.filter(username=email).exists():
break
user_attr = {
'first_name': first_name,
'last_name': last_name,
'is_active': True,
'is_superuser': False,
'date_joined': '2012-03-01T16:03:14Z',
'password': '<PASSWORD>=',
'is_staff': True,
'email': email,
'username': email
}
user = get_user_model(**user_attr)
user.save()
person_attr = {
'office_location': office_location,
'allow_tagging': True,
'photo_file': 'avatars/default1.jpg',
'stub': username.replace('.', ''),
'office_phone': '5555555555',
'user': user,
'email_notifications': False,
'org_group': OrgGroup.objects.order_by('?')[0]
}
person = Person(**person_attr)
person.save()
self.stdout.write(
'Successfully created %d users and persons' % int(args[0]))
| [
"django.contrib.auth.get_user_model",
"core.models.OfficeLocation.objects.get",
"core.models.Person",
"random.random",
"core.models.OrgGroup.objects.order_by"
] | [((3422, 3460), 'core.models.OfficeLocation.objects.get', 'OfficeLocation.objects.get', ([], {'pk': '"""DC123"""'}), "(pk='DC123')\n", (3448, 3460), False, 'from core.models import Person, OfficeLocation, OrgGroup\n'), ((4356, 4383), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '(**user_attr)\n', (4370, 4383), False, 'from django.contrib.auth import get_user_model\n'), ((4854, 4875), 'core.models.Person', 'Person', ([], {}), '(**person_attr)\n', (4860, 4875), False, 'from core.models import Person, OfficeLocation, OrgGroup\n'), ((4784, 4814), 'core.models.OrgGroup.objects.order_by', 'OrgGroup.objects.order_by', (['"""?"""'], {}), "('?')\n", (4809, 4814), False, 'from core.models import Person, OfficeLocation, OrgGroup\n'), ((3570, 3585), 'random.random', 'random.random', ([], {}), '()\n', (3583, 3585), False, 'import random\n'), ((3695, 3710), 'random.random', 'random.random', ([], {}), '()\n', (3708, 3710), False, 'import random\n'), ((3857, 3873), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (3871, 3873), False, 'from django.contrib.auth import get_user_model\n')] |