code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
from telegram.ext import Updater, MessageHandler, CommandHandler, Filters
import os
import subprocess
import logging
import random
import numpy
import datetime
import time
import atexit
# Configure file logging for the bot.
logging.basicConfig(
    filename='spam.log',
    level=logging.INFO,
    format='[%(asctime)s] [%(levelname)s] %(message)s'
)
# telegram API-key (read from the environment, never hard-coded)
token = os.environ['TG_TOKEN']
######## global variables ########
catIsHungry = False        # True while the cat is waiting to be fed
complainedRecently = True  # throttles repeated hunger complaints
fedTime = time.time()  # fed at startup
def start(bot, update):
    """Handle /start: greet the chat with a short introduction."""
    bot.sendMessage(
        update.message.chat.id,
        'Heissulivei! Tämä on PehmoleluBotti, jolla voi esim. ruokkia kisulia.')
def exit_handler():
    """atexit hook: announce that the bot is shutting down."""
    print("Sammutetaan...")
def catFed():  # retrieve how many times the cat has been fed
    """Return the feed count stored on the last line of fedLog.txt.

    The log format is '<timestamp> <count>' per line (written by feed_cat).
    Returns 0 when the file is missing, empty, or malformed.

    Fixes over the original: the bare `except:` is narrowed to the actual
    failure modes, and the file is closed even when parsing raises.
    """
    try:
        with open('fedLog.txt', 'r') as fedFile:
            newestFed = int(fedFile.readlines()[-1].split()[1])
    except (OSError, IndexError, ValueError):
        # file absent (created later by feed_cat), empty, or corrupt line
        newestFed = 0
    return newestFed
def cat_hungry(bot, update):
    """Handle /kisulinalka: report whether the cat is currently hungry."""
    print(update)  # dump the raw update for debugging
    chat_id = update.message.chat.id
    global catIsHungry
    if not catIsHungry:
        replies = ["Kummasti kisuli ei ole nälkäinen!",
                   "Kisulilla ei maha murise!",
                   "Kummasti kisuli ei ole nälkäinen!"]
        bot.sendSticker(chat_id, sticker_map.get('kisuli'))
    else:
        replies = ['Kisuli on nälkäinen! *miaaaaau*',
                   'Kisuli on nälkäinen. *murrr*',
                   'Kisuli tahtoo ruokaa /ᐠ。‸。ᐟ\\']
        bot.send_photo(chat_id, photo=open('Images/tahtooNamusia.jpg', 'rb'))
    bot.sendMessage(chat_id, random.choice(replies))
def cat_hungry_random(): # random aika
# return random.randint(3*60*60, 6*60*60) # 3h<t<6h
return numpy.random.exponential(1.5)*4*60*60
def eaten_recently():
    """Return True if the cat was fed within the last two hours.

    Reads the module-level fedTime set by feed_cat. Idiom fix: returns the
    comparison directly instead of an if/else returning True/False; the
    no-op `global` declaration (only needed for assignment) was dropped.
    """
    print('cat has eaten_recently')
    # fedTime - now is negative; "more than -2 h" means fed less than 2 h ago
    return fedTime - time.time() > -(2 * 60 * 60)
def cat_gets_hungry(bot=None, job=None):  # just flips the flag
    """Job-queue callback: mark the cat hungry and nag both group chats.

    Sends an image plus a random complaint to the hard-coded chat ids
    (louder if the cat has not eaten in the last two hours), then
    reschedules itself at a fresh random delay.
    NOTE(review): the photo file handles opened here are never closed.
    """
    chat_id = [-1001291373279, -1001131311658]
    global catIsHungry, updater
    catIsHungry = True
    if not eaten_recently():
        # not fed for a while: complain loudly
        for id in chat_id:
            updater.dispatcher.bot.send_photo(id, photo=open('Images/murr.jpg', 'rb'))
        ran = ['*MIAAAAAAAU* Kisuli ei ole syönyt johonkin aikaan, kisulilla on nälkä!',
               '*murrrrrrrr* Kisulilla on nälkä /ᐠ。‸。ᐟ\\',
               'Kisuli tahtoo ruokaa /ᐠ。‸。ᐟ\\']
    else:
        # fed recently but hungry again: milder nag
        for id in chat_id:
            updater.dispatcher.bot.send_photo(id, photo=open('Images/tahtooNamusia.jpg', 'rb'))
        ran = ['Kisuli on nälkäinen! *miaaaaau*',
               'Kisuli on nälkäinen. *murrr*',
               'Kisuli tahtoo ruokaa /ᐠ。‸。ᐟ\\']
    for id in chat_id:
        updater.dispatcher.bot.sendMessage(id, random.choice(ran))
    jq.run_once(cat_gets_hungry, when=cat_hungry_random())  # stack the new time for feeding
def feed_cat(bot, update):
    """Handle /syotakisuli: feed the cat, log the feeding, reply in chat.

    Resets the hunger/complaint flags, awards a leaderboard point only when
    the cat was actually hungry, appends '<timestamp> <count>' to
    fedLog.txt, and replies with a sticker plus a random message.
    """
    chat_id = update.message.chat.id
    global catIsHungry, fedTime, complainedRecently
    fedTime = time.time()
    complainedRecently = False
    if not catIsHungry:
        ran = ['Kisuli ei ole nälkäinen, mutta ottaa silti namun =^._.^=',
               'Kisuli popsii namun, vaikka ei ole nälkäinen.',
               'Kisulilla ei ole näläkä; se ei estä syömästä namua.']
    else:  # was hungry
        catIsHungry = False
        ran = ['Kisuli syö namun. Ei ole nyt ainakaan nälkäinen!',
               'Kisuli syö iloisesti nälkäänsä. *miaaaaau*',
               'Kisuli popsii namun ahkerasti!']
        add_point(bot, update)  # award a point only for a hunger-feed
    nowFed = catFed() + 1
    fedFile = open('fedLog.txt', 'a+')
    fedFile.write(str(time.time()) + " " + str(nowFed) + "\n")
    fedFile.close()
    bot.sendSticker(chat_id, sticker_map.get('kisuli'))
    bot.sendMessage(chat_id, random.choice(ran))
def cat_fed_times(bot, update):
    """Reply with the total number of times the cat has been fed."""
    text = 'Kisulille on annettu namuja ' + str(catFed()) + ' =^._.^='
    bot.sendMessage(update.message.chat.id, text)
def handle_message(bot, update):
    """Handle any text message: food words, hunger complaints, hotword stickers.

    - Mentioning a food word triggers a teasing reply.
    - If the cat is hungry, unfed for over two hours, and has not complained
      recently, it complains once (throttled via complainedRecently).
    - The word 'forceupdateplot' forces a plot refresh (debug aid).
    - Every hotword found in sticker_map gets its sticker sent.

    Cleanup over the original: removed the dead triple-quoted code blobs
    and replaced list(map(lambda ...)) with a comprehension.
    """
    chat_id = update.message.chat.id
    if update.message.text:
        words = [w.lower() for w in update.message.text.split()]
        global catIsHungry, complainedRecently
        commonWords = list(set(words).intersection(foodWords))
        if commonWords:
            ruokaSana = random.choice(commonWords)
            ran = ['Kisuli kuulee sanan ' + ruokaSana + ', hän melkein tuli nälkäiseksi.',
                   'Kisuli ei aivan nälkääntynyt kuullessaan sanan ' + ruokaSana,
                   'Kisulille tuli melkein näläkä kuultuaan sanan ' + ruokaSana]
            bot.sendMessage(chat_id, random.choice(ran))
        elif (not eaten_recently()) and catIsHungry and (not complainedRecently):
            complainedRecently = True
            bot.send_photo(chat_id, photo=open('Images/murr.jpg', 'rb'))
            ran = ['*MIAAAAAAAU* Kisuli ei ole syönyt johonkin aikaan, kisulilla on nälkä!',
                   '*murrrrrrrr* Kisulilla on nälkä /ᐠ。‸。ᐟ\\',
                   'Kisuli tahtoo ruokaa /ᐠ。‸。ᐟ\\']
            bot.sendMessage(chat_id, random.choice(ran))
        if "forceupdateplot" in words:  # debug hook
            update_plot()
        # send a sticker for every hotword that appears in the message
        for hotword, sticker in sticker_map.items():
            if hotword in words:
                bot.sendSticker(chat_id, sticker)
def show_plot(bot, update):
    """Send the feeding-count plot image to the requesting chat.

    Fix: the image file handle is now closed via a context manager
    (the original leaked it).
    """
    with open('plotti.png', 'rb') as plot_file:
        bot.send_photo(update.message.chat.id, photo=plot_file)
def show_leaderboards(bot, update):
    """Send the leaderboards image to the requesting chat.

    Fix: the image file handle is now closed via a context manager
    (the original leaked it).
    """
    with open('leaderboards.png', 'rb') as board_file:
        bot.send_photo(update.message.chat.id, photo=board_file)
def add_point(bot, update):
    """Append a scoring entry '<timestamp> <username>' to leaderboardsLog.txt.

    `bot` is unused but kept for the common handler signature.
    Fix: the log file is now closed via a context manager even if the
    write raises.
    """
    with open('leaderboardsLog.txt', 'a+') as pointsFile:
        pointsFile.write(str(time.time()) + " " + str(update.message.from_user.username) + "\n")
"""
def wappu(bot, update):
chat_id = update.message.chat.id
bot.send_photo(chat_id, photo=open('Images/wappu.png', 'rb'))
"""
# hotword -> Telegram sticker file_id; handle_message sends the matching
# sticker whenever a hotword appears in a chat message
sticker_map = {
    'husky-strawberry': 'CAADBAADHQADlS56CMNshytcGo3hAg',
    'winston': 'CAADBAADIQADlS56CKuKJ27vuhaPAg',
    'winstonjoulu': 'CAADBAADQAADlS56CC-y3uHoyBk9Ag',
    'pusheenwinston': 'CAADBAADKAADlS56CLUIxcv8o91KAg',
    'penelope': 'CAADBAADKgADlS56CJ0fGrSNoQ_mAg',
    'pingviinigang': 'CAADBAADKwADlS56CLo6nNJF-9kuAg',
    'inttinalle': 'CAADBAADKQADlS56CE8pASMtZ-jqAg',
    'kisuli': 'CAADBAADXwADlS56CL7r1G64m5GQAg',
    'pingviini': 'CAADBAADYAADlS56CNYIEUXgh5upAg',
    'miisa': 'CAADBAADYgADlS56CKRNpGh4NEIKAg',
    'kaarleppi': 'CAADBAADHgADlS56CIVOivZh0GgWAg',
    'мишка': 'CAADBAADYwADlS56CGH_gl4AAXE1SAI',
}
def update_plot(bot=None, job=None):
    """Regenerate the feeding plot by running the external plotting script.

    NOTE(review): shell=True with a fixed string is not injectable here,
    but running the script without a shell would be more robust -- confirm
    whether ./runPlot.run relies on shell features. (The original comment
    asked whether a safer way exists.)
    """
    subprocess.call("./runPlot.run", shell=True)
def not_complained_recently(bot=None, job=None):
    """Periodic job: clear the throttle flag so the cat may complain again."""
    global complainedRecently
    complainedRecently = False
def show_leaderboards_daily(bot=None, job=None):
    """Daily job: post the leaderboards image to every known group chat.

    Fixes over the original:
    - the photo was sent to `chat_id` (the whole list) instead of the loop
      variable, so send_photo received a list as the chat id;
    - the image file handle is now closed after each send.
    """
    chat_ids = [-1001291373279, -1001131311658]
    for chat_id in chat_ids:
        with open('leaderboards.png', 'rb') as photo_file:
            bot.send_photo(chat_id, photo=photo_file)
# Load the food words into memory once at startup.
# NOTE(review): the file handle opened here is never closed.
foodWords = [line.rstrip('\n') for line in open("ruokasanat.txt", "r")]
updater = Updater(token)
# Background jobs on the job queue
jq = updater.job_queue
# jq.run_repeating(cat_gets_hungry, interval=cat_hungry_random(), first=cat_hungry_random())
jq.run_once(cat_gets_hungry, when=cat_hungry_random())  # first run
jq.run_repeating(not_complained_recently, interval=(0.5*60*60), first=0)
jq.run_repeating(update_plot, interval=(30*60), first=0)
jq.run_daily(show_leaderboards_daily, datetime.time(0))  # at midnight
# Wire up the Telegram command handlers
updater.dispatcher.add_handler(CommandHandler('start', start))
#updater.dispatcher.add_handler(CommandHandler('wappu', wappu))
updater.dispatcher.add_handler(CommandHandler('kisulinalka', cat_hungry))
updater.dispatcher.add_handler(CommandHandler('syotakisuli', feed_cat))
updater.dispatcher.add_handler(CommandHandler('syottokerrat', show_plot))
updater.dispatcher.add_handler(CommandHandler('leaderboards', show_leaderboards))
updater.dispatcher.add_handler(MessageHandler(Filters.all, handle_message))
updater.start_polling()
updater.idle()
# try to close fedFile on shutdown (exit_handler currently only prints)
atexit.register(exit_handler)
| [
"logging.basicConfig",
"random.choice",
"datetime.time",
"atexit.register",
"numpy.random.exponential",
"subprocess.call",
"telegram.ext.MessageHandler",
"time.time",
"telegram.ext.Updater",
"telegram.ext.CommandHandler"
] | [((211, 328), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""spam.log"""', 'level': 'logging.INFO', 'format': '"""[%(asctime)s] [%(levelname)s] %(message)s"""'}), "(filename='spam.log', level=logging.INFO, format=\n '[%(asctime)s] [%(levelname)s] %(message)s')\n", (230, 328), False, 'import logging\n'), ((487, 498), 'time.time', 'time.time', ([], {}), '()\n', (496, 498), False, 'import time\n'), ((7896, 7910), 'telegram.ext.Updater', 'Updater', (['token'], {}), '(token)\n', (7903, 7910), False, 'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((8958, 8987), 'atexit.register', 'atexit.register', (['exit_handler'], {}), '(exit_handler)\n', (8973, 8987), False, 'import atexit\n'), ((3242, 3253), 'time.time', 'time.time', ([], {}), '()\n', (3251, 3253), False, 'import time\n'), ((7395, 7439), 'subprocess.call', 'subprocess.call', (['"""./runPlot.run"""'], {'shell': '(True)'}), "('./runPlot.run', shell=True)\n", (7410, 7439), False, 'import subprocess\n'), ((8312, 8328), 'datetime.time', 'datetime.time', (['(0)'], {}), '(0)\n', (8325, 8328), False, 'import datetime\n'), ((8411, 8441), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""start"""', 'start'], {}), "('start', start)\n", (8425, 8441), False, 'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((8538, 8579), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""kisulinalka"""', 'cat_hungry'], {}), "('kisulinalka', cat_hungry)\n", (8552, 8579), False, 'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((8612, 8651), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""syotakisuli"""', 'feed_cat'], {}), "('syotakisuli', feed_cat)\n", (8626, 8651), False, 'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((8684, 8725), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""syottokerrat"""', 'show_plot'], {}), "('syottokerrat', show_plot)\n", (8698, 8725), False, 
'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((8758, 8807), 'telegram.ext.CommandHandler', 'CommandHandler', (['"""leaderboards"""', 'show_leaderboards'], {}), "('leaderboards', show_leaderboards)\n", (8772, 8807), False, 'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((8840, 8883), 'telegram.ext.MessageHandler', 'MessageHandler', (['Filters.all', 'handle_message'], {}), '(Filters.all, handle_message)\n', (8854, 8883), False, 'from telegram.ext import Updater, MessageHandler, CommandHandler, Filters\n'), ((1772, 1790), 'random.choice', 'random.choice', (['ran'], {}), '(ran)\n', (1785, 1790), False, 'import random\n'), ((4041, 4059), 'random.choice', 'random.choice', (['ran'], {}), '(ran)\n', (4054, 4059), False, 'import random\n'), ((2035, 2046), 'time.time', 'time.time', ([], {}), '()\n', (2044, 2046), False, 'import time\n'), ((2997, 3015), 'random.choice', 'random.choice', (['ran'], {}), '(ran)\n', (3010, 3015), False, 'import random\n'), ((4712, 4738), 'random.choice', 'random.choice', (['commonWords'], {}), '(commonWords)\n', (4725, 4738), False, 'import random\n'), ((1901, 1930), 'numpy.random.exponential', 'numpy.random.exponential', (['(1.5)'], {}), '(1.5)\n', (1925, 1930), False, 'import numpy\n'), ((5286, 5304), 'random.choice', 'random.choice', (['ran'], {}), '(ran)\n', (5299, 5304), False, 'import random\n'), ((5746, 5764), 'random.choice', 'random.choice', (['ran'], {}), '(ran)\n', (5759, 5764), False, 'import random\n'), ((3895, 3906), 'time.time', 'time.time', ([], {}), '()\n', (3904, 3906), False, 'import time\n'), ((6466, 6477), 'time.time', 'time.time', ([], {}), '()\n', (6475, 6477), False, 'import time\n')] |
import constants
import gen
import vis
import math
import cv2
from cv2 import xphoto
import numpy as np
from glob import glob
# makes output match image, but internal processing is mirrored
FLIP = True
RES = 1080  # working resolution: input images are resized to RES x RES
# Options: None, ROTATE_TEE, ROTATE_BASE, TEE, BASE
# Warping beyond rotation (TEE, BASE) is currently broken
WARP_METHOD = "ROTATE_TEE"
R_ADJ = 0.8  # color does not extend to the edge of the rock
R_THR = 1.15  # rock radius permissibility
R_FILL = 0.60  # minimum proportion of color filling rock radius
# (lower, upper) color bounds; the BLU/YEL/RED pairs are HSV bounds.
# NOTE(review): TGT0/TGT1 are applied to a BGR image in find_target -- confirm.
# target mark color
TGT0 = np.array([50,220,20])
TGT1 = np.array([60,255,50])
BLU0 = np.array([85,30,30])
BLU1 = np.array([105,255,255])
YEL0 = np.array([20,70,130])
YEL1 = np.array([45,255,250])
# red wraps around hue 0, hence two ranges
RED1_0 = np.array([0,90,100])
RED1_1 = np.array([20,255,220])
RED2_0 = np.array([175,90,100])
RED2_1 = np.array([180,255,220])
def find_tee(img, DEBUG=False):
    """Locate the tee (house centre) as the best blue Hough circle.

    Thresholds the blue house rings in HSV, runs a Hough circle transform,
    and keeps a circle that sits in the middle third horizontally and the
    lower half vertically.

    Returns (debug_image, (x, y), radius), or None when nothing matched.
    NOTE(review): callers unpack three values, so a None return raises at
    the call site -- confirm intended.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # FIND TEE: isolate blue pixels and binarise
    mask_blue = cv2.inRange(hsv, BLU0, BLU1)
    res_blue = cv2.bitwise_and(hsv, hsv, mask=mask_blue)
    gray_blue = cv2.cvtColor(res_blue, cv2.COLOR_BGR2GRAY)
    _, thresh_blue = cv2.threshold(gray_blue, 10, 255, cv2.THRESH_BINARY)
    bdbg = cv2.cvtColor(thresh_blue, cv2.COLOR_GRAY2BGR)
    bcircs = cv2.HoughCircles(thresh_blue, cv2.HOUGH_GRADIENT, 3.1, RES/8, param1=100, param2=120, minRadius=RES//4, maxRadius=RES//2)[0]
    bcircs = [list([c for c in h]) for h in bcircs]
    r12 = 0.0
    tee = None
    for c in bcircs:
        x, y, radius = c
        center = (x, y)
        if x > RES/3 and x < 2*RES/3 and y > RES/2:
            # NOTE(review): 'and' binds tighter than 'or', so once a tee is
            # set, BOTH a larger radius and a more central x are required to
            # replace it -- confirm this precedence is intended.
            if tee is None or radius > r12 and abs(RES/2-x) < abs(RES/2-tee[0]):
                r12 = radius
                tee = center
        if DEBUG:
            # draw every Hough candidate in cyan
            c = [int(f) for f in center]
            r = int(radius)
            cv2.circle(bdbg, c, r, (240, 240, 0), 2)
    if tee is None:
        return None
    if DEBUG:
        # draw the chosen tee circle in blue
        c = [int(f) for f in tee]
        r = int(r12)
        cv2.circle(bdbg, c, r, (255, 0, 0), 5)
    return bdbg, tee, r12
# WARP IMAGE SO SHEET SIDES ARE VERTICAL
# 1: filter image to highlight sheet vs non-sheet
# 2: detect lines indicating sides of sheet. Average left and right groups.
# 3: find the bisecting angle of these lines to find the required rotation
# 4: calculate warp matrix to rotate + fix perspective
def warp(img, tee=None, method=None, DEBUG=False):
    """Rectify the sheet image so its side lines are vertical.

    1: threshold + edge-detect to highlight sheet vs non-sheet
    2: detect near-vertical Hough line segments; average left/right groups
    3: the sum of the two line angles gives the rotation to apply
    4: build a perspective transform per `method` and warp the image

    Returns (warped_image, debug_image, left_bound, right_bound).

    Fixes over the original:
    - cv2.HoughLinesP returns None when no lines are found; the original
      indexed the result before its None check and would crash.
    - the guard before the perspective transform was
      `method is not None or method != "NONE"`, which is always true and
      crashed (ini=None) for method None/"NONE"; it now fires only when a
      branch actually produced corner points.
    - removed the duplicated debug line-draws.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh_gray = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    blur_gray = cv2.blur(thresh_gray, (3, 3))
    canny = cv2.Canny(blur_gray, 100, 200)
    canny_blur = cv2.blur(canny, (5, 5))
    warped = img.copy()
    ldbg = cv2.cvtColor(thresh_gray, cv2.COLOR_GRAY2BGR)
    lb = 0
    rb = RES
    lines = cv2.HoughLinesP(canny_blur, 1, 0.01, 10, minLineLength=RES/2)
    MAXSKEW = RES/20
    sidelines = list()
    if lines is not None:
        # keep only near-vertical segments (candidate sheet sides)
        for line in (l[0] for l in lines):
            x1, y1, x2, y2 = line
            if abs(x2-x1) < MAXSKEW:
                sidelines.append(line)
    llines = list()
    rlines = list()
    for line in sidelines:
        x1, y1, x2, y2 = line
        if (x1+x2)/2 < RES/2:
            llines.append(line)
        else:
            rlines.append(line)
    if len(llines) > 0 and len(rlines) > 0:
        lbound = np.mean(llines, axis=0)  # averaged left side line
        rbound = np.mean(rlines, axis=0)  # averaged right side line
        # inverse slopes (dx/dy), suitable because the lines are near-vertical
        lm = (lbound[2]-lbound[0])/(lbound[3]-lbound[1])
        rm = (rbound[2]-rbound[0])/(rbound[3]-rbound[1])
        la = math.atan(lm)
        ra = math.atan(rm)
        theta = la + ra  # combined rotation angle

        def lbfy(y):
            # x coordinate of the left bound at height y
            b = lbound[0] - (lm*lbound[1])
            return (lm*y)+b

        def rbfy(y):
            # x coordinate of the right bound at height y
            b = rbound[0] - (rm*rbound[1])
            return (rm*y)+b

        def rotate(pt, center):
            # rotate pt by theta around center
            x, y = pt
            cx, cy = center
            sx, sy = x - cx, y - cy
            rx = sx*math.cos(theta) - sy*math.sin(theta) + cx
            ry = sx*math.sin(theta) + sy*math.cos(theta) + cy
            return [rx, ry]

        ini = None
        fin = None
        l0d = None
        r0d = None
        if method is None or method == "NONE":
            # no warp: only report the extreme x of each side line
            lb = min((lbfy(0), lbfy(RES)))
            rb = max((rbfy(0), rbfy(RES)))
        elif method == "ROTATE_BASE":
            # rotate around the midpoint of the top (base) edge
            l0 = [lbfy(0), 0]
            r0 = [rbfy(0), 0]
            c = [(l0[0] + r0[0])/2, 0]
            l1 = [lbfy(RES), RES]
            r1 = [rbfy(RES), RES]
            l0d = rotate(l0, c)
            r0d = rotate(r0, c)
            l1d = rotate(l1, c)
            r1d = rotate(r1, c)
            ini = np.float32([l0, l1, r0, r1])
            fin = np.float32([l0d, l1d, r0d, r1d])
        elif method == "ROTATE_TEE":
            # rotate around the tee
            if tee is None:
                raise Exception("tee required if using method ROTATE_TEE")
            l0 = [lbfy(tee[1]), tee[1]]
            r0 = [rbfy(tee[1]), tee[1]]
            l1 = [lbfy(RES), RES]
            r1 = [rbfy(RES), RES]
            l0d = rotate(l0, tee)
            r0d = rotate(r0, tee)
            l1d = rotate(l1, tee)
            r1d = rotate(r1, tee)
            ini = np.float32([l0, l1, r0, r1])
            fin = np.float32([l0d, l1d, r0d, r1d])
        elif method == "BASE":
            # rotate top edge, force bottom corners vertical (known broken)
            l0 = [lbfy(0), 0]
            r0 = [rbfy(0), 0]
            c = [(l0[0] + r0[0])/2, 0]
            l1 = [lbfy(RES), RES]
            r1 = [rbfy(RES), RES]
            l0d = rotate(l0, c)
            r0d = rotate(r0, c)
            l1d = [l0d[0], RES]
            r1d = [r0d[0], RES]
            ini = np.float32([l0, l1, r0, r1])
            fin = np.float32([l0d, l1d, r0d, r1d])
        elif method == "TEE":
            # rotate around tee, force bottom corners vertical (known broken)
            if tee is None:
                raise Exception("tee required if using method TEE")
            l0 = [lbfy(tee[1]), tee[1]]
            r0 = [rbfy(tee[1]), tee[1]]
            l1 = [lbfy(RES), RES]
            r1 = [rbfy(RES), RES]
            l0d = rotate(l0, tee)
            r0d = rotate(r0, tee)
            l1d = [l0d[0], RES]
            r1d = [r0d[0], RES]
            ini = np.float32([l0, l1, r0, r1])
            fin = np.float32([l0d, l1d, r0d, r1d])
        if ini is not None:  # a warp method produced corner points
            warp_matrix = cv2.getPerspectiveTransform(ini, fin)
            warped = cv2.warpPerspective(img, warp_matrix, (RES, RES))
            lb = int(l0d[0])
            rb = int(r0d[0])
        if DEBUG:
            # draw the averaged side lines in red
            lx1, ly1, lx2, ly2 = [int(v) for v in lbound]
            rx1, ry1, rx2, ry2 = [int(v) for v in rbound]
            cv2.line(ldbg, (lx1, ly1), (lx2, ly2), (0, 0, 255), 2)
            cv2.line(ldbg, (rx1, ry1), (rx2, ry2), (0, 0, 255), 2)
    if DEBUG:
        # draw the final vertical bounds in green
        cv2.line(ldbg, (lb, 0), (lb, RES), (0, 255, 0))
        cv2.line(ldbg, (rb, 0), (rb, RES), (0, 255, 0))
    return warped, ldbg, lb, rb
def find_target(img, tee, scale, lb, rb, DEBUG=False):
    """Locate the green target marker on the (white-balanced) sheet image.

    A candidate is a small (< RES/50 px radius), mostly circular green blob
    between the sheet bounds lb..rb. Returns (debug_image, target) where
    target is (tee - center) * scale, i.e. tee-relative sheet coordinates,
    or None when nothing matched.

    Fix: removed an unused HSV conversion the original computed and never
    used. NOTE(review): cv2.inRange runs on the BGR image, so TGT0/TGT1
    act as BGR bounds -- confirm intended.
    """
    mask_green = cv2.inRange(img, TGT0, TGT1)
    res_green = cv2.bitwise_and(img, img, mask=mask_green)
    gray_green = cv2.cvtColor(res_green, cv2.COLOR_BGR2GRAY)
    _, thresh_green = cv2.threshold(gray_green, 10, 255, cv2.THRESH_BINARY)
    tdbg = cv2.cvtColor(thresh_green, cv2.COLOR_GRAY2BGR)
    cnt_g, _ = cv2.findContours(thresh_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
    gcs = list()
    for c in cnt_g:
        center, radius = cv2.minEnclosingCircle(c)
        (x, y) = center
        if radius < RES/50 and x > lb and x < rb:
            area = cv2.contourArea(c)
            # require the blob to fill most of its enclosing circle
            if area > .8 * math.pi * pow(radius, 2):
                gcs.append(np.subtract(tee, center) * scale)
        if DEBUG:
            ic = [int(f) for f in center]
            r = int(radius)
            cv2.circle(tdbg, ic, r, (0, 240, 240), 1)
    # last accepted candidate wins, as in the original
    target = gcs[-1] if gcs else None
    return tdbg, target
def _collect_rock_coords(contours, tee, scale, lb, rb, rmin, rmax, dbg, color, DEBUG):
    """Filter contours to rock-sized, mostly-circular blobs inside the sheet
    bounds and the playable y-range; return their tee-relative coordinates.
    Draws every contour's enclosing circle onto dbg when DEBUG is set."""
    found = list()
    for c in contours:
        center, radius = cv2.minEnclosingCircle(c)
        (x, y) = center
        if radius > rmin and radius < rmax and x > lb and x < rb:
            area = cv2.contourArea(c)
            # require the blob to fill enough of its enclosing circle
            if area > R_FILL * math.pi * pow(radius, 2):
                coords = np.subtract(tee, center) * scale
                # keep only rocks inside the relevant stretch of the sheet
                if coords[1] > -12.0 and coords[1] < 21.0:
                    found.append(coords)
        if DEBUG:
            ic = [int(f) for f in center]
            cv2.circle(dbg, ic, int(radius), color, 5)
    return found

def find_rocks(img, tee, scale, lb, rb, DEBUG=False):
    """Detect yellow and red rocks on the sheet.

    Returns (debug_image, yellow_coords, red_coords) where coordinates are
    tee-relative sheet units. The two color passes share the filtering
    logic via _collect_rock_coords (the original duplicated the loop).
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask_yellow = cv2.inRange(hsv, YEL0, YEL1)
    # red wraps around hue 0, so combine the two hue bands
    mask_red = cv2.inRange(hsv, RED1_0, RED1_1) + cv2.inRange(hsv, RED2_0, RED2_1)
    res_yellow = cv2.bitwise_and(hsv, hsv, mask=mask_yellow)
    res_red = cv2.bitwise_and(hsv, hsv, mask=mask_red)
    gray_yellow = cv2.cvtColor(res_yellow, cv2.COLOR_BGR2GRAY)
    gray_red = cv2.cvtColor(res_red, cv2.COLOR_BGR2GRAY)
    blur_yellow = cv2.blur(gray_yellow, (3, 3))
    blur_red = cv2.blur(gray_red, (3, 3))
    _, thresh_yellow = cv2.threshold(blur_yellow, 10, 255, cv2.THRESH_BINARY)
    _, thresh_red = cv2.threshold(blur_red, 10, 255, cv2.THRESH_BINARY)
    rdbg = cv2.cvtColor(thresh_yellow + thresh_red, cv2.COLOR_GRAY2BGR)
    cnt_y, _ = cv2.findContours(thresh_yellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
    cnt_r, _ = cv2.findContours(thresh_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
    # acceptable radius range in pixels, derived from the known rock radius
    rmin = (constants.R_ROCK / scale) * R_ADJ / R_THR
    rmax = (constants.R_ROCK / scale) * R_ADJ * R_THR
    ycs = _collect_rock_coords(cnt_y, tee, scale, lb, rb, rmin, rmax, rdbg, (0, 240, 240), DEBUG)
    rcs = _collect_rock_coords(cnt_r, tee, scale, lb, rb, rmin, rmax, rdbg, (0, 0, 255), DEBUG)
    return rdbg, ycs, rcs
def process_sheet(img, DEBUG=False):
    """Analyse one sheet image: locate the tee, rectify, find rocks + target.

    Returns (yellow_coords, red_coords, target) where coordinates are in
    sheet units relative to the tee; target may be None.
    NOTE(review): find_tee may return None, which would raise on unpacking.
    """
    # FIND TEE
    bdbg, tee, r12 = find_tee(img, DEBUG=DEBUG)
    scale = constants.TWELVE / r12  # sheet units per pixel
    # WARP SHEET so the side lines are vertical
    warped, ldbg, lb, rb = warp(img, tee=tee, method=WARP_METHOD, DEBUG=DEBUG)
    gdbg = cv2.cvtColor(cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
    # ADJUST TEE
    #bdbg, tee, r12 = find_tee(warped)
    #scale = constants.TWELVE / r12
    # COLOR CORRECT with a simple white balance
    wb = cv2.xphoto.createSimpleWB()
    cc = wb.balanceWhite(warped)
    # LOCATE TARGET MARKING
    tdbg, target = find_target(cc, tee, scale, lb, rb, DEBUG=DEBUG)
    # LOCATE ROCKS
    rdbg, ycs, rcs = find_rocks(cc, tee, scale, lb, rb, DEBUG=DEBUG)
    if DEBUG:
        # draw everything found onto a grayscale copy of the warped sheet
        c = [int(f) for f in tee]
        r = int(r12)
        cv2.circle(gdbg, c, r, (255, 0, 0), 5)
        cv2.line(gdbg, (lb, 0), (lb, RES), (0, 255, 0), 2)
        cv2.line(gdbg, (rb, 0), (rb, RES), (0, 255, 0), 2)
        cv2.line(gdbg, (lb, 0), (lb, RES), (0, 255, 0), 2)
        cv2.line(gdbg, (rb, 0), (rb, RES), (0, 255, 0), 2)
        if target is not None:
            # map tee-relative sheet units back to pixels for drawing
            tc = [int(f) for f in (-target/scale + tee)]
            cv2.circle(gdbg, tc, int(5), (0, 255, 0), 5)
        for c in ycs:
            c = [int(f) for f in (-c/scale + tee)]
            cv2.circle(gdbg, c, int(constants.R_ROCK/scale), (0, 240, 240), 5)
        for c in rcs:
            c = [int(f) for f in (-c/scale + tee)]
            cv2.circle(gdbg, c, int(constants.R_ROCK/scale), (0, 0, 255), 5)
        #cv2.imshow('ldbg {}'.format(id(img)), ldbg)
        #cv2.imshow('bdbg {}'.format(id(img)), bdbg)
        #cv2.imshow('tdbg {}'.format(id(img)), tdbg)
        #cv2.imshow('rdbg {}'.format(id(img)), rdbg)
        cv2.imshow('debug {}'.format(id(img)), gdbg)
        cv2.waitKey(0)
    return ycs, rcs, target
def get_sheets(DEBUG=False):
    """Load every sheet image under imgs/, analyse it, and build the dataset.

    Filenames encode the throw result in the character before '.png':
    'H' = hit, 'M' = miss. Returns a list of dicts (one per valid sheet)
    combining the rock layout with the target x/y and hit flag; problem
    images are collected into `errors` and reported on stdout.
    """
    urls = glob('imgs/*.png')
    imgs = [cv2.imread(url) for url in urls]
    sheets = list()
    errors = dict()
    for img, url in zip(imgs, urls):
        y, x, c = img.shape
        # convert to RESxRES square, center cropped with full height
        if x > y:
            mgn = (x-y)//2
            img = img[:, mgn:-mgn, :]
        img = cv2.resize(img, (RES, RES), interpolation=cv2.INTER_AREA)
        #cv2.imshow('resized', img)
        #cv2.waitKey(0)
        if FLIP:
            img = cv2.flip(img, 1)
        # decode the throw result from the filename
        hit = None
        if url[-5] == 'H':
            hit = 1
        elif url[-5] == 'M':
            hit = 0
        if hit is not None:
            ycs, rcs, target = process_sheet(img, DEBUG=DEBUG)
            if target is not None:
                if len(ycs + rcs) > 0:
                    sheets.append((ycs + rcs, target, hit))
                else:
                    if DEBUG:
                        print("ERROR: no rocks found in", url)
                    errors[url] = "no rocks"
            else:
                if DEBUG:
                    print("ERROR: no target found in", url)
                errors[url] = "no target"
        else:
            if DEBUG:
                print("ERROR: no result specified for", url)
            errors[url] = "no hit"
    print(len(errors), "errors")
    print(errors)
    data = list()
    for sheet, target, hit in sheets:
        throw = gen.sheet_to_data(sheet)
        throw.update({"x": target[0], "y": target[1], "hit": hit})
        data.append(throw)
    return data
| [
"math.cos",
"numpy.array",
"cv2.warpPerspective",
"cv2.xphoto.createSimpleWB",
"math.atan",
"numpy.mean",
"cv2.threshold",
"cv2.line",
"cv2.HoughCircles",
"numpy.subtract",
"cv2.contourArea",
"cv2.waitKey",
"glob.glob",
"cv2.blur",
"cv2.getPerspectiveTransform",
"gen.sheet_to_data",
... | [((548, 571), 'numpy.array', 'np.array', (['[50, 220, 20]'], {}), '([50, 220, 20])\n', (556, 571), True, 'import numpy as np\n'), ((577, 600), 'numpy.array', 'np.array', (['[60, 255, 50]'], {}), '([60, 255, 50])\n', (585, 600), True, 'import numpy as np\n'), ((607, 629), 'numpy.array', 'np.array', (['[85, 30, 30]'], {}), '([85, 30, 30])\n', (615, 629), True, 'import numpy as np\n'), ((635, 660), 'numpy.array', 'np.array', (['[105, 255, 255]'], {}), '([105, 255, 255])\n', (643, 660), True, 'import numpy as np\n'), ((666, 689), 'numpy.array', 'np.array', (['[20, 70, 130]'], {}), '([20, 70, 130])\n', (674, 689), True, 'import numpy as np\n'), ((695, 719), 'numpy.array', 'np.array', (['[45, 255, 250]'], {}), '([45, 255, 250])\n', (703, 719), True, 'import numpy as np\n'), ((727, 749), 'numpy.array', 'np.array', (['[0, 90, 100]'], {}), '([0, 90, 100])\n', (735, 749), True, 'import numpy as np\n'), ((757, 781), 'numpy.array', 'np.array', (['[20, 255, 220]'], {}), '([20, 255, 220])\n', (765, 781), True, 'import numpy as np\n'), ((789, 813), 'numpy.array', 'np.array', (['[175, 90, 100]'], {}), '([175, 90, 100])\n', (797, 813), True, 'import numpy as np\n'), ((821, 846), 'numpy.array', 'np.array', (['[180, 255, 220]'], {}), '([180, 255, 220])\n', (829, 846), True, 'import numpy as np\n'), ((888, 924), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (900, 924), False, 'import cv2\n'), ((957, 985), 'cv2.inRange', 'cv2.inRange', (['hsv', 'BLU0', 'BLU1'], {}), '(hsv, BLU0, BLU1)\n', (968, 985), False, 'import cv2\n'), ((1001, 1042), 'cv2.bitwise_and', 'cv2.bitwise_and', (['hsv', 'hsv'], {'mask': 'mask_blue'}), '(hsv, hsv, mask=mask_blue)\n', (1016, 1042), False, 'import cv2\n'), ((1058, 1100), 'cv2.cvtColor', 'cv2.cvtColor', (['res_blue', 'cv2.COLOR_BGR2GRAY'], {}), '(res_blue, cv2.COLOR_BGR2GRAY)\n', (1070, 1100), False, 'import cv2\n'), ((1121, 1173), 'cv2.threshold', 'cv2.threshold', (['gray_blue', '(10)', '(255)', 
'cv2.THRESH_BINARY'], {}), '(gray_blue, 10, 255, cv2.THRESH_BINARY)\n', (1134, 1173), False, 'import cv2\n'), ((1182, 1227), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh_blue', 'cv2.COLOR_GRAY2BGR'], {}), '(thresh_blue, cv2.COLOR_GRAY2BGR)\n', (1194, 1227), False, 'import cv2\n'), ((2391, 2428), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2403, 2428), False, 'import cv2\n'), ((2449, 2497), 'cv2.threshold', 'cv2.threshold', (['gray', '(100)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray, 100, 255, cv2.THRESH_BINARY)\n', (2462, 2497), False, 'import cv2\n'), ((2511, 2540), 'cv2.blur', 'cv2.blur', (['thresh_gray', '(3, 3)'], {}), '(thresh_gray, (3, 3))\n', (2519, 2540), False, 'import cv2\n'), ((2552, 2582), 'cv2.Canny', 'cv2.Canny', (['blur_gray', '(100)', '(200)'], {}), '(blur_gray, 100, 200)\n', (2561, 2582), False, 'import cv2\n'), ((2600, 2623), 'cv2.blur', 'cv2.blur', (['canny', '(5, 5)'], {}), '(canny, (5, 5))\n', (2608, 2623), False, 'import cv2\n'), ((2660, 2705), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh_gray', 'cv2.COLOR_GRAY2BGR'], {}), '(thresh_gray, cv2.COLOR_GRAY2BGR)\n', (2672, 2705), False, 'import cv2\n'), ((2743, 2806), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['canny_blur', '(1)', '(0.01)', '(10)'], {'minLineLength': '(RES / 2)'}), '(canny_blur, 1, 0.01, 10, minLineLength=RES / 2)\n', (2758, 2806), False, 'import cv2\n'), ((6870, 6906), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (6882, 6906), False, 'import cv2\n'), ((6924, 6952), 'cv2.inRange', 'cv2.inRange', (['img', 'TGT0', 'TGT1'], {}), '(img, TGT0, TGT1)\n', (6935, 6952), False, 'import cv2\n'), ((6969, 7011), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask_green'}), '(img, img, mask=mask_green)\n', (6984, 7011), False, 'import cv2\n'), ((7028, 7071), 'cv2.cvtColor', 'cv2.cvtColor', (['res_green', 'cv2.COLOR_BGR2GRAY'], {}), '(res_green, cv2.COLOR_BGR2GRAY)\n', (7040, 
7071), False, 'import cv2\n'), ((7093, 7146), 'cv2.threshold', 'cv2.threshold', (['gray_green', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray_green, 10, 255, cv2.THRESH_BINARY)\n', (7106, 7146), False, 'import cv2\n'), ((7156, 7202), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh_green', 'cv2.COLOR_GRAY2BGR'], {}), '(thresh_green, cv2.COLOR_GRAY2BGR)\n', (7168, 7202), False, 'import cv2\n'), ((7218, 7291), 'cv2.findContours', 'cv2.findContours', (['thresh_green', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(thresh_green, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n', (7234, 7291), False, 'import cv2\n'), ((7947, 7983), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (7959, 7983), False, 'import cv2\n'), ((8002, 8030), 'cv2.inRange', 'cv2.inRange', (['hsv', 'YEL0', 'YEL1'], {}), '(hsv, YEL0, YEL1)\n', (8013, 8030), False, 'import cv2\n'), ((8047, 8079), 'cv2.inRange', 'cv2.inRange', (['hsv', 'RED1_0', 'RED1_1'], {}), '(hsv, RED1_0, RED1_1)\n', (8058, 8079), False, 'import cv2\n'), ((8096, 8128), 'cv2.inRange', 'cv2.inRange', (['hsv', 'RED2_0', 'RED2_1'], {}), '(hsv, RED2_0, RED2_1)\n', (8107, 8128), False, 'import cv2\n'), ((8184, 8227), 'cv2.bitwise_and', 'cv2.bitwise_and', (['hsv', 'hsv'], {'mask': 'mask_yellow'}), '(hsv, hsv, mask=mask_yellow)\n', (8199, 8227), False, 'import cv2\n'), ((8241, 8281), 'cv2.bitwise_and', 'cv2.bitwise_and', (['hsv', 'hsv'], {'mask': 'mask_red'}), '(hsv, hsv, mask=mask_red)\n', (8256, 8281), False, 'import cv2\n'), ((8300, 8344), 'cv2.cvtColor', 'cv2.cvtColor', (['res_yellow', 'cv2.COLOR_BGR2GRAY'], {}), '(res_yellow, cv2.COLOR_BGR2GRAY)\n', (8312, 8344), False, 'import cv2\n'), ((8360, 8401), 'cv2.cvtColor', 'cv2.cvtColor', (['res_red', 'cv2.COLOR_BGR2GRAY'], {}), '(res_red, cv2.COLOR_BGR2GRAY)\n', (8372, 8401), False, 'import cv2\n'), ((8421, 8450), 'cv2.blur', 'cv2.blur', (['gray_yellow', '(3, 3)'], {}), '(gray_yellow, (3, 3))\n', (8429, 8450), False, 'import cv2\n'), 
((8465, 8491), 'cv2.blur', 'cv2.blur', (['gray_red', '(3, 3)'], {}), '(gray_red, (3, 3))\n', (8473, 8491), False, 'import cv2\n'), ((8514, 8568), 'cv2.threshold', 'cv2.threshold', (['blur_yellow', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blur_yellow, 10, 255, cv2.THRESH_BINARY)\n', (8527, 8568), False, 'import cv2\n'), ((8585, 8636), 'cv2.threshold', 'cv2.threshold', (['blur_red', '(10)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blur_red, 10, 255, cv2.THRESH_BINARY)\n', (8598, 8636), False, 'import cv2\n'), ((8646, 8706), 'cv2.cvtColor', 'cv2.cvtColor', (['(thresh_yellow + thresh_red)', 'cv2.COLOR_GRAY2BGR'], {}), '(thresh_yellow + thresh_red, cv2.COLOR_GRAY2BGR)\n', (8658, 8706), False, 'import cv2\n'), ((8722, 8796), 'cv2.findContours', 'cv2.findContours', (['thresh_yellow', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(thresh_yellow, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n', (8738, 8796), False, 'import cv2\n'), ((8809, 8880), 'cv2.findContours', 'cv2.findContours', (['thresh_red', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_TC89_KCOS'], {}), '(thresh_red, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)\n', (8825, 8880), False, 'import cv2\n'), ((10696, 10723), 'cv2.xphoto.createSimpleWB', 'cv2.xphoto.createSimpleWB', ([], {}), '()\n', (10721, 10723), False, 'import cv2\n'), ((12076, 12094), 'glob.glob', 'glob', (['"""imgs/*.png"""'], {}), "('imgs/*.png')\n", (12080, 12094), False, 'from glob import glob\n'), ((1242, 1373), 'cv2.HoughCircles', 'cv2.HoughCircles', (['thresh_blue', 'cv2.HOUGH_GRADIENT', '(3.1)', '(RES / 8)'], {'param1': '(100)', 'param2': '(120)', 'minRadius': '(RES // 4)', 'maxRadius': '(RES // 2)'}), '(thresh_blue, cv2.HOUGH_GRADIENT, 3.1, RES / 8, param1=100,\n param2=120, minRadius=RES // 4, maxRadius=RES // 2)\n', (1258, 1373), False, 'import cv2\n'), ((1963, 2001), 'cv2.circle', 'cv2.circle', (['bdbg', 'c', 'r', '(255, 0, 0)', '(5)'], {}), '(bdbg, c, r, (255, 0, 0), 5)\n', (1973, 2001), False, 'import cv2\n'), ((3300, 3323), 'numpy.mean', 
'np.mean', (['llines'], {'axis': '(0)'}), '(llines, axis=0)\n', (3307, 3323), True, 'import numpy as np\n'), ((3340, 3363), 'numpy.mean', 'np.mean', (['rlines'], {'axis': '(0)'}), '(rlines, axis=0)\n', (3347, 3363), True, 'import numpy as np\n'), ((3500, 3513), 'math.atan', 'math.atan', (['lm'], {}), '(lm)\n', (3509, 3513), False, 'import math\n'), ((3527, 3540), 'math.atan', 'math.atan', (['rm'], {}), '(rm)\n', (3536, 3540), False, 'import math\n'), ((6583, 6630), 'cv2.line', 'cv2.line', (['ldbg', '(lb, 0)', '(lb, RES)', '(0, 255, 0)'], {}), '(ldbg, (lb, 0), (lb, RES), (0, 255, 0))\n', (6591, 6630), False, 'import cv2\n'), ((6632, 6679), 'cv2.line', 'cv2.line', (['ldbg', '(rb, 0)', '(rb, RES)', '(0, 255, 0)'], {}), '(ldbg, (rb, 0), (rb, RES), (0, 255, 0))\n', (6640, 6679), False, 'import cv2\n'), ((6681, 6728), 'cv2.line', 'cv2.line', (['ldbg', '(lb, 0)', '(lb, RES)', '(0, 255, 0)'], {}), '(ldbg, (lb, 0), (lb, RES), (0, 255, 0))\n', (6689, 6728), False, 'import cv2\n'), ((6730, 6777), 'cv2.line', 'cv2.line', (['ldbg', '(rb, 0)', '(rb, RES)', '(0, 255, 0)'], {}), '(ldbg, (rb, 0), (rb, RES), (0, 255, 0))\n', (6738, 6777), False, 'import cv2\n'), ((7352, 7377), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (7374, 7377), False, 'import cv2\n'), ((9068, 9093), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (9090, 9093), False, 'import cv2\n'), ((9675, 9700), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (9697, 9700), False, 'import cv2\n'), ((10511, 10551), 'cv2.cvtColor', 'cv2.cvtColor', (['warped', 'cv2.COLOR_BGR2GRAY'], {}), '(warped, cv2.COLOR_BGR2GRAY)\n', (10523, 10551), False, 'import cv2\n'), ((11021, 11059), 'cv2.circle', 'cv2.circle', (['gdbg', 'c', 'r', '(255, 0, 0)', '(5)'], {}), '(gdbg, c, r, (255, 0, 0), 5)\n', (11031, 11059), False, 'import cv2\n'), ((11069, 11119), 'cv2.line', 'cv2.line', (['gdbg', '(lb, 0)', '(lb, RES)', '(0, 255, 0)', '(2)'], {}), '(gdbg, (lb, 0), (lb, 
RES), (0, 255, 0), 2)\n', (11077, 11119), False, 'import cv2\n'), ((11121, 11171), 'cv2.line', 'cv2.line', (['gdbg', '(rb, 0)', '(rb, RES)', '(0, 255, 0)', '(2)'], {}), '(gdbg, (rb, 0), (rb, RES), (0, 255, 0), 2)\n', (11129, 11171), False, 'import cv2\n'), ((11173, 11223), 'cv2.line', 'cv2.line', (['gdbg', '(lb, 0)', '(lb, RES)', '(0, 255, 0)', '(2)'], {}), '(gdbg, (lb, 0), (lb, RES), (0, 255, 0), 2)\n', (11181, 11223), False, 'import cv2\n'), ((11225, 11275), 'cv2.line', 'cv2.line', (['gdbg', '(rb, 0)', '(rb, RES)', '(0, 255, 0)', '(2)'], {}), '(gdbg, (rb, 0), (rb, RES), (0, 255, 0), 2)\n', (11233, 11275), False, 'import cv2\n'), ((11991, 12005), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (12002, 12005), False, 'import cv2\n'), ((12107, 12122), 'cv2.imread', 'cv2.imread', (['url'], {}), '(url)\n', (12117, 12122), False, 'import cv2\n'), ((13496, 13520), 'gen.sheet_to_data', 'gen.sheet_to_data', (['sheet'], {}), '(sheet)\n', (13513, 13520), False, 'import gen\n'), ((1803, 1843), 'cv2.circle', 'cv2.circle', (['bdbg', 'c', 'r', '(240, 240, 0)', '(2)'], {}), '(bdbg, c, r, (240, 240, 0), 2)\n', (1813, 1843), False, 'import cv2\n'), ((6154, 6191), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['ini', 'fin'], {}), '(ini, fin)\n', (6181, 6191), False, 'import cv2\n'), ((6213, 6262), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'warp_matrix', '(RES, RES)'], {}), '(img, warp_matrix, (RES, RES))\n', (6232, 6262), False, 'import cv2\n'), ((6448, 6502), 'cv2.line', 'cv2.line', (['ldbg', '(lx1, ly1)', '(lx2, ly2)', '(0, 0, 255)', '(2)'], {}), '(ldbg, (lx1, ly1), (lx2, ly2), (0, 0, 255), 2)\n', (6456, 6502), False, 'import cv2\n'), ((6510, 6564), 'cv2.line', 'cv2.line', (['ldbg', '(rx1, ry1)', '(rx2, ry2)', '(0, 0, 255)', '(2)'], {}), '(ldbg, (rx1, ry1), (rx2, ry2), (0, 0, 255), 2)\n', (6518, 6564), False, 'import cv2\n'), ((7471, 7489), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (7486, 7489), False, 'import cv2\n'), ((9203, 
9221), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (9218, 9221), False, 'import cv2\n'), ((9810, 9828), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (9825, 9828), False, 'import cv2\n'), ((12414, 12471), 'cv2.resize', 'cv2.resize', (['img', '(RES, RES)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (RES, RES), interpolation=cv2.INTER_AREA)\n', (12424, 12471), False, 'import cv2\n'), ((12575, 12591), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (12583, 12591), False, 'import cv2\n'), ((4543, 4571), 'numpy.float32', 'np.float32', (['[l0, l1, r0, r1]'], {}), '([l0, l1, r0, r1])\n', (4553, 4571), True, 'import numpy as np\n'), ((4590, 4622), 'numpy.float32', 'np.float32', (['[l0d, l1d, r0d, r1d]'], {}), '([l0d, l1d, r0d, r1d])\n', (4600, 4622), True, 'import numpy as np\n'), ((5066, 5094), 'numpy.float32', 'np.float32', (['[l0, l1, r0, r1]'], {}), '([l0, l1, r0, r1])\n', (5076, 5094), True, 'import numpy as np\n'), ((5113, 5145), 'numpy.float32', 'np.float32', (['[l0d, l1d, r0d, r1d]'], {}), '([l0d, l1d, r0d, r1d])\n', (5123, 5145), True, 'import numpy as np\n'), ((7736, 7777), 'cv2.circle', 'cv2.circle', (['tdbg', 'ic', 'r', '(0, 240, 240)', '(1)'], {}), '(tdbg, ic, r, (0, 240, 240), 1)\n', (7746, 7777), False, 'import cv2\n'), ((9304, 9328), 'numpy.subtract', 'np.subtract', (['tee', 'center'], {}), '(tee, center)\n', (9315, 9328), True, 'import numpy as np\n'), ((9911, 9935), 'numpy.subtract', 'np.subtract', (['tee', 'center'], {}), '(tee, center)\n', (9922, 9935), True, 'import numpy as np\n'), ((3887, 3902), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3895, 3902), False, 'import math\n'), ((3908, 3923), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3916, 3923), False, 'import math\n'), ((3949, 3964), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3957, 3964), False, 'import math\n'), ((3970, 3985), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3978, 3985), False, 'import math\n'), 
((5491, 5519), 'numpy.float32', 'np.float32', (['[l0, l1, r0, r1]'], {}), '([l0, l1, r0, r1])\n', (5501, 5519), True, 'import numpy as np\n'), ((5538, 5570), 'numpy.float32', 'np.float32', (['[l0d, l1d, r0d, r1d]'], {}), '([l0d, l1d, r0d, r1d])\n', (5548, 5570), True, 'import numpy as np\n'), ((7570, 7594), 'numpy.subtract', 'np.subtract', (['tee', 'center'], {}), '(tee, center)\n', (7581, 7594), True, 'import numpy as np\n'), ((9583, 9624), 'cv2.circle', 'cv2.circle', (['rdbg', 'ic', 'r', '(0, 240, 240)', '(5)'], {}), '(rdbg, ic, r, (0, 240, 240), 5)\n', (9593, 9624), False, 'import cv2\n'), ((10190, 10229), 'cv2.circle', 'cv2.circle', (['rdbg', 'ic', 'r', '(0, 0, 255)', '(5)'], {}), '(rdbg, ic, r, (0, 0, 255), 5)\n', (10200, 10229), False, 'import cv2\n'), ((5996, 6024), 'numpy.float32', 'np.float32', (['[l0, l1, r0, r1]'], {}), '([l0, l1, r0, r1])\n', (6006, 6024), True, 'import numpy as np\n'), ((6043, 6075), 'numpy.float32', 'np.float32', (['[l0d, l1d, r0d, r1d]'], {}), '([l0d, l1d, r0d, r1d])\n', (6053, 6075), True, 'import numpy as np\n')] |
from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from prediction.apps import PredictionConfig
from django.http import JsonResponse
from players.models import Rusher_Avg
import pandas as pd
import numpy as np
def get_rusher_avgs(rusher_nfl_id):
rusher_row = Rusher_Avg.objects.filter(nfl_id=rusher_nfl_id).values_list('avg_speed', 'avg_acc')
speed, acc = list(rusher_row)[0][0], list(rusher_row)[0][1]
return speed, acc
def predict_game_weather(temp_input, wind_speed_input):
loaded_model = PredictionConfig.weather_model
y_pred = loaded_model.predict(np.array([temp_input, wind_speed_input]).reshape(1, -1))
return y_pred[0]
class NFL_Model_Predict(APIView):
def post(self, request, format=None):
data = request.data
if len(data) < 1:
return JsonResponse({'result':'error', 'message':'No parameters given'}, status=404)
values = []
rusher_id, temperature, wind_speed = -1, -1, -1
for key in data:
if key == "Temperature":
temperature = data[key]
continue
elif key == "WindSpeed":
wind_speed = data[key]
predicted_weather = predict_game_weather(temperature, wind_speed)
values.append(predicted_weather)
continue
elif key == "rusher_NflId":
rusher_id = data[key]
speed, acc = get_rusher_avgs(rusher_id)
values.append(data[key])
values.append(speed)
values.append(acc)
X = pd.Series(values).to_numpy().reshape(1, -1)
loaded_model = PredictionConfig.ml_model
y_pred = loaded_model.predict(X)
y_pred = pd.Series(y_pred)
response_dict = {"prediction": y_pred[0]}
return Response(response_dict, status=200) | [
"pandas.Series",
"django.http.JsonResponse",
"numpy.array",
"rest_framework.response.Response",
"players.models.Rusher_Avg.objects.filter"
] | [((1864, 1881), 'pandas.Series', 'pd.Series', (['y_pred'], {}), '(y_pred)\n', (1873, 1881), True, 'import pandas as pd\n'), ((1956, 1991), 'rest_framework.response.Response', 'Response', (['response_dict'], {'status': '(200)'}), '(response_dict, status=200)\n', (1964, 1991), False, 'from rest_framework.response import Response\n'), ((417, 464), 'players.models.Rusher_Avg.objects.filter', 'Rusher_Avg.objects.filter', ([], {'nfl_id': 'rusher_nfl_id'}), '(nfl_id=rusher_nfl_id)\n', (442, 464), False, 'from players.models import Rusher_Avg\n'), ((957, 1036), 'django.http.JsonResponse', 'JsonResponse', (["{'result': 'error', 'message': 'No parameters given'}"], {'status': '(404)'}), "({'result': 'error', 'message': 'No parameters given'}, status=404)\n", (969, 1036), False, 'from django.http import JsonResponse\n'), ((729, 769), 'numpy.array', 'np.array', (['[temp_input, wind_speed_input]'], {}), '([temp_input, wind_speed_input])\n', (737, 769), True, 'import numpy as np\n'), ((1713, 1730), 'pandas.Series', 'pd.Series', (['values'], {}), '(values)\n', (1722, 1730), True, 'import pandas as pd\n')] |
import numpy as np
#Questions on NumPy Sorting and Searching
# How to get the indices of the sorted array using NumPy in Python?
#argsort Returns the indices that would sort an array.
np.argsort(np.array([3, 1, 2]))
# Finding the k smallest values of a NumPy array
#sort Return a sorted copy of an array.
k = 4
arr = np.array([23, 12, 1, 3, 4, 5, 6])
arr1 = np.sort(arr)
arr1[:k]
# How to get the n-largest values of an array using NumPy?
k = 4
arr = np.array([23, 12, 1, 3, 4, 5, 6])
arr1 = np.sort(arr)
arr1[-k:]
# Sort the values in a matrix
#sort Return a sorted matrix
i = np.matrix('[4, 1; 12, 3]')
i.sort()
i
# Filter out integers from float numpy array
#astype Copy of the array, cast to a specified type.
ini_array = np.array([1.0, 1.2, 2.2, 2.0, 3.0, 2.0])
result = ini_array[ini_array != ini_array.astype(int)]
result
# Find the indices into a sorted array
#searchsorted Find indices where elements should be inserted to maintain order.
in_arr = [2, 3, 4, 5, 6]
np.searchsorted(in_arr, 4, side='right') | [
"numpy.searchsorted",
"numpy.array",
"numpy.sort",
"numpy.matrix"
] | [((319, 352), 'numpy.array', 'np.array', (['[23, 12, 1, 3, 4, 5, 6]'], {}), '([23, 12, 1, 3, 4, 5, 6])\n', (327, 352), True, 'import numpy as np\n'), ((360, 372), 'numpy.sort', 'np.sort', (['arr'], {}), '(arr)\n', (367, 372), True, 'import numpy as np\n'), ((454, 487), 'numpy.array', 'np.array', (['[23, 12, 1, 3, 4, 5, 6]'], {}), '([23, 12, 1, 3, 4, 5, 6])\n', (462, 487), True, 'import numpy as np\n'), ((495, 507), 'numpy.sort', 'np.sort', (['arr'], {}), '(arr)\n', (502, 507), True, 'import numpy as np\n'), ((582, 608), 'numpy.matrix', 'np.matrix', (['"""[4, 1; 12, 3]"""'], {}), "('[4, 1; 12, 3]')\n", (591, 608), True, 'import numpy as np\n'), ((731, 771), 'numpy.array', 'np.array', (['[1.0, 1.2, 2.2, 2.0, 3.0, 2.0]'], {}), '([1.0, 1.2, 2.2, 2.0, 3.0, 2.0])\n', (739, 771), True, 'import numpy as np\n'), ((982, 1022), 'numpy.searchsorted', 'np.searchsorted', (['in_arr', '(4)'], {'side': '"""right"""'}), "(in_arr, 4, side='right')\n", (997, 1022), True, 'import numpy as np\n'), ((196, 215), 'numpy.array', 'np.array', (['[3, 1, 2]'], {}), '([3, 1, 2])\n', (204, 215), True, 'import numpy as np\n')] |
import gym
import ma_gym
import random
import datetime
import numpy as np
import tensorflow as tf
def get_variable(name, shape):
return tf.get_variable(name, shape, tf.float32,
tf.initializers.truncated_normal(0,0.01))
def Qmix_mixer(agent_qs, state, state_dim, n_agents, n_h_mixer):
"""
Args:
agent_qs: shape [batch, n_agents]
state: shape [batch, state_dim]
state_dim: integer
n_agents: integer
n_h_mixer: integer
"""
agent_qs_reshaped = tf.reshape(agent_qs, [-1, 1, n_agents])
# n_h_mixer * n_agents because result will be reshaped into matrix
hyper_w_1 = get_variable('hyper_w_1', [state_dim, n_h_mixer*n_agents])
hyper_w_final = get_variable('hyper_w_final', [state_dim, n_h_mixer])
hyper_b_1 = tf.get_variable('hyper_b_1', [state_dim, n_h_mixer])
hyper_b_final_l1 = tf.layers.dense(inputs=state, units=n_h_mixer, activation=tf.nn.relu,
use_bias=False, name='hyper_b_final_l1')
hyper_b_final = tf.layers.dense(inputs=hyper_b_final_l1, units=1, activation=None,
use_bias=False, name='hyper_b_final')
# First layer
w1 = tf.abs(tf.matmul(state, hyper_w_1))
b1 = tf.matmul(state, hyper_b_1)
w1_reshaped = tf.reshape(w1, [-1, n_agents, n_h_mixer]) # reshape into batch of matrices
b1_reshaped = tf.reshape(b1, [-1, 1, n_h_mixer])
# [batch, 1, n_h_mixer]
hidden = tf.nn.elu(tf.matmul(agent_qs_reshaped, w1_reshaped) + b1_reshaped)
# Second layer
w_final = tf.abs(tf.matmul(state, hyper_w_final))
w_final_reshaped = tf.reshape(w_final, [-1, n_h_mixer, 1]) # reshape into batch of matrices
b_final_reshaped = tf.reshape(hyper_b_final, [-1, 1, 1])
# [batch, 1, 1]
y = tf.matmul(hidden, w_final_reshaped) + b_final_reshaped
q_tot = tf.reshape(y, [-1, 1])
return q_tot
class QMix():
def __init__(self, env, num_s, num_a, lr=0.0001, gamma=0.99, replace_target_iter=5000,
memory_size=200000, batch_size=256, epsilon=1, epsilon_decay=0.0001):
self.n_agents = 2
self.env = env
self.name = "qmix"
self.num_global_s = 2*num_s
self.num_s = num_s
self.num_a = num_a
self.lr = lr
self.gamma = gamma
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.epsilon_min = 0.1
self.learn_step_cnt = 0 # total learning step
self.episode_cnt = 0
self.memory = []
self.memory_counter = 0
self._build_net()
t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/target_net')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')
e_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/eval_hyper')
t_params += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/mixing_net' + '/target_hyper')
with tf.variable_scope('soft_replacement'):
self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/' + current_time
self.summary_writer = tf.summary.FileWriter(train_log_dir, self.sess.graph)
def _build_net(self): # we use parameter sharing among agents
with tf.variable_scope(self.name):
# ------------------ all inputs ------------------------
self.S = tf.placeholder(tf.float32, [None, self.num_global_s], name='S') # input Global State
self.s = tf.placeholder(tf.float32, [None, self.num_s], name='s1') # input state for agent1
self.S_ = tf.placeholder(tf.float32, [None, self.num_global_s], name='S_') # input Next Global State
self.s_ = tf.placeholder(tf.float32, [None, self.num_s], name='s1_') # input next state for agent1
self.R = tf.placeholder(tf.float32, [None, ], name='R') # input Reward
self.a = tf.placeholder(tf.float32, [None, self.num_a], name='a') # input Action onehot for agent1
self.done = tf.placeholder(tf.float32, [None, ], name='done') # input Done info ???
self.q_m_ = tf.placeholder(tf.float32, [None, ], name='q_value_next_max')
self.q_target = tf.placeholder(tf.float32, [None,], name='q_tot_target')
w_initializer, b_initializer = tf.random_normal_initializer(0., 0.1), tf.constant_initializer(0.0)
# ------------------ build evaluate_net ------------------
with tf.variable_scope('eval_net'):
a_fc1 = tf.layers.dense(self.s, 128, tf.nn.relu, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='agent_fc1_e')
# a_fc2 = tf.layers.dense(a_fc1, 128, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc2_e')
# a_fc3 = tf.layers.dense(a_fc2, 64, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc3_e')
self.q_eval = tf.layers.dense(a_fc1, self.num_a, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='q_e')
# ------------------ build target_net ------------------
with tf.variable_scope('target_net'):
a_fc1_ = tf.layers.dense(self.s_, 128, tf.nn.relu, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='agent_fc1_t')
# a_fc2_ = tf.layers.dense(a_fc1_, 128, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc2_t')
# a_fc3_ = tf.layers.dense(a_fc2_, 64, tf.nn.relu, kernel_initializer=w_initializer,
# bias_initializer=b_initializer, name='agent_fc3_t')
self.q_next = tf.layers.dense(a_fc1_, self.num_a, kernel_initializer=w_initializer,
bias_initializer=b_initializer, name='q_t')
# [batch*n_agents, 1]
self.q_selected = tf.reduce_sum(tf.multiply(self.q_eval, self.a), axis=1)
# ------------------ build mixing_net ------------------
with tf.variable_scope('mixing_net'):
# [batch, n_agents]
self.q_concat = tf.reshape(self.q_selected, [-1, self.n_agents])
self.q_concat_ =tf.reshape(self.q_m_, [-1, self.n_agents])
with tf.variable_scope('eval_hyper'):
self.Q_tot = Qmix_mixer(self.q_concat, self.S, self.num_global_s, self.n_agents, 32)
with tf.variable_scope('target_hyper'):
self.Q_tot_ = Qmix_mixer(self.q_concat_, self.S_, self.num_global_s, self.n_agents, 32)
# with tf.variable_scope('layer_mix_eval'):
# lin1 = tf.matmul(tf.reshape(self.q_concat, shape=[-1, 1, self.n_agents]), self.w1) + tf.reshape(self.b1, shape=[-1, 1, 32])
# a1 = tf.nn.elu(lin1, name='a1')
# self.Q_tot = tf.reshape(tf.matmul(a1, self.w2), shape=[-1, 1]) + self.b2
# with tf.variable_scope('layer_mix_target'):
# lin1_ = tf.matmul(tf.reshape(self.q_concat_, shape=[-1, 1, self.n_agents]), self.w1_) + tf.reshape(self.b1_, shape=[-1, 1, 32])
# a1_ = tf.nn.elu(lin1_, name='a1_')
# self.Q_tot_ = tf.reshape(tf.matmul(a1_, self.w2_), shape=[-1, 1]) + self.b2_
# todo: add q_target, loss, train_op
# with tf.variable_scope('q_target'):
with tf.variable_scope('loss'):
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, tf.squeeze(self.Q_tot), name='TD_error'))
# self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.Q_tot, name='TD_error'))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
def act(self, state):
if np.random.uniform() > self.epsilon :# pick the argmax action
s = np.array(state)
if len(s.shape) < 2:
s = np.array(state)[np.newaxis, :]
q_eval = self.sess.run(self.q_eval, feed_dict={self.s: s})
action = np.argmax(q_eval, axis=-1).tolist()
else: # pick random action
action = self.env.action_space.sample()
return action
def store(self, EXP):
self.memory_counter += 1
if len(self.memory) > self.memory_size:
# random replacement
index = np.random.randint(0, self.memory_size)
self.memory[index] = EXP
else:
self.memory.append(EXP)
def learn(self):
if len(self.memory) < self.batch_size :
return
# sample batch exp from memory
if self.learn_step_cnt % 10000 == 0:
print(self.name, 'update ----> learn_step_cnt', self.learn_step_cnt)
batch_exp = random.sample(self.memory, self.batch_size)
S, s, a, R, S_, s_, done = [[] for _ in range(7)]
for exp in batch_exp:
S.append(exp[0])
s.append([exp[1] , exp[2]])
a.append([exp[3] , exp[4]])
R.append(exp[5])
S_.append(exp[6])
s_.append([exp[7], exp[8]])
done.append(exp[9])
# to get q_tot
s = np.stack(s)
a = np.stack(a)
s_ = np.stack(s_)
s.shape = (self.batch_size*self.n_agents, self.num_s)
s_.shape = (self.batch_size*self.n_agents, self.num_s)
actions_1hot = np.zeros([self.batch_size, self.n_agents, self.num_a], dtype=np.float32)
grid = np.indices((self.batch_size, self.n_agents))
actions_1hot[grid[0], grid[1], a] = 1
actions_1hot.shape = (self.batch_size*self.n_agents, self.num_a)
# to get q_tot_
q_ = self.sess.run(self.q_next, feed_dict={self.s_: s_})
q_m_ = np.max(q_, axis=1)
q_tot_ = self.sess.run(self.Q_tot_, feed_dict={self.S_: S_, self.q_m_: q_m_})
q_target = np.array(R) + (1 - np.array(done)) * self.gamma * np.squeeze(q_tot_, axis=-1)
# import pdb; pdb.set_trace()
tvars = tf.trainable_variables()
tvars_vals_b = self.sess.run(tvars)
# f = open("before.txt", "a")
# for var, val in zip(tvars, tvars_vals):
# f.write(var,)
# f.close()
# update
_, cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.S: S, self.s:s, self.a: actions_1hot,
self.q_target: q_target, self.done: done})
# print('cost', cost)
tvars_vals_a = self.sess.run(tvars)
# f = open("after.txt", "a")
# for var, val in zip(tvars, tvars_vals):
# f.write(tvars_vals)
# f.close()
import pdb; pdb.set_trace()
self.write_summary_scalar('loss', cost, self.learn_step_cnt)
self.write_summary_scalar('epsilon', self.epsilon, self.learn_step_cnt)
self.write_summary_scalar('memory_cnt', self.memory_counter, self.learn_step_cnt)
self.epsilon = max(self.epsilon - self.epsilon_decay, self.epsilon_min) # decay epsilon
self.learn_step_cnt += 1
# check to do the soft replacement of target net
if self.learn_step_cnt % self.replace_target_iter == 0 and self.learn_step_cnt:
self.sess.run(self.target_replace_op)
def train(self):
for i in range(50000):
done_n = [False for _ in range(env.n_agents)]
ep_reward = 0
obs = env.reset()
while not all(done_n):
# env.render()
action = self.act(obs)
obs_n, reward_n, done_n, info = env.step(action)
ep_reward += sum(reward_n)
obs_glob = [obs[0] + obs[1]]
obs_glob_next = [obs_n[0] + obs_n[1]]
self.store(obs_glob + obs + action + [sum(reward_n)] + obs_glob_next + obs_n + [all(done_n)])
obs = obs_n
self.learn()
self.write_summary_scalar("ep_reward", ep_reward, self.learn_step_cnt)
def write_summary_scalar(self, tag, value, iteration):
self.summary_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)]), iteration)
env = gym.make('Switch2-v0')
alg = QMix(env, env.observation_space[0].shape[0], env.action_space[0].n)
# alg = QMix(env, env.observation_space.shape[0], env.action_space.n)
alg.train()
| [
"tensorflow.get_variable",
"tensorflow.multiply",
"numpy.array",
"tensorflow.Summary.Value",
"gym.make",
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.max",
"tensorflow.random_normal_initializer",
"numpy.stack",
"tensorflow.assign",
"tensorflow.matmul",
"tensorflow.trainable_variabl... | [((13083, 13105), 'gym.make', 'gym.make', (['"""Switch2-v0"""'], {}), "('Switch2-v0')\n", (13091, 13105), False, 'import gym\n'), ((531, 570), 'tensorflow.reshape', 'tf.reshape', (['agent_qs', '[-1, 1, n_agents]'], {}), '(agent_qs, [-1, 1, n_agents])\n', (541, 570), True, 'import tensorflow as tf\n'), ((810, 862), 'tensorflow.get_variable', 'tf.get_variable', (['"""hyper_b_1"""', '[state_dim, n_h_mixer]'], {}), "('hyper_b_1', [state_dim, n_h_mixer])\n", (825, 862), True, 'import tensorflow as tf\n'), ((887, 1001), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'state', 'units': 'n_h_mixer', 'activation': 'tf.nn.relu', 'use_bias': '(False)', 'name': '"""hyper_b_final_l1"""'}), "(inputs=state, units=n_h_mixer, activation=tf.nn.relu,\n use_bias=False, name='hyper_b_final_l1')\n", (902, 1001), True, 'import tensorflow as tf\n'), ((1057, 1166), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'hyper_b_final_l1', 'units': '(1)', 'activation': 'None', 'use_bias': '(False)', 'name': '"""hyper_b_final"""'}), "(inputs=hyper_b_final_l1, units=1, activation=None, use_bias\n =False, name='hyper_b_final')\n", (1072, 1166), True, 'import tensorflow as tf\n'), ((1271, 1298), 'tensorflow.matmul', 'tf.matmul', (['state', 'hyper_b_1'], {}), '(state, hyper_b_1)\n', (1280, 1298), True, 'import tensorflow as tf\n'), ((1317, 1358), 'tensorflow.reshape', 'tf.reshape', (['w1', '[-1, n_agents, n_h_mixer]'], {}), '(w1, [-1, n_agents, n_h_mixer])\n', (1327, 1358), True, 'import tensorflow as tf\n'), ((1410, 1444), 'tensorflow.reshape', 'tf.reshape', (['b1', '[-1, 1, n_h_mixer]'], {}), '(b1, [-1, 1, n_h_mixer])\n', (1420, 1444), True, 'import tensorflow as tf\n'), ((1654, 1693), 'tensorflow.reshape', 'tf.reshape', (['w_final', '[-1, n_h_mixer, 1]'], {}), '(w_final, [-1, n_h_mixer, 1])\n', (1664, 1693), True, 'import tensorflow as tf\n'), ((1750, 1787), 'tensorflow.reshape', 'tf.reshape', (['hyper_b_final', '[-1, 1, 1]'], {}), 
'(hyper_b_final, [-1, 1, 1])\n', (1760, 1787), True, 'import tensorflow as tf\n'), ((1885, 1907), 'tensorflow.reshape', 'tf.reshape', (['y', '[-1, 1]'], {}), '(y, [-1, 1])\n', (1895, 1907), True, 'import tensorflow as tf\n'), ((211, 252), 'tensorflow.initializers.truncated_normal', 'tf.initializers.truncated_normal', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (243, 252), True, 'import tensorflow as tf\n'), ((1233, 1260), 'tensorflow.matmul', 'tf.matmul', (['state', 'hyper_w_1'], {}), '(state, hyper_w_1)\n', (1242, 1260), True, 'import tensorflow as tf\n'), ((1598, 1629), 'tensorflow.matmul', 'tf.matmul', (['state', 'hyper_w_final'], {}), '(state, hyper_w_final)\n', (1607, 1629), True, 'import tensorflow as tf\n'), ((1817, 1852), 'tensorflow.matmul', 'tf.matmul', (['hidden', 'w_final_reshaped'], {}), '(hidden, w_final_reshaped)\n', (1826, 1852), True, 'import tensorflow as tf\n'), ((2757, 2842), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': "(self.name + '/target_net')"}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name +\n '/target_net')\n", (2774, 2842), True, 'import tensorflow as tf\n'), ((2858, 2937), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': "(self.name + '/eval_net')"}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/eval_net')\n", (2875, 2937), True, 'import tensorflow as tf\n'), ((2959, 3060), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': "(self.name + '/mixing_net' + '/eval_hyper')"}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name +\n '/mixing_net' + '/eval_hyper')\n", (2976, 3060), True, 'import tensorflow as tf\n'), ((3077, 3180), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': "(self.name + '/mixing_net' + '/target_hyper')"}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name +\n '/mixing_net' + '/target_hyper')\n", (3094, 3180), True, 'import 
tensorflow as tf\n'), ((3342, 3354), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3352, 3354), True, 'import tensorflow as tf\n'), ((3563, 3616), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['train_log_dir', 'self.sess.graph'], {}), '(train_log_dir, self.sess.graph)\n', (3584, 3616), True, 'import tensorflow as tf\n'), ((9624, 9667), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (9637, 9667), False, 'import random\n'), ((10032, 10043), 'numpy.stack', 'np.stack', (['s'], {}), '(s)\n', (10040, 10043), True, 'import numpy as np\n'), ((10056, 10067), 'numpy.stack', 'np.stack', (['a'], {}), '(a)\n', (10064, 10067), True, 'import numpy as np\n'), ((10081, 10093), 'numpy.stack', 'np.stack', (['s_'], {}), '(s_)\n', (10089, 10093), True, 'import numpy as np\n'), ((10252, 10324), 'numpy.zeros', 'np.zeros', (['[self.batch_size, self.n_agents, self.num_a]'], {'dtype': 'np.float32'}), '([self.batch_size, self.n_agents, self.num_a], dtype=np.float32)\n', (10260, 10324), True, 'import numpy as np\n'), ((10340, 10384), 'numpy.indices', 'np.indices', (['(self.batch_size, self.n_agents)'], {}), '((self.batch_size, self.n_agents))\n', (10350, 10384), True, 'import numpy as np\n'), ((10609, 10627), 'numpy.max', 'np.max', (['q_'], {'axis': '(1)'}), '(q_, axis=1)\n', (10615, 10627), True, 'import numpy as np\n'), ((10867, 10891), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (10889, 10891), True, 'import tensorflow as tf\n'), ((11560, 11575), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (11573, 11575), False, 'import pdb\n'), ((1496, 1537), 'tensorflow.matmul', 'tf.matmul', (['agent_qs_reshaped', 'w1_reshaped'], {}), '(agent_qs_reshaped, w1_reshaped)\n', (1505, 1537), True, 'import tensorflow as tf\n'), ((3191, 3228), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""soft_replacement"""'], {}), "('soft_replacement')\n", (3208, 3228), True, 'import 
tensorflow as tf\n'), ((3377, 3410), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3408, 3410), True, 'import tensorflow as tf\n'), ((3698, 3726), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.name'], {}), '(self.name)\n', (3715, 3726), True, 'import tensorflow as tf\n'), ((3818, 3881), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.num_global_s]'], {'name': '"""S"""'}), "(tf.float32, [None, self.num_global_s], name='S')\n", (3832, 3881), True, 'import tensorflow as tf\n'), ((3925, 3982), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.num_s]'], {'name': '"""s1"""'}), "(tf.float32, [None, self.num_s], name='s1')\n", (3939, 3982), True, 'import tensorflow as tf\n'), ((4031, 4095), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.num_global_s]'], {'name': '"""S_"""'}), "(tf.float32, [None, self.num_global_s], name='S_')\n", (4045, 4095), True, 'import tensorflow as tf\n'), ((4145, 4203), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.num_s]'], {'name': '"""s1_"""'}), "(tf.float32, [None, self.num_s], name='s1_')\n", (4159, 4203), True, 'import tensorflow as tf\n'), ((4256, 4300), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""R"""'}), "(tf.float32, [None], name='R')\n", (4270, 4300), True, 'import tensorflow as tf\n'), ((4340, 4396), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.num_a]'], {'name': '"""a"""'}), "(tf.float32, [None, self.num_a], name='a')\n", (4354, 4396), True, 'import tensorflow as tf\n'), ((4455, 4502), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""done"""'}), "(tf.float32, [None], name='done')\n", (4469, 4502), True, 'import tensorflow as tf\n'), ((4553, 4612), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""q_value_next_max"""'}), "(tf.float32, 
[None], name='q_value_next_max')\n", (4567, 4612), True, 'import tensorflow as tf\n'), ((4643, 4698), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""q_tot_target"""'}), "(tf.float32, [None], name='q_tot_target')\n", (4657, 4698), True, 'import tensorflow as tf\n'), ((8647, 8666), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8664, 8666), True, 'import numpy as np\n'), ((8724, 8739), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (8732, 8739), True, 'import numpy as np\n'), ((9224, 9262), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.memory_size'], {}), '(0, self.memory_size)\n', (9241, 9262), True, 'import numpy as np\n'), ((10734, 10745), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (10742, 10745), True, 'import numpy as np\n'), ((3268, 3283), 'tensorflow.assign', 'tf.assign', (['t', 'e'], {}), '(t, e)\n', (3277, 3283), True, 'import tensorflow as tf\n'), ((3436, 3459), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3457, 3459), False, 'import datetime\n'), ((4744, 4782), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (4772, 4782), True, 'import tensorflow as tf\n'), ((4783, 4811), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (4806, 4811), True, 'import tensorflow as tf\n'), ((4901, 4930), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""eval_net"""'], {}), "('eval_net')\n", (4918, 4930), True, 'import tensorflow as tf\n'), ((4956, 5086), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.s', '(128)', 'tf.nn.relu'], {'kernel_initializer': 'w_initializer', 'bias_initializer': 'b_initializer', 'name': '"""agent_fc1_e"""'}), "(self.s, 128, tf.nn.relu, kernel_initializer=w_initializer,\n bias_initializer=b_initializer, name='agent_fc1_e')\n", (4971, 5086), True, 'import tensorflow as tf\n'), ((5540, 5656), 'tensorflow.layers.dense', 
'tf.layers.dense', (['a_fc1', 'self.num_a'], {'kernel_initializer': 'w_initializer', 'bias_initializer': 'b_initializer', 'name': '"""q_e"""'}), "(a_fc1, self.num_a, kernel_initializer=w_initializer,\n bias_initializer=b_initializer, name='q_e')\n", (5555, 5656), True, 'import tensorflow as tf\n'), ((5786, 5817), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_net"""'], {}), "('target_net')\n", (5803, 5817), True, 'import tensorflow as tf\n'), ((5844, 5975), 'tensorflow.layers.dense', 'tf.layers.dense', (['self.s_', '(128)', 'tf.nn.relu'], {'kernel_initializer': 'w_initializer', 'bias_initializer': 'b_initializer', 'name': '"""agent_fc1_t"""'}), "(self.s_, 128, tf.nn.relu, kernel_initializer=w_initializer,\n bias_initializer=b_initializer, name='agent_fc1_t')\n", (5859, 5975), True, 'import tensorflow as tf\n'), ((6436, 6553), 'tensorflow.layers.dense', 'tf.layers.dense', (['a_fc1_', 'self.num_a'], {'kernel_initializer': 'w_initializer', 'bias_initializer': 'b_initializer', 'name': '"""q_t"""'}), "(a_fc1_, self.num_a, kernel_initializer=w_initializer,\n bias_initializer=b_initializer, name='q_t')\n", (6451, 6553), True, 'import tensorflow as tf\n'), ((6675, 6707), 'tensorflow.multiply', 'tf.multiply', (['self.q_eval', 'self.a'], {}), '(self.q_eval, self.a)\n', (6686, 6707), True, 'import tensorflow as tf\n'), ((6804, 6835), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mixing_net"""'], {}), "('mixing_net')\n", (6821, 6835), True, 'import tensorflow as tf\n'), ((6905, 6953), 'tensorflow.reshape', 'tf.reshape', (['self.q_selected', '[-1, self.n_agents]'], {}), '(self.q_selected, [-1, self.n_agents])\n', (6915, 6953), True, 'import tensorflow as tf\n'), ((6986, 7028), 'tensorflow.reshape', 'tf.reshape', (['self.q_m_', '[-1, self.n_agents]'], {}), '(self.q_m_, [-1, self.n_agents])\n', (6996, 7028), True, 'import tensorflow as tf\n'), ((8214, 8239), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (8231, 
8239), True, 'import tensorflow as tf\n'), ((8493, 8519), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train"""'], {}), "('train')\n", (8510, 8519), True, 'import tensorflow as tf\n'), ((10784, 10811), 'numpy.squeeze', 'np.squeeze', (['q_tot_'], {'axis': '(-1)'}), '(q_tot_, axis=-1)\n', (10794, 10811), True, 'import numpy as np\n'), ((7052, 7083), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""eval_hyper"""'], {}), "('eval_hyper')\n", (7069, 7083), True, 'import tensorflow as tf\n'), ((7212, 7245), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""target_hyper"""'], {}), "('target_hyper')\n", (7229, 7245), True, 'import tensorflow as tf\n'), ((8793, 8808), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (8801, 8808), True, 'import numpy as np\n'), ((8916, 8942), 'numpy.argmax', 'np.argmax', (['q_eval'], {'axis': '(-1)'}), '(q_eval, axis=-1)\n', (8925, 8942), True, 'import numpy as np\n'), ((8321, 8343), 'tensorflow.squeeze', 'tf.squeeze', (['self.Q_tot'], {}), '(self.Q_tot)\n', (8331, 8343), True, 'import tensorflow as tf\n'), ((8554, 8588), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.lr'], {}), '(self.lr)\n', (8579, 8588), True, 'import tensorflow as tf\n'), ((10753, 10767), 'numpy.array', 'np.array', (['done'], {}), '(done)\n', (10761, 10767), True, 'import numpy as np\n'), ((13015, 13060), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag', 'simple_value': 'value'}), '(tag=tag, simple_value=value)\n', (13031, 13060), True, 'import tensorflow as tf\n')] |
import json
from glob import glob
import numpy as np
import pytorch_lightning as pl
import torch
from audio_processing import random_crop
from prepare_data import get_id_from_path
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.model_selection import train_test_split
from torch.nn import functional as F
from torch.utils.data import DataLoader
from pytorch_lightning.callbacks import ModelCheckpoint
class AudioDataset(torch.utils.data.Dataset):
    """Map-style dataset yielding (feature tensor, label tensor) pairs.

    data: sequence of (npy_path, label) pairs; each .npy file is loaded and
    passed through ``random_crop`` with crop_size=``max_len`` before being
    converted to a float tensor. Labels are returned as long tensors.
    """

    def __init__(self, data, max_len=512):
        self.data = data
        self.max_len = max_len

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        npy_path, label = self.data[idx][0], self.data[idx][1]
        features = np.load(npy_path)
        features = random_crop(features, crop_size=self.max_len)
        return (
            torch.tensor(features, dtype=torch.float),
            torch.tensor(label, dtype=torch.long),
        )
class AudioClassifier(pl.LightningModule):
    """Bidirectional-LSTM audio classifier with an auxiliary reconstruction head.

    The model classifies a (batch, time, input_size) sequence into ``classes``
    categories and simultaneously reconstructs the input sequence; the L1
    reconstruction loss is mixed in with weight ``reconstruction_weight``.
    The original code triplicated the loss computation across training,
    validation, and test steps — it is now shared via private helpers.
    """

    def __init__(self, classes=8, input_size=128, reconstruction_weight=0.1, p=0.3):
        super().__init__()
        self.save_hyperparameters()
        self.reconstruction_weight = reconstruction_weight
        self.input_size = input_size
        self.p = p  # dropout probability, applied at several points in forward()
        self.do = torch.nn.Dropout(p=self.p)
        self.lstm1 = torch.nn.LSTM(
            input_size=self.input_size,
            hidden_size=self.input_size,
            bidirectional=True,
            batch_first=True,
        )
        self.lstm2 = torch.nn.LSTM(
            input_size=2 * self.input_size,
            hidden_size=self.input_size,
            bidirectional=True,
            batch_first=True,
        )
        # Classification head: pooled BiLSTM features -> hidden -> class logits.
        self.fc1 = torch.nn.Linear(self.input_size * 2, self.input_size)
        self.fy = torch.nn.Linear(self.input_size, classes)
        # Reconstruction head: per-timestep BiLSTM features -> input space.
        self.fc2 = torch.nn.Linear(self.input_size * 2, input_size)

    def forward(self, x):
        """Return (class logits, reconstructed input clamped to [-1, 1])."""
        x = self.do(x)
        x, _ = self.lstm1(x)
        x_seq, _ = self.lstm2(x)
        # Max-pool over the time dimension for the classification branch.
        x, _ = torch.max(self.do(x_seq), dim=1)
        x = F.relu(self.do(self.fc1(x)))
        y_hat = self.fy(x)
        x_reconstruction = torch.clamp(self.fc2(self.do(x_seq)), -1.0, 1.0)
        return y_hat, x_reconstruction

    def _step_losses(self, batch):
        """Shared loss computation: returns (total, cls loss, recon loss, logits, targets)."""
        x, y = batch
        y_hat, x_reconstruction = self(x)
        loss_y = F.cross_entropy(y_hat, y)
        loss_x = F.l1_loss(x, x_reconstruction)
        loss = loss_y + self.reconstruction_weight * loss_x
        return loss, loss_y, loss_x, y_hat, y

    def _eval_step(self, batch, prefix):
        """Compute losses/accuracy for an eval batch and log them under ``prefix``."""
        loss, loss_y, loss_x, y_hat, y = self._step_losses(batch)
        _, predicted = torch.max(y_hat, 1)
        acc = (predicted == y).double().mean()
        self.log(prefix + "_loss", loss)
        self.log(prefix + "_loss_y", loss_y)
        self.log(prefix + "_loss_x", loss_x)
        self.log(prefix + "_acc", acc)

    def training_step(self, batch, batch_idx):
        loss, _, _, _, _ = self._step_losses(batch)
        return loss

    def validation_step(self, batch, batch_idx):
        self._eval_step(batch, "valid")

    def test_step(self, batch, batch_idx):
        self._eval_step(batch, "test")

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-4)
class DecayLearningRate(pl.Callback):
    """Multiplies every optimizer's learning rate by 0.97 after each training epoch."""

    def __init__(self):
        self.old_lrs = []

    def on_train_start(self, trainer, pl_module):
        # Snapshot the starting learning rate of every param group, per optimizer.
        for optimizer in trainer.optimizers:
            self.old_lrs.append([pg["lr"] for pg in optimizer.param_groups])

    def on_train_epoch_end(self, trainer, pl_module, outputs):
        # Decay each tracked rate and write it back into the optimizer.
        for opt_idx, optimizer in enumerate(trainer.optimizers):
            decayed = [lr * 0.97 for lr in self.old_lrs[opt_idx]]
            for param_group, new_lr in zip(optimizer.param_groups, decayed):
                param_group["lr"] = new_lr
            self.old_lrs[opt_idx] = decayed
if __name__ == "__main__":
    import argparse
    from pathlib import Path
    # CLI: where the dataset metadata lives, where the precomputed .npy
    # features live, and the reconstruction-loss weight for the model.
    parser = argparse.ArgumentParser()
    parser.add_argument("--metadata_path")
    parser.add_argument("--mp3_path")
    parser.add_argument("--reconstruction_weight", type=float)
    args = parser.parse_args()
    metadata_path = Path(args.metadata_path)
    mp3_path = Path(args.mp3_path)
    batch_size = 32
    epochs = 256
    reconstruction_weight = args.reconstruction_weight
    # mapping.json: genre name -> class index; tracks_genre.json: track id -> genre.
    CLASS_MAPPING = json.load(open(metadata_path / "mapping.json"))
    id_to_genres = json.load(open(metadata_path / "tracks_genre.json"))
    id_to_genres = {int(k): v for k, v in id_to_genres.items()}
    # One .npy feature file per track; label derived from the track id in the path.
    files = sorted(list(glob(str(mp3_path / "*/*.npy"))))
    labels = [CLASS_MAPPING[id_to_genres[int(get_id_from_path(x))]] for x in files]
    print(len(labels))
    samples = list(zip(files, labels))
    # Stratified 80/10/10-ish split: 20% test, then 10% of the remainder as val.
    _train, test = train_test_split(
        samples, test_size=0.2, random_state=1337, stratify=[a[1] for a in samples]
    )
    train, val = train_test_split(
        _train, test_size=0.1, random_state=1337, stratify=[a[1] for a in _train]
    )
    train_data = AudioDataset(train)
    test_data = AudioDataset(test)
    val_data = AudioDataset(val)
    # NOTE(review): shuffle=True on the validation loader is unusual — confirm intended.
    train_loader = DataLoader(train_data, batch_size=batch_size, num_workers=8, shuffle=True)
    val_loader = DataLoader(val_data, batch_size=batch_size, num_workers=8, shuffle=True)
    test_loader = DataLoader(
        test_data, batch_size=batch_size, shuffle=False, num_workers=8
    )
    model = AudioClassifier(reconstruction_weight=reconstruction_weight)
    # One TensorBoard run per reconstruction weight (version string encodes it).
    logger = TensorBoardLogger(
        save_dir="../",
        version="Lambda=%s" % reconstruction_weight,
        name="lightning_logs",
    )
    # NOTE(review): `filepath`/`prefix` are pre-1.0 PyTorch Lightning
    # ModelCheckpoint arguments — confirm the pinned PL version supports them.
    checkpoint_callback = ModelCheckpoint(
        monitor="valid_acc",
        mode="max",
        filepath="../models/",
        prefix="model_%s" % reconstruction_weight,
    )
    trainer = pl.Trainer(
        max_epochs=epochs,
        gpus=1,
        logger=logger,
        checkpoint_callback=checkpoint_callback,
        callbacks=[DecayLearningRate()],
    )
    trainer.fit(model, train_loader, val_loader)
    trainer.test(test_dataloaders=test_loader)
| [
"pytorch_lightning.callbacks.ModelCheckpoint",
"torch.nn.Dropout",
"torch.nn.functional.l1_loss",
"prepare_data.get_id_from_path",
"argparse.ArgumentParser",
"pathlib.Path",
"sklearn.model_selection.train_test_split",
"torch.nn.LSTM",
"torch.max",
"pytorch_lightning.loggers.TensorBoardLogger",
"... | [((4638, 4663), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4661, 4663), False, 'import argparse\n'), ((4861, 4885), 'pathlib.Path', 'Path', (['args.metadata_path'], {}), '(args.metadata_path)\n', (4865, 4885), False, 'from pathlib import Path\n'), ((4901, 4920), 'pathlib.Path', 'Path', (['args.mp3_path'], {}), '(args.mp3_path)\n', (4905, 4920), False, 'from pathlib import Path\n'), ((5446, 5543), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples'], {'test_size': '(0.2)', 'random_state': '(1337)', 'stratify': '[a[1] for a in samples]'}), '(samples, test_size=0.2, random_state=1337, stratify=[a[1] for\n a in samples])\n', (5462, 5543), False, 'from sklearn.model_selection import train_test_split\n'), ((5572, 5667), 'sklearn.model_selection.train_test_split', 'train_test_split', (['_train'], {'test_size': '(0.1)', 'random_state': '(1337)', 'stratify': '[a[1] for a in _train]'}), '(_train, test_size=0.1, random_state=1337, stratify=[a[1] for\n a in _train])\n', (5588, 5667), False, 'from sklearn.model_selection import train_test_split\n'), ((5804, 5878), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'batch_size', 'num_workers': '(8)', 'shuffle': '(True)'}), '(train_data, batch_size=batch_size, num_workers=8, shuffle=True)\n', (5814, 5878), False, 'from torch.utils.data import DataLoader\n'), ((5896, 5968), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {'batch_size': 'batch_size', 'num_workers': '(8)', 'shuffle': '(True)'}), '(val_data, batch_size=batch_size, num_workers=8, shuffle=True)\n', (5906, 5968), False, 'from torch.utils.data import DataLoader\n'), ((5987, 6061), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(8)'}), '(test_data, batch_size=batch_size, shuffle=False, num_workers=8)\n', (5997, 6061), False, 'from torch.utils.data import DataLoader\n'), ((6164, 6269), 
'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', ([], {'save_dir': '"""../"""', 'version': "('Lambda=%s' % reconstruction_weight)", 'name': '"""lightning_logs"""'}), "(save_dir='../', version='Lambda=%s' %\n reconstruction_weight, name='lightning_logs')\n", (6181, 6269), False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((6324, 6442), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'monitor': '"""valid_acc"""', 'mode': '"""max"""', 'filepath': '"""../models/"""', 'prefix': "('model_%s' % reconstruction_weight)"}), "(monitor='valid_acc', mode='max', filepath='../models/',\n prefix='model_%s' % reconstruction_weight)\n", (6339, 6442), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((745, 762), 'numpy.load', 'np.load', (['npy_path'], {}), '(npy_path)\n', (752, 762), True, 'import numpy as np\n'), ((780, 822), 'audio_processing.random_crop', 'random_crop', (['array'], {'crop_size': 'self.max_len'}), '(array, crop_size=self.max_len)\n', (791, 822), False, 'from audio_processing import random_crop\n'), ((841, 879), 'torch.tensor', 'torch.tensor', (['array'], {'dtype': 'torch.float'}), '(array, dtype=torch.float)\n', (853, 879), False, 'import torch\n'), ((896, 933), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.long'}), '(label, dtype=torch.long)\n', (908, 933), False, 'import torch\n'), ((1292, 1318), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {'p': 'self.p'}), '(p=self.p)\n', (1308, 1318), False, 'import torch\n'), ((1341, 1453), 'torch.nn.LSTM', 'torch.nn.LSTM', ([], {'input_size': 'self.input_size', 'hidden_size': 'self.input_size', 'bidirectional': '(True)', 'batch_first': '(True)'}), '(input_size=self.input_size, hidden_size=self.input_size,\n bidirectional=True, batch_first=True)\n', (1354, 1453), False, 'import torch\n'), ((1530, 1646), 'torch.nn.LSTM', 'torch.nn.LSTM', ([], {'input_size': '(2 * self.input_size)', 'hidden_size': 'self.input_size', 'bidirectional': 
'(True)', 'batch_first': '(True)'}), '(input_size=2 * self.input_size, hidden_size=self.input_size,\n bidirectional=True, batch_first=True)\n', (1543, 1646), False, 'import torch\n'), ((1722, 1775), 'torch.nn.Linear', 'torch.nn.Linear', (['(self.input_size * 2)', 'self.input_size'], {}), '(self.input_size * 2, self.input_size)\n', (1737, 1775), False, 'import torch\n'), ((1794, 1835), 'torch.nn.Linear', 'torch.nn.Linear', (['self.input_size', 'classes'], {}), '(self.input_size, classes)\n', (1809, 1835), False, 'import torch\n'), ((1856, 1904), 'torch.nn.Linear', 'torch.nn.Linear', (['(self.input_size * 2)', 'input_size'], {}), '(self.input_size * 2, input_size)\n', (1871, 1904), False, 'import torch\n'), ((2383, 2408), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['y_hat', 'y'], {}), '(y_hat, y)\n', (2398, 2408), True, 'from torch.nn import functional as F\n'), ((2426, 2456), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['x', 'x_reconstruction'], {}), '(x, x_reconstruction)\n', (2435, 2456), True, 'from torch.nn import functional as F\n'), ((2650, 2675), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['y_hat', 'y'], {}), '(y_hat, y)\n', (2665, 2675), True, 'from torch.nn import functional as F\n'), ((2693, 2723), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['x', 'x_reconstruction'], {}), '(x, x_reconstruction)\n', (2702, 2723), True, 'from torch.nn import functional as F\n'), ((2809, 2828), 'torch.max', 'torch.max', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (2818, 2828), False, 'import torch\n'), ((3157, 3182), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['y_hat', 'y'], {}), '(y_hat, y)\n', (3172, 3182), True, 'from torch.nn import functional as F\n'), ((3200, 3230), 'torch.nn.functional.l1_loss', 'F.l1_loss', (['x', 'x_reconstruction'], {}), '(x, x_reconstruction)\n', (3209, 3230), True, 'from torch.nn import functional as F\n'), ((3316, 3335), 'torch.max', 'torch.max', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (3325, 3335), False, 
'import torch\n'), ((5324, 5343), 'prepare_data.get_id_from_path', 'get_id_from_path', (['x'], {}), '(x)\n', (5340, 5343), False, 'from prepare_data import get_id_from_path\n')] |
import numpy as np
import copy
from util import print_iter
def q_from_v(env, V, s, gamma=1):
    """Return the action-value vector q(s, .) implied by state values V.

    env.P[s][a] is a list of (prob, next_state, reward, done) transitions;
    each action's value is the expected one-step return under V.
    """
    q = np.zeros(env.nA)
    for a in range(env.nA):
        q[a] = sum(
            prob * (reward + gamma * V[next_state])
            for prob, next_state, reward, done in env.P[s][a]
        )
    return q
def policy_evaluation(env, policy, gamma=1, theta=1e-8):
    """Iteratively evaluate `policy`, returning its state-value function V.

    Uses in-place (Gauss-Seidel style) sweeps over all states until the
    largest single-state change in a sweep drops below `theta`.
    """
    V = np.zeros(env.nS)
    while True:
        delta = 0
        for s in range(env.nS):
            backup = 0
            for a, pi_sa in enumerate(policy[s]):
                for prob, next_state, reward, done in env.P[s][a]:
                    backup += pi_sa * prob * (reward + gamma * V[next_state])
            delta = max(delta, abs(backup - V[s]))
            V[s] = backup
        if delta < theta:
            return V
def truncated_policy_evaluation(env, policy, V, max_it=1, gamma=1):
    """Run exactly `max_it` in-place evaluation sweeps of `policy` on V.

    Unlike full policy evaluation there is no convergence test; V is
    mutated and also returned.
    """
    for _ in range(max_it):
        for s in range(env.nS):
            backup = 0
            for a, pi_sa in enumerate(policy[s]):
                backup += sum(
                    pi_sa * prob * (reward + gamma * V[next_state])
                    for prob, next_state, reward, done in env.P[s][a]
                )
            V[s] = backup
    return V
def policy_improvement(env, V, gamma=1):
    """Return the deterministic greedy policy w.r.t. V as a one-hot array.

    Fixes two oddities of the original: `np.zeros(...) / env.nA` was a
    no-op division (apparently copied from an `np.ones` initializer), and
    `np.argwhere(q == max(q)).flatten()[0]` is simply the first argmax.
    Behavior is unchanged: ties still resolve to the lowest action index.
    """
    policy = np.zeros([env.nS, env.nA])
    for s in range(env.nS):
        q = q_from_v(env, V, s, gamma)
        policy[s, np.argmax(q)] = 1.0
    return policy
def policy_iteration(env, gamma=1, theta=1e-8):
    """Alternate full policy evaluation and greedy improvement until stable.

    Starts from the uniform random policy; returns (policy, V). Progress is
    printed via print_iter after every non-final iteration.
    """
    policy = np.ones([env.nS, env.nA]) / env.nA
    iteration = 1
    while True:
        V = policy_evaluation(env, policy, gamma, theta)
        improved = policy_improvement(env, V, gamma)
        # Stable policy => converged; the old and new policies are identical.
        if (improved == policy).all():
            return policy, V
        policy = copy.copy(improved)
        print_iter(iteration, V, env, policy)
        iteration += 1
def truncated_policy_iteration(env, max_it=90000000000, gamma=1, theta=1e-8):
    """Policy iteration with truncated evaluation sweeps; returns (policy, V).

    Stops when the value function changes by less than `theta` between
    outer iterations, or after `max_it` outer iterations.
    """
    V = np.zeros(env.nS)
    policy = np.zeros([env.nS, env.nA]) / env.nA
    iteration = 1
    while iteration < max_it:
        policy = policy_improvement(env, V, gamma)
        previous_V = copy.copy(V)
        # NOTE(review): the outer budget `max_it` is also passed as the inner
        # sweep count of truncated_policy_evaluation — confirm this is intended
        # rather than a fixed small number of sweeps.
        V = truncated_policy_evaluation(env, policy, V, max_it, gamma)
        if max(abs(V - previous_V)) < theta:
            break
        print_iter(iteration, V, env, policy)
        iteration += 1
    return policy, V
def value_iteration(env, max_iter=9000000000, gamma=1, theta=1e-8):
    """Value iteration: sweep Bellman-optimality backups until convergence,
    then extract the greedy policy. Returns (policy, V)."""
    V = np.zeros(env.nS)
    iteration = 1
    while iteration < max_iter:
        delta = 0
        for s in range(env.nS):
            previous = V[s]
            V[s] = max(q_from_v(env, V, s, gamma))
            delta = max(delta, abs(V[s] - previous))
        if delta < theta:
            break
        print_iter(iteration, V, env)
        iteration += 1
    policy = policy_improvement(env, V, gamma)
    return policy, V
| [
"numpy.zeros",
"numpy.ones",
"util.print_iter",
"copy.copy"
] | [((103, 119), 'numpy.zeros', 'np.zeros', (['env.nA'], {}), '(env.nA)\n', (111, 119), True, 'import numpy as np\n'), ((362, 378), 'numpy.zeros', 'np.zeros', (['env.nS'], {}), '(env.nS)\n', (370, 378), True, 'import numpy as np\n'), ((2057, 2073), 'numpy.zeros', 'np.zeros', (['env.nS'], {}), '(env.nS)\n', (2065, 2073), True, 'import numpy as np\n'), ((2516, 2532), 'numpy.zeros', 'np.zeros', (['env.nS'], {}), '(env.nS)\n', (2524, 2532), True, 'import numpy as np\n'), ((1329, 1355), 'numpy.zeros', 'np.zeros', (['[env.nS, env.nA]'], {}), '([env.nS, env.nA])\n', (1337, 1355), True, 'import numpy as np\n'), ((1624, 1649), 'numpy.ones', 'np.ones', (['[env.nS, env.nA]'], {}), '([env.nS, env.nA])\n', (1631, 1649), True, 'import numpy as np\n'), ((1873, 1894), 'copy.copy', 'copy.copy', (['new_policy'], {}), '(new_policy)\n', (1882, 1894), False, 'import copy\n'), ((1903, 1932), 'util.print_iter', 'print_iter', (['i', 'V', 'env', 'policy'], {}), '(i, V, env, policy)\n', (1913, 1932), False, 'from util import print_iter\n'), ((2087, 2113), 'numpy.zeros', 'np.zeros', (['[env.nS, env.nA]'], {}), '([env.nS, env.nA])\n', (2095, 2113), True, 'import numpy as np\n'), ((2222, 2234), 'copy.copy', 'copy.copy', (['V'], {}), '(V)\n', (2231, 2234), False, 'import copy\n'), ((2372, 2401), 'util.print_iter', 'print_iter', (['i', 'V', 'env', 'policy'], {}), '(i, V, env, policy)\n', (2382, 2401), False, 'from util import print_iter\n'), ((2787, 2808), 'util.print_iter', 'print_iter', (['i', 'V', 'env'], {}), '(i, V, env)\n', (2797, 2808), False, 'from util import print_iter\n')] |
# File name: planet.py
# Author: <NAME>
# Creation Date: 9/October/2018
# Description: 2D numerical modeling of a planet as an N-body
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from itertools import count
# constants
PI = 3.14159265359
class Planet:
    """2-D model of a planet orbiting a star at the origin.

    Stores mass plus growing position (dx, dy) and velocity (vx, vy)
    trajectory lists; `orbit` integrates the motion with the Euler-Cromer
    scheme using GM = 4*pi^2 (presumably AU / year / solar-mass units —
    TODO confirm against the caller's conventions).

    Fixes vs. the original: plot() labeled both axes with plt.xlabel
    (y-axis label was never set), and output_txt() leaked its file handle
    and wrote records with no newlines.
    """

    _ids = count(0)  # class-wide counter assigning a unique id per instance

    def __init__(self, i_m, x_i, y_i, vx_i=0, vy_i=0, i_key=None):
        """
        :param i_m: mass
        :param x_i: initial x position
        :param y_i: initial y position
        :param vx_i: initial x velocity
        :param vy_i: initial y velocity
        :param i_key: key string that can be used identify the object in a list or a tree
        """
        self.id = next(self._ids)
        self.m = i_m
        self.dx = [x_i]
        self.dy = [y_i]
        self.vx = [vx_i]
        self.vy = [vy_i]
        if i_key is None:
            self.key = "planet " + str(self.id)
        else:
            self.key = i_key

    def orbit(self, n, dt):
        """
        Calculates planet's orbit around a star without the influence of other planets.
        Euler-Cromer: velocity is updated first, then position uses the new velocity.
        :param n: number of time steps
        :param dt: time step size
        :return: x and y trajectory lists
        """
        for i in range(n):
            ri = self.R_i(i)
            # Acceleration ~ -GM * r / |r|^3 with GM = 4*pi^2.
            self.vx.append(self.vx[i] - (4 * PI * PI * self.dx[i] * dt / ri ** 3))
            self.vy.append(self.vy[i] - (4 * PI * PI * self.dy[i] * dt / ri ** 3))
            self.dx.append(self.dx[i] + self.vx[i + 1] * dt)
            self.dy.append(self.dy[i] + self.vy[i + 1] * dt)
        return self.dx, self.dy

    def r_i(self, i):
        """
        :param i: time step point
        :return: position vector at step i as a numpy array
        """
        return np.array([self.dx[i], self.dy[i]])

    def R_i(self, i):
        """
        :param i: time step point
        :return: magnitude of position vector at i
        """
        return sqrt((self.dx[i] ** 2) + (self.dy[i] ** 2))

    def r_ab_i(self, i, planet_b):
        """
        :param i: time step point
        :param planet_b: The other planet which the distance is being calculated from
        :return: The position vector from planet_b to this planet (a - b)
        """
        a = np.array([self.dx[i], self.dy[i]])
        b = np.array([planet_b.dx[i], planet_b.dy[i]])
        return a - b

    def R_ab_i(self, i, planet_b):
        """
        :param i: the number of the current time step point
        :param planet_b: The other planet which the distance is being calculated from
        :return: scalar distance between the two planets at step i
        """
        x_temp = (self.dx[i] - planet_b.dx[i]) ** 2
        y_temp = (self.dy[i] - planet_b.dy[i]) ** 2
        return sqrt(x_temp + y_temp)

    def set_params(self, i_m=None, x_i=None, y_i=None, vx_i=None, vy_i=None):
        """
        Set all physical parameters of the planet object; any parameter left
        as None keeps its current value. Positions/velocities reset the
        trajectory lists to a single initial entry.
        """
        if i_m is not None:
            self.m = i_m
        if x_i is not None:
            self.dx = [x_i]
        if y_i is not None:
            self.dy = [y_i]
        if vx_i is not None:
            self.vx = [vx_i]
        if vy_i is not None:
            self.vy = [vy_i]

    def set_key(self, i_key):
        """
        :param i_key: set key identifying object. must be a string.
        :raises ValueError: if i_key is not a string
        """
        if isinstance(i_key, str) is True:
            self.key = i_key
        else:
            raise ValueError("Key value must be a string.")

    def get_key(self):
        """
        :return: returns planets key
        """
        return self.key

    def reset(self):
        """
        Resets the trajectory lists for Planet object to single zero entries.
        """
        del self.dx, self.dy, self.vx, self.vy
        self.dx = [0]
        self.dy = [0]
        self.vx = [0]
        self.vy = [0]

    def output_txt(self):
        """
        Appends the stored trajectory to trajectory.txt, one record per line.
        Uses a context manager so the handle is closed even on error (the
        original left the file open), and terminates each record with a
        newline so entries no longer run together.
        """
        with open("trajectory.txt", "a") as data:
            data.write("[" + str(datetime.now()) + "] Projectile Trajectory:\n")
            for i in range(len(self.dx)):
                data.write(
                    "dx: " + str(self.dx[i]) + " dy: " + str(self.dy[i]) +
                    " vx: " + str(self.vx[i]) + " vy: " + str(self.vy[i]) + "\n"
                )

    def plot(self):
        """
        Plots the trajectory of the planet.
        Bug fix: the second axis label used plt.xlabel, so the y-axis was
        never labeled.
        """
        plt.xlabel("x-Position")
        plt.ylabel("y-Position")
        plt.plot(self.dx, self.dy)
        plt.show()
| [
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"numpy.array",
"datetime.datetime.now",
"itertools.count",
"matplotlib.pyplot.show"
] | [((334, 342), 'itertools.count', 'count', (['(0)'], {}), '(0)\n', (339, 342), False, 'from itertools import count\n'), ((1739, 1773), 'numpy.array', 'np.array', (['[self.dx[i], self.dy[i]]'], {}), '([self.dx[i], self.dy[i]])\n', (1747, 1773), True, 'import numpy as np\n'), ((1920, 1959), 'math.sqrt', 'sqrt', (['(self.dx[i] ** 2 + self.dy[i] ** 2)'], {}), '(self.dx[i] ** 2 + self.dy[i] ** 2)\n', (1924, 1959), False, 'from math import sqrt\n'), ((2238, 2272), 'numpy.array', 'np.array', (['[self.dx[i], self.dy[i]]'], {}), '([self.dx[i], self.dy[i]])\n', (2246, 2272), True, 'import numpy as np\n'), ((2285, 2327), 'numpy.array', 'np.array', (['[planet_b.dx[i], planet_b.dy[i]]'], {}), '([planet_b.dx[i], planet_b.dy[i]])\n', (2293, 2327), True, 'import numpy as np\n'), ((2765, 2786), 'math.sqrt', 'sqrt', (['(x_temp + y_temp)'], {}), '(x_temp + y_temp)\n', (2769, 2786), False, 'from math import sqrt\n'), ((4432, 4456), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-Position"""'], {}), "('x-Position')\n", (4442, 4456), True, 'import matplotlib.pyplot as plt\n'), ((4465, 4489), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""y-Position"""'], {}), "('y-Position')\n", (4475, 4489), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4524), 'matplotlib.pyplot.plot', 'plt.plot', (['self.dx', 'self.dy'], {}), '(self.dx, self.dy)\n', (4506, 4524), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4541, 4543), True, 'import matplotlib.pyplot as plt\n'), ((4033, 4047), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4045, 4047), False, 'from datetime import datetime\n')] |
import pandas as pd
from pathlib import Path
import os
import numpy as np
def calculate_voltage_steps(df_phases):
    """Mark sudden sample-to-sample voltage steps per phase.

    df_phases: iterable of per-phase DataFrames with a 'Value' column.
    Returns a DataFrame with one '1VStepsP<n>' column per phase holding
    'Up' where the value rose by more than 1 and 'Down' where it fell by
    more than 1, indexed by the rows where the step occurred.
    """
    result_df = pd.DataFrame()
    for phase_number, phase_df in enumerate(df_phases, start=1):
        diffs = phase_df.Value.diff()
        column = "1VStepsP" + str(phase_number)
        ups = phase_df.iloc[list(np.where(diffs > 1)[0]), :].assign(**{column: 'Up'})[[column]]
        downs = phase_df.iloc[list(np.where(diffs < -1)[0]), :].assign(**{column: 'Down'})[[column]]
        marks = pd.concat([ups, downs]).sort_index()
        result_df = pd.concat([marks, result_df], axis=1).sort_index()
    return result_df
def calculate_voltage_range(df_phases, df_sdp):
    """Flag samples whose voltage exceeds 240 for each phase.

    Appends one 'Over240P<n>' column per phase (value 'Over' at offending
    rows) to df_sdp and returns the combined, index-sorted DataFrame.
    """
    for phase_number, phase_df in enumerate(df_phases, start=1):
        column = "Over240P" + str(phase_number)
        positions = list(np.where(phase_df.Value > 240)[0])
        flagged = phase_df.iloc[positions, :].assign(**{column: 'Over'})[[column]]
        df_sdp = pd.concat([flagged, df_sdp], axis=1).sort_index()
    return df_sdp
def calculate_phase_distance(df_phases, df_sdp):
    """Flag samples whose voltage exceeds 240 for each phase.

    NOTE(review): despite the name, this is byte-for-byte the same logic as
    calculate_voltage_range (over-240 detection) — presumably an unfinished
    copy that was meant to compute inter-phase distances; confirm intent.
    Behavior is preserved here.
    """
    for phase_number, phase_df in enumerate(df_phases, start=1):
        column = "Over240P" + str(phase_number)
        over_rows = list(np.where(phase_df.Value > 240)[0])
        flagged = phase_df.iloc[over_rows, :].assign(**{column: 'Over'})[[column]]
        df_sdp = pd.concat([flagged, df_sdp], axis=1).sort_index()
    return df_sdp
def calculate_anomalies(pickle_directory, excel_file_path):
    """For every measurement directory under `pickle_directory`, load the three
    per-phase pickles, detect voltage steps and over-240 samples, and write the
    combined result to anomalies/<dir name>.csv.

    `excel_file_path` is currently unused — the Excel export below is
    commented out in favor of CSV output.
    """
    # print(os.getcwd())
    file_paths = os.listdir(pickle_directory)
    print(file_paths)
    for path in file_paths:
        print(path)
        path = pickle_directory / Path(path)
        # Each directory holds pickles named phase1/phase2/phase3.
        df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
        df_sdp = calculate_voltage_steps(df_phases)
        df_sdp = calculate_voltage_range(df_phases, df_sdp)
        # excel_writer = pd.ExcelWriter(path=excel_file_path, datetime_format='YYYY-MM-DD HH:MM:SS')
        # df_sdp.to_excel(sheet_name=path.name, excel_writer=excel_writer)
        # NOTE(review): output path is relative to the CWD and the 'anomalies'
        # directory must already exist — to_csv does not create it.
        csv_path = Path('anomalies') / (path.name+'.csv')
        df_sdp.to_csv(path_or_buf=csv_path, sep=';')
        # workbook = excel_writer.book
        # excel_writer.save()
def main():
    """Entry point: set the input/output locations for an anomaly run.

    The actual processing call is currently commented out, so this is a no-op
    apart from constructing the two paths.
    """
    pickle_directory = Path("testPickles")
    excel_file_path = Path("test.xlsx")
    # calculate_anomalies(pickle_directory, excel_file_path)


# Bug fix: main() was called unconditionally at module level, so merely
# importing this module triggered it. Guard it behind __main__.
if __name__ == "__main__":
    main()
"pandas.read_pickle",
"os.listdir",
"pathlib.Path",
"numpy.where",
"pandas.DataFrame",
"pandas.concat"
] | [((132, 146), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (144, 146), True, 'import pandas as pd\n'), ((1977, 2005), 'os.listdir', 'os.listdir', (['pickle_directory'], {}), '(pickle_directory)\n', (1987, 2005), False, 'import os\n'), ((2722, 2741), 'pathlib.Path', 'Path', (['"""testPickles"""'], {}), "('testPickles')\n", (2726, 2741), False, 'from pathlib import Path\n'), ((2764, 2781), 'pathlib.Path', 'Path', (['"""test.xlsx"""'], {}), "('test.xlsx')\n", (2768, 2781), False, 'from pathlib import Path\n'), ((2111, 2121), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2115, 2121), False, 'from pathlib import Path\n'), ((2524, 2541), 'pathlib.Path', 'Path', (['"""anomalies"""'], {}), "('anomalies')\n", (2528, 2541), False, 'from pathlib import Path\n'), ((702, 741), 'pandas.concat', 'pd.concat', (['[steps_up_df, steps_down_df]'], {}), '([steps_up_df, steps_down_df])\n', (711, 741), True, 'import pandas as pd\n'), ((775, 815), 'pandas.concat', 'pd.concat', (['[steps_df, result_df]'], {'axis': '(1)'}), '([steps_df, result_df], axis=1)\n', (784, 815), True, 'import pandas as pd\n'), ((1021, 1047), 'numpy.where', 'np.where', (['(df_p.Value > 240)'], {}), '(df_p.Value > 240)\n', (1029, 1047), True, 'import numpy as np\n'), ((1261, 1307), 'pandas.concat', 'pd.concat', (['[transgressions_df, df_sdp]'], {'axis': '(1)'}), '([transgressions_df, df_sdp], axis=1)\n', (1270, 1307), True, 'import pandas as pd\n'), ((1512, 1538), 'numpy.where', 'np.where', (['(df_p.Value > 240)'], {}), '(df_p.Value > 240)\n', (1520, 1538), True, 'import numpy as np\n'), ((1752, 1798), 'pandas.concat', 'pd.concat', (['[transgressions_df, df_sdp]'], {'axis': '(1)'}), '([transgressions_df, df_sdp], axis=1)\n', (1761, 1798), True, 'import pandas as pd\n'), ((2161, 2197), 'pandas.read_pickle', 'pd.read_pickle', (["(path / ('phase' + p))"], {}), "(path / ('phase' + p))\n", (2175, 2197), True, 'import pandas as pd\n')] |
"""
Authors: <NAME>, <NAME>
Feature Selection module of chi2, anova, and mutual information.
The main object is to insert X, y, and output an dataframe with features and their scores.
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import f_classif,chi2, mutual_info_classif, SelectKBest
class Filter_Algorithms(object):
    """Filter-based feature ranking: chi2, ANOVA F-test, and mutual information.

    Each ``fit_*`` method performs a stratified train/validation split, scores
    every feature on the training fold only, and returns a DataFrame with
    columns ('score', 'feature') sorted by score, descending.

    The original triplicated the split/score/rank pipeline across the three
    methods; it now lives in one private helper.
    """

    def __init__(self, X, y, test_size, seed=0):
        """
        Parameters
        ----------
        input:
            X: array-like {n_samples, n_features}
                Training instances to compute the feature importance scores
            y: array-like {n_samples}
                Training labels
            test_size: fraction of the data held out from scoring
            seed: random state for the stratified split
        output:
            R: Ranked features according particular algorithm
        -------
        """
        self.X = X             # Feature values
        self.y = y             # Target values
        self.seed = seed       # Fixed seed
        self.test_size = test_size  # Split for train and test

    def _rank_features(self, scorer):
        """Split the data, score X_train's columns with `scorer`, and return a
        score-sorted DataFrame. `scorer` maps (X_train, y_train) -> 1-D scores."""
        X_train, X_val, y_train, y_val = train_test_split(
            self.X, self.y, stratify=self.y,
            test_size=self.test_size, random_state=self.seed,
        )
        X_train = pd.DataFrame(data=X_train, columns=self.X.columns)
        scores = scorer(X_train, y_train)
        ranked = [(scores[i], X_train.columns[i]) for i in range(X_train.shape[1])]
        df = pd.DataFrame(data=ranked, columns=('score', 'feature'))
        df.index = [''] * len(df)  # blank index so printed output shows no row numbers
        return df.sort_values(by='score', ascending=False)

    def fit_Chi2(self):
        """Rank features by the chi-squared statistic (scores only; p-values discarded)."""
        return self._rank_features(lambda X, y: chi2(X, y)[0])

    def fit_Anova(self):
        """Rank features by the ANOVA F-statistic (scores only; p-values discarded)."""
        return self._rank_features(lambda X, y: f_classif(X, y)[0])

    def fit_Mutual(self):
        """Rank features by estimated mutual information with the target."""
        return self._rank_features(
            lambda X, y: mutual_info_classif(np.array(X), np.array(y))
        )
"pandas.DataFrame",
"sklearn.model_selection.train_test_split",
"sklearn.feature_selection.f_classif",
"numpy.array",
"sklearn.feature_selection.chi2"
] | [((1201, 1304), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'stratify': 'self.y', 'test_size': 'self.test_size', 'random_state': 'self.seed'}), '(self.X, self.y, stratify=self.y, test_size=self.test_size,\n random_state=self.seed)\n', (1217, 1304), False, 'from sklearn.model_selection import train_test_split\n'), ((1335, 1385), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_train', 'columns': 'self.X.columns'}), '(data=X_train, columns=self.X.columns)\n', (1347, 1385), True, 'import pandas as pd\n'), ((1420, 1442), 'sklearn.feature_selection.chi2', 'chi2', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (1424, 1442), False, 'from sklearn.feature_selection import f_classif, chi2, mutual_info_classif, SelectKBest\n'), ((1965, 2068), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'stratify': 'self.y', 'test_size': 'self.test_size', 'random_state': 'self.seed'}), '(self.X, self.y, stratify=self.y, test_size=self.test_size,\n random_state=self.seed)\n', (1981, 2068), False, 'from sklearn.model_selection import train_test_split\n'), ((2099, 2149), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X_train', 'columns': 'self.X.columns'}), '(data=X_train, columns=self.X.columns)\n', (2111, 2149), True, 'import pandas as pd\n'), ((2184, 2211), 'sklearn.feature_selection.f_classif', 'f_classif', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2193, 2211), False, 'from sklearn.feature_selection import f_classif, chi2, mutual_info_classif, SelectKBest\n'), ((2733, 2836), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'stratify': 'self.y', 'test_size': 'self.test_size', 'random_state': 'self.seed'}), '(self.X, self.y, stratify=self.y, test_size=self.test_size,\n random_state=self.seed)\n', (2749, 2836), False, 'from sklearn.model_selection import train_test_split\n'), ((2859, 2909), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 
'X_train', 'columns': 'self.X.columns'}), '(data=X_train, columns=self.X.columns)\n', (2871, 2909), True, 'import pandas as pd\n'), ((1592, 1652), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'scores_Chi2', 'columns': "('score', 'feature')"}), "(data=scores_Chi2, columns=('score', 'feature'))\n", (1604, 1652), True, 'import pandas as pd\n'), ((2360, 2421), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'scores_Anova', 'columns': "('score', 'feature')"}), "(data=scores_Anova, columns=('score', 'feature'))\n", (2372, 2421), True, 'import pandas as pd\n'), ((2956, 2973), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (2964, 2973), True, 'import numpy as np\n'), ((2975, 2992), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2983, 2992), True, 'import numpy as np\n'), ((3144, 3206), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'scores_Mutual', 'columns': "('score', 'feature')"}), "(data=scores_Mutual, columns=('score', 'feature'))\n", (3156, 3206), True, 'import pandas as pd\n')] |
import sys
import numpy as np
import torch
import os
from tqdm import tqdm, trange
sys.path.append("..")
import moviepy.editor as mpy
import env
import gym
import pickle
from gym import spaces
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from config import argparser
from config.motion_planner import add_arguments as mp_add_arguments
from collections import OrderedDict
from termcolor import colored
from bc_visual_args import args
from behavioral_cloning_visual import BC_Visual_Policy, BC_Image_Only, BC_Robot_Only, BC_Visual_Policy_Stochastic
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: figures are written to files only (headless safe)
# set global seed for every RNG this script touches so runs are reproducible
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def retrieve_np_state(raw_state):
    """Flatten a sequence of array-likes into a single 1-D numpy array.

    The previous implementation called ``np.concatenate`` inside the loop,
    copying the accumulated array on every iteration (quadratic in the total
    length); collecting the parts first and concatenating once is linear.

    :param raw_state: iterable of array-like observation components
        (assumed at least 1-D each — TODO confirm against callers).
    :return: numpy array with all components joined along axis 0.
    """
    parts = [np.asarray(values) for values in raw_state]
    return np.concatenate(parts, axis=0)
def get_img_robot_state(obs, env):
    """Split an observation dict into image and robot-state tensors.

    :param obs: observation mapping containing 'image' plus env-specific
        proprioceptive keys (joint/gripper/eef entries for Sawyer envs).
    :param env: environment id string selecting the state layout.
    :return: tuple ``(obs_img, obs_robot)`` — ``obs_img`` is a torch tensor
        of ``obs['image']``; ``obs_robot`` is a float tensor with a leading
        batch dimension of 1.
    :raises ValueError: for an unsupported env id. (Bug fix: the original
        only printed an error and then crashed with NameError because
        ``obs_robot`` was never assigned.)
    """
    obs_img = torch.from_numpy(obs['image'])
    if env == 'PusherObstacle-v0':
        # First two entries of the observation dict form the robot state.
        state_info = list(obs.values())[0:2]
        obs_robot = retrieve_np_state(state_info)
    elif env in ('SawyerPushObstacle-v0',
                 'SawyerAssemblyObstacle-v0',
                 'SawyerLiftObstacle-v0'):
        obs_robot = np.concatenate((obs['joint_pos'], obs['joint_vel'],
                                    obs['gripper_qpos'], obs['gripper_qvel'],
                                    obs['eef_pos'], obs['eef_quat']))
    else:
        raise ValueError('Incorrect env name: {}'.format(env))
    obs_robot = torch.from_numpy(obs_robot).float()
    obs_robot = obs_robot[None, :]
    return obs_img, obs_robot
def visualize_feature_maps(obs_img, policy):
    """Save a 16x16 grid of the policy's third conv-layer activations.

    Debugging utility: runs ``obs_img`` through the policy's third conv
    layer and writes the first 256 channel activations (grayscale) to
    ``../out/feature_maps_conv3.png``.

    Bug fix: removed a leftover ``breakpoint()`` after the savefig call,
    which dropped every evaluation run into the debugger.

    :param obs_img: image observation accepted by
        ``policy.visualize_third_conv_layer`` (batch dimension first).
    :param policy: model exposing ``visualize_third_conv_layer``.
    :return: None
    """
    from matplotlib import pyplot

    out = policy.visualize_third_conv_layer(obs_img)
    square = 16
    ix = 1
    for _ in range(square):
        for _ in range(square):
            # specify subplot and turn off axis ticks
            ax = pyplot.subplot(square, square, ix)
            ax.set_xticks([])
            ax.set_yticks([])
            # plot one filter channel in grayscale
            pyplot.imshow(out[0, ix - 1, :, :].cpu().detach().numpy(), cmap='gray')
            ix += 1
    # write the assembled figure to disk
    pyplot.savefig('../out/feature_maps_conv3.png')
    return None
def run(config):
    """Evaluate a trained behavioural-cloning policy in the configured env.

    Loads the checkpoint named by the module-level ``args``, rolls out
    ``args.num_eval_ep`` episodes per seed (five fixed seeds when
    ``args.three_hundred_eval_five_seeds`` is set), records a video per
    episode, and prints aggregate success/reward statistics.

    :param config: parsed MoPA-style experiment configuration (env name,
        seed, rendering sizes, ...).
    """
    # Off-screen rendering display.
    os.environ["DISPLAY"] = ":1"
    # NOTE(review): `device` is computed below but the policy is never moved
    # with `.to(device)`, so inference runs on CPU (checkpoint is loaded
    # with map_location='cpu').
    if config.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(args.cuda_num)
        assert torch.cuda.is_available()
        device = 'cuda:{}'.format(args.cuda_num)
    else:
        device = torch.device("cpu")
    # Instantiate the network variant selected by args.model.
    if args.model == 'BC_Visual_Policy':
        policy = BC_Visual_Policy(robot_state=args.robot_state_size, num_classes=args.action_size, img_size=args.env_image_size)
    elif args.model == 'BC_Image_Only':
        policy = BC_Image_Only(num_classes=args.action_size, img_size=args.env_image_size)
    elif args.model == 'BC_Robot_Only':
        policy = BC_Robot_Only(robot_state=args.robot_state_size, num_classes=args.action_size)
    elif args.model == 'BC_Visual_Policy_Stochastic':
        policy = BC_Visual_Policy_Stochastic(robot_state=args.robot_state_size, num_classes=args.action_size, img_size=args.env_image_size)
    else:
        # NOTE(review): the closing paren is misplaced — 'red' is passed to
        # print(), not colored(), so this message is never colorized.
        print(colored('ERROR: Do not support this model {}'.format(args.model)), 'red')
        exit(1)
    print(colored('test model {}'.format(args.model), 'blue'))
    print(policy)
    # Restore weights on CPU and switch to inference mode.
    checkpoint = torch.load(os.path.join(args.model_save_dir, args.checkpoint), map_location='cpu')
    print('loading checkpoint {}'.format(os.path.join(args.model_save_dir, args.checkpoint)))
    policy.load_state_dict(checkpoint['state_dict'])
    policy.eval()
    num_success = 0          # episodes with strictly positive reward
    num_ep = args.num_eval_ep
    ep_len_success_total = 0  # summed length of env-successful episodes
    ep_success_total = 0      # episodes where the env reported success
    total_episodes = 0
    total_discounted_rewards = 0
    eefs = []                # per-episode end-effector trajectories
    running_seeds = []
    if args.three_hundred_eval_five_seeds:
        running_seeds = [1234, 200, 500, 2320, 1800]
    else:
        running_seeds = [config.seed]
    for curr_seed in running_seeds:
        _env_eval = gym.make(config.env, **config.__dict__)
        _env_eval.set_seed(curr_seed)
        print(colored('Seed {}'.format(curr_seed), 'blue'))
        for ep in trange(num_ep):
            states = []
            _record_frames = []
            # NOTE(review): rollout_states is re-initialised every episode,
            # so the pickle dump after this loop stores only the LAST
            # episode of the seed.
            rollout_states = []
            obs = _env_eval.reset()
            obs_img, obs_robot = get_img_robot_state(obs, config.env)
            # visualize_feature_maps(obs_img, policy) # DEBUG
            states.append(obs_robot)
            done = False
            ep_len = 0
            ep_rew = 0
            ep_discounted_rew = 0
            _store_frame(_env_eval, _record_frames, info={})
            # rollout_states.append({"ob": [obs_robot], "ac": []})
            while ep_len < args.eval_bc_max_step and not done:
                # Forward pass matching the inputs each variant expects.
                if args.model == 'BC_Visual_Policy':
                    action = policy(obs_img, obs_robot)
                elif args.model == 'BC_Image_Only':
                    action = policy(obs_img)
                elif args.model == 'BC_Robot_Only':
                    action = policy(obs_robot)
                elif args.model == 'BC_Visual_Policy_Stochastic':
                    action = policy(obs_img, obs_robot)
                # Drop the batch dimension before stepping the env.
                if len(action.shape) == 2:
                    action = action[0]
                obs, reward, done, info = _env_eval.step(action.detach().numpy(), is_bc_policy=True)
                rollout_states.append({"ob": [obs], "ac": [action.detach().numpy()]})
                obs_img, obs_robot = get_img_robot_state(obs, config.env)
                ep_len += 1
                ep_rew += reward
                # discounted reward based on formula: $\sum_{t=0}^{T-1} \gamma^t R(s_t, a_t)$
                discounted_reward = pow(args.discount_factor, (ep_len-1)) * reward
                ep_discounted_rew += discounted_reward
                _store_frame(_env_eval, _record_frames, info)
                if(ep_len % 100 == 0):
                    print (colored("Current Episode Step: {}, Reward: {}, Discounted Reward: {}".format(ep_len, reward, discounted_reward), "green"))
            # "s"/"f" tag is embedded in the video filename.
            if _env_eval._success:
                ep_success = "s"
                ep_len_success_total += ep_len
                ep_success_total += 1
            else:
                ep_success = "f"
            fname = "{}_step_{:011d}_{}_r_{}_{}.mp4".format(
                config.env,
                0,
                total_episodes,
                ep_rew,
                ep_success,
            )
            total_episodes += 1
            total_discounted_rewards += ep_discounted_rew
            if(ep_rew>0):
                num_success += 1
            _save_video(fname, _record_frames, config)
            # saving eefs
            cur_eefs = []
            for obj in rollout_states:
                cur_eefs.append(obj['ob'][0]['eef_pos'])
            eefs.append(np.array(cur_eefs))
            print("Episode Length: {}, Episode Reward:{}, Episode Discounted Reward:{}.".format(ep_len, ep_rew, ep_discounted_rew), done)
        print(colored("Number of positive reward episodes: " + str(num_success), "red"))
        with open(args.saved_rollouts+"/{}.p".format("bc"), "wb") as f:
            pickle.dump(rollout_states, f)
        print('Finished running seed ', curr_seed)
    # saving end-effector positions in numpy
    # with open('bc_eef_positions.npy', 'wb') as f:
    #     np.save(f, eefs)
    # NOTE(review): ep_success_total may be 0 when no episode succeeds,
    # which makes the last average raise ZeroDivisionError.
    print(colored("Average success rate: " + str(ep_success_total/total_episodes*100) + "%", "yellow"))
    print(colored("Average discounted rewards: " + str(total_discounted_rewards/total_episodes), "yellow"))
    print(colored("Average episode length: " + str(ep_len_success_total/ep_success_total), "yellow"))
def _store_frame(env, _record_frames, info={}):
color = (200, 200, 200)
geom_colors = {}
frame = env.render("rgb_array") * 255.0
_record_frames.append(frame)
def _save_video(fname, frames, config, fps=8.0):
    """Write the recorded frames to an mp4 under ``args.bc_video_dir``.

    :param fname: output file name (joined onto the video directory).
    :param frames: list of RGB frames gathered during the rollout.
    :param config: run configuration (unused here; kept for call symmetry).
    :param fps: nominal playback frame rate of the written clip.
    """
    if(not os.path.exists(args.bc_video_dir)):
        os.mkdir(args.bc_video_dir)
    record_dir = args.bc_video_dir
    path = os.path.join(record_dir, fname)
    def f(t):
        # Map clip time t to a frame index; new_fps is slightly lower than
        # fps so the clip stretches and the last frame stays on screen
        # (the clip duration below also adds 2 seconds of padding).
        frame_length = len(frames)
        new_fps = 1.0 / (1.0 / fps + 1.0 / frame_length)
        idx = min(int(t * new_fps), frame_length - 1)
        return frames[idx]
    video = mpy.VideoClip(f, duration=len(frames) / fps + 2)
    video.write_videofile(path, fps, verbose=False, logger=None)
    print (colored("[*] Video saved: {}".format(path), "green"))
def overwrite_env_args(env_args):
    """Overwrite environment-related fields on *env_args* with the BC
    settings declared in ``bc_visual_args`` (module-level ``args``).

    :param env_args: namespace to mutate in place.
    """
    for attr in ("env", "env_image_size", "screen_width", "screen_height"):
        setattr(env_args, attr, getattr(args, attr))
    env_args.seed = args.env_seed
    env_args.obs_space = 'all'
if __name__ == "__main__":
    # Build the MoPA argument parser, extend it with the env-specific and
    # motion-planner arguments, then evaluate the BC policy.
    parser = argparser()
    args_mopa, unparsed = parser.parse_known_args()

    # Pick the per-environment argument group based on the configured env.
    if "Pusher" in args.env:
        from config.pusher import add_arguments
    elif "Sawyer" in args.env:
        from config.sawyer import add_arguments
    else:
        raise ValueError("args.env (%s) is not supported" % args_mopa.env)

    add_arguments(parser)
    mp_add_arguments(parser)
    args_mopa, unparsed = parser.parse_known_args()

    # overwrite environment arguments from bc_visual_args.py
    overwrite_env_args(args_mopa)

    if args_mopa.debug:
        args_mopa.rollout_length = 150
        args_mopa.start_steps = 100

    # Action and robot-state sizes depend on the robot embodiment.
    if args_mopa.env == 'PusherObstacle-v0':
        args.action_size = 4
        args.robot_state_size = 14
    elif args_mopa.env == 'SawyerPushObstacle-v0' or \
            args_mopa.env == 'SawyerAssemblyObstacle-v0':
        args.action_size = 7
        args.robot_state_size = 25
    elif args_mopa.env == 'SawyerLiftObstacle-v0':
        args.action_size = 8
        args.robot_state_size = 25
    else:
        print('ERROR: Incorrect env name')
        exit(1)

    if len(unparsed):
        # Bug fix: `logger` was never defined in this module, so the original
        # `logger.error(...)` raised NameError here; report to stderr instead.
        print("Unparsed argument is detected:\n%s" % (unparsed,), file=sys.stderr)
    else:
        run(args_mopa)
| [
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"config.motion_planner.add_arguments",
"sys.path.append",
"gym.make",
"os.path.exists",
"behavioral_cloning_visual.BC_Robot_Only",
"behavioral_cloning_visual.BC_Visual_Policy",
"numpy.random.seed",
"os.mkdir",
"numpy.concatenate",
... | [((83, 104), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (98, 104), False, 'import sys\n'), ((598, 619), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (612, 619), False, 'import matplotlib\n'), ((639, 664), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (653, 664), True, 'import numpy as np\n'), ((665, 693), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (682, 693), False, 'import torch\n'), ((694, 731), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (720, 731), False, 'import torch\n'), ((1009, 1039), 'torch.from_numpy', 'torch.from_numpy', (["obs['image']"], {}), "(obs['image'])\n", (1025, 1039), False, 'import torch\n'), ((2207, 2254), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""../out/feature_maps_conv3.png"""'], {}), "('../out/feature_maps_conv3.png')\n", (2221, 2254), False, 'from matplotlib import pyplot\n'), ((8302, 8333), 'os.path.join', 'os.path.join', (['record_dir', 'fname'], {}), '(record_dir, fname)\n', (8314, 8333), False, 'import os\n'), ((9060, 9071), 'config.argparser', 'argparser', ([], {}), '()\n', (9069, 9071), False, 'from config import argparser\n'), ((9371, 9392), 'config.sawyer.add_arguments', 'add_arguments', (['parser'], {}), '(parser)\n', (9384, 9392), False, 'from config.sawyer import add_arguments\n'), ((9397, 9421), 'config.motion_planner.add_arguments', 'mp_add_arguments', (['parser'], {}), '(parser)\n', (9413, 9421), True, 'from config.motion_planner import add_arguments as mp_add_arguments\n'), ((2459, 2484), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2482, 2484), False, 'import torch\n'), ((2561, 2580), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2573, 2580), False, 'import torch\n'), ((2640, 2756), 'behavioral_cloning_visual.BC_Visual_Policy', 'BC_Visual_Policy', ([], {'robot_state': 
'args.robot_state_size', 'num_classes': 'args.action_size', 'img_size': 'args.env_image_size'}), '(robot_state=args.robot_state_size, num_classes=args.\n action_size, img_size=args.env_image_size)\n', (2656, 2756), False, 'from behavioral_cloning_visual import BC_Visual_Policy, BC_Image_Only, BC_Robot_Only, BC_Visual_Policy_Stochastic\n'), ((3437, 3487), 'os.path.join', 'os.path.join', (['args.model_save_dir', 'args.checkpoint'], {}), '(args.model_save_dir, args.checkpoint)\n', (3449, 3487), False, 'import os\n'), ((4079, 4118), 'gym.make', 'gym.make', (['config.env'], {}), '(config.env, **config.__dict__)\n', (4087, 4118), False, 'import gym\n'), ((4236, 4250), 'tqdm.trange', 'trange', (['num_ep'], {}), '(num_ep)\n', (4242, 4250), False, 'from tqdm import tqdm, trange\n'), ((8037, 8060), 'env.render', 'env.render', (['"""rgb_array"""'], {}), "('rgb_array')\n", (8047, 8060), False, 'import env\n'), ((8172, 8205), 'os.path.exists', 'os.path.exists', (['args.bc_video_dir'], {}), '(args.bc_video_dir)\n', (8186, 8205), False, 'import os\n'), ((8220, 8247), 'os.mkdir', 'os.mkdir', (['args.bc_video_dir'], {}), '(args.bc_video_dir)\n', (8228, 8247), False, 'import os\n'), ((849, 865), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (857, 865), True, 'import numpy as np\n'), ((1355, 1486), 'numpy.concatenate', 'np.concatenate', (["(obs['joint_pos'], obs['joint_vel'], obs['gripper_qpos'], obs[\n 'gripper_qvel'], obs['eef_pos'], obs['eef_quat'])"], {}), "((obs['joint_pos'], obs['joint_vel'], obs['gripper_qpos'],\n obs['gripper_qvel'], obs['eef_pos'], obs['eef_quat']))\n", (1369, 1486), True, 'import numpy as np\n'), ((1552, 1579), 'torch.from_numpy', 'torch.from_numpy', (['obs_robot'], {}), '(obs_robot)\n', (1568, 1579), False, 'import torch\n'), ((1937, 1971), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['square', 'square', 'ix'], {}), '(square, square, ix)\n', (1951, 1971), False, 'from matplotlib import pyplot\n'), ((2809, 2882), 
'behavioral_cloning_visual.BC_Image_Only', 'BC_Image_Only', ([], {'num_classes': 'args.action_size', 'img_size': 'args.env_image_size'}), '(num_classes=args.action_size, img_size=args.env_image_size)\n', (2822, 2882), False, 'from behavioral_cloning_visual import BC_Visual_Policy, BC_Image_Only, BC_Robot_Only, BC_Visual_Policy_Stochastic\n'), ((3550, 3600), 'os.path.join', 'os.path.join', (['args.model_save_dir', 'args.checkpoint'], {}), '(args.model_save_dir, args.checkpoint)\n', (3562, 3600), False, 'import os\n'), ((2940, 3018), 'behavioral_cloning_visual.BC_Robot_Only', 'BC_Robot_Only', ([], {'robot_state': 'args.robot_state_size', 'num_classes': 'args.action_size'}), '(robot_state=args.robot_state_size, num_classes=args.action_size)\n', (2953, 3018), False, 'from behavioral_cloning_visual import BC_Visual_Policy, BC_Image_Only, BC_Robot_Only, BC_Visual_Policy_Stochastic\n'), ((7039, 7057), 'numpy.array', 'np.array', (['cur_eefs'], {}), '(cur_eefs)\n', (7047, 7057), True, 'import numpy as np\n'), ((7388, 7418), 'pickle.dump', 'pickle.dump', (['rollout_states', 'f'], {}), '(rollout_states, f)\n', (7399, 7418), False, 'import pickle\n'), ((917, 933), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (925, 933), True, 'import numpy as np\n'), ((3090, 3217), 'behavioral_cloning_visual.BC_Visual_Policy_Stochastic', 'BC_Visual_Policy_Stochastic', ([], {'robot_state': 'args.robot_state_size', 'num_classes': 'args.action_size', 'img_size': 'args.env_image_size'}), '(robot_state=args.robot_state_size, num_classes=\n args.action_size, img_size=args.env_image_size)\n', (3117, 3217), False, 'from behavioral_cloning_visual import BC_Visual_Policy, BC_Image_Only, BC_Robot_Only, BC_Visual_Policy_Stochastic\n')] |
import numpy as np
from .. import ChromaPitchEncoder
def test_chroma_encoder():
    """ChromaPitchEncoder maps (batch, samples) audio to a flat
    (batch, 12 * n_frames) chroma representation."""
    n_batch, n_frames = 10, 5
    audio = np.random.randn(n_batch, 500 * n_frames)
    encoded = ChromaPitchEncoder().encode(audio)
    assert encoded.shape == (n_batch, 12 * n_frames)
"numpy.random.randn"
] | [((170, 212), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'signal_length'], {}), '(batch_size, signal_length)\n', (185, 212), True, 'import numpy as np\n')] |
import argparse
from torch.nn import functional
from mdptetris_experiments.agents.action_networks import NN1DAction
import os
import random
import time
from collections import deque
import numpy as np
import torch
from gym_mdptetris.envs import board, piece, tetris
from gym_mdptetris.envs.tetris import TetrisFlat
from mdptetris_experiments.agents.FFNN import NN1D, NNHeuristic
from torch import nn
from torch.utils.tensorboard import SummaryWriter
def get_args() -> argparse.Namespace:
    """Build and parse the command-line arguments for DQN training/testing.

    :return: Namespace of hyperparameters; defaults reproduce the reference
        experiment configuration.
    """
    parser = argparse.ArgumentParser()
    # Run mode / hardware
    parser.add_argument("--gpu", type=str, default='0')
    parser.add_argument("--test", action='store_true')
    parser.add_argument("--render", action='store_true')
    # Environment
    parser.add_argument("--board_height", type=int, default=20)
    parser.add_argument("--board_width", type=int, default=10)
    # Replay buffer / training schedule
    parser.add_argument("--replay_buffer_length", type=int, default=20000)
    parser.add_argument("--training_start", type=int, default=2000,
                        help="Minimum timesteps for training to start.")
    parser.add_argument("--batch_size", type=int, default=512)
    parser.add_argument("--alpha", type=float, default=1e-4,
                        help="Optimiser learning rate.")
    parser.add_argument("--gamma", type=float, default=0.99)
    parser.add_argument("--init_epsilon", type=float, default=1)
    parser.add_argument("--final_epsilon", type=float, default=1e-3)
    # Bug fix: argparse does not run `type` on defaults, so `default=1e7`
    # left a float in this int-typed option; make the default an int.
    parser.add_argument("--total_timesteps", type=int, default=int(1e7))
    parser.add_argument("--epochs", type=int, default=3000)
    parser.add_argument("--target_network_update", type=int, default=5,
                        help="Epoch interval to update the target network.")
    parser.add_argument("--saving_interval", type=int, default=500)
    parser.add_argument("--epsilon_decay_period", type=int, default=2000)
    parser.add_argument("--state_rep", type=str, default="heuristic")
    # Logging / persistence
    parser.add_argument("--log_dir", type=str, default="runs")
    parser.add_argument("--load_file", type=str, default=None,
                        help="Path to partially trained model")
    parser.add_argument("--save_dir", type=str,
                        default=f"runs/run-info")
    parser.add_argument("--seed", type=int, default=None)
    parser.add_argument("--comment", type=str, default="test",
                        help="Run comment for TensorBoard writer.")
    args = parser.parse_args()
    return args
class DQN:
    def __init__(self, args: argparse.Namespace):
        """
        Class that implements a model-based DQN agent to learn a game of Tetris.
        The model for the environment is provided in the linear_agent file,
        which allows generation of subsequent states, and retrieval of their
        representation as either the full board, or as a set of features.

        :param args: A Namespace object containing experiment hyperparameters
        """
        self.env = TetrisFlat(board_height=args.board_height,
                              board_width=args.board_width, seed=args.seed)
        self._init_hyperparams(args)
        # Flat observation vector in, one value per discrete action out.
        input_dims = self.env.observation_space.shape[0]
        output_dims = self.env.action_space.shape[0]
        # Initialise models: online network plus a frozen target copy.
        self.model = NN1DAction(input_dims, output_dims).to(self.device)
        self.target = NN1DAction(input_dims, output_dims).to(self.device)
        self.target.load_state_dict(self.model.state_dict())
        self.target.eval()
        self.optimizer = torch.optim.Adam(
            self.model.parameters(), lr=args.alpha)
        self.replay_buffer = deque(maxlen=args.replay_buffer_length)
        self.loss_criterion = nn.MSELoss()

    def train(self):
        """
        Method to train the agent. Iterates through timesteps to gather training
        data, which is then stored in the buffer. After an episode concludes, makes
        a training step. Outputs information on the current training status
        of the agent while training, and saves the trained model at intervals.
        """
        self.epochs = []
        self.timesteps = []
        state = self.env.reset().to(self.device)
        self.epoch = 0
        self.timestep = 0
        ep_score = 0
        while self.epoch < self.total_epochs:
            # NOTE(review): get_action below is purely greedy, so the epsilon
            # schedule decayed in update_model never drives exploration.
            action = self.get_action(state)
            new_state, reward, done, info = self.env.step(action)
            ep_score += reward
            self.timestep += 1
            self.replay_buffer.append([state, action, reward, new_state, done])
            self.timesteps.append(reward)
            if done:
                # One optimisation step per finished episode.
                self.update_model()
                if self.epoch > 0:
                    self._log(ep_score)
                ep_score = 0
                state = self.env.reset().to(self.device)
            else:
                state = new_state.to(self.device)

    def test(self, nb_episodes: int=1000):
        """
        Method to test the performance of a trained agent for specified
        number of episodes. Outputs performance during testing and saves
        results to csv files. The agent is loaded from the pre-specified
        load file passed when the agent is instantiated.

        :param nb_episodes: Number of episodes to test the trained agent for.
        """
        self.load()
        episode_rewards = []
        episode_durations = []
        done = False
        self.epsilon = 0
        for i in range(nb_episodes):
            state = self.env.reset()
            ep_score = 0
            timesteps = 0
            # NOTE(review): several defects here — `done` is never reset to
            # False between episodes (after the first episode ends, later
            # iterations skip this while-loop entirely);
            # `get_action_and_new_state` is not defined on this class; and
            # `self.env.step` is unpacked into 2 values here but 4 in train().
            while not done:
                action, _ = self.get_action_and_new_state()
                reward, done = self.env.step(action)
                ep_score += reward
                timesteps += 1
            episode_rewards.append(ep_score)
            episode_durations.append(timesteps)
            print(f"Episode reward: {ep_score}, episode duration: {timesteps}")
            self.writer.add_scalar(f"DQN-{self.runid}/Episode reward", ep_score, i)
            self.writer.add_scalar(f"DQN-{self.runid}/Episode duration", timesteps, i)
        np.array(episode_rewards).tofile(f"{self.save_dir}/DQN-test-rewards-{self.runid}.csv", sep=',')
        np.array(episode_durations).tofile(f"{self.save_dir}/DQN-test-durations-{self.runid}.csv", sep=',')
        print(f"Average rewards: {np.mean(np.array(episode_rewards))}")
        print(f"Average duration: {np.mean(np.array(episode_durations))}")

    def update_model(self):
        """
        Method to perform one update step on the agent model from the state
        transitions saved in the agent memory.
        """
        # Wait until enough transitions are buffered before learning.
        if len(self.replay_buffer) < self.training_start:
            return
        # Increment epoch and decrement epsilon
        self.epoch += 1
        self.epsilon -= self.epsilon_decay_rate
        self.epsilon = max(self.epsilon, self.final_epsilon)
        batch = random.sample(self.replay_buffer, min(
            len(self.replay_buffer), self.batch_size))
        state_b, action_b, reward_b, new_state_b, done_b = zip(*batch)
        state_b = torch.stack(state_b).to(self.device)
        reward_b = torch.from_numpy(
            np.array(reward_b, dtype=np.float32)[:, None]).to(self.device)
        new_state_b = torch.stack(new_state_b).to(self.device)
        # Use model to judge state values, train prediction against target network
        q_vals = self.model(state_b).to(self.device)
        with torch.no_grad():
            next_predictions = self.target(new_state_b)
        # Bellman targets: terminal transitions use the raw reward.
        y_b = []
        for reward, done, prediction in zip(reward_b, done_b, next_predictions):
            y_b.append(reward if done else reward + self.gamma*prediction)
        y_b = torch.cat(y_b).to(self.device)
        # Calculate loss and train network
        self.optimizer.zero_grad()
        loss = self.loss_criterion(q_vals, y_b)
        loss.backward()
        self.optimizer.step()
        # Update the target network
        if self.epoch % self.target_network_update == 0:
            self.target.load_state_dict(self.model.state_dict())
        if self.epoch % self.saving_interval == 0:
            self.save()

    def get_action(self, state: torch.Tensor):
        """
        Get action.

        :param state: Current state.
        """
        # NOTE(review): always greedy — there is no epsilon-greedy branch
        # despite the epsilon schedule; softmax is also called without an
        # explicit `dim` argument (deprecated in recent torch versions).
        probs = self.model(state)
        dist = functional.softmax(probs)
        action = torch.argmax(dist).item()
        return action

    def load(self):
        """
        Load trained or partially trained model from load file specified
        in agent parameters.
        """
        if self.load_file == None:
            raise ValueError("No load file given")
        # NOTE(review): `self.load_file[:-3]` strips the LAST three characters
        # and compares the remainder to ".pt", so this test is almost always
        # True; the suffix check was presumably meant to be
        # `self.load_file[-3:] != ".pt"`. The else-branch also chains
        # `.to(self.device)` onto the return value of `load_state_dict`,
        # which is not a module.
        if self.load_file[:-3] != ".pt":
            self.model = torch.load(self.load_file).to(self.device)
        else:
            self.model.load_state_dict(
                torch.load(self.load_file)).to(self.device)
        self.target.load_state_dict(self.model.state_dict())
        self.target.eval()

    def _log(self, ep_score: int):
        """
        Log information about the current epoch to output and TensorBoard.

        :param ep_score: score from the previous episode.
        """
        self.epochs.append(ep_score)
        print(f"Epoch: {self.epoch}, score: {ep_score}")
        self.writer.add_scalar(f'Train-{self.runid}/Lines cleared per epoch',
                               ep_score, self.epoch - 1)
        self.writer.add_scalar(f'Train-{self.runid}/Lines cleared over last 100 timesteps',
                               sum(self.timesteps[-100:]), self.timestep - 1)
        # NOTE(review): "vlaue" is a typo in the TensorBoard tag; left as-is
        # since renaming it would split existing run histories.
        self.writer.add_scalar(
            f'Train-{self.runid}/Epsilon vlaue', self.epsilon, self.epoch - 1)

    def _init_hyperparams(self, args: argparse.Namespace):
        """
        Initialise agent hyperparameters from the arguments passed in.

        :param args: Namespace containing hyperparameters.
        """
        self.device = torch.device(
            f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
        self.runid = time.strftime('%Y%m%dT%H%M%SZ')
        self.save_dir = f"{args.save_dir}-{self.runid}"
        # Writer for TensorBoard
        self.writer = SummaryWriter(
            args.log_dir, comment=f"{args.comment}-{self.runid}")
        if not os.path.isdir(self.save_dir):
            os.makedirs(self.save_dir)
        # Persist the full argument set alongside the run artefacts.
        with open(f"{self.save_dir}/args.txt", 'w') as f:
            f.write(str(args))
        self.epsilon = args.init_epsilon
        # Linear decay schedule from init_epsilon to final_epsilon.
        self.epsilon_decay_rate = (
            args.init_epsilon - args.final_epsilon) / args.epsilon_decay_period
        self.total_epochs = args.epochs
        self.training_start = args.training_start
        self.final_epsilon = args.final_epsilon
        self.batch_size = args.batch_size
        self.gamma = args.gamma
        self.target_network_update = args.target_network_update
        self.saving_interval = args.saving_interval
        self.load_file = args.load_file
        # Seed randomness
        if args.seed == None:
            seed = int(time.time())
            random.seed(seed)
            self.env.seed(seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed(seed)
            else:
                torch.manual_seed(seed)
        else:
            random.seed(args.seed)
            self.env.seed(args.seed)
            if torch.cuda.is_available():
                torch.cuda.manual_seed(args.seed)
            else:
                torch.manual_seed(args.seed)

    def save(self):
        """
        Method to save the current model and information about the run to disk.
        """
        torch.save(self.model.state_dict(), f"{self.save_dir}/model.pt")
        np.array(self.epochs).tofile(f"{self.save_dir}/epochs.csv", sep=',')
        np.array(self.timesteps).tofile(
            f"{self.save_dir}/timesteps.csv", sep=',')
if __name__ == '__main__':
    # Entry point: parse hyperparameters, then either evaluate a saved
    # agent (--test, requires --load_file) or train one from scratch.
    args = get_args()
    if args.test:
        assert args.load_file is not None
        DQN(args).test()
    else:
        DQN(args).train()
| [
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"mdptetris_experiments.agents.action_networks.NN1DAction",
"torch.utils.tensorboard.SummaryWriter",
"collections.deque",
"gym_mdptetris.envs.tetris.TetrisFlat",
"argparse.ArgumentParser",
"os.path.isdir",
... | [((505, 530), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (528, 530), False, 'import argparse\n'), ((2928, 3020), 'gym_mdptetris.envs.tetris.TetrisFlat', 'TetrisFlat', ([], {'board_height': 'args.board_height', 'board_width': 'args.board_width', 'seed': 'args.seed'}), '(board_height=args.board_height, board_width=args.board_width,\n seed=args.seed)\n', (2938, 3020), False, 'from gym_mdptetris.envs.tetris import TetrisFlat\n'), ((3584, 3623), 'collections.deque', 'deque', ([], {'maxlen': 'args.replay_buffer_length'}), '(maxlen=args.replay_buffer_length)\n', (3589, 3623), False, 'from collections import deque\n'), ((3654, 3666), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3664, 3666), False, 'from torch import nn\n'), ((8310, 8335), 'torch.nn.functional.softmax', 'functional.softmax', (['probs'], {}), '(probs)\n', (8328, 8335), False, 'from torch.nn import functional\n'), ((9998, 10029), 'time.strftime', 'time.strftime', (['"""%Y%m%dT%H%M%SZ"""'], {}), "('%Y%m%dT%H%M%SZ')\n", (10011, 10029), False, 'import time\n'), ((10142, 10209), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['args.log_dir'], {'comment': 'f"""{args.comment}-{self.runid}"""'}), "(args.log_dir, comment=f'{args.comment}-{self.runid}')\n", (10155, 10209), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((7422, 7437), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7435, 7437), False, 'import torch\n'), ((10238, 10266), 'os.path.isdir', 'os.path.isdir', (['self.save_dir'], {}), '(self.save_dir)\n', (10251, 10266), False, 'import os\n'), ((10280, 10306), 'os.makedirs', 'os.makedirs', (['self.save_dir'], {}), '(self.save_dir)\n', (10291, 10306), False, 'import os\n'), ((11027, 11044), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (11038, 11044), False, 'import random\n'), ((11092, 11117), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11115, 11117), False, 'import torch\n'), ((11248, 11270), 
'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (11259, 11270), False, 'import random\n'), ((11323, 11348), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11346, 11348), False, 'import torch\n'), ((3245, 3280), 'mdptetris_experiments.agents.action_networks.NN1DAction', 'NN1DAction', (['input_dims', 'output_dims'], {}), '(input_dims, output_dims)\n', (3255, 3280), False, 'from mdptetris_experiments.agents.action_networks import NN1DAction\n'), ((3319, 3354), 'mdptetris_experiments.agents.action_networks.NN1DAction', 'NN1DAction', (['input_dims', 'output_dims'], {}), '(input_dims, output_dims)\n', (3329, 3354), False, 'from mdptetris_experiments.agents.action_networks import NN1DAction\n'), ((6071, 6096), 'numpy.array', 'np.array', (['episode_rewards'], {}), '(episode_rewards)\n', (6079, 6096), True, 'import numpy as np\n'), ((6175, 6202), 'numpy.array', 'np.array', (['episode_durations'], {}), '(episode_durations)\n', (6183, 6202), True, 'import numpy as np\n'), ((7060, 7080), 'torch.stack', 'torch.stack', (['state_b'], {}), '(state_b)\n', (7071, 7080), False, 'import torch\n'), ((7231, 7255), 'torch.stack', 'torch.stack', (['new_state_b'], {}), '(new_state_b)\n', (7242, 7255), False, 'import torch\n'), ((7683, 7697), 'torch.cat', 'torch.cat', (['y_b'], {}), '(y_b)\n', (7692, 7697), False, 'import torch\n'), ((8353, 8371), 'torch.argmax', 'torch.argmax', (['dist'], {}), '(dist)\n', (8365, 8371), False, 'import torch\n'), ((9938, 9963), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9961, 9963), False, 'import torch\n'), ((11002, 11013), 'time.time', 'time.time', ([], {}), '()\n', (11011, 11013), False, 'import time\n'), ((11135, 11163), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (11157, 11163), False, 'import torch\n'), ((11198, 11221), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (11215, 11221), False, 'import torch\n'), ((11366, 
11399), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (11388, 11399), False, 'import torch\n'), ((11434, 11462), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (11451, 11462), False, 'import torch\n'), ((11670, 11691), 'numpy.array', 'np.array', (['self.epochs'], {}), '(self.epochs)\n', (11678, 11691), True, 'import numpy as np\n'), ((11747, 11771), 'numpy.array', 'np.array', (['self.timesteps'], {}), '(self.timesteps)\n', (11755, 11771), True, 'import numpy as np\n'), ((8702, 8728), 'torch.load', 'torch.load', (['self.load_file'], {}), '(self.load_file)\n', (8712, 8728), False, 'import torch\n'), ((6317, 6342), 'numpy.array', 'np.array', (['episode_rewards'], {}), '(episode_rewards)\n', (6325, 6342), True, 'import numpy as np\n'), ((6390, 6417), 'numpy.array', 'np.array', (['episode_durations'], {}), '(episode_durations)\n', (6398, 6417), True, 'import numpy as np\n'), ((7146, 7182), 'numpy.array', 'np.array', (['reward_b'], {'dtype': 'np.float32'}), '(reward_b, dtype=np.float32)\n', (7154, 7182), True, 'import numpy as np\n'), ((8815, 8841), 'torch.load', 'torch.load', (['self.load_file'], {}), '(self.load_file)\n', (8825, 8841), False, 'import torch\n')] |
import torch
import numpy as np
from FClip.nms import non_maximum_suppression, structure_nms
class PointParsing():
    """Decode center/junction heatmaps into top-K point coordinates."""

    @staticmethod
    def jheatmap_torch(jmap, joff, delta=0.8, K=1000, kernel=3, joff_type="raw", resolution=128):
        """Pick the top-K peaks of a heatmap and refine them with offsets.

        :param jmap: (H, W) score heatmap tensor.
        :param joff: (2, H, W) sub-pixel offset maps (y then x), or None.
        :param delta: NMS threshold passed to non_maximum_suppression.
        :param K: number of peaks to keep.
        :param kernel: NMS kernel size.
        :param joff_type: "raw" (offsets relative to pixel corner) or
            "gaussian" (offsets already centred).
        :param resolution: coordinates are clamped to [0, resolution).
        :return: (yx, score, index) — yx is (K, 2) in (y, x) order, score is
            (K,), index is the flat pixel index of each peak.
        """
        h, w = jmap.shape
        # Suppress non-peak responses, then flatten for top-k selection.
        lcmap = non_maximum_suppression(jmap[None, ...], delta, kernel).reshape(-1)
        score, index = torch.topk(lcmap, k=int(K))
        if joff is not None:
            lcoff = joff.reshape(2, -1)
            if joff_type == "raw":
                # +0.5 moves from the pixel corner to the pixel centre.
                y = (index // w).float() + lcoff[0][index] + 0.5
                x = (index % w).float() + lcoff[1][index] + 0.5
            elif joff_type == "gaussian":
                y = (index // w).float() + lcoff[0][index]
                x = (index % w).float() + lcoff[1][index]
            else:
                raise NotImplementedError
        else:
            # No offset map: use the integer pixel coordinates.
            y = (index // w).float()
            x = (index % w).float()
        # Stack to (K, 2); the upper clamp bound keeps points strictly
        # inside the map (resolution - 1e-6).
        yx = torch.cat([y[..., None], x[..., None]], dim=-1).clamp(0, resolution - 1e-6)
        return yx, score, index

    @staticmethod
    def jheatmap_numpy(jmap, joff, delta=0.8, K=1000, kernel=3, resolution=128):
        """Numpy wrapper around jheatmap_torch (with the default "raw"
        offset convention); returns a (K, 3) array of (y, x, score)."""
        jmap = torch.from_numpy(jmap)
        if joff is not None:
            joff = torch.from_numpy(joff)
        xy, score, index = PointParsing.jheatmap_torch(jmap, joff, delta, K, kernel, resolution=resolution)
        v = torch.cat([xy, score[:, None]], 1)
        return v.numpy()
class OneStageLineParsing():
    """Decode F-Clip center/length/angle regression maps into line segments."""

    # @staticmethod
    # def get_resolution():
    #     return C.model.resolution

    @staticmethod
    def fclip_numpy(lcmap, lcoff, lleng, angle, delta=0.8, nlines=1000, ang_type="radian", kernel=3, resolution=128):
        """Numpy front-end: wraps the arrays as tensors and defers to
        fclip_torch.

        :param lcmap: (H, W) line-center heatmap.
        :param lcoff: (2, H, W) sub-pixel center offsets.
        :param lleng: (H, W) length regression map (normalised — see
            fclip_merge's resolution/2 scaling).
        :param angle: (H, W) angle regression map.
        :return: (lines, scores) as numpy arrays.
        """
        lcmap = torch.from_numpy(lcmap)
        lcoff = torch.from_numpy(lcoff)
        lleng = torch.from_numpy(lleng)
        angle = torch.from_numpy(angle)
        lines, scores = OneStageLineParsing.fclip_torch(lcmap, lcoff, lleng, angle, delta, nlines, ang_type, kernel, resolution=resolution)
        return lines.numpy(), scores.numpy()

    @staticmethod
    def fclip_torch(lcmap, lcoff, lleng, angle, delta=0.8, nlines=1000, ang_type="radian", kernel=3, resolution=128):
        """Select the top line centers, then expand each into a segment
        using the length/angle maps at the same pixel."""
        xy, score, index = PointParsing.jheatmap_torch(lcmap, lcoff, delta, nlines, kernel, resolution=resolution)
        lines = OneStageLineParsing.fclip_merge(xy, index, lleng, angle, ang_type, resolution=resolution)
        return lines, score

    @staticmethod
    def fclip_merge(xy, xy_idx, length_regress, angle_regress, ang_type="radian", resolution=128):
        """Turn (center, length, angle) predictions into segment endpoints.

        :param xy: (K, 2) center coordinates.
        :param xy_idx: (K,) flat pixel index of each center.
        :param length_regress: (H, W) normalised half-length map.
        :param angle_regress: (H, W) angle map.
        :param ang_type: "radian" (angle in units of pi) or "cosine"
            (already a cosine mapped to [0, 1]).
        :param resolution: endpoints are clamped to [0, resolution].
        :return: (K, 2, 2) tensor of segment endpoints.
        """
        # resolution = OneStageLineParsing.get_resolution()
        xy_idx = xy_idx.reshape(-1)
        # Gather the regressed values at each selected center pixel.
        lleng_regress = length_regress.reshape(-1)[xy_idx]   # (K,)
        angle_regress = angle_regress.reshape(-1)[xy_idx]   # (K,)
        # Lengths are regressed normalised to half the map resolution.
        lengths = lleng_regress * (resolution / 2)
        if ang_type == "cosine":
            angles = angle_regress * 2 - 1
        elif ang_type == "radian":
            angles = torch.cos(angle_regress * np.pi)
        else:
            raise NotImplementedError
        # Recover the other direction component from cos; the sign is fixed
        # negative, giving every segment a canonical orientation.
        angles1 = -torch.sqrt(1-angles**2)
        direction = torch.cat([angles1[:, None], angles[:, None]], 1)  # (K, 2)
        # Endpoints: center +/- half-length along the direction, clamped to
        # the map bounds.
        v1 = (xy + direction * lengths[:, None]).clamp(0, resolution)
        v2 = (xy - direction * lengths[:, None]).clamp(0, resolution)
        return torch.cat([v1[:, None], v2[:, None]], 1)
def line_parsing_from_npz(
        npz_name, ang_type="radian",
        delta=0.8, nlines=1000, kernel=3,
        s_nms=0, resolution=128
):
    """Load saved network heads from an .npz file and decode them into line segments.

    :param npz_name: path to an .npz containing "lcmap", "lcoff", "lleng", "angle"
    :param ang_type: angle encoding expected by the decoder ("radian" or "cosine")
    :param delta, nlines, kernel: heatmap parsing parameters
    :param s_nms: if > 0, apply structural NMS with this threshold
    :param resolution: coordinate clamp range
    :return: (line, score) numpy arrays
    """
    with np.load(npz_name) as fpred:
        heads = [fpred[key] for key in ("lcmap", "lcoff", "lleng", "angle")]
    line, score = OneStageLineParsing.fclip_numpy(
        *heads, delta, nlines, ang_type, kernel, resolution=resolution
    )
    # Optionally drop near-duplicate lines via structure-aware NMS.
    if s_nms > 0:
        line, score = structure_nms(line, score, s_nms)
    return line, score
| [
"FClip.nms.structure_nms",
"FClip.nms.non_maximum_suppression",
"torch.sqrt",
"torch.from_numpy",
"torch.cos",
"numpy.load",
"torch.cat"
] | [((1175, 1197), 'torch.from_numpy', 'torch.from_numpy', (['jmap'], {}), '(jmap)\n', (1191, 1197), False, 'import torch\n'), ((1389, 1423), 'torch.cat', 'torch.cat', (['[xy, score[:, None]]', '(1)'], {}), '([xy, score[:, None]], 1)\n', (1398, 1423), False, 'import torch\n'), ((1717, 1740), 'torch.from_numpy', 'torch.from_numpy', (['lcmap'], {}), '(lcmap)\n', (1733, 1740), False, 'import torch\n'), ((1757, 1780), 'torch.from_numpy', 'torch.from_numpy', (['lcoff'], {}), '(lcoff)\n', (1773, 1780), False, 'import torch\n'), ((1797, 1820), 'torch.from_numpy', 'torch.from_numpy', (['lleng'], {}), '(lleng)\n', (1813, 1820), False, 'import torch\n'), ((1837, 1860), 'torch.from_numpy', 'torch.from_numpy', (['angle'], {}), '(angle)\n', (1853, 1860), False, 'import torch\n'), ((3337, 3386), 'torch.cat', 'torch.cat', (['[angles1[:, None], angles[:, None]]', '(1)'], {}), '([angles1[:, None], angles[:, None]], 1)\n', (3346, 3386), False, 'import torch\n'), ((3553, 3593), 'torch.cat', 'torch.cat', (['[v1[:, None], v2[:, None]]', '(1)'], {}), '([v1[:, None], v2[:, None]], 1)\n', (3562, 3593), False, 'import torch\n'), ((3776, 3793), 'numpy.load', 'np.load', (['npz_name'], {}), '(npz_name)\n', (3783, 3793), True, 'import numpy as np\n'), ((4185, 4218), 'FClip.nms.structure_nms', 'structure_nms', (['line', 'score', 's_nms'], {}), '(line, score, s_nms)\n', (4198, 4218), False, 'from FClip.nms import non_maximum_suppression, structure_nms\n'), ((1246, 1268), 'torch.from_numpy', 'torch.from_numpy', (['joff'], {}), '(joff)\n', (1262, 1268), False, 'import torch\n'), ((3293, 3320), 'torch.sqrt', 'torch.sqrt', (['(1 - angles ** 2)'], {}), '(1 - angles ** 2)\n', (3303, 3320), False, 'import torch\n'), ((277, 332), 'FClip.nms.non_maximum_suppression', 'non_maximum_suppression', (['jmap[None, ...]', 'delta', 'kernel'], {}), '(jmap[None, ...], delta, kernel)\n', (300, 332), False, 'from FClip.nms import non_maximum_suppression, structure_nms\n'), ((950, 997), 'torch.cat', 'torch.cat', 
(['[y[..., None], x[..., None]]'], {'dim': '(-1)'}), '([y[..., None], x[..., None]], dim=-1)\n', (959, 997), False, 'import torch\n'), ((3189, 3221), 'torch.cos', 'torch.cos', (['(angle_regress * np.pi)'], {}), '(angle_regress * np.pi)\n', (3198, 3221), False, 'import torch\n')] |
import cv2
import numpy as np
import trackpy as tp
import pandas as pd
from ..general.parameters import get_param_val, get_method_key
from ..track import intensity_methods as im
from ..customexceptions.track_error import *
from ..user_methods import *
'''
--------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------
Tracking Methods
--------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------
'''
def trackpy(ppframe, frame, parameters=None):
    """
    Trackpy implementation

    Notes
    -----
    This method uses the trackpy python library which can be found here:
    http://soft-matter.github.io/trackpy/v0.5.0
    If you use this method in a research publication be sure to cite according to
    the details given here:
    http://soft-matter.github.io/trackpy/v0.5.0/generated/trackpy.locate.html
    using get_intensities will seriously slow down the processing so optimise
    everything else first.

    Parameters
    ----------
    First five parameters expose trackpy options. For more information
    see http://soft-matter.github.io/trackpy/v0.5.0/generated/trackpy.locate.html#trackpy.locate
    diameter
        An estimate of the objects to be tracked feature size in pixels
    minmass
        The minimum integrated brightness.
    percentile
        Features must have a peak brighter than pixels in this percentile. This helps eliminate spurious peaks.
    invert
        Set True if looking for dark objects on bright background
    max_iterations
        max number of loops to refine the center of mass, default 10
    get_intensities
        If not False results in the software extracting a circular region around each particle of radius set by intensity radius and running a method in intensity_methods. Select the method by writing its name in the get_intensities box.
    intensity_radius
        The radius of the extracted intensity around each particle centre, see get_intensities.

    New Columns
    -----------
    x
        x location of particle
    y
        y location of particle
    mass
        total integrated brightness of the blob
    size
        radius of gyration of its Gaussian-like profile
    ecc
        eccentricity
    signal
        ?!
    raw_mass
        total integrated brightness in raw_image

    Args
    ----
    ppframe
        The preprocessed frame upon which tracking is to be performed.
    frame
        The unprocessed frame on which get_intensities is run.
    parameters
        Nested dictionary specifying the tracking parameters

    Returns
    -------
    Dataframe containing data from a single frame
    """
    try:
        method_key = get_method_key('trackpy')
        df = tp.locate(ppframe,
                       get_param_val(parameters[method_key]['diameter']),
                       minmass=get_param_val(parameters[method_key]['minmass']),
                       percentile=get_param_val(parameters[method_key]['percentile']),
                       invert=get_param_val(parameters[method_key]['invert']),
                       max_iterations=get_param_val(parameters[method_key]['max_iterations']),
                       engine='numba'
                       )
        if parameters[method_key]['get_intensities'] != False:
            x = df['x'].to_numpy()
            y = df['y'].to_numpy()
            # Loop-invariant lookups hoisted: the radius does not change per particle.
            rc = get_param_val(parameters[method_key]['intensity_radius'])
            intensity = []
            for xc, yc in zip(x, y):
                try:
                    # Some particles overlap the image edge, producing an empty or
                    # malformed cut-out; those fall through to NaN below.
                    cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]
                    h, w = cut_out_frame.shape[:2]
                    mask = _create_circular_mask(h, w)
                    masked_img = cut_out_frame.copy()
                    masked_img[~mask] = 0
                    value = getattr(im, get_param_val(parameters[method_key]['get_intensities']))(masked_img)
                except Exception:
                    # Was a bare `except:`; narrowed so Ctrl-C is not swallowed.
                    value = np.nan
                intensity.append(value)
            df['intensities'] = np.array(intensity)
        return df
    except Exception as e:
        raise TrackpyError(e)
def hough(ppframe, frame, parameters=None):
    '''
    Performs the opencv hough circles transform to locate circles in an image.

    Notes
    -----
    This method uses the opencv hough circles algorithm to look for circles in an image.
    It works well provided you constrain the radii searched to reasonably tight range. It
    is particularly good for tightly bunched large particles. To estimate the appropriate
    range of radii double left click on the image will give you a coordinate or you can use
    the circular crop tool to start off with about the right values. Set min dist that the
    centre of two circles can approach (a bit less than diameter). You then need to use P1
    and P2 which are different gradient terms associated with the image. P1 is usually bigger
    than P2. Annotation with circles will automatically pick up the radii from the tracking so
    can be used to help get the settings right.

    min_dist
        minimum distance in pixels between two particles
    min_rad
        minimum radius of particles in pixels
    max_rad
        maximum radius of particles in pixels
    p1
        Control parameter
    p2
        Control parameter
    get_intensities
        If not False results in the software extracting a circular region around each particle of radius set by tracking and running a method in intensity_methods. Select the method by writing its name in the get_intensities box.

    Args
    ----
    ppframe
        The preprocessed frame upon which tracking is to be performed.
    frame
        The unprocessed frame on which get_intensities is run.
    parameters
        Nested dictionary specifying the tracking parameters

    Returns
    -------
    Dataframe containing data from a single frame
    '''
    try:
        method_key = get_method_key('hough')
        raw = cv2.HoughCircles(
            ppframe,
            cv2.HOUGH_GRADIENT,
            1,
            get_param_val(parameters[method_key]['min_dist']),
            param1=get_param_val(parameters[method_key]['p1']),
            param2=get_param_val(parameters[method_key]['p2']),
            minRadius=get_param_val(parameters[method_key]['min_rad']),
            maxRadius=get_param_val(parameters[method_key]['max_rad']))
        if raw is None:
            # HoughCircles returns None when no circles are found.
            circles_dict = {'x': [np.nan], 'y': [np.nan], 'r': [np.nan]}
        else:
            # HoughCircles output has shape (1, N, 3); take the (N, 3) slice.
            # BUG FIX: the previous np.squeeze collapsed a single detection to
            # shape (3,), making circles[:, 0] raise and silently discarding
            # one perfectly valid circle.
            circles = raw[0]
            circles_dict = {'x': circles[:, 0], 'y': circles[:, 1], 'r': circles[:, 2]}

        if parameters[method_key]['get_intensities'] != False:
            intensity = []
            for i, _ in enumerate(circles_dict['x']):
                xc = circles_dict['x'][i]
                yc = circles_dict['y'][i]
                rc = circles_dict['r'][i]
                try:
                    # Some circles overlap the edge (or are NaN placeholders),
                    # giving meaningless cut-outs; those fall through to NaN.
                    cut_out_frame = frame[int(yc - rc):int(yc + rc), int(xc - rc):int(xc + rc)]
                    h, w = cut_out_frame.shape[:2]
                    mask = _create_circular_mask(h, w)
                    masked_img = cut_out_frame.copy()
                    masked_img[~mask] = 0
                    value = getattr(im, get_param_val(parameters[method_key]['get_intensities']))(masked_img)
                except Exception:
                    value = np.nan
                intensity.append(value)
            circles_dict['intensities'] = np.array(intensity)
        df = pd.DataFrame(circles_dict)
        return df
    except Exception as e:
        raise HoughCirclesError(e)
def contours(pp_frame, frame, parameters=None):
    '''
    Implementation of OpenCVs contours.

    Notes
    -----
    Requires a preprocessed black-and-white binary image with separated objects.
    For each accepted contour the method stores: the centroid x, y, the area
    enclosed by the contour, and the contour points themselves (which can later
    be used to build a mask and extract pixels from the original image).

    area_min
        Minimum contour area to store object
    area_max
        Maximum contour area to store object
    aspect_min
        Minimum contour aspect ratio to store object
    aspect_max
        Maximum contour aspect ratio to store object
    get_intensities
        If not False, pixels inside each contour are extracted from the raw
        frame and reduced with the named method from intensity_methods.

    Args
    ----
    pp_frame
        The preprocessed frame upon which tracking is to be performed.
    frame
        The unprocessed frame on which get_intensities is run.
    parameters
        Nested dictionary specifying the tracking parameters

    Returns
    -------
    Dataframe containing data from a single frame
    '''
    try:
        method_key = get_method_key('contours')
        params = parameters[method_key]
        get_intensities = (get_param_val(params['get_intensities']) != False)

        # Contour moments need a single-channel image; convert colour frames.
        if len(np.shape(frame)) == 3:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        area_min = get_param_val(params['area_min'])
        area_max = get_param_val(params['area_max'])
        aspect_min = get_param_val(params['aspect_min'])
        aspect_max = get_param_val(params['aspect_max'])

        info = []
        for cnt in _find_contours(pp_frame):
            moments = cv2.moments(cnt)
            # Degenerate contours (zero area moment) have no centroid — skip.
            if not moments['m00'] > 0:
                continue
            area = cv2.contourArea(cnt)
            if not (area_min < area < area_max):
                continue
            cx = int(moments['m10'] / moments['m00'])
            cy = int(moments['m01'] / moments['m00'])
            (x, y), (w, h), angle = cv2.minAreaRect(cnt)
            aspect = max(w, h) / min(w, h)
            if not (aspect_min <= aspect <= aspect_max):
                continue
            row = [cx, cy, area, cnt]
            if get_intensities:
                row.append(_find_intensity_inside_contour(cnt, frame, get_intensities))
            info.append(row)

        info_headings = ['x', 'y', 'area', 'contours']
        if get_intensities:
            info_headings.append('intensities')
        return pd.DataFrame(data=info, columns=info_headings)
    except Exception as e:
        raise ContoursError(e)
'''
------------------------------------------------------------------------
Supporting functions
------------------------------------------------------------------------
'''
def _create_circular_mask(h, w, center=None, radius=None):
if center is None: # use the middle of the image
center = (int(w/2), int(h/2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w-center[0], h-center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
return mask
def _find_contours(img, hierarchy=False):
    """Version-agnostic wrapper around cv2.findContours.

    OpenCV 3 returns (image, contours, hierarchy); OpenCV 2/4 return
    (contours, hierarchy).  Returns the contours, plus the hierarchy
    when *hierarchy* is True.
    """
    result = cv2.findContours(
        img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if len(result) == 3:
        _, found, hier = result
    else:
        found, hier = result
    return (found, hier) if hierarchy else found
def _draw_contours(img, contours, col=(0, 0, 255), thickness=1):
    """Draw contours onto *img*.

    :param img: image to draw on
    :param contours: a single contour, or a list of contours
    :param col: one BGR colour applied to all contours, or a per-contour
        list of colour tuples of the same length as *contours*
    :param thickness: line thickness; -1 fills each contour
    :return: the image with contours drawn, or None if drawing failed
    """
    try:
        if thickness == -1:
            thickness = cv2.FILLED
        # A scalar or flat tuple means one colour for everything.
        if np.size(np.shape(col)) <= 1:
            img = cv2.drawContours(img, [contours], -1, col, thickness)
        else:
            for idx, single in enumerate(contours):
                img = cv2.drawContours(img, single, -1, col[idx], thickness)
        return img
    except Exception as err:
        print('Error in tracking_methods._draw_contours')
        print(err)
def _find_intensity_inside_contour(contour, frame, intensity_method):
    """Reduce the pixels inside *contour* to a single value.

    Cuts the contour's bounding box out of *frame*, zeroes every pixel
    outside the contour, then applies the named intensity_methods function.
    Returns None (after printing) if anything goes wrong.
    """
    try:
        x, y, w, h = cv2.boundingRect(contour)
        patch = frame[y:y + h, x:x + w]
        # Shift contour coordinates into the patch's local frame.
        local_contour = contour - [x, y]
        mask = _draw_contours(np.zeros((h, w, 3)), local_contour,
                              col=(255, 255, 255), thickness=-1)
        patch[~(mask[:, :, 0] > 0)] = 0
        return getattr(im, intensity_method)(patch)
    except Exception as e:
        print('Error in tracking_methods._find_intensity_inside_contour')
        print(e)
| [
"numpy.sqrt",
"cv2.drawContours",
"numpy.size",
"cv2.findContours",
"cv2.contourArea",
"cv2.minAreaRect",
"numpy.array",
"numpy.zeros",
"cv2.cvtColor",
"cv2.moments",
"pandas.DataFrame",
"numpy.shape",
"cv2.boundingRect"
] | [((11993, 12045), 'numpy.sqrt', 'np.sqrt', (['((X - center[0]) ** 2 + (Y - center[1]) ** 2)'], {}), '((X - center[0]) ** 2 + (Y - center[1]) ** 2)\n', (12000, 12045), True, 'import numpy as np\n'), ((8128, 8154), 'pandas.DataFrame', 'pd.DataFrame', (['circles_dict'], {}), '(circles_dict)\n', (8140, 8154), True, 'import pandas as pd\n'), ((9797, 9812), 'numpy.shape', 'np.shape', (['frame'], {}), '(frame)\n', (9805, 9812), True, 'import numpy as np\n'), ((11336, 11382), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'info', 'columns': 'info_headings'}), '(data=info, columns=info_headings)\n', (11348, 11382), True, 'import pandas as pd\n'), ((12286, 12345), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (12302, 12345), False, 'import cv2\n'), ((13451, 13476), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (13467, 13476), False, 'import cv2\n'), ((13577, 13596), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (13585, 13596), True, 'import numpy as np\n'), ((4503, 4522), 'numpy.array', 'np.array', (['intensity'], {}), '(intensity)\n', (4511, 4522), True, 'import numpy as np\n'), ((8091, 8110), 'numpy.array', 'np.array', (['intensity'], {}), '(intensity)\n', (8099, 8110), True, 'import numpy as np\n'), ((9865, 9904), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (9877, 9904), False, 'import cv2\n'), ((10277, 10297), 'cv2.moments', 'cv2.moments', (['contour'], {}), '(contour)\n', (10288, 10297), False, 'import cv2\n'), ((12396, 12455), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (12412, 12455), False, 'import cv2\n'), ((13003, 13056), 'cv2.drawContours', 'cv2.drawContours', (['img', '[contours]', '(-1)', 'col', 'thickness'], {}), '(img, [contours], -1, col, 
thickness)\n', (13019, 13056), False, 'import cv2\n'), ((3706, 3716), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (3713, 3716), True, 'import numpy as np\n'), ((9824, 9836), 'numpy.shape', 'np.shape', (['sz'], {}), '(sz)\n', (9832, 9836), True, 'import numpy as np\n'), ((10350, 10374), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (10365, 10374), False, 'import cv2\n'), ((13144, 13197), 'cv2.drawContours', 'cv2.drawContours', (['img', 'contour', '(-1)', 'col[i]', 'thickness'], {}), '(img, contour, -1, col[i], thickness)\n', (13160, 13197), False, 'import cv2\n'), ((10578, 10602), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (10593, 10602), False, 'import cv2\n'), ((12931, 12944), 'numpy.shape', 'np.shape', (['col'], {}), '(col)\n', (12939, 12944), True, 'import numpy as np\n'), ((12963, 12976), 'numpy.shape', 'np.shape', (['col'], {}), '(col)\n', (12971, 12976), True, 'import numpy as np\n')] |
"""Component that wraps FFMPEG to allow for the production of videos."""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from subprocess import PIPE, Popen, STDOUT
from typing import Tuple
import cv2
import numpy as np
class VideoWriter:
    """The VideoWriter class.

    Description:
        Provides a straightforward means of producing videos by providing a lightweight wrapper
        around the commonly used FFMPEG command-line tool. Is best used as a context
        manager as shown in the example below.

    Example:
        Provided the ffmpeg executable is installed on the system, the user can create a
        video in the following way:

        .. code-block:: python

            import cv2
            import numpy as np
            from scenepic import VideoWriter

            with VideoWriter("example_video.mp4", (256, 256)) as video:
                angles = np.linspace(0, 2 * np.pi, 60, endpoint=False)
                for angle in angles:
                    x = int(np.cos(angle) * 64 + 128)
                    y = int(np.sin(angle) * 64 + 128)
                    video.clear_frame()
                    cv2.circle(video.frame, (x, y), 16, (0, 0, 255), -1)
                    video.write_frame()
    """

    def __init__(self,
                 output_path: str,
                 frame_size: Tuple[int, int],
                 quality: int = None,
                 ffmpeg_path="ffmpeg",
                 background_color=(0, 0, 0),
                 rgb=False,
                 audio_path=None,
                 codec="libx264",
                 framerate=30,
                 text="",
                 text_color=(1, 1, 0),
                 font_scale=1):
        """Constructor.

        Args:
            output_path (str): The path to the output video file
            frame_size (Tuple[int, int]): The (width, height) of the frame in pixels
            quality (int, optional): The 'q' or 'crf' value driving the quality of the encoding.
                                     Defaults to None.
            ffmpeg_path (str, optional): The path to the FFMPEG executable. Default "ffmpeg".
            background_color (Tuple[float, float, float], optional): The (r, g, b) background
                color for the frame where 0.0 <= val <= 1.0. Defaults to (0, 0, 0).
            rgb (bool, optional): Whether the pixels are in RGB or BGR format. Defaults to False (bgr).
            audio_path (str, optional): Path to an audio file to demux with the video. Defaults to None.
            codec (str, optional): Codec to use for encoding the video. Defaults to "libx264".
            framerate (float, optional): Framerate for video
            text (str, optional): Text to burn into the frame
            text_color (Tuple[float, float, float], optional): Color for the text
            font_scale (float, optional): Scaling factor for the font.
                                          Defaults to 1.
        """
        self._output_path = output_path
        if codec == "libx264":
            self._codec_flags = ["-c:v", "libx264"]
            if quality is not None:
                # libx264 quality is driven by the constant rate factor.
                self._codec_flags.extend(["-crf", str(quality)])
        else:
            self._codec_flags = ["-c:v", codec]
            if quality is not None:
                # BUG FIX: was ["q", str(quality)] — without the leading dash
                # ffmpeg treats "q" as a stray argument and the command fails.
                # "-q:v" sets the fixed video quality scale for generic codecs.
                self._codec_flags.extend(["-q:v", str(quality)])

        if audio_path:
            self._audio_flags = ["-i", audio_path]
        else:
            self._audio_flags = []

        self._framerate = framerate
        self._rgb = rgb
        self._frame_size = frame_size
        self._ffmpeg_path = ffmpeg_path
        self._process = None
        self._text = text
        # Frame buffer is (height, width, 3) uint8, matching OpenCV convention.
        self._frame = np.zeros((frame_size[1], frame_size[0], 3), np.uint8)
        self._background_color = self._to_color(background_color)
        self._text_color = self._to_color(text_color)
        self._font_scale = font_scale

    def _to_color(self, color: Tuple[float, float, float]) -> np.array:
        """Convert a 0..1 (r, g, b) tuple to a uint8 triple in the buffer's channel order."""
        r, g, b = color
        assert 0 <= r <= 1 and 0 <= g <= 1 and 0 <= b <= 1
        if self._rgb:
            color = np.array([r, g, b])
        else:
            color = np.array([b, g, r])

        return (color * 255).astype(np.uint8)

    def __enter__(self):
        """Called when using the VideoWriter as a context manager."""
        self.start()
        return self

    def __exit__(self, type, value, traceback):  # noqa: A002
        """Called when using the VideoWriter as a context manager."""
        self.stop()
        return False

    def start(self):
        """Starts the video writing process.

        No need to call this method manually if using VideoWriter as a context manager.
        """
        # Frames are piped to ffmpeg's stdin as a stream of PPM images.
        self._process = Popen([self._ffmpeg_path, "-y", "-f", "image2pipe",
                               "-c:v", "ppm",
                               "-s", "{}x{}".format(*self._frame_size),
                               "-pix_fmt", "bgr24",
                               "-framerate", str(self._framerate),
                               "-i", "-",
                               *self._audio_flags,
                               *self._codec_flags,
                               "-pix_fmt", "yuv420p",
                               self._output_path],
                              stdin=PIPE, stderr=STDOUT)

    def stop(self):
        """Stops the video writing process and closes the video file.

        No need to call this method manually if using VideoWriter as a context manager.
        """
        self._process.stdin.flush()
        self._process.stdin.close()
        self._process.wait()

    @property
    def text(self) -> str:
        """Text to burn into the frame."""
        return self._text

    @text.setter
    def text(self, text: str):
        self._text = text

    @property
    def text_color(self) -> np.ndarray:
        """The color of the burned text."""
        return (self._text_color / 255).astype(np.float32)

    @text_color.setter
    def text_color(self, text_color: Tuple[float, float, float]):
        self._text_color = self._to_color(text_color)

    @property
    def frame(self) -> np.ndarray:
        """Returns the frame buffer as a (H, W, 3) uint8 numpy array."""
        return self._frame

    def clear_frame(self):
        """Clears the frame buffer, setting all pixels to the background color."""
        self._frame[:, :] = self._background_color

    def write_frame(self):
        """Write the frame buffer to the video."""
        if self.text:
            width, height = self._frame.shape[:2]
            size = min(width, height)
            # Scale the annotation with the frame so text stays legible.
            font_scale = self._font_scale * size / 512
            thickness = max(1, int(2 * font_scale))
            self._frame = cv2.putText(self._frame, self._text,
                                      (50, height - 50),
                                      cv2.FONT_HERSHEY_SIMPLEX, font_scale,
                                      tuple(self._text_color.tolist()),
                                      thickness, cv2.LINE_AA)

        if self._rgb:
            # ffmpeg is fed BGR PPM data; convert in place if the buffer is RGB.
            cv2.cvtColor(self._frame, cv2.COLOR_RGB2BGR, self._frame)
        _, buffer = cv2.imencode(".PPM", self._frame)
        self._process.stdin.write(buffer)
| [
"numpy.array",
"numpy.zeros",
"cv2.imencode",
"cv2.cvtColor"
] | [((3786, 3839), 'numpy.zeros', 'np.zeros', (['(frame_size[1], frame_size[0], 3)', 'np.uint8'], {}), '((frame_size[1], frame_size[0], 3), np.uint8)\n', (3794, 3839), True, 'import numpy as np\n'), ((7249, 7282), 'cv2.imencode', 'cv2.imencode', (['""".PPM"""', 'self._frame'], {}), "('.PPM', self._frame)\n", (7261, 7282), False, 'import cv2\n'), ((4197, 4216), 'numpy.array', 'np.array', (['[r, g, b]'], {}), '([r, g, b])\n', (4205, 4216), True, 'import numpy as np\n'), ((4251, 4270), 'numpy.array', 'np.array', (['[b, g, r]'], {}), '([b, g, r])\n', (4259, 4270), True, 'import numpy as np\n'), ((7170, 7227), 'cv2.cvtColor', 'cv2.cvtColor', (['self._frame', 'cv2.COLOR_RGB2BGR', 'self._frame'], {}), '(self._frame, cv2.COLOR_RGB2BGR, self._frame)\n', (7182, 7227), False, 'import cv2\n')] |
import gc
import numpy as np
import torch
from tqdm.notebook import tqdm
from transformers import BertModel, BertTokenizer
class WordEmbeddingFastText():
    """Compute per-word embeddings for the text columns of a DataFrame.

    Despite importing BERT classes at module level, this class looks vectors up
    directly via ``self.model[tokens]`` — presumably a fastText-style keyed
    vector model (TODO confirm the expected model interface).
    """

    def __init__(self, model, device='auto', verbose=False, model_path='bert-base-uncased',
                 sentence_embedding=False):
        # NOTE(review): model_path is accepted but never used anywhere in this class.
        self.sentence_embedding = sentence_embedding
        self.model = model
        # Set the device to GPU (cuda) if available, otherwise stick with CPU
        if device == 'auto':
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        else:
            self.device = device
        self.verbose = verbose

    def get_word_embeddings(self, sentences):
        """Return (word_embeddings, phrase_list): one stacked tensor of word
        vectors per sentence, plus the whitespace-split words of each sentence.

        NOTE(review): this returns TWO values, but get_embedding_df unpacks
        THREE when sentence_embedding=True — that path would raise ValueError;
        confirm the intended sentence-embedding API.
        """
        phrase_list = []
        tokens = []
        # Flatten all sentences into one token list for a single model lookup.
        for phrase in sentences:
            words = phrase.split()
            phrase_list.append(words)
            for word in words:
                tokens.append(word)
        token_emb = self.get_token_embeddings(tokens)
        word_embeddings = []
        s = 0
        # Re-slice the flat embedding list back into per-sentence groups.
        for i, phrase in enumerate(phrase_list):
            end = s + len(phrase)
            tmp_list=[]
            for x in token_emb[s:end]:
                tmp_list.append(torch.tensor(x))
            word_embeddings.append(torch.stack(tmp_list))
            s = end
        return word_embeddings, phrase_list

    def get_token_embeddings(self, token_list):
        """Look up vectors for a list of tokens via the model's __getitem__."""
        token_vecs = self.model[token_list]
        return token_vecs

    @staticmethod
    def get_words_to_embed(x):
        """Join all non-null values of a row/Series into one whitespace-separated
        string, or return None if every value is null."""
        not_na_mask = x.notna()
        if not_na_mask.any():
            words = np.concatenate([str(val).split() for val in x[not_na_mask].values]) # set of unique words here
            return ' '.join(words)
        else:
            return None

    @staticmethod
    def get_words_by_attribute(x):
        # NOTE(review): exact duplicate of get_words_to_embed — one of the two
        # is presumably redundant; confirm before removing.
        not_na_mask = x.notna()
        if not_na_mask.any():
            words = np.concatenate([str(val).split() for val in x[not_na_mask].values]) # set of unique words here
            return ' '.join(words)
        else:
            return None

    def get_embedding_df(self, df):
        """Embed every row of *df* (all columns except 'id' concatenated).

        Rows whose columns are all null get a placeholder tensor([0]).
        Returns (emb_all, words) or (emb_all, words, sentences_emb) when
        sentence_embedding is enabled.
        """
        columns = np.setdiff1d(df.columns, ['id'])
        # The literal strings 'None'/'nan' are treated as missing values.
        df = df.replace('None', np.nan).replace('nan', np.nan)
        sentences = df[columns].apply(WordEmbeddingFastText.get_words_to_embed, 1)
        not_None_sentences = [x for x in sentences if x is not None]
        if len(not_None_sentences) > 0:
            if self.sentence_embedding:
                # NOTE(review): get_word_embeddings returns only 2 values —
                # this 3-way unpack would raise ValueError; verify.
                tmp_emb_all, tmp_words, tmp_sentences = self.get_word_embeddings(not_None_sentences)
            else:
                tmp_emb_all, tmp_words = self.get_word_embeddings(not_None_sentences)
        # NOTE(review): if every sentence is None, tmp_emb_all/tmp_words are
        # never bound, yet the loop below only avoids them when each row is
        # None — safe in that case, but fragile; consider initialising them.
        emb_all, words, sentences_emb = [], [], []
        index = 0
        # Re-align embeddings with the original row order, inserting
        # placeholders for all-null rows.
        for i in sentences:
            if i is None:
                emb_all.append(torch.tensor([0]).to('cpu'))
                if self.sentence_embedding:
                    sentences_emb.append(torch.tensor([0]).to('cpu'))
            else:
                emb_all.append(tmp_emb_all[index].to('cpu'))
                words.append(tmp_words[index])
                if self.sentence_embedding:
                    sentences_emb.append(tmp_sentences[index].to('cpu'))
                index += 1
        emb_all = np.array(emb_all, dtype=object)
        if self.sentence_embedding:
            sentences_emb = np.array(sentences_emb, dtype=object)
            return emb_all, words, sentences_emb
        else:
            return emb_all, words

    def generate_embedding(self, df, chunk_size=500):
        """Embed *df* in chunks of *chunk_size* rows to bound GPU memory use.

        Returns concatenated (emb_list, words_list) — plus sent_emb_list when
        sentence_embedding is enabled.
        """
        emb_list, words_list, sent_emb_list = [], [], []
        n_chunk = np.ceil(df.shape[0] / chunk_size).astype(int)
        torch.cuda.empty_cache()
        if self.verbose:
            print('Computing embedding')
            to_cycle = tqdm(range(n_chunk))
        else:
            to_cycle = range(n_chunk)
        for chunk in to_cycle:
            # assert False
            if self.sentence_embedding:
                emb, words, sent_emb = self.get_embedding_df(df.iloc[chunk * chunk_size:(chunk + 1) * chunk_size])
                sent_emb_list.append(sent_emb)
            else:
                emb, words = self.get_embedding_df(df.iloc[chunk * chunk_size:(chunk + 1) * chunk_size])
            emb_list.append(emb)
            words_list += words
            # Free GPU memory between chunks.
            gc.collect()
            torch.cuda.empty_cache()
        if len(emb_list) > 0:
            emb_list = np.concatenate(emb_list)
            if self.sentence_embedding:
                sent_emb_list = np.concatenate(sent_emb_list)
        if self.sentence_embedding:
            return emb_list, words_list, sent_emb_list
        else:
            return emb_list, words_list
| [
"numpy.ceil",
"torch.stack",
"numpy.array",
"torch.tensor",
"torch.cuda.is_available",
"numpy.setdiff1d",
"numpy.concatenate",
"gc.collect",
"torch.cuda.empty_cache"
] | [((2093, 2125), 'numpy.setdiff1d', 'np.setdiff1d', (['df.columns', "['id']"], {}), "(df.columns, ['id'])\n", (2105, 2125), True, 'import numpy as np\n'), ((3212, 3243), 'numpy.array', 'np.array', (['emb_all'], {'dtype': 'object'}), '(emb_all, dtype=object)\n', (3220, 3243), True, 'import numpy as np\n'), ((3629, 3653), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3651, 3653), False, 'import torch\n'), ((3309, 3346), 'numpy.array', 'np.array', (['sentences_emb'], {'dtype': 'object'}), '(sentences_emb, dtype=object)\n', (3317, 3346), True, 'import numpy as np\n'), ((4277, 4289), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4287, 4289), False, 'import gc\n'), ((4302, 4326), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4324, 4326), False, 'import torch\n'), ((4380, 4404), 'numpy.concatenate', 'np.concatenate', (['emb_list'], {}), '(emb_list)\n', (4394, 4404), True, 'import numpy as np\n'), ((517, 542), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (540, 542), False, 'import torch\n'), ((1225, 1246), 'torch.stack', 'torch.stack', (['tmp_list'], {}), '(tmp_list)\n', (1236, 1246), False, 'import torch\n'), ((3574, 3607), 'numpy.ceil', 'np.ceil', (['(df.shape[0] / chunk_size)'], {}), '(df.shape[0] / chunk_size)\n', (3581, 3607), True, 'import numpy as np\n'), ((4477, 4506), 'numpy.concatenate', 'np.concatenate', (['sent_emb_list'], {}), '(sent_emb_list)\n', (4491, 4506), True, 'import numpy as np\n'), ((1173, 1188), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (1185, 1188), False, 'import torch\n'), ((2780, 2797), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (2792, 2797), False, 'import torch\n'), ((2894, 2911), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (2906, 2911), False, 'import torch\n')] |
"""
Created on Thu Sep 28 15:17:50 2017
@author: zqwu
"""
import numpy as np
import tensorflow as tf
import copy
import sys
from deepchem.metrics import to_one_hot, from_one_hot
from deepchem.models import KerasModel, layers
from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy
from deepchem.trans import undo_transforms
from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda
# Common symbols in SMILES, note that Cl and Br are regarded as single symbol.
# Maps each SMILES token to an integer index; indices start at 1 — index 0 is
# left unassigned, presumably reserved for padding (embedding tables elsewhere
# are sized len(char_dict) + 1 — TODO confirm).
default_dict = {
    '#': 1,
    '(': 2,
    ')': 3,
    '+': 4,
    '-': 5,
    '/': 6,
    '1': 7,
    '2': 8,
    '3': 9,
    '4': 10,
    '5': 11,
    '6': 12,
    '7': 13,
    '8': 14,
    '=': 15,
    'C': 16,
    'F': 17,
    'H': 18,
    'I': 19,
    'N': 20,
    'O': 21,
    'P': 22,
    'S': 23,
    '[': 24,
    '\\': 25,
    ']': 26,
    '_': 27,
    'c': 28,
    'Cl': 29,
    'Br': 30,
    'n': 31,
    'o': 32,
    's': 33
}
class TextCNNModel(KerasModel):
  """A 1-D convolutional neural network on SMILES strings.

  Reimplementation of the discriminator module in ORGAN: https://arxiv.org/abs/1705.10843
  Originated from: http://emnlp2014.org/papers/pdf/EMNLP2014181.pdf
  This model applies multiple 1D convolutional filters to the padded strings,
  then max-over-time pooling is applied on all filters, extracting one feature per filter.
  All features are concatenated and transformed through several hidden layers to form predictions.
  This model is initially developed for sentence-level classification tasks, with
  words represented as vectors. In this implementation, SMILES strings are dissected
  into characters and transformed to one-hot vectors in a similar way. The model can
  be used for general molecular-level classification or regression tasks. It is also
  used in the ORGAN model as discriminator.
  Training of the model only requires SMILES strings input, all featurized datasets
  that include SMILES in the `ids` attribute are accepted. PDBbind, QM7 and QM7b
  are not supported. To use the model, `build_char_dict` should be called first
  before defining the model to build character dict of input dataset, example can
  be found in examples/delaney/delaney_textcnn.py
  """

  def __init__(self,
               n_tasks,
               char_dict,
               seq_length,
               n_embedding=75,
               kernel_sizes=None,
               num_filters=None,
               dropout=0.25,
               mode="classification",
               **kwargs):
    """
    Parameters
    ----------
    n_tasks: int
      Number of tasks
    char_dict: dict
      Mapping from characters in smiles to integers
    seq_length: int
      Length of sequences(after padding)
    n_embedding: int, optional
      Length of embedding vector
    kernel_sizes: list of int, optional
      Widths of the convolutional filters; defaults to the ORGAN settings
    num_filters: list of int, optional
      Number of filters for each kernel size; defaults to the ORGAN settings
    dropout: float, optional
      Dropout rate
    mode: str
      Either "classification" or "regression" for type of model.
    """
    # The list defaults are created inside the body to avoid the shared
    # mutable-default-argument pitfall.
    if kernel_sizes is None:
      kernel_sizes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
    if num_filters is None:
      num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
    self.n_tasks = n_tasks
    self.char_dict = char_dict
    # Sequences must be at least as long as the widest convolution kernel.
    self.seq_length = max(seq_length, max(kernel_sizes))
    self.n_embedding = n_embedding
    self.kernel_sizes = kernel_sizes
    self.num_filters = num_filters
    self.dropout = dropout
    self.mode = mode

    # Build the model.
    smiles_seqs = Input(shape=(self.seq_length,), dtype=tf.int32)
    # Character embedding; +1 reserves an index for padding.
    embedding = layers.DTNNEmbedding(
        n_embedding=self.n_embedding,
        periodic_table_length=len(self.char_dict.keys()) + 1)(smiles_seqs)
    pooled_outputs = []
    conv_layers = []
    for filter_size, num_filter in zip(self.kernel_sizes, self.num_filters):
      # Multiple convolutional layers with different filter widths
      conv_layers.append(
          Conv1D(kernel_size=filter_size, filters=num_filter,
                 padding='valid')(embedding))
      # Max-over-time pooling: one feature per filter
      reduced = Lambda(lambda x: tf.reduce_max(x, axis=1))(conv_layers[-1])
      pooled_outputs.append(reduced)
    # Concat features from all filters(one feature per filter)
    concat_outputs = Concatenate(axis=1)(pooled_outputs)
    dropout = Dropout(rate=self.dropout)(concat_outputs)
    dense = Dense(200, activation=tf.nn.relu)(dropout)
    # Highway layer from https://arxiv.org/pdf/1505.00387.pdf
    gather = layers.Highway()(dense)

    if self.mode == "classification":
      logits = Dense(self.n_tasks * 2)(gather)
      logits = Reshape((self.n_tasks, 2))(logits)
      output = Softmax()(logits)
      outputs = [output, logits]
      output_types = ['prediction', 'loss']
      loss = SoftmaxCrossEntropy()
    else:
      output = Dense(self.n_tasks * 1)(gather)
      output = Reshape((self.n_tasks, 1))(output)
      outputs = [output]
      output_types = ['prediction']
      loss = L2Loss()
    model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
    super(TextCNNModel, self).__init__(
        model, loss, output_types=output_types, **kwargs)

  @staticmethod
  def build_char_dict(dataset, default_dict=default_dict):
    """ Collect all unique characters(in smiles) from the dataset.
    This method should be called before defining the model to build appropriate char_dict

    Returns (char_dict, seq_length).
    """
    # SMILES strings
    X = dataset.ids
    # Maximum length is expanded to allow length variation during train and inference
    seq_length = int(max([len(smile) for smile in X]) * 1.2)
    # '_' served as delimiter and padding
    all_smiles = '_'.join(X)
    tot_len = len(all_smiles)
    # Initialize common characters as keys
    keys = list(default_dict.keys())
    out_dict = copy.deepcopy(default_dict)
    current_key_val = len(keys) + 1
    # Include space to avoid extra keys
    keys.extend([' '])
    extra_keys = []
    i = 0
    while i < tot_len:
      # For 'Cl', 'Br', etc. try the two-character symbol first
      if all_smiles[i:i + 2] in keys:
        i = i + 2
      elif all_smiles[i:i + 1] in keys:
        i = i + 1
      else:
        # Character not recognized, add to extra_keys
        extra_keys.append(all_smiles[i])
        keys.append(all_smiles[i])
        i = i + 1
    # Add all extra_keys to char_dict
    for extra_key in extra_keys:
      out_dict[extra_key] = current_key_val
      current_key_val += 1
    return out_dict, seq_length

  @staticmethod
  def convert_bytes_to_char(s):
    """Decode an iterable of byte values into a plain string."""
    s = ''.join(chr(b) for b in s)
    return s

  def smiles_to_seq_batch(self, ids_b):
    """Converts SMILES strings to np.array sequence.
    A tf.py_func wrapper is written around this when creating the input_fn for make_estimator
    """
    if isinstance(
        ids_b[0], bytes
    ) and sys.version_info[0] != 2:  # Python 2.7 bytes and string are analogous
      ids_b = [TextCNNModel.convert_bytes_to_char(smiles) for smiles in ids_b]
    smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]
    smiles_seqs = np.vstack(smiles_seqs)
    return smiles_seqs

  def default_generator(self,
                        dataset,
                        epochs=1,
                        mode='fit',
                        deterministic=True,
                        pad_batches=True):
    """Transfer smiles strings to fixed length integer vectors"""
    for epoch in range(epochs):
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
          deterministic=deterministic,
          pad_batches=pad_batches):
        if y_b is not None:
          if self.mode == 'classification':
            y_b = to_one_hot(y_b.flatten(), 2).reshape(-1, self.n_tasks, 2)
        # Transform SMILES sequence to integers
        X_b = self.smiles_to_seq_batch(ids_b)
        yield ([X_b], [y_b], [w_b])

  def smiles_to_seq(self, smiles):
    """ Tokenize characters in smiles to integers

    Returns a fixed-length (self.seq_length) int32 array, padded with '_'.
    Raises ValueError when a character is not in self.char_dict.
    """
    smiles_len = len(smiles)
    seq = [0]
    keys = self.char_dict.keys()
    i = 0
    while i < smiles_len:
      # Skip all spaces
      if smiles[i:i + 1] == ' ':
        i = i + 1
      # For 'Cl', 'Br', etc.
      elif smiles[i:i + 2] in keys:
        seq.append(self.char_dict[smiles[i:i + 2]])
        i = i + 2
      elif smiles[i:i + 1] in keys:
        seq.append(self.char_dict[smiles[i:i + 1]])
        i = i + 1
      else:
        raise ValueError('character not found in dict')
    for i in range(self.seq_length - len(seq)):
      # Padding with '_'
      seq.append(self.char_dict['_'])
    return np.array(seq, dtype=np.int32)
#################### Deprecation warnings for renamed TensorGraph models ####################
import warnings
# Message template; filled with .format(old_class_name, new_class_name).
TENSORGRAPH_DEPRECATION = "{} is deprecated and has been renamed to {} and will be removed in DeepChem 3.0."
class TextCNNTensorGraph(TextCNNModel):
  """Deprecated alias of :class:`TextCNNModel`, kept for backward compatibility."""

  def __init__(self, *args, **kwargs):
    # Emit the standard rename warning, then delegate to the new class.
    deprecation_msg = TENSORGRAPH_DEPRECATION.format("TextCNNTensorGraph",
                                                    "TextCNNModel")
    warnings.warn(deprecation_msg, FutureWarning)
    super(TextCNNTensorGraph, self).__init__(*args, **kwargs)
| [
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Concatenate",
"deepchem.models.layers.Highway",
"deepchem.models.losses.L2Loss",
"tensorflow.keras.layers.Dropout",
"deepchem.models.losses.SoftmaxCrossEntropy",
"tensorflow.reduce_max",
"numpy.array",
"te... | [((3447, 3494), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.seq_length,)', 'dtype': 'tf.int32'}), '(shape=(self.seq_length,), dtype=tf.int32)\n', (3452, 3494), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4954, 5007), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[smiles_seqs]', 'outputs': 'outputs'}), '(inputs=[smiles_seqs], outputs=outputs)\n', (4968, 5007), True, 'import tensorflow as tf\n'), ((5731, 5758), 'copy.deepcopy', 'copy.deepcopy', (['default_dict'], {}), '(default_dict)\n', (5744, 5758), False, 'import copy\n'), ((6970, 6992), 'numpy.vstack', 'np.vstack', (['smiles_seqs'], {}), '(smiles_seqs)\n', (6979, 6992), True, 'import numpy as np\n'), ((8495, 8524), 'numpy.array', 'np.array', (['seq'], {'dtype': 'np.int32'}), '(seq, dtype=np.int32)\n', (8503, 8524), True, 'import numpy as np\n'), ((4222, 4241), 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (4233, 4241), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4272, 4298), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': 'self.dropout'}), '(rate=self.dropout)\n', (4279, 4298), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4327, 4360), 'tensorflow.keras.layers.Dense', 'Dense', (['(200)'], {'activation': 'tf.nn.relu'}), '(200, activation=tf.nn.relu)\n', (4332, 4360), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4445, 4461), 'deepchem.models.layers.Highway', 'layers.Highway', ([], {}), '()\n', (4459, 4461), False, 'from deepchem.models import KerasModel, layers\n'), ((4728, 4749), 'deepchem.models.losses.SoftmaxCrossEntropy', 'SoftmaxCrossEntropy', ([], {}), '()\n', (4747, 4749), False, 'from deepchem.models.losses 
import L2Loss, SoftmaxCrossEntropy\n'), ((4932, 4940), 'deepchem.models.losses.L2Loss', 'L2Loss', ([], {}), '()\n', (4938, 4940), False, 'from deepchem.models.losses import L2Loss, SoftmaxCrossEntropy\n'), ((4523, 4546), 'tensorflow.keras.layers.Dense', 'Dense', (['(self.n_tasks * 2)'], {}), '(self.n_tasks * 2)\n', (4528, 4546), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4570, 4596), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(self.n_tasks, 2)'], {}), '((self.n_tasks, 2))\n', (4577, 4596), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4620, 4629), 'tensorflow.keras.layers.Softmax', 'Softmax', ([], {}), '()\n', (4627, 4629), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4776, 4799), 'tensorflow.keras.layers.Dense', 'Dense', (['(self.n_tasks * 1)'], {}), '(self.n_tasks * 1)\n', (4781, 4799), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4823, 4849), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(self.n_tasks, 1)'], {}), '((self.n_tasks, 1))\n', (4830, 4849), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((3897, 3965), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'kernel_size': 'filter_size', 'filters': 'num_filter', 'padding': '"""valid"""'}), "(kernel_size=filter_size, filters=num_filter, padding='valid')\n", (3903, 3965), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Softmax, Dropout, Conv1D, Concatenate, Lambda\n'), ((4058, 4082), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (4071, 4082), True, 'import tensorflow as tf\n')] |
# coding: utf-8
# In[1]:
import numpy
import numpy as np
import matplotlib.pyplot as plot
import time as TIME
import csv
import usbtmc
#CONSTANTS
# Minimum number of samples expected in one capture (used as a sanity check).
TIME_LENGTH = 600
# In[ ]:
# Open the scope over USB-TMC and grab one raw trace from channel 1.
# NOTE(review): `usbtmc.usb_instrument` / `sample_norm` come from a local
# wrapper -- presumably `sample_norm` returns the raw byte payload; confirm.
r = usbtmc.usb_instrument()
s1 = r.sample_norm("CHAN1")
print("Sample Captured")
# In[ ]:
# View the raw capture as unsigned 8-bit samples (no copy).
data = numpy.frombuffer(s1, 'B')
print(data)
# Query the scope's vertical/horizontal settings (SCPI-style commands) so the
# raw ADC counts can later be converted to volts and seconds.
voltscale = float( r.ask(":CHAN1:SCAL?", length=20))
voltageOffset = float( r.ask(":CHAN1:OFFS?", length=20))
timescale = float( r.ask(":TIM:SCAL?", length = 20))
timeOffset = float( r.ask(":TIM:OFFS?", length =20))
def sample(channel="CHAN1"):
    """Capture one waveform from *channel* and return it as calibrated volts.

    Parameters
    ----------
    channel: str
        Scope channel to sample, e.g. "CHAN1".

    Returns a 1-D float array of voltages.
    Raises RuntimeError when the capture is shorter than TIME_LENGTH.
    """
    dtemp = r.sample_norm(channel)
    if len(dtemp) < TIME_LENGTH:
        # BUG FIX: the original raised a plain string, which is a TypeError
        # in Python 3; raise a real exception instead.
        raise RuntimeError("Device unresponsive. Please Try again.")
    # BUG FIX: scale queries were hard-coded to CHAN1 even though the capture
    # honours `channel`; query the same channel that was sampled.
    voltscale = float(r.ask(":{}:SCAL?".format(channel), length=20))
    voltageOffset = float(r.ask(":{}:OFFS?".format(channel), length=20))
    # NOTE(review): the time-base values are queried but never used here;
    # kept in case callers rely on the instrument round-trip -- confirm.
    timescale = float(r.ask(":TIM:SCAL?", length=20))
    timeOffset = float(r.ask(":TIM:OFFS?", length=20))
    weird_offset = 11  # leading samples of each capture are garbage; drop them
    # BUG FIX: the original transformed the module-level `data` array and
    # silently discarded the fresh capture `dtemp`; convert `dtemp` instead.
    # Work in float to avoid uint8 wrap-around when inverting.
    data = 255.0 - numpy.frombuffer(dtemp, 'B').astype(numpy.float64)
    data = data[weird_offset:]
    # Same calibration as the module-level demo code below.
    data = (data - 130.0 - voltageOffset / voltscale * 25) / 25 * voltscale
    return data
def writeSample(filename, data, time):
    """Write paired (value, timestamp) rows to *filename*, space-delimited.

    Parameters
    ----------
    filename: str
        Destination CSV path.
    data, time: sequences of equal length
        Sample values and their timestamps; written row by row as strings.
    """
    # BUG FIX: csv.writer emits str in Python 3, so the file must be opened
    # in text mode; newline='' lets the csv module manage line endings.
    with open(filename, 'w', newline='') as csvfile:
        cartographer = csv.writer(csvfile, delimiter=" ",
                                  quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for value, stamp in zip(data, time):
            cartographer.writerow([str(value), str(stamp)])
def graphSample(anex, save=False, img_name=None):
    """Plot a stacked (2, N) array of (voltages, times) on one figure.

    Parameters
    ----------
    anex: 2-D array
        Row 0 holds the voltage samples, row 1 the time axis in seconds.
    save: bool
        When True, also write the figure to *img_name*.
    img_name: str, optional
        Output filename.  Defaults to the current HHMMSS timestamp,
        evaluated per call (BUG FIX: the original default was evaluated
        once at import time, so every call shared the same name).
    """
    if img_name is None:
        img_name = str(TIME.strftime("%H%M%S"))
    data = anex[0, :]
    t = anex[1, :]
    # Pick a readable unit for the time axis (works for any N, not just 600).
    if t[-1] < 1e-3:
        t = t * 1e6
        tUnit = "uS"
    elif t[-1] < 1:  # BUG FIX: the original tested the module-level `time`
        t = t * 1e3  # array here instead of the local `t`.
        tUnit = "mS"
    else:
        tUnit = "S"
    # Plot the data
    newFig = plot.figure()
    plot.plot(t, data)
    plot.title("Oscilloscope Channel 1")
    plot.ylabel("Voltage (V)")
    plot.xlabel("Time (" + tUnit + ")")
    plot.xlim(t[0], t[-1])
    if save:
        plot.savefig(img_name)
    plot.show()
# In[ ]:
# Convert the module-level raw capture to volts (duplicates sample()'s maths).
weird_offset = 11  # leading samples of the capture are dropped
# NOTE(review): `data` is uint8 here, so `data*-1+255` relies on modular
# wrap-around to compute 255 - data -- confirm this on current numpy.
data = data*-1+255
data = data[weird_offset:]
data = (data - 130.0 - voltageOffset/voltscale*25) / 25 * voltscale
#
# In[3]:
# Build a synthetic (data, time) pair, round-trip it through CSV, and plot it.
timescale = 1
time = numpy.arange(-300.0/50*timescale, 300.0/50*timescale, timescale/50.0)
fake_data = numpy.arange(1000.0/50*timescale, 1600.0/50*timescale, timescale/50.0)
package = np.vstack((fake_data,time))
np.savetxt("test.csv", package, delimiter=",")
# writeSample("Test.csv", fake_data, time)
new_pk = np.loadtxt("test.csv", delimiter=",")
print(new_pk)
graphSample(package, save=True)
| [
"usbtmc.usb_instrument",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"csv.writer",
"time.strftime",
"matplotlib.pyplot.figure",
"numpy.vstack",
"numpy.savetxt",
"numpy.from... | [((185, 208), 'usbtmc.usb_instrument', 'usbtmc.usb_instrument', ([], {}), '()\n', (206, 208), False, 'import usbtmc\n'), ((281, 306), 'numpy.frombuffer', 'numpy.frombuffer', (['s1', '"""B"""'], {}), "(s1, 'B')\n", (297, 306), False, 'import numpy\n'), ((2277, 2356), 'numpy.arange', 'numpy.arange', (['(-300.0 / 50 * timescale)', '(300.0 / 50 * timescale)', '(timescale / 50.0)'], {}), '(-300.0 / 50 * timescale, 300.0 / 50 * timescale, timescale / 50.0)\n', (2289, 2356), False, 'import numpy\n'), ((2359, 2444), 'numpy.arange', 'numpy.arange', (['(1000.0 / 50 * timescale)', '(1600.0 / 50 * timescale)', '(timescale / 50.0)'], {}), '(1000.0 / 50 * timescale, 1600.0 / 50 * timescale, timescale / 50.0\n )\n', (2371, 2444), False, 'import numpy\n'), ((2440, 2468), 'numpy.vstack', 'np.vstack', (['(fake_data, time)'], {}), '((fake_data, time))\n', (2449, 2468), True, 'import numpy as np\n'), ((2468, 2514), 'numpy.savetxt', 'np.savetxt', (['"""test.csv"""', 'package'], {'delimiter': '""","""'}), "('test.csv', package, delimiter=',')\n", (2478, 2514), True, 'import numpy as np\n'), ((2567, 2604), 'numpy.loadtxt', 'np.loadtxt', (['"""test.csv"""'], {'delimiter': '""","""'}), "('test.csv', delimiter=',')\n", (2577, 2604), True, 'import numpy as np\n'), ((1825, 1838), 'matplotlib.pyplot.figure', 'plot.figure', ([], {}), '()\n', (1836, 1838), True, 'import matplotlib.pyplot as plot\n'), ((1843, 1861), 'matplotlib.pyplot.plot', 'plot.plot', (['t', 'data'], {}), '(t, data)\n', (1852, 1861), True, 'import matplotlib.pyplot as plot\n'), ((1866, 1902), 'matplotlib.pyplot.title', 'plot.title', (['"""Oscilloscope Channel 1"""'], {}), "('Oscilloscope Channel 1')\n", (1876, 1902), True, 'import matplotlib.pyplot as plot\n'), ((1907, 1933), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""Voltage (V)"""'], {}), "('Voltage (V)')\n", (1918, 1933), True, 'import matplotlib.pyplot as plot\n'), ((1938, 1973), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (["('Time (' + tUnit + 
')')"], {}), "('Time (' + tUnit + ')')\n", (1949, 1973), True, 'import matplotlib.pyplot as plot\n'), ((2014, 2037), 'matplotlib.pyplot.xlim', 'plot.xlim', (['t[0]', 't[599]'], {}), '(t[0], t[599])\n', (2023, 2037), True, 'import matplotlib.pyplot as plot\n'), ((2079, 2090), 'matplotlib.pyplot.show', 'plot.show', ([], {}), '()\n', (2088, 2090), True, 'import matplotlib.pyplot as plot\n'), ((1205, 1281), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""" """', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (1215, 1281), False, 'import csv\n'), ((1483, 1506), 'time.strftime', 'TIME.strftime', (['"""%H%M%S"""'], {}), "('%H%M%S')\n", (1496, 1506), True, 'import time as TIME\n'), ((2052, 2074), 'matplotlib.pyplot.savefig', 'plot.savefig', (['img_name'], {}), '(img_name)\n', (2064, 2074), True, 'import matplotlib.pyplot as plot\n')] |
# STEP 1: Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# Run the code in the cell below to extract object points and image points for camera calibration.
import numpy as np
import cv2
import glob
# import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pickle
# Chessboard geometry: number of corners detected along each direction.
CHESSBOARD_X = 9  # number of corners of calibration chessboard in x direction
CHESSBOARD_Y = 6  # number of corners of calibration chessboard in y direction

# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (8,5,0);
# z stays 0 because the chessboard is planar.
objp = np.zeros((CHESSBOARD_Y * CHESSBOARD_X, 3), np.float32)
objp[:, :2] = np.mgrid[0:CHESSBOARD_X, 0:CHESSBOARD_Y].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane.

# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (CHESSBOARD_X, CHESSBOARD_Y), None)

    # If found, add object points, image points
    if ret:  # idiom fix: was `if ret == True:`
        objpoints.append(objp)
        imgpoints.append(corners)

        # Draw and display the corners
        cv2.drawChessboardCorners(img, (CHESSBOARD_X, CHESSBOARD_Y), corners, ret)
        #write_name = 'corners_found'+str(idx)+'.jpg'
        #cv2.imwrite(write_name, img)
        cv2.imshow('img', img)
        cv2.waitKey(10)

cv2.destroyAllWindows()

# Use objpoints and imgpoints to do camera calibration.
# Test undistortion on an image
img = cv2.imread('camera_cal/test_image.jpg')
img_size = (img.shape[1], img.shape[0])

# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                   img_size, None, None)
dst = cv2.undistort(img, mtx, dist, None, mtx)
cv2.imwrite('camera_cal/test_undist.jpg', dst)

# Save the camera calibration result for later use (we won't worry about
# rvecs / tvecs).  BUG FIX: use a context manager so the pickle file handle
# is closed (the original leaked the handle from a bare open()).
dist_pickle = {"mtx": mtx, "dist": dist}
with open("camera_cal/cali_pickle.p", "wb") as pickle_file:
    pickle.dump(dist_pickle, pickle_file)

#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
print("Unable to visualize undistortion due to plotting error.")
print("Pickle file successfully saved.")

'''
# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)
'''
| [
"cv2.imwrite",
"cv2.undistort",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.drawChessboardCorners",
"cv2.imread",
"glob.glob"
] | [((586, 640), 'numpy.zeros', 'np.zeros', (['(CHESSBOARD_Y * CHESSBOARD_X, 3)', 'np.float32'], {}), '((CHESSBOARD_Y * CHESSBOARD_X, 3), np.float32)\n', (594, 640), True, 'import numpy as np\n'), ((915, 955), 'glob.glob', 'glob.glob', (['"""camera_cal/calibration*.jpg"""'], {}), "('camera_cal/calibration*.jpg')\n", (924, 955), False, 'import glob\n'), ((1654, 1677), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1675, 1677), False, 'import cv2\n'), ((1882, 1921), 'cv2.imread', 'cv2.imread', (['"""camera_cal/test_image.jpg"""'], {}), "('camera_cal/test_image.jpg')\n", (1892, 1921), False, 'import cv2\n'), ((2055, 2118), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'img_size', 'None', 'None'], {}), '(objpoints, imgpoints, img_size, None, None)\n', (2074, 2118), False, 'import cv2\n'), ((2125, 2165), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (2138, 2165), False, 'import cv2\n'), ((2166, 2212), 'cv2.imwrite', 'cv2.imwrite', (['"""camera_cal/test_undist.jpg"""', 'dst'], {}), "('camera_cal/test_undist.jpg', dst)\n", (2177, 2212), False, 'import cv2\n'), ((1062, 1079), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (1072, 1079), False, 'import cv2\n'), ((1091, 1128), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1103, 1128), False, 'import cv2\n'), ((1183, 1250), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(CHESSBOARD_X, CHESSBOARD_Y)', 'None'], {}), '(gray, (CHESSBOARD_X, CHESSBOARD_Y), None)\n', (1208, 1250), False, 'import cv2\n'), ((1432, 1506), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', '(CHESSBOARD_X, CHESSBOARD_Y)', 'corners', 'ret'], {}), '(img, (CHESSBOARD_X, CHESSBOARD_Y), corners, ret)\n', (1457, 1506), False, 'import cv2\n'), ((1606, 1628), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1616, 1628), 
False, 'import cv2\n'), ((1637, 1652), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1648, 1652), False, 'import cv2\n')] |
import numpy as np
import xobjects as xo
import xtrack as xt
from ..general import _pkg_root
from ..tables import CollimatorImpactsData, CollimatorImpacts
class BlackAbsorber(xt.BeamElement):
    """Thick collimator element that stops any particle hitting a jaw.

    Jaw positions are expressed in the collimator frame, i.e. rotated by
    ``angle`` around the beam axis and shifted by ``dx``/``dy``.
    NOTE(review): jaw field names F/B, L/R, U/D presumably stand for
    front/back, left/right, up/down -- confirm against the C kernel.
    """

    _xofields = {
        'inactive_front': xo.Float64,  # passive length before the jaws
        'active_length': xo.Float64,   # length of the absorbing region
        'inactive_back': xo.Float64,   # passive length after the jaws
        'jaw_F_L': xo.Float64,
        'jaw_F_R': xo.Float64,
        'jaw_B_L': xo.Float64,
        'jaw_B_R': xo.Float64,
        'jaw_U': xo.Float64,
        'jaw_D': xo.Float64,
        'dx': xo.Float64,              # horizontal alignment offset
        'dy': xo.Float64,              # vertical alignment offset
        'cos_z': xo.Float64,           # cosine of the jaw rotation angle
        'sin_z': xo.Float64,           # sine of the jaw rotation angle
        '_active': xo.Int8,
        '_record_impacts': xo.Int8,
        '_impacts': xo.Ref(CollimatorImpactsData)
    }

    isthick = True
    behaves_like_drift = True

    # TODO: how to pass _impacts to from_dict()... ?
    _skip_in_to_dict = ['_impacts', '_active', '_record_impacts']
    _store_in_to_dict = ['angle', 'is_active']

    def __init__(self, angle=0, is_active=True, impacts=None, **kwargs):
        """Create an absorber.

        angle: jaw rotation around the beam axis, in degrees.
        is_active: whether the collimator actually intercepts the beam.
        impacts: optional CollimatorImpacts buffer; enables impact recording.
        Remaining keyword arguments are forwarded to the xobjects fields.
        """
        # Fully-open jaws and zero offsets by default.
        kwargs.setdefault('jaw_F_L', 1)
        kwargs.setdefault('jaw_F_R', -1)
        kwargs.setdefault('jaw_B_L', 1)
        kwargs.setdefault('jaw_B_R', -1)
        kwargs.setdefault('jaw_U', 1)
        kwargs.setdefault('jaw_D', -1)
        kwargs.setdefault('inactive_front', 0)
        kwargs.setdefault('inactive_back', 0)
        kwargs.setdefault('dx', 0)
        kwargs.setdefault('dy', 0)
        # Store the tilt as cos/sin so the tracking kernel needs no trig.
        anglerad = angle / 180. * np.pi
        kwargs['cos_z'] = np.cos(anglerad)
        kwargs['sin_z'] = np.sin(anglerad)
        # Normalise booleans to the 0/1 ints held by the Int8 field
        # (idiom fix: replaces the `x == True` / `x == False` comparisons).
        if isinstance(is_active, (bool, np.bool_)):
            is_active = int(is_active)
        kwargs['_active'] = is_active
        if impacts is None:
            kwargs['_record_impacts'] = 0
        else:
            kwargs['_record_impacts'] = 1
            kwargs['_impacts'] = impacts
        super().__init__(**kwargs)

    @property
    def angle(self):
        """Jaw rotation around the beam axis, in degrees."""
        return np.arctan2(self.sin_z, self.cos_z) * (180.0 / np.pi)

    @angle.setter
    def angle(self, angle):
        anglerad = angle / 180. * np.pi
        self.cos_z = np.cos(anglerad)
        self.sin_z = np.sin(anglerad)

    @property
    def is_active(self):
        """True when the collimator intercepts the beam."""
        return bool(self._active == 1)

    @is_active.setter
    def is_active(self, is_active):
        # Same bool -> 0/1 normalisation as in __init__.
        if isinstance(is_active, (bool, np.bool_)):
            is_active = int(is_active)
        self._active = is_active
        if is_active <= 0:
            # An inactive collimator behaves as fully open.
            self.jaw_F_L = 1
            self.jaw_F_R = -1
            self.jaw_B_L = 1
            self.jaw_B_R = -1

    @property
    def length(self):
        """Total element length: inactive front + active + inactive back."""
        return self.inactive_front + self.active_length + self.inactive_back

    @property
    def impacts(self):
        """CollimatorImpacts buffer, or None when recording is disabled."""
        return self._impacts

    @impacts.setter
    def impacts(self, impacts):
        if impacts is None:
            self._record_impacts = 0
        elif isinstance(impacts, CollimatorImpacts):
            self._record_impacts = 1
        else:
            raise ValueError("The variable 'impacts' needs to be a CollimatorImpacts object!")
        self._impacts = impacts
# Attach the C source implementing the absorber's tracking kernel.
BlackAbsorber.XoStruct.extra_sources = [
    _pkg_root.joinpath('beam_elements/collimators_src/absorber.h')]
| [
"numpy.sin",
"xobjects.Ref",
"numpy.arctan2",
"numpy.cos"
] | [((701, 730), 'xobjects.Ref', 'xo.Ref', (['CollimatorImpactsData'], {}), '(CollimatorImpactsData)\n', (707, 730), True, 'import xobjects as xo\n'), ((1495, 1511), 'numpy.cos', 'np.cos', (['anglerad'], {}), '(anglerad)\n', (1501, 1511), True, 'import numpy as np\n'), ((1538, 1554), 'numpy.sin', 'np.sin', (['anglerad'], {}), '(anglerad)\n', (1544, 1554), True, 'import numpy as np\n'), ((2121, 2137), 'numpy.cos', 'np.cos', (['anglerad'], {}), '(anglerad)\n', (2127, 2137), True, 'import numpy as np\n'), ((2159, 2175), 'numpy.sin', 'np.sin', (['anglerad'], {}), '(anglerad)\n', (2165, 2175), True, 'import numpy as np\n'), ((1960, 1994), 'numpy.arctan2', 'np.arctan2', (['self.sin_z', 'self.cos_z'], {}), '(self.sin_z, self.cos_z)\n', (1970, 1994), True, 'import numpy as np\n')] |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import sys
from functools import partial
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import pccm
from ccimport import compat
from pccm.core import CodeFormatter
# from myclang import clangformat
from cumm import cudasim, dtypes
from cumm import tensorview as tv
from cumm.common import CummNVRTCLib, GemmBasic, GemmBasicHost, TensorView, TensorViewKernel
from cumm.constants import CUMM_MAXIMUM_NVRTC_CONV_NDIM, CUTLASS_MODE
from cumm.conv import kernel
from cumm.conv.bases import (NCHW, NHWC, ConvIterAlgo, ConvLayout,
ConvLayoutType, ConvMode, ConvOpType)
from cumm.conv.params import (ConvProblem,
conv_iwo_012_to_abc, gemm_abc_012_to_iwo,
get_gemm_trans_abc)
from cumm.core_cc.csrc.arrayref import ArrayPtr
from cumm.gemm import codeops
from cumm.gemm.algospec import GemmAlgo
from cumm.gemm.algospec.core import TensorOp
from cumm.gemm.core.metaarray import MetaArray
from cumm.gemm.kernel import GemmKernel
from cumm.gemm.main import (GemmAlgoParams, GemmMainUnitTest,
NVRTCMode)
from cumm.conv.nvrtc_code import nvrtc_conv_template
def seq(*vals):
    """Pack the given scalars into a 1-D int64 numpy array."""
    return np.asarray(vals, dtype=np.int64)
class ConvAlgoParams(GemmAlgoParams):
    """Parameter bundle describing one convolution kernel instantiation.

    Extends :class:`GemmAlgoParams` with convolution-specific metadata:
    spatial ndim, op type, iteration algorithm, tensor layouts and the
    sparse-convolution flags.
    """

    def __init__(self,
                 ndim: int,
                 op_type: ConvOpType,
                 iter_algo: ConvIterAlgo,
                 ts: Tuple[int, int, int],
                 wts: Tuple[int, int, int],
                 num_stage: int,
                 dtype_shorts: str,
                 layout_desp_input: ConvLayout,
                 layout_desp_weight: ConvLayout,
                 layout_desp_output: ConvLayout,
                 algo: GemmAlgo,
                 tensorop: Optional[TensorOp] = None,
                 splitk_serial: bool = False,
                 splitk_parallel: bool = False,
                 mask_sparse: bool = False,
                 increment_k_first: bool = False,
                 access_per_vector: int = 1):
        # The gemm-level A/B/C transpose flags are fully determined by the
        # convolution op type.
        trans_a, trans_b, trans_c = get_gemm_trans_abc(op_type)
        super().__init__(ts, wts, num_stage, dtype_shorts, trans_a, trans_b,
                         trans_c, algo, tensorop, splitk_serial,
                         splitk_parallel,
                         access_per_vector=access_per_vector)
        self.ndim = ndim
        self.op_type = op_type
        self.iter_algo = iter_algo
        self.mask_sparse = mask_sparse
        self.increment_k_first = increment_k_first
        # Map the gemm-space dtypes (a, b, c) back to conv-space
        # (input, weight, output) according to the op type.
        gemm_dtypes = (self.dtype_a, self.dtype_b, self.dtype_c)
        iwo_indices = conv_iwo_012_to_abc(op_type)
        self.dtype_input = gemm_dtypes[iwo_indices[0]]
        self.dtype_weight = gemm_dtypes[iwo_indices[1]]
        self.dtype_output = gemm_dtypes[iwo_indices[2]]
        self.layout_desp_input = layout_desp_input
        self.layout_desp_weight = layout_desp_weight
        self.layout_desp_output = layout_desp_output

    def skipped(self):
        """Return True when this parameter combination is unsupported."""
        # int8 kernels are only implemented for the forward op.
        is_int8 = self.dtype_a.itemsize() == 1
        if is_int8 and self.op_type != ConvOpType.kForward:
            return True
        return super().skipped()
def gen_gemm_params(op_types: List[ConvOpType],
                    ts,
                    wts,
                    ndim: int,
                    iter_algo: ConvIterAlgo,
                    stage: int,
                    dtypes_string: Union[str, List[str]],
                    li: ConvLayout,
                    lw: ConvLayout,
                    lo: ConvLayout,
                    algo: GemmAlgo,
                    tensorop: Optional[TensorOp],
                    splitk_serial: bool = False,
                    splitk_parallel: bool = False,
                    mask_sparse: bool = False,
                    increment_k_first: bool = False,
                    access_per_vector: int = 1):
    """Generate ConvAlgoParams for every (dtype string, op type) combination.

    Parameters mirror :class:`ConvAlgoParams`; ``dtypes_string`` may be one
    dtype-short string or a list of them.  Combinations whose ``skipped()``
    returns True are filtered out.

    Returns a list of ConvAlgoParams.
    """
    if not isinstance(dtypes_string, list):
        dtypes_string = [dtypes_string]
    res = []
    for dts in dtypes_string:
        for op_type in op_types:
            # Backward-weight kernels always require split-k serial
            # reduction; other ops honour the caller-provided flag.
            # (Deduplicates the two near-identical branches of the
            # original implementation.)
            use_splitk_serial = (True
                                 if op_type == ConvOpType.kBackwardWeight
                                 else splitk_serial)
            p = ConvAlgoParams(ndim, op_type, iter_algo, ts, wts, stage,
                               dts, li, lw, lo, algo, tensorop,
                               use_splitk_serial, splitk_parallel,
                               mask_sparse, increment_k_first,
                               access_per_vector)
            if not p.skipped():
                res.append(p)
    return res
# Convenience op-type groups used when generating kernel parameter lists.
ConvFwdAndBwdInput = [ConvOpType.kBackwardInput, ConvOpType.kForward, ]
ConvBwdWeight = [ConvOpType.kBackwardWeight]
ConvAllOp = [
    ConvOpType.kForward, ConvOpType.kBackwardInput, ConvOpType.kBackwardWeight
]
def gen_spwgrad_params(ts,
                       wts,
                       ndim: int,
                       iter_algo: ConvIterAlgo,
                       stage: int,
                       dtypes_string: str,
                       li: ConvLayout,
                       lw: ConvLayout,
                       lo: ConvLayout,
                       algo: GemmAlgo,
                       tensorop: Optional[TensorOp],
                       splitk_serial: bool = False,
                       splitk_parallel: bool = False,
                       mask_sparse: bool = False,
                       increment_k_first: bool = False,
                       access_per_vector: int = 1):
    """Build a one-element list of backward-weight (spatial wgrad) params.

    ``splitk_serial`` is accepted for signature symmetry with
    :func:`gen_gemm_params`, but wgrad kernels always force it on.
    """
    wgrad_params = ConvAlgoParams(ndim, ConvOpType.kBackwardWeight,
                                  iter_algo, ts, wts, stage, dtypes_string,
                                  li, lw, lo, algo, tensorop,
                                  True,  # split-k serial is mandatory here
                                  splitk_parallel, mask_sparse,
                                  increment_k_first, access_per_vector)
    return [wgrad_params]
def gen_gemm_kernels(params: ConvAlgoParams,
                     nvrtc_mode: NVRTCMode = NVRTCMode.Disabled):
    """Instantiate the ConvKernel described by *params*.

    nvrtc_mode controls runtime (NVRTC) compilation; disabled by default.
    """
    dtype_kwargs = dict(dtype_a=params.dtype_a,
                        dtype_b=params.dtype_b,
                        dtype_c=params.dtype_c,
                        dtype_acc=params.dtype_acc,
                        dtype_comp=params.dtype_comp)
    layout_kwargs = dict(layout_desp_input=params.layout_desp_input,
                         layout_desp_output=params.layout_desp_output,
                         layout_desp_weight=params.layout_desp_weight)
    algo_kwargs = dict(algo=params.algo,
                       tensorop=params.tensorop,
                       splitk_serial=params.splitk_serial,
                       splitk_parallel=params.splitk_parallel,
                       mask_sparse=params.mask_sparse,
                       increment_k_first=params.increment_k_first,
                       access_per_vector=params.access_per_vector,
                       nvrtc_mode=nvrtc_mode)
    return kernel.ConvKernel(params.ndim, params.op_type, params.iter_algo,
                             params.ts, params.wts, params.num_stage,
                             **dtype_kwargs, **layout_kwargs, **algo_kwargs)
# Parameter lists for shuffle-based kernels -- empty here; presumably
# populated elsewhere or kept for API compatibility (TODO confirm).
SHUFFLE_SIMT_PARAMS = []
SHUFFLE_VOLTA_PARAMS = []
SHUFFLE_TURING_PARAMS = []
class ConvMainUnitTest(pccm.Class):
    """pccm code-generation class that aggregates all conv kernels and emits
    the C++ dispatch / descriptor functions for them."""

    def __init__(self, conv_params: Optional[List[ConvAlgoParams]] = None):
        """Collect kernel parameter sets and instantiate their generators.

        Args:
            conv_params: explicit kernel configurations.  When None, the
                module-level SHUFFLE_*_PARAMS tables are used, or a small
                hard-coded set when the env var CUMM_DEBUG == "1".
        """
        super().__init__()
        self.add_dependency(TensorView, GemmBasic, GemmBasicHost,
                            kernel.ConvNVRTCParams, CummNVRTCLib)
        # unit test params: [ts, wts, stage, dtypes, trans, algo, tensorop]
        if conv_params is None:
            is_debug = os.getenv("CUMM_DEBUG", None)
            if is_debug is not None and is_debug == "1":
                # NOTE: a large set of alternative commented-out debug presets
                # (gen_gemm_params / gen_spwgrad_params calls) was removed
                # here; recover them from version control if needed.
                simt_params: List[ConvAlgoParams] = [
                    *gen_gemm_params(ConvFwdAndBwdInput, (32, 128, 32),
                                     (32, 32, 32), 3, ConvIterAlgo.Optimized,
                                     2, "f16,f16,f16,f32,f32", NHWC, NHWC,
                                     NHWC, GemmAlgo.Turing,
                                     TensorOp((16, 8, 8))),
                    *gen_gemm_params(ConvFwdAndBwdInput, (32, 128, 16),
                                     (32, 32, 8), 3, ConvIterAlgo.Optimized, 2,
                                     "f32,f32,f32,f32,f32", NHWC, NHWC, NHWC,
                                     GemmAlgo.Simt, None),
                ]  # type: List[ConvAlgoParams]
                volta_params: List[ConvAlgoParams] = []
                turing_params: List[ConvAlgoParams] = []
            else:
                simt_params: List[ConvAlgoParams] = [
                    *SHUFFLE_SIMT_PARAMS,
                ]
                volta_params: List[ConvAlgoParams] = [
                    *SHUFFLE_VOLTA_PARAMS,
                ]
                turing_params: List[ConvAlgoParams] = [
                    *SHUFFLE_TURING_PARAMS,
                ]
            self.all_params = simt_params + volta_params + turing_params
            self.all_kernels = [gen_gemm_kernels(p) for p in self.all_params]
        else:
            assert len(conv_params) > 0
            self.all_params = conv_params
            self.all_kernels = [gen_gemm_kernels(p) for p in self.all_params]
        self.ker_names = [k.get_algo_name() for k in self.all_kernels]
        # algo names are used as C++ identifiers in the generated code;
        # duplicates would produce colliding classes.
        assert len(set(self.ker_names)) == len(
            self.ker_names), "kernel must unique"
@staticmethod
def _get_layout_types(ker: kernel.ConvKernel):
p = ker.problem
return (p.layout_desp_input.layout_type,
p.layout_desp_weight.layout_type,
p.layout_desp_output.layout_type)
@staticmethod
def _get_layout_interleaves(ker: kernel.ConvKernel):
p = ker.problem
return (p.layout_desp_input.interleave,
p.layout_desp_weight.interleave,
p.layout_desp_output.interleave)
@staticmethod
def _get_sparse_params(ker: kernel.ConvKernel):
return (ker.mask_sparse, ker.increment_k_first)
    @staticmethod
    def conv_select_helper_stage1(kernels: List[kernel.ConvKernel],
                                  code: pccm.FunctionCode):
        """Yield kernel groups inside nested generated-C++ ``if`` guards.

        The guards compare runtime ``algo_desp`` fields against the
        compile-time attributes of each kernel group, narrowing in stages:
        (ndim, op_type, iter_algo) -> layout types -> interleaves ->
        (mask_sparse, increment_k_first).  Each ``yield`` happens while the
        matching ``code.if_`` scopes are open, so the caller's emitted code
        runs for exactly one configuration.
        """
        # stage 1: group by (ndim, op_type, iter_algo)
        ndim_op_iter_to_kers = codeops.group_by(
            lambda x: (x.problem.ndim, x.problem.op_type, x.iter_algo),
            kernels)
        for ndim_op_iter, ndim_op_iter_kers in ndim_op_iter_to_kers.items():
            if_tests = [
                f"algo_desp.ndim == {ndim_op_iter[0]}",
                f"static_cast<int>(algo_desp.op_type) == {ndim_op_iter[1].value}",
                f"static_cast<int>(algo_desp.iter_algo) == {ndim_op_iter[2].value}",
            ]
            with code.if_(" && ".join(if_tests)):
                # stage 2: group by (input, weight, output) layout types
                li_lw_lo_to_kers = codeops.group_by(
                    ConvMainUnitTest._get_layout_types, ndim_op_iter_kers)
                for li_lw_lo, lilwlo_kers in li_lw_lo_to_kers.items():
                    if_tests = [
                        f"static_cast<int>(algo_desp.layout_i) == {li_lw_lo[0].value}",
                        f"static_cast<int>(algo_desp.layout_w) == {li_lw_lo[1].value}",
                        f"static_cast<int>(algo_desp.layout_o) == {li_lw_lo[2].value}",
                    ]
                    with code.if_(" && ".join(if_tests)):
                        # stage 3: group by layout interleave factors
                        lii_lwi_loi_to_kers = codeops.group_by(
                            ConvMainUnitTest._get_layout_interleaves,
                            lilwlo_kers)
                        for liilwiloi, liilwiloi_kers in lii_lwi_loi_to_kers.items(
                        ):
                            if_tests = [
                                f"algo_desp.interleave_i == {liilwiloi[0]}",
                                f"algo_desp.interleave_w == {liilwiloi[1]}",
                                f"algo_desp.interleave_o == {liilwiloi[2]}",
                            ]
                            with code.if_(" && ".join(if_tests)):
                                # stage 4: sparse flags; must be unique by now
                                ms_ikf_mw_to_kers = codeops.group_by(
                                    ConvMainUnitTest._get_sparse_params,
                                    liilwiloi_kers)
                                for ms_ikf_mw, ms_ikf_mw_kers in ms_ikf_mw_to_kers.items(
                                ):
                                    assert len(
                                        ms_ikf_mw_kers
                                    ) == 1, "find multiple kernels for one configuration"
                                    if_tests = [
                                        f"algo_desp.mask_sparse == {pccm.boolean(ms_ikf_mw[0])}",
                                        f"algo_desp.increment_k_first == {pccm.boolean(ms_ikf_mw[1])}",
                                    ]
                                    with code.if_(" && ".join(if_tests)):
                                        yield ms_ikf_mw_kers
@staticmethod
def conv_select_helper(kernels: List[Union[kernel.ConvKernel]],
code: pccm.FunctionCode):
for kers in GemmMainUnitTest.matmul_select_helper_stage2(
kernels, code, False, False):
yield from ConvMainUnitTest.conv_select_helper_stage1(kers, code)
    @pccm.pybind.mark
    @pccm.cuda.static_function
    def extract_mnk(self):
        """Generate a C++ function that maps a conv problem description to its
        implicit-GEMM (M, N, K) dimensions via tv::gemm::implicit_gemm_mnk.

        Generated signature: takes the op type plus problem sizes as ints and
        returns std::array<int, 3> = {M, N, K}.
        """
        code = pccm.code()
        code.arg("op_type", "int")
        code.arg("N, C, K", "int")
        code.arg("kernel_volume, in_prod, out_prod", "int")
        code.arg("mask_sparse", "bool")
        code.raw(f"""
        auto op_type_enum = static_cast<tv::gemm::ConvOpType>(op_type);
        auto res = tv::gemm::implicit_gemm_mnk(op_type_enum, N, C, K,
            kernel_volume, in_prod, out_prod, mask_sparse);
        return {{res[0], res[1], res[2]}};
        """)
        return code.ret("std::array<int, 3>")
    @pccm.pybind.mark
    @pccm.cuda.static_function
    def implicit_gemm2(self):
        """Generate the C++/CUDA entry point that dispatches a runtime
        tv::gemm::ConvParams to the matching pre-compiled kernel and launches
        it.

        For every compiled kernel this emits: (1) a param-class registration,
        then (2) a guarded dispatch branch (via conv_select_helper) that
        validates shapes, builds the kernel's ConvProblem and parameter
        struct, configures shared memory, and launches.
        """
        code = pccm.code()
        # Register each kernel and its parameter struct under its algo name so
        # the generated C++ can reference them as ConvParams<name> / Conv<name>.
        for p, ker in zip(self.all_params, self.all_kernels):
            code.add_param_class("cp" + ker.get_algo_name(), ker.gemm_params,
                                 "ConvParams" + ker.get_algo_name())
            code.add_param_class(ker.get_algo_name(), ker,
                                 "Conv" + ker.get_algo_name())
        code.arg("params", "tv::gemm::ConvParams", pyanno="cumm.tensorview.gemm.ConvParams")
        code.add_dependency(TensorViewKernel)
        ch_first = ConvLayoutType.ChannelFirst.value
        # emits the shared prologue (tensor extraction, algo_desp, `found` flag,
        # NVRTC path) into `code`
        nvrtc_conv_template(code)
        for kers in self.conv_select_helper(self.all_kernels, code):
            # groups are guaranteed unique, see conv_select_helper_stage1
            ker = kers[0]
            p = ker.problem
            param_type_str = "ConvParams" + ker.get_algo_name()
            indices = conv_iwo_012_to_abc(ker.problem.op_type)
            inv_indices = gemm_abc_012_to_iwo(ker.problem.op_type)
            dtypes_abc = [ker.dtype_a, ker.dtype_b, ker.dtype_c]
            dtypes_iwo = [dtypes_abc[i] for i in indices]
            param_cls_name = "ConvParams" + ker.get_algo_name()
            param_cls_ns = "cp" + ker.get_algo_name()
            if not ker.support_splitk():
                # NOTE(review): the generated assert's first argument is a
                # string literal (always truthy), so it can never fire;
                # presumably it should assert `split_k_slices == 1` -- confirm.
                code.raw(f"""
                TV_ASSERT_RT_ERR("algo don't support splitk but you provide split_k_slices > 1.", split_k_slices);
                """)
            # TODO if input is NCxHWx
            # TODO if input weight and output have different layout
            dim_start = 2 if p.layout_desp_weight.is_channel_first() else 1
            # NOTE(review): io_ndim / weight_ndim are computed but unused below.
            io_ndim = 2 if p.mask_sparse else p.ndim + 2
            weight_ndim = 3 if p.mask_sparse else p.ndim + 2
            abc_names = ["a", "b", "c"]
            abc_names = [abc_names[i] for i in indices]
            input_names = ["input", "weight", "output"]
            input_names = [input_names[i] for i in inv_indices]
            code.raw(f"""
            // {ker.get_algo_name()}
            found = true;
            """)
            if p.mask_sparse:
                if p.op_type == ConvOpType.kBackwardWeight:
                    # wgrad tiles the reduction axis by mask_width
                    code.raw(f"""
                    TV_ASSERT_RT_ERR(mask_width > 0 && mask_width % {ker.tile_shape[2]} == 0, "error");
                    """)
                code.raw(f"""
                TV_ASSERT_RT_ERR(!indices.empty(), "error");
                TV_ASSERT_RT_ERR(!mask.empty(), "error");
                TV_ASSERT_RT_ERR(!mask_argsort.empty(), "error");
                int kernel_volume = weight.dim({dim_start});
                tv::check_shape(indices, {{kernel_volume, -1}});
                N = indices.dim(1);
                {param_cls_ns}::ConvProblem problem(N, C, K, kernel_volume,
                    tv::gemm::ConvMode::kCrossCorrelation, split_k_slices, groups);
                """)
                if p.op_type == ConvOpType.kBackwardWeight:
                    code.raw(f"""
                    TV_ASSERT_RT_ERR(N == output.dim(0), "error");
                    TV_ASSERT_RT_ERR(int64_t(N) * int64_t(C) * {ker.dtype_b.bitsize()} / 8 < std::numeric_limits<int32_t>::max(),
                        "your data exceed int32 range. this will be fixed in cumm + nvrtc (spconv 2.2/2.3).");
                    TV_ASSERT_RT_ERR(int64_t(N) * int64_t(K) * {ker.dtype_a.bitsize()} / 8 < std::numeric_limits<int32_t>::max(),
                        "your data exceed int32 range. this will be fixed in cumm + nvrtc (spconv 2.2/2.3).");
                    """)
                elif p.op_type == ConvOpType.kForward:
                    code.raw(f"""
                    TV_ASSERT_RT_ERR(N == output.dim(0), "error");
                    TV_ASSERT_RT_ERR(int64_t(N) * int64_t(C) * {ker.dtype_a.bitsize()} / 8 < std::numeric_limits<int32_t>::max(),
                        "your data exceed int32 range. this will be fixed in cumm + nvrtc (spconv 2.2/2.3).");
                    """)
                else:
                    code.raw(f"""
                    TV_ASSERT_RT_ERR(int64_t(N) * int64_t(K) * {ker.dtype_a.bitsize()} / 8 < std::numeric_limits<int32_t>::max(),
                        "your data exceed int32 range. this will be fixed in cumm + nvrtc (spconv 2.2/2.3).");
                    TV_ASSERT_RT_ERR(N == input.dim(0), "error");
                    """)
            else:
                # dense path: derive spatial dims from tensor shapes and
                # cross-check against the analytic output-dim computation.
                code.raw(f"""
                tv::array<int, {p.ndim}> input_dims, output_dims;
                tv::array<int, {p.ndim}> ksize;
                TV_ASSERT_RT_ERR({p.ndim} == padding.size() && {p.ndim} == stride.size() && {p.ndim} == dilation.size(), "error");
                for (int i = {dim_start}; i < {dim_start + p.ndim}; ++i){{
                    ksize[i - {dim_start}] = weight.dim(i);
                    input_dims[i - {dim_start}] = input.dim(i);
                    output_dims[i - {dim_start}] = output.dim(i);
                }}
                int kernel_volume = ksize.op<tv::arrayops::prod>();
                tv::array<int, {p.ndim}> padding_arr{{{code.unpack([f"padding[{i}]" for i in range(p.ndim)])}}};
                tv::array<int, {p.ndim}> stride_arr{{{code.unpack([f"stride[{i}]" for i in range(p.ndim)])}}};
                tv::array<int, {p.ndim}> dilation_arr{{{code.unpack([f"dilation[{i}]" for i in range(p.ndim)])}}};
                auto output_dims_check_again = {param_cls_ns}::ConvProblem::calc_output_dims(input_dims, ksize, padding_arr, stride_arr, dilation_arr);
                for (int i = 0; i < {p.ndim}; ++i){{
                    TV_ASSERT_RT_ERR(output_dims_check_again[i] == output_dims[i], "error");
                }}
                {param_cls_ns}::ConvProblem problem(N, C, K, input_dims, output_dims, ksize, padding_arr, stride_arr, dilation_arr,
                    tv::gemm::ConvMode::kCrossCorrelation, split_k_slices, groups);
                """)
            if p.mask_sparse:
                # mask_output is only produced by forward; mask_width only
                # applies to backward-weight.
                mask_out_ptr = "mask_output.empty() ? nullptr : mask_output.data_ptr<uint32_t>(), "
                if p.op_type != ConvOpType.kForward:
                    mask_out_ptr = ""
                mask_width_str = "mask_width,"
                if p.op_type != ConvOpType.kBackwardWeight:
                    mask_width_str = ""
                code.raw(f"""
                {param_type_str} ker_params(
                    problem, a_ten.data_ptr<{ker.dtype_a}>(), b_ten.data_ptr<{ker.dtype_b}>(),
                    c_ten.data_ptr<{ker.dtype_c}>(), c_ten.data_ptr<{ker.dtype_c}>(),
                    mask.data_ptr<uint32_t>(), mask_argsort.data_ptr<int32_t>(),
                    indices.data_ptr<int32_t>(), {mask_out_ptr} params.mask_filter,
                    params.reverse_mask, {mask_width_str}
                    {ker.dtype_comp}(params.alpha), {ker.dtype_comp}(params.beta){", split_k_slices, workspace.raw_data()" if ker.support_splitk() else ""});
                """)
            else:
                code.raw(f"""
                {param_type_str} ker_params(
                    problem, a_ten.data_ptr<{ker.dtype_a}>(), b_ten.data_ptr<{ker.dtype_b}>(),
                    c_ten.data_ptr<{ker.dtype_c}>(), c_ten.data_ptr<{ker.dtype_c}>(),
                    {ker.dtype_comp}(params.alpha), {ker.dtype_comp}(params.beta){", split_k_slices, workspace.raw_data()" if ker.support_splitk() else ""});
                """)
            if p.op_type == ConvOpType.kBackwardWeight:
                code.raw(f"""
                int num_reduced_mask = tv::div_up(ker_params.problem.N, ker_params.mask_width);
                TV_ASSERT_RT_ERR(mask.dim(0) >= num_reduced_mask, "error");
                """)
            # launch setup: raise the dynamic smem limit when the kernel needs
            # more than the default 48KB.
            code.raw(f"""
            tv::cuda::Launch launcher(ker_params.grid_dims, dim3({ker.num_threads}),
                {ker.smem_size}, reinterpret_cast<cudaStream_t>(params.stream));
            cudaError_t result;
            if ({ker.smem_size} >= (48 << 10)) {{
                result = cudaFuncSetAttribute({ker.get_algo_name()}::conv_kernel,
                    cudaFuncAttributeMaxDynamicSharedMemorySize,
                    {ker.smem_size});
                TV_ASSERT_RT_ERR(result == cudaSuccess, "error");
                result = cudaFuncSetAttribute(
                    {ker.get_algo_name()}::conv_kernel,
                    cudaFuncAttributePreferredSharedMemoryCarveout, 100);
                TV_ASSERT_RT_ERR(result == cudaSuccess, "error");
            }}
            """)
            # if cudasim.enable_debug():
            code.raw(f"""
            auto timer = tv::CUDATimer(params.verbose);
            // tv::ssprint("CPU Time", rtxtimer.report() / 1000.0);
            {{
                tv::CUDAKernelTimerGuard timerguard(\"{ker.get_algo_name()}\", evtimer, reinterpret_cast<cudaStream_t>(params.stream));
                launcher({ker.get_algo_name()}::conv_kernel, ker_params);
            }}
            """)
            if p.mask_sparse:
                code.raw(f"""
                TV_CHECK_CUDA_ERR_V2("{ker.get_algo_name()}", "error with params", input.shape(), weight.shape(), output.shape(),
                    indices.shape(), mask.shape(), mask_argsort.shape(), mask_output.shape(), mask_width);
                """)
            else:
                code.raw(f"""
                TV_CHECK_CUDA_ERR_V2("{ker.get_algo_name()}", "error with params", input.shape(), weight.shape(), output.shape());
                """)
            # if cudasim.enable_debug():
            code.raw(f"""
            if (params.verbose){{
                cudaFuncAttributes attr;
                checkCudaErrors(
                    cudaFuncGetAttributes(&attr, {ker.get_algo_name()}::conv_kernel));
                tv::ssprint("{ker.get_algo_name()} kernel num regs:", attr.numRegs, "time:", timer.report() / 1000.0);
            }}
            """)
            code.raw(f"return;")
        code.raw("""
        if (!found){
            TV_THROW_INVALID_ARG("Can't Found Algorithm for params:", algo_desp.__repr__(), tv::dtype_str(input.dtype()),
                tv::dtype_str(weight.dtype()), tv::dtype_str(output.dtype()), tv::dtype_str(dacc),
                tv::dtype_str(dcomp));
        }
        """)
        return code
@pccm.pybind.mark
@pccm.static_function
def get_all_conv_algo_desp(self):
code = pccm.code()
code.raw(f"""
std::vector<tv::gemm::ConvAlgoDesp> desps;
""")
for ker in self.all_kernels:
code.raw("{")
code.raw(f"""
tv::gemm::ConvAlgoDesp desp({ker.problem.ndim}, tv::gemm::ConvOpType({ker.problem.op_type.value}));
desp.dtype_a = {ker.dtype_a.tv_dtype};
desp.dtype_b = {ker.dtype_b.tv_dtype};
desp.dtype_c = {ker.dtype_c.tv_dtype};
desp.dacc = {ker.dtype_acc.tv_dtype};
desp.dcomp = {ker.dtype_comp.tv_dtype};
desp.trans_a_set({pccm.boolean(ker.trans_a)});
desp.trans_b_set({pccm.boolean(ker.trans_b)});
desp.trans_c_set({pccm.boolean(ker.trans_c)});
desp.tile_shape = {{{ker.tile_shape[0]}, {ker.tile_shape[1]}, {ker.tile_shape[2]}}};
desp.warp_tile_shape = {{{ker.warp_tile_shape[0]}, {ker.warp_tile_shape[1]}, {ker.warp_tile_shape[2]}}};
""")
if ker.tensorop is not None:
code.raw(
f"desp.tensorop = {{{ker.tensorop[0]}, {ker.tensorop[1]}, {ker.tensorop[2]}}};"
)
else:
code.raw(f"desp.tensorop = {{-1, -1, -1}};")
code.raw(f"""
desp.num_stage = {ker.num_stage};
desp.algo = "{ker.algo.value}";
desp.split_k_serial_set({pccm.boolean(ker.splitk_serial)});
desp.split_k_parallel_set({pccm.boolean(ker.splitk_parallel)});
desp.shuffle_type = static_cast<tv::gemm::ShuffleStrideType>({ker.shuffle_stride.value});
desp.element_per_access_a = {ker.input_spec.input_iter_a.element_per_acc};
desp.element_per_access_b = {ker.input_spec.input_iter_b.element_per_acc};
desp.element_per_access_c = {ker.output_spec.out_iter.element_per_acc};
desp.access_per_vector = {ker.access_per_vector};
// Conv attrs
desp.ndim = {ker.problem.ndim};
desp.op_type = static_cast<tv::gemm::ConvOpType>({ker.problem.op_type.value});
desp.iter_algo = static_cast<tv::gemm::ConvIterAlgo>({ker.iter_algo.value});
desp.layout_i = static_cast<tv::gemm::ConvLayoutType>({ker.problem.layout_desp_input.layout_type.value});
desp.layout_w = static_cast<tv::gemm::ConvLayoutType>({ker.problem.layout_desp_weight.layout_type.value});
desp.layout_o = static_cast<tv::gemm::ConvLayoutType>({ker.problem.layout_desp_output.layout_type.value});
desp.interleave_i = {ker.problem.layout_desp_input.interleave};
desp.interleave_w = {ker.problem.layout_desp_weight.interleave};
desp.interleave_o = {ker.problem.layout_desp_output.interleave};
desp.mask_sparse = {pccm.boolean(ker.mask_sparse)};
desp.increment_k_first = {pccm.boolean(ker.increment_k_first)};
TV_ASSERT_RT_ERR(desp.__repr__() == {ker.get_algo_name()});
desps.push_back(desp);
""")
code.raw("}")
code.raw(f"""
return desps;
""")
return code.ret("std::vector<tv::gemm::ConvAlgoDesp>",
pyanno="List[ConvAlgoDesp]")
    # @lineprof.lineprof_wrapper_cpp
    def implicit_gemm_python(self,
                             input_: np.ndarray,
                             weight: np.ndarray,
                             output: np.ndarray,
                             input_meta: np.ndarray,
                             weight_meta: np.ndarray,
                             output_meta: np.ndarray,
                             padding: List[int],
                             stride: List[int],
                             dilation: List[int],
                             ndim: int,
                             iter_algo: ConvIterAlgo,
                             op_type: ConvOpType,
                             i_ltype: ConvLayoutType,
                             w_ltype: ConvLayoutType,
                             o_ltype: ConvLayoutType,
                             ts: np.ndarray,
                             wts: np.ndarray,
                             num_stage: int,
                             dacc: dtypes.DType,
                             dcomp: dtypes.DType,
                             algo: str,
                             tensorop: np.ndarray,
                             i_interleave: int = 1,
                             w_interleave: int = 1,
                             o_interleave: int = 1):
        """Run a conv kernel in the Python CUDA simulator (cudasim).

        Scans the registered kernels for one matching the requested dtypes,
        layouts, tile/warp shapes, stages and algo, builds its ConvProblem and
        parameter struct from the numpy tensors, then simulates the kernel
        launch.  Returns (kernel result, blocks, threads) for the first
        match; raises NotImplementedError if no kernel matches.

        NOTE(review): the ``ndim``, ``iter_algo``, ``op_type`` and
        ``tensorop`` arguments are not part of the match tests below --
        confirm whether they should be.
        """
        found = False
        for p, ker in zip(self.all_params, self.all_kernels):
            indices = conv_iwo_012_to_abc(p.op_type)
            inv_indices = gemm_abc_012_to_iwo(p.op_type)
            dtypes_abc = [p.dtype_a, p.dtype_b, p.dtype_c]
            dtypes_iwo = [dtypes_abc[i] for i in indices]
            if_tests = [
                dtypes_iwo[0].npdtype() == input_.dtype,
                dtypes_iwo[1].npdtype() == weight.dtype,
                dtypes_iwo[2].npdtype() == output.dtype,
                p.layout_desp_input.layout_type == i_ltype,
                p.layout_desp_weight.layout_type == w_ltype,
                p.layout_desp_output.layout_type == o_ltype,
                p.layout_desp_input.interleave == i_interleave,
                p.layout_desp_weight.interleave == w_interleave,
                p.layout_desp_output.interleave == o_interleave,
                p.ts[0] == ts[0] and p.ts[1] == ts[1] and p.ts[2] == ts[2],
                p.wts[0] == wts[0] and p.wts[1] == wts[1]
                and p.wts[2] == wts[2],
                p.num_stage == num_stage,
                p.dtype_acc == dacc,
                p.dtype_comp == dcomp,
                algo == p.algo.value,
            ]
            if all(if_tests):
                found = True
                # dense layout only: all tensors carry ndim spatial dims
                assert input_.ndim == p.ndim + 2
                assert weight.ndim == p.ndim + 2
                assert output.ndim == p.ndim + 2
                N = input_.shape[0]
                if p.layout_desp_input.is_channel_first():
                    C = input_.shape[1]
                else:
                    C = input_.shape[p.ndim + 1]
                K = weight.shape[0]
                if p.layout_desp_output.is_channel_first():
                    K2 = output.shape[1]
                else:
                    K2 = output.shape[p.ndim + 1]
                assert K == K2
                ksize = [0] * p.ndim
                input_dims = [0] * p.ndim
                output_dims = [0] * p.ndim
                dim_start = 2 if p.layout_desp_weight.is_channel_first() else 1
                for i in range(dim_start, dim_start + p.ndim):
                    ksize[i - dim_start] = weight.shape[i]
                    input_dims[i - dim_start] = input_.shape[i]
                    output_dims[i - dim_start] = output.shape[i]
                # cross-check supplied output shape against analytic one
                output_dims_check_again = ConvProblem.calc_output_dims_python(
                    input_dims, ksize, padding, stride, dilation)
                assert output_dims_check_again == output_dims
                problem = ker.problem.python_ctor(N, C, K, input_dims,
                                                  output_dims, ksize, padding,
                                                  stride, dilation,
                                                  ConvMode.kCrossCorrelation,
                                                  1, 1)
                # NOTE(review): debug print left in; consider removing/logging
                print(problem.N_, problem.C_, problem.K_, problem.output_dims_)
                # reorder i/w/o tensors into the kernel's a/b/c convention
                inputs = [input_, weight, output]
                input_metas = [input_meta, weight_meta, output_meta]
                input_abcs = [inputs[i] for i in inv_indices]
                input_meta_abcs = [input_metas[i] for i in inv_indices]
                a_ten = input_abcs[0]
                b_ten = input_abcs[1]
                c_ten = input_abcs[2]
                a_meta_ten = input_meta_abcs[0]
                b_meta_ten = input_meta_abcs[1]
                if cudasim.enable_debug():
                    a_ptr = ArrayPtr(p.dtype_a.tv_dtype,
                                     a_ten.size,
                                     external_data=tv.from_numpy(a_ten),
                                     meta_data=tv.from_numpy(a_meta_ten))
                    b_ptr = ArrayPtr(p.dtype_b.tv_dtype,
                                     b_ten.size,
                                     external_data=tv.from_numpy(b_ten),
                                     meta_data=tv.from_numpy(b_meta_ten))
                else:
                    a_ptr = ArrayPtr(p.dtype_a.tv_dtype,
                                     a_ten.size,
                                     external_data=tv.from_numpy(a_ten),
                                     meta_data=tv.Tensor())
                    b_ptr = ArrayPtr(p.dtype_b.tv_dtype,
                                     b_ten.size,
                                     external_data=tv.from_numpy(b_ten),
                                     meta_data=tv.Tensor())
                c_ptr = ArrayPtr(p.dtype_c.tv_dtype,
                                 c_ten.size,
                                 external_data=tv.from_numpy(c_ten))
                # alpha=1, beta=0: plain conv, no accumulation into c
                params = ker.gemm_params.python_ctor(problem, a_ptr, b_ptr,
                                                     c_ptr, c_ptr, 1.0, 0.0)
                func = partial(ker.conv_kernel_python, params=params)
                blocks = params.grid_dims
                threads = cudasim.Dim3(ker.num_threads, 1, 1)
                # returns on the FIRST matching kernel
                return asyncio.run(
                    cudasim.kernel_launch(func, blocks, threads,
                                          ker.smem_size)), blocks, threads
        # no registered kernel matched the requested configuration
        raise NotImplementedError
| [
"cumm.tensorview.Tensor",
"cumm.gemm.main.GemmMainUnitTest.matmul_select_helper_stage2",
"numpy.array",
"cumm.conv.params.ConvProblem.calc_output_dims_python",
"pccm.boolean",
"cumm.conv.params.conv_iwo_012_to_abc",
"cumm.conv.params.get_gemm_trans_abc",
"cumm.conv.nvrtc_code.nvrtc_conv_template",
"... | [((1840, 1873), 'numpy.array', 'np.array', (['[*vals]'], {'dtype': 'np.int64'}), '([*vals], dtype=np.int64)\n', (1848, 1873), True, 'import numpy as np\n'), ((6871, 7546), 'cumm.conv.kernel.ConvKernel', 'kernel.ConvKernel', (['params.ndim', 'params.op_type', 'params.iter_algo', 'params.ts', 'params.wts', 'params.num_stage'], {'dtype_a': 'params.dtype_a', 'dtype_b': 'params.dtype_b', 'dtype_c': 'params.dtype_c', 'dtype_acc': 'params.dtype_acc', 'dtype_comp': 'params.dtype_comp', 'layout_desp_input': 'params.layout_desp_input', 'layout_desp_output': 'params.layout_desp_output', 'layout_desp_weight': 'params.layout_desp_weight', 'algo': 'params.algo', 'tensorop': 'params.tensorop', 'splitk_serial': 'params.splitk_serial', 'splitk_parallel': 'params.splitk_parallel', 'mask_sparse': 'params.mask_sparse', 'increment_k_first': 'params.increment_k_first', 'access_per_vector': 'params.access_per_vector', 'nvrtc_mode': 'nvrtc_mode'}), '(params.ndim, params.op_type, params.iter_algo, params.ts,\n params.wts, params.num_stage, dtype_a=params.dtype_a, dtype_b=params.\n dtype_b, dtype_c=params.dtype_c, dtype_acc=params.dtype_acc, dtype_comp\n =params.dtype_comp, layout_desp_input=params.layout_desp_input,\n layout_desp_output=params.layout_desp_output, layout_desp_weight=params\n .layout_desp_weight, algo=params.algo, tensorop=params.tensorop,\n splitk_serial=params.splitk_serial, splitk_parallel=params.\n splitk_parallel, mask_sparse=params.mask_sparse, increment_k_first=\n params.increment_k_first, access_per_vector=params.access_per_vector,\n nvrtc_mode=nvrtc_mode)\n', (6888, 7546), False, 'from cumm.conv import kernel\n'), ((2704, 2731), 'cumm.conv.params.get_gemm_trans_abc', 'get_gemm_trans_abc', (['op_type'], {}), '(op_type)\n', (2722, 2731), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((3378, 3406), 'cumm.conv.params.conv_iwo_012_to_abc', 'conv_iwo_012_to_abc', (['op_type'], {}), '(op_type)\n', 
(3397, 3406), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((21440, 21529), 'cumm.gemm.codeops.group_by', 'codeops.group_by', (['(lambda x: (x.problem.ndim, x.problem.op_type, x.iter_algo))', 'kernels'], {}), '(lambda x: (x.problem.ndim, x.problem.op_type, x.iter_algo),\n kernels)\n', (21456, 21529), False, 'from cumm.gemm import codeops\n'), ((24268, 24341), 'cumm.gemm.main.GemmMainUnitTest.matmul_select_helper_stage2', 'GemmMainUnitTest.matmul_select_helper_stage2', (['kernels', 'code', '(False)', '(False)'], {}), '(kernels, code, False, False)\n', (24312, 24341), False, 'from cumm.gemm.main import GemmAlgoParams, GemmMainUnitTest, NVRTCMode\n'), ((24529, 24540), 'pccm.code', 'pccm.code', ([], {}), '()\n', (24538, 24540), False, 'import pccm\n'), ((25137, 25148), 'pccm.code', 'pccm.code', ([], {}), '()\n', (25146, 25148), False, 'import pccm\n'), ((25680, 25705), 'cumm.conv.nvrtc_code.nvrtc_conv_template', 'nvrtc_conv_template', (['code'], {}), '(code)\n', (25699, 25705), False, 'from cumm.conv.nvrtc_code import nvrtc_conv_template\n'), ((35436, 35447), 'pccm.code', 'pccm.code', ([], {}), '()\n', (35445, 35447), False, 'import pccm\n'), ((8600, 8629), 'os.getenv', 'os.getenv', (['"""CUMM_DEBUG"""', 'None'], {}), "('CUMM_DEBUG', None)\n", (8609, 8629), False, 'import os\n'), ((25915, 25955), 'cumm.conv.params.conv_iwo_012_to_abc', 'conv_iwo_012_to_abc', (['ker.problem.op_type'], {}), '(ker.problem.op_type)\n', (25934, 25955), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((25982, 26022), 'cumm.conv.params.gemm_abc_012_to_iwo', 'gemm_abc_012_to_iwo', (['ker.problem.op_type'], {}), '(ker.problem.op_type)\n', (26001, 26022), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((40072, 40102), 'cumm.conv.params.conv_iwo_012_to_abc', 'conv_iwo_012_to_abc', 
(['p.op_type'], {}), '(p.op_type)\n', (40091, 40102), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((40129, 40159), 'cumm.conv.params.gemm_abc_012_to_iwo', 'gemm_abc_012_to_iwo', (['p.op_type'], {}), '(p.op_type)\n', (40148, 40159), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((21976, 22047), 'cumm.gemm.codeops.group_by', 'codeops.group_by', (['ConvMainUnitTest._get_layout_types', 'ndim_op_iter_kers'], {}), '(ConvMainUnitTest._get_layout_types, ndim_op_iter_kers)\n', (21992, 22047), False, 'from cumm.gemm import codeops\n'), ((42342, 42427), 'cumm.conv.params.ConvProblem.calc_output_dims_python', 'ConvProblem.calc_output_dims_python', (['input_dims', 'ksize', 'padding', 'stride', 'dilation'], {}), '(input_dims, ksize, padding, stride,\n dilation)\n', (42377, 42427), False, 'from cumm.conv.params import ConvProblem, conv_iwo_012_to_abc, gemm_abc_012_to_iwo, get_gemm_trans_abc\n'), ((43423, 43445), 'cumm.cudasim.enable_debug', 'cudasim.enable_debug', ([], {}), '()\n', (43443, 43445), False, 'from cumm import cudasim, dtypes\n'), ((44797, 44843), 'functools.partial', 'partial', (['ker.conv_kernel_python'], {'params': 'params'}), '(ker.conv_kernel_python, params=params)\n', (44804, 44843), False, 'from functools import partial\n'), ((44912, 44947), 'cumm.cudasim.Dim3', 'cudasim.Dim3', (['ker.num_threads', '(1)', '(1)'], {}), '(ker.num_threads, 1, 1)\n', (44924, 44947), False, 'from cumm import cudasim, dtypes\n'), ((22563, 22634), 'cumm.gemm.codeops.group_by', 'codeops.group_by', (['ConvMainUnitTest._get_layout_interleaves', 'lilwlo_kers'], {}), '(ConvMainUnitTest._get_layout_interleaves, lilwlo_kers)\n', (22579, 22634), False, 'from cumm.gemm import codeops\n'), ((36043, 36068), 'pccm.boolean', 'pccm.boolean', (['ker.trans_a'], {}), '(ker.trans_a)\n', (36055, 36068), False, 'import pccm\n'), ((36102, 36127), 'pccm.boolean', 
'pccm.boolean', (['ker.trans_b'], {}), '(ker.trans_b)\n', (36114, 36127), False, 'import pccm\n'), ((36161, 36186), 'pccm.boolean', 'pccm.boolean', (['ker.trans_c'], {}), '(ker.trans_c)\n', (36173, 36186), False, 'import pccm\n'), ((36838, 36869), 'pccm.boolean', 'pccm.boolean', (['ker.splitk_serial'], {}), '(ker.splitk_serial)\n', (36850, 36869), False, 'import pccm\n'), ((36912, 36945), 'pccm.boolean', 'pccm.boolean', (['ker.splitk_parallel'], {}), '(ker.splitk_parallel)\n', (36924, 36945), False, 'import pccm\n'), ((38240, 38269), 'pccm.boolean', 'pccm.boolean', (['ker.mask_sparse'], {}), '(ker.mask_sparse)\n', (38252, 38269), False, 'import pccm\n'), ((38310, 38345), 'pccm.boolean', 'pccm.boolean', (['ker.increment_k_first'], {}), '(ker.increment_k_first)\n', (38322, 38345), False, 'import pccm\n'), ((44599, 44619), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['c_ten'], {}), '(c_ten)\n', (44612, 44619), True, 'from cumm import tensorview as tv\n'), ((45004, 45063), 'cumm.cudasim.kernel_launch', 'cudasim.kernel_launch', (['func', 'blocks', 'threads', 'ker.smem_size'], {}), '(func, blocks, threads, ker.smem_size)\n', (45025, 45063), False, 'from cumm import cudasim, dtypes\n'), ((9767, 9787), 'cumm.gemm.algospec.core.TensorOp', 'TensorOp', (['(16, 8, 8)'], {}), '((16, 8, 8))\n', (9775, 9787), False, 'from cumm.gemm.algospec.core import TensorOp\n'), ((43604, 43624), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['a_ten'], {}), '(a_ten)\n', (43617, 43624), True, 'from cumm import tensorview as tv\n'), ((43673, 43698), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['a_meta_ten'], {}), '(a_meta_ten)\n', (43686, 43698), True, 'from cumm import tensorview as tv\n'), ((43857, 43877), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['b_ten'], {}), '(b_ten)\n', (43870, 43877), True, 'from cumm import tensorview as tv\n'), ((43926, 43951), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['b_meta_ten'], {}), '(b_meta_ten)\n', (43939, 43951), True, 'from cumm import 
tensorview as tv\n'), ((44132, 44152), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['a_ten'], {}), '(a_ten)\n', (44145, 44152), True, 'from cumm import tensorview as tv\n'), ((44201, 44212), 'cumm.tensorview.Tensor', 'tv.Tensor', ([], {}), '()\n', (44210, 44212), True, 'from cumm import tensorview as tv\n'), ((44371, 44391), 'cumm.tensorview.from_numpy', 'tv.from_numpy', (['b_ten'], {}), '(b_ten)\n', (44384, 44391), True, 'from cumm import tensorview as tv\n'), ((44440, 44451), 'cumm.tensorview.Tensor', 'tv.Tensor', ([], {}), '()\n', (44449, 44451), True, 'from cumm import tensorview as tv\n'), ((23223, 23292), 'cumm.gemm.codeops.group_by', 'codeops.group_by', (['ConvMainUnitTest._get_sparse_params', 'liilwiloi_kers'], {}), '(ConvMainUnitTest._get_sparse_params, liilwiloi_kers)\n', (23239, 23292), False, 'from cumm.gemm import codeops\n'), ((23801, 23827), 'pccm.boolean', 'pccm.boolean', (['ms_ikf_mw[0]'], {}), '(ms_ikf_mw[0])\n', (23813, 23827), False, 'import pccm\n'), ((23905, 23931), 'pccm.boolean', 'pccm.boolean', (['ms_ikf_mw[1]'], {}), '(ms_ikf_mw[1])\n', (23917, 23931), False, 'import pccm\n')] |
import cv2
import csv
import numpy as np
import myutils as utils
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
import os
import emoji
from PIL import Image, ImageFont, ImageDraw
# Allow duplicate OpenMP runtimes to coexist in one process; presumably a
# workaround for the TensorFlow + OpenCV libiomp clash — TODO confirm.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Input/output directories, relative to this script's working directory.
assignmentImgsPath = "../assessment-images-raw/"
assignmentKeysPath = "../assessment-keys/"
# All image files (assignment submissions) to be graded.
filenames = utils.getImageFiles(assignmentImgsPath)
extension = ".JPEG"
# Working resolution every submission photo is normalized to (portrait).
heightImg = 640
widthImg = 480
imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # NOTE(review): unused below
# Directory where each cropped MCQ rectangle image gets written.
mcqRectFolderPath = "../assessment-mcqs/"
# Pre-trained Keras model that classifies a cropped MCQ strip as answer A-D.
modelFilename = "Control-10_Epochs.h5"
mcqPredictionModel = load_model(modelFilename)
# Iterate each file (assignment submission) in directory path
for filename in filenames:
    # Get the name of the image file without extension
    filename = filename.split(".")[0]
    pathImage = str(assignmentImgsPath + filename + extension)
    img = cv2.imread(pathImage)
    print(pathImage)
    # Get answer key
    # The single row of key.csv holds the correct answers, one letter per MCQ.
    with open(assignmentKeysPath + "key" + '.csv', newline='') as f:
        reader = csv.reader(f)
        data = list(reader)
    [assessmentKeys] = data
    # Resize the image to a low resolution image
    height, width, channels = img.shape  # NOTE(review): unused
    img = cv2.resize(img, (widthImg, heightImg))
    # Use OpenCV library to find contours on the image
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgThreshold = cv2.Canny(imgBlur, 90, 90)
    # Find the biggest contour
    # Biggest contour on the image file will be the assignment sheet
    imgBigContour = img.copy()
    contours, hierarchy = cv2.findContours(imgThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    biggest, maxArea = utils.biggestContour(contours)
    # If picture is not taken correctly, capturing all the edges of the assignment sheet
    if biggest.size == 0:
        print("Cannot find the biggest contour on the image")
        continue
    # If the assignment sheet is warped on the image, fix it.
    # Perspective-correct the sheet so MCQ boxes sit at predictable positions.
    biggest = utils.reorder(biggest)
    cv2.drawContours(imgBigContour, biggest, -1, (0, 255, 0), 20)
    imgBigContour = utils.drawRectangle(imgBigContour, biggest, 2)
    pts1 = np.float32(biggest)
    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))
    # Trim a 20 px border (removes edge artifacts from warping), then restore size.
    imgWarpColored = imgWarpColored[20:imgWarpColored.shape[0] - 20, 20:imgWarpColored.shape[1] - 20]
    imgWarpColored = cv2.resize(imgWarpColored, (widthImg, heightImg))
    imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)  # NOTE(review): unused
    # Find contours on the assignment sheet
    docGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
    docBlur = cv2.GaussianBlur(docGray, (7,7), 1)
    docCanny = cv2.Canny(docBlur, 50, 50)
    docContour = imgWarpColored.copy()
    docContours, hierarchy = cv2.findContours(docCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Identify the MCQ rectangles from the contours
    # Heuristic: a mid-sized 4-ish-corner shape that is much wider than tall.
    mcqRects = []
    for index, cnt in enumerate(docContours):
        area = cv2.contourArea(cnt)
        if area > 500 and area < 4000:
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
            objCor = len(approx)  # NOTE(review): unused
            x, y, w, h = cv2.boundingRect(approx)
            if (2.5 * h) < w < (5.5 * h):
                cv2.drawContours(docContour, cnt, -1, (255, 255, 0), 3)
                mcqRects.append([x, y, w, h])
    # Sort the MCQ rectangles based on their Y values
    # (top-to-bottom order so index lines up with the answer key order)
    mcqRects.sort(key=lambda rect: rect[1])
    docPredictions = docContour.copy()
    for index, mcqRect in enumerate(mcqRects):
        [x, y, w, h] = mcqRect
        # Re-center a fixed 72x21 crop window inside the detected rectangle;
        # presumably the size the model was trained on — TODO confirm.
        y = y + ((h - 21) // 2)
        x = x + ((w - 72) // 2)
        # Crop and store the MCQ rectangles each into separate files
        mcqImage = imgWarpColored[y:y + 21, x:x + 72]
        mcqFilePath = mcqRectFolderPath + filename + "-" + str(index + 1) + ".jpg"
        cv2.imwrite(mcqFilePath, mcqImage)
        # Prep MCQ image for model prediction
        image_shape = mcqImage.shape  # NOTE(review): unused
        mcqImage = image.img_to_array(mcqImage)
        mcqImage = np.expand_dims(mcqImage, axis=0)
        # Make predictions
        # NOTE(review): Sequential.predict_classes was removed in TF >= 2.6;
        # np.argmax(model.predict(x), axis=-1) is the modern equivalent — confirm TF version.
        predictions = mcqPredictionModel.predict_classes(mcqImage)
        # Map the class index 0-3 onto the answer letters; 'N' = no prediction.
        predictedAnswer = 'N'
        if predictions[0] == 0:
            predictedAnswer = 'A'
        elif predictions[0] == 1:
            predictedAnswer = 'B'
        elif predictions[0] == 2:
            predictedAnswer = 'C'
        elif predictions[0] == 3:
            predictedAnswer = 'D'
        # Extend the rect record to [x, y, w, h, predicted, expected, is_correct].
        mcqRect.append(predictedAnswer)
        mcqRect.append(assessmentKeys[index])
        mcqRect.append(predictedAnswer == assessmentKeys[index])
        cv2.putText(docPredictions, predictedAnswer, (x + 85, y + 15),
                    cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0), 2)
        print("predictions", predictions)
    # Write evaluations on assessment sheet
    # PIL is used here because cv2.putText cannot render the check/cross glyphs.
    docEvaluation = Image.fromarray(docPredictions)
    draw = ImageDraw.Draw(docEvaluation)
    font = ImageFont.truetype("Arial Unicode.ttf", 32)
    correctAnswerCount = 0
    for index, mcqRect in enumerate(mcqRects):
        if mcqRect[6]:
            correctAnswerCount = correctAnswerCount + 1
            draw.text((mcqRect[0] + 105, mcqRect[1] - 20), "\u2713", (0, 255, 0), font=font)
        else:
            draw.text((mcqRect[0] + 105, mcqRect[1] - 20), "\u2717", (0, 0, 255), font=font)
    # Write score on the assessment sheet
    docScore = np.array(docEvaluation).copy()
    score = (correctAnswerCount * 100) // len(mcqRects)
    cv2.putText(docScore, str(score) + "%", (300, 100),
                cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 0), 2)
    # Show the transformation as stacked images
    imageArray = (
        [img, imgThreshold, imgBigContour, imgWarpColored],
        [docContour, docPredictions, np.array(docEvaluation), docScore]
    )
    labels = [
        ["Original Image", "Find All Contours", "Find Largest Contour", "Warp Assessment Sheet"],
        ["Find MCQs", "Make Predictions", "Evaluate Answers", "Score Assignment"]
    ]
    stackedImage = utils.stackImages(imageArray, 0.75, labels)
    cv2.imshow("Result", stackedImage)
cv2.waitKey(0) | [
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"PIL.ImageDraw.Draw",
"tensorflow.keras.models.load_model",
"cv2.approxPolyDP",
"cv2.arcLength",
"myutils.biggestContour",
"PIL.ImageFont.truetype",
"cv2.contourArea",
"tensorflow.keras.preprocessing.image.img_to_array",
"cv2.waitKey",
"csv... | [((378, 417), 'myutils.getImageFiles', 'utils.getImageFiles', (['assignmentImgsPath'], {}), '(assignmentImgsPath)\n', (397, 417), True, 'import myutils as utils\n'), ((480, 524), 'numpy.zeros', 'np.zeros', (['(heightImg, widthImg, 3)', 'np.uint8'], {}), '((heightImg, widthImg, 3), np.uint8)\n', (488, 524), True, 'import numpy as np\n'), ((628, 653), 'tensorflow.keras.models.load_model', 'load_model', (['modelFilename'], {}), '(modelFilename)\n', (638, 653), False, 'from tensorflow.keras.models import load_model\n'), ((911, 932), 'cv2.imread', 'cv2.imread', (['pathImage'], {}), '(pathImage)\n', (921, 932), False, 'import cv2\n'), ((1232, 1270), 'cv2.resize', 'cv2.resize', (['img', '(widthImg, heightImg)'], {}), '(img, (widthImg, heightImg))\n', (1242, 1270), False, 'import cv2\n'), ((1341, 1378), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1353, 1378), False, 'import cv2\n'), ((1393, 1429), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['imgGray', '(5, 5)', '(1)'], {}), '(imgGray, (5, 5), 1)\n', (1409, 1429), False, 'import cv2\n'), ((1449, 1475), 'cv2.Canny', 'cv2.Canny', (['imgBlur', '(90)', '(90)'], {}), '(imgBlur, 90, 90)\n', (1458, 1475), False, 'import cv2\n'), ((1634, 1708), 'cv2.findContours', 'cv2.findContours', (['imgThreshold', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(imgThreshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1650, 1708), False, 'import cv2\n'), ((1732, 1762), 'myutils.biggestContour', 'utils.biggestContour', (['contours'], {}), '(contours)\n', (1752, 1762), True, 'import myutils as utils\n'), ((2033, 2055), 'myutils.reorder', 'utils.reorder', (['biggest'], {}), '(biggest)\n', (2046, 2055), True, 'import myutils as utils\n'), ((2060, 2121), 'cv2.drawContours', 'cv2.drawContours', (['imgBigContour', 'biggest', '(-1)', '(0, 255, 0)', '(20)'], {}), '(imgBigContour, biggest, -1, (0, 255, 0), 20)\n', (2076, 2121), False, 'import cv2\n'), ((2142, 2188), 
'myutils.drawRectangle', 'utils.drawRectangle', (['imgBigContour', 'biggest', '(2)'], {}), '(imgBigContour, biggest, 2)\n', (2161, 2188), True, 'import myutils as utils\n'), ((2200, 2219), 'numpy.float32', 'np.float32', (['biggest'], {}), '(biggest)\n', (2210, 2219), True, 'import numpy as np\n'), ((2231, 2305), 'numpy.float32', 'np.float32', (['[[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]]'], {}), '([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])\n', (2241, 2305), True, 'import numpy as np\n'), ((2319, 2358), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (2346, 2358), False, 'import cv2\n'), ((2380, 2435), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'matrix', '(widthImg, heightImg)'], {}), '(img, matrix, (widthImg, heightImg))\n', (2399, 2435), False, 'import cv2\n'), ((2559, 2608), 'cv2.resize', 'cv2.resize', (['imgWarpColored', '(widthImg, heightImg)'], {}), '(imgWarpColored, (widthImg, heightImg))\n', (2569, 2608), False, 'import cv2\n'), ((2627, 2675), 'cv2.cvtColor', 'cv2.cvtColor', (['imgWarpColored', 'cv2.COLOR_BGR2GRAY'], {}), '(imgWarpColored, cv2.COLOR_BGR2GRAY)\n', (2639, 2675), False, 'import cv2\n'), ((2735, 2783), 'cv2.cvtColor', 'cv2.cvtColor', (['imgWarpColored', 'cv2.COLOR_BGR2GRAY'], {}), '(imgWarpColored, cv2.COLOR_BGR2GRAY)\n', (2747, 2783), False, 'import cv2\n'), ((2798, 2834), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['docGray', '(7, 7)', '(1)'], {}), '(docGray, (7, 7), 1)\n', (2814, 2834), False, 'import cv2\n'), ((2849, 2875), 'cv2.Canny', 'cv2.Canny', (['docBlur', '(50)', '(50)'], {}), '(docBlur, 50, 50)\n', (2858, 2875), False, 'import cv2\n'), ((2944, 3012), 'cv2.findContours', 'cv2.findContours', (['docCanny', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(docCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (2960, 3012), False, 'import cv2\n'), ((5060, 5091), 'PIL.Image.fromarray', 'Image.fromarray', (['docPredictions'], 
{}), '(docPredictions)\n', (5075, 5091), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((5103, 5132), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['docEvaluation'], {}), '(docEvaluation)\n', (5117, 5132), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((5144, 5187), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""Arial Unicode.ttf"""', '(32)'], {}), "('Arial Unicode.ttf', 32)\n", (5162, 5187), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((6229, 6272), 'myutils.stackImages', 'utils.stackImages', (['imageArray', '(0.75)', 'labels'], {}), '(imageArray, 0.75, labels)\n', (6246, 6272), True, 'import myutils as utils\n'), ((6277, 6311), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'stackedImage'], {}), "('Result', stackedImage)\n", (6287, 6311), False, 'import cv2\n'), ((6316, 6330), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6327, 6330), False, 'import cv2\n'), ((1062, 1075), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1072, 1075), False, 'import csv\n'), ((3145, 3165), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (3160, 3165), False, 'import cv2\n'), ((4050, 4084), 'cv2.imwrite', 'cv2.imwrite', (['mcqFilePath', 'mcqImage'], {}), '(mcqFilePath, mcqImage)\n', (4061, 4084), False, 'import cv2\n'), ((4188, 4216), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['mcqImage'], {}), '(mcqImage)\n', (4206, 4216), False, 'from tensorflow.keras.preprocessing import image\n'), ((4236, 4268), 'numpy.expand_dims', 'np.expand_dims', (['mcqImage'], {'axis': '(0)'}), '(mcqImage, axis=0)\n', (4250, 4268), True, 'import numpy as np\n'), ((4823, 4937), 'cv2.putText', 'cv2.putText', (['docPredictions', 'predictedAnswer', '(x + 85, y + 15)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.7)', '(255, 0, 0)', '(2)'], {}), '(docPredictions, predictedAnswer, (x + 85, y + 15), cv2.\n FONT_HERSHEY_COMPLEX, 0.7, (255, 0, 0), 2)\n', (4834, 4937), False, 'import cv2\n'), ((3224, 3248), 'cv2.arcLength', 
'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (3237, 3248), False, 'import cv2\n'), ((3270, 3310), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', '(0.02 * peri)', '(True)'], {}), '(cnt, 0.02 * peri, True)\n', (3286, 3310), False, 'import cv2\n'), ((3369, 3393), 'cv2.boundingRect', 'cv2.boundingRect', (['approx'], {}), '(approx)\n', (3385, 3393), False, 'import cv2\n'), ((5599, 5622), 'numpy.array', 'np.array', (['docEvaluation'], {}), '(docEvaluation)\n', (5607, 5622), True, 'import numpy as np\n'), ((5968, 5991), 'numpy.array', 'np.array', (['docEvaluation'], {}), '(docEvaluation)\n', (5976, 5991), True, 'import numpy as np\n'), ((3452, 3507), 'cv2.drawContours', 'cv2.drawContours', (['docContour', 'cnt', '(-1)', '(255, 255, 0)', '(3)'], {}), '(docContour, cnt, -1, (255, 255, 0), 3)\n', (3468, 3507), False, 'import cv2\n')] |
from carla_utils import carla
cc = carla.ColorConverter
import re
import numpy as np
import collections
import pygame
def find_weather_presets():
    """Return (carla.WeatherParameters preset, human-readable name) pairs.

    Preset attributes are CamelCase class attributes of WeatherParameters;
    the regex splits CamelCase identifiers into space-separated words.
    """
    camel_split = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

    def pretty(identifier):
        # Join the CamelCase fragments with spaces, e.g. "ClearNoon" -> "Clear Noon".
        return ' '.join(match.group(0) for match in camel_split.finditer(identifier))

    preset_names = [attr for attr in dir(carla.WeatherParameters) if re.match('[A-Z].+', attr)]
    return [(getattr(carla.WeatherParameters, attr), pretty(attr)) for attr in preset_names]
def get_actor_display_name(actor, truncate=250):
    """Build a readable display name from an actor's type_id.

    "vehicle.tesla.model_3" becomes "Tesla Model 3"; names longer than
    *truncate* are cut and suffixed with an ellipsis character.
    """
    words = actor.type_id.replace('_', '.').title().split('.')
    name = ' '.join(words[1:])
    if len(name) > truncate:
        return name[:truncate - 1] + u'\u2026'
    return name
def make_surface(carla_image):
    """Convert a carla sensor image into a pygame surface.

    The raw buffer is BGRA; channels 2..0 are taken in reverse to get RGB,
    and axes are swapped because pygame surfaces are (width, height).
    """
    carla_image.convert(cc.Raw)
    pixels = np.frombuffer(carla_image.raw_data, dtype=np.dtype("uint8"))
    rgb = pixels.reshape(carla_image.height, carla_image.width, 4)[:, :, 2::-1]
    return pygame.surfarray.make_surface(rgb.swapaxes(0, 1))
def parse_collision_history(history):
    """Aggregate collision intensities per frame.

    *history* is an iterable of (frame, data, intensity) triples (or falsy);
    returns a defaultdict mapping frame -> summed intensity.
    """
    totals = collections.defaultdict(int)
    for frame, _data, intensity in (history or []):
        totals[frame] += intensity
    return totals
class Util(object):
    """Small grab-bag of stateless rendering/geometry helpers."""

    @staticmethod
    def blits(destination_surface, source_surfaces, rect=None, blend_mode=0):
        """Blit every (surface, position) entry of *source_surfaces* onto the destination."""
        for entry in source_surfaces:
            destination_surface.blit(entry[0], entry[1], rect, blend_mode)

    @staticmethod
    def length(v):
        """Euclidean norm of a 3D vector with .x/.y/.z attributes."""
        return np.sqrt(v.x * v.x + v.y * v.y + v.z * v.z)

    @staticmethod
    def get_bounding_box(actor):
        """World-space corner ring of an actor's trigger volume.

        Builds the four XY corners (closed: first corner repeated last),
        offsets them by the volume's local location, then transforms the
        ring into world coordinates via the actor transform.
        """
        extent = actor.trigger_volume.extent
        signs = [(-1, -1), (1, -1), (1, 1), (-1, 1), (-1, -1)]
        corners = [carla.Location(x=sx * extent.x, y=sy * extent.y) for sx, sy in signs]
        corners = [corner + actor.trigger_volume.location for corner in corners]
        transform = actor.get_transform()
        transform.transform(corners)
        return corners
# Shared drawing colors used by the text-overlay widgets below.
COLOR_WHITE = pygame.Color(255, 255, 255)
COLOR_BLACK = pygame.Color(0, 0, 0)
class FadingText(object):
    """Overlay text widget whose message fades out over a configurable delay."""

    def __init__(self, font, dim, pos):
        """Remember font, surface dimensions and screen position; start blank."""
        self.font = font
        self.dim = dim
        self.pos = pos
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)

    def set_text(self, text, color=COLOR_WHITE, seconds=2.0):
        """Show *text* in *color*; it fades away over *seconds* seconds."""
        rendered = self.font.render(text, True, color)
        canvas = pygame.Surface(self.dim)
        canvas.fill(COLOR_BLACK)
        canvas.blit(rendered, (10, 11))
        self.surface = canvas
        self.seconds_left = seconds

    def tick(self, clock):
        """Per-frame update: shrink the remaining time and dim the alpha accordingly."""
        elapsed = 1e-3 * clock.get_time()
        remaining = self.seconds_left - elapsed
        self.seconds_left = remaining if remaining > 0.0 else 0.0
        self.surface.set_alpha(500.0 * self.seconds_left)

    def render(self, display):
        """Blit the (possibly faded) text surface at its configured position."""
        display.blit(self.surface, self.pos)
class HelpText(object):
    # Pre-renders the whole help overlay once in __init__; `toggle` only flips
    # the visibility flag checked by `render`.
    def __init__(self, font, width, height):
        """Renders the help text that shows the controls for using no rendering mode"""
        # NOTE(review): relies on the module docstring (__doc__); if this module
        # has no docstring, __doc__ is None and .split raises — confirm.
        lines = __doc__.split('\n')
        self.font = font
        # 22 px per line plus a small margin.
        self.dim = (680, len(lines) * 22 + 12)
        # Center the help box on a (width x height) display.
        self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)
        self.surface.fill(COLOR_BLACK)
        for n, line in enumerate(lines):
            text_texture = self.font.render(line, True, COLOR_WHITE)
            self.surface.blit(text_texture, (22, n * 22))
        self._render = False
        # Slight transparency so the scene remains visible behind the help box.
        self.surface.set_alpha(220)
    def toggle(self):
        """Toggles display of help text"""
        self._render = not self._render
    def render(self, display):
        """Renders the help text, if enabled"""
        if self._render:
            display.blit(self.surface, self.pos)
| [
"numpy.sqrt",
"numpy.reshape",
"re.compile",
"pygame.Surface",
"re.match",
"collections.defaultdict",
"carla_utils.carla.Location",
"pygame.Color",
"numpy.dtype"
] | [((2267, 2294), 'pygame.Color', 'pygame.Color', (['(255)', '(255)', '(255)'], {}), '(255, 255, 255)\n', (2279, 2294), False, 'import pygame\n'), ((2309, 2330), 'pygame.Color', 'pygame.Color', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2321, 2330), False, 'import pygame\n'), ((159, 226), 're.compile', 're.compile', (['""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"""'], {}), "('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n", (169, 226), False, 'import re\n'), ((807, 868), 'numpy.reshape', 'np.reshape', (['array', '(carla_image.height, carla_image.width, 4)'], {}), '(array, (carla_image.height, carla_image.width, 4))\n', (817, 868), True, 'import numpy as np\n'), ((1048, 1076), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (1071, 1076), False, 'import collections\n'), ((1634, 1673), 'numpy.sqrt', 'np.sqrt', (['(v.x ** 2 + v.y ** 2 + v.z ** 2)'], {}), '(v.x ** 2 + v.y ** 2 + v.z ** 2)\n', (1641, 1673), True, 'import numpy as np\n'), ((2684, 2708), 'pygame.Surface', 'pygame.Surface', (['self.dim'], {}), '(self.dim)\n', (2698, 2708), False, 'import pygame\n'), ((2916, 2940), 'pygame.Surface', 'pygame.Surface', (['self.dim'], {}), '(self.dim)\n', (2930, 2940), False, 'import pygame\n'), ((3909, 3933), 'pygame.Surface', 'pygame.Surface', (['self.dim'], {}), '(self.dim)\n', (3923, 3933), False, 'import pygame\n'), ((352, 374), 're.match', 're.match', (['"""[A-Z].+"""', 'x'], {}), "('[A-Z].+', x)\n", (360, 374), False, 'import re\n'), ((776, 793), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (784, 793), True, 'import numpy as np\n'), ((1851, 1883), 'carla_utils.carla.Location', 'carla.Location', ([], {'x': '(-bb.x)', 'y': '(-bb.y)'}), '(x=-bb.x, y=-bb.y)\n', (1865, 1883), False, 'from carla_utils import carla\n'), ((1904, 1935), 'carla_utils.carla.Location', 'carla.Location', ([], {'x': 'bb.x', 'y': '(-bb.y)'}), '(x=bb.x, y=-bb.y)\n', (1918, 1935), False, 'from carla_utils import carla\n'), 
((1956, 1986), 'carla_utils.carla.Location', 'carla.Location', ([], {'x': 'bb.x', 'y': 'bb.y'}), '(x=bb.x, y=bb.y)\n', (1970, 1986), False, 'from carla_utils import carla\n'), ((2007, 2038), 'carla_utils.carla.Location', 'carla.Location', ([], {'x': '(-bb.x)', 'y': 'bb.y'}), '(x=-bb.x, y=bb.y)\n', (2021, 2038), False, 'from carla_utils import carla\n'), ((2059, 2091), 'carla_utils.carla.Location', 'carla.Location', ([], {'x': '(-bb.x)', 'y': '(-bb.y)'}), '(x=-bb.x, y=-bb.y)\n', (2073, 2091), False, 'from carla_utils import carla\n')] |
import numpy as np
from pytest import fixture
from evobench.linkage.dsm import DependencyStructureMatrix
from evobench.model import Population, Solution
from .cec2013lsgo import CEC2013LSGO
class Helpers:
    """Shared test routines, parametrized by a CEC2013LSGO benchmark fixture."""

    @staticmethod
    def test_evaluate_solution(benchmark: CEC2013LSGO):
        """Evaluating one all-zero genome must yield a plain float fitness."""
        zero_genome = np.zeros(shape=benchmark.genome_size)
        fitness = benchmark.evaluate_solution(Solution(zero_genome))
        assert isinstance(fitness, float)

    @staticmethod
    def test_evaluate_population(benchmark: CEC2013LSGO):
        """Evaluating a 5-genome population must yield one fitness per member."""
        # Constant genomes at several magnitudes, including an extreme one (300).
        scales = np.array([0, 1, 2, 3, 300])
        genomes = np.ones(shape=(5, benchmark.genome_size)) * scales[:, None]
        members = [Solution(genome) for genome in genomes]
        population = Population(members)
        fitness = benchmark.evaluate_population(population)
        assert isinstance(fitness, np.ndarray)
        assert len(fitness) == population.size

    @staticmethod
    def test_dsm(benchmark: CEC2013LSGO):
        """The benchmark must expose a square genome_size x genome_size DSM."""
        size = benchmark.genome_size
        dsm = benchmark.dsm
        assert isinstance(dsm, DependencyStructureMatrix)
        assert dsm.interactions.shape == (size, size)
@fixture(scope="session")
def helpers() -> Helpers:
    """Session-scoped fixture exposing the shared Helpers test routines."""
    return Helpers
| [
"evobench.model.Solution",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"pytest.fixture",
"evobench.model.Population"
] | [((1195, 1219), 'pytest.fixture', 'fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1202, 1219), False, 'from pytest import fixture\n'), ((301, 338), 'numpy.zeros', 'np.zeros', ([], {'shape': 'benchmark.genome_size'}), '(shape=benchmark.genome_size)\n', (309, 338), True, 'import numpy as np\n'), ((358, 374), 'evobench.model.Solution', 'Solution', (['genome'], {}), '(genome)\n', (366, 374), False, 'from evobench.model import Population, Solution\n'), ((568, 609), 'numpy.ones', 'np.ones', ([], {'shape': '(5, benchmark.genome_size)'}), '(shape=(5, benchmark.genome_size))\n', (575, 609), True, 'import numpy as np\n'), ((628, 655), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 300]'], {}), '([0, 1, 2, 3, 300])\n', (636, 655), True, 'import numpy as np\n'), ((787, 808), 'evobench.model.Population', 'Population', (['solutions'], {}), '(solutions)\n', (797, 808), False, 'from evobench.model import Population, Solution\n'), ((726, 742), 'evobench.model.Solution', 'Solution', (['genome'], {}), '(genome)\n', (734, 742), False, 'from evobench.model import Population, Solution\n')] |
"""
heatmap demo
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from freeplot.base import FreePlot
# One panel per title; the same row/column labels are reused for every panel.
titles = ("S", "h", "a", "n")
row_labels = ('c', 'u', 't', 'e')
col_labels = ('l', 'r', 'i', 'g')
# shape: 1, 4; figsize: 9, 2
fp = FreePlot((1, 4), (9, 2), titles=titles, dpi=100, sharey=True)
# Fill each panel with a random 4x4 heatmap, annotated with 4-decimal values.
for title in titles:
    data = np.random.rand(4, 4)
    df = pd.DataFrame(data, index=col_labels, columns=row_labels)
    fp.heatmap(df, index=title, annot=True, fmt=".4f", cbar=False, linewidth=0.5)
fp.set(Xlabel="X")
# Only the leftmost panel gets a y-axis label (axes share y).
fp.set_label('Y', index=(0, 0), axis='y')
fp.savefig("heatmap_demo.pdf", format="pdf", tight_layout=False)
# plt.show() | [
"pandas.DataFrame",
"numpy.random.rand",
"freeplot.base.FreePlot"
] | [((268, 329), 'freeplot.base.FreePlot', 'FreePlot', (['(1, 4)', '(9, 2)'], {'titles': 'titles', 'dpi': '(100)', 'sharey': '(True)'}), '((1, 4), (9, 2), titles=titles, dpi=100, sharey=True)\n', (276, 329), False, 'from freeplot.base import FreePlot\n'), ((363, 383), 'numpy.random.rand', 'np.random.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (377, 383), True, 'import numpy as np\n'), ((393, 449), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'col_labels', 'columns': 'row_labels'}), '(data, index=col_labels, columns=row_labels)\n', (405, 449), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from six import StringIO
import os.path as op
import numpy as np
import pandas as pd
import cooler
import pytest
from cooler.create import (
sanitize_records,
sanitize_pixels,
validate_pixels,
aggregate_records,
BadInputError,
)
testdir = op.dirname(op.realpath(__file__))
datadir = op.join(testdir, "data")
# Column layout of the tab-separated pairs records used throughout these tests.
# "triu" marks whether the record is already upper-triangular (1) or not (0).
columns = [
    "chrom1",
    "pos1",
    "strand1",
    "chrom2",
    "pos2",
    "strand2",
    "name",
    "pair_type",
    "triu",
]
# Well-formed records: a mix of upper- (triu=1) and lower-triangular (triu=0) pairs.
valid_data = """chr1\t1\t+\tchr2\t100\t-\t.\tLL\t1
chr2\t99\t+\tchr1\t13\t-\t.\tLL\t0
chr2\t13\t+\tchr2\t60\t-\t.\tLL\t1
chr1\t200\t+\tchr2\t50\t-\t.\tLL\t1
chr3\t11\t+\tchr3\t40\t-\t.\tLL\t1
chr1\t234\t+\tchr3\t30\t-\t.\tLL\t1
chr3\t3\t+\tchr2\t20\t-\t.\tLL\t0
chr2\t23\t+\tchr3\t11\t-\t.\tLL\t1
chr1\t123\t+\tchr1\t200\t-\t.\tLL\t1
"""
# Records on chr9, which is absent from the bin table below (should be dropped).
nuisance_chroms = """chr1\t222\t+\tchr9\t200\t-\t.\tLL\t1
chr9\t222\t+\tchr9\t200\t-\t.\tLL\t1"""
# Out-of-bounds positions: below zero and past the 300 bp chromosome end.
oob_lower = """chr1\t-1\t+\tchr1\t10\t+\t.\tLL\t1"""
oob_upper = """chr1\t123\t+\tchr1\t301\t+\t.\tLL\t1"""
# Shared bin table: three 300 bp chromosomes split into 10 bp bins.
binsize = 10
chromsizes = pd.Series(index=["chr1", "chr2", "chr3"], data=[300, 300, 300])
bins = cooler.util.binnify(chromsizes, binsize)
def _insert_lines(d, new):
lines = d.split("\n")
for line in new.split("\n"):
lines.insert(np.random.randint(len(lines)), line)
return "\n".join(lines)
def test_sanitize_records():
    """Exercise sanitize_records configuration paths: schema validation,
    fixed-size and variable-length bins, pre-encoded chromosome IDs, and
    the empty-chunk edge case."""
    # unknown schema name must be rejected up front
    chunk = pd.read_csv(StringIO(valid_data), sep="\t", names=columns)
    with pytest.raises(ValueError):
        sanitize_records(
            bins,
            schema="doesnotexist",
            validate=True,
            tril_action="reflect",
            is_one_based=True,
            sort=True,
        )(chunk.copy())
    # the same input passes with the supported "pairs" schema
    chunk = pd.read_csv(StringIO(valid_data), sep="\t", names=columns)
    sanitize_records(
        bins,
        schema="pairs",
        validate=True,
        tril_action="reflect",
        is_one_based=True,
        sort=True,
    )(chunk.copy())
    # variable-length bins
    chunk = pd.read_csv(StringIO(valid_data), sep="\t", names=columns)
    sanitize_records(
        pd.DataFrame({
            'chrom': ['chr1', 'chr1', 'chr2', 'chr2', 'chr3'],
            'start': [0, 150, 0, 100, 0],
            'end': [150, 300, 100, 300, 300],
        }),
        schema="pairs",
        validate=True,
        tril_action="reflect",
        is_one_based=True,
        sort=True,
    )(chunk.copy())
    # input with already enum-encoded chromosomes (decode_chroms=False)
    text = """0\t1\t+\t1\t100\t-\t.\tLL\t1
1\t99\t+\t0\t13\t-\t.\tLL\t0
1\t13\t+\t1\t60\t-\t.\tLL\t1
0\t200\t+\t1\t50\t-\t.\tLL\t1
2\t11\t+\t2\t40\t-\t.\tLL\t1
0\t234\t+\t2\t30\t-\t.\tLL\t1
2\t3\t+\t1\t20\t-\t.\tLL\t0
1\t23\t+\t2\t11\t-\t.\tLL\t1
0\t123\t+\t-1\t200\t-\t.\tLL\t1
"""
    chunk = pd.read_csv(StringIO(text), sep="\t", names=columns)
    sanitize_records(
        bins,
        schema="pairs",
        decode_chroms=False,
        validate=True,
        tril_action="reflect"
    )(chunk.copy())
    # fails on string chromosomes
    chunk = pd.read_csv(StringIO(valid_data), sep="\t", names=columns)
    with pytest.raises(BadInputError):
        sanitize_records(
            bins,
            schema="pairs",
            decode_chroms=False,
            validate=True,
            tril_action="reflect"
        )(chunk.copy())
    # empty chunk
    out = sanitize_records(
        bins,
        schema="pairs",
        validate=True,
        tril_action="reflect"
    )(chunk.iloc[0:0])
    assert len(out) == 0
def test_sanitize_records_triu_action():
    """Check the three tril_action modes: reflect, drop and raise."""
    # reflect: lower-triangular records get their two sides swapped
    text = valid_data
    chunk = pd.read_csv(StringIO(text), sep="\t", names=columns)
    out = sanitize_records(bins, schema="pairs", validate=True, tril_action="reflect")(
        chunk.copy()
    )
    is_tril = ~np.array(out["triu"], dtype=bool)
    is_tril_ix = out.index[is_tril]
    assert np.all(out.loc[is_tril_ix, "chrom1"] == chunk.loc[is_tril_ix, "chrom2"])
    assert np.all(out.loc[is_tril_ix, "chrom2"] == chunk.loc[is_tril_ix, "chrom1"])
    assert np.all(out.loc[is_tril_ix, "strand1"] == "+")
    # drop: lower-triangular records are removed entirely
    text = valid_data
    chunk = pd.read_csv(StringIO(text), sep="\t", names=columns)
    out = sanitize_records(bins, schema="pairs", validate=True, tril_action="drop")(
        chunk.copy()
    )
    is_tril = ~np.array(out["triu"], dtype=bool)
    is_tril_ix = out.index[is_tril]
    assert np.all(out.loc[is_tril_ix, "chrom1"] == chunk.loc[is_tril_ix, "chrom2"])
    assert np.all(out.loc[is_tril_ix, "chrom2"] == chunk.loc[is_tril_ix, "chrom1"])
    assert np.all(out.loc[is_tril_ix, "strand1"] == "+")
    # only the records flagged triu=1 survive
    assert len(out) == chunk["triu"].sum()
    # raise: any lower-triangular record is an error
    func = sanitize_records(bins, schema="pairs", validate=True, tril_action="raise")
    text = valid_data
    chunk = pd.read_csv(StringIO(text), sep="\t", names=columns)
    with pytest.raises(BadInputError):
        func(chunk)
def test_sanitize_records_with_strand_column():
    """When 'strand' is a sided field, reflection swaps strands along with
    chrom/pos, so reflected records keep their original '-' strand1."""
    text = valid_data
    chunk = pd.read_csv(StringIO(text), sep="\t", names=columns)
    out = sanitize_records(
        bins,
        schema="pairs",
        validate=True,
        tril_action="reflect",
        sided_fields=("chrom", "pos", "strand"),
    )(chunk.copy())
    is_tril = ~np.array(out["triu"], dtype=bool)
    assert np.all(out.loc[is_tril, "chrom1"] == chunk.loc[is_tril, "chrom2"])
    assert np.all(out.loc[is_tril, "chrom2"] == chunk.loc[is_tril, "chrom1"])
    # strand was swapped too (all inputs have strand2 == "-")
    assert np.all(out.loc[is_tril, "strand1"] == "-")
def test_sanitize_records_with_nuisance_records():
    """Records on chromosomes absent from the bin table get filtered out."""
    raw = _insert_lines(valid_data, nuisance_chroms)
    records = pd.read_csv(StringIO(raw), sep="\t", names=columns)
    sanitizer = sanitize_records(bins, schema="pairs", validate=True, tril_action="reflect")
    cleaned = sanitizer(records.copy())
    # chr9 is not in `chromsizes`, so no sanitized record may mention it
    assert "chr9" not in cleaned["chrom1"]
    assert "chr9" not in cleaned["chrom2"]
def test_sanitize_records_with_bad_records():
    """Out-of-bounds positions (below zero or past the chromosome end) must raise."""
    sanitizer = sanitize_records(bins, schema="pairs", validate=True, tril_action="reflect")
    for bad_lines in (oob_lower, oob_upper):
        corrupted = _insert_lines(valid_data, bad_lines)
        frame = pd.read_csv(StringIO(corrupted), sep="\t", names=columns)
        with pytest.raises(BadInputError):
            sanitizer(frame)
def test_sanitize_pixels():
    """Exercise sanitize_pixels: one-based offset correction and the
    reflect/drop/raise tril_action modes, including sided extra columns."""
    bins = cooler.binnify(
        cooler.util.read_chromsizes(op.join(datadir, "toy.chrom.sizes")), 1
    )
    chunk = pd.read_csv(
        op.join(datadir, "toy.symm.upper.1.zb.coo"),
        sep='\t',
        names=['bin1_id', 'bin2_id', 'count']
    )
    # extra per-side columns to test sided_fields swapping
    chunk['foo1'] = 4
    chunk['foo2'] = 2
    # defaults: upper-triangular zero-based input passes through
    sanitize_pixels(
        bins,
    )(chunk.copy())
    # one-based input is shifted down by one bin ID
    out = sanitize_pixels(
        bins,
        is_one_based=True,
    )(chunk.copy())
    assert (out['bin1_id'] == chunk['bin1_id'] - 1).all()
    # tril action
    tril_chunk = chunk.copy()
    tril_chunk['bin2_id'] = chunk['bin1_id']
    tril_chunk['bin1_id'] = chunk['bin2_id']
    # reflect: swaps bin IDs back and also swaps the sided "foo" columns
    out = sanitize_pixels(
        bins,
        tril_action="reflect",
        sided_fields=['foo'],
    )(tril_chunk.copy())
    assert len(out) == len(chunk)
    assert (out['foo2'] == chunk['foo1']).all()
    assert (out['foo1'] == chunk['foo2']).all()
    # drop: all lower-triangular pixels removed
    out = sanitize_pixels(
        bins,
        tril_action="drop",
    )(tril_chunk.copy())
    assert len(out) == 0
    # raise: lower-triangular pixels are an error
    with pytest.raises(BadInputError):
        sanitize_pixels(
            bins,
            tril_action="raise",
        )(tril_chunk.copy())
def test_validate_pixels():
    """Exercise validate_pixels: valid input passes (as frame or dict of
    series); negative, out-of-bounds, non-upper-triangle, and duplicate
    pixels each raise BadInputError."""
    bins = cooler.binnify(
        cooler.util.read_chromsizes(op.join(datadir, "toy.chrom.sizes")), 1
    )
    chunk = pd.read_csv(
        op.join(datadir, "toy.symm.upper.1.zb.coo"),
        sep='\t',
        names=['bin1_id', 'bin2_id', 'count']
    )
    validator = validate_pixels(
        len(bins),
        boundscheck=True,
        triucheck=True,
        dupcheck=True,
        ensure_sorted=True
    )
    # Valid pixels pass whether given as a DataFrame or a dict of Series.
    validator(chunk.copy())
    validator(chunk.to_dict(orient='series'))
    # wrongly assume one-based input, producing -1 bin IDs -> boundscheck fails
    chunk_ = sanitize_pixels(
        bins,
        is_one_based=True,
    )(chunk.copy())
    with pytest.raises(BadInputError):
        validator(chunk_)
    # out-of-bounds bin ID (beyond the last bin) -> boundscheck fails
    chunk_ = chunk.copy()
    chunk_.at[-1, 'bin1_id'] = len(bins) + 1
    with pytest.raises(BadInputError):
        validator(chunk_)
    # pass in non-triu data (bin1_id > bin2_id) -> triucheck fails
    tril_chunk = chunk.copy()
    tril_chunk['bin2_id'] = chunk['bin1_id']
    tril_chunk['bin1_id'] = chunk['bin2_id']
    with pytest.raises(BadInputError):
        validator(tril_chunk)
    # pass in duplicates -> dupcheck fails
    with pytest.raises(BadInputError):
        validator(pd.concat([chunk, chunk], ignore_index=True))
def test_aggregate_records():
    """Sanitized pairs can be fed through the record aggregator without error."""
    chromsizes = cooler.util.read_chromsizes(op.join(datadir, "toy.chrom.sizes"))
    bins = cooler.binnify(chromsizes, 1)
    pair_columns = [
        "read_id",
        "chrom1", "pos1",
        "chrom2", "pos2",
        "strand1", "strand2",
        "value",
    ]
    records = pd.read_csv(
        op.join(datadir, "toy.pairs"), sep='\t', names=pair_columns
    )
    sanitize = sanitize_records(
        bins,
        schema="pairs",
        validate=False,
        tril_action="reflect",
        is_one_based=False,
        sort=False,
    )
    # Aggregation is a smoke test here: it must accept sanitized records.
    aggregate_records()(sanitize(records))
| [
"pandas.Series",
"cooler.create.sanitize_pixels",
"os.path.join",
"os.path.realpath",
"numpy.array",
"cooler.util.binnify",
"cooler.create.sanitize_records",
"pytest.raises",
"six.StringIO",
"pandas.DataFrame",
"cooler.create.aggregate_records",
"numpy.all",
"pandas.concat"
] | [((378, 402), 'os.path.join', 'op.join', (['testdir', '"""data"""'], {}), "(testdir, 'data')\n", (385, 402), True, 'import os.path as op\n'), ((1115, 1178), 'pandas.Series', 'pd.Series', ([], {'index': "['chr1', 'chr2', 'chr3']", 'data': '[300, 300, 300]'}), "(index=['chr1', 'chr2', 'chr3'], data=[300, 300, 300])\n", (1124, 1178), True, 'import pandas as pd\n'), ((1186, 1226), 'cooler.util.binnify', 'cooler.util.binnify', (['chromsizes', 'binsize'], {}), '(chromsizes, binsize)\n', (1205, 1226), False, 'import cooler\n'), ((345, 366), 'os.path.realpath', 'op.realpath', (['__file__'], {}), '(__file__)\n', (356, 366), True, 'import os.path as op\n'), ((3906, 3978), 'numpy.all', 'np.all', (["(out.loc[is_tril_ix, 'chrom1'] == chunk.loc[is_tril_ix, 'chrom2'])"], {}), "(out.loc[is_tril_ix, 'chrom1'] == chunk.loc[is_tril_ix, 'chrom2'])\n", (3912, 3978), True, 'import numpy as np\n'), ((3990, 4062), 'numpy.all', 'np.all', (["(out.loc[is_tril_ix, 'chrom2'] == chunk.loc[is_tril_ix, 'chrom1'])"], {}), "(out.loc[is_tril_ix, 'chrom2'] == chunk.loc[is_tril_ix, 'chrom1'])\n", (3996, 4062), True, 'import numpy as np\n'), ((4074, 4119), 'numpy.all', 'np.all', (["(out.loc[is_tril_ix, 'strand1'] == '+')"], {}), "(out.loc[is_tril_ix, 'strand1'] == '+')\n", (4080, 4119), True, 'import numpy as np\n'), ((4416, 4488), 'numpy.all', 'np.all', (["(out.loc[is_tril_ix, 'chrom1'] == chunk.loc[is_tril_ix, 'chrom2'])"], {}), "(out.loc[is_tril_ix, 'chrom1'] == chunk.loc[is_tril_ix, 'chrom2'])\n", (4422, 4488), True, 'import numpy as np\n'), ((4500, 4572), 'numpy.all', 'np.all', (["(out.loc[is_tril_ix, 'chrom2'] == chunk.loc[is_tril_ix, 'chrom1'])"], {}), "(out.loc[is_tril_ix, 'chrom2'] == chunk.loc[is_tril_ix, 'chrom1'])\n", (4506, 4572), True, 'import numpy as np\n'), ((4584, 4629), 'numpy.all', 'np.all', (["(out.loc[is_tril_ix, 'strand1'] == '+')"], {}), "(out.loc[is_tril_ix, 'strand1'] == '+')\n", (4590, 4629), True, 'import numpy as np\n'), ((4685, 4759), 'cooler.create.sanitize_records', 
'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""raise"""'}), "(bins, schema='pairs', validate=True, tril_action='raise')\n", (4701, 4759), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((5292, 5358), 'numpy.all', 'np.all', (["(out.loc[is_tril, 'chrom1'] == chunk.loc[is_tril, 'chrom2'])"], {}), "(out.loc[is_tril, 'chrom1'] == chunk.loc[is_tril, 'chrom2'])\n", (5298, 5358), True, 'import numpy as np\n'), ((5370, 5436), 'numpy.all', 'np.all', (["(out.loc[is_tril, 'chrom2'] == chunk.loc[is_tril, 'chrom1'])"], {}), "(out.loc[is_tril, 'chrom2'] == chunk.loc[is_tril, 'chrom1'])\n", (5376, 5436), True, 'import numpy as np\n'), ((5448, 5490), 'numpy.all', 'np.all', (["(out.loc[is_tril, 'strand1'] == '-')"], {}), "(out.loc[is_tril, 'strand1'] == '-')\n", (5454, 5490), True, 'import numpy as np\n'), ((5912, 5988), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""reflect"""'}), "(bins, schema='pairs', validate=True, tril_action='reflect')\n", (5928, 5988), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((9178, 9292), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(False)', 'tril_action': '"""reflect"""', 'is_one_based': '(False)', 'sort': '(False)'}), "(bins, schema='pairs', validate=False, tril_action=\n 'reflect', is_one_based=False, sort=False)\n", (9194, 9292), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((9392, 9411), 'cooler.create.aggregate_records', 'aggregate_records', ([], {}), '()\n', (9409, 9411), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((1457, 1477), 
'six.StringIO', 'StringIO', (['valid_data'], {}), '(valid_data)\n', (1465, 1477), False, 'from six import StringIO\n'), ((1513, 1538), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1526, 1538), False, 'import pytest\n'), ((1784, 1804), 'six.StringIO', 'StringIO', (['valid_data'], {}), '(valid_data)\n', (1792, 1804), False, 'from six import StringIO\n'), ((1835, 1945), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""reflect"""', 'is_one_based': '(True)', 'sort': '(True)'}), "(bins, schema='pairs', validate=True, tril_action='reflect',\n is_one_based=True, sort=True)\n", (1851, 1945), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((2063, 2083), 'six.StringIO', 'StringIO', (['valid_data'], {}), '(valid_data)\n', (2071, 2083), False, 'from six import StringIO\n'), ((2842, 2856), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (2850, 2856), False, 'from six import StringIO\n'), ((2887, 2988), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'decode_chroms': '(False)', 'validate': '(True)', 'tril_action': '"""reflect"""'}), "(bins, schema='pairs', decode_chroms=False, validate=True,\n tril_action='reflect')\n", (2903, 2988), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((3103, 3123), 'six.StringIO', 'StringIO', (['valid_data'], {}), '(valid_data)\n', (3111, 3123), False, 'from six import StringIO\n'), ((3159, 3187), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (3172, 3187), False, 'import pytest\n'), ((3408, 3484), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""reflect"""'}), "(bins, schema='pairs', validate=True, 
tril_action='reflect')\n", (3424, 3484), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((3654, 3668), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (3662, 3668), False, 'from six import StringIO\n'), ((3705, 3781), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""reflect"""'}), "(bins, schema='pairs', validate=True, tril_action='reflect')\n", (3721, 3781), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((3825, 3858), 'numpy.array', 'np.array', (["out['triu']"], {'dtype': 'bool'}), "(out['triu'], dtype=bool)\n", (3833, 3858), True, 'import numpy as np\n'), ((4167, 4181), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (4175, 4181), False, 'from six import StringIO\n'), ((4218, 4291), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""drop"""'}), "(bins, schema='pairs', validate=True, tril_action='drop')\n", (4234, 4291), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((4335, 4368), 'numpy.array', 'np.array', (["out['triu']"], {'dtype': 'bool'}), "(out['triu'], dtype=bool)\n", (4343, 4368), True, 'import numpy as np\n'), ((4806, 4820), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (4814, 4820), False, 'from six import StringIO\n'), ((4856, 4884), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (4869, 4884), False, 'import pytest\n'), ((5002, 5016), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (5010, 5016), False, 'from six import StringIO\n'), ((5053, 5174), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""reflect"""', 
'sided_fields': "('chrom', 'pos', 'strand')"}), "(bins, schema='pairs', validate=True, tril_action='reflect',\n sided_fields=('chrom', 'pos', 'strand'))\n", (5069, 5174), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((5247, 5280), 'numpy.array', 'np.array', (["out['triu']"], {'dtype': 'bool'}), "(out['triu'], dtype=bool)\n", (5255, 5280), True, 'import numpy as np\n'), ((5622, 5636), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (5630, 5636), False, 'from six import StringIO\n'), ((5673, 5749), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'validate': '(True)', 'tril_action': '"""reflect"""'}), "(bins, schema='pairs', validate=True, tril_action='reflect')\n", (5689, 5749), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((6062, 6076), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (6070, 6076), False, 'from six import StringIO\n'), ((6112, 6140), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (6125, 6140), False, 'import pytest\n'), ((6235, 6249), 'six.StringIO', 'StringIO', (['text'], {}), '(text)\n', (6243, 6249), False, 'from six import StringIO\n'), ((6285, 6313), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (6298, 6313), False, 'import pytest\n'), ((6507, 6550), 'os.path.join', 'op.join', (['datadir', '"""toy.symm.upper.1.zb.coo"""'], {}), "(datadir, 'toy.symm.upper.1.zb.coo')\n", (6514, 6550), True, 'import os.path as op\n'), ((6670, 6691), 'cooler.create.sanitize_pixels', 'sanitize_pixels', (['bins'], {}), '(bins)\n', (6685, 6691), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((6731, 6771), 'cooler.create.sanitize_pixels', 'sanitize_pixels', (['bins'], {'is_one_based': '(True)'}), '(bins, 
is_one_based=True)\n', (6746, 6771), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((7016, 7082), 'cooler.create.sanitize_pixels', 'sanitize_pixels', (['bins'], {'tril_action': '"""reflect"""', 'sided_fields': "['foo']"}), "(bins, tril_action='reflect', sided_fields=['foo'])\n", (7031, 7082), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((7273, 7314), 'cooler.create.sanitize_pixels', 'sanitize_pixels', (['bins'], {'tril_action': '"""drop"""'}), "(bins, tril_action='drop')\n", (7288, 7314), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((7391, 7419), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (7404, 7419), False, 'import pytest\n'), ((7698, 7741), 'os.path.join', 'op.join', (['datadir', '"""toy.symm.upper.1.zb.coo"""'], {}), "(datadir, 'toy.symm.upper.1.zb.coo')\n", (7705, 7741), True, 'import os.path as op\n'), ((8114, 8154), 'cooler.create.sanitize_pixels', 'sanitize_pixels', (['bins'], {'is_one_based': '(True)'}), '(bins, is_one_based=True)\n', (8129, 8154), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((8201, 8229), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (8214, 8229), False, 'import pytest\n'), ((8365, 8393), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (8378, 8393), False, 'import pytest\n'), ((8579, 8607), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (8592, 8607), False, 'import pytest\n'), ((8674, 8702), 'pytest.raises', 'pytest.raises', (['BadInputError'], {}), '(BadInputError)\n', (8687, 8702), False, 'import pytest\n'), ((8944, 8973), 'os.path.join', 'op.join', (['datadir', '"""toy.pairs"""'], {}), 
"(datadir, 'toy.pairs')\n", (8951, 8973), True, 'import os.path as op\n'), ((1548, 1666), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""doesnotexist"""', 'validate': '(True)', 'tril_action': '"""reflect"""', 'is_one_based': '(True)', 'sort': '(True)'}), "(bins, schema='doesnotexist', validate=True, tril_action=\n 'reflect', is_one_based=True, sort=True)\n", (1564, 1666), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((2140, 2274), 'pandas.DataFrame', 'pd.DataFrame', (["{'chrom': ['chr1', 'chr1', 'chr2', 'chr2', 'chr3'], 'start': [0, 150, 0, \n 100, 0], 'end': [150, 300, 100, 300, 300]}"], {}), "({'chrom': ['chr1', 'chr1', 'chr2', 'chr2', 'chr3'], 'start': [\n 0, 150, 0, 100, 0], 'end': [150, 300, 100, 300, 300]})\n", (2152, 2274), True, 'import pandas as pd\n'), ((3197, 3298), 'cooler.create.sanitize_records', 'sanitize_records', (['bins'], {'schema': '"""pairs"""', 'decode_chroms': '(False)', 'validate': '(True)', 'tril_action': '"""reflect"""'}), "(bins, schema='pairs', decode_chroms=False, validate=True,\n tril_action='reflect')\n", (3213, 3298), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((6428, 6463), 'os.path.join', 'op.join', (['datadir', '"""toy.chrom.sizes"""'], {}), "(datadir, 'toy.chrom.sizes')\n", (6435, 6463), True, 'import os.path as op\n'), ((7429, 7471), 'cooler.create.sanitize_pixels', 'sanitize_pixels', (['bins'], {'tril_action': '"""raise"""'}), "(bins, tril_action='raise')\n", (7444, 7471), False, 'from cooler.create import sanitize_records, sanitize_pixels, validate_pixels, aggregate_records, BadInputError\n'), ((7619, 7654), 'os.path.join', 'op.join', (['datadir', '"""toy.chrom.sizes"""'], {}), "(datadir, 'toy.chrom.sizes')\n", (7626, 7654), True, 'import os.path as op\n'), ((8722, 8766), 'pandas.concat', 'pd.concat', (['[chunk, chunk]'], 
{'ignore_index': '(True)'}), '([chunk, chunk], ignore_index=True)\n', (8731, 8766), True, 'import pandas as pd\n'), ((8863, 8898), 'os.path.join', 'op.join', (['datadir', '"""toy.chrom.sizes"""'], {}), "(datadir, 'toy.chrom.sizes')\n", (8870, 8898), True, 'import os.path as op\n')] |
'''
module docstring for density of states
'''
from numpy import exp,sqrt,pi
from semic.constants.constants import value
def density_of_states(m_star=0, energy=0, conduction_band_energy=0):
    '''
    Function to find the density of quantum states as a function of
    energy for a parabolic band.
    m_star: the effective mass of a carrier in an energy band.
    energy: Energy of a particle or system, or of a particular quantum state
    conduction_band_energy: Conduction band edge energy in a semiconductor.
                            This is the potential energy for electrons, including
                            the electrostatic potential.
    D(E) = m_star*sqrt(2*m_star*(energy-conduction_band_energy))/(pi**2 * h_bar**3)
    '''
    # Fixed: use the CODATA key 'reduced Planck constant in eV s', the key
    # used by every other function in this module ('reduced Planck in eV s'
    # is not a recognized constant name).
    h_bar = value('reduced Planck constant in eV s')
    d_e = m_star*sqrt(2*m_star*(energy-conduction_band_energy)) / ((pi**2) * (h_bar**3))
    return d_e
def density_of_states_abm(m1_star=0, m2_star=0, m3_star=0, energy=0, conduction_band_energy=0):
    '''
    Function to find the density of quantum states as a function of
    energy for an anisotropic band minimum.
    m1_star, m2_star, m3_star: effective masses along the three principal axes.
    energy: Energy of a particle or system, or of a particular quantum state
    conduction_band_energy: Conduction band edge energy in a semiconductor.
                            This is the potential energy for electrons, including
                            the electrostatic potential.
    D(E) = sqrt(2*m1_star*m2_star*m3_star*(energy-conduction_band_energy))/(pi**2 * h_bar**3)
    '''
    # Fixed: use the CODATA key 'reduced Planck constant in eV s', the key
    # used by every other function in this module ('reduced Planck in eV s'
    # is not a recognized constant name).
    h_bar = value('reduced Planck constant in eV s')
    d_e = sqrt(2*m1_star*m2_star*m3_star*(energy-conduction_band_energy)) / ((pi**2) * (h_bar**3))
    return d_e
def density_of_states_non_parabolic(m_star=0, energy=0, conduction_band_energy=0, alpha=0):
    '''
    Function to find the density of quantum states as a function of
    energy for a non-parabolic energy band.
    m_star: the effective mass of a carrier in an energy band.
    energy: Energy of a particle or system, or of a particular quantum state
    conduction_band_energy: Conduction band edge energy in a semiconductor.
                            This is the potential energy for electrons, including
                            the electrostatic potential.
    alpha: non-parabolicity parameter (an arbitrary constant)
    D(E) = m_star*sqrt(2*m_star*(E-Ec)*[1+alpha*(E-Ec)])
           *[1+2*alpha*(E-Ec)] / (pi**2 * h_bar**3)
    '''
    h_bar = value('reduced Planck constant in eV s')
    delta_e = energy - conduction_band_energy
    root_term = sqrt(2*m_star*(delta_e)*(1+alpha*(delta_e)))
    return m_star*root_term*(1+2*alpha*delta_e) / ((pi**2) * (h_bar**3))
def density_of_states_two_d(m_star=0):
    '''
    Function to find the 2D density of quantum states.
    m_star: the effective mass of a carrier in an energy band.
    D_2d = m_star/(pi*h_bar**2)
    '''
    h_bar = value('reduced Planck constant in eV s')
    return m_star / (pi * (h_bar**2))
def density_of_states_one_d(m_star=0, energy=0, conduction_band_energy=0):
    '''
    Function to find the 1D density of quantum states.
    m_star: the effective mass of a carrier in an energy band.
    energy: Energy of a particle or system, or of a particular quantum state
    conduction_band_energy: Conduction band edge energy in a semiconductor.
                            This is the potential energy for electrons, including
                            the electrostatic potential.
    D_1d = sqrt(m_star/(2*(energy-conduction_band_energy))) / (pi*h_bar)
    '''
    h_bar = value('reduced Planck constant in eV s')
    root = sqrt(m_star / (2*(energy-conduction_band_energy)))
    return (1 / (pi * h_bar)) * root
def density_of_states_photon(omega=0, speed_of_light=0, refractive_index=1):
    '''
    Function to find the 3D photon density of states.
    omega: angular frequency in rad/s.
    speed_of_light: speed of light in medium.
    refractive_index: refractive index of medium
    D_photon3d = omega**2 * refractive_index**3 / (pi**2 * speed_of_light**3)
    '''
    numerator = (omega**2)*(refractive_index**3)
    denominator = (pi**2) * (speed_of_light**3)
    return numerator / denominator
def density_of_states_photon1d(refractive_index=1, speed_of_light=1):
    '''
    Function to find the 1D photon density of states.
    refractive_index: refractive index of medium.
    speed_of_light: speed of light in medium.
    D_photon1d = refractive_index / (pi * speed_of_light)
    '''
    return refractive_index / (pi * speed_of_light)
def equilibrium_energy_density(omega=0, speed_of_light=1, temp=1):
    '''
    Function to find the equilibrium energy density in an
    electromagnetic field.
    omega: angular frequency in rad/s.
    speed_of_light: speed of light in medium.
    temp: temperature in kelvin
    u_w = (h_bar*omega**3 / (pi**2 * speed_of_light**3))
          / (exp(h_bar*omega/(k_b*temp)) - 1)
    '''
    h_bar = value('reduced Planck constant in eV s')
    thermal_energy = value('Boltzmann constant in eV/K') * temp
    # Bose-Einstein-style occupation factor 1/(exp(h_bar*omega/kT) - 1)
    occupation = 1/(exp(h_bar*omega/thermal_energy) - 1)
    return ((h_bar * omega**3) / ((pi**2)*(speed_of_light**3))) * occupation
def intensity_thermal_radiation(omega=0, speed_of_light=1, temp=1):
    '''
    Function to find the intensity (flux) of thermal radiation.
    omega: angular frequency in rad/s.
    speed_of_light: speed of light in medium.
    temp: temperature in kelvin
    I_w = (h_bar*omega**3 / (4*pi**3 * speed_of_light**2))
          / (exp(h_bar*omega/(k_b*temp)) - 1)
    '''
    h_bar = value('reduced Planck constant in eV s')
    thermal_energy = value('Boltzmann constant in eV/K') * temp
    # Bose-Einstein-style occupation factor 1/(exp(h_bar*omega/kT) - 1)
    occupation = 1/(exp(h_bar*omega/(thermal_energy)) - 1)
    return ((h_bar * omega**3) / ((4*(pi**3))*(speed_of_light**2))) * occupation
| [
"numpy.exp",
"semic.constants.constants.value",
"numpy.sqrt"
] | [((738, 769), 'semic.constants.constants.value', 'value', (['"""reduced Planck in eV s"""'], {}), "('reduced Planck in eV s')\n", (743, 769), False, 'from semic.constants.constants import value\n'), ((1561, 1592), 'semic.constants.constants.value', 'value', (['"""reduced Planck in eV s"""'], {}), "('reduced Planck in eV s')\n", (1566, 1592), False, 'from semic.constants.constants import value\n'), ((2525, 2565), 'semic.constants.constants.value', 'value', (['"""reduced Planck constant in eV s"""'], {}), "('reduced Planck constant in eV s')\n", (2530, 2565), False, 'from semic.constants.constants import value\n'), ((2672, 2728), 'numpy.sqrt', 'sqrt', (['(2 * m_star * energy_sub * (1 + alpha * energy_sub))'], {}), '(2 * m_star * energy_sub * (1 + alpha * energy_sub))\n', (2676, 2728), False, 'from numpy import exp, sqrt, pi\n'), ((3028, 3068), 'semic.constants.constants.value', 'value', (['"""reduced Planck constant in eV s"""'], {}), "('reduced Planck constant in eV s')\n", (3033, 3068), False, 'from semic.constants.constants import value\n'), ((3712, 3752), 'semic.constants.constants.value', 'value', (['"""reduced Planck constant in eV s"""'], {}), "('reduced Planck constant in eV s')\n", (3717, 3752), False, 'from semic.constants.constants import value\n'), ((5134, 5174), 'semic.constants.constants.value', 'value', (['"""reduced Planck constant in eV s"""'], {}), "('reduced Planck constant in eV s')\n", (5139, 5174), False, 'from semic.constants.constants import value\n'), ((5787, 5827), 'semic.constants.constants.value', 'value', (['"""reduced Planck constant in eV s"""'], {}), "('reduced Planck constant in eV s')\n", (5792, 5827), False, 'from semic.constants.constants import value\n'), ((1604, 1677), 'numpy.sqrt', 'sqrt', (['(2 * m1_star * m2_star * m3_star * (energy - conduction_band_energy))'], {}), '(2 * m1_star * m2_star * m3_star * (energy - conduction_band_energy))\n', (1608, 1677), False, 'from numpy import exp, sqrt, pi\n'), ((3785, 3839), 
'numpy.sqrt', 'sqrt', (['(m_star / (2 * (energy - conduction_band_energy)))'], {}), '(m_star / (2 * (energy - conduction_band_energy)))\n', (3789, 3839), False, 'from numpy import exp, sqrt, pi\n'), ((5186, 5221), 'semic.constants.constants.value', 'value', (['"""Boltzmann constant in eV/K"""'], {}), "('Boltzmann constant in eV/K')\n", (5191, 5221), False, 'from semic.constants.constants import value\n'), ((5839, 5874), 'semic.constants.constants.value', 'value', (['"""Boltzmann constant in eV/K"""'], {}), "('Boltzmann constant in eV/K')\n", (5844, 5874), False, 'from semic.constants.constants import value\n'), ((787, 839), 'numpy.sqrt', 'sqrt', (['(2 * m_star * (energy - conduction_band_energy))'], {}), '(2 * m_star * (energy - conduction_band_energy))\n', (791, 839), False, 'from numpy import exp, sqrt, pi\n'), ((5251, 5276), 'numpy.exp', 'exp', (['(h_bar * omega / kb_t)'], {}), '(h_bar * omega / kb_t)\n', (5254, 5276), False, 'from numpy import exp, sqrt, pi\n'), ((5904, 5929), 'numpy.exp', 'exp', (['(h_bar * omega / kb_t)'], {}), '(h_bar * omega / kb_t)\n', (5907, 5929), False, 'from numpy import exp, sqrt, pi\n')] |
import numpy
import pytest
import torch
import torch.nn
from nagl.nn import SequentialLayers
def test_init_sequential_layers_default():
    """Default construction yields Linear -> ReLU -> Dropout with p == 0."""
    model = SequentialLayers(in_feats=1, hidden_feats=[2])
    layers = model.layers
    assert len(layers) == 3
    assert isinstance(layers[0], torch.nn.Linear)
    assert isinstance(layers[1], torch.nn.ReLU)
    assert isinstance(layers[2], torch.nn.Dropout)
    assert numpy.isclose(layers[2].p, 0.0)
def test_init_sequential_layers_inputs():
    """Explicit activations/dropouts give one Linear/activation/Dropout
    triple per hidden width, in order."""
    model = SequentialLayers(
        in_feats=1,
        hidden_feats=[2, 1],
        activation=[torch.nn.ReLU(), torch.nn.LeakyReLU()],
        dropout=[0.0, 0.5],
    )
    layers = model.layers
    assert len(layers) == 6
    expected_types = [
        torch.nn.Linear, torch.nn.ReLU, torch.nn.Dropout,
        torch.nn.Linear, torch.nn.LeakyReLU, torch.nn.Dropout,
    ]
    for layer, expected in zip(layers, expected_types):
        assert isinstance(layer, expected)
    assert numpy.isclose(layers[2].p, 0.0)
    assert numpy.isclose(layers[5].p, 0.5)
def test_init_sequential_layers_invalid():
    """Mismatched activation/dropout list lengths raise a ValueError."""
    with pytest.raises(ValueError) as error_info:
        SequentialLayers(
            in_feats=1,
            hidden_feats=[2],
            activation=[torch.nn.ReLU(), torch.nn.LeakyReLU()],
            dropout=[0.0, 0.5],
        )
    message = str(error_info.value)
    assert "The `hidden_feats`, `activation`, and `dropout` lists must be the" in message
| [
"nagl.nn.SequentialLayers",
"torch.nn.ReLU",
"numpy.isclose",
"torch.nn.LeakyReLU",
"pytest.raises"
] | [((164, 210), 'nagl.nn.SequentialLayers', 'SequentialLayers', ([], {'in_feats': '(1)', 'hidden_feats': '[2]'}), '(in_feats=1, hidden_feats=[2])\n', (180, 210), False, 'from nagl.nn import SequentialLayers\n'), ((473, 522), 'numpy.isclose', 'numpy.isclose', (['sequential_layers.layers[2].p', '(0.0)'], {}), '(sequential_layers.layers[2].p, 0.0)\n', (486, 522), False, 'import numpy\n'), ((1015, 1064), 'numpy.isclose', 'numpy.isclose', (['sequential_layers.layers[2].p', '(0.0)'], {}), '(sequential_layers.layers[2].p, 0.0)\n', (1028, 1064), False, 'import numpy\n'), ((1285, 1334), 'numpy.isclose', 'numpy.isclose', (['sequential_layers.layers[5].p', '(0.5)'], {}), '(sequential_layers.layers[5].p, 0.5)\n', (1298, 1334), False, 'import numpy\n'), ((1390, 1415), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1403, 1415), False, 'import pytest\n'), ((679, 694), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (692, 694), False, 'import torch\n'), ((696, 716), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (714, 716), False, 'import torch\n'), ((1536, 1551), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1549, 1551), False, 'import torch\n'), ((1553, 1573), 'torch.nn.LeakyReLU', 'torch.nn.LeakyReLU', ([], {}), '()\n', (1571, 1573), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.metrics import accuracy_score
mpl.rcParams["axes.spines.right"] = False
mpl.rcParams["axes.spines.top"] = False
def my_covariance(x):
    """Batched sample covariance over the last axis.

    x has shape (num_examples, num_features, num_steps); returns an array of
    shape (num_examples, num_features, num_features) where entry [i] is the
    covariance of x[i]'s rows, normalized by num_steps - 1.
    """
    num_steps = x.shape[2]
    centered = x - x.sum(2, keepdims=1) / num_steps
    return np.einsum('ijk,ilk->ijl', centered, centered) / (num_steps - 1)
class Perceptron:
    """A single-layer readout ("perceptron") with a random, fixed weight
    matrix. Inputs are run through the weights (optionally with a tanh
    nonlinearity); predictions pick the output unit with the largest mean
    activation or the largest activation variance."""

    def __init__(self, inSize, outSize, leak=1.0, random_state=42, non_linear=False):
        """Initialize random output weights of shape (outSize, inSize + 1);
        the +1 column is for the bias unit prepended in run()."""
        self.random_state = random_state
        # Seed before drawing weights so construction is reproducible.
        np.random.seed(self.random_state)
        self.inSize = inSize
        self.outSize = outSize
        # Uniform weights in [-0.5, 0.5), including the bias column.
        self.Wout = np.random.rand(self.outSize, self.inSize + 1) - 0.5
        self.leak = leak # not being used now
        self.outStates = None
        self.outCovariance = None
        self.outMean = None
        self.non_linear = non_linear
        return

    def run(self, data, initLen, trainLen, covariance=False, mean=False):
        '''Data is an array. Dimension is (numExamples, numInputs, timeLen).
        Runs the window [initLen, trainLen) through the weights and stores
        per-example output states; optionally also stores their covariance
        (via my_covariance) and/or time-mean.'''
        # add bias unit to input data
        ones = np.ones((data.shape[0], 1, trainLen-initLen))
        inputs = np.concatenate((ones, data[:, :, initLen:trainLen]), axis=1)
        # (outSize, inSize+1) x (examples, inSize+1, time) -> (examples, outSize, time)
        if self.non_linear:
            self.outStates = np.tanh(np.einsum('ij,ljk -> lik', self.Wout, inputs))
        else:
            self.outStates = np.einsum('ij,ljk -> lik', self.Wout, inputs)
        if covariance: # update covariances
            self.outCovariance = my_covariance(self.outStates)
        if mean: # update mean states
            self.outMean = np.mean(self.outStates, axis=2)
        return

    def predict(self, mode='mean'):
        # For each example, predict the index of the output unit with the
        # largest mean activation ('mean') or largest variance ('covariance').
        # Requires run() to have been called with mean=True / covariance=True
        # first; an unrecognized mode returns an empty list.
        Y = []
        if mode == 'mean':
            for ex in range(self.outStates.shape[0]):
                max_out = np.max(self.outMean[ex, :])
                # index of the first unit attaining the maximum
                pred = np.where(self.outMean[ex, :] == max_out)[0][0]
                Y.append(pred)
        if mode == 'covariance':
            for ex in range(self.outStates.shape[0]):
                # diagonal of the covariance matrix = per-unit variances
                diagonals = np.diag(self.outCovariance[ex, :, :])
                max_out = np.max(diagonals)
                pred = np.where(diagonals == max_out)[0][0]
                Y.append(pred)
        return Y

    def score(self, Y_true, Y_pred):
        # Fraction of predictions matching the true labels.
        return accuracy_score(Y_true, Y_pred)

    def plot_output_units(self):
        '''Plot output unit activity for one randomly chosen example and
        save the figure to outputActivity.png.'''
        if self.outStates is None:
            print('Run data to update output states!')
            return
        else:
            print('Plotting activity')
        # plot output units for a random example
        fig = plt.figure()
        time = [i for i in range(self.outStates.shape[2])]
        sample = np.random.choice(self.outStates.shape[0])
        plt.plot(time, self.outStates[sample, :, :].T)
        plt.xlabel('Steps (input %i)' % sample)
        plt.ylabel('Output activation')
        fig.savefig('outputActivity.png', dpi=200, bbox_inches='tight')
        print('Done')
        return
| [
"numpy.mean",
"numpy.ones",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"numpy.random.choice",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.diag",
"matplotlib.pyplot.figure",
"numpy.einsum",
"numpy.random.seed",
"numpy.concatenate",
"sklea... | [((303, 336), 'numpy.einsum', 'np.einsum', (['"""ijk,ilk->ijl"""', 'm1', 'm1'], {}), "('ijk,ilk->ijl', m1, m1)\n", (312, 336), True, 'import numpy as np\n'), ((526, 559), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (540, 559), True, 'import numpy as np\n'), ((1106, 1153), 'numpy.ones', 'np.ones', (['(data.shape[0], 1, trainLen - initLen)'], {}), '((data.shape[0], 1, trainLen - initLen))\n', (1113, 1153), True, 'import numpy as np\n'), ((1170, 1230), 'numpy.concatenate', 'np.concatenate', (['(ones, data[:, :, initLen:trainLen])'], {'axis': '(1)'}), '((ones, data[:, :, initLen:trainLen]), axis=1)\n', (1184, 1230), True, 'import numpy as np\n'), ((2445, 2475), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_true', 'Y_pred'], {}), '(Y_true, Y_pred)\n', (2459, 2475), False, 'from sklearn.metrics import accuracy_score\n'), ((643, 688), 'numpy.random.rand', 'np.random.rand', (['self.outSize', '(self.inSize + 1)'], {}), '(self.outSize, self.inSize + 1)\n', (657, 688), True, 'import numpy as np\n'), ((1390, 1435), 'numpy.einsum', 'np.einsum', (['"""ij,ljk -> lik"""', 'self.Wout', 'inputs'], {}), "('ij,ljk -> lik', self.Wout, inputs)\n", (1399, 1435), True, 'import numpy as np\n'), ((1618, 1649), 'numpy.mean', 'np.mean', (['self.outStates'], {'axis': '(2)'}), '(self.outStates, axis=2)\n', (1625, 1649), True, 'import numpy as np\n'), ((2843, 2855), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2853, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2983), 'numpy.random.choice', 'np.random.choice', (['self.outStates.shape[0]'], {}), '(self.outStates.shape[0])\n', (2958, 2983), True, 'import numpy as np\n'), ((2997, 3043), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'self.outStates[sample, :, :].T'], {}), '(time, self.outStates[sample, :, :].T)\n', (3005, 3043), True, 'import matplotlib.pyplot as plt\n'), ((3057, 3096), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Steps (input 
%i)' % sample)"], {}), "('Steps (input %i)' % sample)\n", (3067, 3096), True, 'import matplotlib.pyplot as plt\n'), ((3110, 3141), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Output activation"""'], {}), "('Output activation')\n", (3120, 3141), True, 'import matplotlib.pyplot as plt\n'), ((1298, 1343), 'numpy.einsum', 'np.einsum', (['"""ij,ljk -> lik"""', 'self.Wout', 'inputs'], {}), "('ij,ljk -> lik', self.Wout, inputs)\n", (1307, 1343), True, 'import numpy as np\n'), ((1942, 1969), 'numpy.max', 'np.max', (['self.outMean[ex, :]'], {}), '(self.outMean[ex, :])\n', (1948, 1969), True, 'import numpy as np\n'), ((2193, 2230), 'numpy.diag', 'np.diag', (['self.outCovariance[ex, :, :]'], {}), '(self.outCovariance[ex, :, :])\n', (2200, 2230), True, 'import numpy as np\n'), ((2258, 2275), 'numpy.max', 'np.max', (['diagonals'], {}), '(diagonals)\n', (2264, 2275), True, 'import numpy as np\n'), ((1994, 2034), 'numpy.where', 'np.where', (['(self.outMean[ex, :] == max_out)'], {}), '(self.outMean[ex, :] == max_out)\n', (2002, 2034), True, 'import numpy as np\n'), ((2300, 2330), 'numpy.where', 'np.where', (['(diagonals == max_out)'], {}), '(diagonals == max_out)\n', (2308, 2330), True, 'import numpy as np\n')] |
import pandas as pd
import quandl, math
import numpy as np
from sklearn import preprocessing, svm
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# get Google stock data set
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume',]]
# Daily high-to-close spread, as a percentage of the close.
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100
# Daily open-to-close move, as a percentage of the open.
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100
df = df[['Adj. Close', 'HL_PCT', 'PCT_change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
# Because ML can not work with na data
df.fillna(-99999, inplace=True)
# math.ceil(x) = smallest integer value greater than or equal to x.
# We try to predict out 1 percent (0.01) of the dataframe into the future.
# number of rows to predict
forecast_out = int(math.ceil(0.01*len(df)))
print (forecast_out)
# The label is the close price shifted forecast_out rows into the future.
df['label'] = df[forecast_col].shift(-forecast_out)
# delete the rows whose shifted label is NA (the last forecast_out rows)
df.dropna(inplace=True)
X = np.array(df.drop(['label'], 1))
y = np.array(df['label'])
# scale data to "Gaussian with zero mean and unit variance"
X = preprocessing.scale(X)
# 20% of data we want to actually use as testing data.
# Generate the training and test data.
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection now.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# classifier
clf = LinearRegression(n_jobs=-1)
# clf = svm.SVR()
# clf = svm.SVR(kernel='poly')
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print (accuracy)
| [
"numpy.array",
"quandl.get",
"sklearn.cross_validation.train_test_split",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.scale"
] | [((200, 224), 'quandl.get', 'quandl.get', (['"""WIKI/GOOGL"""'], {}), "('WIKI/GOOGL')\n", (210, 224), False, 'import quandl, math\n'), ((1146, 1167), 'numpy.array', 'np.array', (["df['label']"], {}), "(df['label'])\n", (1154, 1167), True, 'import numpy as np\n'), ((1232, 1254), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (1251, 1254), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1384, 1438), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (1417, 1438), False, 'from sklearn import preprocessing, cross_validation, svm\n'), ((1458, 1485), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (1474, 1485), False, 'from sklearn.linear_model import LinearRegression\n')] |
import os
import sys
import math
import random
import numpy as np
from datetime import datetime
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import data_utils
def _stoke_decoding(stoke):
    """Convert a stroke-3 array of (dx, dy, pen_state) rows into geometry.

    Returns ``(lines, points)``: ``points`` holds every pen-down absolute
    position, ``lines`` holds consecutive pen-down segments.  Decoding stops
    at the first padding row (pen state == 2.0).
    """
    PADDING_STATE = 2.0
    segments = []
    positions = []
    cur_x, cur_y = 0, 0
    pen_was_down = False
    for row in stoke:
        nxt_x = cur_x + row[0]
        nxt_y = cur_y + row[1]
        pen_state = row[2]
        if pen_state == PADDING_STATE:
            break
        pen_down = pen_state == 0.0
        if pen_down:
            positions.append((nxt_x, nxt_y))
        # NOTE(review): the original uses `and` here, so purely horizontal or
        # vertical moves never yield a segment -- preserved as-is; confirm
        # whether `or` was intended.
        if pen_was_down and pen_down and cur_x != nxt_x and cur_y != nxt_y:
            segments.append(((cur_x, cur_y), (nxt_x, nxt_y)))
        cur_x, cur_y = nxt_x, nxt_y
        pen_was_down = pen_down
    return segments, positions
def map_fn(stoke, label, point_num=512):
    """Resample a stroke-3 sketch into a fixed-size (point_num, 6) point cloud.

    The first three channels are (x, sample_order, y) positions, the last
    three are per-point normals.  The y axis carries the normalized sample
    index (0..1), i.e. drawing order.  Returns (points+normals, label).
    """
    lines, points = _stoke_decoding(stoke)
    points_array = np.zeros(shape=(point_num, 3), dtype=np.float32)
    normals_array = np.zeros(shape=(point_num, 3), dtype=np.float32)
    if len(lines) == 0 and len(points) == 0:
        # Nothing decodable at all; arrays stay zero.
        print('Empty stoke detected!')
    elif len(lines) == 0:
        # Only isolated points: cycle through them until point_num samples.
        print('Stoke without any line detected!')
        for sample_idx in range(point_num):
            sample_idx_float = sample_idx / (point_num - 1)
            px, py = points[sample_idx % len(points)]
            points_array[sample_idx] = (px, sample_idx_float, py)
    else:
        # Distribute point_num samples over the line segments, proportionally
        # to segment length.
        line_len_list = []
        for ((x0, y0), (x1, y1)) in lines:
            x_diff = x1 - x0
            y_diff = y1 - y0
            line_len_list.append(math.sqrt(x_diff * x_diff + y_diff * y_diff))
        line_len_sum = sum(line_len_list)
        factor = point_num / line_len_sum
        sample_nums = [math.ceil(line_len * factor) for line_len in line_len_list]
        sample_num_total = sum(sample_nums)
        # ceil() over-allocates; remove the surplus starting from the
        # segments with the fewest samples.
        sample_nums_indices = [x for x, y in sorted(enumerate(sample_nums), key=lambda x: x[1])]
        for i in range(sample_num_total - point_num):
            ii = sample_nums_indices[i]
            sample_nums[ii] = sample_nums[ii] - 1
        assert (sum(sample_nums) == point_num)
        sample_idx = 0
        for idx_line, line_sample_num in enumerate(sample_nums):
            if line_sample_num == 0:
                continue
            ((x0, y0), (x1, y1)) = lines[idx_line]
            # Unit normal perpendicular to the segment.
            nx = y1 - y0
            ny = x0 - x1
            n_len = math.sqrt(nx * nx + ny * ny)
            nx /= n_len
            ny /= n_len
            if line_sample_num == 1:
                # Single sample: place it at the segment midpoint.  The tiny
                # random value jitters the normal's order-axis component.
                sample_idx_float = sample_idx / (point_num - 1)
                points_array[sample_idx] = ((x0 + x1) / 2, sample_idx_float, (y0 + y1) / 2)
                normals_array[sample_idx] = (nx, random.random() * 1e-6, ny)
                sample_idx += 1
            elif line_sample_num > 1:
                # Spread the samples evenly along the segment (endpoints
                # included).
                x_diff = x1 - x0
                y_diff = y1 - y0
                for alpha in np.linspace(0, 1, line_sample_num):
                    sample_idx_float = sample_idx / (point_num - 1)
                    points_array[sample_idx] = (x0 + alpha * x_diff, sample_idx_float, y0 + alpha * y_diff)
                    normals_array[sample_idx] = (nx, random.random() * 1e-6, ny)
                    sample_idx += 1
    # Center the cloud and scale x/z into roughly [-0.8, 0.8]; the order axis
    # (y) is scaled by a fixed 0.4.
    points_min = np.amin(points_array, axis=0)
    points_max = np.amax(points_array, axis=0)
    points_center = (points_min + points_max) / 2
    scale = np.amax(points_max - points_min) / 2
    points_array = (points_array - points_center) * (0.8 / scale, 0.4, 0.8 / scale)
    return np.concatenate((points_array, normals_array), axis=-1).astype(np.float32), label
def _extract_padded_stokes(stokes, stoke_len_max, stoke_placeholder, ratio):
    """Pad every usable stroke to ``stoke_len_max`` rows and stack them.

    Strokes that are empty or decode to no lines/points are skipped as bad
    data.  Collection stops once more than ``ratio * len(stokes)`` strokes
    have been gathered, so very large inputs keep only a subset.
    """
    limit = ratio * len(stokes)
    collected = []
    for raw in stokes:
        if len(raw) == 0:  # bad data, ignore it!
            continue
        segs, pts = _stoke_decoding(raw)
        if len(segs) == 0 or len(pts) == 0:  # bad data, ignore it!
            continue
        as_f32 = raw.astype(np.float32)
        missing = stoke_len_max - len(raw)
        if missing == 0:
            collected.append(as_f32)
        else:
            collected.append(np.concatenate([as_f32, stoke_placeholder[:missing]], axis=0))
        if len(collected) > limit:  # The data is too big, only use a subset...
            break
    return np.stack(collected)
def load_fn(folder_npz, ratio, categories=None):
    """Load Quick Draw stroke data from per-category ``.npz`` files.

    Args:
        folder_npz: directory containing ``<category>.npz`` files and,
            when ``categories`` is None, a ``categories.txt`` listing.
        ratio: per-category cap passed to ``_extract_padded_stokes`` (keep
            at most ``ratio * len(stokes)`` strokes of each split).
        categories: optional explicit category list; otherwise read from
            ``categories.txt``.

    Returns:
        ``(raw_train, label_train, raw_val, label_val)`` -- padded stroke
        arrays and integer category labels, jointly shuffled.
    """
    # Pen state used for padding rows (must match _stoke_decoding's padding).
    lift_pen_padding = 2.0
    categories = [line.strip() for line in
                  open(os.path.join(folder_npz, 'categories.txt'), 'r')] if categories is None else categories
    # First pass: find the longest stroke so every stroke can be padded to a
    # common length.
    stoke_len_max = 0
    stoke_len_sum = 0
    stoke_num = 0
    load_data_list = []
    for idx_category, category in enumerate(categories):
        print('{}-Loading category {} ({} of {})...'.format(datetime.now(), category, idx_category+1, len(categories)))
        sys.stdout.flush()
        filename_category = os.path.join(folder_npz, category + '.npz')
        load_data = np.load(filename_category, encoding='bytes')
        load_data_list.append(load_data)
        for tag in load_data:
            for stoke in load_data[tag]:
                stoke_len_max = max(stoke_len_max, stoke.shape[0])
                stoke_len_sum += stoke.shape[0]
            stoke_num += len(load_data[tag])
    print('{}-Max stoke length: {}, average stoke length: {}.'.format(datetime.now(), stoke_len_max,
                                                                      stoke_len_sum / stoke_num))
    sys.stdout.flush()
    # Rows of (0, 0, padding) used to pad short strokes up to stoke_len_max.
    stoke_placeholder = np.array([(0.0, 0.0, lift_pen_padding)] * stoke_len_max).astype(np.float32)
    # Second pass: pad/stack the 'train' and 'valid' splits of each category
    # and record the category index as the label.
    raw_train_list = []
    label_train_list = []
    raw_val_list = []
    label_val_list = []
    for idx_category, category in enumerate(categories):
        print('{}-Extracting category {} ({} of {})...'.format(datetime.now(), category, idx_category+1, len(categories)))
        sys.stdout.flush()
        load_data = load_data_list[idx_category]
        raw_train_list.append(_extract_padded_stokes(load_data['train'], stoke_len_max, stoke_placeholder, ratio))
        label_train_list += [idx_category] * len(raw_train_list[-1])
        raw_val_list.append(_extract_padded_stokes(load_data['valid'], stoke_len_max, stoke_placeholder, ratio))
        label_val_list += [idx_category] * len(raw_val_list[-1])
    raw_train = np.concatenate(raw_train_list, axis=0)
    label_train = np.array(label_train_list)
    raw_val = np.concatenate(raw_val_list, axis=0)
    label_val = np.array(label_val_list)
    print('{}-Shuffling data...'.format(datetime.now()))
    sys.stdout.flush()
    # Shuffle data and labels together so they stay aligned.
    raw_train, label_train = data_utils.grouped_shuffle([raw_train, label_train])
    raw_val, label_val = data_utils.grouped_shuffle([raw_val, label_val])
    print('{}-Quick Draw data loaded!'.format(datetime.now()))
    sys.stdout.flush()
    return raw_train, label_train, raw_val, label_val
| [
"math.ceil",
"numpy.amin",
"os.path.join",
"math.sqrt",
"numpy.stack",
"numpy.zeros",
"numpy.array",
"datetime.datetime.now",
"numpy.linspace",
"numpy.concatenate",
"random.random",
"os.path.abspath",
"data_utils.grouped_shuffle",
"sys.stdout.flush",
"numpy.load",
"numpy.amax"
] | [((946, 994), 'numpy.zeros', 'np.zeros', ([], {'shape': '(point_num, 3)', 'dtype': 'np.float32'}), '(shape=(point_num, 3), dtype=np.float32)\n', (954, 994), True, 'import numpy as np\n'), ((1015, 1063), 'numpy.zeros', 'np.zeros', ([], {'shape': '(point_num, 3)', 'dtype': 'np.float32'}), '(shape=(point_num, 3), dtype=np.float32)\n', (1023, 1063), True, 'import numpy as np\n'), ((3296, 3325), 'numpy.amin', 'np.amin', (['points_array'], {'axis': '(0)'}), '(points_array, axis=0)\n', (3303, 3325), True, 'import numpy as np\n'), ((3343, 3372), 'numpy.amax', 'np.amax', (['points_array'], {'axis': '(0)'}), '(points_array, axis=0)\n', (3350, 3372), True, 'import numpy as np\n'), ((4398, 4426), 'numpy.stack', 'np.stack', (['padded_stokes_list'], {}), '(padded_stokes_list)\n', (4406, 4426), True, 'import numpy as np\n'), ((5563, 5581), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5579, 5581), False, 'import sys\n'), ((6416, 6454), 'numpy.concatenate', 'np.concatenate', (['raw_train_list'], {'axis': '(0)'}), '(raw_train_list, axis=0)\n', (6430, 6454), True, 'import numpy as np\n'), ((6473, 6499), 'numpy.array', 'np.array', (['label_train_list'], {}), '(label_train_list)\n', (6481, 6499), True, 'import numpy as np\n'), ((6514, 6550), 'numpy.concatenate', 'np.concatenate', (['raw_val_list'], {'axis': '(0)'}), '(raw_val_list, axis=0)\n', (6528, 6550), True, 'import numpy as np\n'), ((6567, 6591), 'numpy.array', 'np.array', (['label_val_list'], {}), '(label_val_list)\n', (6575, 6591), True, 'import numpy as np\n'), ((6654, 6672), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6670, 6672), False, 'import sys\n'), ((6702, 6754), 'data_utils.grouped_shuffle', 'data_utils.grouped_shuffle', (['[raw_train, label_train]'], {}), '([raw_train, label_train])\n', (6728, 6754), False, 'import data_utils\n'), ((6780, 6828), 'data_utils.grouped_shuffle', 'data_utils.grouped_shuffle', (['[raw_val, label_val]'], {}), '([raw_val, label_val])\n', (6806, 6828), False, 
'import data_utils\n'), ((6896, 6914), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6912, 6914), False, 'import sys\n'), ((3435, 3467), 'numpy.amax', 'np.amax', (['(points_max - points_min)'], {}), '(points_max - points_min)\n', (3442, 3467), True, 'import numpy as np\n'), ((4932, 4950), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4948, 4950), False, 'import sys\n'), ((4979, 5022), 'os.path.join', 'os.path.join', (['folder_npz', "(category + '.npz')"], {}), "(folder_npz, category + '.npz')\n", (4991, 5022), False, 'import os\n'), ((5043, 5087), 'numpy.load', 'np.load', (['filename_category'], {'encoding': '"""bytes"""'}), "(filename_category, encoding='bytes')\n", (5050, 5087), True, 'import numpy as np\n'), ((5967, 5985), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5983, 5985), False, 'import sys\n'), ((145, 170), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (160, 170), False, 'import os\n'), ((5430, 5444), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5442, 5444), False, 'from datetime import datetime\n'), ((5607, 5663), 'numpy.array', 'np.array', (['([(0.0, 0.0, lift_pen_padding)] * stoke_len_max)'], {}), '([(0.0, 0.0, lift_pen_padding)] * stoke_len_max)\n', (5615, 5663), True, 'import numpy as np\n'), ((6633, 6647), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6645, 6647), False, 'from datetime import datetime\n'), ((6875, 6889), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6887, 6889), False, 'from datetime import datetime\n'), ((1772, 1800), 'math.ceil', 'math.ceil', (['(line_len * factor)'], {}), '(line_len * factor)\n', (1781, 1800), False, 'import math\n'), ((2437, 2465), 'math.sqrt', 'math.sqrt', (['(nx * nx + ny * ny)'], {}), '(nx * nx + ny * ny)\n', (2446, 2465), False, 'import math\n'), ((3568, 3622), 'numpy.concatenate', 'np.concatenate', (['(points_array, normals_array)'], {'axis': '(-1)'}), '((points_array, normals_array), 
axis=-1)\n', (3582, 3622), True, 'import numpy as np\n'), ((4864, 4878), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4876, 4878), False, 'from datetime import datetime\n'), ((5899, 5913), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5911, 5913), False, 'from datetime import datetime\n'), ((1619, 1663), 'math.sqrt', 'math.sqrt', (['(x_diff * x_diff + y_diff * y_diff)'], {}), '(x_diff * x_diff + y_diff * y_diff)\n', (1628, 1663), False, 'import math\n'), ((4572, 4614), 'os.path.join', 'os.path.join', (['folder_npz', '"""categories.txt"""'], {}), "(folder_npz, 'categories.txt')\n", (4584, 4614), False, 'import os\n'), ((2949, 2983), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'line_sample_num'], {}), '(0, 1, line_sample_num)\n', (2960, 2983), True, 'import numpy as np\n'), ((2756, 2771), 'random.random', 'random.random', ([], {}), '()\n', (2769, 2771), False, 'import random\n'), ((3214, 3229), 'random.random', 'random.random', ([], {}), '()\n', (3227, 3229), False, 'import random\n')] |
import json
import os
import numpy as np
from tqdm import tqdm
from utils.datastream import DataCollection
def split_stream_instance(root, feature_root, streams, dataset_id):
    """Split training instances across label streams.

    For every label in every stream, 90% of its training instances go to
    that stream; the remaining 10% are relabelled as 0 ("none") and spread
    over the other streams.  The dataset's own label-0 instances are also
    divided evenly across the streams.  Returns one instance list per stream.
    """
    collection = DataCollection(root, feature_root, streams)
    # Fix the RNG seed so the permutations (and thus the split) are
    # deterministic across runs.
    np.random.seed(2147483647)
    none_instances = collection.collect_instance_by_labels(labels=[0], dataset=collection.datasets[dataset_id])
    train_none = none_instances["train"]
    rest_instances = []
    stream_instances = []
    nstreams = len(streams)
    collected = set()
    for stream in streams:
        stream_instances.append([])
        rest_instances.append([])
        for label in tqdm(stream):
            if label == 0:
                continue
            if label not in collected:
                collected.add(label)
            else:
                # Label appears in more than one stream; just report it.
                print(label)
            label_instances = collection.collect_instance_by_labels(labels=[label], dataset=collection.datasets[dataset_id])
            train_instances = label_instances["train"]
            # Sanity check: every collected instance must carry the label we
            # asked for; pauses interactively on mismatch.
            for t in train_instances:
                if t['label'] != label:
                    print(label, t, stream)
                    input()
            # NOTE(review): the permutation's length is used but the order is
            # not -- instances are taken in their original order; confirm
            # whether shuffled indices were intended here.
            rand_perm = np.random.permutation(len(train_instances))
            # First 90% stay in this stream; the remaining 10% are set aside.
            stream_instances[-1].extend([train_instances[i] for i in range(int(len(rand_perm)*0.9))])
            rest_instances[-1].extend([train_instances[i] for i in range(int(len(rand_perm)*0.9), len(rand_perm))])
            rest = iter(train_instances[i] for i in range(int(len(rand_perm)*0.9), len(rand_perm)))
            for instance in rest:
                # Keep the true label, then relabel the held-out instance as
                # 0 ("none").  The `+ 0` presumably forces a copy of a
                # numeric label -- TODO confirm.
                instance["original_label"] = instance["label"] + 0
                instance["label"] = 0
                # NOTE(review): the assert message is itself a boolean
                # comparison, not a string -- likely meant as a second check.
                assert instance['label'] == 0, instance["original_label"] == label
    # Distribute each stream's held-out (relabelled) instances in chunks of
    # drest over the other nstreams-1 streams; the extra extend below picks
    # up any remainder slice once irest passes nrest.
    for i in range(len(streams)):
        nrest = len(rest_instances[i])
        drest = nrest // (nstreams - 1)
        irest = drest
        for j in range(nstreams):
            if j != i:
                stream_instances[j].extend(rest_instances[i][irest-drest:irest])
                irest += drest
                if irest > nrest:
                    stream_instances[j].extend(rest_instances[i][irest-drest:irest])
    # Shuffle the dataset's genuine label-0 instances and divide them evenly
    # across all streams (same remainder handling as above).
    rand_perm = np.random.permutation(len(train_none))
    nnone = len(train_none)
    dnone = nnone // nstreams
    inone = dnone
    train_none = [train_none[i] for i in rand_perm]
    for j in range(nstreams):
        stream_instances[j].extend(train_none[inone-dnone:inone])
        print(j, len(stream_instances[j]))
        inone += dnone
        if inone > nnone:
            stream_instances[j].extend(train_none[inone-dnone:inone])
    return stream_instances
if __name__ == "__main__":
root="./data/"
feature_root="./data/features"
dataset_id = 0
streams = json.load(open(os.path.join(root, "MAVEN", "streams.json")))
instances = split_stream_instance(root, feature_root, streams, dataset_id)
json.dump(instances, open(os.path.join(root, "MAVEN", "stream_instances.json"), "wt"), indent=4)
| [
"utils.datastream.DataCollection",
"tqdm.tqdm",
"os.path.join",
"numpy.random.seed"
] | [((193, 236), 'utils.datastream.DataCollection', 'DataCollection', (['root', 'feature_root', 'streams'], {}), '(root, feature_root, streams)\n', (207, 236), False, 'from utils.datastream import DataCollection\n'), ((241, 267), 'numpy.random.seed', 'np.random.seed', (['(2147483647)'], {}), '(2147483647)\n', (255, 267), True, 'import numpy as np\n'), ((639, 651), 'tqdm.tqdm', 'tqdm', (['stream'], {}), '(stream)\n', (643, 651), False, 'from tqdm import tqdm\n'), ((2798, 2841), 'os.path.join', 'os.path.join', (['root', '"""MAVEN"""', '"""streams.json"""'], {}), "(root, 'MAVEN', 'streams.json')\n", (2810, 2841), False, 'import os\n'), ((2953, 3005), 'os.path.join', 'os.path.join', (['root', '"""MAVEN"""', '"""stream_instances.json"""'], {}), "(root, 'MAVEN', 'stream_instances.json')\n", (2965, 3005), False, 'import os\n')] |
def Main_Check_QR_Code():
    """Scan QR codes from the webcam and check them against college IDs.

    Reads the 'College Id' column of the Students and Lecturers sheets in
    CollegeData.xlsx, then shows a live preview until ESC is pressed,
    drawing each detected code's outline and verdict on the frame.

    Returns:
        bool: True if the most recently decoded QR code was an authorized
        ID, False otherwise (or if nothing valid was scanned).
    """
    import cv2
    import numpy as np
    from pyzbar.pyzbar import decode
    import pandas
    from tkinter import messagebox as msgx
    xls = pandas.ExcelFile('CollegeData.xlsx')
    df1 = pandas.read_excel(xls, 'Students')
    df2 = pandas.read_excel(xls, 'Lecturers')
    list1 = df1['College Id'].tolist()
    list2 = df2['College Id'].tolist()
    List = list1 + list2
    QR_Code_List = [str(i) for i in List]
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cap.set(3, 640)  # frame width
    cap.set(4, 480)  # frame height
    Passed = False
    while True:
        success, img = cap.read()
        if success:
            # Decode once per frame and reuse the result; the original code
            # decoded the same frame twice and tested `is not None` on a
            # list, which is always true (pyzbar returns a possibly empty
            # list).  An empty result simply skips this loop.
            for barcode in decode(img):
                myData = barcode.data.decode('utf-8')
                if myData in QR_Code_List:
                    myOutput = 'Authorized'
                    myColor = (0, 255, 0)
                    Passed = True
                else:
                    myOutput = 'Un-Authorized'
                    myColor = (0, 0, 255)
                    Passed = False
                # Outline the code and draw the verdict next to it.
                pts = np.array([barcode.polygon], np.int32)
                pts = pts.reshape((-1, 1, 2))
                cv2.polylines(img, [pts], True, myColor, 5)
                pts2 = barcode.rect
                cv2.putText(img, myOutput, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX,
                            0.9, myColor, 3)
        cv2.imshow("QR-Code Student's ID Scanner ", img)
        k = cv2.waitKey(1)
        if k % 256 == 27:  # ESC pressed
            msgx.showinfo("QR-Code Checking Window", "QR-Code Checking Window is Destroyed Successfully")
            break
    cap.release()
    cv2.destroyAllWindows()
    return Passed
| [
"cv2.polylines",
"cv2.imshow",
"cv2.putText",
"pyzbar.pyzbar.decode",
"numpy.array",
"pandas.ExcelFile",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"pandas.read_excel",
"tkinter.messagebox.showinfo",
"cv2.waitKey"
] | [((180, 216), 'pandas.ExcelFile', 'pandas.ExcelFile', (['"""CollegeData.xlsx"""'], {}), "('CollegeData.xlsx')\n", (196, 216), False, 'import pandas\n'), ((228, 262), 'pandas.read_excel', 'pandas.read_excel', (['xls', '"""Students"""'], {}), "(xls, 'Students')\n", (245, 262), False, 'import pandas\n'), ((273, 308), 'pandas.read_excel', 'pandas.read_excel', (['xls', '"""Lecturers"""'], {}), "(xls, 'Lecturers')\n", (290, 308), False, 'import pandas\n'), ((474, 508), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (490, 508), False, 'import cv2\n'), ((1794, 1817), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1815, 1817), False, 'import cv2\n'), ((647, 658), 'pyzbar.pyzbar.decode', 'decode', (['img'], {}), '(img)\n', (653, 658), False, 'from pyzbar.pyzbar import decode\n'), ((738, 749), 'pyzbar.pyzbar.decode', 'decode', (['img'], {}), '(img)\n', (744, 749), False, 'from pyzbar.pyzbar import decode\n'), ((1465, 1513), 'cv2.imshow', 'cv2.imshow', (['"""QR-Code Student\'s ID Scanner """', 'img'], {}), '("QR-Code Student\'s ID Scanner ", img)\n', (1475, 1513), False, 'import cv2\n'), ((1530, 1544), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1541, 1544), False, 'import cv2\n'), ((1146, 1183), 'numpy.array', 'np.array', (['[barcode.polygon]', 'np.int32'], {}), '([barcode.polygon], np.int32)\n', (1154, 1183), True, 'import numpy as np\n'), ((1245, 1288), 'cv2.polylines', 'cv2.polylines', (['img', '[pts]', '(True)', 'myColor', '(5)'], {}), '(img, [pts], True, myColor, 5)\n', (1258, 1288), False, 'import cv2\n'), ((1341, 1435), 'cv2.putText', 'cv2.putText', (['img', 'myOutput', '(pts2[0], pts2[1])', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', 'myColor', '(3)'], {}), '(img, myOutput, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX, \n 0.9, myColor, 3)\n', (1352, 1435), False, 'import cv2\n'), ((1593, 1690), 'tkinter.messagebox.showinfo', 'msgx.showinfo', (['"""QR-Code Checking Window"""', '"""QR-Code 
Checking Window is Destroyed Successfully"""'], {}), "('QR-Code Checking Window',\n 'QR-Code Checking Window is Destroyed Successfully')\n", (1606, 1690), True, 'from tkinter import messagebox as msgx\n')] |
from __future__ import print_function
# standard library imports
import sys
import os
try:
from io import StringIO
except:
from StringIO import StringIO
# third party
import numpy as np
# local application imports
from ._linesearch import NoLineSearch
from .base_optimizer import base_optimizer
from utilities import units, block_matrix, manage_xyz
class eigenvector_follow(base_optimizer):
    """Eigenvector-following geometry optimizer.

    Repeatedly forms an eigenvector step (or a TS eigenvector step), line
    searches along it, adapts the trust radius ``DMAX``, and checks the
    convergence criteria inherited from ``base_optimizer``.
    """

    def optimize(
            self,
            molecule,
            refE=0.,
            opt_type='UNCONSTRAINED',
            opt_steps=3,
            ictan=None,
            xyzframerate=4,
            verbose=False,
            path=os.getcwd(),  # NOTE(review): default evaluated once at import time
    ):
        """Optimize ``molecule`` and return its trajectory.

        Args:
            molecule: the molecule object to optimize (mutated in place).
            refE: reference energy subtracted from reported energies.
            opt_type: one of UNCONSTRAINED/TS/CLIMB/SEAM/MECI/TS-SEAM etc.
            opt_steps: maximum number of optimization steps.
            ictan: constraint tangent vector(s), used for constrained types.
            xyzframerate: write/record geometry every this many steps.
            verbose: forwarded to the line search.
            path: directory where opt_<node_id>.xyz trajectories are written.

        Returns:
            (geoms, energies): recorded geometries and energies (minus refE).
        """
        # stash/initialize some useful attributes
        self.check_inputs(molecule, opt_type, ictan)
        nconstraints = self.get_nconstraints(opt_type)
        self.buf = StringIO()

        # print " refE %5.4f" % refE
        print(" initial E %5.4f" % (molecule.energy - refE))
        print(" CONV_TOL %1.5f" % self.conv_grms)
        geoms = []
        energies = []
        geoms.append(molecule.geometry)
        energies.append(molecule.energy-refE)

        self.converged = False

        # form initial coord basis
        if opt_type != 'TS':
            constraints = self.get_constraint_vectors(molecule, opt_type, ictan)
            molecule.update_coordinate_basis(constraints=constraints)
            molecule.form_Hessian_in_basis()

        # Evaluate the function value and its gradient.
        fx = molecule.energy
        g = molecule.gradient.copy()

        # project out the constraint
        gc = g.copy()
        for c in molecule.constraints.T:
            gc -= np.dot(gc.T, c[:, np.newaxis])*c[:, np.newaxis]
        gmax = float(np.max(np.absolute(gc)))

        # Early exit if only a gradient-convergence check was requested.
        if self.check_only_grad_converged:
            if molecule.gradrms < self.conv_grms and gmax < self.conv_gmax:
                self.converged = True
                return geoms, energies
            else:
                self.check_only_grad_converged = False

        # for cartesian these are the same
        x = np.copy(molecule.coordinates)
        xyz = np.copy(molecule.xyz)

        if opt_type == 'TS':
            self.Linesearch = NoLineSearch
        if opt_type == 'SEAM' or opt_type == 'MECI' or opt_type == "TS-SEAM":
            self.opt_cross = True

        # TODO are these used? -- n is used for gradrms,linesearch
        if molecule.coord_obj.__class__.__name__ == 'CartesianCoordinates':
            n = molecule.num_coordinates
        else:
            n_actual = molecule.num_coordinates
            n = n_actual - nconstraints
            self.x_prim = np.zeros((molecule.num_primitives, 1), dtype=float)
            self.g_prim = np.zeros((molecule.num_primitives, 1), dtype=float)

        molecule.gradrms = np.sqrt(np.dot(gc.T, gc)/n)
        dE = molecule.difference_energy

        update_hess = False
        # ====> Do opt steps <======= #
        for ostep in range(opt_steps):
            print(" On opt step {} for node {}".format(ostep+1, molecule.node_id))

            # update Hess (skipped on the first step)
            if update_hess:
                if opt_type != 'TS':
                    self.update_Hessian(molecule, 'BFGS')
                else:
                    self.update_Hessian(molecule, 'BOFILL')
            update_hess = True

            # => Form eigenvector step <= #
            if molecule.coord_obj.__class__.__name__ == 'CartesianCoordinates':
                raise NotImplementedError
            else:
                if opt_type != 'TS':
                    dq = self.eigenvector_step(molecule, gc)
                else:
                    dq = self.TS_eigenvector_step(molecule, g, ictan)
                    if not self.maxol_good:
                        # Bad overlap with the TS mode: fall back to climbing.
                        print(" Switching to climb! Maxol not good!")
                        nconstraints = 1
                        opt_type = 'CLIMB'

            actual_step = np.linalg.norm(dq)
            # print(" actual_step= %1.2f"% actual_step)
            dq = dq/actual_step  # normalize
            # Cap the step length at the current trust radius.
            if actual_step > self.DMAX:
                step = self.DMAX
                # print(" reducing step, new step = %1.2f" %step)
            else:
                step = actual_step

            # store values (previous point, for step control / Hessian update)
            xp = x.copy()
            gp = g.copy()
            xyzp = xyz.copy()
            fxp = fx
            pgradrms = molecule.gradrms
            if not molecule.coord_obj.__class__.__name__ == 'CartesianCoordinates':
                # xp_prim = self.x_prim.copy()
                gp_prim = self.g_prim.copy()

            # => calculate constraint step <= #
            constraint_steps = self.get_constraint_steps(molecule, opt_type, g)

            # print(" ### Starting line search ###")
            ls = self.Linesearch(nconstraints, x, fx, g, dq, step, xp, constraint_steps, self.linesearch_parameters, molecule, verbose)

            # get values from linesearch
            molecule = ls['molecule']
            step = ls['step']
            x = ls['x']
            fx = ls['fx']
            g = ls['g']

            # Line search failed: restore the previous point.
            if ls['status'] == -2:
                print('[ERROR] the point return to the privious point')
                x = xp.copy()
                molecule.xyz = xyzp
                g = gp.copy()
                fx = fxp
                ratio = 0.
                molecule.newHess = 5
                # return ls['status']

            # Grow/shrink the trust radius toward the step the line search
            # actually took, clamped to [DMIN, abs_max_step].
            if ls['step'] > self.DMAX:
                if ls['step'] <= self.options['abs_max_step']:  # absolute max
                    print(" Increasing DMAX to {}".format(ls['step']))
                    self.DMAX = ls['step']
                else:
                    self.DMAX = self.options['abs_max_step']
            elif ls['step'] < self.DMAX:
                if ls['step'] >= self.DMIN:  # absolute min
                    print(" Decreasing DMAX to {}".format(ls['step']))
                    self.DMAX = ls['step']
                elif ls['step'] <= self.DMIN:
                    self.DMAX = self.DMIN
                    print(" Decreasing DMAX to {}".format(self.DMIN))

            # calculate predicted value from Hessian; gc here is still the
            # previous step's constrained gradient (recomputed below)
            scaled_dq = dq*step
            dEtemp = np.dot(self.Hessian, scaled_dq)
            dEpre = np.dot(np.transpose(scaled_dq), gc) + 0.5*np.dot(np.transpose(dEtemp), scaled_dq)
            dEpre *= units.KCAL_MOL_PER_AU

            # print(constraint_steps.T)
            constraint_energy = np.dot(gp.T, constraint_steps)*units.KCAL_MOL_PER_AU
            # print("constraint_energy: %1.4f" % constraint_energy)
            dEpre += constraint_energy
            # if abs(dEpre)<0.01:
            #     dEpre = np.sign(dEpre)*0.01

            # project out the constraint
            gc = g.copy()
            for c in molecule.constraints.T:
                gc -= np.dot(gc.T, c[:, np.newaxis])*c[:, np.newaxis]

            # control step size: ratio of the actual to the predicted change
            dEstep = fx - fxp
            print(" dEstep=%5.4f" % dEstep)
            ratio = dEstep/dEpre

            molecule.gradrms = np.sqrt(np.dot(gc.T, gc)/n)
            if ls['status'] != -2:
                self.step_controller(actual_step, ratio, molecule.gradrms, pgradrms, dEpre, opt_type, dEstep)

            # update molecule xyz
            xyz = molecule.update_xyz(x-xp)
            if ostep % xyzframerate == 0:
                geoms.append(molecule.geometry)
                energies.append(molecule.energy-refE)
                manage_xyz.write_xyzs_w_comments('{}/opt_{}.xyz'.format(path, molecule.node_id), geoms, energies, scale=1.)

            # save variables for update Hessian!
            if not molecule.coord_obj.__class__.__name__ == 'CartesianCoordinates':
                # only form g_prim for non-constrained
                self.g_prim = block_matrix.dot(molecule.coord_basis, gc)
                self.dx = x-xp
                self.dg = g - gp
                self.dx_prim_actual = molecule.coord_obj.Prims.calcDiff(xyz, xyzp)
                self.dx_prim_actual = np.reshape(self.dx_prim_actual, (-1, 1))
                self.dx_prim = block_matrix.dot(molecule.coord_basis, scaled_dq)
                self.dg_prim = self.g_prim - gp_prim
            else:
                raise NotImplementedError(" ef not implemented for CART")

            if self.options['print_level'] > 0:
                print(" Node: %d Opt step: %d E: %5.4f predE: %5.4f ratio: %1.3f gradrms: %1.5f ss: %1.3f DMAX: %1.3f" % (molecule.node_id, ostep+1, fx-refE, dEpre, ratio, molecule.gradrms, step, self.DMAX))
            self.buf.write(u' Node: %d Opt step: %d E: %5.4f predE: %5.4f ratio: %1.3f gradrms: %1.5f ss: %1.3f DMAX: %1.3f\n' % (molecule.node_id, ostep+1, fx-refE, dEpre, ratio, molecule.gradrms, step, self.DMAX))

            # check for convergence TODO
            fx = molecule.energy
            dE = molecule.difference_energy
            if dE < 1000.:
                print(" difference energy is %5.4f" % dE)
            gmax = float(np.max(np.absolute(gc)))
            disp = float(np.linalg.norm((xyz-xyzp).flatten()))
            xnorm = np.sqrt(np.dot(x.T, x))
            # gnorm = np.sqrt(np.dot(g.T, g))
            if xnorm < 1.0:
                xnorm = 1.0

            print(" gmax %5.4f disp %5.4f Ediff %5.4f gradrms %5.4f\n" % (gmax, disp, dEstep, molecule.gradrms))

            # TODO turn back on conv_DE
            # Crossing optimizations additionally require a small state
            # energy gap (dE); constrained types also check the gradient
            # along the constraint direction.
            if self.opt_cross and abs(dE) < self.conv_dE and molecule.gradrms < self.conv_grms and abs(gmax) < self.conv_gmax and abs(dEstep) < self.conv_Ediff and abs(disp) < self.conv_disp:
                if opt_type == "TS-SEAM":
                    gts = np.dot(g.T, molecule.constraints[:, 0])
                    print(" gts %1.4f" % gts)
                    if abs(gts) < self.conv_grms*5:
                        self.converged = True
                else:
                    self.converged = True
            elif not self.opt_cross and molecule.gradrms < self.conv_grms and abs(gmax) < self.conv_gmax and abs(dEstep) < self.conv_Ediff and abs(disp) < self.conv_disp:
                if opt_type == "CLIMB":
                    gts = np.dot(g.T, molecule.constraints[:, 0])
                    if abs(gts) < self.conv_grms*5.:
                        self.converged = True
                elif opt_type == "TS":
                    if self.gtse < self.conv_grms*5.:
                        self.converged = True
                else:
                    self.converged = True

            if self.converged:
                print(" converged")
                if ostep % xyzframerate != 0:
                    # Make sure the final geometry is recorded even off-cadence.
                    geoms.append(molecule.geometry)
                    energies.append(molecule.energy-refE)
                    manage_xyz.write_xyzs_w_comments('{}/opt_{}.xyz'.format(path, molecule.node_id), geoms, energies, scale=1.)
                break

            # update DLC --> this changes q, g, Hint
            if not molecule.coord_obj.__class__.__name__ == 'CartesianCoordinates':
                if opt_type != 'TS':
                    constraints = self.get_constraint_vectors(molecule, opt_type, ictan)
                    molecule.update_coordinate_basis(constraints=constraints)
                    x = np.copy(molecule.coordinates)
                    g = molecule.gradient.copy()
                    # project out the constraint
                    gc = g.copy()
                    for c in molecule.constraints.T:
                        gc -= np.dot(gc.T, c[:, np.newaxis])*c[:, np.newaxis]
            print()
            sys.stdout.flush()

        print(" opt-summary {}".format(molecule.node_id))
        print(self.buf.getvalue())
        return geoms, energies
if __name__ == '__main__':
    # Example driver: optimize bent benzene at HF/6-31G* with QChem.
    from qchem import QChem
    from pes import PES
    from molecule import Molecule
    from slots import Distance

    basis = "6-31G*"
    nproc = 8
    filepath = "examples/tests/bent_benzene.xyz"
    # Level of theory, potential energy surface, and molecule setup.
    lot = QChem.from_options(states=[(1, 0)], charge=0, basis=basis, functional='HF', nproc=nproc, fnm=filepath)
    pes = PES.from_options(lot=lot, ad_idx=0, multiplicity=1)
    M = Molecule.from_options(fnm=filepath, PES=pes, coordinate_type="DLC")

    distance = Distance(5, 8)  # Not 1 based!!
    print(distance)

    ef = eigenvector_follow.from_options()  # Linesearch=NoLineSearch)
    geoms = ef.optimize(molecule=M, refE=M.energy, opt_steps=5)
    #geoms = ef.optimize(molecule=M,refE=M.energy,opt_steps=1)

    print(M.primitive_internal_coordinates)
    manage_xyz.write_xyzs('opt.xyz', geoms, scale=1.)
| [
"StringIO.StringIO",
"numpy.copy",
"slots.Distance",
"utilities.manage_xyz.write_xyzs",
"numpy.reshape",
"numpy.absolute",
"qchem.QChem.from_options",
"utilities.block_matrix.dot",
"os.getcwd",
"pes.PES.from_options",
"numpy.zeros",
"numpy.dot",
"numpy.linalg.norm",
"sys.stdout.flush",
"... | [((11964, 12070), 'qchem.QChem.from_options', 'QChem.from_options', ([], {'states': '[(1, 0)]', 'charge': '(0)', 'basis': 'basis', 'functional': '"""HF"""', 'nproc': 'nproc', 'fnm': 'filepath'}), "(states=[(1, 0)], charge=0, basis=basis, functional='HF',\n nproc=nproc, fnm=filepath)\n", (11982, 12070), False, 'from qchem import QChem\n'), ((12077, 12128), 'pes.PES.from_options', 'PES.from_options', ([], {'lot': 'lot', 'ad_idx': '(0)', 'multiplicity': '(1)'}), '(lot=lot, ad_idx=0, multiplicity=1)\n', (12093, 12128), False, 'from pes import PES\n'), ((12137, 12204), 'molecule.Molecule.from_options', 'Molecule.from_options', ([], {'fnm': 'filepath', 'PES': 'pes', 'coordinate_type': '"""DLC"""'}), "(fnm=filepath, PES=pes, coordinate_type='DLC')\n", (12158, 12204), False, 'from molecule import Molecule\n'), ((12220, 12234), 'slots.Distance', 'Distance', (['(5)', '(8)'], {}), '(5, 8)\n', (12228, 12234), False, 'from slots import Distance\n'), ((12520, 12570), 'utilities.manage_xyz.write_xyzs', 'manage_xyz.write_xyzs', (['"""opt.xyz"""', 'geoms'], {'scale': '(1.0)'}), "('opt.xyz', geoms, scale=1.0)\n", (12541, 12570), False, 'from utilities import units, block_matrix, manage_xyz\n'), ((643, 654), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (652, 654), False, 'import os\n'), ((841, 851), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (849, 851), False, 'from StringIO import StringIO\n'), ((2081, 2110), 'numpy.copy', 'np.copy', (['molecule.coordinates'], {}), '(molecule.coordinates)\n', (2088, 2110), True, 'import numpy as np\n'), ((2125, 2146), 'numpy.copy', 'np.copy', (['molecule.xyz'], {}), '(molecule.xyz)\n', (2132, 2146), True, 'import numpy as np\n'), ((2645, 2696), 'numpy.zeros', 'np.zeros', (['(molecule.num_primitives, 1)'], {'dtype': 'float'}), '((molecule.num_primitives, 1), dtype=float)\n', (2653, 2696), True, 'import numpy as np\n'), ((2723, 2774), 'numpy.zeros', 'np.zeros', (['(molecule.num_primitives, 1)'], {'dtype': 'float'}), 
'((molecule.num_primitives, 1), dtype=float)\n', (2731, 2774), True, 'import numpy as np\n'), ((3926, 3944), 'numpy.linalg.norm', 'np.linalg.norm', (['dq'], {}), '(dq)\n', (3940, 3944), True, 'import numpy as np\n'), ((6256, 6287), 'numpy.dot', 'np.dot', (['self.Hessian', 'scaled_dq'], {}), '(self.Hessian, scaled_dq)\n', (6262, 6287), True, 'import numpy as np\n'), ((11578, 11596), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11594, 11596), False, 'import sys\n'), ((1661, 1691), 'numpy.dot', 'np.dot', (['gc.T', 'c[:, np.newaxis]'], {}), '(gc.T, c[:, np.newaxis])\n', (1667, 1691), True, 'import numpy as np\n'), ((1737, 1752), 'numpy.absolute', 'np.absolute', (['gc'], {}), '(gc)\n', (1748, 1752), True, 'import numpy as np\n'), ((2811, 2827), 'numpy.dot', 'np.dot', (['gc.T', 'gc'], {}), '(gc.T, gc)\n', (2817, 2827), True, 'import numpy as np\n'), ((6505, 6535), 'numpy.dot', 'np.dot', (['gp.T', 'constraint_steps'], {}), '(gp.T, constraint_steps)\n', (6511, 6535), True, 'import numpy as np\n'), ((7837, 7879), 'utilities.block_matrix.dot', 'block_matrix.dot', (['molecule.coord_basis', 'gc'], {}), '(molecule.coord_basis, gc)\n', (7853, 7879), False, 'from utilities import units, block_matrix, manage_xyz\n'), ((8066, 8106), 'numpy.reshape', 'np.reshape', (['self.dx_prim_actual', '(-1, 1)'], {}), '(self.dx_prim_actual, (-1, 1))\n', (8076, 8106), True, 'import numpy as np\n'), ((8138, 8187), 'utilities.block_matrix.dot', 'block_matrix.dot', (['molecule.coord_basis', 'scaled_dq'], {}), '(molecule.coord_basis, scaled_dq)\n', (8154, 8187), False, 'from utilities import units, block_matrix, manage_xyz\n'), ((9152, 9166), 'numpy.dot', 'np.dot', (['x.T', 'x'], {}), '(x.T, x)\n', (9158, 9166), True, 'import numpy as np\n'), ((6315, 6338), 'numpy.transpose', 'np.transpose', (['scaled_dq'], {}), '(scaled_dq)\n', (6327, 6338), True, 'import numpy as np\n'), ((6879, 6909), 'numpy.dot', 'np.dot', (['gc.T', 'c[:, np.newaxis]'], {}), '(gc.T, c[:, np.newaxis])\n', (6885, 
6909), True, 'import numpy as np\n'), ((7106, 7122), 'numpy.dot', 'np.dot', (['gc.T', 'gc'], {}), '(gc.T, gc)\n', (7112, 7122), True, 'import numpy as np\n'), ((9043, 9058), 'numpy.absolute', 'np.absolute', (['gc'], {}), '(gc)\n', (9054, 9058), True, 'import numpy as np\n'), ((9685, 9724), 'numpy.dot', 'np.dot', (['g.T', 'molecule.constraints[:, 0]'], {}), '(g.T, molecule.constraints[:, 0])\n', (9691, 9724), True, 'import numpy as np\n'), ((11253, 11282), 'numpy.copy', 'np.copy', (['molecule.coordinates'], {}), '(molecule.coordinates)\n', (11260, 11282), True, 'import numpy as np\n'), ((6357, 6377), 'numpy.transpose', 'np.transpose', (['dEtemp'], {}), '(dEtemp)\n', (6369, 6377), True, 'import numpy as np\n'), ((10170, 10209), 'numpy.dot', 'np.dot', (['g.T', 'molecule.constraints[:, 0]'], {}), '(g.T, molecule.constraints[:, 0])\n', (10176, 10209), True, 'import numpy as np\n'), ((11498, 11528), 'numpy.dot', 'np.dot', (['gc.T', 'c[:, np.newaxis]'], {}), '(gc.T, c[:, np.newaxis])\n', (11504, 11528), True, 'import numpy as np\n')] |
import awkward
import numpy as np
class TableWrapper(awkward.Table):
    """``awkward.Table`` subclass that mirrors its columns as attributes and
    preserves its own type when sliced with a boolean (or jagged-boolean) mask."""
    def __init__(self, *args, **kwargs):
        super(TableWrapper, self).__init__(*args, **kwargs)
        self._set_column_attributes()
    def _set_column_attributes(self):
        """Expose every column of the table as an instance attribute."""
        # The columns as attributes should only be used as read-only,
        # since setting via them doesn't run the validation!
        for col in self.columns:
            setattr(self, col, self._contents[col])
    @classmethod
    def fromtree(cls, tree, **branches):
        """Build a table by reading branches from *tree*.

        *branches* maps output column names to branch names.  When *tree* is a
        list, the per-tree arrays are concatenated into one column.
        NOTE(review): *tree* appears to be an uproot-style TTree (it exposes
        ``array(name)``) -- confirm against callers.
        """
        data = {}
        for key, col_name in branches.items():
            if isinstance(tree, list):
                data[key] = awkward.util.concatenate([t.array(col_name) for t in tree])
            else:
                data[key] = tree.array(col_name)
        return cls(**data)
    @classmethod
    def fromtable(cls, table):
        """Wrap an existing ``awkward.Table``, sharing its internal state
        (no column data is copied)."""
        new = cls()
        new._view = table._view
        new._base = table._base
        new._rowname = table._rowname
        new._contents = table._contents
        return new
    def table(self):
        """Return a plain ``awkward.Table`` sharing this table's internals."""
        out = awkward.Table()
        out._view = self._view
        out._base = self._base
        out._rowname = self._rowname
        out._contents = self._contents
        return out
    def __getitem__(self, where):
        """Index the table; boolean ndarray or jagged-boolean masks return
        an instance of this wrapper type instead of a plain Table."""
        if isinstance(where, np.ndarray):
            if where.dtype == np.dtype("bool"):
                return self._slice_mask(where)
        if isinstance(where, awkward.JaggedArray):
            if where.content.dtype == bool:
                return self._slice_mask(where)
        return super(TableWrapper, self).__getitem__(where)
    def _slice_mask(self, mask):
        """Apply *mask* to every column and rebuild a table of this type."""
        data = {}
        for key, x in self._contents.items():
            data[key] = x[mask]
        return type(self)(**data)
| [
"awkward.Table",
"numpy.dtype"
] | [((1080, 1095), 'awkward.Table', 'awkward.Table', ([], {}), '()\n', (1093, 1095), False, 'import awkward\n'), ((1361, 1377), 'numpy.dtype', 'np.dtype', (['"""bool"""'], {}), "('bool')\n", (1369, 1377), True, 'import numpy as np\n')] |
# Code for initialization of NMF, copied with little modification from scikit-learn
# Original source: https://github.com/scikit-learn/scikit-learn/blob/7e1e6d09bcc2eaeba98f7e737aac2ac782f0e5f1/sklearn/decomposition/_nmf.py#L229
import numpy as np
from scipy import linalg
import warnings
from math import sqrt
import numbers
def check_random_state(seed):
    """Turn *seed* into a ``np.random.RandomState`` instance.

    Parameters
    ----------
    seed : None, int or instance of RandomState
        ``None`` / ``np.random``  -> the global RandomState singleton.
        int                       -> a fresh RandomState seeded with that value.
        RandomState instance      -> returned unchanged.
        Anything else raises ``ValueError``.
    """
    if seed is None or seed is np.random:
        # Reuse NumPy's module-level generator.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    raise ValueError(
        "%r cannot be used to seed a numpy.random.RandomState instance" % seed
    )
def squared_norm(x):
    """Return the squared Euclidean (or Frobenius) norm of *x*.

    Equivalent to ``norm(x) ** 2`` but faster: a single dot product over
    the flattened data.

    Parameters
    ----------
    x : array-like

    Returns
    -------
    float
        Squared Euclidean norm for a vector, squared Frobenius norm for a
        2-d array.
    """
    flat = np.ravel(x, order="K")
    if np.issubdtype(flat.dtype, np.integer):
        # Integer dot products can overflow silently; nudge callers
        # towards float data.
        warnings.warn(
            "Array type is integer, np.dot may overflow. "
            "Data should be float type to avoid this issue",
            UserWarning,
        )
    return np.dot(flat, flat)
def norm(x):
    """Dot product-based Euclidean norm implementation.

    See: http://fseoane.net/blog/2011/computing-the-vector-norm/

    Parameters
    ----------
    x : array-like
        Vector for which to compute the norm.

    Returns
    -------
    float
        The Euclidean norm of ``x`` (square root of ``squared_norm(x)``).
    """
    return sqrt(squared_norm(x))
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to make SVD output deterministic.

    Flips each column of ``u`` (and the matching row of ``v``) so that the
    entry of largest magnitude in the deciding factor becomes positive.
    The arrays are modified in place and also returned.

    Parameters
    ----------
    u, v : ndarray
        Factors as produced by ``linalg.svd`` (``v`` is really ``vt``),
        with matching inner dimensions so ``np.dot(u * s, v)`` is valid.
    u_based_decision : bool, default=True
        If True, decide the flips from the columns of ``u``; otherwise
        from the rows of ``v``.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # Row index of the largest-magnitude entry in each column of u.
        peak_rows = np.abs(u).argmax(axis=0)
        flips = np.sign(u[peak_rows, np.arange(u.shape[1])])
    else:
        # Column index of the largest-magnitude entry in each row of v.
        peak_cols = np.abs(v).argmax(axis=1)
        flips = np.sign(v[np.arange(v.shape[0]), peak_cols])
    u *= flips
    v *= flips[:, np.newaxis]
    return u, v
def randomized_range_finder(
    A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
):
    """Compute an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A : 2D array
        The input data matrix.
    size : int
        Number of columns of the random test matrix.
    n_iter : int
        Number of power iterations used to stabilize the result.
    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        How the power iterations are normalized: 'QR' (slowest, most
        accurate), 'none' (fastest, numerically unstable for large
        ``n_iter``), or 'LU' (a stable middle ground).  'auto' applies no
        normalization when ``n_iter`` <= 2 and switches to LU otherwise.
    random_state : int, RandomState instance or None, default=None
        Seeds the Gaussian test matrix; pass an int for reproducible
        results across calls.

    Returns
    -------
    Q : ndarray
        Orthonormal basis whose span approximates the range of A.

    Notes
    -----
    Follows Algorithm 4.3 of Halko et al., 2009,
    https://arxiv.org/pdf/0909.4061.pdf
    """
    rng = check_random_state(random_state)

    # Gaussian test matrix with shape (A.shape[1], size).
    Q = rng.normal(size=(A.shape[1], size))
    if A.dtype.kind == "f":
        # Keep float32 inputs in float32.
        Q = Q.astype(A.dtype, copy=False)

    normalizer = power_iteration_normalizer
    if normalizer == "auto":
        normalizer = "none" if n_iter <= 2 else "LU"

    # Power iterations 'imprint' the dominant singular vectors of A on Q.
    for _ in range(n_iter):
        if normalizer == "none":
            Q = np.dot(A.T, np.dot(A, Q))
        elif normalizer == "LU":
            Q, _lu = linalg.lu(np.dot(A, Q), permute_l=True)
            Q, _lu = linalg.lu(np.dot(A.T, Q), permute_l=True)
        elif normalizer == "QR":
            Q, _r = linalg.qr(np.dot(A, Q), mode="economic")
            Q, _r = linalg.qr(np.dot(A.T, Q), mode="economic")

    # Sample the range of A by one final projection, then extract an
    # orthonormal basis for it.
    Q, _r = linalg.qr(np.dot(A, Q), mode="economic")
    return Q
def randomized_svd(
    M,
    n_components,
    *,
    n_oversamples=10,
    n_iter="auto",
    power_iteration_normalizer="auto",
    transpose="auto",
    flip_sign=True,
    random_state="warn",
):
    """Compute a truncated randomized SVD of ``M``.

    Solves the fixed-rank approximation problem of Halko et al. (2009,
    problem (1.5), p5) by projecting ``M`` onto a random low-dimensional
    subspace and running a deterministic SVD there.

    Parameters
    ----------
    M : {ndarray, sparse matrix}
        Matrix to decompose.
    n_components : int
        Number of singular values and vectors to extract.
    n_oversamples : int, default=10
        Extra random vectors used to sample the range of M; the projection
        basis has ``n_components + n_oversamples`` columns.  Increasing it
        improves accuracy at some cost in speed (see Halko et al., pages
        5, 23 and 26).
    n_iter : int or 'auto', default='auto'
        Number of power iterations (useful for noisy problems).  'auto'
        picks 7 when ``n_components < .1 * min(M.shape)`` and 4 otherwise.
    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Normalization between power iterations; see
        :func:`randomized_range_finder`.
    transpose : bool or 'auto', default='auto'
        Whether to run the algorithm on ``M.T`` instead of ``M``.  'auto'
        transposes when ``M.shape[1] > M.shape[0]``, which tends to be a
        little faster.
    flip_sign : bool, default=True
        Resolve the SVD sign ambiguity so that the largest loadings of
        each component in the left singular vectors are positive.
    random_state : int, RandomState instance or None, default='warn'
        Seed for the random projection.  The 'warn' sentinel emits a
        FutureWarning and falls back to 0.

    Returns
    -------
    U, s, Vt : ndarrays
        Truncated factors such that ``U @ np.diag(s) @ Vt`` approximates
        ``M``.

    References
    ----------
    Halko et al., 2009, https://arxiv.org/abs/0909.4061
    """
    if random_state == "warn":
        warnings.warn(
            "If 'random_state' is not supplied, the current default "
            "is to use 0 as a fixed seed. This will change to "
            "None in version 1.2 leading to non-deterministic results "
            "that better reflect nature of the randomized_svd solver. "
            "If you want to silence this warning, set 'random_state' "
            "to an integer seed or to None explicitly depending "
            "if you want your code to be deterministic or not.",
            FutureWarning,
        )
        random_state = 0

    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape

    if n_iter == "auto":
        # 7 iterations improves precision when few components are
        # requested; 4 is a good compromise otherwise (sklearn #5299).
        n_iter = 7 if n_components < 0.1 * min(M.shape) else 4

    if transpose == "auto":
        transpose = n_samples < n_features
    if transpose:
        # This implementation is a bit faster with the smaller shape[1].
        M = M.T

    basis = randomized_range_finder(
        M,
        size=n_random,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        random_state=random_state,
    )

    # Project M into the (k + p)-dimensional subspace spanned by the basis
    # and run an exact SVD on the resulting thin matrix.
    small = np.dot(basis.T, M)
    Uhat, s, Vt = linalg.svd(small, full_matrices=False)
    del small
    U = np.dot(basis, Uhat)

    if flip_sign:
        # When transposed, the flip must be decided on what becomes U
        # after the swap below, i.e. on Vt (u_based_decision=False).
        U, Vt = svd_flip(U, Vt, u_based_decision=not transpose)

    if transpose:
        # Transpose back the results according to the input convention.
        return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T
    return U[:, :n_components], s[:n_components], Vt[:n_components, :]
def _initialize_nmf(X, n_components, init="warn", eps=1e-6, random_state=None):
    """Compute initial guesses ``W, H`` for NMF so that ``X ~= W @ H``.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Non-negative data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
    init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'} or None, default='warn'
        Initialization method.  ``None`` selects 'nndsvd' when
        ``n_components <= min(n_samples, n_features)`` and 'random'
        otherwise.  'nndsvda' fills NNDSVD zeros with the mean of X,
        'nndsvdar' with small random values.  The 'warn' sentinel emits a
        FutureWarning and then behaves like ``None``.
    eps : float, default=1e-6
        Values below this threshold are truncated to zero in the output.
    random_state : int, RandomState instance or None, default=None
        Used when ``init`` is 'nndsvdar' or 'random'.

    Returns
    -------
    W : array-like of shape (n_samples, n_components)
    H : array-like of shape (n_components, n_features)

    Raises
    ------
    ValueError
        If X contains negative values, if a deterministic init is asked
        for with ``n_components > min(n_samples, n_features)``, or if
        ``init`` is not a recognized option.

    References
    ----------
    Boutsidis & Gallopoulos: SVD based initialization: A head start for
    nonnegative matrix factorization - Pattern Recognition, 2008
    http://tinyurl.com/nndsvd
    """
    if init == "warn":
        warnings.warn(
            "The 'init' value, when 'init=None' and "
            "n_components is less than n_samples and "
            "n_features, will be changed from 'nndsvd' to "
            "'nndsvda' in 1.1 (renaming of 0.26).",
            FutureWarning,
        )
        init = None

    if X.min() < 0:
        raise ValueError("Negative values in data passed to NMF initialization")

    n_samples, n_features = X.shape

    if (
        init is not None
        and init != "random"
        and n_components > min(n_samples, n_features)
    ):
        raise ValueError(
            "init = '{}' can only be used when "
            "n_components <= min(n_samples, n_features)".format(init)
        )

    if init is None:
        init = "nndsvd" if n_components <= min(n_samples, n_features) else "random"

    if init == "random":
        # Scale the random factors so that W @ H has roughly the mean of X.
        avg = np.sqrt(X.mean() / n_components)
        rng = check_random_state(random_state)
        H = avg * rng.randn(n_components, n_features).astype(X.dtype, copy=False)
        W = avg * rng.randn(n_samples, n_components).astype(X.dtype, copy=False)
        np.abs(H, out=H)
        np.abs(W, out=W)
        return W, H

    # NNDSVD family: seed the factors from a truncated randomized SVD.
    U, S, V = randomized_svd(X, n_components, random_state=random_state)
    W = np.zeros_like(U)
    H = np.zeros_like(V)

    # The leading singular triplet is non-negative,
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])

    for j in range(1, n_components):
        x, y = U[:, j], V[j, :]

        # Positive and negative parts of the singular vector pair.
        x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
        x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))

        # Norms of each part.
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm

        # Keep whichever half (positive or negative) carries more energy.
        if m_p > m_n:
            u, v, sigma = x_p / x_p_nrm, y_p / y_p_nrm, m_p
        else:
            u, v, sigma = x_n / x_n_nrm, y_n / y_n_nrm, m_n

        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v

    W[W < eps] = 0
    H[H < eps] = 0

    if init == "nndsvd":
        pass
    elif init == "nndsvda":
        # Fill exact zeros with the global mean of X.
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif init == "nndsvdar":
        # Fill zeros with small random values (faster, slightly less
        # accurate than 'nndsvda').
        rng = check_random_state(random_state)
        avg = X.mean()
        W[W == 0] = abs(avg * rng.randn(len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * rng.randn(len(H[H == 0])) / 100)
    else:
        raise ValueError(
            "Invalid init parameter: got %r instead of one of %r"
            % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar"))
        )

    return W, H
| [
"numpy.abs",
"numpy.sqrt",
"numpy.maximum",
"numpy.minimum",
"numpy.issubdtype",
"numpy.dot",
"scipy.linalg.svd",
"numpy.ravel",
"warnings.warn",
"numpy.zeros_like",
"numpy.random.RandomState"
] | [((1399, 1421), 'numpy.ravel', 'np.ravel', (['x'], {'order': '"""K"""'}), "(x, order='K')\n", (1407, 1421), True, 'import numpy as np\n'), ((1429, 1463), 'numpy.issubdtype', 'np.issubdtype', (['x.dtype', 'np.integer'], {}), '(x.dtype, np.integer)\n', (1442, 1463), True, 'import numpy as np\n'), ((1654, 1666), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (1660, 1666), True, 'import numpy as np\n'), ((12945, 12959), 'numpy.dot', 'np.dot', (['Q.T', 'M'], {}), '(Q.T, M)\n', (12951, 12959), True, 'import numpy as np\n'), ((13034, 13068), 'scipy.linalg.svd', 'linalg.svd', (['B'], {'full_matrices': '(False)'}), '(B, full_matrices=False)\n', (13044, 13068), False, 'from scipy import linalg\n'), ((13088, 13103), 'numpy.dot', 'np.dot', (['Q', 'Uhat'], {}), '(Q, Uhat)\n', (13094, 13103), True, 'import numpy as np\n'), ((17043, 17059), 'numpy.zeros_like', 'np.zeros_like', (['U'], {}), '(U)\n', (17056, 17059), True, 'import numpy as np\n'), ((17068, 17084), 'numpy.zeros_like', 'np.zeros_like', (['V'], {}), '(V)\n', (17081, 17084), True, 'import numpy as np\n'), ((890, 917), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (911, 917), True, 'import numpy as np\n'), ((1473, 1602), 'warnings.warn', 'warnings.warn', (['"""Array type is integer, np.dot may overflow. Data should be float type to avoid this issue"""', 'UserWarning'], {}), "(\n 'Array type is integer, np.dot may overflow. Data should be float type to avoid this issue'\n , UserWarning)\n", (1486, 1602), False, 'import warnings\n'), ((6492, 6504), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (6498, 6504), True, 'import numpy as np\n'), ((11571, 11988), 'warnings.warn', 'warnings.warn', (['"""If \'random_state\' is not supplied, the current default is to use 0 as a fixed seed. This will change to None in version 1.2 leading to non-deterministic results that better reflect nature of the randomized_svd solver. 
If you want to silence this warning, set \'random_state\' to an integer seed or to None explicitly depending if you want your code to be deterministic or not."""', 'FutureWarning'], {}), '(\n "If \'random_state\' is not supplied, the current default is to use 0 as a fixed seed. This will change to None in version 1.2 leading to non-deterministic results that better reflect nature of the randomized_svd solver. If you want to silence this warning, set \'random_state\' to an integer seed or to None explicitly depending if you want your code to be deterministic or not."\n , FutureWarning)\n', (11584, 11988), False, 'import warnings\n'), ((15694, 15896), 'warnings.warn', 'warnings.warn', (['"""The \'init\' value, when \'init=None\' and n_components is less than n_samples and n_features, will be changed from \'nndsvd\' to \'nndsvda\' in 1.1 (renaming of 0.26)."""', 'FutureWarning'], {}), '(\n "The \'init\' value, when \'init=None\' and n_components is less than n_samples and n_features, will be changed from \'nndsvd\' to \'nndsvda\' in 1.1 (renaming of 0.26)."\n , FutureWarning)\n', (15707, 15896), False, 'import warnings\n'), ((16871, 16887), 'numpy.abs', 'np.abs', (['H'], {'out': 'H'}), '(H, out=H)\n', (16877, 16887), True, 'import numpy as np\n'), ((16896, 16912), 'numpy.abs', 'np.abs', (['W'], {'out': 'W'}), '(W, out=W)\n', (16902, 16912), True, 'import numpy as np\n'), ((17201, 17214), 'numpy.sqrt', 'np.sqrt', (['S[0]'], {}), '(S[0])\n', (17208, 17214), True, 'import numpy as np\n'), ((17217, 17232), 'numpy.abs', 'np.abs', (['U[:, 0]'], {}), '(U[:, 0])\n', (17223, 17232), True, 'import numpy as np\n'), ((17247, 17260), 'numpy.sqrt', 'np.sqrt', (['S[0]'], {}), '(S[0])\n', (17254, 17260), True, 'import numpy as np\n'), ((17263, 17278), 'numpy.abs', 'np.abs', (['V[0, :]'], {}), '(V[0, :])\n', (17269, 17278), True, 'import numpy as np\n'), ((17962, 17983), 'numpy.sqrt', 'np.sqrt', (['(S[j] * sigma)'], {}), '(S[j] * sigma)\n', (17969, 17983), True, 'import numpy as 
np\n'), ((3161, 3170), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (3167, 3170), True, 'import numpy as np\n'), ((3370, 3379), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (3376, 3379), True, 'import numpy as np\n'), ((5991, 6003), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (5997, 6003), True, 'import numpy as np\n'), ((6020, 6034), 'numpy.dot', 'np.dot', (['A.T', 'Q'], {}), '(A.T, Q)\n', (6026, 6034), True, 'import numpy as np\n'), ((17433, 17449), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (17443, 17449), True, 'import numpy as np\n'), ((17451, 17467), 'numpy.maximum', 'np.maximum', (['y', '(0)'], {}), '(y, 0)\n', (17461, 17467), True, 'import numpy as np\n'), ((17494, 17510), 'numpy.minimum', 'np.minimum', (['x', '(0)'], {}), '(x, 0)\n', (17504, 17510), True, 'import numpy as np\n'), ((17520, 17536), 'numpy.minimum', 'np.minimum', (['y', '(0)'], {}), '(y, 0)\n', (17530, 17536), True, 'import numpy as np\n'), ((6113, 6125), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (6119, 6125), True, 'import numpy as np\n'), ((6172, 6186), 'numpy.dot', 'np.dot', (['A.T', 'Q'], {}), '(A.T, Q)\n', (6178, 6186), True, 'import numpy as np\n'), ((6282, 6294), 'numpy.dot', 'np.dot', (['A', 'Q'], {}), '(A, Q)\n', (6288, 6294), True, 'import numpy as np\n'), ((6342, 6356), 'numpy.dot', 'np.dot', (['A.T', 'Q'], {}), '(A.T, Q)\n', (6348, 6356), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 10 14:59:37 2020
Copyright 2020 by <NAME>.
"""
# %% Imports.
# Standard library imports:
import numpy as np
from scipy.sparse import csr_matrix
# Chebpy imports:
from chebpy.nla import sphankel
# %% Test 1.
col = np.array([1, 2, 3, 4])  # input vector for the sparse Hankel constructor
H = sphankel(col)  # build a sparse Hankel matrix from col
print(csr_matrix.todense(H))  # display it as a dense matrix for inspection
"chebpy.nla.sphankel",
"numpy.array",
"scipy.sparse.csr_matrix.todense"
] | [((288, 310), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (296, 310), True, 'import numpy as np\n'), ((315, 328), 'chebpy.nla.sphankel', 'sphankel', (['col'], {}), '(col)\n', (323, 328), False, 'from chebpy.nla import sphankel\n'), ((335, 356), 'scipy.sparse.csr_matrix.todense', 'csr_matrix.todense', (['H'], {}), '(H)\n', (353, 356), False, 'from scipy.sparse import csr_matrix\n')] |
import math
import numpy as np
import logging
logger = logging.getLogger('cwl')
class CWLMetric(object):
    """Base class for metrics in the C/W/L (Continue/Weight/Last) framework.

    A concrete metric defines the user's continuation behaviour via
    :meth:`c_vector`; the weight (W) and stopping-likelihood (L) vectors
    are derived from it, and :meth:`measure` combines them with a
    ranking's gain and cost vectors to produce the expected measurements.
    """
    def __init__(self):
        # Worse-case (lower-bound) measurements, populated by measure().
        self.expected_utility = 0.0
        self.expected_cost = 0.0
        self.expected_total_utility = 0.0
        self.expected_total_cost = 0.0
        self.expected_items = 0.0
        # Residuals (best-case minus worse-case), populated by measure()
        # only when self.residuals is True.
        self.residual_expected_utility = None
        self.residual_expected_cost = None
        self.residual_expected_total_utility = None
        self.residual_expected_total_cost = None
        self.residual_expected_items = None
        self.residuals = False
        # Display name; concrete subclasses are expected to override this.
        self.metric_name = "Undefined"
        # The most recently measured ranking.
        self.ranking = None
        # Citation for the metric, if any.
        self.bibtex = ""
    def name(self):
        """Return the display name of the metric."""
        return self.metric_name
    def c_vector(self, ranking, worse_case=True):
        """
        Create a vector of C probabilities (i.e. probability of continuing from position i to position i+1)
        Note: when defining a metric is best/easiest to re-implement this function.
        :param ranking: CWL Ranking object
        :param worse_case: Boolean, to denote whether to estimate based on assuming the
        worse case i.e. unjudged are considered to be zero gain, and max cost or
        best case i.e. worse_case=False, and unjudged are considered to be max gain, and min cost
        Note that the Ranking object handles what is returned in the gain and cost vectors.
        :return: returns the C vector probabilities
        """
        # Base class default: always continue (probability 1 at every rank).
        cvec = np.ones(len(ranking.get_gain_vector(worse_case)))
        return cvec
    def l_vector(self, ranking, worse_case=True):
        """
        Create a vector of L probabilities (i.e. the Likelihoods of stopping at position i given the C vector)
        :param ranking: CWL Ranking object
        :param worse_case: Boolean, to denote whether to estimate based on assuming the
        :return: returns the L vector probabilities
        """
        cvec = self.c_vector(ranking, worse_case)
        logger.debug("{0} {1} {2} {3}".format(ranking.topic_id, self.name(), "cvec", cvec[0:11]))
        # L[i] = (prod of C[0..i-1]) * (1 - C[i]): reach position i, then stop.
        cshift = np.append(np.array([1.0]), cvec[0:-1])
        lvec = np.cumprod(cshift)
        lvec = np.multiply(lvec, (np.subtract(np.ones(len(cvec)), cvec)))
        logger.debug("{0} {1} {2} {3}".format(ranking.topic_id, self.name(), "lvec", lvec[0:11]))
        return lvec
    def w_vector(self, ranking, worse_case=True):
        """
        Create a vector of E probabilities (i.e. probability of examining item i)
        Note: when defining a metric is best/easiest to re-implement this function.
        :param ranking: CWL Ranking object
        :param worse_case: Boolean, to denote whether to estimate based on assuming the
        :return: returns the W vector probabilities
        """
        cvec = self.c_vector(ranking, worse_case)
        cvec = cvec[0:-1]
        # W[i] is proportional to prod of C[0..i-1]; normalise so sum(W) = 1.
        cvec_prod = np.cumprod(cvec)
        cvec_prod = np.pad(cvec_prod, (1, 0), 'constant', constant_values=1.0)
        w1 = np.divide(1.0, np.sum(cvec_prod))
        w_tail = np.multiply(cvec_prod[1:len(cvec_prod)], w1)
        wvec = np.append(w1, w_tail)
        logger.debug("{0} {1} {2} {3}".format(ranking.topic_id, self.name(), "wvec", wvec[0:11]))
        return wvec
    def measure(self, ranking):
        """
        Given the ranking, measure estimates the various measurements given the CWL framework
        if residuals are required, these are also computed.
        :param ranking: CWL Ranking object
        :return: the expected utility per item
        """
        self.ranking = ranking
        # score based on worse case - lower bounds
        (eu, etu, ec, etc, ei) = self._do_score(ranking, True)
        self.expected_utility = eu
        self.expected_total_utility = etu
        self.expected_cost = ec
        self.expected_total_cost = etc
        self.expected_items = ei
        if self.residuals:
            # score based on best case - upper bounds
            (eu, etu, ec, etc, ei) = self._do_score(ranking, False)
            # compute the residual i.e. the difference between the upper and lower bounds
            self.residual_expected_utility = eu - self.expected_utility
            self.residual_expected_total_utility = etu - self.expected_total_utility
            self.residual_expected_cost = ec - self.expected_cost
            self.residual_expected_total_cost = etc - self.expected_total_cost
            self.residual_expected_items = ei - self.expected_items
        # return the rate of gain per document
        return self.expected_utility
    def _do_score(self, ranking, worse_case=True):
        """
        An internal function that handles the scoring of a ranking given the CWL machinery.
        :param ranking: CWL Ranking object
        :param worse_case: Boolean; True scores the worse case (lower bound),
            False the best case (upper bound)
        :return: tuple of (expected utility per item, expected total utility,
            expected cost per item, expected total cost, expected items examined)
        """
        wvec = self.w_vector(ranking, worse_case)
        lvec = self.l_vector(ranking, worse_case)
        gain_vec = ranking.get_gain_vector(worse_case)
        cost_vec = ranking.get_cost_vector(worse_case)
        cum_gains = np.cumsum(gain_vec)
        cum_costs = np.cumsum(cost_vec)
        expected_utility = np.sum(np.dot(wvec, gain_vec))
        expected_total_utility = np.sum(np.dot(lvec, cum_gains))
        expected_cost = np.sum(np.dot(wvec, cost_vec))
        expected_total_cost = np.sum(np.dot(lvec, cum_costs))
        # W[0] is the reciprocal of the expected number of items examined.
        expected_items = 1.0 / wvec[0]
        return expected_utility, expected_total_utility, expected_cost, expected_total_cost, expected_items
    def report(self):
        """Print a tab-separated summary line for the last measured ranking,
        including the residual columns when self.residuals is True."""
        if self.residuals:
            print("{0}\t{1}\t{2:.4f}\t{3:.4f}\t{4:.4f}\t{5:.4f}\t{6:.4f}\t{7:.4f}\t{8:.4f}\t{9:.4f}\t{10:.4f}\t{11:.4f}".format(
                self.ranking.topic_id, self.name(), self.expected_utility, self.expected_total_utility,
                self.expected_cost, self.expected_total_cost, self.expected_items,
                self.residual_expected_utility, self.residual_expected_total_utility,
                self.residual_expected_cost, self.residual_expected_total_cost, self.residual_expected_items
            ))
        else:
            print("{0}\t{1}\t{2:.4f}\t{3:.4f}\t{4:.4f}\t{5:.4f}\t{6:.4f}".format(
                self.ranking.topic_id, self.name(), self.expected_utility, self.expected_total_utility,
                self.expected_cost, self.expected_total_cost, self.expected_items,
            ))
    def csv(self):
        """Return the main measurements as a comma-separated string
        (metric name plus the five expected values; no residuals)."""
        return ("{0},{1:.3f},{2:.3f},{3:.3f},{4:.3f},{5:.3f}".format(
            self.name(), self.expected_utility, self.expected_total_utility, self.expected_cost,
            self.expected_total_cost, self.expected_items))
    def get_scores(self):
        """
        :return: list with values of each measurement for the previously measured ranking
        """
        scores = [
            self.expected_utility,
            self.expected_total_utility,
            self.expected_cost,
            self.expected_total_cost,
            self.expected_items]
        return scores
    def _pad_vector(self, vec1, n, val):
        """
        Pads vector 1 up to size n, with the value val
        :param vec1: np array
        :param n: size of the desired array
        :param val: the value to be inserted if padding is required
        :return: the padded vector
        """
        if len(vec1) < n:
            vec1 = np.pad(vec1, (0, n-len(vec1)), 'constant', constant_values=val)
        return vec1
    def validate_gain_range(self, min_allowed_gain, max_allowed_gain, gain_vec):
        """
        Checks that the gain vector does not violate any metric assumptions
        These assumptions (about the min or max gain) should be provided by
        the calling metric class.
        :raises ValueError: if any gain lies outside the allowed range
        """
        if np.min(gain_vec) < min_allowed_gain:
            raise ValueError("Supplied gain values violate metric assumptions: Metric = {}.\n "
                             "The minimum allowable gain for this metric is: {}.".format(self.name(), min_allowed_gain))
        if np.max(gain_vec) > max_allowed_gain:
            raise ValueError("Supplied gain values ({}) violate metric assumptions: Metric = {}.\n "
                             "The maximum allowable gain for this "
                             "metric is: {}.".format(np.max(gain_vec), self.name(), max_allowed_gain))
| [
"logging.getLogger",
"numpy.max",
"numpy.append",
"numpy.array",
"numpy.sum",
"numpy.dot",
"numpy.min",
"numpy.cumsum",
"numpy.pad",
"numpy.cumprod"
] | [((60, 84), 'logging.getLogger', 'logging.getLogger', (['"""cwl"""'], {}), "('cwl')\n", (77, 84), False, 'import logging\n'), ((2183, 2201), 'numpy.cumprod', 'np.cumprod', (['cshift'], {}), '(cshift)\n', (2193, 2201), True, 'import numpy as np\n'), ((2929, 2945), 'numpy.cumprod', 'np.cumprod', (['cvec'], {}), '(cvec)\n', (2939, 2945), True, 'import numpy as np\n'), ((2967, 3025), 'numpy.pad', 'np.pad', (['cvec_prod', '(1, 0)', '"""constant"""'], {'constant_values': '(1.0)'}), "(cvec_prod, (1, 0), 'constant', constant_values=1.0)\n", (2973, 3025), True, 'import numpy as np\n'), ((3153, 3174), 'numpy.append', 'np.append', (['w1', 'w_tail'], {}), '(w1, w_tail)\n', (3162, 3174), True, 'import numpy as np\n'), ((5213, 5232), 'numpy.cumsum', 'np.cumsum', (['gain_vec'], {}), '(gain_vec)\n', (5222, 5232), True, 'import numpy as np\n'), ((5254, 5273), 'numpy.cumsum', 'np.cumsum', (['cost_vec'], {}), '(cost_vec)\n', (5263, 5273), True, 'import numpy as np\n'), ((2138, 2153), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2146, 2153), True, 'import numpy as np\n'), ((3055, 3072), 'numpy.sum', 'np.sum', (['cvec_prod'], {}), '(cvec_prod)\n', (3061, 3072), True, 'import numpy as np\n'), ((5309, 5331), 'numpy.dot', 'np.dot', (['wvec', 'gain_vec'], {}), '(wvec, gain_vec)\n', (5315, 5331), True, 'import numpy as np\n'), ((5374, 5397), 'numpy.dot', 'np.dot', (['lvec', 'cum_gains'], {}), '(lvec, cum_gains)\n', (5380, 5397), True, 'import numpy as np\n'), ((5431, 5453), 'numpy.dot', 'np.dot', (['wvec', 'cost_vec'], {}), '(wvec, cost_vec)\n', (5437, 5453), True, 'import numpy as np\n'), ((5493, 5516), 'numpy.dot', 'np.dot', (['lvec', 'cum_costs'], {}), '(lvec, cum_costs)\n', (5499, 5516), True, 'import numpy as np\n'), ((7915, 7931), 'numpy.min', 'np.min', (['gain_vec'], {}), '(gain_vec)\n', (7921, 7931), True, 'import numpy as np\n'), ((8183, 8199), 'numpy.max', 'np.max', (['gain_vec'], {}), '(gain_vec)\n', (8189, 8199), True, 'import numpy as np\n'), ((8445, 8461), 
'numpy.max', 'np.max', (['gain_vec'], {}), '(gain_vec)\n', (8451, 8461), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Day 78 - Nobel Prize Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SgXvIFxNzlhilbpyDmjbAIQlJ4hkXYF5
# Setup and Context
### Introduction
On November 27, 1895, <NAME> signed his last will in Paris. When it was opened after his death, the will caused a lot of controversy, as Nobel had left much of his wealth for the establishment of a prize.
<NAME> dictates that his entire remaining estate should be used to endow “prizes to those who, during the preceding year, have conferred the greatest benefit to humankind”.
Every year the Nobel Prize is given to scientists and scholars in the categories chemistry, literature, physics, physiology or medicine, economics, and peace.
<img src=https://i.imgur.com/36pCx5Q.jpg>
Let's see what patterns we can find in the data of the past Nobel laureates. What can we learn about the Nobel prize and our world more generally?
### Upgrade plotly (only Google Colab Notebook)
Google Colab may not be running the latest version of plotly. If you're working in Google Colab, uncomment the line below, run the cell, and restart your notebook server.
"""
# Commented out IPython magic to ensure Python compatibility.
# %pip install --upgrade plotly
"""### Import Statements"""
import pandas as pd
import numpy as np
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
"""### Notebook Presentation"""
pd.options.display.float_format = '{:,.2f}'.format
"""### Read the Data"""
df_data = pd.read_csv('nobel_prize_data.csv')
"""Caveats: The exact birth dates for <NAME>, <NAME>, and <NAME> are unknown. I've substituted them with mid-year estimate of July 2nd.
# Data Exploration & Cleaning
**Challenge**: Preliminary data exploration.
* What is the shape of `df_data`? How many rows and columns?
* What are the column names?
* In which year was the Nobel prize first awarded?
* Which year is the latest year included in the dataset?
"""
# Preliminary exploration: dimensions, column names, and the span of award years.
print(f'df_data has {df_data.shape[0]} rows and {df_data.shape[1]} columns.')
print('The column names are:')
for col in df_data.columns:
    print(col)
# The original called df_data.sort_values('year') and discarded the result (a
# no-op), then printed hard-coded years; derive first/latest years from the
# data itself so the claims stay correct if the CSV is updated.
print(f'The Nobel prize first awarded in {df_data.year.min()}.')
print(f'The latest year included in the dataset is {df_data.year.max()}.')
"""**Challenge**:
* Are there any duplicate values in the dataset?
* Are there NaN values in the dataset?
* Which columns tend to have NaN values?
* How many NaN values are there per column?
* Why do these columns have NaN values?
### Check for Duplicates
"""
# Data-quality checks: duplicates, NaN presence, and per-column NaN counts.
print(f'Are there any duplicate values in the dataset? {df_data.duplicated().values.any()}')
"""### Check for NaN Values"""
print(f'Are there NaN values in the dataset? {df_data.isna().values.any()}\n')
print(f'Which columns tend to have NaN values?\n{df_data.isna().any()}\n')
print(f'How many NaN values are there per column?\n{df_data.isna().sum()}\n')
# Bare expression: renders the NaN-birth-date rows in a notebook, but its
# value is silently discarded when this export runs as a plain script.
df_data.loc[df_data.birth_date.isna()]
"""### Type Conversions
**Challenge**:
* Convert the `birth_date` column to Pandas `Datetime` objects
* Add a Column called `share_pct` which has the laureates' share as a percentage in the form of a floating-point number.
"""
df_data.head()
"""#### Convert Year and Birth Date to Datetime"""
# Parse the birth_date strings into proper pandas Timestamps.
df_data.birth_date = pd.to_datetime(df_data.birth_date)
"""#### Add a Column with the Prize Share as a Percentage"""
# prize_share is stored as a fraction string like '1/2'; split it into the
# two halves, convert each to a number, and store the ratio as a float column.
share_parts = df_data.prize_share.str.split('/', expand=True)
share_numerator = pd.to_numeric(share_parts[0])
share_denominator = pd.to_numeric(share_parts[1])
df_data['share_pct'] = share_numerator / share_denominator
df_data.info()
"""# Plotly Donut Chart: Percentage of Male vs. Female Laureates
**Challenge**: Create a [donut chart using plotly](https://plotly.com/python/pie-charts/) which shows how many prizes went to men compared to how many prizes went to women. What percentage of all the prizes went to women?
"""
# Count laureates per sex and show the split as a donut chart
# (hole=0.4 turns the pie into a donut).
biology = df_data.sex.value_counts()
fig = px.pie(labels=biology.index,
       values=biology.values,
       title='Percentage of Male vs Female winners',
       names=biology.index,
       hole=0.4)
fig.show()
"""# Who were the first 3 Women to Win the Nobel Prize?
**Challenge**:
* What are the names of the first 3 female Nobel laureates?
* What did they win the prize for?
* What do you see in their `birth_country`? Were they part of an organisation?
"""
# Bare expression: shows the first three female laureates in a notebook only.
df_data[df_data.sex == 'Female'].sort_values('year')[:3]
"""# Find the Repeat Winners
**Challenge**: Did some people get a Nobel Prize more than once? If so, who were they?
"""
# keep=False marks every occurrence of a duplicated full_name, so
# multiple_winners holds all prizes won by multi-time laureates.
is_winner = df_data.duplicated(subset=['full_name'], keep=False)
multiple_winners = df_data[is_winner]
print(f'There are {multiple_winners.full_name.nunique()} winners that won multiple times')
multiple_winners[['year', 'full_name']]
"""# Number of Prizes per Category
**Challenge**:
* In how many categories are prizes awarded?
* Create a plotly bar chart with the number of prizes awarded by category.
* Use the color scale called `Aggrnyl` to colour the chart, but don't show a color axis.
* Which category has the most number of prizes awarded?
* Which category has the fewest number of prizes awarded?
"""
# Number of distinct prize categories (value discarded outside a notebook).
df_data.category.nunique()
# Vertical bar chart of prize counts per category, coloured by count.
prizes_per_category = df_data.category.value_counts()
v_bar = px.bar(
    x = prizes_per_category.index,
    y = prizes_per_category.values,
    color = prizes_per_category.values,
    color_continuous_scale='Aggrnyl',
    title='Number of Prizes Awarded per Category')
v_bar.update_layout(xaxis_title='Nobel Prize Category',
                    coloraxis_showscale=False,
                    yaxis_title='Number of Prizes')
v_bar.show()
"""**Challenge**:
* When was the first prize in the field of Economics awarded?
* Who did the prize go to?
"""
# Ad-hoc queries about the Economics prize; the bare expressions below render
# only in a notebook and are discarded when run as a plain script.
df_data[df_data.category == 'Economics'].sort_values('year')[:3]
# Note: this prints a one-row Series slice (with index), not a bare year.
print(f'First year: {df_data.year[df_data.category == "Economics"][:1]}')
df_data.full_name[df_data.category == 'Economics'][:1]
df_data[df_data.birth_country == 'Romania']
"""# Male and Female Winners by Category
**Challenge**: Create a [plotly bar chart](https://plotly.com/python/bar-charts/) that shows the split between men and women by category.
* Hover over the bar chart. How many prizes went to women in Literature compared to Physics?
<img src=https://i.imgur.com/od8TfOp.png width=650>
"""
# Prize counts per (category, sex) pair, then a grouped bar chart split by sex.
cat_men_women = df_data.groupby(['category', 'sex'],
                               as_index=False).agg({'prize': pd.Series.count})
cat_men_women.sort_values('prize', ascending=False, inplace=True)
v_bar_split = px.bar(x = cat_men_women.category,
                     y = cat_men_women.prize,
                     color = cat_men_women.sex,
                     title='Number of Prizes Awarded per Category split by Men and Women')
v_bar_split.update_layout(xaxis_title='Nobel Prize Category',
                          yaxis_title='Number of Prizes')
v_bar_split.show()
"""# Number of Prizes Awarded Over Time
**Challenge**: Are more prizes awarded recently than when the prize was first created? Show the trend in awards visually.
* Count the number of prizes awarded every year.
* Create a 5 year rolling average of the number of prizes (Hint: see previous lessons analysing Google Trends).
* Using Matplotlib superimpose the rolling average on a scatter plot.
* Show a tick mark on the x-axis for every 5 years from 1900 to 2020. (Hint: you'll need to use NumPy).
<img src=https://i.imgur.com/4jqYuWC.png width=650>
* Use the [named colours](https://matplotlib.org/3.1.0/gallery/color/named_colors.html) to draw the data points in `dodgerblue` while the rolling average is coloured in `crimson`.
<img src=https://i.imgur.com/u3RlcJn.png width=350>
* Looking at the chart, did the first and second world wars have an impact on the number of prizes being given out?
* What could be the reason for the trend in the chart?
"""
# Prizes awarded per year, smoothed with a 5-year rolling mean, shown as a
# scatter (raw counts) with the rolling average superimposed.
# NOTE: prize_per_year and moving_average are reused by the dual-axis chart below.
prize_per_year = df_data.groupby(by='year').count().prize
moving_average = prize_per_year.rolling(window=5).mean()
plt.figure(figsize=(16,8), dpi=200)
plt.title('Number of Nobel Prizes Awarded per Year', fontsize=18)
plt.yticks(fontsize=14)
# A tick every 5 years from 1900 to 2020 (np.arange end is exclusive, hence 2021).
plt.xticks(ticks=np.arange(1900, 2021, step=5),
           fontsize=14,
           rotation=45)
ax = plt.gca() # get current axis
ax.set_xlim(1900, 2020)
ax.scatter(x=prize_per_year.index,
           y=prize_per_year.values,
           c='dodgerblue',
           alpha=0.7,
           s=100,)
ax.plot(prize_per_year.index,
        moving_average.values,
        c='crimson',
        linewidth=3,)
plt.show()
"""# Are More Prizes Shared Than Before?
**Challenge**: Investigate if more prizes are shared than before.
* Calculate the average prize share of the winners on a year by year basis.
* Calculate the 5 year rolling average of the percentage share.
* Copy-paste the cell from the chart you created above.
* Modify the code to add a secondary axis to your Matplotlib chart.
* Plot the rolling average of the prize share on this chart.
* See if you can invert the secondary y-axis to make the relationship even more clear.
"""
# Yearly mean prize share, smoothed over 5 years, overlaid (on an inverted
# secondary y-axis) on the prize-count chart to show both trends together.
yearly_avg_share = df_data.groupby(by='year').agg({'share_pct': pd.Series.mean})
share_moving_average = yearly_avg_share.rolling(window=5).mean()
plt.figure(figsize=(16,8), dpi=200)
plt.title('Number of Nobel Prizes Awarded per Year', fontsize=18)
plt.yticks(fontsize=14)
plt.xticks(ticks=np.arange(1900, 2021, step=5),
           fontsize=14,
           rotation=45)
ax1 = plt.gca()
ax2 = ax1.twinx()
ax1.set_xlim(1900, 2020)
# Can invert axis
ax2.invert_yaxis()
# prize_per_year / moving_average come from the previous chart's computation.
ax1.scatter(x=prize_per_year.index,
           y=prize_per_year.values,
           c='dodgerblue',
           alpha=0.7,
           s=100,)
ax1.plot(prize_per_year.index,
        moving_average.values,
        c='crimson',
        linewidth=3,)
ax2.plot(prize_per_year.index,
        share_moving_average.values,
        c='grey',
        linewidth=3,)
plt.show()
"""# The Countries with the Most Nobel Prizes
**Challenge**:
* Create a Pandas DataFrame called `top20_countries` that has the two columns. The `prize` column should contain the total number of prizes won.
<img src=https://i.imgur.com/6HM8rfB.png width=350>
* Is it best to use `birth_country`, `birth_country_current` or `organization_country`?
* What are some potential problems when using `birth_country` or any of the others? Which column is the least problematic?
* Then use plotly to create a horizontal bar chart showing the number of prizes won by each country. Here's what you're after:
<img src=https://i.imgur.com/agcJdRS.png width=750>
* What is the ranking for the top 20 countries in terms of the number of prizes?
"""
# Prize totals per current birth country; the last 20 rows of the ascending
# sort are the top 20. NOTE: top20_countries is reused by the merge further down.
top_countries = df_data.groupby(['birth_country_current'],
                                as_index=False).agg({'prize': pd.Series.count})
top_countries.sort_values(by='prize', inplace=True)
top20_countries = top_countries[-20:]
h_bar = px.bar(x=top20_countries.prize,
               y=top20_countries.birth_country_current,
               orientation='h',
               color=top20_countries.prize,
               color_continuous_scale='Viridis',
               title='Top 20 Countries by Number of Prizes')
h_bar.update_layout(xaxis_title='Number of Prizes',
                    yaxis_title='Country',
                    coloraxis_showscale=False)
h_bar.show()
"""# Use a Choropleth Map to Show the Number of Prizes Won by Country
* Create this choropleth map using [the plotly documentation](https://plotly.com/python/choropleth-maps/):
<img src=https://i.imgur.com/s4lqYZH.png>
* Experiment with [plotly's available colours](https://plotly.com/python/builtin-colorscales/). I quite like the sequential colour `matter` on this map.
Hint: You'll need to use a 3 letter country code for each country.
"""
# Prize totals per (country, ISO-3166 code) pair, drawn on a world choropleth.
df_countries = df_data.groupby(['birth_country_current', 'ISO'],
                               as_index=False).agg({'prize': pd.Series.count})
# BUG FIX: the original called .sort_values(...) without assigning the result
# or using inplace=True, making the line a no-op; keep the sorted frame as
# clearly intended (ordering is cosmetic for the map itself).
df_countries = df_countries.sort_values('prize', ascending=False)
world_map = px.choropleth(df_countries,
                          locations='ISO',
                          color='prize',
                          hover_name='birth_country_current',
                          color_continuous_scale=px.colors.sequential.matter)
world_map.update_layout(coloraxis_showscale=True,)
world_map.show()
"""# In Which Categories are the Different Countries Winning Prizes?
**Challenge**: See if you can divide up the plotly bar chart you created above to show the which categories made up the total number of prizes. Here's what you're aiming for:
<img src=https://i.imgur.com/iGaIKCL.png>
* In which category are Germany and Japan the weakest compared to the United States?
* In which category does Germany have more prizes than the UK?
* In which categories does France have more prizes than Germany?
* Which category makes up most of Australia's nobel prizes?
* Which category makes up half of the prizes in the Netherlands?
* Does the United States have more prizes in Economics than all of France? What about in Physics or Medicine?
The hard part is preparing the data for this chart!
*Hint*: Take a two-step approach. The first step is grouping the data by country and category. Then you can create a DataFrame that looks something like this:
<img src=https://i.imgur.com/VKjzKa1.png width=450>
"""
# Prize counts per (country, category); merging with top20_countries restricts
# the chart to the top-20 countries and brings in each country's total.
cat_country = df_data.groupby(['birth_country_current', 'category'],
                              as_index=False).agg({'prize': pd.Series.count})
cat_country.sort_values(by='prize', ascending=False, inplace=True)
merged_df = pd.merge(cat_country, top20_countries, on='birth_country_current')
# change column names
merged_df.columns = ['birth_country_current', 'category', 'cat_prize', 'total_prize']
# Sort by the country total so bars stack in overall-ranking order.
merged_df.sort_values(by='total_prize', inplace=True)
cat_cntry_bar = px.bar(x=merged_df.cat_prize,
                       y=merged_df.birth_country_current,
                       color=merged_df.category,
                       orientation='h',
                       title='Top 20 Countries by Number of Prizes and Category')
cat_cntry_bar.update_layout(xaxis_title='Number of Prizes',
                            yaxis_title='Country')
cat_cntry_bar.show()
"""### Number of Prizes Won by Each Country Over Time
* When did the United States eclipse every other country in terms of the number of prizes won?
* Which country or countries were leading previously?
* Calculate the cumulative number of prizes won by each country in every year. Again, use the `birth_country_current` of the winner to calculate this.
* Create a [plotly line chart](https://plotly.com/python/line-charts/) where each country is a coloured line.
"""
# Cumulative prize count per country over time: count per (country, year),
# then a running sum within each country (groupby(level=[0]).cumsum()).
prize_by_year = df_data.groupby(by=['birth_country_current', 'year'], as_index=False).count()
prize_by_year = prize_by_year.sort_values('year')[['year', 'birth_country_current', 'prize']]
cumulative_prizes = prize_by_year.groupby(by=['birth_country_current',
                                              'year']).sum().groupby(level=[0]).cumsum()
cumulative_prizes.reset_index(inplace=True)
# One coloured line per country.
l_chart = px.line(cumulative_prizes,
                x='year',
                y='prize',
                color='birth_country_current',
                hover_name='birth_country_current')
l_chart.update_layout(xaxis_title='Year',
                      yaxis_title='Number of Prizes')
l_chart.show()
"""# What are the Top Research Organisations?
**Challenge**: Create a bar chart showing the organisations affiliated with the Nobel laureates. It should looks something like this:
<img src=https://i.imgur.com/zZihj2p.png width=600>
* Which organisations make up the top 20?
* How many Nobel prize winners are affiliated with the University of Chicago and Harvard University?
"""
# Top 20 research organisations by laureate count, as a horizontal bar chart
# (re-sorted ascending so the largest bar ends up on top).
top20_orgs = df_data.organization_name.value_counts()[:20]
top20_orgs.sort_values(ascending=True, inplace=True)
org_bar = px.bar(x = top20_orgs.values,
                y = top20_orgs.index,
                orientation='h',
                color=top20_orgs.values,
                color_continuous_scale=px.colors.sequential.haline,
                title='Top 20 Research Institutions by Number of Prizes')
org_bar.update_layout(xaxis_title='Number of Prizes',
                      yaxis_title='Institution',
                      coloraxis_showscale=False)
org_bar.show()
"""# Which Cities Make the Most Discoveries?
Where do major discoveries take place?
**Challenge**:
* Create another plotly bar chart graphing the top 20 organisation cities of the research institutions associated with a Nobel laureate.
* Where is the number one hotspot for discoveries in the world?
* Which city in Europe has had the most discoveries?
"""
# Top 20 cities hosting the research organisations of Nobel laureates,
# re-sorted ascending so the busiest city sits at the top of the chart.
top20_org_cities = df_data.organization_city.value_counts().head(20)
top20_org_cities = top20_org_cities.sort_values(ascending=True)
# Horizontal bar chart coloured by prize count (Plasma scale, colour bar hidden).
city_bar2 = px.bar(
    x=top20_org_cities.values,
    y=top20_org_cities.index,
    orientation='h',
    color=top20_org_cities.values,
    color_continuous_scale=px.colors.sequential.Plasma,
    title='Which Cities Do the Most Research?',
)
city_bar2.update_layout(
    xaxis_title='Number of Prizes',
    yaxis_title='City',
    coloraxis_showscale=False,
)
city_bar2.show()
"""# Where are Nobel Laureates Born? Chart the Laureate Birth Cities
**Challenge**:
* Create a plotly bar chart graphing the top 20 birth cities of Nobel laureates.
* Use a named colour scale called `Plasma` for the chart.
* What percentage of the United States prizes came from Nobel laureates born in New York?
* How many Nobel laureates were born in London, Paris and Vienna?
* Out of the top 5 cities, how many are in the United States?
"""
# Top 20 laureate birth cities, plotted the same way as the org-city chart.
top20_cities = df_data.birth_city.value_counts()[:20]
top20_cities.sort_values(ascending=True, inplace=True)
city_bar = px.bar(x=top20_cities.values,
                 y=top20_cities.index,
                 orientation='h',
                 color=top20_cities.values,
                 color_continuous_scale=px.colors.sequential.Plasma,
                 title='Where were the Nobel Laureates Born?')
city_bar.update_layout(xaxis_title='Number of Prizes',
                       yaxis_title='City of Birth',
                       coloraxis_showscale=False)
city_bar.show()
"""# Plotly Sunburst Chart: Combine Country, City, and Organisation
**Challenge**:
* Create a DataFrame that groups the number of prizes by organisation.
* Then use the [plotly documentation to create a sunburst chart](https://plotly.com/python/sunburst-charts/)
* Click around in your chart, what do you notice about Germany and France?
Here's what you're aiming for:
<img src=https://i.imgur.com/cemX4m5.png width=300>
"""
# Sunburst: country -> city -> organisation hierarchy, sized by prize count.
country_city_org = df_data.groupby(by=['organization_country',
                                       'organization_city',
                                       'organization_name'], as_index=False).agg({'prize': pd.Series.count})
country_city_org = country_city_org.sort_values('prize', ascending=False)
burst = px.sunburst(country_city_org,
                    path=['organization_country', 'organization_city', 'organization_name'],
                    values='prize',
                    title='Where do Discoveries Take Place?')
# NOTE(review): axis titles look copy-pasted from the bar charts — a sunburst
# has no x/y axes, so these two settings have no visible effect.
burst.update_layout(xaxis_title='Number of Prizes',
                    yaxis_title='City',
                    coloraxis_showscale=False)
burst.show()
"""# Patterns in the Laureate Age at the Time of the Award
How Old Are the Laureates When They Win the Prize?
**Challenge**: Calculate the age of the laureate in the year of the ceremony and add this as a column called `winning_age` to the `df_data` DataFrame. Hint: you can use [this](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.html) to help you.
"""
# Age at the award ceremony = award year minus birth year; stored as a new
# column used by all of the age analysis below.
birth_years = df_data.birth_date.dt.year
df_data['winning_age'] = df_data.year - birth_years
"""### Who were the oldest and youngest winners?
**Challenge**:
* What are the names of the youngest and oldest Nobel laureate?
* What did they win the prize for?
* What is the average age of a winner?
* 75% of laureates are younger than what age when they receive the prize?
* Use Seaborn to [create histogram](https://seaborn.pydata.org/generated/seaborn.histplot.html) to visualise the distribution of laureate age at the time of winning. Experiment with the number of `bins` to see how the visualisation changes.
"""
# Show the single oldest and youngest winners by winning_age.
# BUG FIX: display() is an IPython/Colab builtin and raises NameError when
# this notebook export runs as a plain script; fall back to print() there.
try:
    display(df_data.nlargest(n=1, columns='winning_age'))
    display(df_data.nsmallest(n=1, columns='winning_age'))
except NameError:
    print(df_data.nlargest(n=1, columns='winning_age'))
    print(df_data.nsmallest(n=1, columns='winning_age'))
"""### Descriptive Statistics for the Laureate Age at Time of Award
* Calculate the descriptive statistics for the age at the time of the award.
* Then visualise the distribution in the form of a histogram using [Seaborn's .histplot() function](https://seaborn.pydata.org/generated/seaborn.histplot.html).
* Experiment with the `bin` size. Try 10, 20, 30, and 50.
"""
# Distribution of laureate age at the time of the award (30 bins).
plt.figure(figsize=(8, 4), dpi=200)
sns.histplot(data=df_data,
             x=df_data.winning_age,
             bins=30)
plt.xlabel('Age')
plt.title('Distribution of Age on Receipt of Prize')
plt.show()
"""### Age at Time of Award throughout History
Are Nobel laureates being nominated later in life than before? Have the ages of laureates at the time of the award increased or decreased over time?
**Challenge**
* Use Seaborn to [create a .regplot](https://seaborn.pydata.org/generated/seaborn.regplot.html?highlight=regplot#seaborn.regplot) with a trendline.
* Set the `lowess` parameter to `True` to show a moving average of the linear fit.
* According to the best fit line, how old were Nobel laureates in the years 1900-1940 when they were awarded the prize?
* According to the best fit line, what age would it predict for a Nobel laureate in 2020?
"""
# Winning age vs. award year with a LOWESS-smoothed trend line
# (lowess=True replaces the linear fit with a moving local fit).
plt.figure(figsize=(8,4), dpi=200)
with sns.axes_style("whitegrid"):
    sns.regplot(data=df_data,
                x='year',
                y='winning_age',
                lowess=True,
                scatter_kws = {'alpha': 0.4},
                line_kws={'color': 'black'})
plt.show()
"""### Winning Age Across the Nobel Prize Categories
How does the age of laureates vary by category?
* Use Seaborn's [`.boxplot()`](https://seaborn.pydata.org/generated/seaborn.boxplot.html?highlight=boxplot#seaborn.boxplot) to show how the mean, quartiles, max, and minimum values vary across categories. Which category has the longest "whiskers"?
* In which prize category are the average winners the oldest?
* In which prize category are the average winners the youngest?
"""
# Box plot of winning age per category (median, quartiles, whiskers, outliers).
plt.figure(figsize=(8,4), dpi=200)
with sns.axes_style("whitegrid"):
    sns.boxplot(data=df_data,
                x='category',
                y='winning_age')
plt.show()
"""**Challenge**
* Now use Seaborn's [`.lmplot()`](https://seaborn.pydata.org/generated/seaborn.lmplot.html?highlight=lmplot#seaborn.lmplot) and the `row` parameter to create 6 separate charts for each prize category. Again set `lowess` to `True`.
* What are the winning age trends in each category?
* Which category has the age trending up and which category has the age trending down?
* Is this `.lmplot()` telling a different story from the `.boxplot()`?
* Create another chart with Seaborn. This time use `.lmplot()` to put all 6 categories on the same chart using the `hue` parameter.
"""
# One LOWESS-trend chart per category, stacked vertically (row='category').
with sns.axes_style('whitegrid'):
    sns.lmplot(data=df_data,
               x='year',
               y='winning_age',
               row = 'category',
               lowess=True,
               aspect=2,
               scatter_kws = {'alpha': 0.6},
               line_kws = {'color': 'black'},)
plt.show()
with sns.axes_style("whitegrid"):
sns.lmplot(data=df_data,
x='year',
y='winning_age',
hue='category',
lowess=True,
aspect=2,
scatter_kws={'alpha': 0.5},
line_kws={'linewidth': 5})
plt.show() | [
"pandas.read_csv",
"seaborn.histplot",
"pandas.to_datetime",
"numpy.arange",
"plotly.express.pie",
"seaborn.regplot",
"plotly.express.sunburst",
"matplotlib.pyplot.xlabel",
"plotly.express.choropleth",
"plotly.express.line",
"matplotlib.pyplot.yticks",
"seaborn.axes_style",
"matplotlib.pyplo... | [((1575, 1610), 'pandas.read_csv', 'pd.read_csv', (['"""nobel_prize_data.csv"""'], {}), "('nobel_prize_data.csv')\n", (1586, 1610), True, 'import pandas as pd\n'), ((3300, 3334), 'pandas.to_datetime', 'pd.to_datetime', (['df_data.birth_date'], {}), '(df_data.birth_date)\n', (3314, 3334), True, 'import pandas as pd\n'), ((3477, 3511), 'pandas.to_numeric', 'pd.to_numeric', (['separated_values[0]'], {}), '(separated_values[0])\n', (3490, 3511), True, 'import pandas as pd\n'), ((3526, 3560), 'pandas.to_numeric', 'pd.to_numeric', (['separated_values[1]'], {}), '(separated_values[1])\n', (3539, 3560), True, 'import pandas as pd\n'), ((3961, 4094), 'plotly.express.pie', 'px.pie', ([], {'labels': 'biology.index', 'values': 'biology.values', 'title': '"""Percentage of Male vs Female winners"""', 'names': 'biology.index', 'hole': '(0.4)'}), "(labels=biology.index, values=biology.values, title=\n 'Percentage of Male vs Female winners', names=biology.index, hole=0.4)\n", (3967, 4094), True, 'import plotly.express as px\n'), ((5299, 5489), 'plotly.express.bar', 'px.bar', ([], {'x': 'prizes_per_category.index', 'y': 'prizes_per_category.values', 'color': 'prizes_per_category.values', 'color_continuous_scale': '"""Aggrnyl"""', 'title': '"""Number of Prizes Awarded per Category"""'}), "(x=prizes_per_category.index, y=prizes_per_category.values, color=\n prizes_per_category.values, color_continuous_scale='Aggrnyl', title=\n 'Number of Prizes Awarded per Category')\n", (5305, 5489), True, 'import plotly.express as px\n'), ((6600, 6755), 'plotly.express.bar', 'px.bar', ([], {'x': 'cat_men_women.category', 'y': 'cat_men_women.prize', 'color': 'cat_men_women.sex', 'title': '"""Number of Prizes Awarded per Category split by Men and Women"""'}), "(x=cat_men_women.category, y=cat_men_women.prize, color=cat_men_women\n .sex, title='Number of Prizes Awarded per Category split by Men and Women')\n", (6606, 6755), True, 'import plotly.express as px\n'), ((8047, 8083), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)', 'dpi': '(200)'}), '(figsize=(16, 8), dpi=200)\n', (8057, 8083), True, 'import matplotlib.pyplot as plt\n'), ((8083, 8148), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of Nobel Prizes Awarded per Year"""'], {'fontsize': '(18)'}), "('Number of Nobel Prizes Awarded per Year', fontsize=18)\n", (8092, 8148), True, 'import matplotlib.pyplot as plt\n'), ((8149, 8172), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (8159, 8172), True, 'import matplotlib.pyplot as plt\n'), ((8278, 8287), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8285, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8585, 8595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8593, 8595), True, 'import matplotlib.pyplot as plt\n'), ((9273, 9309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 8)', 'dpi': '(200)'}), '(figsize=(16, 8), dpi=200)\n', (9283, 9309), True, 'import matplotlib.pyplot as plt\n'), ((9309, 9374), 'matplotlib.pyplot.title', 'plt.title', (['"""Number of Nobel Prizes Awarded per Year"""'], {'fontsize': '(18)'}), "('Number of Nobel Prizes Awarded per Year', fontsize=18)\n", (9318, 9374), True, 'import matplotlib.pyplot as plt\n'), ((9375, 9398), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (9385, 9398), True, 'import matplotlib.pyplot as plt\n'), ((9505, 9514), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9512, 9514), True, 'import matplotlib.pyplot as plt\n'), ((9966, 9976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9974, 9976), True, 'import matplotlib.pyplot as plt\n'), ((10964, 11171), 'plotly.express.bar', 'px.bar', ([], {'x': 'top20_countries.prize', 'y': 'top20_countries.birth_country_current', 'orientation': '"""h"""', 'color': 'top20_countries.prize', 'color_continuous_scale': '"""Viridis"""', 'title': '"""Top 20 Countries by Number of Prizes"""'}), 
"(x=top20_countries.prize, y=top20_countries.birth_country_current,\n orientation='h', color=top20_countries.prize, color_continuous_scale=\n 'Viridis', title='Top 20 Countries by Number of Prizes')\n", (10970, 11171), True, 'import plotly.express as px\n'), ((12057, 12214), 'plotly.express.choropleth', 'px.choropleth', (['df_countries'], {'locations': '"""ISO"""', 'color': '"""prize"""', 'hover_name': '"""birth_country_current"""', 'color_continuous_scale': 'px.colors.sequential.matter'}), "(df_countries, locations='ISO', color='prize', hover_name=\n 'birth_country_current', color_continuous_scale=px.colors.sequential.matter\n )\n", (12070, 12214), True, 'import plotly.express as px\n'), ((13626, 13692), 'pandas.merge', 'pd.merge', (['cat_country', 'top20_countries'], {'on': '"""birth_country_current"""'}), "(cat_country, top20_countries, on='birth_country_current')\n", (13634, 13692), True, 'import pandas as pd\n'), ((13873, 14049), 'plotly.express.bar', 'px.bar', ([], {'x': 'merged_df.cat_prize', 'y': 'merged_df.birth_country_current', 'color': 'merged_df.category', 'orientation': '"""h"""', 'title': '"""Top 20 Countries by Number of Prizes and Category"""'}), "(x=merged_df.cat_prize, y=merged_df.birth_country_current, color=\n merged_df.category, orientation='h', title=\n 'Top 20 Countries by Number of Prizes and Category')\n", (13879, 14049), True, 'import plotly.express as px\n'), ((15145, 15264), 'plotly.express.line', 'px.line', (['cumulative_prizes'], {'x': '"""year"""', 'y': '"""prize"""', 'color': '"""birth_country_current"""', 'hover_name': '"""birth_country_current"""'}), "(cumulative_prizes, x='year', y='prize', color=\n 'birth_country_current', hover_name='birth_country_current')\n", (15152, 15264), True, 'import plotly.express as px\n'), ((15955, 16163), 'plotly.express.bar', 'px.bar', ([], {'x': 'top20_orgs.values', 'y': 'top20_orgs.index', 'orientation': '"""h"""', 'color': 'top20_orgs.values', 'color_continuous_scale': 
'px.colors.sequential.haline', 'title': '"""Top 20 Research Institutions by Number of Prizes"""'}), "(x=top20_orgs.values, y=top20_orgs.index, orientation='h', color=\n top20_orgs.values, color_continuous_scale=px.colors.sequential.haline,\n title='Top 20 Research Institutions by Number of Prizes')\n", (15961, 16163), True, 'import plotly.express as px\n'), ((16916, 17128), 'plotly.express.bar', 'px.bar', ([], {'x': 'top20_org_cities.values', 'y': 'top20_org_cities.index', 'orientation': '"""h"""', 'color': 'top20_org_cities.values', 'color_continuous_scale': 'px.colors.sequential.Plasma', 'title': '"""Which Cities Do the Most Research?"""'}), "(x=top20_org_cities.values, y=top20_org_cities.index, orientation='h',\n color=top20_org_cities.values, color_continuous_scale=px.colors.\n sequential.Plasma, title='Which Cities Do the Most Research?')\n", (16922, 17128), True, 'import plotly.express as px\n'), ((17957, 18159), 'plotly.express.bar', 'px.bar', ([], {'x': 'top20_cities.values', 'y': 'top20_cities.index', 'orientation': '"""h"""', 'color': 'top20_cities.values', 'color_continuous_scale': 'px.colors.sequential.Plasma', 'title': '"""Where were the Nobel Laureates Born?"""'}), "(x=top20_cities.values, y=top20_cities.index, orientation='h', color=\n top20_cities.values, color_continuous_scale=px.colors.sequential.Plasma,\n title='Where were the Nobel Laureates Born?')\n", (17963, 18159), True, 'import plotly.express as px\n'), ((19173, 19342), 'plotly.express.sunburst', 'px.sunburst', (['country_city_org'], {'path': "['organization_country', 'organization_city', 'organization_name']", 'values': '"""prize"""', 'title': '"""Where do Discoveries Take Place?"""'}), "(country_city_org, path=['organization_country',\n 'organization_city', 'organization_name'], values='prize', title=\n 'Where do Discoveries Take Place?')\n", (19184, 19342), True, 'import plotly.express as px\n'), ((21046, 21081), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)', 'dpi': 
'(200)'}), '(figsize=(8, 4), dpi=200)\n', (21056, 21081), True, 'import matplotlib.pyplot as plt\n'), ((21082, 21140), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'df_data', 'x': 'df_data.winning_age', 'bins': '(30)'}), '(data=df_data, x=df_data.winning_age, bins=30)\n', (21094, 21140), True, 'import seaborn as sns\n'), ((21167, 21184), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Age"""'], {}), "('Age')\n", (21177, 21184), True, 'import matplotlib.pyplot as plt\n'), ((21185, 21237), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of Age on Receipt of Prize"""'], {}), "('Distribution of Age on Receipt of Prize')\n", (21194, 21237), True, 'import matplotlib.pyplot as plt\n'), ((21238, 21248), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21246, 21248), True, 'import matplotlib.pyplot as plt\n'), ((21910, 21945), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)', 'dpi': '(200)'}), '(figsize=(8, 4), dpi=200)\n', (21920, 21945), True, 'import matplotlib.pyplot as plt\n'), ((22191, 22201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22199, 22201), True, 'import matplotlib.pyplot as plt\n'), ((22687, 22722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)', 'dpi': '(200)'}), '(figsize=(8, 4), dpi=200)\n', (22697, 22722), True, 'import matplotlib.pyplot as plt\n'), ((22851, 22861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22859, 22861), True, 'import matplotlib.pyplot as plt\n'), ((23764, 23774), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23772, 23774), True, 'import matplotlib.pyplot as plt\n'), ((24068, 24078), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24076, 24078), True, 'import matplotlib.pyplot as plt\n'), ((21950, 21977), 'seaborn.axes_style', 'sns.axes_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (21964, 21977), True, 'import seaborn as sns\n'), ((21983, 22109), 'seaborn.regplot', 'sns.regplot', ([], {'data': 'df_data', 'x': 
'"""year"""', 'y': '"""winning_age"""', 'lowess': '(True)', 'scatter_kws': "{'alpha': 0.4}", 'line_kws': "{'color': 'black'}"}), "(data=df_data, x='year', y='winning_age', lowess=True,\n scatter_kws={'alpha': 0.4}, line_kws={'color': 'black'})\n", (21994, 22109), True, 'import seaborn as sns\n'), ((22727, 22754), 'seaborn.axes_style', 'sns.axes_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (22741, 22754), True, 'import seaborn as sns\n'), ((22760, 22816), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df_data', 'x': '"""category"""', 'y': '"""winning_age"""'}), "(data=df_data, x='category', y='winning_age')\n", (22771, 22816), True, 'import seaborn as sns\n'), ((23467, 23494), 'seaborn.axes_style', 'sns.axes_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (23481, 23494), True, 'import seaborn as sns\n'), ((23500, 23652), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'df_data', 'x': '"""year"""', 'y': '"""winning_age"""', 'row': '"""category"""', 'lowess': '(True)', 'aspect': '(2)', 'scatter_kws': "{'alpha': 0.6}", 'line_kws': "{'color': 'black'}"}), "(data=df_data, x='year', y='winning_age', row='category', lowess=\n True, aspect=2, scatter_kws={'alpha': 0.6}, line_kws={'color': 'black'})\n", (23510, 23652), True, 'import seaborn as sns\n'), ((23781, 23808), 'seaborn.axes_style', 'sns.axes_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (23795, 23808), True, 'import seaborn as sns\n'), ((23814, 23964), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'df_data', 'x': '"""year"""', 'y': '"""winning_age"""', 'hue': '"""category"""', 'lowess': '(True)', 'aspect': '(2)', 'scatter_kws': "{'alpha': 0.5}", 'line_kws': "{'linewidth': 5}"}), "(data=df_data, x='year', y='winning_age', hue='category', lowess=\n True, aspect=2, scatter_kws={'alpha': 0.5}, line_kws={'linewidth': 5})\n", (23824, 23964), True, 'import seaborn as sns\n'), ((8190, 8219), 'numpy.arange', 'np.arange', (['(1900)', '(2021)'], {'step': '(5)'}), '(1900, 2021, step=5)\n', (8199, 8219), 
True, 'import numpy as np\n'), ((9416, 9445), 'numpy.arange', 'np.arange', (['(1900)', '(2021)'], {'step': '(5)'}), '(1900, 2021, step=5)\n', (9425, 9445), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import meshio
import pygmsh
import numpy as np
import copy
import glob
from collections import Counter
import os
import sys
import json
import shutil
import scipy.optimize as opt
from EnergyMinimization import *
# which line of input file defines me?
line = int(sys.argv[1])
# Read in arguments from file. Use a context manager so the handle is
# closed promptly (the original left the file open for the whole run).
with open("Parameters.txt", "r") as reader:
    parameters = reader.readlines()[line].split()
# Target mesh size:
target_a = 0.2
# continuum bending modulus: READ IN FROM COMMAND LINE
kc = float(parameters[0])
# continuum shear modulus:
mu = 1
# Energetic penalty for volume change (hard-coded, not read from the command line)
B = 100000
# The Material Nonlinearity parameter, between 0 and 1. READ IN FROM COMMAND LINE
MatNon = float(parameters[1])
# the spring prestress values swept over in the main loop below
g0range = np.arange(0, -1, -0.1)
# The microscopic spring constants derived from the continuum moduli
kbend = kc/target_a
khook = mu
theta0 = 0
# root folder for data
DataFolder = "/home/jackbinysh/Code/ActiveElastocapillarity/Python/EnergyMinimization/Data/Scratch/"
# Name of the current file
ScriptName = "EnergyMinimizationScript3D.py"
# Name of the file of functions used for this run
FunctionFileName = "EnergyMinimization.py"
# Create the output folder; report (but tolerate) an already-existing folder.
try:
    os.mkdir(DataFolder)
except OSError:
    print("Creation of the directory %s failed" % DataFolder)
else:
    print("Successfully created the directory %s " % DataFolder)
# Try and clear out the folder of vtk files and log files, if there was a previous run in it.
# NOTE: glob already returns paths prefixed with DataFolder, so the previous
# os.path.join(DataFolder, filename) double-joined them — harmless only while
# DataFolder is absolute. Use the glob result directly.
for file_path in glob.glob(DataFolder + '*.vtk') + glob.glob(DataFolder + '*.log'):
    try:
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    except Exception as e:
        print('Failed to delete %s. Reason: %s' % (file_path, e))
# Dump all the parameters to a log file in the run folder (context manager
# replaces the manual open/close pair).
datadict = {
    "a": target_a,
    "kc": kc,
    "B": B,
    "mu": mu,
    "alpha": MatNon
}
with open(DataFolder + "Parameters.log", "w+") as f:
    json.dump(datadict, f)
# Dump an exact copy of this code and its helper module into the data folder
shutil.copyfile(ScriptName, DataFolder + ScriptName)
shutil.copyfile(FunctionFileName, DataFolder + FunctionFileName)
# Read in the Mesh (disabled: the mesh is generated fresh below instead)
#InputMesh=meshio.read("InputMesh.vtk")
# Make the Mesh: a tetrahedral mesh of the unit ball with target edge length target_a
with pygmsh.occ.Geometry() as geom:
    geom.characteristic_length_max = target_a
    ellipsoid = geom.add_ball([0.0, 0.0, 0.0], 1)
    InputMesh = geom.generate_mesh()
# Make the bond lists, make the oriented boundary triangles list, make the mapping
# from bonds to boundary triangles (MakeMeshData3D/OrientTriangles come from EnergyMinimization.py)
interiorbonds,edgebonds,boundarytris, bidxTotidx, tetras= MakeMeshData3D(InputMesh)
bonds=np.concatenate((interiorbonds,edgebonds))
# Consistently orient the surface triangles about the origin (presumably outward-facing — TODO confirm)
orientedboundarytris=OrientTriangles(InputMesh.points,boundarytris,np.array([0,0,0]))
boundarytris=orientedboundarytris
# Write a copy of the input Mesh, for visualisation. Per-cell flags mark which
# line cells are bonds and which of those lie on the surface.
cells=[ ("line", bonds ), ("triangle",boundarytris ), ("tetra",tetras)]
isbond= np.ones(len(bonds))
isedgebond= np.concatenate( ( np.zeros(len(interiorbonds)),np.ones(len(edgebonds)) ) )
CellDataDict={'isedgebond':[isedgebond,np.zeros(len(boundarytris)),np.zeros(len(tetras))]
             ,'isbond':[isbond,np.zeros(len(boundarytris)),np.zeros(len(tetras))]}
OutputMesh=meshio.Mesh(InputMesh.points, cells, {},CellDataDict)
OutputMesh.write(DataFolder+"InitialMesh.vtk",binary=True)
### ENERGY MINIMIZATION: precompute rest lengths, target volumes and typed arrays ###
# make the preferred rest lengths of the interior springs (their current mesh lengths)
interiorpairs = InputMesh.points[interiorbonds]
interiorvecs = np.subtract(interiorpairs[:, 0, :], interiorpairs[:, 1, :])
InteriorBondRestLengths = np.linalg.norm(interiorvecs, axis=1)
# make the preferred rest lengths of the edge springs. Initially have them at g0=1,
# but then update them in the loop below
edgepairs = InputMesh.points[edgebonds]
edgevecs = np.subtract(edgepairs[:, 0, :], edgepairs[:, 1, :])
InitialEdgeBondRestLengths = np.linalg.norm(edgevecs, axis=1)
# The volume constraint is simply that the target volume should be the initial volume
TargetVolumes = Volume3D_tetras(InputMesh.points, tetras)
# Explicitly-typed contiguous copies so numba can infer dtypes.
# np.array with an explicit dtype replaces the original element-wise Python copy
# loops (identical result, vectorized).
# Pout_ij holds the node positions and changes over the minimization.
Pout_ij = np.array(InputMesh.points, dtype=np.float64)
tetras = np.array(tetras, dtype=np.int32)
# Sweep the boundary prestress g0, minimizing the elastic energy at each value
for g0 in g0range:
    print("Current g0"+"{0:0.4f}".format(g0))
    rinterior0_ij = InteriorBondRestLengths
    # the important bit! Giving it the prestress: rest lengths of the surface springs at this g0
    EdgeBondRestLengths = g0*InitialEdgeBondRestLengths
    # Full per-bond rest lengths and per-bond nonlinearity (alpha in the interior,
    # 0 on the surface). These definitions were commented out, which left r0_ij and
    # MatNonvec undefined at the Output3D call below (NameError); restored here.
    r0_ij = np.concatenate((InteriorBondRestLengths, EdgeBondRestLengths))
    MatNonvec = np.concatenate((np.repeat(MatNon, len(interiorbonds)), np.repeat(0, len(edgebonds))))
    # Minimize the total (spring + bending + volume) energy over node positions.
    Pout_ij = opt.minimize(Numbaenergy3D, Pout_ij.ravel(),
                           options={'gtol': 1e-02, 'disp': True},
                           args=(interiorbonds,
                                 edgebonds,
                                 orientedboundarytris,
                                 bidxTotidx,
                                 tetras,
                                 rinterior0_ij,
                                 khook,
                                 kbend,
                                 g0,
                                 theta0,
                                 B,
                                 MatNon,
                                 TargetVolumes)
                           ).x.reshape((-1, 3))
    # Write out the minimized configuration for this g0.
    Name = "g0_"+"{0:0.4f}".format(g0)+".vtk"
    Output3D(Name,
             DataFolder,
             OutputMesh,
             Pout_ij,
             bonds,
             orientedboundarytris,
             bidxTotidx,
             tetras,
             r0_ij,
             khook,
             kbend,
             theta0,
             B,
             MatNonvec,
             TargetVolumes,
             g0)
| [
"shutil.rmtree",
"numpy.arange",
"os.path.join",
"numpy.subtract",
"meshio.Mesh",
"os.path.isfile",
"numpy.array",
"shutil.copyfile",
"numpy.zeros",
"glob.glob",
"os.mkdir",
"numpy.concatenate",
"numpy.linalg.norm",
"pygmsh.occ.Geometry",
"os.unlink",
"os.path.islink",
"os.path.isdir... | [((900, 922), 'numpy.arange', 'np.arange', (['(0)', '(-1)', '(-0.1)'], {}), '(0, -1, -0.1)\n', (909, 922), True, 'import numpy as np\n'), ((2387, 2409), 'json.dump', 'json.dump', (['datadict', 'f'], {}), '(datadict, f)\n', (2396, 2409), False, 'import json\n'), ((2473, 2525), 'shutil.copyfile', 'shutil.copyfile', (['ScriptName', '(DataFolder + ScriptName)'], {}), '(ScriptName, DataFolder + ScriptName)\n', (2488, 2525), False, 'import shutil\n'), ((2523, 2587), 'shutil.copyfile', 'shutil.copyfile', (['FunctionFileName', '(DataFolder + FunctionFileName)'], {}), '(FunctionFileName, DataFolder + FunctionFileName)\n', (2538, 2587), False, 'import shutil\n'), ((3037, 3079), 'numpy.concatenate', 'np.concatenate', (['(interiorbonds, edgebonds)'], {}), '((interiorbonds, edgebonds))\n', (3051, 3079), True, 'import numpy as np\n'), ((3626, 3680), 'meshio.Mesh', 'meshio.Mesh', (['InputMesh.points', 'cells', '{}', 'CellDataDict'], {}), '(InputMesh.points, cells, {}, CellDataDict)\n', (3637, 3680), False, 'import meshio\n'), ((3889, 3948), 'numpy.subtract', 'np.subtract', (['interiorpairs[:, 0, :]', 'interiorpairs[:, 1, :]'], {}), '(interiorpairs[:, 0, :], interiorpairs[:, 1, :])\n', (3900, 3948), True, 'import numpy as np\n'), ((3968, 4004), 'numpy.linalg.norm', 'np.linalg.norm', (['interiorvecs'], {'axis': '(1)'}), '(interiorvecs, axis=1)\n', (3982, 4004), True, 'import numpy as np\n'), ((4171, 4222), 'numpy.subtract', 'np.subtract', (['edgepairs[:, 0, :]', 'edgepairs[:, 1, :]'], {}), '(edgepairs[:, 0, :], edgepairs[:, 1, :])\n', (4182, 4222), True, 'import numpy as np\n'), ((4245, 4277), 'numpy.linalg.norm', 'np.linalg.norm', (['edgevecs'], {'axis': '(1)'}), '(edgevecs, axis=1)\n', (4259, 4277), True, 'import numpy as np\n'), ((4626, 4713), 'numpy.zeros', 'np.zeros', (['(InputMesh.points.shape[0], InputMesh.points.shape[1])'], {'dtype': 'np.float64'}), '((InputMesh.points.shape[0], InputMesh.points.shape[1]), dtype=np.\n float64)\n', (4634, 4713), True, 
'import numpy as np\n'), ((4852, 4912), 'numpy.zeros', 'np.zeros', (['(tetras.shape[0], tetras.shape[1])'], {'dtype': 'np.int32'}), '((tetras.shape[0], tetras.shape[1]), dtype=np.int32)\n', (4860, 4912), True, 'import numpy as np\n'), ((1481, 1501), 'os.mkdir', 'os.mkdir', (['DataFolder'], {}), '(DataFolder)\n', (1489, 1501), False, 'import os\n'), ((1767, 1798), 'glob.glob', 'glob.glob', (["(DataFolder + '*.vtk')"], {}), "(DataFolder + '*.vtk')\n", (1776, 1798), False, 'import glob\n'), ((1797, 1828), 'glob.glob', 'glob.glob', (["(DataFolder + '*.log')"], {}), "(DataFolder + '*.log')\n", (1806, 1828), False, 'import glob\n'), ((1844, 1878), 'os.path.join', 'os.path.join', (['DataFolder', 'filename'], {}), '(DataFolder, filename)\n', (1856, 1878), False, 'import os\n'), ((2667, 2688), 'pygmsh.occ.Geometry', 'pygmsh.occ.Geometry', ([], {}), '()\n', (2686, 2688), False, 'import pygmsh\n'), ((3146, 3165), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3154, 3165), True, 'import numpy as np\n'), ((1899, 1924), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1913, 1924), False, 'import os\n'), ((1928, 1953), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (1942, 1953), False, 'import os\n'), ((1967, 1987), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (1976, 1987), False, 'import os\n'), ((2001, 2025), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (2014, 2025), False, 'import os\n'), ((2039, 2063), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (2052, 2063), False, 'import shutil\n')] |
import numpy as np
import torch
import torch.nn as nn
from diffBP.networks.dnbp_hand import factors
class DNBP(nn.Module):
    def __init__(self, graph, edge_set, inc_nghbrs, enc_hidden_feats_tot=32, enc_output_feats_tot=64, mode='low_var', batch_size=3, particle_count=100,
                 particle_size=2, est_bounds=1, est_bounds_z=0, use_time=True, std=0.1, lambd=0.8, device=torch.device('cpu'), precision='float32'):
        """Differentiable Nonparametric Belief Propagation network for hand tracking.

        Args:
            graph: edge index tensor in sorted COO format (2 x 2*num_edges).
            edge_set: index of each edge's first occurrence in `graph`.
            inc_nghbrs: per-node list of incoming neighbor node ids.
            enc_hidden_feats_tot: NOTE(review) — unused in this constructor.
            enc_output_feats_tot: feature size fed to the unary density factors.
            mode: 'low_var' selects low-variance (systematic) resampling.
            batch_size, particle_count, particle_size: particle set dimensions.
            est_bounds, est_bounds_z: uniform-init half-widths for x/y and z.
            use_time: learned time-propagation factor (True) vs Gaussian diffusion.
            std: Gaussian diffusion / density kernel standard deviation.
            lambd: resampling decay; frac_resamp = 1 - lambd**t.
            device, precision: torch device and float precision ('float32' or double).
        """
        super(DNBP, self).__init__()
        # graph should be formatted in sorted COO format
        self.graph = graph
        # edge_set should index each edge single time on first occurence in graph
        self.edge_set = edge_set
        self.inc_nghbrs = inc_nghbrs
        self.mode = mode
        # graph lists each undirected edge twice, hence //2
        self.num_edges = self.graph.shape[1]//2
        self.num_nodes = int(self.graph.max())+1
        self.batch_size = batch_size
        self.particle_count = particle_count
        self.particle_size = particle_size
        self.est_bounds = est_bounds
        self.est_bounds_z = est_bounds_z
        self.std = std
        self.density_std = std
        #self.starting_std = density_std
        self.device = device
        # Particle Filter Resampling [0,1]
        self.lambd = lambd
        self.time_step = 0
        self.frac_resamp = 0.
        # Use learned time sampling factor (True) or Gaussian diffusion (False)
        self.use_time = use_time
        if precision=="float32":
            self.type = torch.float32
        else:
            self.type = torch.double
        #
        # FACTOR DEFININTIONS
        #
        # feature extractor network shared across nodes
        self.likelihood_features = factors.LikelihoodFeatures().type(self.type).to(device=self.device)
        # likelihood factors to measure unary potential for each node
        self.node_likelihoods = nn.ModuleList([factors.UnaryDensity(gross_features=enc_output_feats_tot, in_features=64, particle_size=particle_size).to(device=self.device)
                                 for _ in range(self.num_nodes)]).type(self.type)
        # pairwise translation samplers, one per undirected edge
        self.edge_samplers = nn.ModuleList([factors.PairwiseSampler(particle_size=particle_size,
                                   est_bounds=est_bounds,
                                   device=self.device,
                                   precision=self.type).to(device=self.device)
                              for _ in range(self.num_edges)]).type(self.type)
        print(self.edge_samplers)
        # edge density factors to compute compatibility of neighboring particles
        self.edge_densities = nn.ModuleList([factors.PairwiseDensity(particle_size=particle_size).to(device=self.device)
                               for _ in range(self.num_edges)]).type(self.type)
        print(self.edge_densities)
        # time factors to propagate particles through time (one per node)
        self.time_samplers = nn.ModuleList([factors.PairwiseSampler(particle_size=particle_size,
                                   est_bounds=est_bounds,
                                   device=self.device,
                                   precision=self.type).to(device=self.device)
                              for _ in range(self.num_nodes)]).type(self.type)
        #
        # END OF FACTOR DEFINITIONS
        #
        # cached output of compute_feats(); consumed by message/belief updates
        self.global_features = None
        #
        # PARTICLE DEFINITIONS
        #
        # initialize belief and message particles
        # use a list for each because the graph may lead to variable sized neighbor dimensions
        #self.reinit_particles(self.batch_size)
        #
        # END OF PARTICLE DEFINITIONS
        #
# Helper function used in forward pass to avoid affecting likelihood gradients
# Used only where training the pairwise sampling networks to ensure no 'interference' during training
# likelihood factors' training should depend only on the corresponding node, not its neighbors
def turn_off_lik_grads(self):
for p in self.node_likelihoods.parameters():
p.requires_grad = False
# Helper function used in forward pass to undo the effect of turn_off_lik_grads
# Ensures likelihood factors have gradients updated in backward pass
def turn_on_lik_grads(self):
for p in self.node_likelihoods.parameters():
p.requires_grad = True
def uniform_init(self):
self.belief_particles = [torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
self.particle_size).to(device=self.device).uniform_(-self.est_bounds,
self.est_bounds).type(self.type)
for _ in range(self.num_nodes)]
self.belief_weights = [torch.ones(self.batch_size, len(self.inc_nghbrs[_]),
self.particle_count).to(device=self.device).type(self.type)
/ (len(self.inc_nghbrs[_]) * self.particle_count)
for _ in range(self.num_nodes)]
self.message_particles = [torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
self.particle_size).to(device=self.device).uniform_(-self.est_bounds,
self.est_bounds).type(self.type)
for _ in range(self.num_nodes)]
self.message_weights = [torch.ones(self.batch_size, len(self.inc_nghbrs[_]),
self.particle_count).to(device=self.device).type(self.type)
/ (len(self.inc_nghbrs[_]) * self.particle_count)
for _ in range(self.num_nodes)]
self.message_weights_unary = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
self.particle_count).to(device=self.device).type(self.type)
/ (len(self.inc_nghbrs[i]) * self.particle_count)
for i in range(self.num_nodes)]
self.message_weights_neigh = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
self.particle_count).to(device=self.device).type(self.type)
/ (len(self.inc_nghbrs[i]) * self.particle_count)
for i in range(self.num_nodes)]
self.belief_weights_lik = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
self.particle_count).to(device=self.device).type(self.type)
/ (len(self.inc_nghbrs[i]) * self.particle_count)
for i in range(self.num_nodes)]
    def gt_init(self, tru):
        """Initialize belief/message particles around ground-truth keypoints.

        Each node's particles are the ground-truth pose `tru[:, node, :]`
        (shape (batch, num_nodes, particle_size)) plus independent uniform
        noise: +/-est_bounds in the first two dimensions, +/-est_bounds_z in
        the third. All weight sets are reset to uniform. Training-time
        initializer, since it requires ground truth.
        """
        self.belief_particles = [tru[:,_,:].view(self.batch_size, 1, 1, self.particle_size).to(device=self.device)
                                 + torch.cat((torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
                                                          1).to(device=self.device).uniform_(-self.est_bounds,
                                                                                             self.est_bounds).type(self.type),
                                              torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
                                                          1).to(device=self.device).uniform_(-self.est_bounds,
                                                                                             self.est_bounds).type(self.type),
                                              torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
                                                          1).to(device=self.device).uniform_(-self.est_bounds_z,
                                                                                             self.est_bounds_z).type(self.type)), 3)
                                 for _ in range(self.num_nodes)]
        self.belief_weights = [torch.ones(self.batch_size, len(self.inc_nghbrs[_]),
                                          self.particle_count).to(device=self.device).type(self.type)
                               / (len(self.inc_nghbrs[_]) * self.particle_count)
                               for _ in range(self.num_nodes)]
        # Messages start from the same ground-truth-centered distribution as beliefs.
        self.message_particles = [tru[:,_,:].view(self.batch_size, 1, 1, self.particle_size).to(device=self.device)
                                  + torch.cat((torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
                                                           1).to(device=self.device).uniform_(-self.est_bounds,
                                                                                              self.est_bounds).type(self.type),
                                               torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
                                                           1).to(device=self.device).uniform_(-self.est_bounds,
                                                                                              self.est_bounds).type(self.type),
                                               torch.empty(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count,
                                                           1).to(device=self.device).uniform_(-self.est_bounds_z,
                                                                                              self.est_bounds_z).type(self.type)), 3)
                                  for _ in range(self.num_nodes)]
        self.message_weights = [torch.ones(self.batch_size, len(self.inc_nghbrs[_]),
                                           self.particle_count).to(device=self.device).type(self.type)
                                / (len(self.inc_nghbrs[_]) * self.particle_count)
                                for _ in range(self.num_nodes)]
        self.message_weights_unary = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
                                                 self.particle_count).to(device=self.device).type(self.type)
                                      / (len(self.inc_nghbrs[i]) * self.particle_count)
                                      for i in range(self.num_nodes)]
        self.message_weights_neigh = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
                                                 self.particle_count).to(device=self.device).type(self.type)
                                      / (len(self.inc_nghbrs[i]) * self.particle_count)
                                      for i in range(self.num_nodes)]
        self.belief_weights_lik = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
                                              self.particle_count).to(device=self.device).type(self.type)
                                   / (len(self.inc_nghbrs[i]) * self.particle_count)
                                   for i in range(self.num_nodes)]
    def depth_init(self, imgs):
        """Initialize particles by back-projecting random pixels of depth maps.

        `imgs` are normalized depth images of shape (batch, 1, H, W). The
        hard-coded mean/std undo that normalization (values must match the
        dataset preprocessing — TODO confirm) and /1000 rescales the depth
        (presumably mm -> m; verify against the dataset). Pixel coordinates
        are mapped via (p - 48) / 96, i.e. assuming 96x96 crops — TODO confirm.
        All weight sets are reset to uniform.
        """
        mean = -8.728770159651145
        std = 45.024769450434384
        depths = ((imgs.clone().detach()*std)+mean)/1000
        self.belief_particles = []
        for node_i in range(self.num_nodes):
            sample_inits = []
            for batch_i in range(depths.shape[0]):
                # Sample random pixel coordinates and read their depth values.
                rand_x = torch.randint(low=0, high=depths.shape[3], size=(len(self.inc_nghbrs[node_i])*self.particle_count,))
                rand_y = torch.randint(low=0, high=depths.shape[2], size=(len(self.inc_nghbrs[node_i])*self.particle_count,))
                rand_depths = depths[batch_i,0][rand_y,rand_x].reshape(1,len(self.inc_nghbrs[node_i]),self.particle_count,1).type(self.type)
                rand_x = (rand_x.reshape(1,len(self.inc_nghbrs[node_i]),self.particle_count,1).type(self.type)-48)/96
                rand_y = (rand_y.reshape(1,len(self.inc_nghbrs[node_i]),self.particle_count,1).type(self.type)-48)/96
                sample_inits.append(torch.cat((rand_x,rand_y,rand_depths),dim=3))
            self.belief_particles.append(torch.cat(sample_inits,dim=0).type(self.type).to(device=self.device))
        self.belief_weights = [torch.ones(self.batch_size, len(self.inc_nghbrs[_]),
                                          self.particle_count).to(device=self.device).type(self.type)
                               / (len(self.inc_nghbrs[_]) * self.particle_count)
                               for _ in range(self.num_nodes)]
        # Messages are drawn independently with the same procedure.
        self.message_particles = []
        for node_i in range(self.num_nodes):
            sample_inits = []
            for batch_i in range(depths.shape[0]):
                rand_x = torch.randint(low=0, high=depths.shape[3], size=(len(self.inc_nghbrs[node_i])*self.particle_count,))
                rand_y = torch.randint(low=0, high=depths.shape[2], size=(len(self.inc_nghbrs[node_i])*self.particle_count,))
                rand_depths = depths[batch_i,0][rand_y,rand_x].reshape(1,len(self.inc_nghbrs[node_i]),self.particle_count,1).type(self.type)
                rand_x = (rand_x.reshape(1,len(self.inc_nghbrs[node_i]),self.particle_count,1).type(self.type)-48)/96
                rand_y = (rand_y.reshape(1,len(self.inc_nghbrs[node_i]),self.particle_count,1).type(self.type)-48)/96
                sample_inits.append(torch.cat((rand_x,rand_y,rand_depths),dim=3))
            self.message_particles.append(torch.cat(sample_inits,dim=0).type(self.type).to(device=self.device))
        self.message_weights = [torch.ones(self.batch_size, len(self.inc_nghbrs[_]),
                                           self.particle_count).to(device=self.device).type(self.type)
                                / (len(self.inc_nghbrs[_]) * self.particle_count)
                                for _ in range(self.num_nodes)]
        self.message_weights_unary = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
                                                 self.particle_count).to(device=self.device).type(self.type)
                                      / (len(self.inc_nghbrs[i]) * self.particle_count)
                                      for i in range(self.num_nodes)]
        self.message_weights_neigh = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
                                                 self.particle_count).to(device=self.device).type(self.type)
                                      / (len(self.inc_nghbrs[i]) * self.particle_count)
                                      for i in range(self.num_nodes)]
        self.belief_weights_lik = [torch.ones(self.batch_size, len(self.inc_nghbrs[i]),
                                              self.particle_count).to(device=self.device).type(self.type)
                                   / (len(self.inc_nghbrs[i]) * self.particle_count)
                                   for i in range(self.num_nodes)]
# Reset particles to uniform
# Particle positions sampled from uniform [-est_bounds, est_bounds]
# Particle weights set to uniform 1/num_particles
def reinit_particles(self, size, imgs):
self.batch_size = size
self.time_step = 0
self.frac_resamp = 0.0
#self.density_std = 2*self.starting_std
self.depth_init(imgs)
def update_time(self):
self.time_step += 1
self.frac_resamp = 1-(self.lambd ** self.time_step)
#self.density_std = max(self.starting_std, 2*self.starting_std*(self.lambd ** self.time_step))
    # Perform message update step (resample, then calculate w_unary and w_neigh for all particles)
    def message_update(self, glbl_feats, node_id=None, tru=None):
        """One message-passing step: resample outgoing particles, then score them.

        For every destination node: (1) resample a frac_resamp fraction of its
        outgoing message particles from its current belief and propagate them
        through the (learned or Gaussian) time factor, topping up the rest with
        uniform noise around the ground truth `tru`; (2) score every outgoing
        particle with w_unary (source node's likelihood, averaged over num_int
        pairwise translation samples) and w_neigh (pairwise density against the
        source's other incoming messages).

        NOTE(review): the top-up branches dereference `tru`, which is None in
        eval mode — this looks reachable whenever resampling covers fewer than
        particle_count particles; confirm the intended eval behavior.
        """
        # Be sure that tru data is only past during training to avoid invalid results
        assert((self.training and (tru is not None)) or (not self.training and (tru is None)))
        # Global features needed for likelihood computation
        # detach from computation graph to ensure no gradients go into feature extractor
        # reasoning is we want to avoid letting the sampler interfere w/ likelihood training
        glbl_feats = glbl_feats.detach()
        # turn off likelihood gradients to ensure the node_likelihoods don't update gradients during message update
        # be sure to turn back on the gradients for likelihood training
        # this should work by ensuring no forward pass registers for likelihood networks, but
        # backward can still propagate past likelihood networks and into samplers
        self.turn_off_lik_grads()
        # Store detached copy of initial messages for w_neigh calculation
        # NOTE(review): setting requires_grad=False on a clone would raise for
        # non-leaf tensors; this relies on messages being detached upstream (update()).
        old_messages = [old.clone() for old in self.message_particles]
        for old in old_messages:
            old.requires_grad = False
        old_message_weights = [old.clone() for old in self.message_weights]
        for old in old_message_weights:
            old.requires_grad = False
        if node_id is None:
            iters = range(self.num_nodes)
        else:
            iters = [node_id]
        # Iterate all destination nodes for message pass
        for dst_ in iters:
            # Start the estimation process with non-differentiable resampling
            # All resampling is done batch-wise
            # This is accomplished by using additional memory inplace of additional computation
            # For example, we duplicate the cumulative sum of random chance for every particle that shares the sum
            # This could certainly be made more efficient (e.g. custom resample kernel to share memory on hardware)
            resamp_particles = int(self.frac_resamp * self.particle_count)
            if resamp_particles>0:
                # Explicitly detach particles that are being sampled from
                # batch x pseudo_bel x particle_size
                dst_belief_particles = self.belief_particles[dst_].view(self.batch_size, -1, self.particle_size).detach()
                dst_belief_weights = self.belief_weights[dst_].view(self.batch_size, -1).detach()
                if self.mode=='low_var':
                    rand_chance = ((torch.arange(resamp_particles) / float(resamp_particles)).repeat(self.batch_size,
                                                                                                    len(self.inc_nghbrs[dst_]), 1)
                                   + (torch.rand(self.batch_size, len(self.inc_nghbrs[dst_]), 1) / float(resamp_particles)))
                else:
                    rand_chance = torch.rand(self.batch_size, len(self.inc_nghbrs[dst_]), resamp_particles)
                # batch x incoming x 1 x resamp_particles
                rand_chance = rand_chance.unsqueeze(2).type(self.type).to(device=self.device)
                # batch x incoming x pseudo_bel x resamp_particles
                cum_sum = (dst_belief_weights).cumsum(1).unsqueeze(1).unsqueeze(3).repeat((1, len(self.inc_nghbrs[dst_]),
                                                                                          1, resamp_particles))
                # Due to the fact that pytorch argmin/argmax does not guarantee a tiebreak, we currently use
                # this inverse-argmax hack which ensures the difference closest to zero(positive side) is selected
                # ISSUE: Theres a small chance that the denominator is zero and results in NaN. Hasn't been observed thus far.
                # batch x incoming x pseudo_bel x resamp_particles
                rand_ind = torch.argmax(1 / (cum_sum - rand_chance), dim=2)
                # batch x incoming x resamp_particles
                # Duplicate random indices for each of the particle_size dimensions in order to use torch.gather
                rand_ind = rand_ind.unsqueeze(-1).repeat(1, 1, 1, self.particle_size)
                # batch x incoming x resamp_particles x particle_size
                # batch x incoming x pseudo_bel x particle_size
                dst_belief_particles = dst_belief_particles.unsqueeze(1).repeat(1, len(self.inc_nghbrs[dst_]), 1, 1)
                # batch x incoming x particles x particle_size
                resampled_particles = torch.gather(dst_belief_particles, 2, rand_ind)
                if self.use_time:
                    time_delta = self.time_samplers[dst_](self.batch_size * len(self.inc_nghbrs[dst_])
                                                          * resamp_particles).view(self.batch_size, len(self.inc_nghbrs[dst_]),
                                                                                   resamp_particles, -1)
                else:
                    time_delta = torch.randn((self.batch_size, len(self.inc_nghbrs[dst_]), resamp_particles,
                                              self.particle_size)).to(device=self.device) * self.std
                self.message_particles[dst_][:,:,:resamp_particles] = resampled_particles + time_delta
                if (self.particle_count-resamp_particles)>0:
                    # For remaining particles sample from uniform [-est_bounds, est_bounds]
                    self.message_particles[dst_][:,:,resamp_particles:] = tru[:,dst_,:].view(self.batch_size, 1, 1, self.particle_size).to(device=self.device) \
                            + torch.empty(self.batch_size, len(self.inc_nghbrs[dst_]), self.particle_count-resamp_particles,
                                          self.particle_size).to(device=self.device).uniform_(-self.est_bounds, self.est_bounds).type(self.type)
            else:
                self.message_particles[dst_] = tru[:,dst_,:].view(self.batch_size, 1, 1, self.particle_size).to(device=self.device) \
                        + torch.empty(self.batch_size, len(self.inc_nghbrs[dst_]), self.particle_count,
                                      self.particle_size).to(device=self.device).uniform_(-self.est_bounds, self.est_bounds).type(self.type)
            # After resampling, particle weights must be set to uniform
            # This step breaks differentiability to past weights
            self.message_weights[dst_] = (torch.ones(self.batch_size, len(self.inc_nghbrs[dst_]),
                                                     self.particle_count).type(self.type).to(device=self.device)
                                          / (len(self.inc_nghbrs[dst_]) * self.particle_count))
            # To calculate, modify standard PMPNBP by using multiple samples to smooth performance
            # Multiple samples decreases weight deviation for particles (good particles more consistently have large weight)
            num_int = 10
            for src_i, src_ in enumerate(self.inc_nghbrs[dst_]):
                # Determine edge index of message pass from src_->dst_
                edge_i = ((self.graph == torch.tensor([[min(src_,dst_)],
                                                       [max(src_,dst_)]])).all(dim=0).nonzero().squeeze(0)
                          == self.edge_set).nonzero().squeeze(0)
                # Isolate outgoing particles from src_->dst_ for w_unary calculation
                # These outgoing particles are in the frame of dst_
                msgs = self.message_particles[dst_][:,src_i].contiguous().view(-1,1,self.particle_size)
                # Generate delta samples to translate particles from dst_ frame to src_ frame
                if src_ < dst_:
                    s = self.edge_samplers[edge_i](msgs.shape[0]*num_int)
                else:
                    s = -self.edge_samplers[edge_i](msgs.shape[0]*num_int)
                # Translate particles into src_ frame using sampled deltas
                samples = msgs + s.view(msgs.shape[0],num_int,-1)
                # Change view for feeding into network
                samples = samples.view(msgs.shape[0]*num_int,self.particle_size)
                # Concatenate features with particles and feed into likelihood network of src_
                # Remember, samples are now in src_'s frame
                # This is the key step that causes us to turn off likelihood gradients; we don't want the src_ likelihood
                # factor learning to weight particles based on feedback from dst_ ground truth
                #if self.shared_feats:
                wgts = self.node_likelihoods[src_](samples, glbl_feats).view(self.batch_size, self.particle_count, num_int)
                #wgts = self.node_likelihoods[src_](torch.cat((samples,
                #                                       glbl_feats.unsqueeze(1).repeat((1, self.particle_count * num_int,
                #                                                                              1)).view(self.batch_size
                #                                                                                       * self.particle_count
                #                                                                                       * num_int, -1)),
                #                                      dim=1)).view(self.batch_size, self.particle_count, num_int)
                #else:
                #    wgts = self.node_likelihoods[src_](torch.cat((samples,
                #                                           glbl_feats[src_].unsqueeze(1).repeat((1, self.particle_count * num_int,
                #                                                                                  1)).view(self.batch_size
                #                                                                                           * self.particle_count
                #                                                                                           * num_int, -1)),
                #                                          dim=1)).view(self.batch_size, self.particle_count, num_int)
                # Average over the num_int samples
                wgts = wgts.mean(dim=-1)
                # Normalize scores of outgoing particles, these are the w_unary scores
                wgts = wgts / wgts.sum(dim=1,keepdim=True)
                # Store unary scores for use later
                self.message_weights_unary[dst_][:,src_i] = wgts
                # Index of dst_ among src_'s incoming neighbors, so we can exclude
                # the reverse message when computing w_neigh.
                ign_indx = (torch.tensor(self.inc_nghbrs[src_])==dst_).nonzero().squeeze()
                # relevant neighbor message shape: batch x Num_Neighbors x particle_count x particle_size
                relv = torch.cat([old_messages[src_][:,:ign_indx],
                                  old_messages[src_][:,(ign_indx+1):]], dim=1)
                # print(src_,'->',dst_,relv.shape)
                if relv.shape[1]>0:
                    if self.training:
                        # relv_weights = ((1/(self.density_std*np.sqrt(2*np.pi)))
                        #                 * torch.exp((-1/2) * (((relv-tru[:,src_,:].unsqueeze(1).unsqueeze(1))
                        #                                        / self.density_std)**2).sum(dim=-1)))
                        relv_weights = ((1/(np.power(self.std, self.particle_size)*np.power(2*np.pi, self.particle_size/2)))
                                        * torch.exp((-1/2) * (((relv-tru[:,src_,:].unsqueeze(1).unsqueeze(1))
                                                               / self.density_std)**2).sum(dim=-1)))
                    else:
                        relv_weights = torch.cat([old_message_weights[src_][:,:ign_indx],
                                                  old_message_weights[src_][:,(ign_indx+1):]], dim=1)
                    # if src_>0 and src_<4:
                    #     print(src_, tru[0,src_,:])
                    # Perform weighting in delta space (independent of postion)
                    # Ensure delta direction is same as w_unary to make plotting meaningful
                    # src_ frame (relv) = dst_ frame (msg_p) + delta (diff)
                    # delta (diff) = src_ frame (relv) - dst_ frame (msg_p)
                    # Graph:
                    # 1->2->3
                    # Where we are in the loop: Calculate the weights of the msgs from 2 to 3
                    # outgoing messages: msgs(2->3) i:M
                    # w_neigh(i) = \prod neighbors [(pairwise(msgs(1->2) - msgs(2->3){i})
                    # Where we are in the loop: Calculate the weights of the msgs from 1 to 2
                    # outgoing messages: msgs(1->2) i:M
                    # w_neigh(i) = sum_{neighbors of node 1 excluding node 2(destinate)}
                    if self.training:
                        if src_<dst_:
                            diff = tru[:,src_,:].unsqueeze(1).unsqueeze(1).unsqueeze(1) - self.message_particles[dst_][:,src_i].unsqueeze(2).unsqueeze(2)
                        else:
                            diff = self.message_particles[dst_][:,src_i].unsqueeze(2).unsqueeze(2) - tru[:,src_,:].unsqueeze(1).unsqueeze(1).unsqueeze(1)
                        relv_weights = 1
                    else:
                        if src_<dst_:
                            diff = relv.unsqueeze(1) - self.message_particles[dst_][:,src_i].unsqueeze(2).unsqueeze(2)
                        else:
                            diff = self.message_particles[dst_][:,src_i].unsqueeze(2).unsqueeze(2) - relv.unsqueeze(1)
                        relv_weights = relv_weights.unsqueeze(1)
                    # print(relv.unsqueeze(1).shape, self.message_particles[dst_][:,src_i].unsqueeze(2).unsqueeze(2).shape)
                    # print(diff.shape)
                    num_p_dst = diff.shape[1]
                    num_n = diff.shape[2]
                    num_p_src = diff.shape[3]
                    # reshape delta for feeding into network
                    diff = diff.view(self.batch_size*num_p_dst*num_n*num_p_src,self.particle_size)
                    dens = self.edge_densities[edge_i](diff)
                    dens = dens.view(self.batch_size, num_p_dst, num_n, num_p_src)
                    # print(dens.shape,relv_weights.unsqueeze(1).shape)
                    # Scale weights by neighbor scores
                    dens = dens * relv_weights
                    # In future, replace squeeze with product operation. Needed for graphs with more neighbors
                    dens = dens.sum(dim=-1)
                    # print(dens.shape)
                    dens = torch.prod(dens, dim=2)
                    dens = dens / dens.sum(dim=1, keepdim=True)
                    # print(dens.shape)
                    # dens = relv_weights = ((1/(self.density_std*np.sqrt(2*np.pi)))
                    #                 * torch.exp((-1/2) * (((self.message_particles[dst_][:,src_i].unsqueeze(1)-tru[:,src_,:].unsqueeze(1).unsqueeze(1))
                    #                                        / self.density_std)**2).sum(dim=-1))).squeeze()
                else:
                    dens = torch.ones_like(self.message_weights_neigh[dst_][:,src_i]) / self.particle_count
                # print(dens.shape, relv.shape, self.message_particles[dst_][:,src_i].unsqueeze(2).unsqueeze(2).shape, tru[:,src_,:].unsqueeze(1).unsqueeze(1).shape)
                self.message_weights_neigh[dst_][:,src_i] = dens
                #THIS PART ABOVE WAS COMMENTED FOR FIRST STAGE******
        return
# Belief update step, combine all incoming messages to form belief
def belief_update(self, glbl_feats, node_id=None):
# Turn gradients back on for training the unary potentials
self.turn_on_lik_grads()
if node_id is None:
iters = range(self.num_nodes)
else:
iters = [node_id]
# Iterate over each node in graph, update message weights to form belief set
for _ in iters:
# Calculate the destination node unary scores
message_liks = self.node_likelihoods[_](self.message_particles[_].view(-1, self.particle_size),
glbl_feats)
message_liks = message_liks.squeeze(1).view(self.batch_size, len(self.inc_nghbrs[_]), self.particle_count)
message_liks = message_liks / message_liks.sum(dim=2,keepdim=True)
self.belief_weights_lik[_][:,:,:] = message_liks
incoming_reweights = self.belief_weights_lik[_] * self.message_weights_unary[_] * self.message_weights_neigh[_]
incoming_reweights = incoming_reweights / incoming_reweights.sum(2, keepdim=True)
incoming_reweights = incoming_reweights / len(self.inc_nghbrs[_])
self.belief_weights[_] = incoming_reweights
self.belief_particles[_] = self.message_particles[_]
return
def compute_feats(self, x):
self.global_features = self.likelihood_features(x)
    def max_marginals(self):
        """Return the maximum-weight belief particle and its weight per node.

        Returns
        -------
        (pred_particles, pred_weights): two lists of length ``num_nodes`` with
            tensors of shape (batch, particle_size) and (batch,) respectively.
        """
        # NOTE(review): an identical ``max_marginals`` is defined again
        # immediately below; the later definition shadows this one, so this
        # copy is dead code and one of the two should be deleted.
        pred_particles = []
        pred_weights = []
        for node_id in range(self.num_nodes):
            # ind = (max weight values, flat argmax indices) per batch element.
            ind = torch.max(self.belief_weights[node_id].view(self.batch_size, -1), dim=1)
            # Gather the particle at each argmax index.
            preds = torch.gather(self.belief_particles[node_id].view(self.batch_size, -1, self.particle_size), 1, ind[1].long().unsqueeze(1).unsqueeze(1).repeat(1,1,self.particle_size)).squeeze(1)
            pred_particles.append(preds)
            pred_weights.append(ind[0])
        return pred_particles, pred_weights
def max_marginals(self):
pred_particles = []
pred_weights = []
for node_id in range(self.num_nodes):
ind = torch.max(self.belief_weights[node_id].view(self.batch_size, -1), dim=1)
preds = torch.gather(self.belief_particles[node_id].view(self.batch_size, -1, self.particle_size), 1, ind[1].long().unsqueeze(1).unsqueeze(1).repeat(1,1,self.particle_size)).squeeze(1)
pred_particles.append(preds)
pred_weights.append(ind[0])
return pred_particles, pred_weights
# Perform the full forward pass
def update(self, node_id=None, tru=None):
for dst_ in range(self.num_nodes):
self.belief_particles[dst_] = self.belief_particles[dst_].detach()
self.belief_weights[dst_] = self.belief_weights[dst_].detach()
self.message_particles[dst_] = self.message_particles[dst_].detach()
self.message_weights[dst_] = self.message_weights[dst_].detach()
self.belief_weights_lik[dst_] = self.belief_weights_lik[dst_].detach()
self.message_weights_unary[dst_] = self.message_weights_unary[dst_].detach()
self.message_weights_neigh[dst_] = self.message_weights_neigh[dst_].detach()
self.message_update(self.global_features, node_id=node_id, tru=tru)
self.belief_update(self.global_features, node_id=node_id)
return
def get_json_belief(self, img_id):
data = []
for node_id in range(self.num_nodes):
data.append([float(x) for p in 1000*self.belief_particles[node_id][img_id].view(-1,3) for x in p])
# Generate density estimate with weighted gaussian kernels
def density_estimation(self, node_id, x, mode='belief'):
belief_particles = self.belief_particles[node_id].view(self.batch_size, 1, -1, self.particle_size)
if mode=='belief':
weights = self.belief_weights[node_id].view(self.batch_size, 1, -1)
elif mode=='w_lik':
weights = self.belief_weights_lik[node_id].view(self.batch_size, 1, -1)
elif mode=='w_unary':
weights = self.message_weights_unary[node_id].view(self.batch_size, 1, -1)
elif mode=='w_neigh':
weights = self.message_weights_neigh[node_id].view(self.batch_size, 1, -1)
else:
raise
diffsq = (((x.double()-belief_particles.double())/self.std)**2).sum(dim=-1)
exp_val = torch.exp((-1/2) * diffsq)
fact = 1/(np.power(self.std, self.particle_size)*np.power(2*np.pi, self.particle_size/2))
#fact = 1/(np.power(self.std, 2)*np.power(2*np.pi, 2/2))
fact_ = fact * exp_val
out = (weights * fact_).sum(dim=-1)
return out
# particles: Batch x NumParticles x ParticleSize
# weights: Batch x NumParticles
def discrete_samples(self, particles, weights, num_samples):
batch_size = particles.shape[0]
particle_size = particles.shape[2]
rand_chance = ((torch.arange(num_samples) / float(num_samples)).repeat(batch_size, 1)
+ (torch.rand(batch_size, 1) / float(num_samples)))
# batch x 1 x resamp_particles
rand_chance = rand_chance.unsqueeze(1).type(self.type).to(device=self.device)
# batch x (incoming x pseudo_bel) x resamp_particles
cum_sum = (weights).cumsum(1).unsqueeze(2).repeat((1, 1, num_samples))
# Due to the fact that pytorch argmin/argmax does not guarantee a tiebreak, we currently use
# this inverse-argmax hack which ensures the difference closest to zero(positive side) is selected
# ISSUE: Theres a small chance that the denominator is zero and results in NaN. Hasn't been observed thus far.
# batch x (incoming x pseudo_bel) x resamp_particles
rand_ind = torch.argmax(1 / (cum_sum - rand_chance), dim=1)
# batch x resamp_particles
# Duplicate random indices for each of the particle_size dimensions in order to use torch.gather
rand_ind = rand_ind.unsqueeze(-1).repeat(1, 1, particle_size)
# batch x resamp_particles x particle_size
# batch x (incoming x pseudo_bel) x particle_size
#dst_belief_particles = dst_belief_particles
# batch x (incoming x particles) x particle_size
sampled_particles = torch.gather(particles, 1, rand_ind)
return sampled_particles
# belief_particles: Batch x IncNghbrs x NumParticles x ParticleSize
# belief_weights: Batch x IncNghbrs x NumParticles
def recursive_ancestral_sampling(self, belief_particles, belief_weights, ith, parent, parent_idx, visited, visited_samples, num_samples=15):
to_sample_belief_particles = belief_particles[ith].view(belief_particles[ith].shape[0], -1, belief_particles[ith].shape[3])
batch_size, particle_count, particle_size = to_sample_belief_particles.shape
to_sample_belief_weights = belief_weights[ith].view(batch_size, -1)
ith_idx = len(visited)
visited.append(ith)
if ith==0:
sampled_particles = self.discrete_samples(to_sample_belief_particles, to_sample_belief_weights, num_samples)
sampled_particles = sampled_particles.unsqueeze(2)
visited_samples.append(sampled_particles)
else:
edge_i = ((self.graph == torch.tensor([[min(ith,parent)],
[max(ith,parent)]])).all(dim=0).nonzero().squeeze(0)
== self.edge_set).nonzero().squeeze(0)
sampled_particles = self.discrete_samples(to_sample_belief_particles, to_sample_belief_weights, particle_count)
sampled_particles = sampled_particles.unsqueeze(1)
# print(parent,'->',ith, edge_i)
if parent<ith:
diff = visited_samples[parent_idx] - sampled_particles
else:
diff = sampled_particles - visited_samples[parent_idx]
cond_wgts = self.edge_densities[edge_i](diff.view(-1, particle_size)).view(batch_size, num_samples, diff.shape[2])
cond_wgts = cond_wgts.view(-1, cond_wgts.shape[2])
cond_wgts = cond_wgts / cond_wgts.sum(dim=1, keepdim=True)
# print(cond_wgts.shape)
sampled_particles = sampled_particles.view(-1, 1, sampled_particles.shape[2], particle_size).repeat(1,num_samples,1,1).view(-1, sampled_particles.shape[2], particle_size)
conditioned_particles = self.discrete_samples(sampled_particles, cond_wgts, 1).view(batch_size, num_samples, particle_size)
conditioned_particles = conditioned_particles.unsqueeze(2)
visited_samples.append(conditioned_particles)
for nghbr in self.inc_nghbrs[ith]:
if nghbr not in visited:
self.recursive_ancestral_sampling(belief_particles, belief_weights, nghbr, ith, ith_idx, visited, visited_samples, num_samples)
if ith==0:
return [x for _,x in sorted(zip(visited,visited_samples))] | [
"torch.ones_like",
"torch.device",
"numpy.power",
"torch.rand",
"diffBP.networks.dnbp_hand.factors.PairwiseDensity",
"torch.exp",
"diffBP.networks.dnbp_hand.factors.PairwiseSampler",
"diffBP.networks.dnbp_hand.factors.LikelihoodFeatures",
"torch.prod",
"torch.tensor",
"torch.arange",
"diffBP.n... | [((385, 404), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (397, 404), False, 'import torch\n'), ((37076, 37102), 'torch.exp', 'torch.exp', (['(-1 / 2 * diffsq)'], {}), '(-1 / 2 * diffsq)\n', (37085, 37102), False, 'import torch\n'), ((38452, 38500), 'torch.argmax', 'torch.argmax', (['(1 / (cum_sum - rand_chance))'], {'dim': '(1)'}), '(1 / (cum_sum - rand_chance), dim=1)\n', (38464, 38500), False, 'import torch\n'), ((38961, 38997), 'torch.gather', 'torch.gather', (['particles', '(1)', 'rand_ind'], {}), '(particles, 1, rand_ind)\n', (38973, 38997), False, 'import torch\n'), ((20397, 20445), 'torch.argmax', 'torch.argmax', (['(1 / (cum_sum - rand_chance))'], {'dim': '(2)'}), '(1 / (cum_sum - rand_chance), dim=2)\n', (20409, 20445), False, 'import torch\n'), ((21054, 21101), 'torch.gather', 'torch.gather', (['dst_belief_particles', '(2)', 'rand_ind'], {}), '(dst_belief_particles, 2, rand_ind)\n', (21066, 21101), False, 'import torch\n'), ((27640, 27734), 'torch.cat', 'torch.cat', (['[old_messages[src_][:, :ign_indx], old_messages[src_][:, ign_indx + 1:]]'], {'dim': '(1)'}), '([old_messages[src_][:, :ign_indx], old_messages[src_][:, ign_indx +\n 1:]], dim=1)\n', (27649, 27734), False, 'import torch\n'), ((37121, 37159), 'numpy.power', 'np.power', (['self.std', 'self.particle_size'], {}), '(self.std, self.particle_size)\n', (37129, 37159), True, 'import numpy as np\n'), ((37160, 37203), 'numpy.power', 'np.power', (['(2 * np.pi)', '(self.particle_size / 2)'], {}), '(2 * np.pi, self.particle_size / 2)\n', (37168, 37203), True, 'import numpy as np\n'), ((37728, 37753), 'torch.rand', 'torch.rand', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (37738, 37753), False, 'import torch\n'), ((12834, 12881), 'torch.cat', 'torch.cat', (['(rand_x, rand_y, rand_depths)'], {'dim': '(3)'}), '((rand_x, rand_y, rand_depths), dim=3)\n', (12843, 12881), False, 'import torch\n'), ((14194, 14241), 'torch.cat', 'torch.cat', (['(rand_x, rand_y, 
rand_depths)'], {'dim': '(3)'}), '((rand_x, rand_y, rand_depths), dim=3)\n', (14203, 14241), False, 'import torch\n'), ((31514, 31537), 'torch.prod', 'torch.prod', (['dens'], {'dim': '(2)'}), '(dens, dim=2)\n', (31524, 31537), False, 'import torch\n'), ((1731, 1759), 'diffBP.networks.dnbp_hand.factors.LikelihoodFeatures', 'factors.LikelihoodFeatures', ([], {}), '()\n', (1757, 1759), False, 'from diffBP.networks.dnbp_hand import factors\n'), ((28584, 28693), 'torch.cat', 'torch.cat', (['[old_message_weights[src_][:, :ign_indx], old_message_weights[src_][:, \n ign_indx + 1:]]'], {'dim': '(1)'}), '([old_message_weights[src_][:, :ign_indx], old_message_weights[\n src_][:, ign_indx + 1:]], dim=1)\n', (28593, 28693), False, 'import torch\n'), ((32049, 32108), 'torch.ones_like', 'torch.ones_like', (['self.message_weights_neigh[dst_][:, src_i]'], {}), '(self.message_weights_neigh[dst_][:, src_i])\n', (32064, 32108), False, 'import torch\n'), ((37632, 37657), 'torch.arange', 'torch.arange', (['num_samples'], {}), '(num_samples)\n', (37644, 37657), False, 'import torch\n'), ((1916, 2022), 'diffBP.networks.dnbp_hand.factors.UnaryDensity', 'factors.UnaryDensity', ([], {'gross_features': 'enc_output_feats_tot', 'in_features': '(64)', 'particle_size': 'particle_size'}), '(gross_features=enc_output_feats_tot, in_features=64,\n particle_size=particle_size)\n', (1936, 2022), False, 'from diffBP.networks.dnbp_hand import factors\n'), ((2199, 2319), 'diffBP.networks.dnbp_hand.factors.PairwiseSampler', 'factors.PairwiseSampler', ([], {'particle_size': 'particle_size', 'est_bounds': 'est_bounds', 'device': 'self.device', 'precision': 'self.type'}), '(particle_size=particle_size, est_bounds=est_bounds,\n device=self.device, precision=self.type)\n', (2222, 2319), False, 'from diffBP.networks.dnbp_hand import factors\n'), ((2864, 2916), 'diffBP.networks.dnbp_hand.factors.PairwiseDensity', 'factors.PairwiseDensity', ([], {'particle_size': 'particle_size'}), 
'(particle_size=particle_size)\n', (2887, 2916), False, 'from diffBP.networks.dnbp_hand import factors\n'), ((3173, 3293), 'diffBP.networks.dnbp_hand.factors.PairwiseSampler', 'factors.PairwiseSampler', ([], {'particle_size': 'particle_size', 'est_bounds': 'est_bounds', 'device': 'self.device', 'precision': 'self.type'}), '(particle_size=particle_size, est_bounds=est_bounds,\n device=self.device, precision=self.type)\n', (3196, 3293), False, 'from diffBP.networks.dnbp_hand import factors\n'), ((12921, 12951), 'torch.cat', 'torch.cat', (['sample_inits'], {'dim': '(0)'}), '(sample_inits, dim=0)\n', (12930, 12951), False, 'import torch\n'), ((14282, 14312), 'torch.cat', 'torch.cat', (['sample_inits'], {'dim': '(0)'}), '(sample_inits, dim=0)\n', (14291, 14312), False, 'import torch\n'), ((19031, 19061), 'torch.arange', 'torch.arange', (['resamp_particles'], {}), '(resamp_particles)\n', (19043, 19061), False, 'import torch\n'), ((27431, 27466), 'torch.tensor', 'torch.tensor', (['self.inc_nghbrs[src_]'], {}), '(self.inc_nghbrs[src_])\n', (27443, 27466), False, 'import torch\n'), ((28225, 28263), 'numpy.power', 'np.power', (['self.std', 'self.particle_size'], {}), '(self.std, self.particle_size)\n', (28233, 28263), True, 'import numpy as np\n'), ((28264, 28307), 'numpy.power', 'np.power', (['(2 * np.pi)', '(self.particle_size / 2)'], {}), '(2 * np.pi, self.particle_size / 2)\n', (28272, 28307), True, 'import numpy as np\n')] |
"""
======================================================
Monolayer (:mod:`graphene.monolayer`)
======================================================
Functions
=========
Band structure
--------------
.. toctree::
:maxdepth: 1
graphene.monolayer.Hamiltonian
graphene.monolayer.CarrierDispersion
graphene.monolayer.DensityOfStates
graphene.monolayer.FermiWavenumber
graphene.monolayer.CarrierDensity
graphene.monolayer.ChemicalPotential
Optical Properties
------------------
.. toctree::
:maxdepth: 1
graphene.monolayer.Polarizibility
graphene.monolayer.ScalarOpticalConductivity
graphene.monolayer.Permittivity
graphene.monolayer.FresnelReflection
Plasmonics
----------
.. toctree::
:maxdepth: 1
graphene.monolayer.PlasmonDispersion
"""
import numpy as np
import scipy.constants as sc
from scipy import special, optimize, integrate
import graphenemodeling.graphene._constants as _c
import graphenemodeling.statistical_distributions as sd
############
# Geometry #
############
def UnitCell(m,n):
    '''Position of unit cell (m, n) in the graphene lattice.

    Parameters
    ----------
    m, n: int
        Integer indices of the unit cell along lattice vectors a1 and a2.

    Returns
    -------
    numpy.ndarray
        Cartesian position ``m*a1 + n*a2``.

    References
    ----------
    [1] Castro Neto, A. H., Guinea, F., Peres, N. M. R., Novoselov, K. S.,
    and Geim, A. K. (2009). The electronic properties of graphene.
    Rev. Mod. Phys. 81, 109-162. https://link.aps.org/doi/10.1103/RevModPhys.81.109.
    '''
    lattice_vec_1 = np.array(_c.a1)
    lattice_vec_2 = np.array(_c.a2)
    return m * lattice_vec_1 + n * lattice_vec_2
def AtomicPosition(m,n,i):
    '''Position of sublattice atom ``i`` within unit cell (m, n).

    Parameters
    ----------
    m, n: int
        Unit cell indices.
    i: int
        Sublattice index; atom ``i`` is offset by ``i`` times half the
        (a/2)*(1, sqrt(3)) basis vector from the unit-cell origin.

    Returns
    -------
    numpy.ndarray
        Cartesian position of the atom.
    '''
    sublattice_offset = _c.a / 2 * np.array([1, 3**(1/2)])
    return UnitCell(m, n) + i * sublattice_offset
##################
# Band Structure #
##################
def Hamiltonian(k,model,g0prime=0):
    '''Tight-binding Hamiltonian of monolayer graphene in momentum space.

    Parameters
    ----------
    k: array-like, complex, rad/m
        Wavevector of carrier. Use complex ``k=kx + 1j*ky`` for 2D wavevectors.

    model: string
        ``'LowEnergy'``, ``'FullTightBinding'``

    g0prime: scalar, J
        The particle-hole asymmetry parameter :math:`\\gamma_0'`. Typically :math:`0.02\\gamma_0\\leq\\gamma_0'\\leq 0.2\\gamma_0`.

    Returns
    ----------
    H: 2x2 complex ndarray
        Hermitian tight-binding Hamiltonian evaluated at k.

    Raises
    ------
    ValueError
        if `model` is not 'LowEnergy' or 'FullTightBinding'

    Notes
    -----
    Let :math:`k=k_x+ik_y`. Then the ``model=FullTightBinding`` expression is given by

    .. math::

        H = \\left(\\array{
                -\\gamma_0' & \\gamma_0f(k) \n
                \\gamma_0f(k)^* & -\\gamma_0'
            } \\right)

    where :math:`f(k)= e^{ik_x a/2} + 2 e^{-i k_x a/ 2}\\cos(k_y a \\sqrt{3}/2)`

    The more common ``model=LowEnergy`` approximation is

    .. math::

        H = \\hbar v_F\\left(\\array{
                    0 & k \n
                    k^* & 0
            } \\right)

    References
    ----------
    [1] Wallace, P. R. (1947). The Band Theory of Graphite. Phys. Rev. 71, 622-634.
    https://link.aps.org/doi/10.1103/PhysRev.71.622

    [2] Slonczewski, J. C., and Weiss, P. R. (1958). Band Structure of Graphite.
    Phys. Rev. 109, 272-279. https://link.aps.org/doi/10.1103/PhysRev.109.272.

    [3] Falkovsky, L. A., and Varlamov, A. A. (2007). Space-time dispersion of graphene conductivity. Eur. Phys. J. B 56, 281-284.
    https://link.springer.com/article/10.1140/epjb/e2007-00142-3.
    '''
    if model!='LowEnergy' and model!='FullTightBinding':
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")

    if model == 'LowEnergy':
        H11 = 0
        H12 = sc.hbar * _c.vF * k
        H21 = np.conj(H12)
        H22 = 0

    if model == 'FullTightBinding':
        kx = np.real(k)
        ky = np.imag(k)
        H11 = -g0prime
        H12 = _c.g0 * ( np.exp(1j*kx*_c.a/2)
                        + 2*np.exp(-1j*kx*_c.a/2)*np.cos(ky*_c.a*np.sqrt(3)/2) )
        H21 = np.conj(H12)
        H22 = -g0prime

    # Bug fix: the lower-left entry must be H21 = conj(H12). The original
    # placed H12 in both off-diagonal slots (H21 was computed but unused),
    # producing a non-Hermitian Hamiltonian for any k with nonzero ky.
    H = np.array( [[H11, H12],
                   [H21, H22]] )

    return H
def CarrierDispersion(k,model,eh=1,g0prime=_c.g0prime):
    '''The dispersion of Dirac fermions in monolayer graphene.

    These are the eigenvalues of the Hamiltonian; however, in both the
    ``LowEnergy`` model and the ``FullTightBinding`` model we use closed-form
    solutions rather than solving for the eigenvalues directly. This saves
    time and makes broadcasting easier.

    Parameters
    ----------
    k: array-like, complex, rad/m
        Wavevector of Dirac fermion relative to K vector.
        For 2D wavevectors, use :math:`k= k_x + i k_y`.

    model: string
        ``'LowEnergy'``: Linear approximation of dispersion.

        ``'FullTightBinding'``: Closed form of the tight-binding dispersion.

    eh: int
        Band index: ``eh=1`` returns conduction band, ``eh=-1`` returns valence band.

    g0prime: scalar, J
        Particle-hole asymmetry parameter :math:`\\gamma_0'` (``FullTightBinding`` only).

    Returns
    ----------
    dispersion: complex ndarray
        Dispersion relation evaluated at k.

    Raises
    ------
    ValueError
        if `model` is not 'LowEnergy' or 'FullTightBinding'.

    ValueError
        if `eh` not 1 or -1.

    Notes
    -----
    When ``model='LowEnergy'``,

    .. math::

        E =\\pm\\hbar v_F |k|

    When ``model=FullTightBinding``,

    .. math::

        E = \\pm \\gamma_0 \\sqrt{3 + f(k)} - \\gamma_0'f(k)

    where :math:`f(k)= 2 \\cos(\\sqrt{3}k_y a) + 4 \\cos(\\sqrt{3}k_y a/2)\\cos(3k_xa/2)`.

    Both expressions are equivalent to diagonalizing the Hamiltonian of the
    corresponding ``model``.

    Examples
    --------
    Plot the Fermion dispersion relation.

    .. plot::

        >>> import matplotlib.pyplot as plt
        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from scipy.constants import elementary_charge as eV
        >>> eF = 0.4*eV
        >>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
        >>> k = np.linspace(-2*kF,2*kF,num=100)
        >>> conduction_band = mlg.CarrierDispersion(k,model='LowEnergy')
        >>> valence_band = mlg.CarrierDispersion(k,model='LowEnergy',eh=-1)
        >>> fig, ax = plt.subplots(figsize=(5,6))
        >>> ax.plot(k/kF,conduction_band/eF,'k')
        [...
        >>> ax.plot(k/kF,valence_band/eF, 'k')
        [...
        >>> ax.plot(k/kF,np.zeros_like(k),color='gray')
        [...
        >>> ax.axvline(x=0,ymin=0,ymax=1,color='gray')
        <...
        >>> ax.set_axis_off()
        >>> plt.show()

    Plot the full multi-dimensional dispersion relation with a particle-hole
    asymmetry. Replicates Figure 3 in Ref. [4].

    .. plot::

        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from graphenemodeling.graphene import _constants as _c
        >>> import matplotlib.pyplot as plt
        >>> from mpl_toolkits import mplot3d # 3D plotting
        >>> kmax = np.abs(_c.K)
        >>> emax = mlg.CarrierDispersion(0,model='FullTightBinding',g0prime=-0.2*_c.g0)
        >>> kx = np.linspace(-kmax,kmax,num=100)
        >>> ky = np.copy(kx)
        >>> k = (kx + 1j*ky[:,np.newaxis]) + _c.K # k is relative to K. Add K to move to center of Brillouin zone
        >>> conduction_band = mlg.CarrierDispersion(k,model='FullTightBinding',eh=1,g0prime=-0.2*_c.g0)
        >>> valence_band = mlg.CarrierDispersion(k,model='FullTightBinding',eh=-1,g0prime=-0.2*_c.g0)
        >>> fig = plt.figure(figsize=(8,8))
        >>> fullax = plt.axes(projection='3d')
        >>> fullax.view_init(20,35)
        >>> KX, KY = np.meshgrid(kx,ky)
        >>> fullax.plot_surface(KX/kmax,KY/kmax,conduction_band/_c.g0,rstride=1,cstride=1,cmap='viridis',edgecolor='none')
        <...
        >>> fullax.plot_surface(KX/kmax,KY/kmax,valence_band/_c.g0,rstride=1,cstride=1,cmap='viridis',edgecolor='none')
        <...
        >>> fullax.set_xlabel('$k_x/|K|$')
        Text...
        >>> fullax.set_ylabel('$k_y/|K|$')
        Text...
        >>> fullax.set_zlabel('$\\epsilon/\\gamma_0$')
        Text...
        >>> fullax.set_title('Brillouin Zone of Graphene')
        Text...
        >>> plt.show()

    References
    ----------
    [1] Wallace, P. R. (1947). The Band Theory of Graphite. Phys. Rev. 71, 622-634.
    https://link.aps.org/doi/10.1103/PhysRev.71.622

    [2] Slonczewski, J. C., and Weiss, P. R. (1958). Band Structure of Graphite.
    Phys. Rev. 109, 272-279. https://link.aps.org/doi/10.1103/PhysRev.109.272.

    [3] Falkovsky, L. A., and Varlamov, A. A. (2007). Space-time dispersion of graphene conductivity. Eur. Phys. J. B 56, 281-284.
    https://link.springer.com/article/10.1140/epjb/e2007-00142-3.

    [4] Castro Neto, A. H., Guinea, F., Peres, N. M. R., Novoselov, K. S., and Geim, A. K. (2009).
    The electronic properties of graphene. Rev. Mod. Phys. 81, 109-162.
    https://link.aps.org/doi/10.1103/RevModPhys.81.109.
    '''

    if model!='LowEnergy' and model!='FullTightBinding':
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")

    if eh!=1 and eh!=-1:
        raise ValueError('eh must be either 1 or -1')

    if model == 'LowEnergy':
        # Linear Dirac cone: E = +/- hbar * vF * |k|
        dispersion = eh*sc.hbar*_c.vF*np.abs(k)

    if model == 'FullTightBinding':
        # Input k is measured relative to the K point; shift so the closed
        # form below is evaluated in absolute Brillouin-zone coordinates.
        k = k - _c.K

        f = lambda k: (2*np.cos(np.sqrt(3)*np.imag(k)*_c.a)
                        + 4*np.cos((np.sqrt(3)*np.imag(k)/2)*_c.a)*np.cos((3/2)*np.real(k)*_c.a) )

        # [sic] eh only applies to first term
        dispersion = eh*_c.g0*np.sqrt(3+ f(k)) - g0prime*f(k)

    return dispersion
def FermiWavenumber(FermiLevel,model,g0prime=_c.g0prime):
    '''
    The Fermi wavenumber, i.e. the wavenumber of the state at
    the Fermi energy.

    Parameters
    ----------
    FermiLevel: array-like, J
        Fermi level

    model: string
        'LowEnergy' or 'FullTightBinding'.

    g0prime: scalar, J
        Particle-hole asymmetry parameter (used by 'FullTightBinding' only).

    Returns
    -------
    array-like, rad/m
        The Fermi wavenumber.

    Raises
    ------
    ValueError
        If ``model`` is not 'LowEnergy' or 'FullTightBinding'.

    Examples
    --------
    Confirm energy of Fermi wavevector is equal to Fermi level.

    >>> from graphenemodeling import graphene
    >>> from scipy.constants import elementary_charge as eV
    >>> mlg = graphene.Monolayer()
    >>> FermiLevel = 0.4 * eV
    >>> kF = mlg.FermiWavenumber(FermiLevel, model='LowEnergy')
    >>> mlg.CarrierDispersion(kF,model='LowEnergy')/eV
    0.4
    '''

    # Consistency fix: an unrecognized model previously fell through and
    # silently returned None; raise like the other band-structure functions.
    if model!='LowEnergy' and model!='FullTightBinding':
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")

    if model == 'LowEnergy':
        # Invert E = hbar*vF*k.
        return np.abs(FermiLevel) / (sc.hbar*_c.vF)

    if model == 'FullTightBinding':
        eh = np.sign(FermiLevel)

        # f is zero when kf takes the correct value
        f = lambda kf: FermiLevel - CarrierDispersion(kf, model='FullTightBinding',eh=eh,g0prime=g0prime)

        # Use the LowEnergy result as the starting point for the root finder.
        kf0 = FermiWavenumber(FermiLevel,model='LowEnergy',g0prime=g0prime)

        result = optimize.root_scalar(f,x0=kf0,x1=kf0*.9,rtol=1e-10).root

        return result
def DensityOfStates(E,model,g0prime=_c.g0prime):
    '''
    The density of states per square meter of graphene at energy :math:`E`.

    Parameters
    ----------
    E: array-like, J
        Energy :math:`E` at which to evaluate density of states.

    model: string
        ``'LowEnergy'`` or ``'FullTightBinding'``

    g0prime: scalar, J
        The particle-hole asymmetry parameter :math:`\\gamma_0'`. Typically :math:`0.02\\gamma_0\\leq\\gamma_0'\\leq 0.2\\gamma_0`.
        Only ``g0prime=0`` is supported for ``'FullTightBinding'``.

    Returns
    -------
    array-like
        Density of states, units are states per J-m^2

    Raises
    ------
    ValueError
        If ``model`` is not 'LowEnergy' or 'FullTightBinding', or if
        ``model='FullTightBinding'`` is requested with ``g0prime != 0``.

    Notes
    -----
    For ``model==LowEnergy``, the form is simply

    .. math::

        \\rho(E)=\\frac{2}{\\pi}\\frac{|E|}{\\hbar^2 v_F^2}

    whereas the ``FullTightBinding`` model has a much more complicated form (eqn. 14 of [2])

    .. math::

        \\rho(E)=\\frac{4}{\\pi^2}\\frac{|E|}{\\gamma_0^2}\\frac{1}{\\sqrt{Z_0}}\\mathbf{F}\\left(\\frac{\\pi}{2},\\sqrt{\\frac{Z_1}{Z_0}}\\right)

    where :math:`\\mathbf{F}(\\pi/2,x)` is the complete elliptic integral of the first kind (see `scipy.special.ellipk`_) and

    .. _scipy.special.ellipk: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellipk.html

    .. math::

        Z_0 = \\left\\{\\array{
            (1 + |E/\\gamma_0|)^2 - \\frac{[(E/\\gamma_0)^2-1]^2}{4}, & |E|\\leq \\gamma_0 \n
            4|E/\\gamma_0|, & -3\\gamma_0\\leq E \\leq -\\gamma_0,\\gamma_0\\leq E\\leq 3\\gamma_0
            }\\right.

        Z_1 = \\left\\{\\array{
            4|E/\\gamma_0|, & |E|\\leq \\gamma_0 \n
            (1 + |E/\\gamma_0|)^2 - \\frac{[(E/\\gamma_0)^2-1]^2}{4}, & -3\\gamma_0\\leq E \\leq -\\gamma_0,\\gamma_0\\leq E\\leq 3\\gamma_0
            }\\right.

    Examples
    --------
    Plot the density of states for ``model=LowEnergy`` approximation and ``model=FullTightBinding`` model. Replicates Fig. 5 in Ref. [2].

    .. plot::

        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from graphenemodeling.graphene import _constants as _c
        >>> import matplotlib.pyplot as plt
        >>> E = np.linspace(-3,3,num=200) * _c.g0
        >>> DOS_low = mlg.DensityOfStates(E,model='LowEnergy')
        >>> DOS_full = mlg.DensityOfStates(E,model='FullTightBinding')
        >>> plt.plot(E/_c.g0,DOS_full/np.max(DOS_full),'k-',label='FullTightBinding')
        [<...
        >>> plt.plot(E/_c.g0,DOS_low/np.max(DOS_full),'k-.',label='LowEnergy')
        [<...
        >>> plt.xlabel('$E/\\gamma_0$')
        Text...
        >>> plt.ylabel('DOS (a.u.)')
        Text...
        >>> plt.legend()
        <...
        >>> plt.show()

    References
    ----------
    [1] Hobson, J. P., and Nierenberg, W. A. (1953). The Statistics of a Two-Dimensional, Hexagonal Net. Phys. Rev. 89, 662-662. https://link.aps.org/doi/10.1103/PhysRev.89.662.

    [2] Castro Neto, A. H., Guinea, F., Peres, N. M. R., Novoselov, K. S., and Geim, A. K. (2009).
    The electronic properties of graphene. Rev. Mod. Phys. 81, 109-162.
    https://link.aps.org/doi/10.1103/RevModPhys.81.109.
    '''

    if model=='LowEnergy':
        E = np.abs(E)
        DOS = 2 * E / (sc.pi*(sc.hbar*_c.vF)**2)
        return DOS

    elif model=='FullTightBinding':
        if g0prime!=0:
            # Fix: the original raised here and then had an unreachable
            # ``g0prime=0`` line; the old message also wrongly implied the
            # value would be reset instead of the call failing.
            raise ValueError("model='FullTightBinding' is not supported for g0prime!=0")

        prefactor = 4*np.abs(E) / (sc.pi*_c.g0)**2

        # Z0 and Z1 per eqn. 14 of Ref. [2]; the two expressions swap roles
        # inside versus outside the |E| <= gamma_0 window.
        def fZ0(E):
            if np.abs(E)<np.abs(_c.g0):
                term1 = (1+np.abs(E/_c.g0))**2
                term2 = -((E/_c.g0)**2 - 1)**2 / 4
                return term1 + term2
            else: return 4*np.abs(E/_c.g0)

        def fZ1(E):
            if np.abs(E)<np.abs(_c.g0):
                return 4*np.abs(E/_c.g0)
            else:
                term1 = (1+np.abs(E/_c.g0))**2
                term2 = -((E/_c.g0)**2 - 1)**2 / 4
                return term1 + term2

        dos = np.empty_like(E)

        for j, e in np.ndenumerate(E):
            Z0 = fZ0(e)
            Z1 = fZ1(e)
            ellip = special.ellipk(np.sqrt(Z1/Z0))
            dos[j] = (1/np.sqrt(Z0)) * ellip

        # Normalize by the unit-cell area to get states per J-m^2.
        DOS = prefactor * dos /_c.A
        return DOS

    else:
        # Consistency fix: previously printed a message and silently returned
        # None; raise like the other band-structure functions.
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")
def CarrierDensity(mu,T,model):
    '''
    Computes the carrier density directly from the band structure.

    Parameters
    ----------
    mu: array-like, J
        Chemical potential :math:`\\mu`

    T: scalar, K
        Temperature :math:`T`

    model: string
        ``'LowEnergy'`` or ``'FullTightBinding'`` (passed to DensityOfStates
        when ``T>0``; at ``T=0`` only ``'LowEnergy'`` has a closed form).

    Returns
    -------
    array-like

    Raises
    ------
    ValueError
        If ``T`` is negative, or if ``T=0`` is combined with a model other
        than ``'LowEnergy'``.

    Notes
    -----
    When ``T>0``, we use the standard formula of integrating the Fermi-Dirac distribution over the density of states :math:`\\rho` .

    .. math::

        n = \\int_{-\\infty}^{\\infty} \\frac{1}{e^{(\\epsilon-\\mu)/k_BT}+1}\\rho(\\epsilon) d\\epsilon

    For graphene at ``T=0``, this reduces to

    .. math::

        n = \\frac{\\mu^2}{\\pi\\hbar^2 v_F^2}
    '''
    if T<0:
        raise ValueError('Temperature T must be nonnegative')

    if T==0:
        if model!='LowEnergy':
            # Fix: T=0 with a non-LowEnergy model previously matched neither
            # branch and silently returned None.
            raise ValueError("At T=0, only model='LowEnergy' is supported")
        FermiLevel=mu # chemical potential at T=0 is called Fermi level
        n = (FermiLevel / (sc.hbar*_c.vF))**2 / np.pi
        return n

    if T>0:
        n = np.empty_like(mu)
        for i, m in np.ndenumerate(mu):
            # Electron contribution above the Dirac point, hole contribution
            # below; integration limits span the full tight-binding bandwidth.
            p_electron = lambda e: DensityOfStates(e,model) * sd.FermiDirac(e-m,T)
            p_hole = lambda e: DensityOfStates(e,model) * (1 - sd.FermiDirac(e-m,T))
            n[i] = ( integrate.quad(p_electron,0,3*_c.g0,points=(_c.g0,m))[0] -
                     integrate.quad(p_hole,-3*_c.g0,0,points=(-_c.g0,-m))[0] )
        return n
def ChemicalPotential(n,T=0,model='LowEnergy'):
    '''Chemical potential for a given carrier density.

    Essentially the inverse of CarrierDensity.

    Parameters
    ----------
    n: array-like, m :sup:`-2`
        Carrier density

    T: scalar, K
        Temperature

    model: string
        ``'LowEnergy'`` or ``'FullTightBinding'``.

    Returns
    -------
    array-like
        Chemical potential in J.

    Notes
    -----
    When ``T=0`` and ``model='LowEnergy'`` simultaneously, the closed form

    .. math::

        E_F = \\hbar v_F\\sqrt{\\pi n}

    is used. For ``T>0`` a numerical root finder is used regardless of model.
    '''
    if T==0 and model=='LowEnergy':
        return sc.hbar * _c.vF * np.sqrt(sc.pi * n)

    # Numerically invert CarrierDensity: the residual vanishes at the
    # correct chemical potential.
    residual = lambda mu: n - CarrierDensity(mu, T, model=model)

    # Seed the secant method with the T=0 closed-form value.
    mu_guess = ChemicalPotential(n, T=0, model=model)

    # Offset the second bracket point by 0.1 eV so that x0 != x1 even when
    # mu_guess happens to be zero.
    solution = optimize.root_scalar(residual,
                                    x0=mu_guess,
                                    x1=mu_guess*1.1 + 0.1*sc.elementary_charge,
                                    rtol=1e-10)
    return solution.root
########################
# Electrical Transport #
########################
def Mobility(Te,mu0,T0):
    '''Electron-temperature dependent mobility.

    Scales a reference mobility ``mu0``, measured at temperature ``T0``, by
    the empirical power law ``(Te/T0)**2.3`` (see page 4 of Ref. [1]).

    Parameters
    ----------
    Te: array-like, K
        Electron temperature.

    mu0: scalar, m^2/V-s
        Reference mobility at temperature ``T0``.

    T0: scalar, K
        Reference temperature.

    Returns
    -------
    array-like, m^2/V-s
        Mobility at ``Te``.

    References
    ----------
    [1] Shiue et al. 2019
    URL: http://www.nature.com/articles/s41467-018-08047-3

    [2] Dorgan et al. 2013.
    URL: https://doi.org/10.1021/nl400197w

    [3] Bae et al. 2010
    URL: https://doi.org/10.1021/nl1011596
    '''
    scaling = (Te / T0) ** 2.3
    return mu0 * scaling
def ScatteringRate(mobility,FermiLevel):
    '''
    Estimated DC scattering rate implied by a measured mobility.

    Parameters
    ----------
    mobility: scalar, m^2/V-s
        Carrier mobility.

    FermiLevel: scalar, J
        Fermi level.

    Returns
    ----------
    rate: scalar
        Scattering rate, the inverse of the transport scattering time
        ``tau = mobility * FermiLevel / (e * vF**2)``.
    '''
    scattering_time = mobility * FermiLevel / (sc.elementary_charge * _c.vF**2)
    return 1 / scattering_time
def Polarizibility(q,omega,gamma,FermiLevel,T=0):
    '''
    The Polarizibility :math:`\\chi^0` of graphene.

    Parameters
    ----------
    q: array-like, rad/m
        Difference between scattered wavevector and incident

    omega: array-like, rad/s
        Angular frequency

    gamma: scalar, rad/s
        scattering rate due to mechanisms such as impurities (i.e. not Landau Damping)
        We use the Mermin-corrected Relaxation time approximation (Eqn 4.9 of Ref 1)

    FermiLevel: scalar, J
        Fermi level of graphene.

    T: scalar, K
        Temperature

    Raises
    ------
    ValueError
        If ``gamma==0`` and ``T>0``: finite temperature is only handled
        through the Mermin correction, which requires ``gamma>0``.

    Notes
    -----
    The Polarizibiliy function in graphene. Can be derived from a
    self-consistent field method or the Kubo formula.

    .. math::

        \\chi^0(\\mathbf q, \\omega) = \\frac{g}{A}\\sum_{nn'\\mathbf k}\\frac{f_{n\\mathbf k} - f_{n'\\mathbf{k+q}}}{\\epsilon_{n\\mathbf k}-\\epsilon_{n'\\mathbf{k+q}}+\\hbar(\\omega+i\\eta)}|\\left<n\\mathbf k|e^{-i\\mathbf{q\\cdot r}}|n'\\mathbf{k+q}\\right>|^2

    For ``gamma == 0``, this returns equation 17 of Ref 2, which is the
    polarizibility for general complex frequencies.

    For ``gamma > 0``, we return the Mermin-corrected Relaxation time approximation
    (Eqn 4.9 of Ref 1), which calls the gamma==0 case.

    Examples
    --------
    Plot the real and imaginary part of :math:`\\chi^0`, normalized to density of states. Replicates Fig. 1 of Ref. [3].

    .. plot::

        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> import matplotlib.pyplot as plt
        >>> from matplotlib import cm
        >>> from scipy.constants import elementary_charge as eV
        >>> from scipy.constants import hbar
        >>> eF = 0.4*eV
        >>> gamma = 0
        >>> DOS = mlg.DensityOfStates(eF,model='LowEnergy')
        >>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
        >>> omega = np.linspace(0.01,2.5,num=250) / (hbar/eF)
        >>> q = np.linspace(0.01,2.5,num=250) * kF
        >>> pol = mlg.Polarizibility(q, omega[:,np.newaxis],gamma,eF)
        >>> fig, (re_ax, im_ax) = plt.subplots(1,2,figsize=(11,4))
        >>> re_img = re_ax.imshow( np.real(pol)/DOS,
        ...             extent=(q[0]/kF,q[-1]/kF,hbar*omega[0]/eF,hbar*omega[-1]/eF),
        ...             vmin=-2,vmax=1, cmap=cm.gray,
        ...             origin = 'lower', aspect='auto'
        ...             )
        <...
        >>> re_cb = fig.colorbar(re_img, ax = re_ax)
        >>> im_img = im_ax.imshow( np.imag(pol)/DOS,
        ...             extent=(q[0]/kF,q[-1]/kF,hbar*omega[0]/eF,hbar*omega[-1]/eF),
        ...             vmin=-1,vmax=0, cmap=cm.gray,
        ...             origin = 'lower', aspect='auto'
        ...             )
        <...
        >>> im_cb = fig.colorbar(im_img, ax = im_ax)
        >>> re_ax.set_ylabel('$\\hbar\\omega$/$\\epsilon_F$',fontsize=14)
        Text...
        >>> re_ax.set_xlabel('$q/k_F$',fontsize=14)
        Text...
        >>> im_ax.set_ylabel('$\\hbar\\omega$/$\\epsilon_F$',fontsize=14)
        Text...
        >>> im_ax.set_xlabel('$q/k_F$',fontsize=14)
        Text...
        >>> plt.show()

    References
    ----------
    [1] Christensen Thesis 2017

    [2] Christensen, T. Retarded interactions in graphene systems.

    [3] Wunsch, B., Stauber, T., Sols, F., and Guinea, F. (2006).
    Dynamical polarization of graphene at finite doping. New J. Phys. 8, 318-318.
    https://doi.org/10.1088%2F1367-2630%2F8%2F12%2F318.

    [4] Hwang, E. H., and Das Sarma, S. (2007).
    Dielectric function, screening, and plasmons in two-dimensional graphene.
    Phys. Rev. B 75, 205418.
    https://link.aps.org/doi/10.1103/PhysRevB.75.205418.
    '''

    if gamma==0 and T==0:
        # Equation 17 of Ref 2: closed form for general complex frequency.
        prefactor = -DensityOfStates(FermiLevel,model='LowEnergy')

        kF = FermiWavenumber(FermiLevel, model='LowEnergy')

        x = q / (2*kF)
        zbar = sc.hbar*omega / (2*FermiLevel)

        f = lambda x,zbar: (np.arcsin( (1-zbar)/x) + np.arcsin( (1+zbar)/x )
                            - ((zbar-1)/x)*np.sqrt(1 - ((zbar-1)/x)**2 )
                            + ((zbar+1)/x)*np.sqrt(1 - ((zbar+1)/x)**2 ) )

        # The 1e-9j term nudges the frequency off the real axis so the square
        # root is evaluated on the retarded branch.
        dd = 1 + (x**2 / (4*np.sqrt(x**2 - (zbar+1e-9*1j)**2 ))) * (sc.pi - f(x,zbar+1e-9*1j))

        return prefactor * dd

    elif gamma !=0:
        # Mermin-corrected Relaxation Time Approximation (Eqn 4.9 of Ref 1)
        pol_complex_arg = Polarizibility(q,omega+1j*gamma,0,FermiLevel,T=0)

        pol_0 = Polarizibility(q,0,0,FermiLevel,T=0)

        numerator = (1 + 1j*gamma/omega) * pol_complex_arg
        denominator = 1 + ( 1j*gamma/omega * pol_complex_arg / pol_0 )

        return numerator / denominator

    else:
        # Fix: gamma==0 with T>0 previously matched neither branch and
        # silently returned None.
        raise ValueError('Polarizibility with gamma=0 requires T=0; '
                         'finite T is only supported through gamma>0')
def dPolarizibility(q,omega,gamma,FermiLevel,T,dvar,diff=1e-7):
    '''
    The derivative of the real part of the polarizibility at (q, omega)
    with respect to the chosen variable ``dvar``, via central differences.

    Parameters
    ----------
    q: array-like, rad/m
        Difference between scattered wavevector and incident

    omega: array-like, rad/s
        Angular frequency

    gamma: scalar, rad/s
        The scattering rate

    FermiLevel: scalar, J
        the Fermi level

    T: scalar, K
        Temperature

    dvar: string
        'omega': take the partial with respect to omega;
        'q': take the partial with respect to q.

    diff: scalar
        Relative size of the central finite difference.

    Returns
    -------
    array-like
        The requested partial derivative of Re[chi^0].

    Raises
    ------
    ValueError
        If ``dvar`` is not 'omega' or 'q'.
    '''

    if dvar == 'omega':
        P = lambda w: np.real(Polarizibility(q,w,gamma,FermiLevel,T))
        wa, wb = omega*(1-diff), omega*(1+diff)
        return (P(wb)-P(wa))/(2*omega*diff)

    elif dvar == 'q':
        P = lambda qv: np.real(Polarizibility(qv,omega,gamma,FermiLevel,T))
        qa,qb = q*(1-diff), q*(1+diff)
        return (P(qb)-P(qa))/(2*q*diff)

    else:
        # Fix: an unrecognized dvar previously returned None silently.
        raise ValueError("dvar must be 'omega' or 'q'")
######################
# Optical Properties #
######################
def ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T,model=None):
    '''The diagonal conductivity of graphene :math:`\\sigma_{xx}`.
    Parameters
    ----------
    q: array-like, rad/m
        Wavenumber
    omega: array-like, rad/s
        Angular frequency
    FermiLevel: scalar, J
        the Fermi energy
    gamma: scalar, rad/s
        scattering rate
    T: scalar, K
        Temperature
    model: string
        Typically 'None', but for a specific model, specify it.
    Returns
    -------
    array-like
        conductivity at every value of omega
    Raises
    ------
    NotImplementedError
        For the nonlocal (q != 0) case at finite temperature, which is
        not implemented.
    Examples
    --------
    Plot the optical conductivity normalized to intrinsic conductivity :math:`\\sigma_0`.
    Replicates Fig. 4.4 in Ref. [1].
    .. plot ::
        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from scipy.constants import elementary_charge, hbar
        >>> from graphenemodeling.graphene._constants import sigma_0
        >>> import matplotlib.pyplot as plt
        >>> eF = 0.4 * elementary_charge
        >>> g = 0.012 * elementary_charge / hbar
        >>> w = np.linspace(0.01,3,num=150) / (hbar/eF)
        >>> s_0K_0g = mlg.ScalarOpticalConductivity(q=0,omega=w,gamma=0,FermiLevel=eF,T=0)
        >>> s_0K_12g = mlg.ScalarOpticalConductivity(q=0,omega=w,gamma=g,FermiLevel=eF,T=0.01)
        >>> s_300K_12g = mlg.ScalarOpticalConductivity(q=0,omega=w,gamma=g,FermiLevel=eF,T=300)
        >>> fig, (re_ax, im_ax) = plt.subplots(1,2,figsize=(11,4))
        >>> s_Re = np.real(s_0K_0g)
        >>> s_Im = np.imag(s_0K_0g)
        >>> re_ax.plot(w*hbar/eF,s_Re/sigma_0,'r',label='T=0, $\\hbar\\gamma$=0 meV')
        <...
        >>> im_ax.plot(w*hbar/eF,s_Im/sigma_0,'r',label='T=0, $\\hbar\\gamma$=0 meV')
        <...
        >>> s_Re = np.real(s_0K_12g)
        >>> s_Im = np.imag(s_0K_12g)
        >>> re_ax.plot(w*hbar/eF,s_Re/sigma_0,color='royalblue',label='T=0, $\\hbar\\gamma$=12 meV')
        <...
        >>> im_ax.plot(w*hbar/eF,s_Im/sigma_0,color='royalblue',label='T=0, $\\hbar\\gamma$=12 meV')
        <...
        >>> s_Re = np.real(s_300K_12g)
        >>> s_Im = np.imag(s_300K_12g)
        >>> re_ax.plot(w*hbar/eF,s_Re/sigma_0,color='green',label='T=300 K, $\\hbar\\gamma$=12 meV')
        <...
        >>> im_ax.plot(w*hbar/eF,s_Im/sigma_0,color='green',label='T=300 K, $\\hbar\\gamma$=12 meV')
        <...
        >>> re_ax.set_ylabel('Re[$\\sigma$]/$\\sigma_0$')
        >>> re_ax.set_xlabel('$\\hbar\\omega/E_F$')
        >>> re_ax.plot(w*hbar/eF,np.ones_like(w),'--',color='gray')
        >>> re_ax.set_ylim(0,2)
        >>> im_ax.set_ylabel('Im[$\\sigma$]/$\\sigma_0$')
        >>> im_ax.set_xlabel('$\\hbar\\omega/E_F$')
        >>> im_ax.plot(w*hbar/eF,np.zeros_like(w),'--',color='gray')
        >>> im_ax.set_ylim(-2,3)
        >>> plt.legend()
        >>> plt.show()
    Notes
    -----
    The *matrix* optical conductivity :math:`\\overleftrightarrow{\\sigma}(q,\\omega)`
    relates the surface current :math:`\\mathbf K(\\omega)`
    to an applied electric field :math:`\\mathbf E(\\omega)`.
    The fully general, anisotropic, nonlocal expression is given by
    .. math::
        \\mathbf K(\\omega)=\\int \\overleftrightarrow\\sigma(q,\\omega)\\mathbf E(\\omega) dq
    Here, :math:`\\omega` refers to the frequency and :math:`q` refers to the scattering wavevector.
    The above expression fully general incorporating anisotropies and nonlocality and is rarely needed.
    In most cases, :math:`\\overleftrightarrow{\\sigma}` is isotropic,
    so the above equation can be reduced to a *scalar* equation
    .. math::
        K(\\omega)=\\int \\sigma(q,\\omega)E(\\omega)dq
    This is the conductivity in this function. The most general expression for the conductivity is given by the Kubo formula
    .. math::
        \\sigma(q,\\omega)=\\frac{ie^2\\omega}{q^2}\\chi^0(q,\\omega).
    where :math:`\\chi^0` is the Polarizibility. If ``q`` is nonzero, this form is used.
    However, it is common to use a simpler limiting cases of this expression.
    The local conductivity (called when ``q==0``) is the one which is most familiar
    and it relates the surface current to the electric field linearly
    .. math::
        \\mathbf K(\\omega)=\\sigma(\\omega)\\mathbf E
    It can be found from the nonlocal conductivity by taking the limit :math:`\\lim_{q\\to 0}\\sigma(q,\\omega)=\\sigma(\\omega)`
    and takes the form :math:`\\sigma(\\omega)=\\sigma_{intra}(\\omega)+\\sigma_{inter}(\\omega)`,
    where the intraband and interband components are given by
    .. math::
        \\sigma_{intra}(\\omega) = \\frac{2ie^2k_BT}{\\pi\\hbar^2(\\omega+i\\gamma)}\\ln\\left [ 2 \\cosh \\frac{E_F}{2k_BT} \\right ]
    and
    .. math::
        \\sigma_{inter}(\\omega) = \\frac{e^2}{4\\hbar}\\left [ H(\\hbar\\omega/2) + \\frac{4i}{\\pi}\\hbar ( \\omega +i \\gamma )\\int_0^\\infty \\frac{H( \\epsilon )-H(\\hbar\\omega /2)}{\\hbar^2(\\omega +i\\gamma )^2-4\\epsilon^2} d\\epsilon\\right ]
    where
    .. math::
        H(\\epsilon) = f(-\\epsilon)-f(\\epsilon) = \\frac{\\sinh(\\epsilon/k_BT)}{\\cosh(E_F/k_BT) + \\cosh(\\epsilon/k_BT)}
    For ``T=0`` these expressions reduce to
    .. math::
        \\sigma_{intra}(\\omega) = \\frac{ie^2E_F}{\\pi\\hbar^2(\\omega+i\\gamma)}
    .. math::
        \\sigma_{inter}(\\omega) = \\frac{e^2}{4\\hbar}\\left [ \\Theta(\\hbar\\omega - 2E_F) + \\frac{i}{\\pi} \\ln\\left [\\frac{2E_F-\\hbar\\omega}{2E_F+\\hbar\\omega} \\right ] \\right ]
    References
    ----------
    [1] <NAME>. (2017).
    From Classical to Quantum Plasmonics in Three and Two Dimensions (Cham: Springer International Publishing).
    http://link.springer.com/10.1007/978-3-319-48562-1
    '''
    # Local case
    if np.all(q) == 0:
        if T!=0:
            intra_pre = 4 * _c.sigma_0 * (2*1j*sc.k*T) / (sc.pi*sc.hbar)
            inter_pre = _c.sigma_0
            ### Intraband Contribution ###
            # Using np.logaddexp() avoids the large cosh in ln( cosh(1/T) )
            x = FermiLevel / (2*sc.k*T)
            intra = lambda w: intra_pre * ( 1 / (w + 1j*gamma) ) * np.logaddexp(x,-x)
            ### Interband Contribution ###
            H = lambda energy: sd.FermiDirac(-energy-FermiLevel,T) - sd.FermiDirac(energy-FermiLevel,T)
            integrand = lambda energy,w: ( H(energy) - H(sc.hbar*w/2) ) / (sc.hbar**2 * (w + 1j*gamma)**2 - 4 * energy**2)
            def integral(w):
                # Quadrature of the interband integral, element by element,
                # splitting the real and imaginary parts for scipy.integrate.quad.
                result = np.empty_like(w,dtype=complex)
                for i, frequency in np.ndenumerate(w):
                    integrand_re = lambda e: np.real(integrand(e,frequency))
                    integrand_im = lambda e: np.imag(integrand(e,frequency))
                    result[i] =( integrate.quad(integrand_re,0,10*FermiLevel,points=(FermiLevel/sc.hbar,2*FermiLevel/sc.hbar))[0]
                                + 1j*integrate.quad(integrand_im,0,10*FermiLevel,points=(FermiLevel/sc.hbar,2*FermiLevel/sc.hbar))[0] )
                return result
            inter = lambda w: inter_pre * ( H(sc.hbar * w / 2) +
                            (4*1j/sc.pi) * sc.hbar*(w + 1j*gamma)*integral(w) )
            conductivity= intra(omega) + inter(omega)
        if T==0:
            # Zero-temperature closed forms (see Notes)
            intra = lambda w: 1j*_c.sigma_0 * 4*FermiLevel / (sc.pi*sc.hbar* (w + 1j*gamma))
            inter = lambda w: _c.sigma_0 * ( np.heaviside(sc.hbar*w - 2*FermiLevel,0.5) +
                            (1j/sc.pi) * np.log(np.abs((2*FermiLevel-sc.hbar*w)/(2*FermiLevel+sc.hbar*w))))
            conductivity = intra(omega) + inter(omega)
    # Nonlocal case
    else:
        if T==0:
            # Kubo formula in terms of the polarizibility (see Notes)
            conductivity = 1j*sc.elementary_charge**2 * (omega / q**2) * Polarizibility(q,omega,gamma,FermiLevel,T)
        else:
            # FIX: this branch was previously `pass`, which left `conductivity`
            # unbound and crashed with UnboundLocalError at the return below.
            raise NotImplementedError(
                'Nonlocal conductivity (q != 0) at finite temperature is not implemented.')
    return conductivity
def OpticalConductivityMatrix(q,omega,gamma, FermiLevel,T):
    '''
    Returns the conductivity matrix of monolayer graphene.

    The off-diagonal entries are zero; both diagonal entries are the
    scalar (isotropic) optical conductivity.

    Parameters
    ----------
    q: array-like, rad/m
        Wavenumber
    omega: array-like, rad/s
        Angular frequency
    FermiLevel: scalar, J
        the Fermi energy
    gamma: scalar, rad/s
        scattering rate
    T: scalar, K
        Temperature

    Returns
    ----------
    sigma_matrix: 2x2 numpy array, conductivity of monolayer graphene
    '''
    sigma_diag = ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T)
    return np.array([[sigma_diag, 0],
                     [0, sigma_diag]])
def Permittivity(q, omega,FermiLevel,T, gamma,epsR,model=None):
    '''
    Returns the in-plane permittivity of graphene.

    Parameters
    ----------
    q: array-like, rad/m
        Wavenumber
    omega: array-like, rad/s
        Angular frequency
    epsR: scalar, unitless
        background relative permittivity

    Raises
    ------
    NotImplementedError
        When ``omega`` is identically zero: the static-screening expression
        of Ref. 2 is documented below but not implemented. (Previously this
        case silently fell through and returned None.)

    Notes
    -----
    Permittivity relates to the scalar optical conductivity through the expression
    .. math::
        \\epsilon(q, \\omega) = \\epsilon_0 + \\frac{i\\sigma(q,\\omega)}{\\omega}
    At :math:`\\omega=0`, we can use the expression found in Ref. 2,
    .. math::
        \\epsilon(q) = \\kappa^* + \\frac{2\\pi e^2}{\\kappa q}\\Pi^+(q)
    References
    ----------
    [1] “Lumerical: Modeling Methodology.” n.d. Accessed April 1, 2019.
    https://apps.lumerical.com/other_application_graphene_simulation_tips.html.
    [2] <NAME>., and <NAME>. (2007).
    Dielectric function, screening, and plasmons in two-dimensional graphene.
    Phys. Rev. B 75, 205418. https://link.aps.org/doi/10.1103/PhysRevB.75.205418.
    '''
    if model=='Lumerical:Falkovsky':
        # Use eqn 10 of Ref 1 (Falkovsky model as implemented by Lumerical).
        x1 = sc.elementary_charge
        x2 = sc.hbar
        x3 = FermiLevel
        x4 = _c.vF
        # NOTE(review): mu0 and mu0T are not parameters of this function;
        # presumably they are module-level globals defined elsewhere — confirm.
        x5 = Mobility(T,mu0,mu0T) # mobility at the new temperature
        x6 = epsR
        x7 = _c.thickness # 3.4 angstroms by default
        x8 = sc.epsilon_0
        prefactor = x1**2*x3 / ( sc.pi * x2**2)
        denominator = omega**2 + ( x1*x4**2 / (x5*x3) )**2
        term1 = - prefactor*(omega*x8*x7)**(-1) * omega / denominator
        term2 = 1j*prefactor * (x1*x4**2 / (omega*x5*x3*x8*x7)) / denominator
        eps = x6 + term1 + term2
        return sc.epsilon_0*eps
    elif np.all(omega==0):
        # FIX: this branch was previously `pass`, silently returning None.
        raise NotImplementedError(
            'Static (omega=0) permittivity is not implemented; see Ref. 2 Eqn.')
    else:
        eps = 1 + 1j*ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T,model)/(omega*sc.epsilon_0)
        return eps
def FresnelReflection(q,omega,gamma,FermiLevel,T,eps1,eps2,polarization):
    '''
    The Fresnel Reflection coefficients of light incident from above (medium 1, eps1).
    Equation 5.4 of Ref 1
    Parameters
    ----------
    q: array-like, rad/m
        Wavenumber at which to evaluate FresnelReflection.
        In-plane momentum of incident light.
    omega: array-like, rad/s
        Angular frequency of incident light.
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittivity in lower half-space
    polarization: string
        's'/'TE' or 'p'/'TM' for s- or p-polarization.
    Raises
    ------
    ValueError
        If polarization is not one of 's', 'TE', 'p', 'TM'. (Previously an
        unrecognized polarization crashed with UnboundLocalError.)
    Examples
    --------
    Plot the TM polarized Fresnel Reflection coefficient. This will highlight the plasmon.
    Replicates Fig. 5.2 in Ref [1].
    .. plot::
        >>> import matplotlib.pyplot as plt
        >>> import matplotlib.cm as cm
        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from scipy.constants import elementary_charge, hbar
        >>> eV = elementary_charge
        >>> FermiLevel = 0.4 * eV
        >>> gamma = 0.012 * eV / hbar
        >>> kF = mlg.FermiWavenumber(FermiLevel,model='LowEnergy')
        >>> q = np.linspace(1e-2,3,num=200) * kF
        >>> w = np.linspace(1e-2,3,num=200) * FermiLevel / hbar
        >>> fresnelTM = mlg.FresnelReflection(q,w[:,np.newaxis],gamma,FermiLevel,T=0,
        ...                                     eps1=1,eps2=1,
        ...                                     polarization='TM')
        >>> fig, ax = plt.subplots(figsize=(6,6))
        >>> ax.imshow(-np.imag(fresnelTM),
        ...           extent=(q[0]/kF,q[-1]/kF,hbar*w[0]/FermiLevel,hbar*w[-1]/FermiLevel),
        ...           origin='lower',aspect='auto',cmap=cm.hot,vmin=-16,vmax=0)
        >>> ax.set_xlabel('$q/k_F$')
        >>> ax.set_ylabel('$\\hbar\\omega/E_F$')
        >>> ax.set_ylim(0,3)
        >>> ax.set_xlim(0,3)
        >>> fig.suptitle('Fresnel Reflection Coefficient (TM)')
        >>> plt.show()
    References
    ----------
    [1] <NAME>. (2017).
    From Classical to Quantum Plasmonics in Three and Two Dimensions (Cham: Springer International Publishing).
    http://link.springer.com/10.1007/978-3-319-48562-1.
    '''
    # Validate before doing any work so a typo fails loudly and early.
    if polarization not in ('p','TM','s','TE'):
        raise ValueError(
            "polarization must be one of 'p', 'TM', 's', 'TE'; got %r" % (polarization,))
    # Out-of-plane wavevectors in each half-space; the small +1e-9j shift
    # selects a single branch of the square root near the light line.
    kperp1, kperp2 = np.sqrt(eps1*(omega/sc.speed_of_light)**2 - q**2 + 1e-9*1j), np.sqrt(eps2*(omega/sc.speed_of_light)**2 - q**2 + 1e-9*1j)
    sigma = ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T)
    if polarization=='p' or polarization=='TM':
        numerator = eps2*kperp1 - eps1*kperp2 + ( sigma*kperp1*kperp2 / (sc.epsilon_0*omega) )
        denominator = eps2*kperp1 + eps1*kperp2 + ( sigma*kperp1*kperp2 / (sc.epsilon_0*omega) )
    else:  # 's' or 'TE'
        numerator = kperp1 - kperp2 - sc.mu_0*omega*sigma
        denominator = kperp1 + kperp2 + sc.mu_0*omega*sigma
    return numerator / denominator
def LocalDensityOfStates(d,omega,gamma,FermiLevel,T):
    '''
    The LDOS a distance d above a plane of graphene.

    Implements Eqn 44 of the SM of Ref 1, assuming the graphene sheet
    is in vacuum.

    References
    -----------
    [1] Miller et al. 2017
    '''
    free_k = omega / sc.speed_of_light
    # Free-space LDOS sets the overall scale
    ldos_vacuum = free_k**2 / (2 * sc.pi**2 * sc.speed_of_light)
    result = np.empty_like(d)
    for idx, height in np.ndenumerate(d):
        kw = omega / sc.speed_of_light
        def integrand(qq, z0=height):
            # Imaginary part of the p-polarized reflection, weighted by the
            # evanescent decay exp(-2 q z0)
            rp_im = np.imag(FresnelReflection(qq, omega, gamma, FermiLevel, T, 1, 1, 'p'))
            return (qq**2 / kw**3) * rp_im * np.exp(-2 * qq * z0)
        lower, upper = 1e-3, np.abs(_c.K)  # Increasing this bound does not lead to more convergence
        q_pl = InversePlasmonDispersion(omega, gamma, FermiLevel, 1, 1, T, model='nonlocal')
        result[idx] = integrate.quad(integrand, lower, upper,
                                     points=(q_pl), limit=100)[0]
    return ldos_vacuum * result
#####################
# Phonon Properties #
#####################
def PhononSelfEnergy():
    # Placeholder: phonon self-energy is not implemented yet; returns None.
    pass
##############
# Plasmonics #
##############
def PlasmonDispersion(q,gamma,FermiLevel,eps1,eps2,T,model):
    '''
    Returns the nonretarded plasmon dispersion relations E(q) for a surface
    plasmon in an infinite sheet of graphene sandwiched between two
    dielectrics eps1 and eps2.
    All values returned are assumed to be at zero temperature with no loss (gamma).
    Parameters
    ----------
    q: array-like, rad/m
        Wavenumber of the plasmon
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittivity in lower half-space
    model: string
        'intra' for intraband dispersion,
        'local' uses the intraband + interband constributions to the conductivity, and
        'nonlocal' for fully nonlocal conductivity.
    Returns
    -------
    omega: array-like
        Frequency of the plasmon with wavenumber q
    Notes
    -----
    ``model=='intra'`` uses
    .. math::
        \\omega = \\frac{1}{\\hbar}\\sqrt{\\frac{e^2\\epsilon_F}{2\\pi\\epsilon_0\\bar\\epsilon}q}
    ``model=='local'`` uses
    .. math::
        1-\\frac{i\\text{Im}[\\sigma(q,\\omega)]}{2i\\epsilon_0\\bar\\epsilon\\omega}=0
    and finally, ``model=='nonlocal'`` uses
    .. math::
        1 - \\frac{\\sigma(q,\\omega)q}{2i\\epsilon_0\\bar\\epsilon\\omega} = 0
    Examples
    --------
    Plot the three expressions for the dispersion relation. Replicates Fig. 5.2 in Ref. [1].
    .. plot::
        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from scipy.constants import elementary_charge, hbar
        >>> eV = elementary_charge
        >>> gamma=0.012 * eV / hbar
        >>> eF = 0.4*eV
        >>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
        >>> q = np.linspace(1e-3,3,num=200) * kF
        >>> disp_intra = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='intra')
        >>> disp_local = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='local')
        >>> disp_nonlocal = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='nonlocal')
        >>> fig, ax = plt.subplots(figsize=(6,6))
        >>> ax.plot(q/kF,hbar*disp_intra/eF,'--',label='Intraband')
        <...
        >>> ax.plot(q/kF,hbar*disp_local/eF,'r-.',label='Local')
        <...
        >>> ax.plot(q[:190]/kF,hbar*disp_nonlocal[:190]/eF,'g',label='Nonlocal')
        <...
        >>> ax.plot(q/kF,q/kF,color='gray',linestyle='--')
        <...
        >>> ax.set_xlabel('$q/k_F$')
        >>> ax.set_ylabel('$\\hbar\\omega/E_F$')
        >>> ax.set_xlim(0,3)
        >>> ax.set_ylim(0,3)
        >>> plt.legend()
        >>> plt.show()
    Plot dispersion relation with a lower half-space permittivity of :math:`\\epsilon=4`` (an approximation for hexagonal boron nitride).
    Replicates Fig. 1d in Ref. [2].
    .. plot::
        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from scipy.constants import elementary_charge, hbar
        >>> eV = elementary_charge
        >>> gamma=0.012 * eV / hbar
        >>> eF = 0.4*eV
        >>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
        >>> q = np.linspace(1e-3,2,num=200) * kF
        >>> vac_intra = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='intra')
        >>> hbn_intra = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=4,T=0,model='intra')
        >>> vac_nonlocal = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=1,T=0,model='nonlocal')
        >>> hbn_nonlocal = mlg.PlasmonDispersion(q,gamma,eF,eps1=1,eps2=4,T=0,model='nonlocal')
        >>> fig, ax = plt.subplots(figsize=(6,6))
        >>> ax.plot(q/kF,hbar*vac_intra/eF,'k-.',label='Vacuum Intraband')
        <...
        >>> ax.plot(q/kF,hbar*hbn_intra/eF,linestyle='dotted',color='purple',label='hBN Intraband')
        <...
        >>> ax.plot(q/kF,hbar*vac_nonlocal/eF,'k-',label='Vacuum Nonlocal')
        <...
        >>> ax.plot(q/kF,hbar*hbn_nonlocal/eF,'--',color='purple',label='hBN Nonlocal')
        <...
        >>> ax.plot(q/kF,q/kF,color='gray',linestyle='--')
        <...
        >>> ax.set_xlabel('$q/k_F$')
        >>> ax.set_ylabel('$\\hbar\\omega/E_F$')
        >>> ax.set_xlim(0,2)
        >>> ax.set_ylim(0,2)
        >>> plt.legend()
        >>> plt.show()
    References
    ----------
    [1] <NAME>. (2017).
    From Classical to Quantum Plasmonics in Three and Two Dimensions (Cham: Springer International Publishing).
    http://link.springer.com/10.1007/978-3-319-48562-1.
    [2] <NAME>., <NAME>., and <NAME>. (2009).
    Plasmonics in graphene at infrared frequencies. Phys. Rev. B 80, 245435.
    https://link.aps.org/doi/10.1103/PhysRevB.80.245435.
    '''
    # Average of the two half-space permittivities (epsilon-bar in the Notes)
    epsavg = (eps1+eps2)/2
    # Analytical expression in intraband approximation
    if model=='intra':
        radical = q * sc.elementary_charge**2 * FermiLevel / (2*sc.pi * sc.epsilon_0 * epsavg)
        return (1/sc.hbar) * np.sqrt(radical)
    if model=='local':
        omega = np.empty_like(q)
        # NOTE(review): the conductivity here hard-codes T=0 even though T is
        # a parameter — confirm this is intentional (docstring says T=0 assumed).
        sigma = lambda w: ScalarOpticalConductivity(0,w,gamma,FermiLevel,T=0)
        for i,q0 in np.ndenumerate(q):
            # Root of the local dispersion condition (see Notes)
            root_eqn = lambda w: 1 - np.imag(sigma(w))*q0 / (2*sc.epsilon_0*epsavg*w)
            # Bracket: intraband frequency on one side, near-zero on the other
            a, b = PlasmonDispersion(q0,gamma,FermiLevel,eps1,eps2,T,model='intra'), 1e-8
            omega[i] = optimize.brentq(root_eqn,a,b)
        return omega
    if model=='nonlocal':
        omega = np.empty_like(q)
        kF = FermiWavenumber(FermiLevel,model='LowEnergy')
        for i, q0 in np.ndenumerate(q):
            # NOTE(review): T=0 is hard-coded here as well — confirm.
            root_eqn = lambda w: PlasmonDispersionRoot(q0,w,gamma,FermiLevel,eps1,eps2,T=0)
            # Frequency is definitely below 1,1 intraband dispersion
            b = PlasmonDispersion(q0,gamma,FermiLevel,1,1,T,model='intra')
            # Frequency is definitely above the minimum which should be <0
            a = optimize.minimize_scalar(root_eqn,bounds=((FermiLevel/sc.hbar)*q0/kF,b),method='bounded').x
            if root_eqn(a) > 0:
                # No sign change in the bracket: no plasmon solution found
                omega[i]=0
            else:
                # Locate the zero by minimizing |root_eqn| within the bracket
                root_eqn_abs= lambda w: np.abs(root_eqn(w))
                omega[i] = optimize.minimize_scalar(root_eqn_abs,bounds=(a,b),method='bounded').x
        # Maybe add in a fit to the width for the nonlocal version
        return omega
    # NOTE(review): an unrecognized model falls through and returns None.
def PlasmonDispersionRes(q,gamma,FermiLevel,eps1,eps2,T,exp_res=1):
    '''
    Uses the FresnelReflection coefficients to numerically search for the
    plasmon dispersion, by fitting a Lorentzian to Im[r_TM] for each q.

    Parameters
    ----------
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittivity in lower half-space
    exp_res: expected number of resonances
    '''
    q = np.atleast_1d(q)
    residual = lambda p, x, y: sd.Lorentz(p, x) - y
    fitted = np.empty((np.size(q), 3))
    for idx, q0 in np.ndenumerate(q):
        # Search window: from the electron-hole continuum edge (q*vF)
        # up to the intraband dispersion estimate
        w_lo = q0 * _c.vF
        w_hi = PlasmonDispersion(q0, gamma, FermiLevel, eps1, eps2, T, model='intra')
        w_mid = (w_hi + w_lo) / 2
        freqs = np.linspace(w_lo, w_hi, num=300)
        im_r = np.imag(FresnelReflection(q0, freqs, gamma, FermiLevel, T, eps1, eps2, 'TM'))
        # Initial guess: centered in the window with a 10% width
        guess = [w_mid, 0.1 * w_mid, 10]
        best = optimize.leastsq(residual, guess,
                                args=(freqs, im_r))
        fitted[idx, :] = best[0]
    return np.abs(fitted)
def InversePlasmonDispersion(omega,gamma,FermiLevel,eps1,eps2,T,model):
    '''
    Returns the wavenumber of a plasmon given the frequency.

    Useful when determining the wavelength of a plasmon excited by light.

    Parameters
    ----------
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittivity in lower half-space
    '''
    kF = FermiWavenumber(FermiLevel, model='LowEnergy')
    qmax = 4 * kF
    q = np.empty_like(omega)
    for idx, w0 in np.ndenumerate(omega):
        # Minimize the mismatch between the target frequency and the dispersion
        mismatch = lambda qq: np.abs(w0 - PlasmonDispersion(qq, gamma, FermiLevel, eps1, eps2, T, model))
        attempt = 1
        while attempt < 5:
            sol = optimize.minimize_scalar(mismatch, bounds=(1e-6, attempt * qmax), method='bounded')
            q[idx] = sol.x
            if q[idx] >= qmax:
                # Minimum pinned near the search boundary: widen the bracket and retry
                attempt = attempt + 1
            else:
                attempt = 5
    return q
def PlasmonDispersionRoot(q,omega,gamma,FermiLevel, eps1,eps2 ,T):
    '''
    The equation used for numerically solving the plasmon dispersion in the
    nonretarded regime. The plasmon frequency is the zero of this expression.

    Parameters
    ----------
    eps1: scalar, unitless
        Permittivity in lower half-space
    eps2: scalar, unitless
        Permittivity in upper half-space
    '''
    eps_mean = (eps1 + eps2) / 2
    sigma_im = np.imag(ScalarOpticalConductivity(q, omega, gamma, FermiLevel, T))
    return 1 - sigma_im * q / (2 * sc.epsilon_0 * eps_mean * omega)
def PlasmonDispersionLoss(omega,gamma,FermiLevel,eps1,eps2,T,model):
    '''
    The loss of the plasmon wavenumber q=q1+iq2. Returns q2. Equation 15 of Ref [1]
    with tau = infinity (or gamma = 0). Assumes q2<<q1

    Parameters
    ----------
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittivity in lower half-space

    Raises
    ------
    ValueError
        If model is not 'nonlocal', the only supported model. (Previously
        any other model crashed with UnboundLocalError on q2.)

    References
    ----------
    [1] Jablan et al. 2009 (note tau = 1/ gamma)
    '''
    if model != 'nonlocal':
        raise ValueError(
            "PlasmonDispersionLoss only supports model='nonlocal'; got %r" % (model,))
    q1 = InversePlasmonDispersion(omega,gamma,FermiLevel,eps1,eps2,T,model)
    pol = Polarizibility(q1,omega,gamma,FermiLevel,T=0)
    pol0= Polarizibility(q1,1e-9,gamma,FermiLevel,T=0)
    dpolq = dPolarizibility(q1,omega,gamma,FermiLevel,T,dvar='q')
    dpolw = dPolarizibility(q1,omega,gamma,FermiLevel,T,dvar='omega')
    # Eqn 15 of Ref 1, assuming q2 << q1
    numerator = np.imag(-pol) + gamma * (-1)*dpolw + (gamma/omega) * np.real(-pol * (1- (pol/pol0)))
    denominator = (1/q1) * np.real(-pol) - (-1)*dpolq
    q2 = numerator / denominator
    return q2
def dPlasmonDispersion(q,gamma,FermiLevel,eps1,eps2,T,model,dvar=None,diff=1e-7):
    '''
    Derivative of plasmon frequency with respect to dvar.

    Parameters
    ----------
    q: array-like, rad/m
    gamma: scalar, rad/s
        the scattering rate in units (1/s)
    FermiLevel: scalar, J
        the Fermi level
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittivity in lower half-space
    T: scalar, K
        Temperature
    dvar: 'FermiLevel': Take the partial wrt the Fermi level.
          'q': Not implemented; raises NotImplementedError.
          Any other value (including the default None) returns None,
          preserving the historical behavior.
    diff: Size of finite difference to use when computing the derivative.
          Method uses central difference.

    Raises
    ------
    NotImplementedError
        If dvar == 'q'. (Previously this branch was a silent `pass` that
        returned None as if it had computed something.)
    '''
    if dvar == 'FermiLevel':
        result = np.empty_like(q)
        for i, q0 in np.ndenumerate(q):
            w = lambda eF: PlasmonDispersion(q0,gamma,eF,eps1,eps2,T,model)
            # Central difference with a step proportional to the Fermi level
            e1, e2 = FermiLevel*(1-diff), FermiLevel*(1+diff)
            result[i] = (w(e2)-w(e1))/(2*FermiLevel*diff)
        return result
    if dvar=='q':
        # FIX: fail loudly instead of silently returning None
        raise NotImplementedError("dPlasmonDispersion: dvar='q' is not implemented")
    # Historical behavior for any other dvar (including None)
    return None
def DipoleDecayRate(z,omega,gamma,FermiLevel,T,eps1,eps2):
    '''
    Decay rate of a dipole emitter placed near graphene.
    Right now, the dipole only points perpendicular.
    This should be moved to the Emitter.Dipole object
    Eqn 5 in the SM of Ref 1.
    Parameters
    ----------
    eps1: scalar, unitless
        Permittivity in upper half-space
    eps2: scalar, unitless
        Permittiviy in lower half-space
    References
    ----------
    [1] Koppens et al. 2011
        URL: https://doi.org/10.1021/nl201771h
    '''
    warnings.warn('Monolayer.DipoleDecayRate: Ignoring dipole components in xy-plane')
    dipole = Emitter.Dipole()
    # Unit dipole moment along z (perpendicular to the graphene plane)
    d = np.array([0,0,1])
    integral = np.empty_like(omega)
    for i, w in np.ndenumerate(omega):
        # Out-of-plane wavevector in the upper half-space
        kperp = lambda q: np.sqrt(eps1*(w/sc.speed_of_light)**2 - q**2)
        # p-polarized Fresnel reflection off the graphene sheet
        rp = lambda q: FresnelReflection(q,w,gamma,FermiLevel,T,eps1,eps2,'p')
        # Perpendicular-dipole term of Eqn 5 (only |d_z| contributes here)
        perpterm = lambda q: 2*np.abs(d[2])**2 * q**2 * rp(q)
        # The small -1e-9j shift nudges the integration path off the real
        # axis, avoiding singular points of the integrand.
        integrand = lambda q: np.real( (q - 1e-9*1j) * np.real( perpterm(q-1e-9*1j) * np.exp(2*1j*kperp(q-1e-9*1j)*z) / kperp(q-1e-9*1j) ) )
        b = np.abs(_c.K) # Increasing this bound does not lead to more convergence
        # Break points passed to quad: the light line and the plasmon pole
        q_pol = np.sqrt(eps1) * (w/sc.speed_of_light)
        q_plasmon=InversePlasmonDispersion(w,gamma,FermiLevel,eps1,eps2,T,model='nonlocal')
        integral[i] = integrate.quad(integrand,1e-3,b,
                        points=(q_pol,q_plasmon),limit=100)[0]
    return dipole.DecayRate(omega,d) + (1/sc.hbar) * integral
| [
"numpy.sqrt",
"numpy.logaddexp",
"numpy.array",
"graphenemodeling.statistical_distributions.Lorentz",
"numpy.imag",
"scipy.optimize.root_scalar",
"numpy.ndenumerate",
"numpy.heaviside",
"numpy.exp",
"numpy.real",
"numpy.linspace",
"scipy.optimize.leastsq",
"scipy.optimize.minimize_scalar",
... | [((1396, 1411), 'numpy.array', 'np.array', (['_c.a1'], {}), '(_c.a1)\n', (1404, 1411), True, 'import numpy as np\n'), ((1421, 1436), 'numpy.array', 'np.array', (['_c.a2'], {}), '(_c.a2)\n', (1429, 1436), True, 'import numpy as np\n'), ((4008, 4042), 'numpy.array', 'np.array', (['[[H11, H12], [H12, H22]]'], {}), '([[H11, H12], [H12, H22]])\n', (4016, 4042), True, 'import numpy as np\n'), ((38712, 38728), 'numpy.empty_like', 'np.empty_like', (['d'], {}), '(d)\n', (38725, 38728), True, 'import numpy as np\n'), ((38747, 38764), 'numpy.ndenumerate', 'np.ndenumerate', (['d'], {}), '(d)\n', (38761, 38764), True, 'import numpy as np\n'), ((46133, 46149), 'numpy.atleast_1d', 'np.atleast_1d', (['q'], {}), '(q)\n', (46146, 46149), True, 'import numpy as np\n'), ((46254, 46271), 'numpy.ndenumerate', 'np.ndenumerate', (['q'], {}), '(q)\n', (46268, 46271), True, 'import numpy as np\n'), ((46745, 46757), 'numpy.abs', 'np.abs', (['pFit'], {}), '(pFit)\n', (46751, 46757), True, 'import numpy as np\n'), ((47248, 47268), 'numpy.empty_like', 'np.empty_like', (['omega'], {}), '(omega)\n', (47261, 47268), True, 'import numpy as np\n'), ((47290, 47311), 'numpy.ndenumerate', 'np.ndenumerate', (['omega'], {}), '(omega)\n', (47304, 47311), True, 'import numpy as np\n'), ((51096, 51115), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (51104, 51115), True, 'import numpy as np\n'), ((51130, 51150), 'numpy.empty_like', 'np.empty_like', (['omega'], {}), '(omega)\n', (51143, 51150), True, 'import numpy as np\n'), ((51168, 51189), 'numpy.ndenumerate', 'np.ndenumerate', (['omega'], {}), '(omega)\n', (51182, 51189), True, 'import numpy as np\n'), ((1524, 1551), 'numpy.array', 'np.array', (['[1, 3 ** (1 / 2)]'], {}), '([1, 3 ** (1 / 2)])\n', (1532, 1551), True, 'import numpy as np\n'), ((3679, 3691), 'numpy.conj', 'np.conj', (['H12'], {}), '(H12)\n', (3686, 3691), True, 'import numpy as np\n'), ((3758, 3768), 'numpy.real', 'np.real', (['k'], {}), '(k)\n', (3765, 3768), True, 
'import numpy as np\n'), ((3782, 3792), 'numpy.imag', 'np.imag', (['k'], {}), '(k)\n', (3789, 3792), True, 'import numpy as np\n'), ((3963, 3975), 'numpy.conj', 'np.conj', (['H12'], {}), '(H12)\n', (3970, 3975), True, 'import numpy as np\n'), ((10548, 10567), 'numpy.sign', 'np.sign', (['FermiLevel'], {}), '(FermiLevel)\n', (10555, 10567), True, 'import numpy as np\n'), ((14099, 14108), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (14105, 14108), True, 'import numpy as np\n'), ((16199, 16216), 'numpy.empty_like', 'np.empty_like', (['mu'], {}), '(mu)\n', (16212, 16216), True, 'import numpy as np\n'), ((16237, 16255), 'numpy.ndenumerate', 'np.ndenumerate', (['mu'], {}), '(mu)\n', (16251, 16255), True, 'import numpy as np\n'), ((30545, 30554), 'numpy.all', 'np.all', (['q'], {}), '(q)\n', (30551, 30554), True, 'import numpy as np\n'), ((35214, 35232), 'numpy.all', 'np.all', (['(omega == 0)'], {}), '(omega == 0)\n', (35220, 35232), True, 'import numpy as np\n'), ((37707, 37779), 'numpy.sqrt', 'np.sqrt', (['(eps1 * (omega / sc.speed_of_light) ** 2 - q ** 2 + 1e-09 * 1.0j)'], {}), '(eps1 * (omega / sc.speed_of_light) ** 2 - q ** 2 + 1e-09 * 1.0j)\n', (37714, 37779), True, 'import numpy as np\n'), ((37768, 37840), 'numpy.sqrt', 'np.sqrt', (['(eps2 * (omega / sc.speed_of_light) ** 2 - q ** 2 + 1e-09 * 1.0j)'], {}), '(eps2 * (omega / sc.speed_of_light) ** 2 - q ** 2 + 1e-09 * 1.0j)\n', (37775, 37840), True, 'import numpy as np\n'), ((44393, 44409), 'numpy.empty_like', 'np.empty_like', (['q'], {}), '(q)\n', (44406, 44409), True, 'import numpy as np\n'), ((44510, 44527), 'numpy.ndenumerate', 'np.ndenumerate', (['q'], {}), '(q)\n', (44524, 44527), True, 'import numpy as np\n'), ((44824, 44840), 'numpy.empty_like', 'np.empty_like', (['q'], {}), '(q)\n', (44837, 44840), True, 'import numpy as np\n'), ((44923, 44940), 'numpy.ndenumerate', 'np.ndenumerate', (['q'], {}), '(q)\n', (44937, 44940), True, 'import numpy as np\n'), ((46467, 46495), 'numpy.linspace', 'np.linspace', (['w1', 
'w2'], {'num': '(300)'}), '(w1, w2, num=300)\n', (46478, 46495), True, 'import numpy as np\n'), ((46620, 46668), 'scipy.optimize.leastsq', 'optimize.leastsq', (['ErrorFunc', 'p0'], {'args': '(omega, y)'}), '(ErrorFunc, p0, args=(omega, y))\n', (46636, 46668), False, 'from scipy import special, optimize, integrate\n'), ((50092, 50108), 'numpy.empty_like', 'np.empty_like', (['q'], {}), '(q)\n', (50105, 50108), True, 'import numpy as np\n'), ((50130, 50147), 'numpy.ndenumerate', 'np.ndenumerate', (['q'], {}), '(q)\n', (50144, 50147), True, 'import numpy as np\n'), ((51567, 51579), 'numpy.abs', 'np.abs', (['_c.K'], {}), '(_c.K)\n', (51573, 51579), True, 'import numpy as np\n'), ((9262, 9271), 'numpy.abs', 'np.abs', (['k'], {}), '(k)\n', (9268, 9271), True, 'import numpy as np\n'), ((10382, 10400), 'numpy.abs', 'np.abs', (['FermiLevel'], {}), '(FermiLevel)\n', (10388, 10400), True, 'import numpy as np\n'), ((10876, 10933), 'scipy.optimize.root_scalar', 'optimize.root_scalar', (['f'], {'x0': 'kf0', 'x1': '(kf0 * 0.9)', 'rtol': '(1e-10)'}), '(f, x0=kf0, x1=kf0 * 0.9, rtol=1e-10)\n', (10896, 10933), False, 'from scipy import special, optimize, integrate\n'), ((14908, 14924), 'numpy.empty_like', 'np.empty_like', (['E'], {}), '(E)\n', (14921, 14924), True, 'import numpy as np\n'), ((14946, 14963), 'numpy.ndenumerate', 'np.ndenumerate', (['E'], {}), '(E)\n', (14960, 14963), True, 'import numpy as np\n'), ((17245, 17263), 'numpy.sqrt', 'np.sqrt', (['(sc.pi * n)'], {}), '(sc.pi * n)\n', (17252, 17263), True, 'import numpy as np\n'), ((17595, 17685), 'scipy.optimize.root_scalar', 'optimize.root_scalar', (['f'], {'x0': 'mu0', 'x1': '(mu0 * 1.1 + 0.1 * sc.elementary_charge)', 'rtol': '(1e-10)'}), '(f, x0=mu0, x1=mu0 * 1.1 + 0.1 * sc.elementary_charge,\n rtol=1e-10)\n', (17615, 17685), False, 'from scipy import special, optimize, integrate\n'), ((38993, 39005), 'numpy.abs', 'np.abs', (['_c.K'], {}), '(_c.K)\n', (38999, 39005), True, 'import numpy as np\n'), ((39178, 39238), 
'scipy.integrate.quad', 'integrate.quad', (['integrand', 'a', 'b'], {'points': 'q_plasmon', 'limit': '(100)'}), '(integrand, a, b, points=q_plasmon, limit=100)\n', (39192, 39238), False, 'from scipy import special, optimize, integrate\n'), ((44336, 44352), 'numpy.sqrt', 'np.sqrt', (['radical'], {}), '(radical)\n', (44343, 44352), True, 'import numpy as np\n'), ((44729, 44760), 'scipy.optimize.brentq', 'optimize.brentq', (['root_eqn', 'a', 'b'], {}), '(root_eqn, a, b)\n', (44744, 44760), False, 'from scipy import special, optimize, integrate\n'), ((46180, 46196), 'graphenemodeling.statistical_distributions.Lorentz', 'sd.Lorentz', (['p', 'x'], {}), '(p, x)\n', (46190, 46196), True, 'import graphenemodeling.statistical_distributions as sd\n'), ((46222, 46232), 'numpy.size', 'np.size', (['q'], {}), '(q)\n', (46229, 46232), True, 'import numpy as np\n'), ((51220, 51273), 'numpy.sqrt', 'np.sqrt', (['(eps1 * (w / sc.speed_of_light) ** 2 - q ** 2)'], {}), '(eps1 * (w / sc.speed_of_light) ** 2 - q ** 2)\n', (51227, 51273), True, 'import numpy as np\n'), ((51654, 51667), 'numpy.sqrt', 'np.sqrt', (['eps1'], {}), '(eps1)\n', (51661, 51667), True, 'import numpy as np\n'), ((51807, 51880), 'scipy.integrate.quad', 'integrate.quad', (['integrand', '(0.001)', 'b'], {'points': '(q_pol, q_plasmon)', 'limit': '(100)'}), '(integrand, 0.001, b, points=(q_pol, q_plasmon), limit=100)\n', (51821, 51880), False, 'from scipy import special, optimize, integrate\n'), ((3843, 3871), 'numpy.exp', 'np.exp', (['(1.0j * kx * _c.a / 2)'], {}), '(1.0j * kx * _c.a / 2)\n', (3849, 3871), True, 'import numpy as np\n'), ((31262, 31293), 'numpy.empty_like', 'np.empty_like', (['w'], {'dtype': 'complex'}), '(w, dtype=complex)\n', (31275, 31293), True, 'import numpy as np\n'), ((31330, 31347), 'numpy.ndenumerate', 'np.ndenumerate', (['w'], {}), '(w)\n', (31344, 31347), True, 'import numpy as np\n'), ((38956, 38975), 'numpy.exp', 'np.exp', (['(-2 * q * d0)'], {}), '(-2 * q * d0)\n', (38962, 38975), True, 
'import numpy as np\n'), ((45283, 45383), 'scipy.optimize.minimize_scalar', 'optimize.minimize_scalar', (['root_eqn'], {'bounds': '(FermiLevel / sc.hbar * q0 / kF, b)', 'method': '"""bounded"""'}), "(root_eqn, bounds=(FermiLevel / sc.hbar * q0 / kF,\n b), method='bounded')\n", (45307, 45383), False, 'from scipy import special, optimize, integrate\n'), ((47477, 47565), 'scipy.optimize.minimize_scalar', 'optimize.minimize_scalar', (['root_eqn'], {'bounds': '(1e-06, reps * cutoff)', 'method': '"""bounded"""'}), "(root_eqn, bounds=(1e-06, reps * cutoff), method=\n 'bounded')\n", (47501, 47565), False, 'from scipy import special, optimize, integrate\n'), ((49060, 49073), 'numpy.imag', 'np.imag', (['(-pol)'], {}), '(-pol)\n', (49067, 49073), True, 'import numpy as np\n'), ((49113, 49145), 'numpy.real', 'np.real', (['(-pol * (1 - pol / pol0))'], {}), '(-pol * (1 - pol / pol0))\n', (49120, 49145), True, 'import numpy as np\n'), ((49176, 49189), 'numpy.real', 'np.real', (['(-pol)'], {}), '(-pol)\n', (49183, 49189), True, 'import numpy as np\n'), ((14366, 14375), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (14372, 14375), True, 'import numpy as np\n'), ((14431, 14440), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (14437, 14440), True, 'import numpy as np\n'), ((14441, 14454), 'numpy.abs', 'np.abs', (['_c.g0'], {}), '(_c.g0)\n', (14447, 14454), True, 'import numpy as np\n'), ((14672, 14681), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (14678, 14681), True, 'import numpy as np\n'), ((14682, 14695), 'numpy.abs', 'np.abs', (['_c.g0'], {}), '(_c.g0)\n', (14688, 14695), True, 'import numpy as np\n'), ((15048, 15064), 'numpy.sqrt', 'np.sqrt', (['(Z1 / Z0)'], {}), '(Z1 / Z0)\n', (15055, 15064), True, 'import numpy as np\n'), ((16319, 16342), 'graphenemodeling.statistical_distributions.FermiDirac', 'sd.FermiDirac', (['(e - m)', 'T'], {}), '(e - m, T)\n', (16332, 16342), True, 'import graphenemodeling.statistical_distributions as sd\n'), ((16446, 16505), 'scipy.integrate.quad', 
'integrate.quad', (['p_electron', '(0)', '(3 * _c.g0)'], {'points': '(_c.g0, m)'}), '(p_electron, 0, 3 * _c.g0, points=(_c.g0, m))\n', (16460, 16505), False, 'from scipy import special, optimize, integrate\n'), ((16526, 16584), 'scipy.integrate.quad', 'integrate.quad', (['p_hole', '(-3 * _c.g0)', '(0)'], {'points': '(-_c.g0, -m)'}), '(p_hole, -3 * _c.g0, 0, points=(-_c.g0, -m))\n', (16540, 16584), False, 'from scipy import special, optimize, integrate\n'), ((22925, 22959), 'numpy.sqrt', 'np.sqrt', (['(1 - ((zbar + 1) / x) ** 2)'], {}), '(1 - ((zbar + 1) / x) ** 2)\n', (22932, 22959), True, 'import numpy as np\n'), ((30915, 30934), 'numpy.logaddexp', 'np.logaddexp', (['x', '(-x)'], {}), '(x, -x)\n', (30927, 30934), True, 'import numpy as np\n'), ((31010, 31048), 'graphenemodeling.statistical_distributions.FermiDirac', 'sd.FermiDirac', (['(-energy - FermiLevel)', 'T'], {}), '(-energy - FermiLevel, T)\n', (31023, 31048), True, 'import graphenemodeling.statistical_distributions as sd\n'), ((31048, 31085), 'graphenemodeling.statistical_distributions.FermiDirac', 'sd.FermiDirac', (['(energy - FermiLevel)', 'T'], {}), '(energy - FermiLevel, T)\n', (31061, 31085), True, 'import graphenemodeling.statistical_distributions as sd\n'), ((45553, 45624), 'scipy.optimize.minimize_scalar', 'optimize.minimize_scalar', (['root_eqn_abs'], {'bounds': '(a, b)', 'method': '"""bounded"""'}), "(root_eqn_abs, bounds=(a, b), method='bounded')\n", (45577, 45624), False, 'from scipy import special, optimize, integrate\n'), ((3896, 3925), 'numpy.exp', 'np.exp', (['(-1.0j * kx * _c.a / 2)'], {}), '(-1.0j * kx * _c.a / 2)\n', (3902, 3925), True, 'import numpy as np\n'), ((14620, 14637), 'numpy.abs', 'np.abs', (['(E / _c.g0)'], {}), '(E / _c.g0)\n', (14626, 14637), True, 'import numpy as np\n'), ((14722, 14739), 'numpy.abs', 'np.abs', (['(E / _c.g0)'], {}), '(E / _c.g0)\n', (14728, 14739), True, 'import numpy as np\n'), ((15089, 15100), 'numpy.sqrt', 'np.sqrt', (['Z0'], {}), '(Z0)\n', (15096, 
15100), True, 'import numpy as np\n'), ((16403, 16426), 'graphenemodeling.statistical_distributions.FermiDirac', 'sd.FermiDirac', (['(e - m)', 'T'], {}), '(e - m, T)\n', (16416, 16426), True, 'import graphenemodeling.statistical_distributions as sd\n'), ((22768, 22793), 'numpy.arcsin', 'np.arcsin', (['((1 - zbar) / x)'], {}), '((1 - zbar) / x)\n', (22777, 22793), True, 'import numpy as np\n'), ((22793, 22818), 'numpy.arcsin', 'np.arcsin', (['((1 + zbar) / x)'], {}), '((1 + zbar) / x)\n', (22802, 22818), True, 'import numpy as np\n'), ((22856, 22890), 'numpy.sqrt', 'np.sqrt', (['(1 - ((zbar - 1) / x) ** 2)'], {}), '(1 - ((zbar - 1) / x) ** 2)\n', (22863, 22890), True, 'import numpy as np\n'), ((22987, 23031), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 - (zbar + 1e-09 * 1.0j) ** 2)'], {}), '(x ** 2 - (zbar + 1e-09 * 1.0j) ** 2)\n', (22994, 23031), True, 'import numpy as np\n'), ((32179, 32226), 'numpy.heaviside', 'np.heaviside', (['(sc.hbar * w - 2 * FermiLevel)', '(0.5)'], {}), '(sc.hbar * w - 2 * FermiLevel, 0.5)\n', (32191, 32226), True, 'import numpy as np\n'), ((14483, 14500), 'numpy.abs', 'np.abs', (['(E / _c.g0)'], {}), '(E / _c.g0)\n', (14489, 14500), True, 'import numpy as np\n'), ((14784, 14801), 'numpy.abs', 'np.abs', (['(E / _c.g0)'], {}), '(E / _c.g0)\n', (14790, 14801), True, 'import numpy as np\n'), ((31541, 31651), 'scipy.integrate.quad', 'integrate.quad', (['integrand_re', '(0)', '(10 * FermiLevel)'], {'points': '(FermiLevel / sc.hbar, 2 * FermiLevel / sc.hbar)'}), '(integrand_re, 0, 10 * FermiLevel, points=(FermiLevel / sc.\n hbar, 2 * FermiLevel / sc.hbar))\n', (31555, 31651), False, 'from scipy import special, optimize, integrate\n'), ((51381, 51393), 'numpy.abs', 'np.abs', (['d[2]'], {}), '(d[2])\n', (51387, 51393), True, 'import numpy as np\n'), ((3933, 3943), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3940, 3943), True, 'import numpy as np\n'), ((9363, 9373), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (9370, 9373), True, 'import numpy as 
np\n'), ((9374, 9384), 'numpy.imag', 'np.imag', (['k'], {}), '(k)\n', (9381, 9384), True, 'import numpy as np\n'), ((9472, 9482), 'numpy.real', 'np.real', (['k'], {}), '(k)\n', (9479, 9482), True, 'import numpy as np\n'), ((31675, 31785), 'scipy.integrate.quad', 'integrate.quad', (['integrand_im', '(0)', '(10 * FermiLevel)'], {'points': '(FermiLevel / sc.hbar, 2 * FermiLevel / sc.hbar)'}), '(integrand_im, 0, 10 * FermiLevel, points=(FermiLevel / sc.\n hbar, 2 * FermiLevel / sc.hbar))\n', (31689, 31785), False, 'from scipy import special, optimize, integrate\n'), ((32289, 32360), 'numpy.abs', 'np.abs', (['((2 * FermiLevel - sc.hbar * w) / (2 * FermiLevel + sc.hbar * w))'], {}), '((2 * FermiLevel - sc.hbar * w) / (2 * FermiLevel + sc.hbar * w))\n', (32295, 32360), True, 'import numpy as np\n'), ((9428, 9438), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (9435, 9438), True, 'import numpy as np\n'), ((9439, 9449), 'numpy.imag', 'np.imag', (['k'], {}), '(k)\n', (9446, 9449), True, 'import numpy as np\n')] |
import numpy as np
from plotter import Plotter, plot_ifs
def get_random_transform(transforms):
    """Sample one affine map from the IFS transform table.

    Each row of *transforms* is [a, b, c, d, e, f, p], where p is the
    probability of picking that row. Returns the 2x2 matrix
    [[a, b], [c, d]] and the translation vector [e, f].
    """
    probabilities = transforms[:, -1]
    row_index = np.random.choice(transforms.shape[0], p=probabilities)
    coefficients = transforms[row_index, :-1]
    return coefficients[:4].reshape(2, 2), coefficients[4:6]
def ifs(x0, y0, transforms, num_iters):
    """Run the iterated-function-system chaos game from (x0, y0).

    At every step a random affine map is drawn from *transforms* and
    applied to the current point. Returns an array of shape
    (num_iters + 1, 2) containing the visited points, start included.
    """
    point = np.hstack((x0, y0))
    trajectory = [point]
    for _ in range(num_iters):
        matrix, offset = get_random_transform(transforms)
        point = np.matmul(matrix, point) + offset
        trajectory.append(point)
    return np.array(trajectory)
def load_transforms():
    """Load the IFS transform tables shipped with the project.

    Returns a list with the Barnsley table first and the Von Koch
    table second, in the order the CSV files are read.
    """
    paths = ('../transforms/barnsley.csv', '../transforms/von_koch.csv')
    return [np.loadtxt(path) for path in paths]
def get_savepath(title):
    """Build the output GIF path for a fractal named *title*.

    The title is lower-cased and spaces become underscores, e.g.
    'Von Koch' -> '../images/von_koch.gif'.
    """
    slug = title.lower().replace(' ', '_')
    return f'../images/{slug}.gif'
def main():
    """Render an animated GIF for each configured IFS fractal.

    Iterates over the transform tables from load_transforms() (Barnsley
    first, Von Koch second), simulates each chaos game and animates the
    resulting trajectory to ../images/.
    """
    transforms = load_transforms()
    # Fix: the second title was a scrubbed '<NAME>' placeholder; it must
    # match the second transform file loaded (von_koch.csv) because the
    # title also determines the output file name via get_savepath().
    titles = ['Barnsley', 'Von Koch']
    # Number of chaos-game iterations per fractal.
    N = [3000, 2000]
    for transform, title, n in zip(transforms, titles, N):
        xy = ifs(0, 0, transform, n)
        plotter = Plotter(xy, title, incr=20)
        savepath = get_savepath(title)
        plotter.animate(savepath)

if __name__ == '__main__':
    main()
| [
"numpy.hstack",
"numpy.random.choice",
"numpy.array",
"plotter.Plotter",
"numpy.matmul",
"numpy.loadtxt"
] | [((110, 168), 'numpy.random.choice', 'np.random.choice', (['transforms.shape[0]'], {'p': 'transforms[:, -1]'}), '(transforms.shape[0], p=transforms[:, -1])\n', (126, 168), True, 'import numpy as np\n'), ((345, 364), 'numpy.hstack', 'np.hstack', (['(x0, y0)'], {}), '((x0, y0))\n', (354, 364), True, 'import numpy as np\n'), ((537, 549), 'numpy.array', 'np.array', (['XY'], {}), '(XY)\n', (545, 549), True, 'import numpy as np\n'), ((590, 630), 'numpy.loadtxt', 'np.loadtxt', (['"""../transforms/barnsley.csv"""'], {}), "('../transforms/barnsley.csv')\n", (600, 630), True, 'import numpy as np\n'), ((646, 686), 'numpy.loadtxt', 'np.loadtxt', (['"""../transforms/von_koch.csv"""'], {}), "('../transforms/von_koch.csv')\n", (656, 686), True, 'import numpy as np\n'), ((1099, 1126), 'plotter.Plotter', 'Plotter', (['xy', 'title'], {'incr': '(20)'}), '(xy, title, incr=20)\n', (1106, 1126), False, 'from plotter import Plotter, plot_ifs\n'), ((479, 498), 'numpy.matmul', 'np.matmul', (['abcd', 'xy'], {}), '(abcd, xy)\n', (488, 498), True, 'import numpy as np\n')] |
''' Running full protein length long MCMC simulation! '''
import numpy as np
import time
import pickle
import os
#from IPython.display import SVG
#from keras.utils.vis_utils import model_to_dot
from EVCouplingsGen import *
from evcouplings.couplings import CouplingsModel
from EVCouplingsStuff.seq_sele import *
from multiprocessing import Process, Queue, cpu_count
from metropolis import MetropolisHastings
def StartMCMC(gen_model, experiment_dir, high_seqs, params):
    """Run one Metropolis-Hastings chain against the generator model.

    The sampler itself writes its trajectory files under
    *experiment_dir*; *high_seqs* seeds the walkers.
    """
    sampler_options = dict(
        x0=high_seqs,
        stride=params['stride'],
        mapper=None,
        is_discrete=True,
        nwalkers=params['nwalkers'],
        save_trajectory=True,
        print_every=params['print_every'],
    )
    chain = MetropolisHastings(gen_model, experiment_dir, **sampler_options)
    chain.run(params['nsteps'])
def main(params):
    """Run a multi-process MCMC simulation over an EVCouplings model.

    Loads the couplings model, encodes the natural sequences, seeds the
    walkers with the highest-energy natural sequences, fans out one
    MetropolisHastings worker per core, then concatenates the per-worker
    output files into combined text files.

    NOTE(review): `datetime`, `torch`, `read_fa`, `encode_aa`, `onehot`
    and `hamiltonians` are not imported in this module's visible import
    block -- presumably they arrive via the wildcard import from
    EVCouplingsStuff.seq_sele; verify before refactoring.
    """
    start_time = time.time()
    # NOTE(review): hillclimb_time is assigned but never used below.
    hillclimb_time = 1297.168361934026
    params['stride'] = 1
    date_time = str(datetime.now()).replace(' ', '_').replace(':', '_') # ensures there aren't any issues saving this as a file name.
    experiment_name = params['exp_base_name']+'_datetime_'+str(date_time)
    experiment_dir = 'hill_experiments/'+experiment_name
    # Raises FileExistsError if the directory already exists.
    os.mkdir(experiment_dir)
    experiment_dir = experiment_dir+'/'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Loading in EVCouplings model
    focus_seqs = read_fa('EVCouplingsStuff/DYR_ECOLI_1_b0.5.a2m_trimmed.fa')
    evc_model = CouplingsModel('EVCouplingsStuff/DYR.model')
    # extracting the model parameters used to determine the evolutionary hamiltonian
    h = evc_model.h_i
    J = evc_model.J_ij
    # Optionally truncate the fields/couplings to the first
    # protein_length positions (<= 0 means use the full protein).
    if params['protein_length'] > 0:
        h = h[0:params['protein_length'], :]
        J = J[0:params['protein_length'], 0:params['protein_length'], :,:]
    # processing and plotting the natural sequences:
    # first by converting amino acids into integers and also onehots.
    enc_seqs=[]
    oh = []
    AA=h.shape[1] # number of amino acids
    for seq in focus_seqs['seq']:
        enc_seq = np.asarray(encode_aa(seq, evc_model.alphabet_map))
        if params['protein_length'] > 0:
            enc_seq = enc_seq[:params['protein_length']]
        enc_seqs.append(enc_seq)
        oh.append(onehot(enc_seq,AA)) # this could be made much more efficient with tensorflow operations.
    enc_seqs = np.asarray(enc_seqs)
    oh=np.asarray(oh) # of shape: [batch x L x AA]
    N = oh.shape[0] # batch size
    L = oh.shape[1] # length of the protein
    print('number and dimensions of the natural sequences', oh.shape)
    # loading in the environment class, used to score the evolutionary hamiltonians
    gen_model = EVCouplingsGenerator(L, AA, h, J, device, params['is_discrete'], gaussian_cov_noise = 1.0)
    # getting best natural sequences:
    # sort descending by energy and keep one seed sequence per walker.
    nat_energies = hamiltonians(oh, J, h)
    high_ind = np.argsort(-nat_energies)
    high_seqs = oh[high_ind][:params['nwalkers']]
    # Flatten each one-hot sequence to a single vector of length L*AA.
    high_seqs = high_seqs.reshape(high_seqs.shape[0], -1)
    assert params['ncores'] != 0, "need to set at least one core!"
    # ncores == -1 means "use every available core".
    if params['ncores'] == -1:
        params['ncores'] = cpu_count()
    # multicore generate new samples
    processes = [Process(target=StartMCMC, args=( gen_model,
        experiment_dir+'worker_'+str(i)+'_', high_seqs, params )) for i in range(params['ncores'])]
    for p in processes:
        p.start()
    for p in processes:
        # waits for all the processes to have completed before
        # continuing with the code.
        p.join()
    print('all processes are done!, trying to join together all of the files')
    # Concatenate every worker's output files into one combined file each.
    files_to_combine = ['MCMC_trajectories_energies.txt',
                        'MCMC_trajectories_seqs.txt']
    for f in files_to_combine:
        f_ending = f.split('.')[-1]
        f_start = f.split('.')[0]
        combo_file = experiment_dir+'combined_'+f_start+'.txt'
        with open(combo_file,'w') as write_out:
            for i in range(params['ncores']):
                worker_file = experiment_dir+'worker_'+str(i)+'_'+f
                # NOTE(review): files_to_combine only lists .txt files, so
                # the pickle branch below is dead in practice.
                if f_ending=='pickle':
                    temp = pickle.load(open(worker_file, 'rb'))
                    write_out.write('\n'.join('{} {} {}'.format(tup[0],tup[1], tup[2]) for tup in temp))
                elif f_ending=='txt':
                    temp = np.loadtxt(worker_file)
                    if f_start.split('_')[-1] == 'seqs':
                        np.savetxt(write_out, temp, fmt='%i')
                    elif f_start.split('_')[-1] == 'energies':
                        np.savetxt(write_out, temp, fmt='%2g')
                else:
                    raise Exception('File type not identified when trying to combine all worker outputs together!')
    print('Total run time in minutes: '+str((time.time()-start_time)/60))
| [
"metropolis.MetropolisHastings",
"evcouplings.couplings.CouplingsModel",
"numpy.asarray",
"multiprocessing.cpu_count",
"numpy.argsort",
"os.mkdir",
"numpy.savetxt",
"numpy.loadtxt",
"time.time"
] | [((572, 782), 'metropolis.MetropolisHastings', 'MetropolisHastings', (['gen_model', 'experiment_dir'], {'x0': 'high_seqs', 'stride': "params['stride']", 'mapper': 'None', 'is_discrete': '(True)', 'nwalkers': "params['nwalkers']", 'save_trajectory': '(True)', 'print_every': "params['print_every']"}), "(gen_model, experiment_dir, x0=high_seqs, stride=params[\n 'stride'], mapper=None, is_discrete=True, nwalkers=params['nwalkers'],\n save_trajectory=True, print_every=params['print_every'])\n", (590, 782), False, 'from metropolis import MetropolisHastings\n'), ((952, 963), 'time.time', 'time.time', ([], {}), '()\n', (961, 963), False, 'import time\n'), ((1300, 1324), 'os.mkdir', 'os.mkdir', (['experiment_dir'], {}), '(experiment_dir)\n', (1308, 1324), False, 'import os\n'), ((1570, 1614), 'evcouplings.couplings.CouplingsModel', 'CouplingsModel', (['"""EVCouplingsStuff/DYR.model"""'], {}), "('EVCouplingsStuff/DYR.model')\n", (1584, 1614), False, 'from evcouplings.couplings import CouplingsModel\n'), ((2459, 2479), 'numpy.asarray', 'np.asarray', (['enc_seqs'], {}), '(enc_seqs)\n', (2469, 2479), True, 'import numpy as np\n'), ((2487, 2501), 'numpy.asarray', 'np.asarray', (['oh'], {}), '(oh)\n', (2497, 2501), True, 'import numpy as np\n'), ((2968, 2993), 'numpy.argsort', 'np.argsort', (['(-nat_energies)'], {}), '(-nat_energies)\n', (2978, 2993), True, 'import numpy as np\n'), ((3228, 3239), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (3237, 3239), False, 'from multiprocessing import Process, Queue, cpu_count\n'), ((4432, 4455), 'numpy.loadtxt', 'np.loadtxt', (['worker_file'], {}), '(worker_file)\n', (4442, 4455), True, 'import numpy as np\n'), ((4898, 4909), 'time.time', 'time.time', ([], {}), '()\n', (4907, 4909), False, 'import time\n'), ((4537, 4574), 'numpy.savetxt', 'np.savetxt', (['write_out', 'temp'], {'fmt': '"""%i"""'}), "(write_out, temp, fmt='%i')\n", (4547, 4574), True, 'import numpy as np\n'), ((4662, 4700), 'numpy.savetxt', 'np.savetxt', 
(['write_out', 'temp'], {'fmt': '"""%2g"""'}), "(write_out, temp, fmt='%2g')\n", (4672, 4700), True, 'import numpy as np\n')] |
from Constants import QUADRATIC_FORWARD_RMS, CUBIC_FORWARD_RMS, DATA_EXT, DATA_PREC, EXACT_CUBIC_CONSTANT
from LoewnerRunFactory import LoewnerRunFactory
from numpy import square, mean, array, savetxt, absolute
from math import sqrt
class RootMeanSquareError:
    """Compare exact and numerical Loewner solutions via RMS error.

    For each driving function with a known exact solution, the numerical
    solver is re-run at a sweep of inner resolutions and the
    [resolution, error] pairs are written to disk.
    """

    def __init__(self, start_time, final_time, outer_points, inner_points, resolutions):
        """Store the run parameters and build the LoewnerRun factory."""

        # Time interval over which the comparisons are made.
        self.start_time = start_time
        self.final_time = final_time

        # Resolution of the comparison runs.
        self.outer_points = outer_points
        self.inner_points = inner_points

        # Inner resolutions swept when computing the RMS error.
        self.resolutions = resolutions

        # The individual runs must not compile modules or save plots/data.
        no_compile = no_save_data = no_save_plot = False

        # Factory producing the LoewnerRuns used to determine the RMS.
        self.rms_factory = LoewnerRunFactory(start_time, final_time, outer_points,
                                             inner_points, no_compile, no_save_plot,
                                             no_save_data)

    def calculate_rms(self, array_a, array_b):
        """Return the root-mean-square of the elementwise difference."""
        residual = absolute(array_a - array_b)
        return sqrt(mean(square(residual)))

    def quadratic_forward_error(self, points=None):
        """Compute and save RMS errors for the quadratic forward case."""

        # Driving functions with a known exact quadratic-forward solution.
        exact_runs = self.rms_factory.create_exact_quadratic_forward()

        # Fall back to the resolutions chosen at construction time.
        if points is None:
            points = self.resolutions

        for exact_run in exact_runs:

            errors = []

            # Solve the exact case once per driving function.
            exact_run.exact_quadratic_forward_loewner()

            # Approximate solutions at each requested inner resolution.
            for approx_run in self.rms_factory.vary_inner_res(exact_run.index, points):

                approx_run.quadratic_forward_loewner()
                print("Finished solution with inner res = " + str(approx_run.inner_points) + " for driving function " + str(approx_run.name))

                # Compare the approximation against the exact result.
                error = self.calculate_rms(exact_run.exact_quadratic_forward, approx_run.forward_results)
                errors.append([approx_run.inner_points, error])

            # Save the [resolution, error] pairs to the filesystem.
            savetxt(QUADRATIC_FORWARD_RMS + str(exact_run.index) + "-RMS" + DATA_EXT,
                    array(errors), fmt=DATA_PREC)

    def cubic_forward_error(self, points=None):
        """Compute and save RMS errors for the cubic forward case."""

        # Driving functions with a known exact cubic solution.
        exact_runs = self.rms_factory.create_exact_cubic()

        # Fall back to the resolutions chosen at construction time.
        if points is None:
            points = self.resolutions

        for exact_run in exact_runs:

            # The cubic case produces two solution branches (A and B).
            errors_a = []
            errors_b = []

            # Solve the exact case once per driving function.
            exact_run.exact_cubic_forward_loewner()

            # Approximate solutions at each requested inner resolution.
            for approx_run in self.rms_factory.vary_inner_res(exact_run.index, points, constant=EXACT_CUBIC_CONSTANT):

                approx_run.cubic_forward_loewner()
                print("Finished solution with inner res = " + str(approx_run.inner_points) + " for driving function " + str(approx_run.name))

                # Compare both branches against the exact results.
                error_a = self.calculate_rms(exact_run.exact_cubic_sol_a, approx_run.cubic_results_a)
                error_b = self.calculate_rms(exact_run.exact_cubic_sol_b, approx_run.cubic_results_b)

                errors_a.append([approx_run.inner_points, error_a])
                errors_b.append([approx_run.inner_points, error_b])

            # Save one file per solution branch.
            savetxt(CUBIC_FORWARD_RMS + str(exact_run.index) + "-RMS-A" + DATA_EXT,
                    array(errors_a), fmt=DATA_PREC)
            savetxt(CUBIC_FORWARD_RMS + str(exact_run.index) + "-RMS-B" + DATA_EXT,
                    array(errors_b), fmt=DATA_PREC)
| [
"numpy.array",
"numpy.absolute",
"LoewnerRunFactory.LoewnerRunFactory"
] | [((1063, 1182), 'LoewnerRunFactory.LoewnerRunFactory', 'LoewnerRunFactory', (['start_time', 'final_time', 'outer_points', 'inner_points', 'dont_compile', 'dont_save_plot', 'dont_save_data'], {}), '(start_time, final_time, outer_points, inner_points,\n dont_compile, dont_save_plot, dont_save_data)\n', (1080, 1182), False, 'from LoewnerRunFactory import LoewnerRunFactory\n'), ((3027, 3042), 'numpy.array', 'array', (['rms_list'], {}), '(rms_list)\n', (3032, 3042), False, 'from numpy import square, mean, array, savetxt, absolute\n'), ((5073, 5090), 'numpy.array', 'array', (['rms_list_a'], {}), '(rms_list_a)\n', (5078, 5090), False, 'from numpy import square, mean, array, savetxt, absolute\n'), ((5139, 5156), 'numpy.array', 'array', (['rms_list_b'], {}), '(rms_list_b)\n', (5144, 5156), False, 'from numpy import square, mean, array, savetxt, absolute\n'), ((1287, 1301), 'numpy.absolute', 'absolute', (['diff'], {}), '(diff)\n', (1295, 1301), False, 'from numpy import square, mean, array, savetxt, absolute\n')] |
"""Provide functions to write the Order Parameters into files."""
import numpy as np
# For debugging.
# TODO: Remove it after implement logging feature
DEBUG=False
def pandasdf2pdb(df):
    """Return a string in PDB format from a pandas dataframe.

    One fixed-width ATOM record is emitted per dataframe row; the chain
    identifier, alternate-location, insertion-code and segment fields
    are always left blank, occupancy is 1.00 and the temperature factor
    is 0.00.

    Parameters
    ----------
    df : pandas dataframe with columns "atnum", "atname", "resname", "resnum",
        "x", "y", "z"

    Returns
    -------
    str
        A string representing the PDB.
    """
    s = ""
    chain = ""
    for _, row_atom in df.iterrows():
        atnum, atname, resname, resnum, x, y, z = row_atom
        atnum = int(atnum)
        resnum = int(resnum)
        # See for pdb format:
        # https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html.
        # "alt" means alternate location indicator
        # "code" means code for insertions of residues
        # "seg" means segment identifier
        # "elt" means element symbol
        # The two branches differ only in how the atom name is padded:
        # 4-character names fill the whole name field, shorter names are
        # shifted one column right (standard PDB atom-name alignment).
        # NOTE(review): the element symbol is taken as the first character
        # of the atom name; presumably fine for C/N/O/H names here, but
        # verify for 4-character hydrogen names that start with a digit.
        if len(atname) == 4:
            s += ("{record_type:6s}{atnum:5d} {atname:<4s}{alt:1s}{resname:>4s}"
                  "{chain:1s}{resnum:>4d}{code:1s} {x:>8.3f}{y:>8.3f}{z:>8.3f}"
                  "{occupancy:>6.2f}{temp_fact:>6.2f} {seg:<2s}{elt:>2s}\n"
                  .format(record_type="ATOM", atnum=atnum, atname=atname, alt="",
                          resname=resname, chain=chain, resnum=resnum, code="",
                          x=x, y=y, z=z, occupancy=1.0, temp_fact=0.0, seg="",
                          elt=atname[0]))
        else:
            s += ("{record_type:6s}{atnum:5d} {atname:<3s}{alt:1s}{resname:>4s}"
                  "{chain:1s}{resnum:>4d}{code:1s} {x:>8.3f}{y:>8.3f}{z:>8.3f}"
                  "{occupancy:>6.2f}{temp_fact:>6.2f} {seg:<2s}{elt:>2s}\n"
                  .format(record_type="ATOM", atnum=atnum, atname=atname, alt="",
                          resname=resname, chain=chain, resnum=resnum, code="",
                          x=x, y=y, z=z, occupancy=1.0, temp_fact=0.0, seg="",
                          elt=atname[0]))
    return s
def write_OP(fileout, dic_atname2genericname, dic_OP, resname):
    """Write the order parameters into a text file.

    One line is written per (carbon, hydrogen) pair holding the generic
    OP name, the residue name, both atom names, and the mean, standard
    deviation and standard error of the OP over all lipids.

    Parameters
    ----------
    fileout: str
        name of the output file
    dic_atname2genericname: ordered dictionary
        dict of correspondance between generic H names and PDB names.
    dic_OP : ordered dictionary
        Each key of this dict is a couple carbon/H with the OP values as a list.
    resname : str
        lipid residue name taken from the json file.
    """
    with open(fileout, "w") as f:
        # Two header lines: column names, then a separator.
        f.write("# {:18s} {:7s} {:5s} {:5s} {:7s} {:7s} {:7s}\n"
                .format("OP_name", "resname", "atom1", "atom2", "OP_mean",
                        "OP_stddev", "OP_stem"))
        f.write("#-------------------------------"
                "-------------------------------------\n")
        # One output row per (carbon, hydrogen) pair.
        for (Cname, Hname), generic_name in dic_atname2genericname.items():
            if DEBUG:
                print("Pair ({}, {}):".format(Cname, Hname))
            # Rows of this 2D array are lipids, columns are frames.
            op_values = np.array(dic_OP[(Cname, Hname)])
            if DEBUG:
                print("Final OP array has shape (nb_lipids, nb_frames):", op_values.shape)
                print()
            # Grand mean over all lipids and frames for this pair.
            avg = np.mean(op_values)
            # One time-averaged OP per lipid (average across frames).
            per_lipid = np.mean(op_values, axis=1)
            std_dev = np.std(per_lipid)
            # Standard error of the mean across lipids.
            stem = np.std(per_lipid) / np.sqrt(len(per_lipid))
            f.write("{:20s} {:7s} {:5s} {:5s} {: 2.5f} {: 2.5f} {: 2.5f}\n"
                    .format(generic_name, resname, Cname, Hname, avg,
                            std_dev, stem))
def write_OP_alternate(fileout, universe_woH, dic_OP, resname):
    """Write the order parameters into a file with an alternate style.

    This style comes from the NMRLipids MATCH project script
    (https://github.com/NMRLipids/MATCH/blob/master/scratch/opAAUA_prod.py).
    For every carbon (in PDB atom order) one line is written per bound
    hydrogen (labelled HR/HS/HT), followed by an AVG line pooling all of
    that carbon's hydrogens.

    Parameters
    ----------
    fileout: str
        name of the output file
    universe_woH : MDAnalysis universe instance
        This is the universe *without* hydrogen.
    dic_OP : ordered dictionary
        Each key of this dict is a couple carbon/H with the OP values as a list.
    resname : str
        lipid residue name taken from the json file.
    """
    with open(fileout, "w") as f:
        f.write("Atom_name Hydrogen\tOP\t STD\t STDmean\n")
        # Collect the distinct carbon names appearing in dic_OP,
        # preserving first-seen order.
        list_unique_Cnames = []
        for Cname, Hname in dic_OP.keys():
            if Cname not in list_unique_Cnames:
                list_unique_Cnames.append(Cname)
        # Order of carbons is similar to that in the PDB.
        # Re-order the carbons to match the atom order of the first
        # residue of the selected lipid in the topology.
        list_unique_Cnames_ordered = []
        selection = f"resname {resname}"
        for atom in universe_woH.select_atoms(selection).residues[0].atoms:
            if atom.name in list_unique_Cnames:
                list_unique_Cnames_ordered.append(atom.name)
        # Now write output.
        for Cname in list_unique_Cnames_ordered:
            # Pools the per-lipid OP rows of every H bound to this carbon.
            cumulative_list_for_that_carbon = []
            # i enumerates the hydrogens of this carbon; the label is
            # HR for the 1st, HS for the 2nd and HT for the 3rd.
            for i, Hname in enumerate([H for C, H in dic_OP.keys() if C == Cname]):
                cumulative_list_for_that_carbon += dic_OP[Cname, Hname]
                # Per-H statistics: rows are lipids, columns are frames.
                a = np.array(dic_OP[Cname, Hname])
                mean = np.mean(a)
                means = np.mean(a, axis=1)
                std_dev = np.std(means)
                stem = np.std(means) / np.sqrt(len(means))
                if i == 0:
                    f.write("{:>7s}\t{:>8s} {:10.5f}\t{:10.5f}\t{:10.5f}\n"
                            .format(Cname, "HR", mean, std_dev, stem))
                elif i == 1:
                    f.write("{:>7s}\t{:>8s} {:10.5f}\t{:10.5f}\t{:10.5f}\n"
                            .format("", "HS", mean, std_dev, stem))
                elif i == 2:
                    f.write("{:>7s}\t{:>8s} {:10.5f}\t{:10.5f}\t{:10.5f}\n"
                            .format("", "HT", mean, std_dev, stem))
            # AVG line over all hydrogens of this carbon pooled together.
            a = np.array(cumulative_list_for_that_carbon)
            mean = np.mean(a)
            means = np.mean(a, axis=1)
            std_dev = np.std(means)
            stem = np.std(means) / np.sqrt(len(means))
            f.write("{:>7s}\t{:>8s} {:10.5f}\t{:10.5f}\t{:10.5f}\n\n"
                    .format("", "AVG", mean, std_dev, stem))
| [
"numpy.array",
"numpy.mean",
"numpy.std"
] | [((3682, 3712), 'numpy.array', 'np.array', (['dic_OP[Cname, Hname]'], {}), '(dic_OP[Cname, Hname])\n', (3690, 3712), True, 'import numpy as np\n'), ((3942, 3952), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (3949, 3952), True, 'import numpy as np\n'), ((4221, 4239), 'numpy.mean', 'np.mean', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (4228, 4239), True, 'import numpy as np\n'), ((4334, 4347), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (4340, 4347), True, 'import numpy as np\n'), ((6885, 6926), 'numpy.array', 'np.array', (['cumulative_list_for_that_carbon'], {}), '(cumulative_list_for_that_carbon)\n', (6893, 6926), True, 'import numpy as np\n'), ((6946, 6956), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (6953, 6956), True, 'import numpy as np\n'), ((6977, 6995), 'numpy.mean', 'np.mean', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (6984, 6995), True, 'import numpy as np\n'), ((7018, 7031), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (7024, 7031), True, 'import numpy as np\n'), ((4367, 4380), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (4373, 4380), True, 'import numpy as np\n'), ((6139, 6169), 'numpy.array', 'np.array', (['dic_OP[Cname, Hname]'], {}), '(dic_OP[Cname, Hname])\n', (6147, 6169), True, 'import numpy as np\n'), ((6193, 6203), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (6200, 6203), True, 'import numpy as np\n'), ((6228, 6246), 'numpy.mean', 'np.mean', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (6235, 6246), True, 'import numpy as np\n'), ((6273, 6286), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (6279, 6286), True, 'import numpy as np\n'), ((7051, 7064), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (7057, 7064), True, 'import numpy as np\n'), ((6310, 6323), 'numpy.std', 'np.std', (['means'], {}), '(means)\n', (6316, 6323), True, 'import numpy as np\n')] |
import numpy as np
from gym import spaces
import vrep
from envs.VrepEnv import catch_errors, VrepEnv
class SawyerEnv(VrepEnv):
    """
    Abstract parent class encapsulating behaviour common to environments with a Sawyer arm.

    Handles joint-handle discovery, pose randomisation on reset and
    velocity-based joint actuation through the V-REP remote API.
    """
    num_joints = 7
    # Per-joint velocity commands are limited to [-0.3, 0.3].
    action_space = spaces.Box(np.array([-0.3] * num_joints), np.array([0.3] * num_joints),
                               dtype=np.float32)
    curr_action = np.array([0.] * num_joints)
    scale = 0.01
    # Covariance matrix used to perturb the initial joint angles on reset.
    identity = scale * np.identity(num_joints)

    def __init__(self, *args, random_joints=True):
        """Connect to the scene, cache joint handles and start the sim.

        random_joints: when True, reset() perturbs the initial joint
        angles with Gaussian noise.
        """
        super().__init__(*args)
        self.random_joints = random_joints
        self.np_random = np.random.RandomState()
        # Get the initial configuration of the robot (needed to later reset the robot's pose)
        self.init_config_tree, _, _, _ = self.call_lua_function('get_configuration_tree')
        _, self.init_joint_angles, _, _ = self.call_lua_function('get_joint_angles')
        self.joint_handles = np.array([None] * self.num_joints)
        for i in range(self.num_joints):
            handle = catch_errors(vrep.simxGetObjectHandle(self.cid, 'Sawyer_joint' + str(i + 1),
                                                          vrep.simx_opmode_blocking))
            self.joint_handles[i] = handle
        # Start the simulation (the "Play" button in V-Rep should now be in a "Pressed" state)
        catch_errors(vrep.simxStartSimulation(self.cid, vrep.simx_opmode_blocking))

    def seed(self, seed=None):
        """Seed the RNG used for initial-pose randomisation."""
        self.np_random.seed(seed)

    def reset(self):
        """Reset the arm to its (optionally perturbed) initial pose."""
        if self.random_joints:
            initial_pose = self.np_random.multivariate_normal(self.init_joint_angles, self.identity)
        else:
            initial_pose = self.init_joint_angles
        self.call_lua_function('set_joint_angles', ints=self.init_config_tree, floats=initial_pose)
        # Bug fix: this previously built a 6-element vector although the arm
        # has 7 joints, so zip() in update_sim() silently skipped the last
        # joint after every reset.
        self.curr_action = np.array([0.] * self.num_joints)

    def _get_obs(self):
        """Return the current joint angles (length == num_joints)."""
        _, joint_angles, _, _ = self.call_lua_function('get_joint_angles')
        assert len(joint_angles) == self.num_joints
        return joint_angles

    def update_sim(self):
        """Apply curr_action as joint target velocities and step the sim."""
        for handle, velocity in zip(self.joint_handles, self.curr_action):
            catch_errors(vrep.simxSetJointTargetVelocity(self.cid,
                                                        int(handle), velocity, vrep.simx_opmode_oneshot))
        vrep.simxSynchronousTrigger(self.cid)
        vrep.simxGetPingTime(self.cid)
| [
"numpy.identity",
"vrep.simxSynchronousTrigger",
"vrep.simxGetPingTime",
"numpy.array",
"vrep.simxStartSimulation",
"numpy.random.RandomState"
] | [((414, 442), 'numpy.array', 'np.array', (['([0.0] * num_joints)'], {}), '([0.0] * num_joints)\n', (422, 442), True, 'import numpy as np\n'), ((287, 316), 'numpy.array', 'np.array', (['([-0.3] * num_joints)'], {}), '([-0.3] * num_joints)\n', (295, 316), True, 'import numpy as np\n'), ((318, 346), 'numpy.array', 'np.array', (['([0.3] * num_joints)'], {}), '([0.3] * num_joints)\n', (326, 346), True, 'import numpy as np\n'), ((482, 505), 'numpy.identity', 'np.identity', (['num_joints'], {}), '(num_joints)\n', (493, 505), True, 'import numpy as np\n'), ((659, 682), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (680, 682), True, 'import numpy as np\n'), ((983, 1017), 'numpy.array', 'np.array', (['([None] * self.num_joints)'], {}), '([None] * self.num_joints)\n', (991, 1017), True, 'import numpy as np\n'), ((1878, 1897), 'numpy.array', 'np.array', (['([0.0] * 6)'], {}), '([0.0] * 6)\n', (1886, 1897), True, 'import numpy as np\n'), ((2362, 2399), 'vrep.simxSynchronousTrigger', 'vrep.simxSynchronousTrigger', (['self.cid'], {}), '(self.cid)\n', (2389, 2399), False, 'import vrep\n'), ((2408, 2438), 'vrep.simxGetPingTime', 'vrep.simxGetPingTime', (['self.cid'], {}), '(self.cid)\n', (2428, 2438), False, 'import vrep\n'), ((1404, 1465), 'vrep.simxStartSimulation', 'vrep.simxStartSimulation', (['self.cid', 'vrep.simx_opmode_blocking'], {}), '(self.cid, vrep.simx_opmode_blocking)\n', (1428, 1465), False, 'import vrep\n')] |
import json
from datetime import datetime
from pathlib import Path
from typing import Dict
import numpy as np
import tensorflow as tf
from PIL import Image
from tensorflow.keras import Model
from .cell_cultures import PetriDish
from .cellar_automata import MorphCA
from .image_utils import to_rgb
def is_json_serializable(x):
    """Return True if *x* can be serialised to JSON, False otherwise."""
    try:
        json.dumps(x)
        return True
    except (TypeError, OverflowError):
        return False


class Watcher:
    """Accumulate experiment configuration in a (nested) dict and mirror
    every JSON-serialisable value as an attribute on the instance.

    Positional string arguments to log()/rlog() select a nested section
    of the config; keyword arguments are the values to record.
    """

    # Class-level default kept for backward compatibility with code that
    # reads Watcher.config on the class; instances get their own dict.
    config = {}

    def __init__(self, *args, **kwargs):
        # Bug fix: 'config' used to be a shared mutable class attribute,
        # so every Watcher instance wrote into the same dict. Give each
        # instance its own mapping before logging the constructor kwargs.
        self.config = {}
        self.log(*args, **kwargs)

    @staticmethod
    def _create_log_struct(struct: Dict, name: str) -> Dict:
        """Return struct[name], creating it as an empty dict if missing."""
        if name not in struct:
            struct[name] = {}
        return struct[name]

    def log(self, *args, **kwargs):
        """Record kwargs under the nested config section named by *args.

        Only JSON-serialisable values are stored (and mirrored as
        attributes); non-serialisable values are silently dropped.
        """
        if len(args) != 0:
            # Walk/create the nested section selected by the args.
            log_structure = self.config
            for arg in args:
                log_structure = self._create_log_struct(log_structure, arg)
            for att, val in kwargs.items():
                if is_json_serializable(val):
                    log_structure[att] = val
                    setattr(self, att, val)
        else:
            for att, val in kwargs.items():
                if is_json_serializable(val):
                    self.config[att] = val
                    setattr(self, att, val)

    def rlog(self, *args, **kwargs):
        """Like log(), but also return the logged value(s).

        Returns a single value for one kwarg, a tuple for several, and
        None when no kwargs were given.
        """
        self.log(*args, **kwargs)
        values = tuple(kwargs.values())
        if len(values) == 1:
            return values[0]
        elif len(values) > 1:
            return values
        return None

    def save_conf(self, save_path: Path):
        """Write the accumulated config as exp_config.json in save_path."""
        save_path = save_path.joinpath('exp_config.json')
        with save_path.open('w') as outfile:
            json.dump(self.config, outfile, indent=4)
class ExpWatcher(Watcher):
_checkpoints_folder = None
_pictures_folder = None
_video_folder = None
_tensorboard_logs = None
_petri_dish = None
_ca_growth_steps: int = 300
def __init__(self, exp_name: str, root: Path, *args, **kwargs):
super(ExpWatcher, self).__init__(exp_name=exp_name, root=str(root), *args, **kwargs)
date = datetime.now().strftime("%d.%m.%Y-%H.%M")
self.log(exp_date=date) # exp_name=exp_name, root=str(root)
self.exp_root = root.joinpath(exp_name + '_' + date)
self._experiments_preparation()
def _experiments_preparation(self):
self.exp_root.mkdir(parents=True, exist_ok=False)
self._checkpoints_folder = self.exp_root.joinpath('checkpoints')
self._checkpoints_folder.mkdir()
self._pictures_folder = self.exp_root.joinpath('train_pictures')
self._pictures_folder.mkdir()
self._video_folder = self.exp_root.joinpath('train_video')
self._video_folder.mkdir()
self._tensorboard_logs = self.exp_root.joinpath(f"tb_logs")
self._tensorboard_logs.mkdir()
file_writer = tf.summary.create_file_writer(str(self._tensorboard_logs))
file_writer.set_as_default()
def _save_model(self, trainable_rule: Model, train_step: int):
model_path = self._checkpoints_folder.joinpath("train_step_" + str(train_step))
model_path.mkdir()
trainable_rule.save(filepath=str(model_path), overwrite=True, save_format="tf")
def _save_ca_state_as_image(self,
train_step: int,
post_state: np.array,
img_count: int = 8,
max_img_count: int = 25,
img_in_line: int = 4):
path = open(str(self._pictures_folder) + f"/train_step_{train_step}.jpeg", 'wb')
assert img_count <= max_img_count, ""
assert len(post_state) >= img_count, ""
images = []
n_rows = img_count // img_in_line
for i in range(1, n_rows, 1):
images.append(np.hstack(to_rgb(post_state)[img_in_line * i:img_in_line * (i + 1)]))
image = np.vstack(images)
image = np.uint8(np.clip(image, 0, 1) * 255)
image = Image.fromarray(image)
image.save(path, 'jpeg', quality=95)
tf.summary.image("Example of CA figures", to_rgb(post_state[0])[None, ...], step=train_step)
def _save_ca_video(self, train_step: int, trainable_rule: Model):
print(f"[\n Saving a video recording of the growth of a cellular automaton ... ]")
print(f"[ Petri dish creation started ]")
self._petri_dish.rebase() # _petri_dish must be initialized
print(f"[ Petri dish creation completed ]")
# create cellar automata for embryogenesis
cellar_automata = MorphCA(petri_dish=self._petri_dish,
update_model=trainable_rule,
print_summary=False,
compatibility_test=False)
print(f"[ The simulation of the growth of the cellular automaton was launched ]")
cellar_automata.run_growth_simulation(steps=self._ca_growth_steps,
return_final_state=False,
write_video=True,
save_video_path=self._video_folder,
video_name=f"train_step_{train_step}")
print(f"[ The video recording of the cellular automaton growth is now complete. ]")
def save_config(self):
super(ExpWatcher, self).save_conf(self.exp_root)
def log_target(self, target: np.array):
target_image = to_rgb(target)
# Using the file writer, log the target image.
tf.summary.image("Target image", target_image[None, ...], step=0)
# Save target np.array as jpeg image
path = open(str(self.exp_root.joinpath('target_image.jpeg')), 'wb')
target_image = np.uint8(np.clip(target_image, 0, 1) * 255)
target_image = Image.fromarray(target_image)
target_image.save(path, 'jpeg', quality=95)
def log_petri_dish(self, petri_dish: PetriDish):
self._petri_dish = petri_dish
def log_train(self, step, loss, trainable_rule, next_state_batch):
if step == 1:
tf.summary.scalar('loss_log', data=np.log10(loss), step=step)
print(f"\r step: {step}, log10(loss): {np.round(np.log10(loss), decimals=3)}", end='')
self._save_ca_state_as_image(step, next_state_batch)
self._save_ca_video(step, trainable_rule)
self._save_model(trainable_rule, step)
if step % 10 == 0:
tf.summary.scalar('loss_log', data=np.log10(loss), step=step)
print(f"\r step: {step}, log10(loss): {np.round(np.log10(loss), decimals=3)}", end='')
if step % 100 == 0:
self._save_ca_state_as_image(step, next_state_batch)
if step % 1000 == 0:
self._save_model(trainable_rule, step)
self._save_ca_video(step, trainable_rule)
| [
"numpy.clip",
"PIL.Image.fromarray",
"numpy.log10",
"json.dumps",
"datetime.datetime.now",
"numpy.vstack",
"json.dump",
"tensorflow.summary.image"
] | [((347, 360), 'json.dumps', 'json.dumps', (['x'], {}), '(x)\n', (357, 360), False, 'import json\n'), ((3998, 4015), 'numpy.vstack', 'np.vstack', (['images'], {}), '(images)\n', (4007, 4015), True, 'import numpy as np\n'), ((4086, 4108), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (4101, 4108), False, 'from PIL import Image\n'), ((5673, 5738), 'tensorflow.summary.image', 'tf.summary.image', (['"""Target image"""', 'target_image[None, ...]'], {'step': '(0)'}), "('Target image', target_image[None, ...], step=0)\n", (5689, 5738), True, 'import tensorflow as tf\n'), ((5950, 5979), 'PIL.Image.fromarray', 'Image.fromarray', (['target_image'], {}), '(target_image)\n', (5965, 5979), False, 'from PIL import Image\n'), ((1741, 1782), 'json.dump', 'json.dump', (['self.config', 'outfile'], {'indent': '(4)'}), '(self.config, outfile, indent=4)\n', (1750, 1782), False, 'import json\n'), ((2157, 2171), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2169, 2171), False, 'from datetime import datetime\n'), ((4042, 4062), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (4049, 4062), True, 'import numpy as np\n'), ((5892, 5919), 'numpy.clip', 'np.clip', (['target_image', '(0)', '(1)'], {}), '(target_image, 0, 1)\n', (5899, 5919), True, 'import numpy as np\n'), ((6265, 6279), 'numpy.log10', 'np.log10', (['loss'], {}), '(loss)\n', (6273, 6279), True, 'import numpy as np\n'), ((6636, 6650), 'numpy.log10', 'np.log10', (['loss'], {}), '(loss)\n', (6644, 6650), True, 'import numpy as np\n'), ((6352, 6366), 'numpy.log10', 'np.log10', (['loss'], {}), '(loss)\n', (6360, 6366), True, 'import numpy as np\n'), ((6723, 6737), 'numpy.log10', 'np.log10', (['loss'], {}), '(loss)\n', (6731, 6737), True, 'import numpy as np\n')] |
import argparse
import pathlib
import tempfile
import warnings
import numpy as np
import qpimage
from drymass.cli import cli_convert, config, dialog, profile
def setup_config(pxsize=1e-6, medium_index=1.335, wavelength=550e-9):
_, path = tempfile.mkstemp(prefix="drymass_test_config_", suffix=".cfg")
cfg = config.ConfigFile(path=path)
cfg.set_value("bg", "phase profile", "tilt")
cfg.set_value("bg", "amplitude profile", "tilt")
cfg.set_value("roi", "dist border px", 3)
cfg.set_value("roi", "exclude overlap px", 5)
cfg.set_value("roi", "pad border px", 7)
cfg.set_value(section="meta", key="pixel size um", value=pxsize*1e6)
cfg.set_value(section="meta", key="wavelength nm",
value=wavelength*1e9)
cfg.set_value(section="meta", key="medium index", value=medium_index)
return cfg.path
def setup_test_data(radius_px=30, size=200, num=1):
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
cx = 80
cy = 120
r = np.sqrt((x - cx)**2 + (y - cy)**2)
pha = (r < radius_px) * 1.3
amp = .5 + np.roll(pha, 10) / pha.max()
qpi = qpimage.QPImage(data=(pha, amp), which_data="phase,amplitude")
path_in = tempfile.mktemp(suffix=".h5", prefix="drymass_test_cli_profile")
path_in = pathlib.Path(path_in)
with qpimage.QPSeries(h5file=path_in, h5mode="w", identifier="tes") as qps:
for ii in range(num):
qps.add_qpimage(qpi, identifier="test_{}".format(ii))
path_out = path_in.with_name(path_in.name + dialog.OUTPUT_SUFFIX)
path_out.mkdir()
return qpi, path_in, path_out
def test_add_fail():
path = setup_config()
argsadd = argparse.Namespace(subparser_name="add",
name="test_8473_prof",
path=path)
profile.cli_profile(args=argsadd)
try:
profile.cli_profile(args=argsadd)
except OSError:
pass
else:
assert False
# remove the profile again
argsrem = argparse.Namespace(subparser_name="remove",
name="test_8473_prof")
profile.cli_profile(args=argsrem)
def test_add_remove():
path = setup_config()
argsadd = argparse.Namespace(subparser_name="add",
name="test_8472_prof",
path=path)
profile.cli_profile(args=argsadd)
# verify that the profile was imported
pps = profile.get_profile_path(name="test_8472_prof")
assert pps.exists()
# remove the profile again
argsrem = argparse.Namespace(subparser_name="remove",
name="test_8472_prof")
profile.cli_profile(args=argsrem)
assert not pps.exists()
def test_convert_with_profile():
cfgpath = setup_config(pxsize=1.34e-6,
medium_index=1.346,
wavelength=554.2e-9)
_, path_in, path_out = setup_test_data()
argsadd = argparse.Namespace(subparser_name="add",
name="test_8440_prof_convert",
path=cfgpath)
profile.cli_profile(args=argsadd)
# perform conversion
h5data = cli_convert(path=path_in,
ret_data=True,
profile="test_8440_prof_convert")
cfg = config.ConfigFile(path_out)
assert np.allclose(cfg["meta"]["medium index"], 1.346)
assert np.allclose(cfg["meta"]["pixel size um"], 1.34)
assert np.allclose(cfg["meta"]["wavelength nm"], 554.2)
with qpimage.QPSeries(h5file=h5data, h5mode="r") as qps:
assert np.allclose(qps[0]["medium index"], 1.346)
assert np.allclose(qps[0]["pixel size"], 1.34e-6)
assert np.allclose(qps[0]["wavelength"], 554.2e-9)
# cleanup
argsrem = argparse.Namespace(subparser_name="remove",
name="test_8440_prof_convert")
profile.cli_profile(args=argsrem)
def test_export():
path = setup_config()
argsadd = argparse.Namespace(subparser_name="add",
name="test_8491_prof",
path=path)
profile.cli_profile(args=argsadd)
# export
tdir = tempfile.mkdtemp(prefix="test_drymass_profile_export_")
argsexp = argparse.Namespace(subparser_name="export",
path=tdir)
profile.cli_profile(args=argsexp)
assert (pathlib.Path(tdir) / "profile_test_8491_prof.cfg").exists()
# cleanup
argsrem = argparse.Namespace(subparser_name="remove",
name="test_8491_prof")
profile.cli_profile(args=argsrem)
def test_get_profile_path():
path = setup_config()
assert path == profile.get_profile_path(name=path)
def test_list_none(capsys):
if profile.get_profiles():
warnings.warn("Test cannot succeed, b/c there are user profiles.")
else:
argslist = argparse.Namespace(subparser_name="list")
profile.cli_profile(args=argslist)
captured = capsys.readouterr()
assert "No profiles in local library." in captured.out.strip()
def test_list_profile(capsys):
path = setup_config()
argsadd = argparse.Namespace(subparser_name="add",
name="test_8490_prof",
path=path)
profile.cli_profile(args=argsadd)
argslist = argparse.Namespace(subparser_name="list")
profile.cli_profile(args=argslist)
captured = capsys.readouterr()
assert "- test_8490_prof:" in captured.out.strip()
argsrem = argparse.Namespace(subparser_name="remove",
name="test_8490_prof")
profile.cli_profile(args=argsrem)
def test_remove_fail():
argsrem = argparse.Namespace(subparser_name="remove",
name="test_8474_prof")
try:
profile.cli_profile(args=argsrem)
except OSError:
pass
else:
assert False
if __name__ == "__main__":
# Run all tests
print("Cannot run all tests b/c of usage of `capsys` fixture! "
"Please use py.test.")
| [
"numpy.allclose",
"numpy.sqrt",
"drymass.cli.config.ConfigFile",
"numpy.roll",
"pathlib.Path",
"drymass.cli.profile.get_profile_path",
"drymass.cli.cli_convert",
"numpy.arange",
"tempfile.mktemp",
"drymass.cli.profile.get_profiles",
"argparse.Namespace",
"tempfile.mkdtemp",
"qpimage.QPSeries... | [((246, 308), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""drymass_test_config_"""', 'suffix': '""".cfg"""'}), "(prefix='drymass_test_config_', suffix='.cfg')\n", (262, 308), False, 'import tempfile\n'), ((319, 347), 'drymass.cli.config.ConfigFile', 'config.ConfigFile', ([], {'path': 'path'}), '(path=path)\n', (336, 347), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((1019, 1057), 'numpy.sqrt', 'np.sqrt', (['((x - cx) ** 2 + (y - cy) ** 2)'], {}), '((x - cx) ** 2 + (y - cy) ** 2)\n', (1026, 1057), True, 'import numpy as np\n'), ((1140, 1202), 'qpimage.QPImage', 'qpimage.QPImage', ([], {'data': '(pha, amp)', 'which_data': '"""phase,amplitude"""'}), "(data=(pha, amp), which_data='phase,amplitude')\n", (1155, 1202), False, 'import qpimage\n'), ((1217, 1281), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'suffix': '""".h5"""', 'prefix': '"""drymass_test_cli_profile"""'}), "(suffix='.h5', prefix='drymass_test_cli_profile')\n", (1232, 1281), False, 'import tempfile\n'), ((1296, 1317), 'pathlib.Path', 'pathlib.Path', (['path_in'], {}), '(path_in)\n', (1308, 1317), False, 'import pathlib\n'), ((1682, 1756), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""add"""', 'name': '"""test_8473_prof"""', 'path': 'path'}), "(subparser_name='add', name='test_8473_prof', path=path)\n", (1700, 1756), False, 'import argparse\n'), ((1827, 1860), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsadd'}), '(args=argsadd)\n', (1846, 1860), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((2021, 2087), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""remove"""', 'name': '"""test_8473_prof"""'}), "(subparser_name='remove', name='test_8473_prof')\n", (2039, 2087), False, 'import argparse\n'), ((2125, 2158), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsrem'}), '(args=argsrem)\n', (2144, 2158), False, 
'from drymass.cli import cli_convert, config, dialog, profile\n'), ((2224, 2298), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""add"""', 'name': '"""test_8472_prof"""', 'path': 'path'}), "(subparser_name='add', name='test_8472_prof', path=path)\n", (2242, 2298), False, 'import argparse\n'), ((2369, 2402), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsadd'}), '(args=argsadd)\n', (2388, 2402), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((2456, 2503), 'drymass.cli.profile.get_profile_path', 'profile.get_profile_path', ([], {'name': '"""test_8472_prof"""'}), "(name='test_8472_prof')\n", (2480, 2503), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((2573, 2639), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""remove"""', 'name': '"""test_8472_prof"""'}), "(subparser_name='remove', name='test_8472_prof')\n", (2591, 2639), False, 'import argparse\n'), ((2677, 2710), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsrem'}), '(args=argsrem)\n', (2696, 2710), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((2971, 3060), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""add"""', 'name': '"""test_8440_prof_convert"""', 'path': 'cfgpath'}), "(subparser_name='add', name='test_8440_prof_convert',\n path=cfgpath)\n", (2989, 3060), False, 'import argparse\n'), ((3127, 3160), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsadd'}), '(args=argsadd)\n', (3146, 3160), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((3199, 3273), 'drymass.cli.cli_convert', 'cli_convert', ([], {'path': 'path_in', 'ret_data': '(True)', 'profile': '"""test_8440_prof_convert"""'}), "(path=path_in, ret_data=True, profile='test_8440_prof_convert')\n", (3210, 3273), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), 
((3334, 3361), 'drymass.cli.config.ConfigFile', 'config.ConfigFile', (['path_out'], {}), '(path_out)\n', (3351, 3361), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((3373, 3420), 'numpy.allclose', 'np.allclose', (["cfg['meta']['medium index']", '(1.346)'], {}), "(cfg['meta']['medium index'], 1.346)\n", (3384, 3420), True, 'import numpy as np\n'), ((3432, 3479), 'numpy.allclose', 'np.allclose', (["cfg['meta']['pixel size um']", '(1.34)'], {}), "(cfg['meta']['pixel size um'], 1.34)\n", (3443, 3479), True, 'import numpy as np\n'), ((3491, 3539), 'numpy.allclose', 'np.allclose', (["cfg['meta']['wavelength nm']", '(554.2)'], {}), "(cfg['meta']['wavelength nm'], 554.2)\n", (3502, 3539), True, 'import numpy as np\n'), ((3806, 3880), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""remove"""', 'name': '"""test_8440_prof_convert"""'}), "(subparser_name='remove', name='test_8440_prof_convert')\n", (3824, 3880), False, 'import argparse\n'), ((3918, 3951), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsrem'}), '(args=argsrem)\n', (3937, 3951), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((4013, 4087), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""add"""', 'name': '"""test_8491_prof"""', 'path': 'path'}), "(subparser_name='add', name='test_8491_prof', path=path)\n", (4031, 4087), False, 'import argparse\n'), ((4158, 4191), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsadd'}), '(args=argsadd)\n', (4177, 4191), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((4216, 4271), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""test_drymass_profile_export_"""'}), "(prefix='test_drymass_profile_export_')\n", (4232, 4271), False, 'import tempfile\n'), ((4286, 4340), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""export"""', 'path': 'tdir'}), 
"(subparser_name='export', path=tdir)\n", (4304, 4340), False, 'import argparse\n'), ((4378, 4411), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsexp'}), '(args=argsexp)\n', (4397, 4411), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((4512, 4578), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""remove"""', 'name': '"""test_8491_prof"""'}), "(subparser_name='remove', name='test_8491_prof')\n", (4530, 4578), False, 'import argparse\n'), ((4616, 4649), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsrem'}), '(args=argsrem)\n', (4635, 4649), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((4799, 4821), 'drymass.cli.profile.get_profiles', 'profile.get_profiles', ([], {}), '()\n', (4819, 4821), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((5195, 5269), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""add"""', 'name': '"""test_8490_prof"""', 'path': 'path'}), "(subparser_name='add', name='test_8490_prof', path=path)\n", (5213, 5269), False, 'import argparse\n'), ((5340, 5373), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsadd'}), '(args=argsadd)\n', (5359, 5373), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((5389, 5430), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""list"""'}), "(subparser_name='list')\n", (5407, 5430), False, 'import argparse\n'), ((5435, 5469), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argslist'}), '(args=argslist)\n', (5454, 5469), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((5574, 5640), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""remove"""', 'name': '"""test_8490_prof"""'}), "(subparser_name='remove', name='test_8490_prof')\n", (5592, 5640), False, 'import argparse\n'), ((5678, 5711), 
'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsrem'}), '(args=argsrem)\n', (5697, 5711), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((5752, 5818), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""remove"""', 'name': '"""test_8474_prof"""'}), "(subparser_name='remove', name='test_8474_prof')\n", (5770, 5818), False, 'import argparse\n'), ((1327, 1389), 'qpimage.QPSeries', 'qpimage.QPSeries', ([], {'h5file': 'path_in', 'h5mode': '"""w"""', 'identifier': '"""tes"""'}), "(h5file=path_in, h5mode='w', identifier='tes')\n", (1343, 1389), False, 'import qpimage\n'), ((1878, 1911), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsadd'}), '(args=argsadd)\n', (1897, 1911), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((3550, 3593), 'qpimage.QPSeries', 'qpimage.QPSeries', ([], {'h5file': 'h5data', 'h5mode': '"""r"""'}), "(h5file=h5data, h5mode='r')\n", (3566, 3593), False, 'import qpimage\n'), ((3617, 3659), 'numpy.allclose', 'np.allclose', (["qps[0]['medium index']", '(1.346)'], {}), "(qps[0]['medium index'], 1.346)\n", (3628, 3659), True, 'import numpy as np\n'), ((3675, 3718), 'numpy.allclose', 'np.allclose', (["qps[0]['pixel size']", '(1.34e-06)'], {}), "(qps[0]['pixel size'], 1.34e-06)\n", (3686, 3718), True, 'import numpy as np\n'), ((3733, 3777), 'numpy.allclose', 'np.allclose', (["qps[0]['wavelength']", '(5.542e-07)'], {}), "(qps[0]['wavelength'], 5.542e-07)\n", (3744, 3777), True, 'import numpy as np\n'), ((4726, 4761), 'drymass.cli.profile.get_profile_path', 'profile.get_profile_path', ([], {'name': 'path'}), '(name=path)\n', (4750, 4761), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((4831, 4897), 'warnings.warn', 'warnings.warn', (['"""Test cannot succeed, b/c there are user profiles."""'], {}), "('Test cannot succeed, b/c there are user profiles.')\n", (4844, 4897), False, 'import warnings\n'), 
((4927, 4968), 'argparse.Namespace', 'argparse.Namespace', ([], {'subparser_name': '"""list"""'}), "(subparser_name='list')\n", (4945, 4968), False, 'import argparse\n'), ((4977, 5011), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argslist'}), '(args=argslist)\n', (4996, 5011), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((5869, 5902), 'drymass.cli.profile.cli_profile', 'profile.cli_profile', ([], {'args': 'argsrem'}), '(args=argsrem)\n', (5888, 5902), False, 'from drymass.cli import cli_convert, config, dialog, profile\n'), ((916, 931), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (925, 931), True, 'import numpy as np\n'), ((955, 970), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (964, 970), True, 'import numpy as np\n'), ((1101, 1117), 'numpy.roll', 'np.roll', (['pha', '(10)'], {}), '(pha, 10)\n', (1108, 1117), True, 'import numpy as np\n'), ((4424, 4442), 'pathlib.Path', 'pathlib.Path', (['tdir'], {}), '(tdir)\n', (4436, 4442), False, 'import pathlib\n')] |
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from randomized_l1 import RandomizedLasso
def cs(data: pd.DataFrame, threshold=0.5):
"""
求cs
:param data: Dataframe
:param threshold: 阈值,默认为0.5
:return: 一个Series,包括
"""
# Dataframe转Array
data_value = data.values
rows = data_value.shape[0]
# 相关度矩阵
mat = np.zeros((rows, rows), dtype='float32')
for i in range(rows):
data_eql = np.equal(data_value[i], data_value)
for j in range(i + 1, rows):
mat[i][j] = np.sum(data_eql[j]) / data_value.shape[1]
mat += mat.T
# 每行符合阈值条件个数的数组
k = np.sum(mat > threshold, axis=0)
# 每行符合阈值条件的数之和的数组
w = np.sum(np.ma.MaskedArray(mat, mask=(mat <= threshold)), axis=0)
# 生成输出
if np.max(k) == 0:
output = pd.Series([0, 0, 0], index=['CS_i', 'CS_mean', 'GS'],
dtype='float32')
else:
output = pd.Series([np.max(w), np.max(w) / k[np.argmax(w)], k[np.argmax(w)]], index=['CS_i', 'CS_mean', 'GS'],
dtype='float32')
return output
def randomized_lasso(x: np.ndarray, y: list, names: list, threshold=0, best_features=0):
"""
随机lasso算法
:param x: 数据集
:param y: 标签
:param names: 不包含标签的列名称
:param threshold: 阈值
:param best_features: 重要特征的数量
:return: 重要特征集和调整特征集的列名称
"""
# RandomizedLasso
rlasso = RandomizedLasso(verbose=True)
rlasso.fit(x, y)
best_feature_set_names = [] # 重要特征集(列标签)
adjusted_feature_set_names = [] # 调整特征集(列标签)
result = sorted(zip(map(lambda ii: round(ii, len(names)), rlasso.scores_), names), reverse=True)
if threshold > 0:
for j in result:
if j[0] >= threshold:
best_feature_set_names.append(j[1])
print('best feature:', j[0], j[1], sep=' ')
else:
adjusted_feature_set_names.append(j[1])
print('adjusted feature:', j[0], j[1], sep=' ')
elif best_features > 0:
for i in result[:best_features]:
print('best feature:', i[0], i[1], sep=' ')
best_feature_set_names.append(i[1])
for i in result[best_features:]:
adjusted_feature_set_names.append(i[1])
print('adjusted feature:', i[0], i[1], sep=' ')
else:
raise ValueError('需指定阈值或重要特征数量')
return best_feature_set_names, adjusted_feature_set_names
def data_clean(data: pd.DataFrame):
"""
清洗数据
:param data: 一个DataFrame
:return: 清洗后的DataFrame
"""
# 数据标签分离
ad_y = data.iloc[:, -1]
ad_x = data.iloc[:, 0:-1]
# 处理布尔类型
for i in range(ad_x.shape[1]):
if ad_x.iloc[:, i].dtype == 'bool':
ad_x.iloc[:, i] = ad_x.iloc[:, i].astype('int')
# 处理哑变量
ad_x = pd.get_dummies(ad_x)
ad_y = LabelEncoder().fit_transform(ad_y)
names = list(ad_x.columns.values)
ad_x['label'] = ad_y
return ad_x, ad_y, names
def cast_features(best_feature_names: list, adjusted_feature_names: list, sep='_'):
"""
恢复哑变量处理后的特征名称并去重
:param best_feature_names: 重要特征名称
:param adjusted_feature_names: 调整特征名称
:param sep: 分隔符
:return: 处理后的特征名称
"""
best_features = []
adjusted_features = []
for i in best_feature_names:
if len(i) > 1:
best_features.append(i.split(sep)[0])
else:
best_features.append(i[0])
for i in adjusted_feature_names:
if len(i) > 1:
adjusted_features.append(i.split(sep)[0])
else:
adjusted_features.append(i[0])
best_features = list(set(best_features))
adjusted_features = list(set(adjusted_features).difference(set(best_features)))
return best_features, adjusted_features
def data_dropna(data: pd.DataFrame, illegal_value=None):
"""
处理缺失值用
:param data: 一个DataFrame
:param illegal_value: 指定非法值,也可以不指定,用于清理如”?“,”-“类型的非法值
:return:
"""
# 清洗缺失值
data = data.drop_duplicates()
# 处理非法值
if illegal_value is not None:
for cols in data:
data = data.drop(data[data[cols] == illegal_value].index)
return data
def split(data: pd.DataFrame, groups=1, random=False):
"""
按列分组DataFrame
:param data: 需要分组的DataFrame
:param groups: 分组数量
:param random: 是否随机
:return:
"""
column_count = data.shape[1] - 1
ids = np.array(range(column_count))
if random:
np.random.shuffle(ids)
column_ids = []
group_size = np.math.ceil(column_count / groups)
for i in range(groups):
if group_size - i != 1:
column_ids.append(ids[:group_size])
ids = ids[group_size:]
else:
column_ids.append(ids)
data_list = []
for array in column_ids:
data_list.append(pd.concat([data.iloc[:, array], data.iloc[:, -1]], axis=1))
# for array in column_ids:
# data_list.append(data.iloc[:, array])
return data_list
def get_mcd(data: pd.DataFrame, groups=1, random=False, times=1):
"""
获取mcd
:param data: 处理缺失之后数据集
:param groups: 拆分的组数
:param random: 是否随机拆分
:param times: 如果选择了随机拆分,得到的mcd值可能会有较大波动,可以让算法多次计算求平均值以稳定mcd
:return: mcd
"""
cs_array = []
d_array = split(data, groups=groups, random=random)
for i in range(times):
for d in d_array:
cs_d = cs(d)
cs_array.append(cs_d['CS_mean'])
mcd = np.mean(cs_array)
return mcd | [
"pandas.Series",
"numpy.mean",
"numpy.math.ceil",
"sklearn.preprocessing.LabelEncoder",
"randomized_l1.RandomizedLasso",
"numpy.ma.MaskedArray",
"numpy.argmax",
"numpy.equal",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"pandas.get_dummies",
"pandas.concat",
"numpy.random.shuffle"
] | [((404, 443), 'numpy.zeros', 'np.zeros', (['(rows, rows)'], {'dtype': '"""float32"""'}), "((rows, rows), dtype='float32')\n", (412, 443), True, 'import numpy as np\n'), ((680, 711), 'numpy.sum', 'np.sum', (['(mat > threshold)'], {'axis': '(0)'}), '(mat > threshold, axis=0)\n', (686, 711), True, 'import numpy as np\n'), ((1475, 1504), 'randomized_l1.RandomizedLasso', 'RandomizedLasso', ([], {'verbose': '(True)'}), '(verbose=True)\n', (1490, 1504), False, 'from randomized_l1 import RandomizedLasso\n'), ((2897, 2917), 'pandas.get_dummies', 'pd.get_dummies', (['ad_x'], {}), '(ad_x)\n', (2911, 2917), True, 'import pandas as pd\n'), ((4653, 4688), 'numpy.math.ceil', 'np.math.ceil', (['(column_count / groups)'], {}), '(column_count / groups)\n', (4665, 4688), True, 'import numpy as np\n'), ((5602, 5619), 'numpy.mean', 'np.mean', (['cs_array'], {}), '(cs_array)\n', (5609, 5619), True, 'import numpy as np\n'), ((491, 526), 'numpy.equal', 'np.equal', (['data_value[i]', 'data_value'], {}), '(data_value[i], data_value)\n', (499, 526), True, 'import numpy as np\n'), ((751, 796), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['mat'], {'mask': '(mat <= threshold)'}), '(mat, mask=mat <= threshold)\n', (768, 796), True, 'import numpy as np\n'), ((828, 837), 'numpy.max', 'np.max', (['k'], {}), '(k)\n', (834, 837), True, 'import numpy as np\n'), ((862, 932), 'pandas.Series', 'pd.Series', (['[0, 0, 0]'], {'index': "['CS_i', 'CS_mean', 'GS']", 'dtype': '"""float32"""'}), "([0, 0, 0], index=['CS_i', 'CS_mean', 'GS'], dtype='float32')\n", (871, 932), True, 'import pandas as pd\n'), ((4591, 4613), 'numpy.random.shuffle', 'np.random.shuffle', (['ids'], {}), '(ids)\n', (4608, 4613), True, 'import numpy as np\n'), ((2934, 2948), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2946, 2948), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4963, 5021), 'pandas.concat', 'pd.concat', (['[data.iloc[:, array], data.iloc[:, -1]]'], {'axis': '(1)'}), 
'([data.iloc[:, array], data.iloc[:, -1]], axis=1)\n', (4972, 5021), True, 'import pandas as pd\n'), ((590, 609), 'numpy.sum', 'np.sum', (['data_eql[j]'], {}), '(data_eql[j])\n', (596, 609), True, 'import numpy as np\n'), ((1001, 1010), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (1007, 1010), True, 'import numpy as np\n'), ((1012, 1021), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (1018, 1021), True, 'import numpy as np\n'), ((1043, 1055), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (1052, 1055), True, 'import numpy as np\n'), ((1026, 1038), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (1035, 1038), True, 'import numpy as np\n')] |
import torch
import numpy as np
# static list of metrics
metricList = ['r1', 'r5', 'r10', 'mean', 'mrr']
# +1 - greater the better
# -1 - lower the better
trends = [1, 1, 1, -1, -1, 1]
def evaluateMetric(ranks, metric):
ranks = ranks.data.numpy()
if metric == 'r1':
ranks = ranks.reshape(-1)
return 100 * (ranks == 1).sum() / float(ranks.shape[0])
if metric == 'r5':
ranks = ranks.reshape(-1)
return 100 * (ranks <= 5).sum() / float(ranks.shape[0])
if metric == 'r10':
# ranks = ranks.view(-1)
ranks = ranks.reshape(-1)
# return 100*torch.sum(ranks <= 10).data[0]/float(ranks.size(0))
return 100 * (ranks <= 10).sum() / float(ranks.shape[0])
if metric == 'mean':
# ranks = ranks.view(-1).float()
ranks = ranks.reshape(-1).astype(float)
return ranks.mean()
if metric == 'mrr':
# ranks = ranks.view(-1).float()
ranks = ranks.reshape(-1).astype(float)
# return torch.reciprocal(ranks).mean().data[0]
return (1 / ranks).mean()
def computeMetrics(ranks):
results = {metric: evaluateMetric(ranks, metric) for metric in metricList}
return results
def scores_to_ranks(scores: torch.Tensor):
"""Convert model output scores into ranks."""
batch_size, num_rounds, num_options = scores.size()
scores = scores.view(-1, num_options)
# sort in descending order - largest score gets highest rank
sorted_ranks, ranked_idx = scores.sort(1, descending=True)
# i-th position in ranked_idx specifies which score shall take this position
# but we want i-th position to have rank of score at that position, do this conversion
ranks = ranked_idx.clone().fill_(0)
for i in range(ranked_idx.size(0)):
for j in range(num_options):
ranks[i,ranked_idx[i][j].data] = j
# convert from 0-99 ranks to 1-100 ranks
ranks += 1
ranks = ranks.view(batch_size, num_rounds, num_options)
return ranks
class NDCG(object):
    """Accumulates Normalized Discounted Cumulative Gain over batches.

    Call ``observe()`` once per batch, then ``retrieve()`` for the
    aggregated metric (mean and std over all observed examples).
    """

    def __init__(self):
        self._ndcg_numerator = 0.0
        self._ndcg_denominator = 0.0
        # per-example NDCG values, kept so retrieve() can report a std-dev
        self.ndcg_vals = []

    def observe(self,
                predicted_scores: torch.Tensor,
                target_relevance: torch.Tensor):
        """
        Observe model output scores and target ground truth relevance and accumulate NDCG metric.
        Parameters
        ----------
        predicted_scores: torch.Tensor
            A tensor of shape (batch_size, num_options), because dense annotations are
            available for only one randomly picked round out of ten.
        target_relevance: torch.Tensor
            A tensor of shape same as predicted scores, indicating ground truth relevance of
            each answer option for a particular round.
        """
        predicted_scores = predicted_scores.detach()
        # shape: (batch_size, 1, num_options)
        predicted_scores = predicted_scores.unsqueeze(1)
        predicted_ranks = scores_to_ranks(predicted_scores)
        # shape: (batch_size, num_options)
        # BUGFIX: squeeze only the rounds axis -- a bare .squeeze() would also
        # drop the batch axis when batch_size == 1.
        predicted_ranks = predicted_ranks.squeeze(1)
        batch_size, num_options = predicted_ranks.size()
        # k: number of options with non-zero relevance per example
        k = torch.sum(target_relevance != 0, dim=-1)
        # shape: (batch_size, num_options)
        _, rankings = torch.sort(predicted_ranks, dim=-1)
        # Sort relevance in descending order so highest relevance gets top rank.
        _, best_rankings = torch.sort(target_relevance, dim=-1, descending=True)
        # shape: (batch_size, )
        batch_ndcg = []
        for batch_index in range(batch_size):
            num_relevant = int(k.data[batch_index])
            dcg = self._dcg(
                rankings[batch_index][:num_relevant], target_relevance[batch_index]
            )
            best_dcg = self._dcg(
                best_rankings[batch_index][:num_relevant], target_relevance[batch_index]
            )
            # NOTE: when num_relevant == 0 this is 0/0 -> NaN, as in the
            # original implementation.
            batch_ndcg.append(dcg / best_dcg)
            self.ndcg_vals.append(dcg.data / best_dcg.data)
        self._ndcg_denominator += batch_size
        self._ndcg_numerator += sum(batch_ndcg)

    def _dcg(self, rankings: torch.Tensor, relevance: torch.Tensor):
        """DCG of *relevance* read in the order given by *rankings*."""
        # BUGFIX: keep indices on the device of `relevance` instead of forcing
        # .cuda(), so the metric also works on CPU-only machines.
        rankings = rankings.to(relevance.device)
        sorted_relevance = relevance[rankings].cpu().float()
        # log2 position discounts: positions 0,1,2,... -> log2(2), log2(3), ...
        # (torch.log2 replaces the deprecated numpy round-trip via
        # torch.autograd.Variable; numerically identical)
        discounts = torch.log2(torch.arange(len(rankings), dtype=torch.float) + 2)
        return torch.sum(sorted_relevance / discounts, dim=-1)

    def retrieve(self, reset: bool = True):
        """Return {'ndcg', 'ndcg_std'} accumulated so far, or {} if nothing observed.

        :param reset: clear the accumulators after reading (default True)
        """
        if self._ndcg_denominator > 0:
            metrics = {
                "ndcg": float(self._ndcg_numerator / self._ndcg_denominator),
                "ndcg_std": np.std(np.array(self.ndcg_vals))
            }
        else:
            metrics = {}
        if reset:
            self.reset()
        return metrics

    def reset(self):
        """Clear all accumulated statistics."""
        self._ndcg_numerator = 0.0
        self._ndcg_denominator = 0.0
        self.ndcg_vals = []
| [
"torch.sort",
"numpy.array",
"torch.sum",
"torch.from_numpy"
] | [((3181, 3221), 'torch.sum', 'torch.sum', (['(target_relevance != 0)'], {'dim': '(-1)'}), '(target_relevance != 0, dim=-1)\n', (3190, 3221), False, 'import torch\n'), ((3288, 3323), 'torch.sort', 'torch.sort', (['predicted_ranks'], {'dim': '(-1)'}), '(predicted_ranks, dim=-1)\n', (3298, 3323), False, 'import torch\n'), ((3432, 3485), 'torch.sort', 'torch.sort', (['target_relevance'], {'dim': '(-1)', 'descending': '(True)'}), '(target_relevance, dim=-1, descending=True)\n', (3442, 3485), False, 'import torch\n'), ((4434, 4481), 'torch.sum', 'torch.sum', (['(sorted_relevance / discounts)'], {'dim': '(-1)'}), '(sorted_relevance / discounts, dim=-1)\n', (4443, 4481), False, 'import torch\n'), ((4390, 4417), 'torch.from_numpy', 'torch.from_numpy', (['discounts'], {}), '(discounts)\n', (4406, 4417), False, 'import torch\n'), ((4703, 4727), 'numpy.array', 'np.array', (['self.ndcg_vals'], {}), '(self.ndcg_vals)\n', (4711, 4727), True, 'import numpy as np\n')] |
import argparse
import torch
from rl import rl_model, omss_env, rl_utils
from utils import config as cfg
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from sklearn.model_selection import train_test_split
import os
import wandb
from tqdm import tqdm
import time
# Run on GPU when CUDA is available, otherwise CPU; models and the policy
# below are created on this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def omss_train_val_test_split(val_pct, test_pct=None, test_idxs=None):
    """Split the SALAMI melspec files into train / val / test path arrays.

    :param val_pct: fraction of the non-test data held out for validation
    :param test_pct: fraction held out for test (used when test_idxs is falsy)
    :param test_idxs: explicit indices forming the test set, or None
    :return: (train, val, test) numpy arrays of file paths

    NOTE: relies on the module-level ``args`` for the random seed.
    """
    mel_dir = os.path.join(cfg.SALAMI_DIR, 'internet_melspecs')
    names = os.listdir(mel_dir)
    paths = np.array([os.path.join(mel_dir, name) for name in names])
    if test_idxs:
        # explicit test indices: everything else becomes the train/val pool
        test_set = paths[test_idxs]
        keep = np.setdiff1d(np.arange(len(names)), test_idxs)
        pool = paths[keep]
    else:
        pool, test_set = train_test_split(paths, test_size=test_pct, random_state=args.seed)
    train_set, val_set = train_test_split(pool, test_size=val_pct, random_state=args.seed)
    return train_set, val_set, test_set
def validation(q_net, policy, val_dataset, args):
    """Roll out one episode per validation song and average the rewards.

    The Q-network is switched to eval mode for the duration and restored to
    train mode before returning.

    :return: (mean score, mean f1) over the songs with usable annotations
    """
    q_net.eval()
    total_score = 0
    total_f1 = 0
    usable = len(val_dataset)
    with torch.no_grad():
        for fp in tqdm(val_dataset):
            env = omss_env.OMSSEnv(#q_net.module.get_frontend(),
                             q_net.get_frontend(),
                             args.num_clusters,
                             fp,
                             args.seq_max_len,  # TODO don't need this in val
                             cluster_encode=args.cluster_encode,
                             mode='test')
            # songs without annotation are dropped from the average
            if not env.check_anno():
                usable -= 1
                continue
            state = env.make()
            done = False
            while not done:
                action = policy.take_action(state, env, args.test_eps, args.num_clusters)
                print(action['action'])
                next_state, reward, done, info = env.step(action)
                state = next_state
                total_score += reward.item()
                total_f1 += reward.item()
    q_net.train()
    return total_score / usable, total_f1 / usable
def train(args):
    """Run the full DQN training loop.

    Builds a Q-network and a frozen target copy (optionally loading a
    pretrained frontend or resuming a checkpoint), a one-step replay buffer
    (plus an n-step buffer when ``args.n_step > 1``) and an epsilon-greedy
    policy, then trains epoch-by-epoch over the song dataset with per-epoch
    validation and checkpointing.  Metrics go to wandb when ``args.wandb``.
    """
    # seed everything for reproducibility
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # initialize model: nets[0] is the online net, nets[1] the target net
    gpus = [0, 1, 2, 3]
    backend_input_size = cfg.EMBEDDING_DIM + args.num_clusters if args.cluster_encode else cfg.EMBEDDING_DIM
    nets = [rl_model.QNet(input_shape=(cfg.BIN, cfg.CHUNK_LEN),
                          embedding_size=backend_input_size,
                          hidden_size=args.hidden_size,
                          num_layers=args.num_layers,
                          num_heads=args.num_heads,
                          num_clusters=args.num_clusters,
                          cluster_encode=args.cluster_encode,
                          use_rnn=args.use_rnn,
                          freeze_frontend=args.freeze_frontend).to(device) for _ in range(2)]
    q_net = nets[0]
    # load a pretrained frontend, or resume a full checkpoint
    if len(args.pretrained) > 3:
        q_net.load_frontend(args.pretrained)
        print('load pretrained frontend model!')
    elif args.resume_path:
        checkpoint = torch.load(args.resume_path)
        print(checkpoint['best_score'])
        q_net.load_state_dict(checkpoint['state_dict'])
        print('load best q net!')
    # target network mirrors the online net and stays in eval mode
    target_q_net = nets[1]
    target_q_net.load_state_dict(q_net.state_dict())
    target_q_net.eval()
    if args.parallel:
        # q_net, target_q_net = [nn.parallel.DistributedDataParallel(
        q_net, target_q_net = [nn.DataParallel(
            net, device_ids=gpus, output_device=gpus[0]) for net in (q_net, target_q_net)]
    # prepare dataset (file paths)
    test_idxs = None
    if args.test_idxs:
        # load test set indexs TODO
        test_idxs = []
    train_dataset, val_dataset, test_dataset = omss_train_val_test_split(cfg.val_pct, cfg.test_pct, test_idxs)
    # optimizer: only the backend parameters when the frontend is frozen
    if args.freeze_frontend:
        optim = torch.optim.Adam(q_net._backend.parameters(), lr=args.lr)
    else:
        optim = torch.optim.Adam(q_net.parameters(), lr=args.lr)
    # epsilon-greedy policy wrapping the online/target nets
    policy = rl_utils.Policy(q_net=q_net,
                             target_q_net=target_q_net,
                             gamma=args.gamma,
                             target_update_freq=args.target_update_freq,
                             n_step=args.n_step,
                             optim=optim,
                             device=device)
    # use two buffers to calculate one-step and n-step losses
    buffer = rl_utils.ReplayBuffer(args.buffer_size,
                                   args.num_clusters,
                                   backend_input_size,
                                   1, args.priority, args.alpha, args.gamma)
    if args.n_step > 1:
        n_buffer = rl_utils.ReplayBuffer(args.buffer_size,
                                       args.num_clusters,
                                       backend_input_size,
                                       args.n_step, args.priority, args.alpha, args.gamma)
    # log
    run_id = time.strftime("%m%d%H%M", time.localtime())
    if args.wandb:
        # SECURITY: hard-coded API key checked into source; move it to an
        # environment variable (e.g. WANDB_API_KEY) instead.
        wandb.login(key='1dd98ff229fabf915050f551d8d8adadc9276b51')
        wandb.init(project='online_mss', name=run_id, config=args)
        wandb.config.update(args)
    exp_dir = os.path.join(cfg.RL_EXP_DIR, run_id)
    if not os.path.exists(exp_dir):
        os.makedirs(exp_dir)
    # train loop
    best_score = 0
    for i in range(args.epoch_num):
        eps = args.train_eps  # TODO: where to put these?
        beta = args.beta
        step_count = 0
        train_score = 0
        # iterate over the train set in a fresh random order each epoch
        np.random.shuffle(train_dataset)
        for j in tqdm(range(len(train_dataset))):
            # BUGFIX: a leftover debug `continue` used to sit here, skipping
            # every song so that no training step was ever executed.
            fp = train_dataset[j]
            print(fp)
            # start a new environment TODO: to save memory, load spectrogram every epoch or load all from training start?
            env = omss_env.OMSSEnv(#q_net.module.get_frontend(),
                             q_net.get_frontend(),
                             args.num_clusters,
                             fp,
                             args.seq_max_len,
                             cluster_encode=args.cluster_encode,
                             mode='train')  # TODO: use which frontend?
            if not env.check_anno():
                continue
            state = env.make()
            done = False
            song_score = 0
            song_update_count = 0
            mean_loss = 0
            # step loop
            tic = time.time()
            while not done:
                # take action
                action = policy.take_action(state, env, eps, args.num_clusters)
                selected_action = action['action']
                # take a step
                next_state, reward, done, info = env.step(action)
                song_score += reward.item()
                # add transition to buffer
                if args.n_step > 1:
                    # this will return none if not reaching n_step else the first transition
                    one_step_transition = n_buffer.store(state, selected_action, reward, next_state, done)
                    if one_step_transition:
                        buffer.store(*one_step_transition)
                else:
                    buffer.store(state, selected_action, reward, next_state, done)
                state = next_state
                # when buffer reaches the required size, training is ready
                if len(buffer) >= args.batch_size:
                    batch = buffer.sample(args.batch_size, beta)
                    idxs = batch['idxs']
                    n_batch = n_buffer.sample_from_idxs(idxs, beta) if args.n_step > 1 else None
                    ele_wise_loss, loss = policy.update(batch, n_batch, optim)
                    mean_loss += loss.item()
                    if args.priority:
                        # PER: update priorities TODO not understand
                        loss_for_prior = ele_wise_loss.detach().cpu().numpy()
                        new_priorities = loss_for_prior + args.prior_eps
                        buffer.update_priorities(idxs, new_priorities)
                    song_update_count += 1
                    # linearly decrease epsilon
                    if eps > args.final_train_eps:
                        eps -= (args.train_eps - args.final_train_eps) * args.train_eps_decay
                    print(step_count, eps)
                    # update beta (anneal toward final_beta over beta_anneal_step steps)
                    if args.priority:
                        if step_count <= args.beta_anneal_step:
                            beta = args.beta - step_count / args.beta_anneal_step * \
                                (args.beta - args.final_beta)
                        else:
                            beta = args.final_beta
                if args.wandb:
                    wandb.log({
                        'explore/action': selected_action,
                        'explore/reward': reward,
                        'explore/eps': eps,
                        'explore/beta': beta})
                step_count += 1
            toc = time.time()
            print('an episode takes {}s'.format(toc-tic))
            # reset buffer after iterate over one song TODO: maybe not useful
            # buffer.reset()
            # n_buffer.reset()
            train_score += song_score
            if args.wandb:
                # record the metric for every song in an epoch
                episode_metrics = {
                    'episode/loss': mean_loss / song_update_count,
                    'episode/score': song_score,
                    'episode/epoch': (j + 1 + (len(train_dataset) * i)) / len(train_dataset)
                }
                wandb.log(episode_metrics)
        # val after one epoch
        score, f1 = validation(q_net, policy, val_dataset, args)
        if args.wandb:
            val_metrics = {
                'val/train_score': train_score,
                'val/score': score,
                'val/f1': f1
            }
            wandb.log(val_metrics)
        checkpoint = {
            'best_score': best_score,
            'state_dict': q_net.state_dict()
        }
        # keep both the best (by validation f1) and the most recent checkpoint
        if f1 > best_score:
            checkpoint['best_score'] = f1
            best_score = f1
            torch.save(checkpoint, os.path.join(exp_dir, "best_q_net.pth"))
        torch.save(checkpoint, os.path.join(exp_dir, "last_q_net.pth"))
    wandb.finish()
if __name__ == '__main__':
    # Command-line entry point: parse hyperparameters and start training.
    parser = argparse.ArgumentParser()
    so_far_best_pretrained = os.path.join(cfg.SUP_EXP_DIR, '03020414', 'unsup_embedding_best.pt')
    # general
    parser.add_argument('--seed', type=int, default=8)
    parser.add_argument('--freeze_frontend', action='store_true')
    parser.add_argument('--test_idxs', type=str, default=None)
    parser.add_argument('--wandb', action='store_true')
    parser.add_argument('--parallel', action='store_true')
    # embedding model
    parser.add_argument('--pretrained', type=str, default=so_far_best_pretrained)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--hidden_size', type=int, default=128) #*
    parser.add_argument('--num_layers', type=int, default=1) #*
    parser.add_argument('--num_heads', type=int, default=1) # *
    # rl
    # backend
    parser.add_argument('--resume_path', type=str, default=None)
    parser.add_argument('--seq_max_len', type=int, default=128)
    parser.add_argument('--num_clusters', type=int, default=5) # *
    parser.add_argument('--cluster_encode', action='store_true')
    #parser.add_argument('--max_grad_norm', type=float, default=2.0)
    parser.add_argument('--use_rnn', action='store_true')
    parser.add_argument('--epoch_num', type=int, default=10)
    parser.add_argument('--gamma', type=float, default=0.99)
    parser.add_argument('--target_update_freq', type=int, default=100)
    ## eps greedy search
    parser.add_argument('--train_eps', type=float, default=1.)
    parser.add_argument('--final_train_eps', type=float, default=0.05)
    parser.add_argument('--train_eps_decay', type=float, default=1/2000)
    parser.add_argument('--test_eps', type=float, default=0.)
    ## priority buffer
    parser.add_argument('--priority', action='store_true')
    parser.add_argument('--prior_eps', type=float, default=1e-6)
    parser.add_argument('--alpha', type=float, default=0.2)
    parser.add_argument('--beta', type=float, default=0.4)
    # NOTE(review): default 1e5 is a float even though type=int (argparse only
    # converts command-line values, not defaults) -- confirm downstream use.
    parser.add_argument('--beta_anneal_step', type=int, default=1e5) # 400 * n_song
    parser.add_argument('--final_beta', type=float, default=1.)
    ## buffer
    parser.add_argument('--buffer_size', type=int, default=128)
    parser.add_argument('--batch_size', type=int, default=64)
    ## n step
    parser.add_argument('--n_step', type=int, default=3)
    args = parser.parse_args()
    train(args)
| [
"rl.rl_utils.Policy",
"wandb.log",
"wandb.init",
"torch.cuda.is_available",
"wandb.login",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"rl.rl_model.QNet",
"wandb.config.update",
"numpy.random.seed",
"time.localtime",
"sklearn.model_selection.train_test_split",
"rl.rl_utils.R... | [((447, 496), 'os.path.join', 'os.path.join', (['cfg.SALAMI_DIR', '"""internet_melspecs"""'], {}), "(cfg.SALAMI_DIR, 'internet_melspecs')\n", (459, 496), False, 'import os\n'), ((509, 528), 'os.listdir', 'os.listdir', (['mel_dir'], {}), '(mel_dir)\n', (519, 528), False, 'import os\n'), ((928, 1006), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_val_dataset'], {'test_size': 'val_pct', 'random_state': 'args.seed'}), '(train_val_dataset, test_size=val_pct, random_state=args.seed)\n', (944, 1006), False, 'from sklearn.model_selection import train_test_split\n'), ((2347, 2372), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2361, 2372), True, 'import numpy as np\n'), ((2377, 2405), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2394, 2405), False, 'import torch\n'), ((4355, 4529), 'rl.rl_utils.Policy', 'rl_utils.Policy', ([], {'q_net': 'q_net', 'target_q_net': 'target_q_net', 'gamma': 'args.gamma', 'target_update_freq': 'args.target_update_freq', 'n_step': 'args.n_step', 'optim': 'optim', 'device': 'device'}), '(q_net=q_net, target_q_net=target_q_net, gamma=args.gamma,\n target_update_freq=args.target_update_freq, n_step=args.n_step, optim=\n optim, device=device)\n', (4370, 4529), False, 'from rl import rl_model, omss_env, rl_utils\n'), ((4766, 4890), 'rl.rl_utils.ReplayBuffer', 'rl_utils.ReplayBuffer', (['args.buffer_size', 'args.num_clusters', 'backend_input_size', '(1)', 'args.priority', 'args.alpha', 'args.gamma'], {}), '(args.buffer_size, args.num_clusters,\n backend_input_size, 1, args.priority, args.alpha, args.gamma)\n', (4787, 4890), False, 'from rl import rl_model, omss_env, rl_utils\n'), ((5565, 5601), 'os.path.join', 'os.path.join', (['cfg.RL_EXP_DIR', 'run_id'], {}), '(cfg.RL_EXP_DIR, run_id)\n', (5577, 5601), False, 'import os\n'), ((11003, 11017), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (11015, 11017), False, 'import wandb\n'), 
((11063, 11088), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11086, 11088), False, 'import argparse\n'), ((11119, 11187), 'os.path.join', 'os.path.join', (['cfg.SUP_EXP_DIR', '"""03020414"""', '"""unsup_embedding_best.pt"""'], {}), "(cfg.SUP_EXP_DIR, '03020414', 'unsup_embedding_best.pt')\n", (11131, 11187), False, 'import os\n'), ((322, 347), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (345, 347), False, 'import torch\n'), ((824, 889), 'sklearn.model_selection.train_test_split', 'train_test_split', (['fps'], {'test_size': 'test_pct', 'random_state': 'args.seed'}), '(fps, test_size=test_pct, random_state=args.seed)\n', (840, 889), False, 'from sklearn.model_selection import train_test_split\n'), ((1196, 1211), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1209, 1211), False, 'import torch\n'), ((5039, 5173), 'rl.rl_utils.ReplayBuffer', 'rl_utils.ReplayBuffer', (['args.buffer_size', 'args.num_clusters', 'backend_input_size', 'args.n_step', 'args.priority', 'args.alpha', 'args.gamma'], {}), '(args.buffer_size, args.num_clusters,\n backend_input_size, args.n_step, args.priority, args.alpha, args.gamma)\n', (5060, 5173), False, 'from rl import rl_model, omss_env, rl_utils\n'), ((5345, 5361), 'time.localtime', 'time.localtime', ([], {}), '()\n', (5359, 5361), False, 'import time\n'), ((5390, 5449), 'wandb.login', 'wandb.login', ([], {'key': '"""1dd98ff229fabf915050f551d8d8adadc9276b51"""'}), "(key='1dd98ff229fabf915050f551d8d8adadc9276b51')\n", (5401, 5449), False, 'import wandb\n'), ((5458, 5516), 'wandb.init', 'wandb.init', ([], {'project': '"""online_mss"""', 'name': 'run_id', 'config': 'args'}), "(project='online_mss', name=run_id, config=args)\n", (5468, 5516), False, 'import wandb\n'), ((5525, 5550), 'wandb.config.update', 'wandb.config.update', (['args'], {}), '(args)\n', (5544, 5550), False, 'import wandb\n'), ((5613, 5636), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', 
(5627, 5636), False, 'import os\n'), ((5646, 5666), 'os.makedirs', 'os.makedirs', (['exp_dir'], {}), '(exp_dir)\n', (5657, 5666), False, 'import os\n'), ((5911, 5943), 'numpy.random.shuffle', 'np.random.shuffle', (['train_dataset'], {}), '(train_dataset)\n', (5928, 5943), True, 'import numpy as np\n'), ((3351, 3379), 'torch.load', 'torch.load', (['args.resume_path'], {}), '(args.resume_path)\n', (3361, 3379), False, 'import torch\n'), ((3775, 3835), 'torch.nn.DataParallel', 'nn.DataParallel', (['net'], {'device_ids': 'gpus', 'output_device': 'gpus[0]'}), '(net, device_ids=gpus, output_device=gpus[0])\n', (3790, 3835), True, 'import torch.nn as nn\n'), ((6868, 6879), 'time.time', 'time.time', ([], {}), '()\n', (6877, 6879), False, 'import time\n'), ((9596, 9607), 'time.time', 'time.time', ([], {}), '()\n', (9605, 9607), False, 'import time\n'), ((10592, 10614), 'wandb.log', 'wandb.log', (['val_metrics'], {}), '(val_metrics)\n', (10601, 10614), False, 'import wandb\n'), ((10958, 10997), 'os.path.join', 'os.path.join', (['exp_dir', '"""last_q_net.pth"""'], {}), "(exp_dir, 'last_q_net.pth')\n", (10970, 10997), False, 'import os\n'), ((2575, 2891), 'rl.rl_model.QNet', 'rl_model.QNet', ([], {'input_shape': '(cfg.BIN, cfg.CHUNK_LEN)', 'embedding_size': 'backend_input_size', 'hidden_size': 'args.hidden_size', 'num_layers': 'args.num_layers', 'num_heads': 'args.num_heads', 'num_clusters': 'args.num_clusters', 'cluster_encode': 'args.cluster_encode', 'use_rnn': 'args.use_rnn', 'freeze_frontend': 'args.freeze_frontend'}), '(input_shape=(cfg.BIN, cfg.CHUNK_LEN), embedding_size=\n backend_input_size, hidden_size=args.hidden_size, num_layers=args.\n num_layers, num_heads=args.num_heads, num_clusters=args.num_clusters,\n cluster_encode=args.cluster_encode, use_rnn=args.use_rnn,\n freeze_frontend=args.freeze_frontend)\n', (2588, 2891), False, 'from rl import rl_model, omss_env, rl_utils\n'), ((10244, 10270), 'wandb.log', 'wandb.log', (['episode_metrics'], {}), 
'(episode_metrics)\n', (10253, 10270), False, 'import wandb\n'), ((10886, 10925), 'os.path.join', 'os.path.join', (['exp_dir', '"""best_q_net.pth"""'], {}), "(exp_dir, 'best_q_net.pth')\n", (10898, 10925), False, 'import os\n'), ((567, 591), 'os.path.join', 'os.path.join', (['mel_dir', 'x'], {}), '(mel_dir, x)\n', (579, 591), False, 'import os\n'), ((9317, 9435), 'wandb.log', 'wandb.log', (["{'explore/action': selected_action, 'explore/reward': reward, 'explore/eps':\n eps, 'explore/beta': beta}"], {}), "({'explore/action': selected_action, 'explore/reward': reward,\n 'explore/eps': eps, 'explore/beta': beta})\n", (9326, 9435), False, 'import wandb\n')] |
#-*- encoding:utf-8 -*-
import jieba
import math
from string import punctuation
from heapq import nlargest
from itertools import product, count
from gensim.models import Word2Vec
from . import util2 as util
import numpy as np
import os
from itertools import count
import codecs
class FastTextRank4Word(object):
    """Keyword extraction via TextRank over a word co-occurrence graph."""

    def __init__(self, use_stopword=False, stop_words_file=None, max_iter=100, tol=0.0001, window=2, window_strict=True):
        """
        :param use_stopword: drop stopwords before building the graph
        :param stop_words_file: path to a stopword list (one word per line);
                                defaults to the file shipped with this module
        :param max_iter: maximum number of PageRank iterations
        :param tol: convergence tolerance for PageRank
        :param window: co-occurrence window size (in words)
        :param window_strict: use the strict window pairing (util.combine2)
        """
        self.__use_stopword = use_stopword
        self.__max_iter = max_iter
        self.__tol = tol
        self.__window = window
        self.__window_strict = window_strict
        self.__stop_words = set()
        if type(stop_words_file) is str:
            self.__stop_words_file = stop_words_file
        else:
            self.__stop_words_file = self.get_default_stop_words_file()
        if use_stopword:
            for line in codecs.open(self.__stop_words_file, 'r', 'utf-8', 'ignore'):
                self.__stop_words.add(line.strip())
        # Print a RuntimeWarning for all types of floating-point errors
        np.seterr(all='warn')

    def get_default_stop_words_file(self):
        """Return the path of the stopword list shipped next to this module."""
        here = os.path.dirname(os.path.realpath(__file__))
        return os.path.join(here, 'stopwords.txt')

    def build_worddict(self, sents):
        """Assign every distinct word a contiguous index.

        :param sents: iterable of token lists
        :return: (word -> index, index -> word, vocabulary size)
        """
        word_index = {}
        index_word = {}
        for word_list in sents:
            for word in word_list:
                if word in word_index:
                    continue
                idx = len(word_index)
                word_index[word] = idx
                index_word[idx] = word
        return word_index, index_word, len(word_index)

    def build_word_grah(self, sents, words_number, word_index, window=2):
        """Build the symmetric co-occurrence count matrix as nested lists."""
        graph = [[0.0] * words_number for _ in range(words_number)]
        # strict window pairing (util.combine2) vs. loose pairing (util.combine)
        pair_fn = util.combine2 if self.__window_strict else util.combine
        for word_list in sents:
            for w1, w2 in pair_fn(word_list, window):
                if w1 in word_index and w2 in word_index:
                    a = word_index[w1]
                    b = word_index[w2]
                    graph[a][b] += 1.0
                    graph[b][a] += 1.0
        return graph

    def summarize(self, text, n):
        """Return (scores, words) for the top-*n* keywords of *text*.

        Returns ([None], [None]) when no keyword can be extracted.
        """
        text = text.replace('\n', '。').replace('\r', '')
        text = util.as_text(text)  # normalize encoding
        tokens = util.cut_sentences(text)
        # `sentences` keeps the raw sentences; `sents` holds the filtered token lists
        sentences, sents = util.psegcut_filter_words(tokens, self.__stop_words, self.__use_stopword)
        word_index, index_word, words_number = self.build_worddict(sents)
        self.words = index_word
        graph = self.build_word_grah(sents, words_number, word_index, window=self.__window)
        scores = util.weight_map_rank(graph, max_iter=self.__max_iter, tol=self.__tol)
        self.scores = scores
        # top-n (score, word_index) pairs, highest score first
        top = nlargest(n, zip(scores, count()))
        picked = [idx for _, idx in top]
        if not picked:
            return [None], [None]
        return [s for s, _ in top], [index_word[idx] for idx in picked]
| [
"os.path.join",
"os.path.realpath",
"itertools.count",
"codecs.open",
"numpy.seterr"
] | [((1182, 1203), 'numpy.seterr', 'np.seterr', ([], {'all': '"""warn"""'}), "(all='warn')\n", (1191, 1203), True, 'import numpy as np\n'), ((1319, 1351), 'os.path.join', 'os.path.join', (['d', '"""stopwords.txt"""'], {}), "(d, 'stopwords.txt')\n", (1331, 1351), False, 'import os\n'), ((989, 1048), 'codecs.open', 'codecs.open', (['self.__stop_words_file', '"""r"""', '"""utf-8"""', '"""ignore"""'], {}), "(self.__stop_words_file, 'r', 'utf-8', 'ignore')\n", (1000, 1048), False, 'import codecs\n'), ((1276, 1302), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1292, 1302), False, 'import os\n'), ((3227, 3234), 'itertools.count', 'count', ([], {}), '()\n', (3232, 3234), False, 'from itertools import count\n')] |
#! /usr/bin/env python
# -*- coding : utf8 -*-
import numpy as np
#from shs.calc import SiestaCalc
from calc import SiestaCalc
from geom import r as dist
def setup():
    """Open the Siesta MD calculation analysed by this script.

    :return: SiestaCalc reading 'out'-type results for the last 1400 MD steps
    """
    # renamed local from `dir`, which shadowed the builtin of the same name
    # calc_dir = '/home/andrey/calc/FeC/Fe161C39/NVT/1'
    calc_dir = '/home/andrey/calc/Fe/Spin/MD/FCC/MagMom'
    c = SiestaCalc(calc_dir, type = 'out', steps = range(-1400, 0))
    return c
def full_mag_mom(c):
    """Total magnetic moment (sum of up - dn spin populations) per MD step.

    :param c: calculation object whose ``evol`` iterates (step, geometry) pairs
    :return: (steps, moments) as two parallel lists
    """
    records = [(step, sum(geom.atoms['up'] - geom.atoms['dn']))
               for step, geom in c.evol]
    steps = [s for s, _ in records]
    moments = [m for _, m in records]
    return steps, moments
def spin_product(c, r_max = 10., dr = 0.4):
    """Radially binned average spin product <s_i * s_j>(r) over Fe atoms.

    Accumulates all pairwise spin products, binned by pair distance, over
    every geometry snapshot in ``c.evol.geom``, then averages each bin.

    :param c: calculation object; ``c.evol.geom`` iterates geometry snapshots
    :param r_max: largest pair distance binned (coordinate units)
    :param dr: radial bin width
    :return: (bin left edges from np.arange(0, r_max, dr),
              list of mean spin products per bin; 0. for empty bins)
    """
    nat = len(c.evol.geom[0].atoms)  # NOTE(review): appears unused below
    nr = int(r_max / dr)
    # one list of s_i*s_j samples per radial bin
    sp = [[] for _ in range(nr)]
    for es in c.evol.geom:
        # indices of Fe atoms; presumably a tuple of index arrays, hence
        # the n[0] accesses below -- TODO confirm against es.filter()
        n = es.filter('label','Fe')
        # per-atom spin = spin-up minus spin-down population
        spin = es[n]['up'] - es[n]['dn']
        # all pairwise spin products s_i * s_j
        spin2 = np.outer(spin, spin)
        # pairwise separation vectors (cell vectors es.vc suggest periodic
        # images are handled inside dist() -- TODO confirm)
        r = dist(es['crd'], es.vc, [n,n])
        r2 = np.sum(r*r, axis = 1)
        # bin index for each pair: floor(|r_ij| / dr), reshaped to NxN
        ri = np.floor(np.sqrt(r2.reshape(len(n[0]), len(n[0]))) / dr)
        for i in range(len(n[0])):
            for j in range(len(n[0])):
                if ri[i,j] < nr:
                    sp[int(ri[i,j])].append(spin2[i,j])
    # average each bin; bins that collected no samples stay 0.
    sp_mean = [0. for _ in sp]
    for i, spi in enumerate(sp):
        if len(spi) != 0:
            sp_mean[i] += sum(spi)/len(spi)
    return np.arange(0., r_max, dr), sp_mean
if __name__ == '__main__':
    # Compute the total magnetic moment per MD step and append the
    # step/moment pairs to a data file.
    c = setup()
    # x, sp = spin_product(c)
    x, sp = full_mag_mom(c)
    # BUGFIX: use a context manager -- the original handle was never closed,
    # so the last writes could stay unflushed.
    with open('full_mm_Fe.dat', 'a') as f:
        for s, m in zip(x, sp):
            f.write('%f\t\t%f\n' % (s,m))
| [
"numpy.sum",
"numpy.outer",
"geom.r",
"numpy.arange"
] | [((852, 872), 'numpy.outer', 'np.outer', (['spin', 'spin'], {}), '(spin, spin)\n', (860, 872), True, 'import numpy as np\n'), ((900, 930), 'geom.r', 'dist', (["es['crd']", 'es.vc', '[n, n]'], {}), "(es['crd'], es.vc, [n, n])\n", (904, 930), True, 'from geom import r as dist\n'), ((943, 964), 'numpy.sum', 'np.sum', (['(r * r)'], {'axis': '(1)'}), '(r * r, axis=1)\n', (949, 964), True, 'import numpy as np\n'), ((1343, 1368), 'numpy.arange', 'np.arange', (['(0.0)', 'r_max', 'dr'], {}), '(0.0, r_max, dr)\n', (1352, 1368), True, 'import numpy as np\n')] |
# Author: <NAME>
# Created: 2019-01-24
# Copyright (C) 2018, <NAME>
# License: MIT
import cairo
import numpy as np
from PIL import Image
'''
The movie functions operate on lazy sequences of images. The images are stored as numpy arrays.
'''
def normalise_array(array):
    """
    Collapse a trailing singleton channel axis.

    Pillow's fromarray rejects greyscale arrays shaped [a, b, 1]; such arrays
    are squeezed to [a, b].  Any other array is returned unchanged.
    :param array: The array
    :return: the array with axis 2 removed when it has length 1
    """
    needs_squeeze = array.ndim == 3 and array.shape[2] == 1
    return np.squeeze(array, axis=2) if needs_squeeze else array
def duplicate_frame(frame, count):
    '''
    Lazily repeat a single frame a fixed number of times.
    :param frame: the frame, a numpy array
    :param count: how many copies to yield
    :return: generator yielding *frame* exactly *count* times
    '''
    emitted = 0
    while emitted < count:
        yield frame
        emitted += 1
def save_frame(outfile, frame):
    """
    Save a single frame as a png image.
    :param outfile: Full name and path of the file (.png extension optional)
    :param frame: The frame, a numpy array
    :return:
    """
    base = outfile[:-4] if outfile.lower().endswith('.png') else outfile
    Image.fromarray(normalise_array(frame)).save(base + '.png')
def save_frames(outfile, frames):
    """
    Save a sequence of frames as zero-padded numbered png images.
    :param outfile: Base name and path of the files (.png extension optional)
    :param frames: The sequence of frames (numpy arrays)
    :return:
    """
    base = outfile[:-4] if outfile.lower().endswith('.png') else outfile
    for index, frame in enumerate(frames):
        image = Image.fromarray(normalise_array(frame))
        image.save(base + str(index).zfill(8) + '.png')
| [
"numpy.squeeze"
] | [((588, 613), 'numpy.squeeze', 'np.squeeze', (['array'], {'axis': '(2)'}), '(array, axis=2)\n', (598, 613), True, 'import numpy as np\n')] |
"""
Created on Thur July 4 9:26:00 2019
@author: <EMAIL>
# https://github.com/anujshah1003/Transfer-Learning-in-keras---custom-data
"""
import numpy as np
import os
import time
from resnet50 import ResNet50
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Flatten
from imagenet_utils import preprocess_input, decode_predictions
from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
#from sklearn.cross_validation import train_test_split ## it’s just an old way of doing split
from sklearn.model_selection import train_test_split ## similar way doing split
import matplotlib.pyplot as plt
from keras.utils import plot_model
from keras.utils.vis_utils import model_to_dot
from IPython.display import SVG
from keras.models import Model, load_model
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
#get_ipython().run_line_magic('matplotlib', 'inline')
############# Loading and pre-processing an image #######################
img_path = 'images\elephant.jpg'
#Load the image using load_img() function specifying the target size.
img = image.load_img(img_path, target_size=(224, 224))
#Keras loads the image in PIL format (height, width) which shall be converted into NumPy format (height, width, channels) using image_to_array() function.
x = image.img_to_array(img)
print (x.shape)
#Then the input image shall be converted to a 4-dimensional Tensor (batchsize, height, width, channels) using NumPy’s expand_dims function.
x = np.expand_dims(x, axis=0)
print (x.shape)
#Normalizing the image
#Some models use images with values ranging from 0 to 1 or from -1 to +1 or “caffe” style.
#The input_image is further to be normalized by subtracting the mean of the ImageNet data.
#We don’t need to worry about internal details and we can use the preprocess_input() function from each model to normalize the image.
x = preprocess_input(x)
print('Input image shape:', x.shape)
plt.imshow(x[0])
model =ResNet50(include_top ='True', weights='imagenet')
prediction= model.predict(x)
print('Predicted:', decode_predictions(prediction))
################# Loading BSL training data #############################
PATH = os.getcwd()
print("PATH", PATH)
# Define data path
#data_path = PATH + '/data' #separated
data_path = PATH + '/data_v' #vertical stack over
#data_path = PATH + '/data_h' #horizontal stack over
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
img_list=os.listdir(data_path+'/'+ dataset)
print ('\n Loaded the images of dataset-'+'{}\n'.format(dataset))
for img in img_list:
img_path = data_path + '/'+ dataset + '/'+ img
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
img_data_list.append(x)
img_data = np.array(img_data_list)
#img_data = img_data.astype('float32')
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
# Plot a image out
#plt.imshow(np.uint8(img_data[0]))
plt.imshow(img_data[0])
#################### Define the number of classes ##########################
num_classes = 2
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:79]=0 # dementia class 0
labels[79:]=1 # healthy class 1
names = ['dementia','healthy']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#print("Y", Y)
# Shuffle the dataset
xs,ys = shuffle(img_data,Y, random_state=2) #if I use the same random_state with the same dataset, then I am always guaranteed to have the same shuffle
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(xs, ys, test_size=0.2, random_state=2)
###############################################################################
# * Custom_resnet_model_1 #
# Train the Model as a Classifier #
# Only the classifier layer (last layer) is trainable, #
# parameters of other layers are freezed. #
# Used with Smaller Datasets #
# Early Stopping is used for avoid Overfitting # #
###############################################################################
image_input = Input(shape=(224, 224, 3))
# Creat ResNet50 Model
# "include_top= True" means include the final dense layers
model_resnet = ResNet50(input_tensor=image_input, include_top=True,weights='imagenet')
# Print Model Layers Details/ Plot a Graph Layout
model_resnet.summary()
plot_model(model_resnet,to_file='C:/Users/User/Documents/Github_Clone/deep-learning-models/ResNet50/ResNet50Model.png')
SVG(model_to_dot(model_resnet).create(prog='dot', format='svg'))
# Get the last layer "avg_pool" out and from there to add/create your own network layers
last_layer = model_resnet.get_layer('avg_pool').output
x= Flatten(name='flatten')(last_layer)
out = Dense(num_classes, activation='softmax', name='softmax2outputs')(x)
custom_resnet_model_1 = Model(inputs=image_input,outputs= out)
# Print custom_resnet_model_1 Layers Details/ Plot a Graph Layout
custom_resnet_model_1.summary()
plot_model(custom_resnet_model_1,to_file='C:/Users/user/Documents/Github_Clone/deep-learning-models/ResNet50/train_accuracy 69.7674% val_accuracy_freeze/XingRestNet50Model_classifier.png')
SVG(model_to_dot(custom_resnet_model_1).create(prog='dot', format='svg'))
# Freeze the parameters of previous layers, except the last layer
for layer in custom_resnet_model_1.layers[:-1]:
layer.trainable = False
#custom_resnet_model_1.layers[-1].trainable
################# Compile the Model ######################################
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
custom_resnet_model_1.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy'])
################# Train the Model ########################################
monitor = EarlyStopping(monitor='val_loss', min_delta=0,patience=5,verbose=1, mode='auto', baseline =None, restore_best_weights=True)
t=time.time()
hist3 = custom_resnet_model_1.fit(X_train, y_train, batch_size=1, epochs=100, verbose=1, validation_data=(X_test, y_test),callbacks=[monitor])
print('Training time: %s' % (time.time()-t))
train_accuracy3= hist3.history['acc']
print("[INFO] Model_ResNet train_accuracy: {:.4f}%".format(train_accuracy3[-6] * 100))
################# Test the Model #######################################
(loss3, accuracy3) = custom_resnet_model_1.evaluate(X_test, y_test, batch_size=1, verbose=3)
print("[INFO] loss={:.4f}, val_accuracy: {:.4f}%".format(loss3,accuracy3 * 100))
################# Save the Model #######################################
custom_resnet_model_1.save_weights('C:\\Users\\user\\Documents\\Github_Clone\\deep-learning-models\\ResNet50\\train_accuracy 69.7674% val_accuracy\\my_ResNet50_model_weights_classifier.h5')
custom_resnet_model_1.save('C:\\Users\\user\\Documents\\Github_Clone\\deep-learning-models\\ResNet50\\train_accuracy 69.7674% val_accuracy\\my_ResNet50_model_classifier.h5')
###############################################################################
# * Custom_resnet_model_2 #
# Fine Tune the Model #
# Add on personalised dense layers (FC Layers) #
# Only last 6 layers are trainable #
# Used with Larger Datasets #
###############################################################################
# Creat the Model
model = ResNet50(weights='imagenet',include_top=False)
model.summary()
last_layer = model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
# add fully-connected & dropout layers
x = Dense(512, activation='relu',name='fc-1')(x)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu',name='fc-2')(x)
x = Dropout(0.5)(x)
# a softmax layer for 4 classes
out = Dense(num_classes, activation='softmax',name='softmax2outputs')(x)
# this is the model we will train
custom_resnet_model_2 = Model(inputs=model.input, outputs=out)
custom_resnet_model_2.summary()
for layer in custom_resnet_model_2.layers[:-6]:
layer.trainable = False
custom_resnet_model_2.layers[-1].trainable
# Compile the Model
custom_resnet_model_2.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# Train the Model
t=time.time()
hist = custom_resnet_model_2.fit(X_train, y_train, batch_size=3, epochs=30, verbose=1, validation_data=(X_test, y_test))
print('Training time: %s' % (t - time.time()))
# Test the Model
(loss, accuracy) = custom_resnet_model_2.evaluate(X_test, y_test, batch_size=3, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
custom_resnet_model_2.save_weights('C:/Users/liangx/Documents/Github_Clone/deep-learning-models/ResNet50/my_ResNet50_model_weights_finetune.h5')
custom_resnet_model_2.save('C:/Users/liangx/Documents/Github_Clone/deep-learning-models/ResNet50/my_ResNet50_models_finetune.h5')
###############################################################################
# #
# Model as a Feature Extractor #
# #
###############################################################################
model = ResNet50(weights='imagenet',include_top=False)
model.summary()
img_path = 'images\elephant.jpg'
#Load the image using load_img() function specifying the target size.
ima = image.load_img(img_path, target_size=(224, 224))
#Keras loads the image in PIL format (width, height) which shall be converted into NumPy format (height, width, channels) using image_to_array() function.
x = image.img_to_array(ima)
#Then the input image shall be converted to a 4-dimensional Tensor (batchsize, height, width, channels) using NumPy’s expand_dims function.
x = np.expand_dims(x, axis=0)
#Normalizing the image
x = preprocess_input(x)
#Use the model as an image feature extractor
features=model.predict(x)
print('Input image features:', features)
###################### Plot Results #######################################
import matplotlib.pyplot as plt
# visualizing losses and accuracy
hist=hist3
train_loss=hist.history['loss']
val_loss=hist.history['val_loss']
train_acc=hist.history['acc']
val_acc=hist.history['val_acc']
xc=range(8) # epoch number
plt.figure(1,figsize=(7,5))
plt.plot(xc,train_loss)
plt.plot(xc,val_loss)
plt.xlabel('num of Epochs')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.grid(True)
plt.legend(['train','val'])
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
plt.figure(2,figsize=(7,5))
plt.plot(xc,train_acc)
plt.plot(xc,val_acc)
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
############## Model Evaluation/Prediction ###################################
#img_path = 'C:/Users/liangx/Documents/Github_Clone/deep-learning-models/data/dementia/1_left2d_big.png'
#img_path = 'C:/Users/liangx/Documents/Github_Clone/deep-learning-models/data/healthy/6_right2d_big.png'
#img_path ='C:/Users/liangx/Documents/Github_Clone/deep-learning-models/data_h/dementia/1_combine_2d_h.png'
img_path ='C:/Users/user/Documents/Github_Clone/deep-learning-models/data_v/dementia/1_combine_2d_v.png'
img_path ='C:/Users/user/Documents/Github_Clone/deep-learning-models/data_v/healthy/81_combine_2d_v.png'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = custom_resnet_model_1.predict(x)
print('Predicted:',preds)
| [
"keras.preprocessing.image.img_to_array",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"imagenet_utils.decode_predictions",
"numpy.rollaxis",
"numpy.array",
"keras.layers.Dense",
"matplotlib.pyplot.imshow",
"os.listdir",
"resnet50.ResNet50",
"keras.utils.plot_model",
"matplotlib.pyplo... | [((1199, 1247), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (1213, 1247), False, 'from keras.preprocessing import image\n'), ((1408, 1431), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (1426, 1431), False, 'from keras.preprocessing import image\n'), ((1593, 1618), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1607, 1618), True, 'import numpy as np\n'), ((1982, 2001), 'imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1998, 2001), False, 'from imagenet_utils import preprocess_input, decode_predictions\n'), ((2043, 2059), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x[0]'], {}), '(x[0])\n', (2053, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2116), 'resnet50.ResNet50', 'ResNet50', ([], {'include_top': '"""True"""', 'weights': '"""imagenet"""'}), "(include_top='True', weights='imagenet')\n", (2076, 2116), False, 'from resnet50 import ResNet50\n'), ((2289, 2300), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2298, 2300), False, 'import os\n'), ((2501, 2522), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2511, 2522), False, 'import os\n'), ((2980, 3003), 'numpy.array', 'np.array', (['img_data_list'], {}), '(img_data_list)\n', (2988, 3003), True, 'import numpy as np\n'), ((3075, 3102), 'numpy.rollaxis', 'np.rollaxis', (['img_data', '(1)', '(0)'], {}), '(img_data, 1, 0)\n', (3086, 3102), True, 'import numpy as np\n'), ((3224, 3247), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_data[0]'], {}), '(img_data[0])\n', (3234, 3247), True, 'import matplotlib.pyplot as plt\n'), ((3392, 3433), 'numpy.ones', 'np.ones', (['(num_of_samples,)'], {'dtype': '"""int64"""'}), "((num_of_samples,), dtype='int64')\n", (3399, 3433), True, 'import numpy as np\n'), ((3583, 3627), 'keras.utils.np_utils.to_categorical', 
'np_utils.to_categorical', (['labels', 'num_classes'], {}), '(labels, num_classes)\n', (3606, 3627), False, 'from keras.utils import np_utils\n'), ((3674, 3710), 'sklearn.utils.shuffle', 'shuffle', (['img_data', 'Y'], {'random_state': '(2)'}), '(img_data, Y, random_state=2)\n', (3681, 3710), False, 'from sklearn.utils import shuffle\n'), ((3874, 3929), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xs', 'ys'], {'test_size': '(0.2)', 'random_state': '(2)'}), '(xs, ys, test_size=0.2, random_state=2)\n', (3890, 3929), False, 'from sklearn.model_selection import train_test_split\n'), ((4654, 4680), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (4659, 4680), False, 'from keras.layers import Input\n'), ((4784, 4856), 'resnet50.ResNet50', 'ResNet50', ([], {'input_tensor': 'image_input', 'include_top': '(True)', 'weights': '"""imagenet"""'}), "(input_tensor=image_input, include_top=True, weights='imagenet')\n", (4792, 4856), False, 'from resnet50 import ResNet50\n'), ((4932, 5062), 'keras.utils.plot_model', 'plot_model', (['model_resnet'], {'to_file': '"""C:/Users/User/Documents/Github_Clone/deep-learning-models/ResNet50/ResNet50Model.png"""'}), "(model_resnet, to_file=\n 'C:/Users/User/Documents/Github_Clone/deep-learning-models/ResNet50/ResNet50Model.png'\n )\n", (4942, 5062), False, 'from keras.utils import plot_model\n'), ((5399, 5437), 'keras.models.Model', 'Model', ([], {'inputs': 'image_input', 'outputs': 'out'}), '(inputs=image_input, outputs=out)\n', (5404, 5437), False, 'from keras.models import Model, load_model\n'), ((5538, 5737), 'keras.utils.plot_model', 'plot_model', (['custom_resnet_model_1'], {'to_file': '"""C:/Users/user/Documents/Github_Clone/deep-learning-models/ResNet50/train_accuracy 69.7674% val_accuracy_freeze/XingRestNet50Model_classifier.png"""'}), "(custom_resnet_model_1, to_file=\n 'C:/Users/user/Documents/Github_Clone/deep-learning-models/ResNet50/train_accuracy 69.7674% 
val_accuracy_freeze/XingRestNet50Model_classifier.png'\n )\n", (5548, 5737), False, 'from keras.utils import plot_model\n'), ((6080, 6135), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'amsgrad': '(False)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)\n', (6084, 6135), False, 'from keras.optimizers import Adam\n'), ((6331, 6460), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(1)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode=\n 'auto', baseline=None, restore_best_weights=True)\n", (6344, 6460), False, 'from keras.callbacks import EarlyStopping\n'), ((6458, 6469), 'time.time', 'time.time', ([], {}), '()\n', (6467, 6469), False, 'import time\n'), ((8079, 8126), 'resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (8087, 8126), False, 'from resnet50 import ResNet50\n'), ((8596, 8634), 'keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'out'}), '(inputs=model.input, outputs=out)\n', (8601, 8634), False, 'from keras.models import Model, load_model\n'), ((8929, 8940), 'time.time', 'time.time', ([], {}), '()\n', (8938, 8940), False, 'import time\n'), ((9984, 10031), 'resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (9992, 10031), False, 'from resnet50 import ResNet50\n'), ((10158, 10206), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (10172, 10206), False, 'from keras.preprocessing import image\n'), ((10367, 10390), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['ima'], {}), '(ima)\n', (10385, 10390), False, 'from 
keras.preprocessing import image\n'), ((10536, 10561), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (10550, 10561), True, 'import numpy as np\n'), ((10590, 10609), 'imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (10606, 10609), False, 'from imagenet_utils import preprocess_input, decode_predictions\n'), ((11043, 11072), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(7, 5)'}), '(1, figsize=(7, 5))\n', (11053, 11072), True, 'import matplotlib.pyplot as plt\n'), ((11071, 11095), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'train_loss'], {}), '(xc, train_loss)\n', (11079, 11095), True, 'import matplotlib.pyplot as plt\n'), ((11095, 11117), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'val_loss'], {}), '(xc, val_loss)\n', (11103, 11117), True, 'import matplotlib.pyplot as plt\n'), ((11117, 11144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""num of Epochs"""'], {}), "('num of Epochs')\n", (11127, 11144), True, 'import matplotlib.pyplot as plt\n'), ((11145, 11163), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (11155, 11163), True, 'import matplotlib.pyplot as plt\n'), ((11164, 11199), 'matplotlib.pyplot.title', 'plt.title', (['"""train_loss vs val_loss"""'], {}), "('train_loss vs val_loss')\n", (11173, 11199), True, 'import matplotlib.pyplot as plt\n'), ((11200, 11214), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11208, 11214), True, 'import matplotlib.pyplot as plt\n'), ((11215, 11243), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {}), "(['train', 'val'])\n", (11225, 11243), True, 'import matplotlib.pyplot as plt\n'), ((11313, 11339), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['classic']"], {}), "(['classic'])\n", (11326, 11339), True, 'import matplotlib.pyplot as plt\n'), ((11341, 11370), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(7, 5)'}), '(2, figsize=(7, 5))\n', 
(11351, 11370), True, 'import matplotlib.pyplot as plt\n'), ((11369, 11392), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'train_acc'], {}), '(xc, train_acc)\n', (11377, 11392), True, 'import matplotlib.pyplot as plt\n'), ((11392, 11413), 'matplotlib.pyplot.plot', 'plt.plot', (['xc', 'val_acc'], {}), '(xc, val_acc)\n', (11400, 11413), True, 'import matplotlib.pyplot as plt\n'), ((11413, 11440), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""num of Epochs"""'], {}), "('num of Epochs')\n", (11423, 11440), True, 'import matplotlib.pyplot as plt\n'), ((11441, 11463), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (11451, 11463), True, 'import matplotlib.pyplot as plt\n'), ((11464, 11497), 'matplotlib.pyplot.title', 'plt.title', (['"""train_acc vs val_acc"""'], {}), "('train_acc vs val_acc')\n", (11473, 11497), True, 'import matplotlib.pyplot as plt\n'), ((11498, 11512), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (11506, 11512), True, 'import matplotlib.pyplot as plt\n'), ((11513, 11548), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '(4)'}), "(['train', 'val'], loc=4)\n", (11523, 11548), True, 'import matplotlib.pyplot as plt\n'), ((11617, 11643), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['classic']"], {}), "(['classic'])\n", (11630, 11643), True, 'import matplotlib.pyplot as plt\n'), ((12262, 12310), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (12276, 12310), False, 'from keras.preprocessing import image\n'), ((12315, 12338), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (12333, 12338), False, 'from keras.preprocessing import image\n'), ((12343, 12368), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (12357, 12368), True, 'import numpy as np\n'), ((12373, 12392), 
'imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (12389, 12392), False, 'from imagenet_utils import preprocess_input, decode_predictions\n'), ((2167, 2197), 'imagenet_utils.decode_predictions', 'decode_predictions', (['prediction'], {}), '(prediction)\n', (2185, 2197), False, 'from imagenet_utils import preprocess_input, decode_predictions\n'), ((2582, 2619), 'os.listdir', 'os.listdir', (["(data_path + '/' + dataset)"], {}), "(data_path + '/' + dataset)\n", (2592, 2619), False, 'import os\n'), ((5265, 5288), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (5272, 5288), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((5307, 5371), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""', 'name': '"""softmax2outputs"""'}), "(num_classes, activation='softmax', name='softmax2outputs')\n", (5312, 5371), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((8218, 8242), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (8240, 8242), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((8298, 8340), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'name': '"""fc-1"""'}), "(512, activation='relu', name='fc-1')\n", (8303, 8340), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((8347, 8359), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8354, 8359), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((8367, 8409), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""', 'name': '"""fc-2"""'}), "(256, activation='relu', name='fc-2')\n", (8372, 8409), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((8416, 8428), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8423, 
8428), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((8470, 8534), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""', 'name': '"""softmax2outputs"""'}), "(num_classes, activation='softmax', name='softmax2outputs')\n", (8475, 8534), False, 'from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten\n'), ((2764, 2812), 'keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (2778, 2812), False, 'from keras.preprocessing import image\n'), ((2820, 2843), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (2838, 2843), False, 'from keras.preprocessing import image\n'), ((2850, 2875), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2864, 2875), True, 'import numpy as np\n'), ((2882, 2901), 'imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (2898, 2901), False, 'from imagenet_utils import preprocess_input, decode_predictions\n'), ((5056, 5082), 'keras.utils.vis_utils.model_to_dot', 'model_to_dot', (['model_resnet'], {}), '(model_resnet)\n', (5068, 5082), False, 'from keras.utils.vis_utils import model_to_dot\n'), ((5731, 5766), 'keras.utils.vis_utils.model_to_dot', 'model_to_dot', (['custom_resnet_model_1'], {}), '(custom_resnet_model_1)\n', (5743, 5766), False, 'from keras.utils.vis_utils import model_to_dot\n'), ((6642, 6653), 'time.time', 'time.time', ([], {}), '()\n', (6651, 6653), False, 'import time\n'), ((9095, 9106), 'time.time', 'time.time', ([], {}), '()\n', (9104, 9106), False, 'import time\n')] |
import os
import sys
import getopt
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image, ImageOps
def load_data(path):
X = np.array([]).reshape((0, 28, 28))
y = np.array([])
for root, dirs, files in os.walk(path):
for file in files:
if ".png" in file:
img = Image.open(os.path.join(root, file))
img = img.resize((28, 28))
img = img.convert("L")
img = ImageOps.equalize(img)
img = np.array(img)
img = img.reshape(1, *img.shape)
X = np.vstack((X, img))
elif ".csv" in file:
data = pd.read_csv(os.path.join(root, file), names=["labels"])
y = data["labels"].to_numpy()
return X, y
def preprocess(X, y):
X = X.reshape(X.shape[0], 28, 28, 1)
X = X / 255
y = tf.keras.utils.to_categorical(y, 10)
return X, y
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["name=", "dataset="])
except getopt.error as err:
sys.exit(2)
name = "model.h5"
root = ""
for opt, val in opts:
if opt == "--name": name = val
elif opt == "--dataset": root = val
X_test, y_test = load_data(root) if root else tf.keras.datasets.mnist.load_data()[1]
X_test, y_test = preprocess(X_test, y_test)
if X_test.shape[0] != y_test.shape[0]:
print("The number of images is not equal to the number of labels")
sys.exit(2)
if X_test.shape[1:-1] != (28, 28):
print("The dimensions of the images are not 28x28")
sys.exit(2)
model = tf.keras.models.load_model(name)
loss, acc = model.evaluate(X_test, y_test, verbose=0)
print(f"Model {name} stats:")
print(f'loss: {loss}')
print(f'acc: {acc}') | [
"tensorflow.keras.utils.to_categorical",
"getopt.getopt",
"tensorflow.keras.datasets.mnist.load_data",
"os.path.join",
"numpy.array",
"tensorflow.keras.models.load_model",
"numpy.vstack",
"sys.exit",
"PIL.ImageOps.equalize",
"os.walk"
] | [((203, 215), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (211, 215), True, 'import numpy as np\n'), ((250, 263), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (257, 263), False, 'import os\n'), ((898, 934), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y', '(10)'], {}), '(y, 10)\n', (927, 934), True, 'import tensorflow as tf\n'), ((1673, 1705), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['name'], {}), '(name)\n', (1699, 1705), True, 'import tensorflow as tf\n'), ((1009, 1063), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '""""""', "['name=', 'dataset=']"], {}), "(sys.argv[1:], '', ['name=', 'dataset='])\n", (1022, 1063), False, 'import getopt\n'), ((1528, 1539), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1536, 1539), False, 'import sys\n'), ((1648, 1659), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1656, 1659), False, 'import sys\n'), ((161, 173), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (169, 173), True, 'import numpy as np\n'), ((1104, 1115), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1112, 1115), False, 'import sys\n'), ((1314, 1349), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (1347, 1349), True, 'import tensorflow as tf\n'), ((486, 508), 'PIL.ImageOps.equalize', 'ImageOps.equalize', (['img'], {}), '(img)\n', (503, 508), False, 'from PIL import Image, ImageOps\n'), ((531, 544), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (539, 544), True, 'import numpy as np\n'), ((614, 633), 'numpy.vstack', 'np.vstack', (['(X, img)'], {}), '((X, img))\n', (623, 633), True, 'import numpy as np\n'), ((356, 380), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (368, 380), False, 'import os\n'), ((703, 727), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (715, 727), False, 'import os\n')] |
# imports
import numpy as np
import skfuzzy as fuzz
from scipy.stats import skew
import os
from alibi.utils.discretizer import Discretizer
from alibi.datasets import fetch_adult
import pandas as pd
import matplotlib.pyplot as plt
## Some variables and helper functions
rating_5 = ["VL","L","M","H","VH"]
rating_3 = ["L", "M", "H"]
rating_2 = ["No", "Yes"]
all_ratings = [rating_2, rating_3, rating_5]
# memberhsip values
mems_corr_unscaled = [[0.0,0.0,0.2,0.3],
[0.2,0.3,0.4,0.5],
[0.4,0.5,0.6,0.7],
[0.6,0.7,0.8,0.9],
[0.8,0.9,1,1]]
def plot_memberships(universe,mem_funcs,mem_names=["L","M","H"],colors = ["b", "g", "r", "y", "m"],figsize=(8,4), save=None):
f = plt.figure(figsize=figsize)
ax = f.add_subplot(111)
# universe, membership function, color
for mem,label,color in zip(mem_funcs, mem_names, colors):
ax.plot(universe, mem, color, linewidth=1.5, label=label)
# Hide the right and top spines
#ax.spines['right'].set_visible(False)
#ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.ylabel('Zugehörigkeitswert')
plt.xlabel('Diskursuniversum')
#ax.set_title(self.name)
#ax.legend(loc=2)
ax.legend()
if save:
plt.savefig(f"./{save}.png",dpi=300)
def plot_distribution(feat, name, legend,figsize=(9,6), savedir=None):
f = plt.figure(figsize=figsize)
ax = f.add_subplot(111)
plt.hist(feat)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.labelpad = 15
ax.yaxis.labelpad = 15
plt.ylabel('Anzahl')
plt.xlabel(legend)
if savedir is not None:
plt.tight_layout()
plt.savefig(os.path.join(savedir,"{}.png".format(name)),dpi=300)
def get_mems(mem_funcs, universe,vals):
'''
@param mem_vals: membership functions of feature
@param universe: universe of feature
@vals: values to determine memberships
returns: Dict of membership degrees per feature
'''
mems = {}
add = 0
if type(vals) != dict:
global_vals = {}
for i,c in enumerate(vals):
# create dict
global_vals[i] = c
else:
global_vals = vals
for label,val in global_vals.items():
mems[label] = [fuzz.interp_membership(universe, m, val) for m in mem_funcs]
add += val
return mems
def get_all_memberships(memberships,universe, val, labels=None):
'''
Returns the memberships of the given value
@param universe: universe of feature
@param memberships: memberships of feature
@param val: value to determine memberships (has to be scaled to 100)
@param labels: labels for returned membership dict (optional)
'''
if labels is not None:
rating = labels
else:
rating = [r for r in all_ratings if len(r) == len(memberships)][0]
mems = {}
for idx,m in enumerate(memberships):
mems[rating[idx]] = np.around(fuzz.interp_membership(universe, m, val),3)
return mems
def get_features_per_mem(mem_vals,labels=None):
'''
@param mem_vals: dict of membership values per feature
returns:
- Features sorted by Rating
- Dict of number of features per rating
'''
# get labels
rating = labels if labels is not None else [r for r in all_ratings if len(r) == len(list(mem_vals.values())[0])][0]
crisp_ratings = {}
r_nums = {}
for ri,rating in enumerate(rating):
crisp_ratings[rating] = [n for n,v in mem_vals.items() if v[ri] > 0]
r_nums[rating] = len(crisp_ratings[rating])
return crisp_ratings,r_nums
def get_features_per_mem_exact(mem_vals,labels=None):
'''
@param mem_vals: dict of membership values per feature
returns:
- Features sorted by Rating
- Dict of number of features per rating
'''
rating = labels if labels is not None else [r for r in all_ratings if len(r) == len(list(mem_vals.values())[0])][0]
crisp_ratings = {}
for ri,rating in enumerate(rating):
crisp_ratings[rating] = [n for n,v in mem_vals.items() if v[ri] > 0]
r_nums = {}
c = 0
for rating,(idx,val) in zip(crisp_ratings.keys(),enumerate(crisp_ratings.values())):
for v in val:
c += mem_vals[v][idx]
r_nums[rating] = c
c = 0
return crisp_ratings,r_nums
def get_all_membership_values(universe, memberships, val, rating=["L","M","H"]):
mems = {}
for idx,m in enumerate(memberships):
mems[rating[idx]] = np.around(fuzz.interp_membership(universe, m, val),3)
return mems
def calc_mad(data, axis=None):
return np.mean(np.absolute(data - np.mean(data, axis)), axis) | [
"numpy.mean",
"skfuzzy.interp_membership",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout"
] | [((740, 767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (750, 767), True, 'import matplotlib.pyplot as plt\n'), ((1227, 1259), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Zugehörigkeitswert"""'], {}), "('Zugehörigkeitswert')\n", (1237, 1259), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1294), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Diskursuniversum"""'], {}), "('Diskursuniversum')\n", (1274, 1294), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1534, 1551), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1598), 'matplotlib.pyplot.hist', 'plt.hist', (['feat'], {}), '(feat)\n', (1592, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1814), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Anzahl"""'], {}), "('Anzahl')\n", (1804, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1819, 1837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['legend'], {}), '(legend)\n', (1829, 1837), True, 'import matplotlib.pyplot as plt\n'), ((1390, 1427), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./{save}.png"""'], {'dpi': '(300)'}), "(f'./{save}.png', dpi=300)\n", (1401, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1897), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1895, 1897), True, 'import matplotlib.pyplot as plt\n'), ((2523, 2563), 'skfuzzy.interp_membership', 'fuzz.interp_membership', (['universe', 'm', 'val'], {}), '(universe, m, val)\n', (2545, 2563), True, 'import skfuzzy as fuzz\n'), ((3227, 3267), 'skfuzzy.interp_membership', 'fuzz.interp_membership', (['universe', 'm', 'val'], {}), '(universe, m, val)\n', (3249, 3267), True, 'import skfuzzy as fuzz\n'), ((4873, 4913), 'skfuzzy.interp_membership', 'fuzz.interp_membership', (['universe', 'm', 'val'], {}), '(universe, m, val)\n', (4895, 4913), True, 'import skfuzzy as fuzz\n'), 
((5003, 5022), 'numpy.mean', 'np.mean', (['data', 'axis'], {}), '(data, axis)\n', (5010, 5022), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
def gauss_grid(size_x, size_y=None):
if size_y == None:
size_y = size_x
sigma_x = 0.17 * size_x
sigma_y = 0.17 * size_y
assert isinstance(size_x, int)
assert isinstance(size_y, int)
x0 = size_x // 2
y0 = size_y // 2
x = np.arange(0, size_x, dtype=float)
y = np.arange(0, size_y, dtype=float)[:,np.newaxis]
x -= x0
y -= y0
exp_part = x**2/(2*sigma_x**2)+ y**2/(2*sigma_y**2)
dist = 1/(2*np.pi*sigma_x*sigma_y) * np.exp(-exp_part)
return dist * (256/dist.max())
def alpha_gradient_grid( size_x, size_y=None, color="#888888", alpha=0.1 ):
arr = []
for row in gauss_grid( size_x, size_y ).tolist():
for item in row:
arr.append( "{}{:02X}".format( color, math.floor( alpha * item ) ) )
return arr
| [
"numpy.exp",
"numpy.arange",
"math.floor"
] | [((330, 363), 'numpy.arange', 'np.arange', (['(0)', 'size_x'], {'dtype': 'float'}), '(0, size_x, dtype=float)\n', (339, 363), True, 'import numpy as np\n'), ((372, 405), 'numpy.arange', 'np.arange', (['(0)', 'size_y'], {'dtype': 'float'}), '(0, size_y, dtype=float)\n', (381, 405), True, 'import numpy as np\n'), ((543, 560), 'numpy.exp', 'np.exp', (['(-exp_part)'], {}), '(-exp_part)\n', (549, 560), True, 'import numpy as np\n'), ((815, 839), 'math.floor', 'math.floor', (['(alpha * item)'], {}), '(alpha * item)\n', (825, 839), False, 'import math\n')] |
from __future__ import print_function
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
import numpy as np
from util import get_poke_xy
# Load the pokemon features and hold out a test split.
x_train, x_test, y_train, y_test = get_poke_xy()
# get indexes for 90/10 train/val split (stratified by y)
splits = StratifiedShuffleSplit(y_train, n_iter=1, test_size=0.1, random_state=42)
# form split (n_iter=1, so the loop body runs exactly once)
for train_index, val_index in splits:
    x_train, x_val = x_train[train_index], x_train[val_index]
    y_train, y_val = y_train[train_index], y_train[val_index]
# ks to eval
k_vals = range(1, 30, 2)
accuracies = []
# loop through k_vals and find best performance on val
# BUGFIX: was `for k in xrange(1, 30, 2)` — duplicated k_vals and relied on
# the Python-2-only `xrange`; iterate the list we already built instead.
for k in k_vals:
    # train the knn with current k
    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(x_train, y_train)
    # eval the model and update the accuracies list
    score = model.score(x_val, y_val)
    # print("k=%d, accuracy=%.2f%%" % (k, score * 100))
    accuracies.append(score)
# find the value of k that has the largest accuracy
i = np.argmax(accuracies)
print("k=%d achieved highest accuracy of %.2f%% on validation data" % (k_vals[i],
    accuracies[i] * 100))
# build model with best k from train
model = KNeighborsClassifier(n_neighbors=k_vals[i])
model.fit(x_train, y_train)
predictions = model.predict(x_test)
# deploy and eval results
# BUGFIX: was `.format(i)` which printed the argmax index, not the k value.
print("\n[RESULTS] KNN w/ k={}".format(k_vals[i]))
print(classification_report(y_test, predictions))
| [
"sklearn.cross_validation.StratifiedShuffleSplit",
"sklearn.metrics.classification_report",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.argmax",
"util.get_poke_xy"
] | [((283, 296), 'util.get_poke_xy', 'get_poke_xy', ([], {}), '()\n', (294, 296), False, 'from util import get_poke_xy\n'), ((364, 437), 'sklearn.cross_validation.StratifiedShuffleSplit', 'StratifiedShuffleSplit', (['y_train'], {'n_iter': '(1)', 'test_size': '(0.1)', 'random_state': '(42)'}), '(y_train, n_iter=1, test_size=0.1, random_state=42)\n', (386, 437), False, 'from sklearn.cross_validation import StratifiedShuffleSplit\n'), ((1075, 1096), 'numpy.argmax', 'np.argmax', (['accuracies'], {}), '(accuracies)\n', (1084, 1096), True, 'import numpy as np\n'), ((1248, 1291), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k_vals[i]'}), '(n_neighbors=k_vals[i])\n', (1268, 1291), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((787, 822), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (807, 822), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1433, 1475), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1454, 1475), False, 'from sklearn.metrics import classification_report\n')] |
import databricks.koalas as ks
import numpy as np
# These function can return a Column Expression or a list of columns expression
# Must return None if the data type can not be handle
import pandas as pd
from pyspark.sql import functions as F
from optimus.engines.base.commons.functions import word_tokenize
from optimus.engines.base.dataframe.functions import DataFrameBaseFunctions
from optimus.engines.base.pandas.functions import PandasBaseFunctions
from optimus.helpers.core import val_to_list, one_list_to_val
from optimus.helpers.raiseit import RaiseIt
class SparkFunctions(PandasBaseFunctions, DataFrameBaseFunctions):
    """Koalas/Spark backend implementation of Optimus series/dataframe functions.

    Provides dtype coercions, elementwise math, date handling and a Spark
    column-expression histogram builder. Several helper names used below
    (``parse_spark_dtypes``, ``is_numeric``, ``create_buckets``,
    ``is_list_or_tuple``, ``datetime``) are not imported in this chunk —
    presumably supplied elsewhere in the module; verify before reuse.
    """
    # Engine handle used by the Optimus base classes to build new frames.
    _engine = ks
    def _to_float(self, series):
        """
        Convert the series values to floats.
        """
        return series.astype("float")
    def _to_integer(self, series, default=0):
        """
        Convert the series values to integers.

        NOTE(review): the ``default`` parameter is accepted but never used here.
        """
        return series.astype("integer")
    def _to_string(self, series):
        """
        Convert the series values to strings.
        """
        return series.astype("str")
    def _to_boolean(self, series):
        """
        Convert the series values to bool.
        """
        return series.astype("bool")
    # NOTE(review): `hist` takes no `self`/`@staticmethod` — when called on an
    # instance, `col_name` would receive the instance. Confirm intended usage.
    def hist(col_name, df, buckets, min_max=None, dtype=None):
        """
        Create a columns expression to calculate a column histogram
        :param col_name:
        :param df:
        :param buckets:
        :param min_max: Min and max value necessary to calculate the buckets
        :param dtype: Column datatype to calculate the related histogram. Int, String and Dates return different histograms
        :return:
        """
        PYSPARK_NUMERIC_TYPES = ["byte", "short", "big", "int", "double", "float"]
        def is_column_a(df, column, dtypes):
            """
            Check if column match a list of data types
            :param df: dataframe
            :param column: column to be compared with
            :param dtypes: types to be checked
            :return:
            """
            column = val_to_list(column)
            if len(column) > 1:
                RaiseIt.length_error(column, 1)
            data_type = tuple(val_to_list(parse_spark_dtypes(dtypes)))
            column = one_list_to_val(column)
            # Filter columns by data type
            return isinstance(df.schema[column].dataType, data_type)
        def create_exprs(_input_col, _buckets, _func):
            # Build one Spark aggregation expression per bucket; each counts the
            # rows whose transformed value falls inside the bucket bounds.
            def count_exprs(_exprs):
                return F.sum(F.when(_exprs, 1).otherwise(0))
            _exprs = []
            for i, b in enumerate(_buckets):
                lower = b["lower"]
                upper = b["upper"]
                if is_numeric(lower):
                    lower = round(lower, 2)
                if is_numeric(upper):
                    upper = round(upper, 2)
                if len(_buckets) == 1:
                    count = count_exprs(
                        (_func(_input_col) == lower))
                else:
                    # Last bucket is upper-inclusive; all others upper-exclusive.
                    if i == len(_buckets):
                        count = count_exprs(
                            (_func(_input_col) > lower) & (_func(_input_col) <= upper))
                    else:
                        count = count_exprs(
                            (_func(_input_col) >= lower) & (_func(_input_col) < upper))
                info = F.create_map(F.lit("count"), count.cast("int"), F.lit("lower"), F.lit(lower), F.lit("upper"),
                                    F.lit(upper)).alias(
                    "hist_agg" + "_" + _input_col + "_" + str(b["bucket"]))
                _exprs.append(info)
            _exprs = F.array(*_exprs).alias("hist" + _input_col)
            return _exprs
        def hist_numeric(_min_max, _buckets):
            # Compute min/max lazily from the dataframe if not supplied.
            if _min_max is None:
                _min_max = df.agg(F.min(col_name).alias("min"), F.max(col_name).alias("max")).to_dict()[0]
            if _min_max["min"] is not None and _min_max["max"] is not None:
                _buckets = create_buckets(_min_max["min"], _min_max["max"], _buckets)
                _exprs = create_exprs(col_name, _buckets, F.col)
            else:
                _exprs = None
            return _exprs
        def hist_string(_buckets):
            # String histograms bucket by string length (0..50 chars).
            _buckets = create_buckets(0, 50, _buckets)
            func = F.length
            return create_exprs(col_name, _buckets, func)
        def hist_date():
            # Date histograms produce one sub-histogram per calendar component.
            now = datetime.datetime.now()
            current_year = now.year
            oldest_year = 1950
            # Year
            _buckets = create_buckets(oldest_year, current_year, current_year - oldest_year)
            func = F.year
            year = create_exprs(col_name, _buckets, func)
            # Month
            _buckets = create_buckets(1, 12, 11)
            func = F.month
            month = create_exprs(col_name, _buckets, func)
            # Day
            _buckets = create_buckets(1, 31, 31)
            func = F.dayofweek
            day = create_exprs(col_name, _buckets, func)
            # Hour
            _buckets = create_buckets(0, 23, 23)
            func = F.hour
            hour = create_exprs(col_name, _buckets, func)
            # Min
            _buckets = create_buckets(0, 60, 60)
            func = F.minute
            minutes = create_exprs(col_name, _buckets, func)
            # Second
            _buckets = create_buckets(0, 60, 60)
            func = F.second
            second = create_exprs(col_name, _buckets, func)
            exprs = F.create_map(F.lit("years"), year, F.lit("months"), month, F.lit("weekdays"), day,
                                 F.lit("hours"), hour, F.lit("minutes"), minutes, F.lit("seconds"), second)
            return exprs
        # Dispatch on the declared dtype if given, otherwise on the Spark schema.
        if dtype is not None:
            col_dtype = dtype[col_name]["dtype"]
            if col_dtype == "int" or col_dtype == "decimal":
                exprs = hist_numeric(min_max, buckets)
            elif col_dtype == "string":
                exprs = hist_string(buckets)
            elif col_dtype == "date":
                exprs = hist_date()
            else:
                exprs = None
        else:
            if is_column_a(df, col_name, PYSPARK_NUMERIC_TYPES):
                exprs = hist_numeric(min_max, buckets)
            elif is_column_a(df, col_name, "str"):
                exprs = hist_string(buckets)
            elif is_column_a(df, col_name, "date") or is_column_a(df, col_name, "timestamp"):
                exprs = hist_date()
            else:
                exprs = None
        return exprs
    # NOTE(review): duplicate of the nested helper inside `hist`, also defined
    # without `self`/`@staticmethod`. Consider deduplicating.
    def create_exprs(_input_col, _buckets, _func):
        def count_exprs(_exprs):
            return F.sum(F.when(_exprs, 1).otherwise(0))
        _exprs = []
        for i, b in enumerate(_buckets):
            lower = b["lower"]
            upper = b["upper"]
            if is_numeric(lower):
                lower = round(lower, 2)
            if is_numeric(upper):
                upper = round(upper, 2)
            if len(_buckets) == 1:
                count = count_exprs(
                    (_func(_input_col) == lower))
            else:
                if i == len(_buckets):
                    count = count_exprs(
                        (_func(_input_col) > lower) & (_func(_input_col) <= upper))
                else:
                    count = count_exprs(
                        (_func(_input_col) >= lower) & (_func(_input_col) < upper))
            info = F.create_map(F.lit("count"), count.cast("int"), F.lit("lower"), F.lit(lower), F.lit("upper"),
                                F.lit(upper)).alias(
                "hist_agg" + "_" + _input_col + "_" + str(b["bucket"]))
            _exprs.append(info)
        _exprs = F.array(*_exprs).alias("hist" + _input_col)
        return _exprs
    @staticmethod
    def dask_to_compatible(dfd):
        """Convert a Dask dataframe into a Koalas dataframe via pandas."""
        from optimus.helpers.converter import dask_dataframe_to_pandas
        return ks.from_pandas(dask_dataframe_to_pandas(dfd))
    @staticmethod
    def new_df(*args, **kwargs):
        """Build a new Koalas dataframe with pandas' DataFrame constructor args."""
        return ks.from_pandas(pd.DataFrame(*args, **kwargs))
    @staticmethod
    def df_concat(df_list):
        """Row-concatenate a list of Koalas dataframes, resetting the index."""
        return ks.concat(df_list, axis=0, ignore_index=True)
    def word_tokenize(self, series):
        # Tokenize each string value; NaNs propagate (na_action=None).
        return self.to_string(series).map(word_tokenize, na_action=None)
    def count_zeros(self, series, *args):
        # Count exact zeros after float coercion.
        return int((self.to_float(series).values == 0).sum())
    def kurtosis(self, series):
        return self.to_float(series).kurtosis()
    def skew(self, series):
        return self.to_float(series).skew()
    def exp(self, series):
        return np.exp(self.to_float(series))
    def sqrt(self, series):
        return np.sqrt(self.to_float(series))
    def reciprocal(self, series):
        return np.reciprocal(self.to_float(series))
    def radians(self, series):
        return np.radians(self.to_float(series))
    def degrees(self, series):
        return np.degrees(self.to_float(series))
    def ln(self, series):
        # Natural logarithm.
        return np.log(self.to_float(series))
    def log(self, series, base=10):
        # Logarithm in an arbitrary base via change-of-base.
        return np.log(self.to_float(series)) / np.log(base)
    def sin(self, series):
        return np.sin(self.to_float(series))
    def cos(self, series):
        return np.cos(self.to_float(series))
    def tan(self, series):
        return np.tan(self.to_float(series))
    def asin(self, series):
        return np.arcsin(self.to_float(series))
    def acos(self, series):
        return np.arccos(self.to_float(series))
    def atan(self, series):
        return np.arctan(self.to_float(series))
    def sinh(self, series):
        # NOTE(review): np.arcsinh here looks like a bug — hyperbolic sine
        # should be np.sinh; `asinh` below uses the same function.
        return np.arcsinh(self.to_float(series))
    def cosh(self, series):
        return np.cosh(self.to_float(series))
    def tanh(self, series):
        return np.tanh(self.to_float(series))
    def asinh(self, series):
        return np.arcsinh(self.to_float(series))
    def acosh(self, series):
        return np.arccosh(self.to_float(series))
    def atanh(self, series):
        return np.arctanh(self.to_float(series))
    def floor(self, series):
        return np.floor(self.to_float(series))
    def ceil(self, series):
        return np.ceil(self.to_float(series))
    def normalize_chars(self, series):
        # Strip accents/diacritics: NFKD-decompose, drop non-ASCII, re-decode.
        return series.str.normalize("NFKD").str.encode('ascii', errors='ignore').str.decode('utf8')
    def format_date(self, series, current_format=None, output_format=None):
        """Parse dates with ``current_format`` and re-render as ``output_format``."""
        return ks.to_datetime(series, format=current_format,
                              errors="coerce").dt.strftime(output_format).reset_index(drop=True)
    def time_between(self, series, value=None, date_format=None):
        """Elapsed time between the series dates and ``value`` (default: now).

        ``date_format`` may be a (series_format, value_format) pair; ``value``
        may likewise carry its own format as a (value, format) pair.
        """
        value_date_format = date_format
        if is_list_or_tuple(date_format) and len(date_format) == 2:
            date_format, value_date_format = date_format
        if is_list_or_tuple(value) and len(value) == 2:
            value, value_date_format = value
        date = pd.to_datetime(series, format=date_format, errors="coerce")
        value = pd.Timestamp.now() if value is None else pd.to_datetime(value, format=value_date_format,
                                                                errors="coerce")
        return (value - date)
| [
"pyspark.sql.functions.lit",
"pandas.DataFrame",
"pandas.Timestamp.now",
"numpy.log",
"pyspark.sql.functions.array",
"databricks.koalas.to_datetime",
"optimus.helpers.raiseit.RaiseIt.length_error",
"pyspark.sql.functions.max",
"pyspark.sql.functions.min",
"pyspark.sql.functions.when",
"optimus.h... | [((8144, 8189), 'databricks.koalas.concat', 'ks.concat', (['df_list'], {'axis': '(0)', 'ignore_index': '(True)'}), '(df_list, axis=0, ignore_index=True)\n', (8153, 8189), True, 'import databricks.koalas as ks\n'), ((10920, 10979), 'pandas.to_datetime', 'pd.to_datetime', (['series'], {'format': 'date_format', 'errors': '"""coerce"""'}), "(series, format=date_format, errors='coerce')\n", (10934, 10979), True, 'import pandas as pd\n'), ((2052, 2071), 'optimus.helpers.core.val_to_list', 'val_to_list', (['column'], {}), '(column)\n', (2063, 2071), False, 'from optimus.helpers.core import val_to_list, one_list_to_val\n'), ((2246, 2269), 'optimus.helpers.core.one_list_to_val', 'one_list_to_val', (['column'], {}), '(column)\n', (2261, 2269), False, 'from optimus.helpers.core import val_to_list, one_list_to_val\n'), ((7938, 7967), 'optimus.helpers.converter.dask_dataframe_to_pandas', 'dask_dataframe_to_pandas', (['dfd'], {}), '(dfd)\n', (7962, 7967), False, 'from optimus.helpers.converter import dask_dataframe_to_pandas\n'), ((8051, 8080), 'pandas.DataFrame', 'pd.DataFrame', (['*args'], {}), '(*args, **kwargs)\n', (8063, 8080), True, 'import pandas as pd\n'), ((9113, 9125), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (9119, 9125), True, 'import numpy as np\n'), ((10996, 11014), 'pandas.Timestamp.now', 'pd.Timestamp.now', ([], {}), '()\n', (11012, 11014), True, 'import pandas as pd\n'), ((11037, 11101), 'pandas.to_datetime', 'pd.to_datetime', (['value'], {'format': 'value_date_format', 'errors': '"""coerce"""'}), "(value, format=value_date_format, errors='coerce')\n", (11051, 11101), True, 'import pandas as pd\n'), ((2121, 2152), 'optimus.helpers.raiseit.RaiseIt.length_error', 'RaiseIt.length_error', (['column', '(1)'], {}), '(column, 1)\n', (2141, 2152), False, 'from optimus.helpers.raiseit import RaiseIt\n'), ((5526, 5540), 'pyspark.sql.functions.lit', 'F.lit', (['"""years"""'], {}), "('years')\n", (5531, 5540), True, 'from pyspark.sql import 
functions as F\n'), ((5548, 5563), 'pyspark.sql.functions.lit', 'F.lit', (['"""months"""'], {}), "('months')\n", (5553, 5563), True, 'from pyspark.sql import functions as F\n'), ((5572, 5589), 'pyspark.sql.functions.lit', 'F.lit', (['"""weekdays"""'], {}), "('weekdays')\n", (5577, 5589), True, 'from pyspark.sql import functions as F\n'), ((5629, 5643), 'pyspark.sql.functions.lit', 'F.lit', (['"""hours"""'], {}), "('hours')\n", (5634, 5643), True, 'from pyspark.sql import functions as F\n'), ((5651, 5667), 'pyspark.sql.functions.lit', 'F.lit', (['"""minutes"""'], {}), "('minutes')\n", (5656, 5667), True, 'from pyspark.sql import functions as F\n'), ((5678, 5694), 'pyspark.sql.functions.lit', 'F.lit', (['"""seconds"""'], {}), "('seconds')\n", (5683, 5694), True, 'from pyspark.sql import functions as F\n'), ((7719, 7735), 'pyspark.sql.functions.array', 'F.array', (['*_exprs'], {}), '(*_exprs)\n', (7726, 7735), True, 'from pyspark.sql import functions as F\n'), ((3642, 3658), 'pyspark.sql.functions.array', 'F.array', (['*_exprs'], {}), '(*_exprs)\n', (3649, 3658), True, 'from pyspark.sql import functions as F\n'), ((6673, 6690), 'pyspark.sql.functions.when', 'F.when', (['_exprs', '(1)'], {}), '(_exprs, 1)\n', (6679, 6690), True, 'from pyspark.sql import functions as F\n'), ((7464, 7478), 'pyspark.sql.functions.lit', 'F.lit', (['"""count"""'], {}), "('count')\n", (7469, 7478), True, 'from pyspark.sql import functions as F\n'), ((7499, 7513), 'pyspark.sql.functions.lit', 'F.lit', (['"""lower"""'], {}), "('lower')\n", (7504, 7513), True, 'from pyspark.sql import functions as F\n'), ((7515, 7527), 'pyspark.sql.functions.lit', 'F.lit', (['lower'], {}), '(lower)\n', (7520, 7527), True, 'from pyspark.sql import functions as F\n'), ((7529, 7543), 'pyspark.sql.functions.lit', 'F.lit', (['"""upper"""'], {}), "('upper')\n", (7534, 7543), True, 'from pyspark.sql import functions as F\n'), ((7577, 7589), 'pyspark.sql.functions.lit', 'F.lit', (['upper'], {}), '(upper)\n', (7582, 
7589), True, 'from pyspark.sql import functions as F\n'), ((2504, 2521), 'pyspark.sql.functions.when', 'F.when', (['_exprs', '(1)'], {}), '(_exprs, 1)\n', (2510, 2521), True, 'from pyspark.sql import functions as F\n'), ((3371, 3385), 'pyspark.sql.functions.lit', 'F.lit', (['"""count"""'], {}), "('count')\n", (3376, 3385), True, 'from pyspark.sql import functions as F\n'), ((3406, 3420), 'pyspark.sql.functions.lit', 'F.lit', (['"""lower"""'], {}), "('lower')\n", (3411, 3420), True, 'from pyspark.sql import functions as F\n'), ((3422, 3434), 'pyspark.sql.functions.lit', 'F.lit', (['lower'], {}), '(lower)\n', (3427, 3434), True, 'from pyspark.sql import functions as F\n'), ((3436, 3450), 'pyspark.sql.functions.lit', 'F.lit', (['"""upper"""'], {}), "('upper')\n", (3441, 3450), True, 'from pyspark.sql import functions as F\n'), ((3488, 3500), 'pyspark.sql.functions.lit', 'F.lit', (['upper'], {}), '(upper)\n', (3493, 3500), True, 'from pyspark.sql import functions as F\n'), ((10425, 10487), 'databricks.koalas.to_datetime', 'ks.to_datetime', (['series'], {'format': 'current_format', 'errors': '"""coerce"""'}), "(series, format=current_format, errors='coerce')\n", (10439, 10487), True, 'import databricks.koalas as ks\n'), ((3826, 3841), 'pyspark.sql.functions.min', 'F.min', (['col_name'], {}), '(col_name)\n', (3831, 3841), True, 'from pyspark.sql import functions as F\n'), ((3856, 3871), 'pyspark.sql.functions.max', 'F.max', (['col_name'], {}), '(col_name)\n', (3861, 3871), True, 'from pyspark.sql import functions as F\n')] |
#!/usr/bin/env python
# coding: utf-8
"""Clusterless Decoding Analysis W-Track | Compute decoded position and
distance metrics based on CA1 Marks and associated body position |
Inputs: Marks, Posdlc, Task, Tetinfo | https://github.com/edeno/pose_analysis
"""
import logging
import os
import numpy as np
import xarray as xr
from loren_frank_data_processing import make_epochs_dataframe
from replay_trajectory_classification import ClusterlessClassifier
from sklearn.model_selection import KFold
from src.analysis import calculate_replay_distance
from src.load_data import load_data, make_track_graph
from src.parameters import (ANIMALS, EDGE_ORDER, EDGE_SPACING,
PROCESSED_DATA_DIR, classifier_parameters,
discrete_state_transition)
from trajectory_analysis_tools import (get_distance_metrics,
get_highest_posterior_threshold,
get_HPD_spatial_coverage,
get_trajectory_data)
# Configure root logging once at import: timestamped INFO-level messages.
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(level='INFO', format=FORMAT, datefmt='%d-%b-%y %H:%M:%S')
def run_analysis(epoch_key):
    """Decode one recording epoch and write results to PROCESSED_DATA_DIR.

    Pipeline: load marks/position for ``epoch_key``, fit a cross-validated
    clusterless classifier, compute causal/acausal distance and HPD-coverage
    metrics, then save the result dataset (.nc), the fitted model (.pkl) and
    the position info (.csv).

    :param epoch_key: (animal, day, epoch) tuple identifying the recording
    """
    logging.info(epoch_key)
    # Load Data
    # Specifiy animal, day, epoch and body position estimate to be used to
    # encode pos-mark relationship.
    logging.info("Loading data...")
    data = load_data(epoch_key,
                     position_to_linearize=['nose_x', 'nose_y'],
                     max_distance_from_well=30,
                     min_distance_traveled=50,
                     )
    track_graph, center_well_id = make_track_graph(epoch_key, ANIMALS)
    # Encoding uses only samples where the nose is moving at all (speed > 0).
    is_running = np.abs(data["position_info"].nose_vel) > 0
    # is_running = np.abs(data["position_info"].forepawR_vel) > 4
    # is_outbound = data["position_info"].task == "Outbound"
    # Calculate posterior
    # Builds the classifier and calculates the posterior estimates for each
    # bin. Default is 5x cross validation. Some concerns if that is appropriate
    # in 15 minute run epochs,but AJ checked that the overal posterior was
    # similar in 2x,3x, and 5x versions. Maybe stick to 3x for 15 minute data?
    cv = KFold()
    cv_classifier_clusterless_results = []
    logging.info("Decoding...")
    for fold_ind, (train, test) in enumerate(
            cv.split(data["position_info"].index)):
        logging.info(f"\tFold #{fold_ind + 1}")
        # train = train[is_outbound[train].values]
        cv_classifier = ClusterlessClassifier(**classifier_parameters)
        logging.info("\tFitting model...")
        cv_classifier.fit(
            position=data["position_info"].iloc[train].linear_position,
            multiunits=data["multiunits"].isel(time=train),
            is_training=is_running.iloc[train],
            track_graph=track_graph,
            center_well_id=center_well_id,
            edge_order=EDGE_ORDER,
            edge_spacing=EDGE_SPACING,
        )
        # Override the fitted transition with the shared parameter set.
        cv_classifier.discrete_state_transition_ = discrete_state_transition
        logging.info('\tPredicting posterior...')
        cv_classifier_clusterless_results.append(
            cv_classifier.predict(
                data["multiunits"].isel(time=test),
                # Convert timedelta index to seconds for the prediction axis.
                time=data["position_info"].iloc[test].index /
                np.timedelta64(1, "s"),
            )
        )
    # concatenate cv classifier results
    cv_classifier_clusterless_results = xr.concat(
        cv_classifier_clusterless_results, dim="time"
    )
    # Calculate Distance Metrics
    logging.info("Calculating metrics...")
    # Important calculate distance metrics. Loads Causal Posterior and
    # get_trajectory_data and get_distance_metrics to calculate ahead-behind
    # distance based on body_dir
    # CAUSAL
    posterior_causal = (cv_classifier_clusterless_results["causal_posterior"]
                        .sum("state", skipna=False))
    # extracting the peak of the posterior
    # NOTE(review): uses `cv_classifier` from the *last* CV fold — confirm the
    # decoder attributes needed here are fold-independent.
    trajectory_data = get_trajectory_data(
        posterior=posterior_causal,
        track_graph=track_graph,
        decoder=cv_classifier,
        position_info=data["position_info"],
        direction_variable="body_dir"
    )
    distance_metrics = get_distance_metrics(
        track_graph, *trajectory_data)
    # Signed distance: sign = ahead(+)/behind(-) the animal, magnitude = distance.
    ahead_behind_distance_causal = (
        distance_metrics.mental_position_ahead_behind_animal *
        distance_metrics.mental_position_distance_from_animal)
    # Calculate the corresponding 95% HPD credible interval
    hpd_threshold_95_causal = get_highest_posterior_threshold(
        posterior_causal.dropna("position"), coverage=0.95)
    spatial_coverage_95_causal = get_HPD_spatial_coverage(
        posterior_causal, hpd_threshold_95_causal)
    # Calculate the corresponding 50% HPD credible interval
    hpd_threshold_50_causal = get_highest_posterior_threshold(
        posterior_causal.dropna("position"), coverage=0.50)
    spatial_coverage_50_causal = get_HPD_spatial_coverage(
        posterior_causal, hpd_threshold_50_causal)
    # calculate distance metrics acausal posterior. Loads acausal posterior and
    # distance metrics. ACAUSAL
    posterior_acausal = (cv_classifier_clusterless_results["acausal_posterior"]
                         .sum("state", skipna=False))
    # extracting the peak of the posterior
    trajectory_data = get_trajectory_data(
        posterior=posterior_acausal,
        track_graph=track_graph,
        decoder=cv_classifier,
        position_info=data["position_info"],
        direction_variable="body_dir"
    )
    distance_metrics = get_distance_metrics(
        track_graph, *trajectory_data)
    ahead_behind_distance_acausal = (
        distance_metrics.mental_position_ahead_behind_animal *
        distance_metrics.mental_position_distance_from_animal)
    # ACAUSAL 95% CI
    hpd_threshold_95_acausal = get_highest_posterior_threshold(
        posterior_acausal.dropna("position"), coverage=0.95)
    spatial_coverage_95_acausal = get_HPD_spatial_coverage(
        posterior_acausal, hpd_threshold_95_acausal)
    # ACAUSAL 50% CI
    hpd_threshold_50_acausal = get_highest_posterior_threshold(
        posterior_acausal.dropna("position"), coverage=0.50)
    spatial_coverage_50_acausal = get_HPD_spatial_coverage(
        posterior_acausal, hpd_threshold_50_acausal)
    # WHILE WE ARE AT IT, ALSO A GOOD IDEA TO CALCULATE THE ABSOLUTE DISTANCE.
    # CAUSAL
    replay_distance_from_animal_position_causal = calculate_replay_distance(
        posterior=cv_classifier_clusterless_results.causal_posterior.sum(
            'state'),
        track_graph=track_graph,
        decoder=cv_classifier,
        position_2D=data['position_info'].loc[:, ["nose_x", "nose_y"]],
        track_segment_id=data['position_info'].loc[:, ["track_segment_id"]],
    )
    # WHILE WE ARE AT IT, ALSO A GOOD IDEA TO CALCULATE THE ABSOLUTE DISTANCE.
    # ACAUSAL
    replay_distance_from_animal_position_acausal = calculate_replay_distance(
        posterior=cv_classifier_clusterless_results.acausal_posterior.sum(
            'state'),
        track_graph=track_graph,
        decoder=cv_classifier,
        position_2D=data['position_info'].loc[:, ["nose_x", "nose_y"]],
        track_segment_id=data['position_info'].loc[:, ["track_segment_id"]],
    )
    # ### Save the distance and CI values with the classifier results
    # NOTE: `('time')` is just the string 'time' (parentheses without a comma
    # do not make a tuple) — xarray accepts a bare dim name, so this works.
    cv_classifier_clusterless_results[
        'abs_distance_from_animal_position_causal'] = (
            ('time'), replay_distance_from_animal_position_causal)
    cv_classifier_clusterless_results[
        'abs_distance_from_animal_position_acausal'] = (
            ('time'), replay_distance_from_animal_position_acausal)
    # maybe this will works and we can save both distances
    cv_classifier_clusterless_results[
        'rel_distance_from_animal_position_causal'] = (
            ('time'), ahead_behind_distance_causal)
    cv_classifier_clusterless_results[
        'rel_distance_from_animal_position_acausal'] = (
            ('time'), ahead_behind_distance_acausal)
    # get HPD estimate of the distance associated
    cv_classifier_clusterless_results['hpd_threshold_95_causal'] = (
        ('time'), hpd_threshold_95_causal)
    cv_classifier_clusterless_results['hpd_threshold_50_causal'] = (
        ('time'), hpd_threshold_50_causal)
    cv_classifier_clusterless_results['hpd_threshold_95_acausal'] = (
        ('time'), hpd_threshold_95_acausal)
    cv_classifier_clusterless_results['hpd_threshold_50_acausal'] = (
        ('time'), hpd_threshold_50_acausal)
    # get CI of the distance associated
    cv_classifier_clusterless_results['credible_interval_95_causal'] = (
        ('time'), spatial_coverage_95_causal)
    cv_classifier_clusterless_results['credible_interval_50_causal'] = (
        ('time'), spatial_coverage_50_causal)
    cv_classifier_clusterless_results['credible_interval_95_acausal'] = (
        ('time'), spatial_coverage_95_acausal)
    cv_classifier_clusterless_results['credible_interval_50_acausal'] = (
        ('time'), spatial_coverage_50_acausal)
    logging.info("Saving results...")
    # save the results as .nc format. ncread matlab can read these
    epoch_identifier = f"{epoch_key[0]}_{epoch_key[1]:02d}_{epoch_key[2]:02d}"
    cv_classifier_clusterless_results.to_netcdf(
        os.path.join(
            PROCESSED_DATA_DIR,
            (f"{epoch_identifier}_cv_classifier_clusterless_vel_0_nose_alltime"
             "_results.nc")
        )
    )
    # save the model
    cv_classifier.save_model(
        os.path.join(PROCESSED_DATA_DIR,
                     f"{epoch_identifier}_cv_classifier_clusterless_nose.pkl"))
    # Save position info
    data['position_info'].to_csv(
        os.path.join(PROCESSED_DATA_DIR,
                     f"{epoch_identifier }_position_info_nose.csv"))
def main():
    """Run the decoding analysis for every epoch listed for the configured animals."""
    for epoch_key in make_epochs_dataframe(ANIMALS).index:
        run_analysis(epoch_key)


if __name__ == "__main__":
    main()
| [
"logging.basicConfig",
"trajectory_analysis_tools.get_trajectory_data",
"numpy.abs",
"trajectory_analysis_tools.get_distance_metrics",
"os.path.join",
"src.load_data.make_track_graph",
"loren_frank_data_processing.make_epochs_dataframe",
"xarray.concat",
"numpy.timedelta64",
"trajectory_analysis_t... | [((1084, 1161), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': '"""INFO"""', 'format': 'FORMAT', 'datefmt': '"""%d-%b-%y %H:%M:%S"""'}), "(level='INFO', format=FORMAT, datefmt='%d-%b-%y %H:%M:%S')\n", (1103, 1161), False, 'import logging\n'), ((1197, 1220), 'logging.info', 'logging.info', (['epoch_key'], {}), '(epoch_key)\n', (1209, 1220), False, 'import logging\n'), ((1352, 1383), 'logging.info', 'logging.info', (['"""Loading data..."""'], {}), "('Loading data...')\n", (1364, 1383), False, 'import logging\n'), ((1395, 1516), 'src.load_data.load_data', 'load_data', (['epoch_key'], {'position_to_linearize': "['nose_x', 'nose_y']", 'max_distance_from_well': '(30)', 'min_distance_traveled': '(50)'}), "(epoch_key, position_to_linearize=['nose_x', 'nose_y'],\n max_distance_from_well=30, min_distance_traveled=50)\n", (1404, 1516), False, 'from src.load_data import load_data, make_track_graph\n'), ((1634, 1670), 'src.load_data.make_track_graph', 'make_track_graph', (['epoch_key', 'ANIMALS'], {}), '(epoch_key, ANIMALS)\n', (1650, 1670), False, 'from src.load_data import load_data, make_track_graph\n'), ((2205, 2212), 'sklearn.model_selection.KFold', 'KFold', ([], {}), '()\n', (2210, 2212), False, 'from sklearn.model_selection import KFold\n'), ((2260, 2287), 'logging.info', 'logging.info', (['"""Decoding..."""'], {}), "('Decoding...')\n", (2272, 2287), False, 'import logging\n'), ((3440, 3496), 'xarray.concat', 'xr.concat', (['cv_classifier_clusterless_results'], {'dim': '"""time"""'}), "(cv_classifier_clusterless_results, dim='time')\n", (3449, 3496), True, 'import xarray as xr\n'), ((3549, 3587), 'logging.info', 'logging.info', (['"""Calculating metrics..."""'], {}), "('Calculating metrics...')\n", (3561, 3587), False, 'import logging\n'), ((3980, 4151), 'trajectory_analysis_tools.get_trajectory_data', 'get_trajectory_data', ([], {'posterior': 'posterior_causal', 'track_graph': 'track_graph', 'decoder': 'cv_classifier', 
'position_info': "data['position_info']", 'direction_variable': '"""body_dir"""'}), "(posterior=posterior_causal, track_graph=track_graph,\n decoder=cv_classifier, position_info=data['position_info'],\n direction_variable='body_dir')\n", (3999, 4151), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((4214, 4265), 'trajectory_analysis_tools.get_distance_metrics', 'get_distance_metrics', (['track_graph', '*trajectory_data'], {}), '(track_graph, *trajectory_data)\n', (4234, 4265), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((4656, 4723), 'trajectory_analysis_tools.get_HPD_spatial_coverage', 'get_HPD_spatial_coverage', (['posterior_causal', 'hpd_threshold_95_causal'], {}), '(posterior_causal, hpd_threshold_95_causal)\n', (4680, 4723), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((4950, 5017), 'trajectory_analysis_tools.get_HPD_spatial_coverage', 'get_HPD_spatial_coverage', (['posterior_causal', 'hpd_threshold_50_causal'], {}), '(posterior_causal, hpd_threshold_50_causal)\n', (4974, 5017), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((5340, 5512), 'trajectory_analysis_tools.get_trajectory_data', 'get_trajectory_data', ([], {'posterior': 'posterior_acausal', 'track_graph': 'track_graph', 'decoder': 'cv_classifier', 'position_info': "data['position_info']", 'direction_variable': '"""body_dir"""'}), "(posterior=posterior_acausal, track_graph=track_graph,\n decoder=cv_classifier, position_info=data['position_info'],\n direction_variable='body_dir')\n", (5359, 5512), False, 'from trajectory_analysis_tools import get_distance_metrics, 
get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((5575, 5626), 'trajectory_analysis_tools.get_distance_metrics', 'get_distance_metrics', (['track_graph', '*trajectory_data'], {}), '(track_graph, *trajectory_data)\n', (5595, 5626), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((5982, 6051), 'trajectory_analysis_tools.get_HPD_spatial_coverage', 'get_HPD_spatial_coverage', (['posterior_acausal', 'hpd_threshold_95_acausal'], {}), '(posterior_acausal, hpd_threshold_95_acausal)\n', (6006, 6051), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((6243, 6312), 'trajectory_analysis_tools.get_HPD_spatial_coverage', 'get_HPD_spatial_coverage', (['posterior_acausal', 'hpd_threshold_50_acausal'], {}), '(posterior_acausal, hpd_threshold_50_acausal)\n', (6267, 6312), False, 'from trajectory_analysis_tools import get_distance_metrics, get_highest_posterior_threshold, get_HPD_spatial_coverage, get_trajectory_data\n'), ((9077, 9110), 'logging.info', 'logging.info', (['"""Saving results..."""'], {}), "('Saving results...')\n", (9089, 9110), False, 'import logging\n'), ((9858, 9888), 'loren_frank_data_processing.make_epochs_dataframe', 'make_epochs_dataframe', (['ANIMALS'], {}), '(ANIMALS)\n', (9879, 9888), False, 'from loren_frank_data_processing import make_epochs_dataframe\n'), ((1688, 1726), 'numpy.abs', 'np.abs', (["data['position_info'].nose_vel"], {}), "(data['position_info'].nose_vel)\n", (1694, 1726), True, 'import numpy as np\n'), ((2394, 2433), 'logging.info', 'logging.info', (['f"""\tFold #{fold_ind + 1}"""'], {}), "(f'\\tFold #{fold_ind + 1}')\n", (2406, 2433), False, 'import logging\n'), ((2509, 2555), 'replay_trajectory_classification.ClusterlessClassifier', 'ClusterlessClassifier', ([], {}), '(**classifier_parameters)\n', 
(2530, 2555), False, 'from replay_trajectory_classification import ClusterlessClassifier\n'), ((2564, 2598), 'logging.info', 'logging.info', (['"""\tFitting model..."""'], {}), "('\\tFitting model...')\n", (2576, 2598), False, 'import logging\n'), ((3055, 3096), 'logging.info', 'logging.info', (['"""\tPredicting posterior..."""'], {}), "('\\tPredicting posterior...')\n", (3067, 3096), False, 'import logging\n'), ((9314, 9434), 'os.path.join', 'os.path.join', (['PROCESSED_DATA_DIR', 'f"""{epoch_identifier}_cv_classifier_clusterless_vel_0_nose_alltime_results.nc"""'], {}), "(PROCESSED_DATA_DIR,\n f'{epoch_identifier}_cv_classifier_clusterless_vel_0_nose_alltime_results.nc'\n )\n", (9326, 9434), False, 'import os\n'), ((9544, 9638), 'os.path.join', 'os.path.join', (['PROCESSED_DATA_DIR', 'f"""{epoch_identifier}_cv_classifier_clusterless_nose.pkl"""'], {}), "(PROCESSED_DATA_DIR,\n f'{epoch_identifier}_cv_classifier_clusterless_nose.pkl')\n", (9556, 9638), False, 'import os\n'), ((9725, 9803), 'os.path.join', 'os.path.join', (['PROCESSED_DATA_DIR', 'f"""{epoch_identifier}_position_info_nose.csv"""'], {}), "(PROCESSED_DATA_DIR, f'{epoch_identifier}_position_info_nose.csv')\n", (9737, 9803), False, 'import os\n'), ((3312, 3334), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""s"""'], {}), "(1, 's')\n", (3326, 3334), True, 'import numpy as np\n')] |
"""
Trust Region Policy Optimization (Schulman et al, 2017)
"""
import time
import gym.wrappers.time_limit
import numpy as np
import scipy.signal
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm.auto import trange
from envs.vector import SyncVectorEnv
from utils.logx import EpochLogger
tfl = tfp.layers
tfd = tfp.distributions
def combined_shape(length, shape=None):
    """Build a buffer shape tuple: (length,) alone, or length prepended to shape."""
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def discount_cumsum(x, discount):
    """Compute discounted cumulative sums of a vector (rllab trick).

    Given x = [x0, x1, x2], returns
        [x0 + discount * x1 + discount^2 * x2,
         x1 + discount * x2,
         x2].

    Implemented as an IIR filter over the reversed input, which realizes the
    backward recurrence y[t] = x[t] + discount * y[t + 1].
    """
    reversed_sums = scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)
    return reversed_sums[::-1]
@tf.function
def flat_grads(grads):
    """Flatten a list of gradient tensors into a single 1-D tensor."""
    print(f'Tracing flat_grads grads={len(grads)}')
    flattened = []
    for grad in grads:
        flattened.append(tf.reshape(grad, shape=(-1,)))
    return tf.concat(flattened, axis=0)
@tf.function
def get_flat_params_from(model: tf.keras.Model):
    """Return all trainable variables of `model` concatenated into one 1-D tensor."""
    print(f'Tracing get_flat_params_from model={model.name}')
    pieces = []
    for p in model.trainable_variables:
        pieces.append(tf.reshape(p, shape=(-1,)))
    return tf.concat(pieces, axis=0)
@tf.function
def set_flat_params_to(model: tf.keras.Model, flat_params):
    """Write `flat_params` back into the model's trainable variables, in order.

    The slice order must match the concatenation order used by
    get_flat_params_from / flat_grads.
    """
    print(f'Tracing set_flat_params_to model={model.name}, flat_params={len(flat_params)}')
    offset = 0
    for param in model.trainable_variables:
        size = tf.reduce_prod(param.shape)
        new_value = tf.reshape(flat_params[offset:offset + size], shape=param.shape)
        param.assign(new_value)
        offset += size
class GAEBuffer:
    """
    A buffer for storing trajectories experienced by an agent interacting
    with `num_envs` vectorized environments, using Generalized Advantage
    Estimation (GAE-Lambda) for calculating the advantages of state-action
    pairs.

    All buffers are laid out as (num_envs, length, ...). `store` fills one
    time-step column across every environment at once; `finish_path` closes
    out trajectories per environment.
    """
    def __init__(self, obs_dim, act_dim, num_envs, length, gamma=0.99, lam=0.95):
        # Per-environment storage: observations, actions, GAE advantages,
        # rewards, returns (value-function targets) and value estimates.
        self.obs_buf = np.zeros(shape=(num_envs, length, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(shape=(num_envs, length, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.rew_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.ret_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.val_buf = np.zeros(shape=(num_envs, length), dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        self.num_envs = num_envs
        self.max_size = length
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.reset()
    def reset(self):
        # ptr: shared write pointer; path_start_idx[i]: index where env i's
        # current (unfinished) trajectory started.
        self.ptr, self.path_start_idx = 0, np.zeros(shape=(self.num_envs), dtype=np.int32)
    def store(self, obs, act, rew, val):
        """
        Append one timestep of agent-environment interaction (for every
        environment simultaneously) to the buffer.
        """
        assert self.ptr < self.max_size  # buffer has to have room so you can store
        self.obs_buf[:, self.ptr] = obs
        self.act_buf[:, self.ptr] = act
        self.rew_buf[:, self.ptr] = rew
        self.val_buf[:, self.ptr] = val
        self.ptr += 1
    def finish_path(self, dones, last_vals):
        """
        Call this at the end of a trajectory, or when one gets cut off
        by an epoch ending. This looks back in the buffer to where the
        trajectory started, and uses rewards and value estimates from
        the whole trajectory to compute advantage estimates with GAE-Lambda,
        as well as compute the rewards-to-go for each state, to use as
        the targets for the value function.
        The "last_val" argument should be 0 if the trajectory ended
        because the agent reached a terminal state (died), and otherwise
        should be V(s_T), the value function estimated for the last state.
        This allows us to bootstrap the reward-to-go calculation to account
        for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
        """
        for i in range(self.num_envs):
            if dones[i]:
                path_slice = slice(self.path_start_idx[i], self.ptr)
                # Append the bootstrap value so deltas/returns can look one
                # step past the trajectory's end.
                rews = np.append(self.rew_buf[i, path_slice], last_vals[i])
                vals = np.append(self.val_buf[i, path_slice], last_vals[i])
                # the next two lines implement GAE-Lambda advantage calculation:
                # delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), discounted by gamma*lam
                deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
                self.adv_buf[i, path_slice] = discount_cumsum(deltas, self.gamma * self.lam)
                # the next line computes rewards-to-go, to be targets for the value function
                self.ret_buf[i, path_slice] = discount_cumsum(rews, self.gamma)[:-1]
                self.path_start_idx[i] = self.ptr
    def get(self):
        """
        Call this at the end of an epoch to get all of the data from
        the buffer, with advantages appropriately normalized (shifted to have
        mean zero and std one). Also, resets some pointers in the buffer.
        """
        assert self.ptr == self.max_size  # buffer has to be full before you can get
        # Every trajectory must have been closed out via finish_path.
        assert np.all(self.path_start_idx == self.ptr)
        self.reset()
        # ravel the data: merge the env and time axes into one batch axis
        obs_buf = np.reshape(self.obs_buf, newshape=(-1, self.obs_dim))
        act_buf = np.reshape(self.act_buf, newshape=(-1, self.act_dim))
        ret_buf = np.reshape(self.ret_buf, newshape=(-1,))
        adv_buf = np.reshape(self.adv_buf, newshape=(-1,))
        # the next two lines implement the advantage normalization trick
        adv_mean, adv_std = np.mean(adv_buf), np.std(adv_buf)
        adv_buf = (adv_buf - adv_mean) / adv_std
        data = dict(obs=obs_buf, act=act_buf, ret=ret_buf, adv=adv_buf)
        return {k: tf.convert_to_tensor(v, dtype=tf.float32) for k, v in data.items()}
class SqueezeLayer(tf.keras.layers.Layer):
    """Keras layer that removes a single size-1 dimension (default: the last)."""
    def __init__(self, axis=-1):
        super(SqueezeLayer, self).__init__()
        # Which dimension to drop at call time.
        self.axis = axis
    def call(self, inputs, **kwargs):
        squeezed = tf.squeeze(inputs, axis=self.axis)
        return squeezed
def build_mlp(input_dim, output_dim, mlp_hidden, num_layers=3,
              activation='relu', out_activation=None, squeeze=False):
    """Build a sequential feed-forward MLP.

    Args:
        input_dim: size of the input feature vector.
        output_dim: size of the final Dense layer.
        mlp_hidden: width of each hidden layer.
        num_layers: total number of Dense layers (num_layers - 1 hidden + 1 output).
        activation: hidden-layer activation.
        out_activation: output-layer activation (None = linear).
        squeeze: if truthy and output_dim == 1, append a SqueezeLayer so the
            model outputs shape (batch,) instead of (batch, 1).

    Returns:
        A tf.keras.Sequential model.
    """
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.InputLayer(input_shape=(input_dim,)))
    for _ in range(num_layers - 1):
        model.add(tf.keras.layers.Dense(mlp_hidden, activation=activation))
    model.add(tf.keras.layers.Dense(output_dim, activation=out_activation))
    # Fix: the original `squeeze is True` identity test silently ignored any
    # truthy value other than the literal True (e.g. squeeze=1). Test
    # truthiness instead (PEP 8 explicitly warns against `is True`).
    if output_dim == 1 and squeeze:
        model.add(SqueezeLayer(axis=-1))
    return model
def make_normal_distribution(loc_params, scale_params):
    """Map raw network outputs to an independent Normal action distribution.

    Location goes through tanh; scale through softplus to keep it positive.
    The event dimensions cover the action vector (last axis).
    """
    scale = tf.math.softplus(scale_params)
    loc = tf.tanh(loc_params)
    base = tfd.Normal(loc=loc, scale=scale)
    return tfd.Independent(distribution=base, reinterpreted_batch_ndims=1)
class NormalActor(tf.keras.Model):
    """Gaussian policy head: an MLP produces the distribution's location
    parameters, combined with a learned state-independent log-std vector."""
    def __init__(self, obs_dim, act_dim, act_lim, mlp_hidden):
        super(NormalActor, self).__init__()
        self.net = build_mlp(input_dim=obs_dim, output_dim=act_dim, mlp_hidden=mlp_hidden)
        # One shared (state-independent) scale parameter per action dimension.
        self.log_std = tf.Variable(initial_value=-0.5 * tf.ones(act_dim))
        self.pi_dist_layer = tfp.layers.DistributionLambda(
            make_distribution_fn=lambda t: make_normal_distribution(t, self.log_std))
        self.act_lim = act_lim
    def call(self, inputs):
        # Feed raw MLP outputs into the distribution layer.
        return self.pi_dist_layer(self.net(inputs))
class TRPOAgent(tf.keras.Model):
    """Trust Region Policy Optimization agent (also supports plain natural
    policy gradient via algo='npg').

    Holds a Gaussian policy (NormalActor) updated with conjugate-gradient
    natural-gradient steps, plus an MLP value function fit by MSE regression
    with Adam.
    """
    def __init__(self, obs_dim, act_dim, act_lim, mlp_hidden=64,
                 delta=0.01, vf_lr=1e-3, damping_coeff=0.1, cg_iters=10, backtrack_iters=10,
                 backtrack_coeff=0.8, train_v_iters=80, algo='npg'
                 ):
        """
        Args:
            obs_dim: observation dimension.
            act_dim: action dimension.
            act_lim: action magnitude bound (stored only; not applied here).
            mlp_hidden: hidden width for policy and value MLPs.
            delta: KL trust-region radius.
            vf_lr: value-function learning rate (Adam).
            damping_coeff: Tikhonov damping added in the Hessian-vector product.
            cg_iters: max conjugate-gradient iterations.
            backtrack_iters: max line-search steps (algo='trpo' only).
            backtrack_coeff: line-search shrink factor (algo='trpo' only).
            train_v_iters: value-function gradient steps per update.
            algo: 'npg' (single full step) or 'trpo' (backtracking line search).
        """
        super(TRPOAgent, self).__init__()
        self.policy_net = NormalActor(obs_dim=obs_dim, act_dim=act_dim,
                                      act_lim=act_lim, mlp_hidden=mlp_hidden)
        self.v_optimizer = tf.keras.optimizers.Adam(learning_rate=vf_lr)
        self.value_net = build_mlp(input_dim=obs_dim, output_dim=1, squeeze=True, mlp_hidden=mlp_hidden)
        self.value_net.compile(optimizer=self.v_optimizer, loss='mse')
        self.delta = delta
        self.damping_coeff = damping_coeff
        self.cg_iters = cg_iters
        self.backtrack_iters = backtrack_iters
        self.backtrack_coeff = backtrack_coeff
        self.train_v_iters = train_v_iters
        self.algo = algo
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.act_lim = act_lim
        self.logger = None
    def _build_tf_function(self):
        # Wrap act_batch in a tf.function with a fixed input signature to
        # avoid retracing. NOTE(review): not invoked anywhere in this file's
        # visible code — confirm callers before removing.
        self.act_batch = tf.function(func=self.act_batch, input_signature=[
            tf.TensorSpec(shape=[None, self.obs_dim], dtype=tf.float32)
        ])
    def set_logger(self, logger):
        self.logger = logger
    def log_tabular(self):
        # Register this agent's diagnostics with the EpochLogger-style logger.
        self.logger.log_tabular('LossPi', average_only=True)
        self.logger.log_tabular('LossV', average_only=True)
        self.logger.log_tabular('KL', average_only=True)
        self.logger.log_tabular('DeltaLossPi', average_only=True)
        self.logger.log_tabular('DeltaLossV', average_only=True)
        self.logger.log_tabular('BacktrackIters', average_only=True)
    def call(self, inputs, training=None, mask=None):
        # Sample an action from the current policy distribution.
        pi_distribution = self.policy_net(inputs)
        pi_action = pi_distribution.sample()
        return pi_action
    def act_batch(self, obs):
        """Sample actions and compute value estimates for a batch of observations."""
        pi_distribution = self.policy_net(obs)
        pi_action = pi_distribution.sample()
        v = self.value_net(obs)
        return pi_action, v
    def _compute_kl(self, obs, old_pi):
        # Mean KL between current policy and the frozen pre-update policy.
        # NOTE(review): written as KL(pi_new || old_pi); classic TRPO papers
        # use KL(old || new) — direction kept as-is, confirm intent.
        pi = self.policy_net(obs)
        kl_loss = tfp.distributions.kl_divergence(pi, old_pi)
        kl_loss = tf.reduce_mean(kl_loss)
        return kl_loss
    def _compute_loss_pi(self, obs, act, logp, adv):
        # Surrogate objective: -E[ratio * advantage], where
        # ratio = pi_new(a|s) / pi_old(a|s) computed in log space.
        distribution = self.policy_net(obs)
        log_prob = distribution.log_prob(act)
        negative_approx_kl = log_prob - logp
        ratio = tf.exp(negative_approx_kl)
        surr1 = ratio * adv
        policy_loss = -tf.reduce_mean(surr1, axis=0)
        return policy_loss
    def _compute_gradient(self, obs, act, logp, adv):
        """Return the flattened policy gradient and the surrogate loss."""
        # compute pi gradients
        with tf.GradientTape() as tape:
            policy_loss = self._compute_loss_pi(obs, act, logp, adv)
        grads = tape.gradient(policy_loss, self.policy_net.trainable_variables)
        grads = flat_grads(grads)
        # flat grads
        return grads, policy_loss
    def _hessian_vector_product(self, obs, p):
        """Compute (H + damping * I) @ p, where H is the Hessian of the mean KL.

        Uses the double-tape trick: grad of (grad(KL) . p). The damping term
        regularizes the conjugate-gradient solve.
        """
        # compute Hx
        old_pi = self.policy_net(obs)
        with tf.GradientTape() as t2:
            with tf.GradientTape() as t1:
                kl = self._compute_kl(obs, old_pi)
            inner_grads = t1.gradient(kl, self.policy_net.trainable_variables)
            # flat gradients
            inner_grads = flat_grads(inner_grads)
            kl_v = tf.reduce_sum(inner_grads * p)
        grads = t2.gradient(kl_v, self.policy_net.trainable_variables)
        grads = flat_grads(grads)
        _Avp = grads + p * self.damping_coeff
        return _Avp
    @tf.function
    def _conjugate_gradients(self, obs, b, nsteps, residual_tol=1e-10):
        """Approximately solve H x = b via conjugate gradients.

        The matrix H is never materialized; it is applied through
        _hessian_vector_product.

        Args:
            obs: observations used to evaluate the KL Hessian.
            b: right-hand-side vector (the flattened policy gradient).
            nsteps: max number of CG iterations.
            residual_tol: early-stop threshold on the squared residual norm.
        Returns:
            The approximate solution x ≈ H^{-1} b.
        """
        print(f'Tracing _conjugate_gradients b={b}, nsteps={nsteps}')
        x = tf.zeros_like(b)
        r = tf.identity(b)
        p = tf.identity(b)
        rdotr = tf.tensordot(r, r, axes=1)
        for _ in tf.range(nsteps):
            _Avp = self._hessian_vector_product(obs, p)
            # compute conjugate gradient
            alpha = rdotr / tf.tensordot(p, _Avp, axes=1)
            x += alpha * p
            r -= alpha * _Avp
            new_rdotr = tf.tensordot(r, r, axes=1)
            betta = new_rdotr / rdotr
            p = r + betta * p
            rdotr = new_rdotr
            if rdotr < residual_tol:
                break
        return x
    def _compute_natural_gradient(self, obs, act, logp, adv):
        """Return the scaled natural-gradient step and the pre-update loss.

        Step size alpha = sqrt(2 * delta / (x^T H x)) guarantees the
        quadratic KL approximation stays within the trust region delta.
        """
        print(f'Tracing _compute_natural_gradient with obs={obs}, act={act}, logp={logp}, adv={adv}')
        grads, policy_loss = self._compute_gradient(obs, act, logp, adv)
        x = self._conjugate_gradients(obs, grads, self.cg_iters)
        alpha = tf.sqrt(2. * self.delta / (tf.tensordot(x, self._hessian_vector_product(obs, x),
                                                 axes=1) + 1e-8))
        return alpha * x, policy_loss
    def _set_and_eval(self, obs, act, logp, adv, old_params, old_pi, natural_gradient, step):
        # Apply a candidate update (old - step * nat_grad) in place and
        # evaluate the resulting KL and surrogate loss.
        new_params = old_params - natural_gradient * step
        set_flat_params_to(self.policy_net, new_params)
        loss_pi = self._compute_loss_pi(obs, act, logp, adv)
        kl_loss = self._compute_kl(obs, old_pi)
        return kl_loss, loss_pi
    @tf.function
    def _update_actor(self, obs, act, adv):
        """One policy update: NPG single step or TRPO backtracking line search."""
        print(f'Tracing _update_actor with obs={obs}, act={act}, adv={adv}')
        old_params = get_flat_params_from(self.policy_net)
        old_pi = self.policy_net(obs)
        logp = old_pi.log_prob(act)
        natural_gradient, pi_l_old = self._compute_natural_gradient(obs, act, logp, adv)
        if self.algo == 'npg':
            # npg has no backtracking or hard kl constraint enforcement
            kl, pi_l_new = self._set_and_eval(obs, act, logp, adv, old_params, old_pi,
                                              natural_gradient, step=1.)
            j = tf.constant(value=0, dtype=tf.int32)
        elif self.algo == 'trpo':
            # trpo augments npg with backtracking line search, hard kl
            pi_l_new = tf.zeros(shape=(), dtype=tf.float32)
            kl = tf.zeros(shape=(), dtype=tf.float32)
            for j in tf.range(self.backtrack_iters):
                # Shrink the step geometrically: backtrack_coeff ** j.
                steps = tf.pow(self.backtrack_coeff, tf.cast(j, dtype=tf.float32))
                kl, pi_l_new = self._set_and_eval(obs, act, logp, adv, old_params, old_pi,
                                                  natural_gradient, step=steps)
                # Accept the first step satisfying both the KL constraint and
                # a non-increasing surrogate loss.
                if kl <= self.delta and pi_l_new <= pi_l_old:
                    tf.print('Accepting new params at step', j, 'of line search.')
                    break
                if j == self.backtrack_iters - 1:
                    tf.print('Line search failed! Keeping old params.')
                    kl, pi_l_new = self._set_and_eval(obs, act, logp, adv, old_params, old_pi,
                                                      natural_gradient, step=0.)
        info = dict(
            LossPi=pi_l_old, KL=kl,
            DeltaLossPi=(pi_l_new - pi_l_old),
            BacktrackIters=j
        )
        return info
    def update(self, obs, act, ret, adv):
        """Update policy (natural gradient / TRPO) and value function (regression)."""
        assert tf.is_tensor(obs), f'obs must be a tf tensor. Got {obs}'
        info = self._update_actor(obs, act, adv)
        # Convert traced tensors to numpy scalars for logging.
        for key, item in info.items():
            info[key] = item.numpy()
        # train the value network
        v_l_old = self.value_net.evaluate(x=obs, y=ret, verbose=False)
        for i in range(self.train_v_iters):
            loss_v = self.value_net.train_on_batch(x=obs, y=ret)
        info['LossV'] = v_l_old
        info['DeltaLossV'] = loss_v - v_l_old
        # Log changes from update
        self.logger.store(**info)
def trpo(env_name, env_fn=None, mlp_hidden=128, seed=0,
         steps_per_epoch=5000, epochs=200, gamma=0.99, delta=0.01, vf_lr=1e-3,
         train_v_iters=80, damping_coeff=0.1, cg_iters=10, backtrack_iters=10,
         backtrack_coeff=0.8, lam=0.97, max_ep_len=1000, logger_kwargs=dict(),
         save_freq=10, algo='trpo'):
    """Train a TRPO (or NPG) agent on `env_name`.

    `steps_per_epoch` must be a multiple of `max_ep_len`; their ratio fixes
    how many synchronized environment copies are rolled out in parallel.

    NOTE(review): `logger_kwargs=dict()` is a mutable default argument, and
    `save_freq` is accepted but never used in this body.
    """
    if env_fn is None:
        env_fn = lambda: gym.make(env_name)
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    # Random seed
    tf.random.set_seed(seed)
    np.random.seed(seed)
    # Instantiate environment
    assert steps_per_epoch % max_ep_len == 0
    num_envs = steps_per_epoch // max_ep_len
    env = SyncVectorEnv(env_fns=[env_fn for _ in range(num_envs)])
    env.seed(seed)
    # Probe a throwaway env instance for space shapes.
    dummy_env = env_fn()
    obs_dim = dummy_env.observation_space.shape[0]
    act_dim = dummy_env.action_space.shape[0]
    act_lim = dummy_env.action_space.high[0]
    del dummy_env
    assert act_lim == 1., f'act_lim must be 1. Got {act_lim}'
    # Instantiate policy
    agent = TRPOAgent(obs_dim=obs_dim, act_dim=act_dim, act_lim=act_lim, mlp_hidden=mlp_hidden,
                      delta=delta, vf_lr=vf_lr, damping_coeff=damping_coeff, cg_iters=cg_iters,
                      backtrack_iters=backtrack_iters, backtrack_coeff=backtrack_coeff,
                      train_v_iters=train_v_iters, algo=algo)
    agent.set_logger(logger)
    buffer = GAEBuffer(obs_dim=obs_dim, act_dim=act_dim, num_envs=num_envs, length=max_ep_len,
                      gamma=gamma, lam=lam)
    def collect_trajectories():
        # Roll out max_ep_len steps in every parallel env, filling `buffer`.
        obs = env.reset()
        ep_ret = np.zeros(shape=num_envs, dtype=np.float32)
        ep_len = np.zeros(shape=num_envs, dtype=np.int32)
        for t in trange(max_ep_len, desc='Collecting'):
            act, val = agent.act_batch(tf.convert_to_tensor(obs, dtype=tf.float32))
            act = act.numpy()
            val = val.numpy()
            obs2, rew, dones, infos = env.step(act)
            buffer.store(obs, act, rew, val)
            logger.store(VVals=val)
            ep_ret += rew
            ep_len += 1
            # There are four cases there:
            # 1. if done is False. Bootstrap (truncated due to trajectory length)
            # 2. if done is True, if TimeLimit.truncated not in info. Don't bootstrap (didn't truncate)
            # 3. if done is True, if TimeLimit.truncated in info, if it is True, Bootstrap (true truncated)
            # 4. if done is True, if TimeLimit.truncated in info, if it is False. Don't bootstrap (same time)
            if t == max_ep_len - 1:
                time_truncated_dones = np.array([info.get('TimeLimit.truncated', False) for info in infos],
                                                dtype=np.bool_)
                # need to finish path for all the environments
                last_vals = agent.value_net.predict(obs2)
                # Zero the bootstrap only for true terminals (done and not
                # time-truncated); everything else bootstraps V(s_T).
                last_vals = last_vals * np.logical_or(np.logical_not(dones), time_truncated_dones)
                buffer.finish_path(dones=np.ones(shape=num_envs, dtype=np.bool_),
                                   last_vals=last_vals)
                logger.store(EpRet=ep_ret[dones], EpLen=ep_len[dones])
                obs = None
            elif np.any(dones):
                time_truncated_dones = np.array([info.get('TimeLimit.truncated', False) for info in infos],
                                                dtype=np.bool_)
                # Bootstrap only the envs whose episodes ended by time limit.
                last_vals = agent.value_net.predict(obs2) * time_truncated_dones
                buffer.finish_path(dones=dones,
                                   last_vals=last_vals)
                logger.store(EpRet=ep_ret[dones], EpLen=ep_len[dones])
                ep_ret[dones] = 0.
                ep_len[dones] = 0
                obs = env.reset_done()
            else:
                obs = obs2
    start_time = time.time()
    # Main loop: alternate data collection and policy/value updates.
    for epoch in range(epochs):
        collect_trajectories()
        agent.update(**buffer.get())
        logger.log_tabular('Epoch', epoch + 1)
        logger.log_tabular('EpRet', with_min_and_max=True)
        logger.log_tabular('EpLen', average_only=True)
        logger.log_tabular('VVals', with_min_and_max=True)
        logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)
        agent.log_tabular()
        logger.dump_tabular()
if __name__ == '__main__':
    # Script entry point: parse CLI flags, derive logger config, launch training.
    import argparse
    from utils.run_utils import setup_logger_kwargs
    cli = argparse.ArgumentParser()
    cli.add_argument('--env_name', type=str, default='Hopper-v2')
    cli.add_argument('--seed', type=int, default=1)
    parsed = vars(cli.parse_args())
    exp_name = parsed['env_name'] + '_trpo_test'
    logger_kwargs = setup_logger_kwargs(exp_name=exp_name, data_dir='data', seed=parsed['seed'])
    trpo(**parsed, logger_kwargs=logger_kwargs)
| [
"tensorflow.tanh",
"tensorflow.reduce_sum",
"numpy.logical_not",
"tensorflow_probability.distributions.kl_divergence",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.math.softplus",
"numpy.mean",
"tensorflow.tensordot",
"num... | [((1055, 1079), 'tensorflow.concat', 'tf.concat', (['grads'], {'axis': '(0)'}), '(grads, axis=0)\n', (1064, 1079), True, 'import tensorflow as tf\n'), ((1301, 1326), 'tensorflow.concat', 'tf.concat', (['params'], {'axis': '(0)'}), '(params, axis=0)\n', (1310, 1326), True, 'import tensorflow as tf\n'), ((6315, 6336), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (6334, 6336), True, 'import tensorflow as tf\n'), ((6772, 6802), 'tensorflow.math.softplus', 'tf.math.softplus', (['scale_params'], {}), '(scale_params)\n', (6788, 6802), True, 'import tensorflow as tf\n'), ((6820, 6839), 'tensorflow.tanh', 'tf.tanh', (['loc_params'], {}), '(loc_params)\n', (6827, 6839), True, 'import tensorflow as tf\n'), ((16045, 16073), 'utils.logx.EpochLogger', 'EpochLogger', ([], {}), '(**logger_kwargs)\n', (16056, 16073), False, 'from utils.logx import EpochLogger\n'), ((16130, 16154), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (16148, 16154), True, 'import tensorflow as tf\n'), ((16159, 16179), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16173, 16179), True, 'import numpy as np\n'), ((19478, 19489), 'time.time', 'time.time', ([], {}), '()\n', (19487, 19489), False, 'import time\n'), ((20063, 20088), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20086, 20088), False, 'import argparse\n'), ((20272, 20374), 'utils.run_utils.setup_logger_kwargs', 'setup_logger_kwargs', ([], {'exp_name': "(args['env_name'] + '_trpo_test')", 'data_dir': '"""data"""', 'seed': "args['seed']"}), "(exp_name=args['env_name'] + '_trpo_test', data_dir=\n 'data', seed=args['seed'])\n", (20291, 20374), False, 'from utils.run_utils import setup_logger_kwargs\n'), ((473, 491), 'numpy.isscalar', 'np.isscalar', (['shape'], {}), '(shape)\n', (484, 491), True, 'import numpy as np\n'), ((995, 1024), 'tensorflow.reshape', 'tf.reshape', (['grad'], {'shape': '(-1,)'}), '(grad, shape=(-1,))\n', (1005, 
1024), True, 'import tensorflow as tf\n'), ((1220, 1246), 'tensorflow.reshape', 'tf.reshape', (['p'], {'shape': '(-1,)'}), '(p, shape=(-1,))\n', (1230, 1246), True, 'import tensorflow as tf\n'), ((1598, 1625), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['param.shape'], {}), '(param.shape)\n', (1612, 1625), True, 'import tensorflow as tf\n'), ((2110, 2171), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_envs, length, obs_dim)', 'dtype': 'np.float32'}), '(shape=(num_envs, length, obs_dim), dtype=np.float32)\n', (2118, 2171), True, 'import numpy as np\n'), ((2195, 2256), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_envs, length, act_dim)', 'dtype': 'np.float32'}), '(shape=(num_envs, length, act_dim), dtype=np.float32)\n', (2203, 2256), True, 'import numpy as np\n'), ((2280, 2332), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_envs, length)', 'dtype': 'np.float32'}), '(shape=(num_envs, length), dtype=np.float32)\n', (2288, 2332), True, 'import numpy as np\n'), ((2356, 2408), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_envs, length)', 'dtype': 'np.float32'}), '(shape=(num_envs, length), dtype=np.float32)\n', (2364, 2408), True, 'import numpy as np\n'), ((2432, 2484), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_envs, length)', 'dtype': 'np.float32'}), '(shape=(num_envs, length), dtype=np.float32)\n', (2440, 2484), True, 'import numpy as np\n'), ((2508, 2560), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_envs, length)', 'dtype': 'np.float32'}), '(shape=(num_envs, length), dtype=np.float32)\n', (2516, 2560), True, 'import numpy as np\n'), ((5240, 5279), 'numpy.all', 'np.all', (['(self.path_start_idx == self.ptr)'], {}), '(self.path_start_idx == self.ptr)\n', (5246, 5279), True, 'import numpy as np\n'), ((5344, 5397), 'numpy.reshape', 'np.reshape', (['self.obs_buf'], {'newshape': '(-1, self.obs_dim)'}), '(self.obs_buf, newshape=(-1, self.obs_dim))\n', (5354, 5397), True, 'import numpy as np\n'), ((5416, 5469), 'numpy.reshape', 'np.reshape', 
(['self.act_buf'], {'newshape': '(-1, self.act_dim)'}), '(self.act_buf, newshape=(-1, self.act_dim))\n', (5426, 5469), True, 'import numpy as np\n'), ((5488, 5528), 'numpy.reshape', 'np.reshape', (['self.ret_buf'], {'newshape': '(-1,)'}), '(self.ret_buf, newshape=(-1,))\n', (5498, 5528), True, 'import numpy as np\n'), ((5547, 5587), 'numpy.reshape', 'np.reshape', (['self.adv_buf'], {'newshape': '(-1,)'}), '(self.adv_buf, newshape=(-1,))\n', (5557, 5587), True, 'import numpy as np\n'), ((6133, 6167), 'tensorflow.squeeze', 'tf.squeeze', (['inputs'], {'axis': 'self.axis'}), '(inputs, axis=self.axis)\n', (6143, 6167), True, 'import tensorflow as tf\n'), ((6351, 6403), 'tensorflow.keras.layers.InputLayer', 'tf.keras.layers.InputLayer', ([], {'input_shape': '(input_dim,)'}), '(input_shape=(input_dim,))\n', (6377, 6403), True, 'import tensorflow as tf\n'), ((6531, 6591), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'activation': 'out_activation'}), '(output_dim, activation=out_activation)\n', (6552, 6591), True, 'import tensorflow as tf\n'), ((8166, 8211), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'vf_lr'}), '(learning_rate=vf_lr)\n', (8190, 8211), True, 'import tensorflow as tf\n'), ((9891, 9934), 'tensorflow_probability.distributions.kl_divergence', 'tfp.distributions.kl_divergence', (['pi', 'old_pi'], {}), '(pi, old_pi)\n', (9922, 9934), True, 'import tensorflow_probability as tfp\n'), ((9953, 9976), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_loss'], {}), '(kl_loss)\n', (9967, 9976), True, 'import tensorflow as tf\n'), ((10205, 10231), 'tensorflow.exp', 'tf.exp', (['negative_approx_kl'], {}), '(negative_approx_kl)\n', (10211, 10231), True, 'import tensorflow as tf\n'), ((11745, 11761), 'tensorflow.zeros_like', 'tf.zeros_like', (['b'], {}), '(b)\n', (11758, 11761), True, 'import tensorflow as tf\n'), ((11774, 11788), 'tensorflow.identity', 'tf.identity', (['b'], {}), '(b)\n', (11785, 
11788), True, 'import tensorflow as tf\n'), ((11801, 11815), 'tensorflow.identity', 'tf.identity', (['b'], {}), '(b)\n', (11812, 11815), True, 'import tensorflow as tf\n'), ((11832, 11858), 'tensorflow.tensordot', 'tf.tensordot', (['r', 'r'], {'axes': '(1)'}), '(r, r, axes=1)\n', (11844, 11858), True, 'import tensorflow as tf\n'), ((11876, 11892), 'tensorflow.range', 'tf.range', (['nsteps'], {}), '(nsteps)\n', (11884, 11892), True, 'import tensorflow as tf\n'), ((15087, 15104), 'tensorflow.is_tensor', 'tf.is_tensor', (['obs'], {}), '(obs)\n', (15099, 15104), True, 'import tensorflow as tf\n'), ((17250, 17292), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_envs', 'dtype': 'np.float32'}), '(shape=num_envs, dtype=np.float32)\n', (17258, 17292), True, 'import numpy as np\n'), ((17310, 17350), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_envs', 'dtype': 'np.int32'}), '(shape=num_envs, dtype=np.int32)\n', (17318, 17350), True, 'import numpy as np\n'), ((17368, 17405), 'tqdm.auto.trange', 'trange', (['max_ep_len'], {'desc': '"""Collecting"""'}), "(max_ep_len, desc='Collecting')\n", (17374, 17405), False, 'from tqdm.auto import trange\n'), ((1647, 1720), 'tensorflow.reshape', 'tf.reshape', (['flat_params[prev_ind:prev_ind + flat_size]'], {'shape': 'param.shape'}), '(flat_params[prev_ind:prev_ind + flat_size], shape=param.shape)\n', (1657, 1720), True, 'import tensorflow as tf\n'), ((2815, 2860), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.num_envs', 'dtype': 'np.int32'}), '(shape=self.num_envs, dtype=np.int32)\n', (2823, 2860), True, 'import numpy as np\n'), ((5689, 5705), 'numpy.mean', 'np.mean', (['adv_buf'], {}), '(adv_buf)\n', (5696, 5705), True, 'import numpy as np\n'), ((5707, 5722), 'numpy.std', 'np.std', (['adv_buf'], {}), '(adv_buf)\n', (5713, 5722), True, 'import numpy as np\n'), ((5863, 5904), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['v'], {'dtype': 'tf.float32'}), '(v, dtype=tf.float32)\n', (5883, 5904), True, 'import tensorflow as 
tf\n'), ((6459, 6515), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['mlp_hidden'], {'activation': 'activation'}), '(mlp_hidden, activation=activation)\n', (6480, 6515), True, 'import tensorflow as tf\n'), ((10283, 10312), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['surr1'], {'axis': '(0)'}), '(surr1, axis=0)\n', (10297, 10312), True, 'import tensorflow as tf\n'), ((10439, 10456), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10454, 10456), True, 'import tensorflow as tf\n'), ((10824, 10841), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10839, 10841), True, 'import tensorflow as tf\n'), ((11119, 11149), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(inner_grads * p)'], {}), '(inner_grads * p)\n', (11132, 11149), True, 'import tensorflow as tf\n'), ((12130, 12156), 'tensorflow.tensordot', 'tf.tensordot', (['r', 'r'], {'axes': '(1)'}), '(r, r, axes=1)\n', (12142, 12156), True, 'import tensorflow as tf\n'), ((13833, 13869), 'tensorflow.constant', 'tf.constant', ([], {'value': '(0)', 'dtype': 'tf.int32'}), '(value=0, dtype=tf.int32)\n', (13844, 13869), True, 'import tensorflow as tf\n'), ((4271, 4323), 'numpy.append', 'np.append', (['self.rew_buf[i, path_slice]', 'last_vals[i]'], {}), '(self.rew_buf[i, path_slice], last_vals[i])\n', (4280, 4323), True, 'import numpy as np\n'), ((4347, 4399), 'numpy.append', 'np.append', (['self.val_buf[i, path_slice]', 'last_vals[i]'], {}), '(self.val_buf[i, path_slice], last_vals[i])\n', (4356, 4399), True, 'import numpy as np\n'), ((10866, 10883), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10881, 10883), True, 'import tensorflow as tf\n'), ((12019, 12048), 'tensorflow.tensordot', 'tf.tensordot', (['p', '_Avp'], {'axes': '(1)'}), '(p, _Avp, axes=1)\n', (12031, 12048), True, 'import tensorflow as tf\n'), ((13998, 14034), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (14006, 14034), True, 'import 
tensorflow as tf\n'), ((14052, 14088), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '()', 'dtype': 'tf.float32'}), '(shape=(), dtype=tf.float32)\n', (14060, 14088), True, 'import tensorflow as tf\n'), ((14110, 14140), 'tensorflow.range', 'tf.range', (['self.backtrack_iters'], {}), '(self.backtrack_iters)\n', (14118, 14140), True, 'import tensorflow as tf\n'), ((17446, 17489), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['obs'], {'dtype': 'tf.float32'}), '(obs, dtype=tf.float32)\n', (17466, 17489), True, 'import tensorflow as tf\n'), ((18863, 18876), 'numpy.any', 'np.any', (['dones'], {}), '(dones)\n', (18869, 18876), True, 'import numpy as np\n'), ((7324, 7340), 'tensorflow.ones', 'tf.ones', (['act_dim'], {}), '(act_dim)\n', (7331, 7340), True, 'import tensorflow as tf\n'), ((8899, 8958), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[None, self.obs_dim]', 'dtype': 'tf.float32'}), '(shape=[None, self.obs_dim], dtype=tf.float32)\n', (8912, 8958), True, 'import tensorflow as tf\n'), ((14195, 14223), 'tensorflow.cast', 'tf.cast', (['j'], {'dtype': 'tf.float32'}), '(j, dtype=tf.float32)\n', (14202, 14223), True, 'import tensorflow as tf\n'), ((14478, 14540), 'tensorflow.print', 'tf.print', (['"""Accepting new params at step"""', 'j', '"""of line search."""'], {}), "('Accepting new params at step', j, 'of line search.')\n", (14486, 14540), True, 'import tensorflow as tf\n'), ((14638, 14689), 'tensorflow.print', 'tf.print', (['"""Line search failed! Keeping old params."""'], {}), "('Line search failed! Keeping old params.')\n", (14646, 14689), True, 'import tensorflow as tf\n'), ((18565, 18586), 'numpy.logical_not', 'np.logical_not', (['dones'], {}), '(dones)\n', (18579, 18586), True, 'import numpy as np\n'), ((18651, 18690), 'numpy.ones', 'np.ones', ([], {'shape': 'num_envs', 'dtype': 'np.bool_'}), '(shape=num_envs, dtype=np.bool_)\n', (18658, 18690), True, 'import numpy as np\n')] |
from SimpleNN import SimpleNN
import numpy as np
def _activation(x):
return 1/(1+np.exp(-x))
def _derivative(output):
return output*(1-output)
def test_SimpleNN():
    """Train a tiny 2-4-1 sigmoid network on XOR and sanity-check the outcome."""
    # XOR truth table: inputs shaped (4, 2), targets shaped (4, 1).
    X = np.array([[0,0],[0,1],[1,0],[1,1]]).reshape((4,2))
    T = np.array([[0],[1],[1],[0]]).reshape((4,1))
    network = SimpleNN.Network(2)
    # Fluent API: stack two dense layers, then train with gradient descent.
    iterations,errors = (network.dense('hidden_layer_1',4,_activation,_derivative)
                         .dense('output_layer',1,_activation,_derivative)
                         .train(X,T,learning_rate = 0.1))
    # Training must terminate within the iteration budget and report a history.
    assert iterations >= 0
    assert iterations <=20000
    assert isinstance(errors,list) | [
"numpy.exp",
"SimpleNN.SimpleNN.Network",
"numpy.array"
] | [((304, 323), 'SimpleNN.SimpleNN.Network', 'SimpleNN.Network', (['(2)'], {}), '(2)\n', (320, 323), False, 'from SimpleNN import SimpleNN\n'), ((86, 96), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (92, 96), True, 'import numpy as np\n'), ((183, 225), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (191, 225), True, 'import numpy as np\n'), ((242, 272), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (250, 272), True, 'import numpy as np\n')] |
import numpy as np
from pathlib import Path
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
# Unpack a .npz archive: each stored array becomes one text file named after
# its archive key, with the source file's stem used as the extension.
cli = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
cli.add_argument("--source", type=Path, required=True)
cli.add_argument("--output_dir", type=Path)
cli.add_argument("--keep_dir", action="store_true")
args = cli.parse_args()
source_file = args.source
# Default to writing next to the source archive.
output_dir = args.output_dir or source_file.parent
stem = source_file.stem
archive = np.load(source_file)
n = 0
for entry_name, array in archive.items():
    n += 1
    # Flatten nested archive keys unless --keep_dir asks to mirror them on disk.
    if not args.keep_dir:
        entry_name = entry_name.replace("/", "_")
    target = (output_dir / entry_name).with_suffix(f".{stem}")
    target.parent.mkdir(parents=True, exist_ok=True)
    print(target)
    np.savetxt(target, array)
print(f"unpacked {n} files.")
| [
"numpy.savetxt",
"numpy.load",
"argparse.ArgumentParser"
] | [((121, 182), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(formatter_class=ArgumentDefaultsHelpFormatter)\n', (135, 182), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((495, 515), 'numpy.load', 'np.load', (['source_file'], {}), '(source_file)\n', (502, 515), True, 'import numpy as np\n'), ((777, 807), 'numpy.savetxt', 'np.savetxt', (['output_path', 'value'], {}), '(output_path, value)\n', (787, 807), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 9 12:12:48 2021
@author: rdavi
Preprocess datasets, including silence trimming and spliting in 1s chunks
"""
# %% Import libraries
import os
import numpy as np
import opensmile
import pickle
import librosa
import matplotlib.pyplot as plt
# %% Define the dataset
# Select the corpus to preprocess by leaving exactly one `dataset` line
# uncommented; raw wav files are read from ../../data/raw/<dataset>_Emotions/.
dataset = 'DEMOS'
# dataset = 'RAVDESS'
# dataset = 'TESS'
# dataset = 'AEMOTION'
path = '../../data/raw/' +dataset+ '_Emotions/'
# %% Extract features
def load_wav(filename):
    """Load an audio file resampled to 16 kHz and trim silence.

    Only the first non-silent segment of the signal is kept.
    Returns the trimmed signal and its sample rate.
    """
    signal, sample_rate = librosa.load(filename, sr=16000)
    # Detect non-silent intervals and keep just the first one.
    intervals = librosa.effects.split(signal, top_db=20,
                                       frame_length=4096, hop_length=1)
    first_start, first_end = intervals[0]
    return signal[first_start:first_end], sample_rate
# Initialize opensmile feature set (eGeMAPSv02 low-level descriptors).
smile = opensmile.Smile(feature_set=opensmile.FeatureSet.eGeMAPSv02,
                feature_level=opensmile.FeatureLevel.LowLevelDescriptors)
# Sweep the dataset directory tree; each subdirectory is one emotion class.
lst = []
# Class-label counter. os.walk yields the root first, so starting at -2
# presumably makes the first class subdirectory map to label 0 — TODO confirm.
i = -2
duration = 3  # signal duration (seconds) kept from each file
for subdir, dirs, files in os.walk(path):
    i+=1
    print(subdir)
    print(i)
    for file in files:
        # Load and silence-trim the audio file.
        filename = os.path.join(subdir,file)
        data, Fs = load_wav(filename)
        # Fit the signal into a fixed-length chunk of `duration` seconds.
        N = int(np.floor(duration*Fs))  # number of samples in the chunk
        # Use zeros (not np.empty): short clips must be zero-padded, not
        # padded with uninitialized memory garbage.
        data_chunk = np.zeros(shape=(N))
        if np.size(data) > N:
            data = data[:N]  # truncate clips longer than the chunk
        data_chunk[:np.size(data)] = data
        # Extract openSMILE features for the chunk.
        X_smile = smile.process_signal(data_chunk, Fs)
        # Store the (features, class label) pair.
        arr = X_smile.values, i
        lst.append(arr)
# %% Save smile dataset
# Split the (features, label) pairs into parallel arrays and pickle them.
features, labels = zip(*lst)
X = np.asarray(features)
y = np.asarray(labels)
out_file = '../../data/processed/dataset_smile_' + dataset + '.pckl'
with open(out_file, 'wb') as f:
    pickle.dump([X, y], f)
print("All done!")
# %%
| [
"pickle.dump",
"numpy.size",
"numpy.asarray",
"os.path.join",
"numpy.floor",
"opensmile.Smile",
"numpy.empty",
"librosa.effects.split",
"os.walk",
"librosa.load"
] | [((811, 934), 'opensmile.Smile', 'opensmile.Smile', ([], {'feature_set': 'opensmile.FeatureSet.eGeMAPSv02', 'feature_level': 'opensmile.FeatureLevel.LowLevelDescriptors'}), '(feature_set=opensmile.FeatureSet.eGeMAPSv02, feature_level=\n opensmile.FeatureLevel.LowLevelDescriptors)\n', (826, 934), False, 'import opensmile\n'), ((1050, 1063), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1057, 1063), False, 'import os\n'), ((538, 570), 'librosa.load', 'librosa.load', (['filename'], {'sr': '(16000)'}), '(filename, sr=16000)\n', (550, 570), False, 'import librosa\n'), ((606, 678), 'librosa.effects.split', 'librosa.effects.split', (['audio'], {'top_db': '(20)', 'frame_length': '(4096)', 'hop_length': '(1)'}), '(audio, top_db=20, frame_length=4096, hop_length=1)\n', (627, 678), False, 'import librosa\n'), ((1693, 1706), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (1703, 1706), True, 'import numpy as np\n'), ((1708, 1721), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1718, 1721), True, 'import numpy as np\n'), ((1805, 1827), 'pickle.dump', 'pickle.dump', (['[X, y]', 'f'], {}), '([X, y], f)\n', (1816, 1827), False, 'import pickle\n'), ((1167, 1193), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (1179, 1193), False, 'import os\n'), ((1356, 1373), 'numpy.empty', 'np.empty', ([], {'shape': 'N'}), '(shape=N)\n', (1364, 1373), True, 'import numpy as np\n'), ((1277, 1300), 'numpy.floor', 'np.floor', (['(duration * Fs)'], {}), '(duration * Fs)\n', (1285, 1300), True, 'import numpy as np\n'), ((1387, 1400), 'numpy.size', 'np.size', (['data'], {}), '(data)\n', (1394, 1400), True, 'import numpy as np\n'), ((1454, 1467), 'numpy.size', 'np.size', (['data'], {}), '(data)\n', (1461, 1467), True, 'import numpy as np\n')] |
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from divmachines.utility.helper import check_random_state
def _get_cv(cv):
    """Look up a cross-validator class by its registry name.

    Parameters
    ----------
    cv: str
        Key into the module-level ``CROSS_VALIDATOR`` registry.

    Returns
    -------
    The cross-validator class registered under ``cv``.

    Raises
    ------
    ValueError
        If ``cv`` is not a registered cross-validator name.
    """
    try:
        return CROSS_VALIDATOR[cv]
    except KeyError:
        # Include the offending name so the caller can see what was wrong.
        raise ValueError("Consistent Cross Validator must be provided, "
                         "got %r" % (cv,))
class CrossValidator(metaclass=ABCMeta):
    """
    Abstract base class for data partitioning
    (cross-validation) strategies.

    Subclasses implement ``_iter_indices_mask`` to yield boolean test
    masks; ``split`` converts each mask into (train, test) index arrays.
    """
    def __init__(self):
        pass

    @abstractmethod
    def _iter_indices_mask(self, x, y, indices):
        raise NotImplementedError

    def split(self, x, y):
        """
        Data partitioning function,
        it returns the training and the test indexes
        Parameters
        ----------
        x: ndarray
            training samples
        y: ndarray
            target value for samples
        Returns
        -------
        train_index : ndarray
            The training set indices for that split.
        test_index : ndarray
            The testing set indices for that split.
        """
        all_idx = np.arange(len(x))
        for mask in self._iter_indices_mask(x, y, all_idx):
            yield all_idx[np.logical_not(mask)], all_idx[mask]
class KFold(CrossValidator):
    """
    K-fold cross validation strategy
    It divides the dataset into k independent folds
    and at each iteration considers one fold as the
    test set and the remaining folds as the training sets
    Parameters
    ----------
    folds: int, optional
        Number of folds to use for the k-fold cross validation.
        Minimum is 2 and default is 3.
    shuffle: boolean, optional
        Whether to shuffle the data before splitting into batches.
        By default it shuffles the data
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """
    def __init__(self, folds=3, shuffle=True, random_state=None):
        super(KFold, self).__init__()
        if folds < 2:
            raise ValueError("Number of folds too low, minimum value is 2")
        self._folds = folds
        self._shuffle = shuffle
        self._random_state = random_state

    def _iter_indices_mask(self, x, y, indices):
        # Yield one boolean test mask per fold.
        if self._shuffle:
            check_random_state(self._random_state).shuffle(indices)
        n_splits = self._folds
        # Builtin ``int``/``bool`` are used as dtypes: the ``np.int`` and
        # ``np.bool`` aliases were removed in NumPy 1.24 and raise
        # AttributeError on modern NumPy.
        fold_sizes = (len(x) // n_splits) * np.ones(n_splits, dtype=int)
        # Distribute the remainder over the first ``len(x) % n_splits`` folds.
        fold_sizes[:len(x) % n_splits] += 1
        current = 0
        mask = np.zeros(len(indices), dtype=bool)
        for fold_size in fold_sizes:
            start, stop = current, current + fold_size
            copy_mask = np.copy(mask)
            copy_mask[indices[start:stop]] = True
            current = stop
            yield copy_mask
class LeaveOneOut(CrossValidator):
    """
    Leave-One-Out cross-validator
    Provides train/test indices to split data in train/test sets. Each
    sample is used once as a test set (singleton) while the remaining
    samples form the training set.
    Parameters
    ----------
    shuffle: boolean, optional
        Whether to shuffle the data before splitting into batches.
        By default it shuffles the data
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """
    def __init__(self, shuffle=True, random_state=None):
        super(LeaveOneOut, self).__init__()
        self._shuffle = shuffle
        self._random_state = random_state

    def _iter_indices_mask(self, x, y, indices):
        # Yield one boolean mask per sample, marking it as the test set.
        if self._shuffle:
            check_random_state(self._random_state).shuffle(indices)
        # Builtin ``bool`` dtype: the ``np.bool`` alias was removed in
        # NumPy 1.24 and raises AttributeError on modern NumPy.
        mask = np.zeros(len(indices), dtype=bool)
        for i in indices:
            new_mask = mask.copy()
            new_mask[i] = True
            yield new_mask
class NaiveHoldOut(CrossValidator):
    """
    Naive Hold-Out cross-validator
    Provides train/test indices to split data in train/test sets.
    The partitioning is performed by randomly withholding some ratings
    for some of the users.
    NOTE: the filtering that removed users/items unseen in the train set
    from the test set is currently disabled, so the produced test splits
    may contain cold-start users and items.
    Parameters
    ----------
    ratio: float, optional
        Ratio between the train set .
        For instance, 0.7 means that the train set is 70% of the
        original dataset, while the test set is 30% of it.
        Default is 80% for the train set and 20% for the test set.
    times: int, optional
        Number of times to run Hold-out cross validation.
        Higher values of it result in less variance in the result score.
        Default is 10.
    user_idx: int, optional
        Indicates the user column index in the transaction data.
        Default is 0.
    item_idx: int, optional
        Indicates the item column index in the transaction data
        Default is 1.
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """
    def __init__(self,
                 ratio=0.8,
                 times=10,
                 user_idx=0,
                 item_idx=1,
                 random_state=None):
        super(NaiveHoldOut, self).__init__()
        self._times = times
        self._ratio = ratio
        self._user_idx = user_idx
        self._item_idx = item_idx
        self._random_state = random_state

    def split(self, x, y):
        """
        Yield ``times`` random (train_index, test_index) pairs.
        The first ``ratio`` fraction of the shuffled indices forms the
        train set, the remainder the test set.
        """
        # len(x) gives the row count directly; no need to build a
        # throwaway pandas DataFrame just to read its shape.
        n_samples = len(x)
        indices = np.arange(n_samples)
        # The split point is loop-invariant, so compute it once.
        train_size = int(n_samples * self._ratio)
        for _ in range(self._times):
            check_random_state(self._random_state).shuffle(indices)
            yield indices[:train_size], indices[train_size:]

    def _iter_indices_mask(self, x, y, indices):
        # Not used: this validator overrides ``split`` directly.
        pass
class UserHoldOut(CrossValidator):
    """
    User Hold-Out cross-validator
    Provides train/test indices to split data in train/test sets.
    The partitioning is performed by randomly withholding some ratings
    for all or some of the users.
    Parameters
    ----------
    ratio: float, optional
        Ratio between the train set .
        For instance, 0.7 means that the train set is 70% of the
        original dataset, while the test set is 30% of it.
        Default is 80% for the train set and 20% for the test set.
    times: int, optional
        Number of times to run Hold-out cross validation.
        Higher values of it result in less variance in the result score.
        Default is 10.
    user_idx: int, optional
        Indicates the user column index in the transaction data.
        Default is 0.
    item_idx: int, optional
        Indicates the item column index in the transaction data
        Default is 1.
    random_state : int, RandomState instance or None, optional, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``shuffle`` == True.
    """
    def __init__(self,
                 ratio=0.8,
                 times=10,
                 user_idx=0,
                 item_idx=1,
                 random_state=None):
        super(UserHoldOut, self).__init__()
        self._times = times
        self._ratio = ratio
        self._user_idx = user_idx
        self._item_idx = item_idx
        self._random_state = check_random_state(random_state)

    def _iter_indices_mask(self, x, y, indices):
        data = pd.DataFrame(x)[[self._user_idx, self._item_idx]]
        # Builtin ``bool`` dtype: the ``np.bool`` alias was removed in
        # NumPy 1.24 and raises AttributeError on modern NumPy.
        mask = np.zeros(data.shape[0], dtype=bool)
        for _ in range(self._times):
            copy_mask = np.copy(mask)
            # Group the ratings per user. Group on the configured user
            # column rather than a hard-coded 0, so that ``user_idx != 0``
            # no longer raises a KeyError.
            grouped = data.groupby(self._user_idx)
            for _, g in grouped:
                idx_shuffled = g.index.values.reshape(-1)
                n_observed = int((1 - self._ratio) * len(idx_shuffled))
                self._random_state.shuffle(idx_shuffled)
                if n_observed == 0:
                    # Too few ratings to withhold the requested fraction:
                    # randomly decide whether to put one rating in the test set.
                    if self._random_state.randn() < 0.5:
                        copy_mask[idx_shuffled[0]] = True
                else:
                    copy_mask[idx_shuffled[0:n_observed]] = True
            yield copy_mask
def create_cross_validator(cv):
    """
    Return an instance of a cross validator.
    Parameters
    ----------
    cv: string, :class:`divmachines.validate`, optional
        Determines the cross-validation splitting strategy.
    Returns
    -------
    cv: :class:`divmachines.validate.CrossValidator`
        An instance of a Cross Validation strategy
    """
    # A string names a registered strategy: instantiate it.
    if isinstance(cv, str):
        return _get_cv(cv)()
    # Otherwise the object itself must behave like a cross validator.
    if not hasattr(cv, "split"):
        raise ValueError("Input Cross Validator must be an "
                         "instance child of CrossValidator class")
    return cv
# Registry mapping the string names accepted by ``create_cross_validator``
# to their cross-validator classes.
CROSS_VALIDATOR = dict(
    kFold=KFold,
    leaveOneOut=LeaveOneOut,
    naiveHoldOut=NaiveHoldOut,
    userHoldOut=UserHoldOut
)
"numpy.copy",
"numpy.ones",
"divmachines.utility.helper.check_random_state",
"numpy.logical_not",
"numpy.zeros",
"pandas.DataFrame"
] | [((6365, 6380), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (6377, 6380), True, 'import pandas as pd\n'), ((9287, 9319), 'divmachines.utility.helper.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (9305, 9319), False, 'from divmachines.utility.helper import check_random_state\n'), ((9450, 9488), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {'dtype': 'np.bool'}), '(data.shape[0], dtype=np.bool)\n', (9458, 9488), True, 'import numpy as np\n'), ((2766, 2797), 'numpy.ones', 'np.ones', (['n_splits'], {'dtype': 'np.int'}), '(n_splits, dtype=np.int)\n', (2773, 2797), True, 'import numpy as np\n'), ((3031, 3044), 'numpy.copy', 'np.copy', (['mask'], {}), '(mask)\n', (3038, 3044), True, 'import numpy as np\n'), ((9385, 9400), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (9397, 9400), True, 'import pandas as pd\n'), ((9550, 9563), 'numpy.copy', 'np.copy', (['mask'], {}), '(mask)\n', (9557, 9563), True, 'import numpy as np\n'), ((1203, 1229), 'numpy.logical_not', 'np.logical_not', (['test_index'], {}), '(test_index)\n', (1217, 1229), True, 'import numpy as np\n'), ((2634, 2672), 'divmachines.utility.helper.check_random_state', 'check_random_state', (['self._random_state'], {}), '(self._random_state)\n', (2652, 2672), False, 'from divmachines.utility.helper import check_random_state\n'), ((4211, 4249), 'divmachines.utility.helper.check_random_state', 'check_random_state', (['self._random_state'], {}), '(self._random_state)\n', (4229, 4249), False, 'from divmachines.utility.helper import check_random_state\n'), ((6501, 6539), 'divmachines.utility.helper.check_random_state', 'check_random_state', (['self._random_state'], {}), '(self._random_state)\n', (6519, 6539), False, 'from divmachines.utility.helper import check_random_state\n')] |
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t5_layers."""
from absl.testing import absltest
from flax import linen as nn
from jax import numpy as jnp
from jax import random
import numpy as np
from flaxformer.architectures.t5 import t5_common_layers
from flaxformer.components import embedding
# Hyper-parameters shared by all of the test fixtures below.
EMBEDDING_DIM = 7
MLP_DIM = 32
NUM_HEADS = 2
NUM_LAYERS = 3
ACTIVATIONS = ('gelu',)
DROPOUT_RATE = 0.14
HEAD_DIM = 4
class T5BaseTest(absltest.TestCase):
  """Shape/configuration tests for the t5_common_layers factories."""

  def _assert_qkv_shapes(self, attention_params, q_shape, kv_shape=None):
    """Asserts query/key/value projection kernel shapes of one attention.

    Args:
      attention_params: Parameter dict of a single attention module.
      q_shape: Expected query kernel shape, [input_dim, head_dim * num_heads].
      kv_shape: Expected key/value kernel shape; defaults to `q_shape`
        (self-attention). Pass explicitly for cross-attention, where
        keys/values are projected from the encoder outputs.
    """
    if kv_shape is None:
      kv_shape = q_shape
    np.testing.assert_equal(q_shape,
                            np.shape(attention_params['query']['kernel']))
    np.testing.assert_equal(kv_shape,
                            np.shape(attention_params['key']['kernel']))
    np.testing.assert_equal(kv_shape,
                            np.shape(attention_params['value']['kernel']))

  def _assert_mlp_shapes(self, mlp_params, input_dim):
    """Asserts the wi/wo kernel shapes of one MLP block."""
    np.testing.assert_equal([input_dim, MLP_DIM],
                            np.shape(mlp_params['wi']['kernel']))
    np.testing.assert_equal([MLP_DIM, input_dim],
                            np.shape(mlp_params['wo']['kernel']))

  def _shared_embedder(self):
    """Builds the token embedder shared by the encoder/decoder tests."""
    return embedding.Embed(
        num_embeddings=5,
        features=EMBEDDING_DIM,
        cast_input_dtype=jnp.int32,
        dtype=jnp.float32,
        attend_dtype=jnp.float32,  # for logit training stability
        embedding_init=nn.initializers.normal(stddev=1.0),
        name='token_embedder')

  def test_encoder_layer(self):
    """encoder_layer honors head/MLP dims, activations and dropout rate."""
    layer = t5_common_layers.encoder_layer(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        activations=ACTIVATIONS,
        dropout_rate=DROPOUT_RATE)
    inputs = np.array(
        [
            # Batch 1.
            [[101, 183, 20, 75, 10]],
            # Batch 2.
            [[101, 392, 19, 7, 20]],
        ],
        dtype=np.int32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
    )
    input_inner_dim = 5
    # Validate that the QKV dims are being set appropriately.
    self._assert_qkv_shapes(variables['params']['attention'],
                            [input_inner_dim, HEAD_DIM * NUM_HEADS])
    # Validate that the MLP dims are being set appropriately.
    self._assert_mlp_shapes(variables['params']['mlp'], input_inner_dim)
    # Validate that the activations are being set.
    self.assertEqual(ACTIVATIONS, layer.mlp.activations)
    # Validate the dropout rate is being respected.
    self.assertEqual(DROPOUT_RATE, layer.attention.dropout_rate)
    self.assertEqual(DROPOUT_RATE, layer.mlp.intermediate_dropout_rate)
    self.assertEqual(0.0, layer.mlp.final_dropout_rate)

  def test_decoder_layer(self):
    """decoder_layer configures self- and cross-attention plus the MLP."""
    layer = t5_common_layers.decoder_layer(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        activations=ACTIVATIONS,
        dropout_rate=DROPOUT_RATE)
    targets = np.array(
        # Batch 1.
        [
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]],
            # Batch 2.
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]
        ],
        dtype=np.float32)
    encoded = np.array(
        # Batch 1.
        [
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
            # Batch 2.
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
        ],
        dtype=np.float32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        targets,
        enable_dropout=False,
        encoded=encoded,
    )
    input_inner_dim = 2
    expected_qkv_shape = [input_inner_dim, HEAD_DIM * NUM_HEADS]
    # Validate that the QKV dims are being set appropriately.
    # Both targets and encoded have inner dim 2 here, so the cross-attention
    # key/value shapes coincide with the self-attention ones.
    self._assert_qkv_shapes(variables['params']['self_attention'],
                            expected_qkv_shape)
    self._assert_qkv_shapes(variables['params']['encoder_decoder_attention'],
                            expected_qkv_shape)
    # Validate that the MLP dims are being set appropriately.
    self._assert_mlp_shapes(variables['params']['mlp'], input_inner_dim)
    # Validate that the activations are being set.
    self.assertEqual(ACTIVATIONS, layer.mlp.activations)
    # Validate the dropout rate is being respected.
    self.assertEqual(DROPOUT_RATE, layer.self_attention.dropout_rate)
    self.assertEqual(DROPOUT_RATE, layer.mlp.intermediate_dropout_rate)
    self.assertEqual(0.0, layer.mlp.final_dropout_rate)

  def test_encoder(self):
    """encoder stacks NUM_LAYERS layers around the shared token embedder."""
    layer = t5_common_layers.encoder(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        num_layers=NUM_LAYERS,
        shared_token_embedder=self._shared_embedder(),
        activations=ACTIVATIONS,
        dropout_rate=DROPOUT_RATE)
    inputs = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75],
            # Batch 2.
            [101, 392, 19, 7],
        ],
        dtype=np.int32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        inputs,
        enable_dropout=False,
    )
    # Validate that there are 3 encoder layers.
    self.assertContainsSubset(['layers_0', 'layers_1', 'layers_2'],
                              list(variables['params'].keys()))
    # Validate that the QKV dims are being passed appropriately.
    self._assert_qkv_shapes(variables['params']['layers_2']['attention'],
                            [EMBEDDING_DIM, HEAD_DIM * NUM_HEADS])
    # Validate that the MLP dims are being passed appropriately.
    self._assert_mlp_shapes(variables['params']['layers_2']['mlp'],
                            EMBEDDING_DIM)

  def test_decoder(self):
    """decoder stacks layers and cross-attends over the encoder outputs."""
    layer = t5_common_layers.decoder(
        num_heads=NUM_HEADS,
        head_dim=HEAD_DIM,
        mlp_dim=MLP_DIM,
        num_layers=NUM_LAYERS,
        shared_token_embedder=self._shared_embedder(),
        activations=('relu',),
        dropout_rate=0.1)
    decoder_input_tokens = np.array(
        [
            # Batch 1.
            [101, 183, 20, 75, 10],
            # Batch 2.
            [101, 392, 19, 7, 20],
        ],
        dtype=np.int32)
    encoder_outputs = np.array(
        # Batch 1.
        [
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]],
            # Batch 2.
            [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]
        ],
        dtype=np.float32)
    _, variables = layer.init_with_output(
        random.PRNGKey(0),
        encoder_outputs,
        decoder_input_tokens,
        enable_dropout=False,
    )
    # Validate that there are 3 decoder layers.
    self.assertContainsSubset(['layers_0', 'layers_1', 'layers_2'],
                              list(variables['params'].keys()))
    expected_qkv_shape = [EMBEDDING_DIM, HEAD_DIM * NUM_HEADS]
    # Validate that the QKV dims are being passed appropriately.
    self._assert_qkv_shapes(variables['params']['layers_2']['self_attention'],
                            expected_qkv_shape)
    # Cross-attention keys/values come from the 2-dim encoder outputs.
    encoder_inner_dim = 2
    self._assert_qkv_shapes(
        variables['params']['layers_2']['encoder_decoder_attention'],
        expected_qkv_shape,
        kv_shape=[encoder_inner_dim, HEAD_DIM * NUM_HEADS])
    # Validate that the MLP dims are being passed appropriately.
    self._assert_mlp_shapes(variables['params']['layers_2']['mlp'],
                            EMBEDDING_DIM)
# Run the test suite when executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"jax.random.PRNGKey",
"flaxformer.architectures.t5.t5_common_layers.decoder",
"absl.testing.absltest.main",
"flaxformer.architectures.t5.t5_common_layers.decoder_layer",
"numpy.array",
"flax.linen.initializers.normal",
"flaxformer.architectures.t5.t5_common_layers.encoder",
"numpy.shape",
"flaxforme... | [((10355, 10370), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (10368, 10370), False, 'from absl.testing import absltest\n'), ((1042, 1185), 'flaxformer.architectures.t5.t5_common_layers.encoder_layer', 't5_common_layers.encoder_layer', ([], {'num_heads': 'NUM_HEADS', 'head_dim': 'HEAD_DIM', 'mlp_dim': 'MLP_DIM', 'activations': 'ACTIVATIONS', 'dropout_rate': 'DROPOUT_RATE'}), '(num_heads=NUM_HEADS, head_dim=HEAD_DIM,\n mlp_dim=MLP_DIM, activations=ACTIVATIONS, dropout_rate=DROPOUT_RATE)\n', (1072, 1185), False, 'from flaxformer.architectures.t5 import t5_common_layers\n'), ((1236, 1313), 'numpy.array', 'np.array', (['[[[101, 183, 20, 75, 10]], [[101, 392, 19, 7, 20]]]'], {'dtype': 'np.int32'}), '([[[101, 183, 20, 75, 10]], [[101, 392, 19, 7, 20]]], dtype=np.int32)\n', (1244, 1313), True, 'import numpy as np\n'), ((2861, 3004), 'flaxformer.architectures.t5.t5_common_layers.decoder_layer', 't5_common_layers.decoder_layer', ([], {'num_heads': 'NUM_HEADS', 'head_dim': 'HEAD_DIM', 'mlp_dim': 'MLP_DIM', 'activations': 'ACTIVATIONS', 'dropout_rate': 'DROPOUT_RATE'}), '(num_heads=NUM_HEADS, head_dim=HEAD_DIM,\n mlp_dim=MLP_DIM, activations=ACTIVATIONS, dropout_rate=DROPOUT_RATE)\n', (2891, 3004), False, 'from flaxformer.architectures.t5 import t5_common_layers\n'), ((3056, 3220), 'numpy.array', 'np.array', (['[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]], [[1.0, 2.0],\n [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]], [[\n 1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]], dtype=np.\n float32)\n', (3064, 3220), True, 'import numpy as np\n'), ((3318, 3427), 'numpy.array', 'np.array', (['[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[1.0, 2.0], [3.0, 4.0], [\n 5.0, 6.0]]], dtype=np.float32)\n', (3326, 3427), True, 
'import numpy as np\n'), ((5750, 5954), 'flaxformer.architectures.t5.t5_common_layers.encoder', 't5_common_layers.encoder', ([], {'num_heads': 'NUM_HEADS', 'head_dim': 'HEAD_DIM', 'mlp_dim': 'MLP_DIM', 'num_layers': 'NUM_LAYERS', 'shared_token_embedder': 'shared_embedder', 'activations': 'ACTIVATIONS', 'dropout_rate': 'DROPOUT_RATE'}), '(num_heads=NUM_HEADS, head_dim=HEAD_DIM, mlp_dim=\n MLP_DIM, num_layers=NUM_LAYERS, shared_token_embedder=shared_embedder,\n activations=ACTIVATIONS, dropout_rate=DROPOUT_RATE)\n', (5774, 5954), False, 'from flaxformer.architectures.t5 import t5_common_layers\n'), ((6016, 6081), 'numpy.array', 'np.array', (['[[101, 183, 20, 75], [101, 392, 19, 7]]'], {'dtype': 'np.int32'}), '([[101, 183, 20, 75], [101, 392, 19, 7]], dtype=np.int32)\n', (6024, 6081), True, 'import numpy as np\n'), ((7765, 7958), 'flaxformer.architectures.t5.t5_common_layers.decoder', 't5_common_layers.decoder', ([], {'num_heads': 'NUM_HEADS', 'head_dim': 'HEAD_DIM', 'mlp_dim': 'MLP_DIM', 'num_layers': 'NUM_LAYERS', 'shared_token_embedder': 'shared_embedder', 'activations': "('relu',)", 'dropout_rate': '(0.1)'}), "(num_heads=NUM_HEADS, head_dim=HEAD_DIM, mlp_dim=\n MLP_DIM, num_layers=NUM_LAYERS, shared_token_embedder=shared_embedder,\n activations=('relu',), dropout_rate=0.1)\n", (7789, 7958), False, 'from flaxformer.architectures.t5 import t5_common_layers\n'), ((8034, 8107), 'numpy.array', 'np.array', (['[[101, 183, 20, 75, 10], [101, 392, 19, 7, 20]]'], {'dtype': 'np.int32'}), '([[101, 183, 20, 75, 10], [101, 392, 19, 7, 20]], dtype=np.int32)\n', (8042, 8107), True, 'import numpy as np\n'), ((8228, 8392), 'numpy.array', 'np.array', (['[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]], [[1.0, 2.0],\n [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]]'], {'dtype': 'np.float32'}), '([[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]], [[\n 1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]]], dtype=np.\n float32)\n', (8236, 8392), True, 
'import numpy as np\n'), ((1463, 1480), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1477, 1480), False, 'from jax import random\n'), ((1819, 1864), 'numpy.shape', 'np.shape', (["attention_params['query']['kernel']"], {}), "(attention_params['query']['kernel'])\n", (1827, 1864), True, 'import numpy as np\n'), ((1942, 1985), 'numpy.shape', 'np.shape', (["attention_params['key']['kernel']"], {}), "(attention_params['key']['kernel'])\n", (1950, 1985), True, 'import numpy as np\n'), ((2063, 2108), 'numpy.shape', 'np.shape', (["attention_params['value']['kernel']"], {}), "(attention_params['value']['kernel'])\n", (2071, 2108), True, 'import numpy as np\n'), ((2301, 2337), 'numpy.shape', 'np.shape', (["mlp_params['wi']['kernel']"], {}), "(mlp_params['wi']['kernel'])\n", (2309, 2337), True, 'import numpy as np\n'), ((2423, 2459), 'numpy.shape', 'np.shape', (["mlp_params['wo']['kernel']"], {}), "(mlp_params['wo']['kernel'])\n", (2431, 2459), True, 'import numpy as np\n'), ((3568, 3585), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (3582, 3585), False, 'from jax import random\n'), ((3953, 3998), 'numpy.shape', 'np.shape', (["attention_params['query']['kernel']"], {}), "(attention_params['query']['kernel'])\n", (3961, 3998), True, 'import numpy as np\n'), ((4076, 4119), 'numpy.shape', 'np.shape', (["attention_params['key']['kernel']"], {}), "(attention_params['key']['kernel'])\n", (4084, 4119), True, 'import numpy as np\n'), ((4197, 4242), 'numpy.shape', 'np.shape', (["attention_params['value']['kernel']"], {}), "(attention_params['value']['kernel'])\n", (4205, 4242), True, 'import numpy as np\n'), ((4393, 4438), 'numpy.shape', 'np.shape', (["attention_params['query']['kernel']"], {}), "(attention_params['query']['kernel'])\n", (4401, 4438), True, 'import numpy as np\n'), ((4516, 4559), 'numpy.shape', 'np.shape', (["attention_params['key']['kernel']"], {}), "(attention_params['key']['kernel'])\n", (4524, 4559), True, 'import numpy as 
np\n'), ((4637, 4682), 'numpy.shape', 'np.shape', (["attention_params['value']['kernel']"], {}), "(attention_params['value']['kernel'])\n", (4645, 4682), True, 'import numpy as np\n'), ((4875, 4911), 'numpy.shape', 'np.shape', (["mlp_params['wi']['kernel']"], {}), "(mlp_params['wi']['kernel'])\n", (4883, 4911), True, 'import numpy as np\n'), ((4997, 5033), 'numpy.shape', 'np.shape', (["mlp_params['wo']['kernel']"], {}), "(mlp_params['wo']['kernel'])\n", (5005, 5033), True, 'import numpy as np\n'), ((6232, 6249), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (6246, 6249), False, 'from jax import random\n'), ((6757, 6802), 'numpy.shape', 'np.shape', (["attention_params['query']['kernel']"], {}), "(attention_params['query']['kernel'])\n", (6765, 6802), True, 'import numpy as np\n'), ((6880, 6923), 'numpy.shape', 'np.shape', (["attention_params['key']['kernel']"], {}), "(attention_params['key']['kernel'])\n", (6888, 6923), True, 'import numpy as np\n'), ((7001, 7046), 'numpy.shape', 'np.shape', (["attention_params['value']['kernel']"], {}), "(attention_params['value']['kernel'])\n", (7009, 7046), True, 'import numpy as np\n'), ((7252, 7288), 'numpy.shape', 'np.shape', (["mlp_params['wi']['kernel']"], {}), "(mlp_params['wi']['kernel'])\n", (7260, 7288), True, 'import numpy as np\n'), ((7372, 7408), 'numpy.shape', 'np.shape', (["mlp_params['wo']['kernel']"], {}), "(mlp_params['wo']['kernel'])\n", (7380, 7408), True, 'import numpy as np\n'), ((8527, 8544), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (8541, 8544), False, 'from jax import random\n'), ((9096, 9141), 'numpy.shape', 'np.shape', (["attention_params['query']['kernel']"], {}), "(attention_params['query']['kernel'])\n", (9104, 9141), True, 'import numpy as np\n'), ((9219, 9262), 'numpy.shape', 'np.shape', (["attention_params['key']['kernel']"], {}), "(attention_params['key']['kernel'])\n", (9227, 9262), True, 'import numpy as np\n'), ((9340, 9385), 'numpy.shape', 'np.shape', 
(["attention_params['value']['kernel']"], {}), "(attention_params['value']['kernel'])\n", (9348, 9385), True, 'import numpy as np\n'), ((9657, 9702), 'numpy.shape', 'np.shape', (["attention_params['query']['kernel']"], {}), "(attention_params['query']['kernel'])\n", (9665, 9702), True, 'import numpy as np\n'), ((9787, 9830), 'numpy.shape', 'np.shape', (["attention_params['key']['kernel']"], {}), "(attention_params['key']['kernel'])\n", (9795, 9830), True, 'import numpy as np\n'), ((9915, 9960), 'numpy.shape', 'np.shape', (["attention_params['value']['kernel']"], {}), "(attention_params['value']['kernel'])\n", (9923, 9960), True, 'import numpy as np\n'), ((10166, 10202), 'numpy.shape', 'np.shape', (["mlp_params['wi']['kernel']"], {}), "(mlp_params['wi']['kernel'])\n", (10174, 10202), True, 'import numpy as np\n'), ((10286, 10322), 'numpy.shape', 'np.shape', (["mlp_params['wo']['kernel']"], {}), "(mlp_params['wo']['kernel'])\n", (10294, 10322), True, 'import numpy as np\n'), ((5671, 5705), 'flax.linen.initializers.normal', 'nn.initializers.normal', ([], {'stddev': '(1.0)'}), '(stddev=1.0)\n', (5693, 5705), True, 'from flax import linen as nn\n'), ((7686, 7720), 'flax.linen.initializers.normal', 'nn.initializers.normal', ([], {'stddev': '(1.0)'}), '(stddev=1.0)\n', (7708, 7720), True, 'from flax import linen as nn\n')] |
#!/usr/bin/env python
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import CompressedImage
class view_camera(object):
    """ROS node helper: subscribes to a compressed-image topic and shows frames."""

    def __init__(self):
        self.node_name = rospy.get_name()
        # queue_size=1 drops stale frames so the window always shows the newest image.
        self.sub_compressed_img = rospy.Subscriber("~compressed",CompressedImage,self.cbImg,queue_size=1)

    def cbImg(self,msg):
        """Decode one CompressedImage message and display it in an OpenCV window."""
        rospy.loginfo("get img")
        # np.frombuffer replaces np.fromstring, which is deprecated for binary data.
        np_arr = np.frombuffer(msg.data, np.uint8)
        img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
        cv2.imshow("camera",img)
        cv2.waitKey(1)  # required so the HighGUI event loop refreshes the window
if __name__ == '__main__':
    rospy.init_node('view_camera',anonymous=False)
    # Bind the instance to a different name: the original assignment
    # `view_camera = view_camera()` shadowed the class itself.
    viewer = view_camera()
    rospy.spin()
| [
"rospy.init_node",
"numpy.fromstring",
"cv2.imshow",
"rospy.spin",
"rospy.get_name",
"cv2.imdecode",
"rospy.Subscriber",
"cv2.waitKey",
"rospy.loginfo"
] | [((558, 605), 'rospy.init_node', 'rospy.init_node', (['"""view_camera"""'], {'anonymous': '(False)'}), "('view_camera', anonymous=False)\n", (573, 605), False, 'import rospy\n'), ((641, 653), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (651, 653), False, 'import rospy\n'), ((186, 202), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (200, 202), False, 'import rospy\n'), ((237, 311), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~compressed"""', 'CompressedImage', 'self.cbImg'], {'queue_size': '(1)'}), "('~compressed', CompressedImage, self.cbImg, queue_size=1)\n", (253, 311), False, 'import rospy\n'), ((342, 366), 'rospy.loginfo', 'rospy.loginfo', (['"""get img"""'], {}), "('get img')\n", (355, 366), False, 'import rospy\n'), ((384, 417), 'numpy.fromstring', 'np.fromstring', (['msg.data', 'np.uint8'], {}), '(msg.data, np.uint8)\n', (397, 417), True, 'import numpy as np\n'), ((432, 470), 'cv2.imdecode', 'cv2.imdecode', (['np_arr', 'cv2.IMREAD_COLOR'], {}), '(np_arr, cv2.IMREAD_COLOR)\n', (444, 470), False, 'import cv2\n'), ((479, 504), 'cv2.imshow', 'cv2.imshow', (['"""camera"""', 'img'], {}), "('camera', img)\n", (489, 504), False, 'import cv2\n'), ((512, 526), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (523, 526), False, 'import cv2\n')] |
import sys
import csv
import glob
import os
import pandas as pd
import collections
import traceback
from os.path import basename, dirname
from pkg_resources import resource_filename
import argparse
#from PyQt5.QtCore import *
#from PyQt5.QtGui import QFileDialog
#from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox
from PyQt5.Qt import QMainWindow, qApp #, QTimer
#from PyQt5.QtCore import (QCoreApplication, QObject, QRunnable, QThread, QThreadPool,pyqtSignal, pyqtSlot)
from PyQt5 import uic
import numpy as np
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from matplotlib import colors
#from matplotlib.cm import ScalarMappable
from .mcr import ftir_function as ff
from .miccs import ExceptionDialog
# Load the Qt Designer .ui file once at import time; element [0] of the
# loadUiType result is the generated form class used as a mixin below.
Ui_MainWindow = uic.loadUiType(resource_filename(__name__, "mcr/clustering_ui.ui"))[0]
class MyMainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self,parent=None):
        """Build the main window: set up the Designer UI and wire all signals."""
        super(MyMainWindow, self).__init__(parent)
        qApp.installEventFilter(self)
        self.setupUi(self)
        # File loading / navigation.
        self.pushButtonLoadSpec.clicked.connect(self.Load_chose)
        self.lock_un(False)  # start with all data-dependent controls disabled
        # Detached-window ("expand") buttons and projection controls.
        self.pushButtonExpandSpectra.clicked.connect(self.ExpandSpectra)
        self.pushButtonExpandProjection.clicked.connect(self.ExpandProjection)
        self.comboBoxCmaps.currentIndexChanged.connect(self.coloring)
        self.comboBoxMethod.currentIndexChanged.connect(self.ImageProjection)
        self.pushButtonLoadConc.clicked.connect(self.LoadPurest)
        self.horizontalSliderWavenumber.valueChanged.connect(self.Wavenumbercal)
        # Clustering controls.
        self.pushButtonCluster.clicked.connect(self.ClusteringCal)
        self.comboBoxVisualize.currentIndexChanged.connect(self.Cvisualize)
        self.pushButtonExpandVisual.clicked.connect(self.ExpandVis)
        self.pushButtonExpandAve.clicked.connect(self.ExpandAve)
        self.spinBoxNcluster.valueChanged.connect(self.Nclus_on)
        self.pushButtonExpandCluster.clicked.connect(self.ExpandCluster)
        self.pushButtonReduce.clicked.connect(self.Reduce)
        self.pushButtonSaveSpectra.clicked.connect(self.SaveAverage)
        self.pushButtonLoadWhite.clicked.connect(self.IMG)
        self.pushButtonRefresh.clicked.connect(self.Refresh)
        self.pushButtonNext.clicked.connect(self.Next)
        self.pushButtonPrevious.clicked.connect(self.Previous)
        self.pushButtonSC.clicked.connect(self.SC)
        # Re-validate the image geometry when the user edits height/width.
        self.lineEditHeight.returnPressed.connect(self.ValidationX)
        self.lineEditWidth.returnPressed.connect(self.ValidationY)
        ExceptionDialog.install(self)
def closeEvent(self, event):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Question)
msgBox.setText("Warning")
msgBox.setInformativeText('Are you sure to close the window ?')
msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
msgBox.setDefaultButton(QMessageBox.No)
reply = msgBox.exec_()
if reply == QMessageBox.Yes:
plt.close('all')
qApp.quit()
else:
event.ignore()
def Load_chose(self):
if self.comboBoxSingMul.currentIndex() == 1:
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
self.foldername = QFileDialog.getExistingDirectory(self,"Open the input data")
if self.foldername:
self.search_whole_folder(self.foldername)
self.pushButtonNext.setEnabled(True)
self.pushButtonPrevious.setEnabled(True)
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText('No file is loaded')
msg.setInformativeText("Please select a file")
msg.setWindowTitle("Warning")
msg.setStandardButtons(QMessageBox.Ok )
msg.exec_()
elif self.comboBoxSingMul.currentIndex() == 0:
self.LoadSpec()
self.pushButtonNext.setEnabled(False)
self.pushButtonPrevious.setEnabled(False)
def LoadSpec(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
self.fileName, __ = QFileDialog.getOpenFileName(self,"Open Matrix File", "","Matrix File (*.mat)")#, options=options)
if self.fileName:
self.foldername = dirname(self.fileName)
self.lineEditFileNum.setText('1')
self.lineEditTotal.setText('1')
self.readfile(self.fileName)
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText('No file is loaded')
msg.setInformativeText("Please select a file")
msg.setWindowTitle("Warning")
msg.setStandardButtons(QMessageBox.Ok )
msg.exec_()
def Next(self):
if int(self.lineEditFileNum.text()) != int(self.lineEditTotal.text()):
df = pd.read_csv(self.foldername+"//Fileall.csv",header=None)
count = int(self.lineEditFileNum.text())
filename = df.iloc[count,1]
self.lineEditFileNum.setText(str(count+1))
self.readfile(filename)
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText('No More File')
msg.setInformativeText("Finish")
msg.setWindowTitle("Warning")
msg.setStandardButtons(QMessageBox.Ok )
msg.exec_()
def Previous(self):
if int(self.lineEditFileNum.text()) != 1:
df = pd.read_csv(self.foldername+"//Fileall.csv",header=None)
count = int(self.lineEditFileNum.text())
count -=1
filename = df.iloc[count-1,1]
self.lineEditFileNum.setText(str(count))
self.readfile(filename)
else:
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText('This is the first file')
msg.setInformativeText("Click Next for net")
msg.setWindowTitle("Warning")
msg.setStandardButtons(QMessageBox.Ok )
msg.exec_()
    def ValidationX(self):
        """Validate a user-edited height, fix width to match, and reshape the cube.

        If height*width no longer equals the pixel count, the width is
        recomputed when the count divides evenly; otherwise both fields
        fall back to the dimensions read from the file.
        """
        x = int(self.lineEditHeight.text())
        y = int(self.lineEditWidth.text())
        z = int(self.lineEditLength.text())
        xy = int(self.sx*self.sy)  # total number of pixels in the map
        if x == 0 or y == 0:
            # Zero is invalid: restore the dimensions from the file.
            # NOTE(review): height is set from sy and width from sx here,
            # but the locals get x=sx, y=sy — verify this pairing is intended.
            self.lineEditHeight.setText(str(self.sy))
            self.lineEditWidth.setText(str(self.sx))
            x = self.sx
            y = self.sy
        elif int(x*y) != xy:
            # Height changed: derive a matching width if it divides evenly.
            excess = np.mod(xy,x)
            if excess == 0 :
                y=xy/x
                y = int(y)
                self.lineEditWidth.setText(str(y))
            else:
                # Not divisible -> revert to the file's own dimensions.
                self.lineEditHeight.setText(str(self.sy))
                self.lineEditWidth.setText(str(self.sx))
                x = self.sx
                y = self.sy
        else:
            self.lineEditHeight.setText(str(x))
            self.lineEditWidth.setText(str(y))
        # Reshape the spectral cube to (bands, height, width) and redraw everything.
        self.p = np.reshape(self.p,(z,x,y))
        self.ImageProjection()
        self.Cvisualize()
        self.ClusteringCal()
    def ValidationY(self):
        """Validate a user-edited width, fix height to match, and reshape the cube.

        Mirror image of ValidationX: when height*width no longer equals the
        pixel count, the height is recomputed if the count divides evenly,
        otherwise both fields revert to the file's dimensions.
        """
        x = int(self.lineEditHeight.text())
        y = int(self.lineEditWidth.text())
        z = int(self.lineEditLength.text())
        xy = int(self.sx*self.sy)  # total number of pixels in the map
        if x == 0 or y == 0:
            # Zero is invalid: restore the dimensions from the file.
            self.lineEditHeight.setText(str(self.sy))
            self.lineEditWidth.setText(str(self.sx))
            x = self.sx
            y = self.sy
        elif int(x*y) != xy:
            # Width changed: derive a matching height if it divides evenly.
            excess = np.mod(xy,y)
            if excess == 0:
                x=xy/y
                x = int(x)
                self.lineEditHeight.setText(str(x))
            else:
                # Not divisible -> revert to the file's own dimensions.
                self.lineEditHeight.setText(str(self.sy))
                self.lineEditWidth.setText(str(self.sx))
                x = self.sx
                y = self.sy
        else:
            self.lineEditHeight.setText(str(x))
            self.lineEditWidth.setText(str(y))
        # Reshape the spectral cube to (bands, height, width) and redraw everything.
        self.p = np.reshape(self.p,(z,x,y))
        self.ImageProjection()
        self.Cvisualize()
        self.ClusteringCal()
def SC(self):
name = self.lineEditFilename.text()
QApplication.primaryScreen().grabWindow(self.winId()).save(self.foldername+'/'+name+'_SC'+'.png')
def search_whole_folder(self, foldername):
count = 0
name = {}
a = [x[0] for x in os.walk(foldername)]
for i in a:
os.chdir(i)
for file in glob.glob('*.mat'):
name[count] = str(i+'/'+file)
count += 1
w = csv.writer(open(foldername+"//Fileall.csv", "w"))
for key, val in sorted(name.items(), key=lambda item: item[1]):
# for key, val in sorted(name.items()):
w.writerow([key, val])
self.lineEditTotal.setText(str(count))
self.lineEditFileNum.setText(str(1))
self.readfile(name[0])
    def readfile(self,fileName):
        """Load one .mat data file, refresh every panel, then pick purest spectra.

        Side effects: sets self.sx/sy (map size), self.p (cube),
        self.wavenumber, self.sp (spectra), self.index (random preview
        columns) and self.img (optional white-light photo).
        """
        self.img = None
        try:
            # Look for a companion .jpg next to the data file; missing is fine.
            self.img = plt.imread(fileName.replace(fileName.split('.0')[-1],'.jpg'))
        except:
            pass
        try:
            self.sx,self.sy, self.p ,self.wavenumber, self.sp = ff.readmat(fileName)
        except:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText('No file is loaded')
            msg.setInformativeText('File can not be opened due to the the structure of data')
            msg.setWindowTitle("Warning")
            msg.setStandardButtons(QMessageBox.Ok )
            msg.exec_()
            self.lineEditFileNum.setText('')
            self.lineEditTotal.setText('')
            pass
        # Ten random pixel indices used as a preview of the raw spectra.
        self.index = np.random.randint(0,int(self.sx*self.sy),(10))
        self.clear_all()
        self.lock_un(True)
        self.lineEditFilename.setText((basename(fileName).replace('.mat','')))
        self.lineEditDirSpectra.setText(fileName)
        self.plot_specta.canvas.ax.clear()
        self.plot_specta.canvas.ax.plot(self.wavenumber,self.sp[:,self.index])
        self.plot_specta.canvas.fig.tight_layout()
        self.plot_specta.canvas.draw()
        self.labelMinwn.setText(str("%.2f" % np.min(self.wavenumber)))
        self.labelMaxwn.setText(str("%.2f" % np.max(self.wavenumber)))
        self.lineEditLength.setText(str(len(self.wavenumber)))
        self.lineEditWavenumber.setText(str("%.2f" % np.min(self.wavenumber)))
        try:
            # Reuse the user's previous height/width if they parse as ints.
            x = int(self.lineEditHeight.text())
            y = int(self.lineEditWidth.text())
            z = int(self.lineEditLength.text())
            self.p = np.reshape(self.p,(z,x,y))
        except ValueError:
            self.lineEditWidth.setText(str(self.sx))
            self.lineEditHeight.setText(str(self.sy))
        self.ImageProjection()
        if self.img is not None:
            self.plot_White.canvas.ax.clear()
            self.plot_White.canvas.ax.imshow(self.img)
            self.plot_White.canvas.fig.tight_layout()
            self.plot_White.canvas.draw()
        # Offer an existing <file>_purest.csv before asking the user to browse.
        filepure = fileName.replace('.mat','')
        filepure = filepure+'_purest.csv'
        if os.path.isfile(filepure) == True:
            msgBox = QMessageBox()
            msgBox.setIcon(QMessageBox.Question)
            msgBox.setText("Information")
            msgBox.setInformativeText('The '+basename(filepure)+' is available are you going to use it as purest spectra ?')
            msgBox.setStandardButtons(QMessageBox.Yes| QMessageBox.No)
            msgBox.setDefaultButton(QMessageBox.No)
            reply = msgBox.exec_()
            if reply == QMessageBox.Yes:
                self.AutoLoad(filepure)
            else:
                self.LoadPurest()
        else:
            self.LoadPurest()
def LoadPurest(self):
sugest = self.foldername+'//'+self.lineEditFilename.text()+'_purest.csv'
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
filepurest, _ = QFileDialog.getOpenFileName(self,"Open Matrix File",sugest,"Matrix File (*.csv)", options=options)
if filepurest:
self.comboBoxVisualize.clear()
self.comboBoxVisualize.addItem('Spectra and White Light Image')
self.lineEditDirPurest.setText(filepurest)
dfpurest = pd.read_csv(filepurest, header= None)
self.df_spec = dfpurest.iloc[:int(self.lineEditLength.text()),:]
self.df_conc = dfpurest.iloc[int(self.lineEditLength.text()):,:]
self.ClusteringCal()
self.Nclus_on()
for comp1 in range(0,len(self.df_conc.T)):
self.comboBoxVisualize.addItem("component_"+str(comp1+1))
def AutoLoad(self,filepurest):
self.comboBoxVisualize.clear()
self.comboBoxVisualize.addItem('Spectra and White Light Image')
self.lineEditDirPurest.setText(filepurest)
dfpurest = pd.read_csv(filepurest, header= None)
self.df_spec = dfpurest.iloc[:int(self.lineEditLength.text()),:]
self.df_conc = dfpurest.iloc[int(self.lineEditLength.text()):,:]
self.ClusteringCal()
self.Nclus_on()
for comp1 in range(0,len(self.df_conc.T)):
self.comboBoxVisualize.addItem("component_"+str(comp1+1))
def IMG(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fileName, _ = QFileDialog.getOpenFileName(self,"Open Image File", "",
"Image (*.jpg *.jpeg *.bmp *.png .tif *.tiff)", options=options)
if fileName:
self.img = plt.imread(fileName)
self.plot_White.canvas.ax.clear()
self.plot_White.canvas.ax.imshow(self.img)
self.plot_White.canvas.fig.tight_layout()
self.plot_White.canvas.draw()
self.Cvisualize()
def ExpandCluster(self):
plt.close("Segmentation Map")
plt.figure("Segmentation Map", tight_layout={'pad':.5})
plt.imshow(self.mapping,cmap=self.cmap)
plt.colorbar()
plt.show()
def ExpandClusterU(self):
if plt.fignum_exists("Segmentation Map"):
self.ExpandCluster()
else:
pass
def ClusteringCal(self):
nclus = int(self.spinBoxNcluster.value())
method = int(self.comboBoxMethodCluster.currentIndex())
if self.comboBoxNormalization.currentIndex() == 0:
data = self.df_conc
else:
data = StandardScaler().fit_transform(self.df_conc)
if method == 0:
self.clus = KMeans(n_clusters = nclus, random_state=0).fit(data)
else:
self.clus = MiniBatchKMeans(n_clusters = nclus, random_state=0).fit(data)
self.mapping = np.reshape(self.clus.labels_,(int(self.lineEditHeight.text()),
int(self.lineEditWidth.text())))
c1 = str(self.comboBoxC1.currentText())
c2 = str(self.comboBoxC2.currentText())
c3 = str(self.comboBoxC3.currentText())
c4 = str(self.comboBoxC4.currentText())
c5 = str(self.comboBoxC5.currentText())
c6 = str(self.comboBoxC6.currentText())
c7 = str(self.comboBoxC7.currentText())
c8 = str(self.comboBoxC8.currentText())
t1 = self.lineEditA1.text()
t2 = self.lineEditA2.text()
t3 = self.lineEditA3.text()
t4 = self.lineEditA4.text()
t5 = self.lineEditA5.text()
t6 = self.lineEditA6.text()
t7 = self.lineEditA7.text()
t8 = self.lineEditA8.text()
if nclus == 1:
self.clis =[c1]
self.label = [t1]
elif nclus == 2:
self.clis =[c1,c2]
self.label = [t1,t2]
elif nclus == 3:
self.clis =[c1,c2,c3]
self.label = [t1,t2,t3]
elif nclus == 4:
self.clis =[c1,c2,c3,c4]
self.label = [t1,t2,t3,t4]
elif nclus == 5:
self.clis =[c1,c2,c3,c4,c5]
self.label = [t1,t2,t3,t4,t5]
elif nclus == 6:
self.clis =[c1,c2,c3,c4,c5,c6]
self.label = [t1,t2,t3,t4,t5,t6]
elif nclus == 7:
self.clis =[c1,c2,c3,c4,c5,c6,c7]
self.label = [t1,t2,t3,t4,t5,t6,t7]
elif nclus == 8:
self.clis =[c1,c2,c3,c4,c5,c6,c7,c8]
self.label = [t1,t2,t3,t4,t5,t6,t7,t8]
if nclus > 8:
self.cmap = str(self.comboBoxColorBig.currentText())
else:
self.cmap = colors.ListedColormap(self.clis)
self.plotCluster.canvas.ax.clear()
self.plotCluster.canvas.ax.imshow(self.mapping,cmap=self.cmap)
self.plotCluster.canvas.fig.tight_layout()
self.plotCluster.canvas.draw()
self.ExpandClusterU()
self.color=[c1,c2,c3,c4,c5,c6,c7,c8]
self.plotAverage.canvas.ax.clear()
self.plotAverage.canvas.ax.set_prop_cycle(color=self.color)
self.pushButtonReduce.setEnabled(True)
indori = self.clus.labels_
self.inde ={}
self.spmean = np.zeros((nclus,len(self.sp)))
for jj in range(0,nclus):
self.inde[jj]=[i for i, e in enumerate(indori) if e == jj]
self.spmean[jj,:]= np.mean(self.sp[:,self.inde[jj]], axis = 1)
self.plotAverage.canvas.ax.plot(self.wavenumber, self.spmean.T )
if nclus <= 8:
self.plotAverage.canvas.ax.legend(self.label,loc='best')
self.plotAverage.canvas.fig.tight_layout()
else:
self.plotAverage.canvas.fig.tight_layout()
self.ExpandAveU()
self.plotAverage.canvas.draw()
self.Similar()
#
#fig = plt.figure()
#ax = fig.add_subplot(111)
def ExpandAve(self):
plt.close("Average Spectra")
fig = plt.figure("Average Spectra", tight_layout={'pad':.5})
ax = fig.add_subplot(111)
ax.set_prop_cycle(color=self.color)
ax.plot(self.wavenumber, self.spmean.T)
plt.legend(self.label,loc='best')
plt.show("Average Spectra")
def ExpandAveU(self):
if plt.fignum_exists("Average Spectra"):
self.ExpandAve()
else:
pass
def ExpandSpectra(self):
plt.close('Spectra')
plt.figure('Spectra', tight_layout={'pad':.5})
if self.comboBoxVisualize.currentIndex() == 0:
plt.plot(self.wavenumber,self.sp[:,self.index])
if self.comboBoxMethod.currentIndex() == 2:
plt.axvline(x=self.wavenumv)
else:
plt.plot(self.wavenumber, self.datas)
plt.xlabel("Wavenumber(1/cm)")#,fontsize=24)
plt.ylabel("Absorption(arb. units)")#,fontsize=24)
plt.tick_params(axis='both',direction='in', length=8, width=1)
plt.tick_params(axis='both',which='major')#,labelsize=24)
plt.show()
def ExpandSpectraU(self):
if plt.fignum_exists("Spectra"):
fig = plt.figure("Spectra")
ax = fig.gca()
ax.clear()
if self.comboBoxVisualize.currentIndex() == 0:
ax.plot(self.wavenumber,self.sp[:,self.index])
if self.comboBoxMethod.currentIndex() == 2:
ax.axvline(x=self.wavenumv)
else:
ax.plot(self.wavenumber, self.datas)
fig.canvas.draw_idle()
else:
pass
def ExpandProjection(self):
plt.close("Image Projection")
plt.figure("Image Projection", tight_layout={'pad':.5})
plt.imshow(self.projection,str(self.comboBoxCmaps.currentText()))
plt.colorbar()
plt.show()
def ExpandProjU(self):
if plt.fignum_exists("Image Projection"):
fig = plt.figure("Image Projection")
fig.clf()
plt.imshow(self.projection,str(self.comboBoxCmaps.currentText()))
fig.canvas.draw_idle()
else:
pass
def coloring(self):
self.plot_visual.canvas.ax.clear()
self.plot_visual.canvas.ax.imshow(self.projection,str(self.comboBoxCmaps.currentText()))
self.plot_visual.canvas.fig.tight_layout()
self.plot_visual.canvas.draw()
self.Cvisualize()
self.ExpandProjU()
    def ImageProjection(self):
        """Compute and draw the 2-D projection chosen in comboBoxMethod.

        0 = integrated area, 1 = maximum-intensity projection,
        2 = single-wavenumber slice (enables the wavenumber slider).
        """
        if self.comboBoxMethod.currentIndex() == 0:
            self.lineEditWavenumber.setEnabled(False)
            self.horizontalSliderWavenumber.setEnabled(False)
            self.projection = ff.proarea(self.p,self.wavenumber)
        if self.comboBoxMethod.currentIndex() == 1:
            self.lineEditWavenumber.setEnabled(False)
            self.horizontalSliderWavenumber.setEnabled(False)
            self.projection = ff.promip(self.p)
        if self.comboBoxMethod.currentIndex() == 2:
            # Slice mode: the wavenumber controls become active.
            self.lineEditWavenumber.setEnabled(True)
            self.horizontalSliderWavenumber.setEnabled(True)
            self.wavenumv = float(self.lineEditWavenumber.text())
            self.projection = ff.prowavenum(self.p,self.wavenumber,self.wavenumv)
        self.plot_visual.canvas.ax.clear()
        self.plot_visual.canvas.ax.imshow(self.projection,str(self.comboBoxCmaps.currentText()))
        self.plot_visual.canvas.fig.tight_layout()
        self.plot_visual.canvas.draw()
        # Keep any detached windows in sync.
        self.ExpandProjU()
        self.ExpandSpectraU()
    def Wavenumbercal(self):
        """Map the slider position (0..10000) to a wavenumber and redraw.

        Updates the wavenumber line edit, recomputes the single-wavenumber
        projection and marks the chosen wavenumber on the spectra plot.
        """
        # Linear interpolation between min and max wavenumber.
        nnow1 = ((np.max(self.wavenumber) - np.min(self.wavenumber))
                *float(self.horizontalSliderWavenumber.value())/10000.0 + np.min(self.wavenumber))
        nnow1 = "%.2f" % nnow1
        self.lineEditWavenumber.setText(str(nnow1))
        self.wavenumv = float(self.lineEditWavenumber.text())
        self.projection = ff.prowavenum(self.p,self.wavenumber,self.wavenumv)
        self.plot_specta.canvas.ax.clear()
        self.plot_specta.canvas.ax.plot(self.wavenumber,self.sp[:,self.index])
        self.plot_specta.canvas.ax.axvline(x=self.wavenumv)
        self.plot_specta.canvas.fig.tight_layout()
        self.plot_specta.canvas.draw()
        self.plot_visual.canvas.ax.clear()
        self.plot_visual.canvas.ax.imshow(self.projection,str(self.comboBoxCmaps.currentText()))
        self.plot_visual.canvas.fig.tight_layout()
        self.plot_visual.canvas.draw()
        # Keep any detached windows in sync.
        self.ExpandProjU()
        self.ExpandSpectraU()
    def Cvisualize (self):
        """Show the selected visualization: raw spectra/photo or one component.

        Index 0 shows the white-light photo (if loaded) and random raw
        spectra; a "component_N" entry shows component N's concentration
        map and its purest spectrum.
        """
        if self.comboBoxVisualize.currentIndex() == 0:
            if self.img is not None:
                self.plotMultiVisual.canvas.ax.clear()
                self.plotMultiVisual.canvas.ax.imshow(self.img)
                self.plotMultiVisual.canvas.fig.tight_layout()
                self.plotMultiVisual.canvas.draw()
            self.plot_specta.canvas.ax.clear()
            self.plot_specta.canvas.ax.plot(self.wavenumber,self.sp[:,self.index])
            self.plot_specta.canvas.fig.tight_layout()
            self.plot_specta.canvas.draw()
        if 'component_' in self.comboBoxVisualize.currentText():
            import re
            # Extract the 1-based component number from the combo text.
            val = int(re.search(r'\d+', self.comboBoxVisualize.currentText()).group()) - 1
            self.datap = self.df_conc.iloc[:,val].to_numpy()
            # Concentration map of the selected component.
            self.component = np.reshape(self.datap,(int(self.lineEditHeight.text()),
                                      int(self.lineEditWidth.text())))
            self.plotMultiVisual.canvas.ax.clear()
            self.plotMultiVisual.canvas.ax.imshow(self.component,str(self.comboBoxCmaps.currentText()))
            self.plotMultiVisual.canvas.fig.tight_layout()
            self.plotMultiVisual.canvas.draw()
            # Matching purest spectrum; also used by the detached spectra window.
            self.datas = self.df_spec.iloc[:,val].to_numpy()
            self.plot_specta.canvas.ax.clear()
            self.plot_specta.canvas.ax.plot(self.wavenumber, self.datas)
            self.plot_specta.canvas.fig.tight_layout()
            self.plot_specta.canvas.draw()
        self.ExpandSpectraU()
        self.ExpandVisU()
def ExpandVis(self):
plt.close('Image Projection')
plt.figure('Image Projection', tight_layout={'pad':.5})
if self.comboBoxVisualize.currentIndex() == 0:
plt.imshow(self.img)
plt.show()
else:
plt.imshow(self.component,str(self.comboBoxCmaps.currentText()))
plt.colorbar()
plt.show()
def ExpandVisU(self):
if plt.fignum_exists('Image Projection'):
fig = plt.figure('Image Projection')
ax = fig.gca()
ax.clear()
if self.comboBoxVisualize.currentIndex() == 0:
ax.imshow(self.img)
else:
ax.imshow(self.component,str(self.comboBoxCmaps.currentText()))
fig.canvas.draw_idle()
else:
pass
def Similar(self):
anotclus=len(set(self.clis))
self.lineEditAnotClus.setText(str(anotclus))
    def Reduce(self):
        """Merge clusters that share the same colour into one averaged spectrum.

        Clusters assigned the same colour in self.clis are combined: their
        mean spectra are averaged weighted by cluster size. Updates
        self.spmean, self.label and self.color accordingly.
        """
        # global spmean, label, color
        anotclus=len(set(self.clis))
        # sp_new = spmean.copy()
        self.lineEditAnotClus.setText(str(anotclus))
        if self.spinBoxNcluster.value() != anotclus:
            # Colours used by more than one cluster -> these get merged.
            sim = [item for item, count in collections.Counter(self.clis).items() if count > 1]
            ind = ff.listindexes(self.clis)
            new = np.zeros((len(self.wavenumber),len(sim)))
            suma = np.zeros((len(sim)))
            olddict = dict(zip(self.clis,self.spmean))
            # Pixel-count-weighted average of all clusters sharing a colour.
            for ii in range(0,len(sim)):
                for jj in range(len(ind[sim[ii]])):
                    auin = int(ind[sim[ii]][jj])
                    suma[ii] = int(suma[ii]) + len(self.inde[auin])
                    new[:,ii] = new[:,ii] + self.spmean[auin,:]*len(self.inde[auin])
                new[:,ii]/= suma[ii]
            #Color of new and multiple spectra
            nosim = [item for item, count in collections.Counter(self.clis).items() if count > 0]
            count = 0
            new_col = 0
            new_col = sim
            if len(sim) != anotclus:
                # Colours used by exactly one cluster are carried over unchanged.
                single = list(set(nosim)-set(sim))
                dumy = np.zeros((len(self.wavenumber),len(single)))
                for jj in single:
                    dumy[:,count] = olddict[jj]
                    count+=1
                    new_col.append(jj)
                new = np.concatenate((new,dumy), axis = 1)
            else:
                new_label = [item for item, count in collections.Counter(self.label).items() if count > 0]
            # Rebuild labels in the same order as the merged colour list.
            new_label=[]
            for mem in new_col:
                index = self.color.index(mem)
                new_label.append(self.label[index])
            self.spmean = new.T
            self.label = new_label
            self.color = new_col
            self.plotAverage.canvas.ax.clear()
            self.plotAverage.canvas.ax.set_prop_cycle(color=self.color)
            self.plotAverage.canvas.ax.plot(self.wavenumber, self.spmean.T )
            self.plotAverage.canvas.ax.legend(self.label,loc= 'best')
            self.plotAverage.canvas.fig.tight_layout()
            self.plotAverage.canvas.draw()
            self.ExpandAveU()
        self.pushButtonReduce.setEnabled(False)
def Nclus_on(self):
nclus = int(self.spinBoxNcluster.value())
if nclus == 1:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(False)
self.comboBoxC3.setEnabled(False)
self.comboBoxC4.setEnabled(False)
self.comboBoxC5.setEnabled(False)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(False)
self.lineEditA3.setEnabled(False)
self.lineEditA4.setEnabled(False)
self.lineEditA5.setEnabled(False)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
elif nclus == 2:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(False)
self.comboBoxC4.setEnabled(False)
self.comboBoxC5.setEnabled(False)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(False)
self.lineEditA4.setEnabled(False)
self.lineEditA5.setEnabled(False)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
elif nclus == 3:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(True)
self.comboBoxC4.setEnabled(False)
self.comboBoxC5.setEnabled(False)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(True)
self.lineEditA4.setEnabled(False)
self.lineEditA5.setEnabled(False)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
elif nclus == 4:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(True)
self.comboBoxC4.setEnabled(True)
self.comboBoxC5.setEnabled(False)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(True)
self.lineEditA4.setEnabled(True)
self.lineEditA5.setEnabled(False)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
elif nclus == 5:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(True)
self.comboBoxC4.setEnabled(True)
self.comboBoxC5.setEnabled(True)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(True)
self.lineEditA4.setEnabled(True)
self.lineEditA5.setEnabled(True)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
elif nclus == 6:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(True)
self.comboBoxC4.setEnabled(True)
self.comboBoxC5.setEnabled(True)
self.comboBoxC6.setEnabled(True)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(True)
self.lineEditA4.setEnabled(True)
self.lineEditA5.setEnabled(True)
self.lineEditA6.setEnabled(True)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
elif nclus == 7:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(True)
self.comboBoxC4.setEnabled(True)
self.comboBoxC5.setEnabled(True)
self.comboBoxC6.setEnabled(True)
self.comboBoxC7.setEnabled(True)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(True)
self.lineEditA4.setEnabled(True)
self.lineEditA5.setEnabled(True)
self.lineEditA6.setEnabled(True)
self.lineEditA7.setEnabled(True)
self.lineEditA8.setEnabled(False)
elif nclus == 8:
self.comboBoxC1.setEnabled(True)
self.comboBoxC2.setEnabled(True)
self.comboBoxC3.setEnabled(True)
self.comboBoxC4.setEnabled(True)
self.comboBoxC5.setEnabled(True)
self.comboBoxC6.setEnabled(True)
self.comboBoxC7.setEnabled(True)
self.comboBoxC8.setEnabled(True)
self.lineEditA1.setEnabled(True)
self.lineEditA2.setEnabled(True)
self.lineEditA3.setEnabled(True)
self.lineEditA4.setEnabled(True)
self.lineEditA5.setEnabled(True)
self.lineEditA6.setEnabled(True)
self.lineEditA7.setEnabled(True)
self.lineEditA8.setEnabled(True)
elif nclus > 8:
self.comboBoxC1.setEnabled(False)
self.comboBoxC2.setEnabled(False)
self.comboBoxC3.setEnabled(False)
self.comboBoxC4.setEnabled(False)
self.comboBoxC5.setEnabled(False)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(False)
self.lineEditA2.setEnabled(False)
self.lineEditA3.setEnabled(False)
self.lineEditA4.setEnabled(False)
self.lineEditA5.setEnabled(False)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
if nclus > 8:
self.comboBoxColorBig.setEnabled(True)
self.lineEditAnotClus.setEnabled(False)
else:
self.comboBoxColorBig.setEnabled(False)
self.lineEditAnotClus.setEnabled(True)
def SaveAverage(self):
# wavenumber, spmean.T
suggested = self.lineEditFilename.text()+'_clus.xls'
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
filesave, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()",suggested,"Excel Files(*.xls)", options=options)
if filesave:
__ , ext = os.path.splitext(filesave)
if ext == '.xls':
filesave = filesave
else:
filesave = filesave+'.xls'
df_save = pd.DataFrame({'Wavenumber':self.wavenumber})
df_spec = pd.DataFrame(self.spmean.T, columns = [self.label])
df_save = pd.concat([df_save, df_spec], axis=1, sort=False)
df_save.to_excel(filesave,index=False)
def clear_all(self):
# plt.close('all')
self.lineEditDirSpectra.setText('')
self.lineEditDirPurest.setText('')
self.lineEditFilename.setText('')
self.lineEditLength.setText('')
# self.lineEditWidth.setText('')
# self.lineEditHeight.setText('')
self.comboBoxMethod.setCurrentIndex(0)
self.comboBoxCmaps.setCurrentIndex(0)
self.horizontalSliderWavenumber.setSliderPosition(0)
self.lineEditWavenumber.setText('')
self.Refresh()
self.comboBoxMethodCluster.setCurrentIndex(0)
self.spinBoxNcluster.setValue(8)
self.lineEditAnotClus.setText('')
self.comboBoxVisualize.setCurrentIndex(0)
self.comboBoxC1.setEnabled(False)
self.comboBoxC2.setEnabled(False)
self.comboBoxC3.setEnabled(False)
self.comboBoxC4.setEnabled(False)
self.comboBoxC5.setEnabled(False)
self.comboBoxC6.setEnabled(False)
self.comboBoxC7.setEnabled(False)
self.comboBoxC8.setEnabled(False)
self.lineEditA1.setEnabled(False)
self.lineEditA2.setEnabled(False)
self.lineEditA3.setEnabled(False)
self.lineEditA4.setEnabled(False)
self.lineEditA5.setEnabled(False)
self.lineEditA6.setEnabled(False)
self.lineEditA7.setEnabled(False)
self.lineEditA8.setEnabled(False)
self.plotMultiVisual.canvas.ax.clear()
self.plotMultiVisual.canvas.draw()
self.plot_visual.canvas.ax.clear()
self.plot_visual.canvas.draw()
self.plot_specta.canvas.ax.clear()
self.plot_specta.canvas.draw()
self.plot_White.canvas.ax.clear()
self.plot_White.canvas.draw()
self.plotAverage.canvas.ax.clear()
self.plotAverage.canvas.draw()
self.plotCluster.canvas.ax.clear()
self.plotCluster.canvas.draw()
self.comboBoxVisualize.clear()
self.comboBoxVisualize.addItem('Spectra and White Light Image')
def Refresh(self):
self.comboBoxC1.setCurrentIndex(0)
self.comboBoxC2.setCurrentIndex(1)
self.comboBoxC3.setCurrentIndex(2)
self.comboBoxC4.setCurrentIndex(3)
self.comboBoxC5.setCurrentIndex(4)
self.comboBoxC6.setCurrentIndex(5)
self.comboBoxC7.setCurrentIndex(6)
self.comboBoxC8.setCurrentIndex(7)
self.lineEditA1.setText("")
self.lineEditA2.setText("")
self.lineEditA3.setText("")
self.lineEditA4.setText("")
self.lineEditA5.setText("")
self.lineEditA6.setText("")
self.lineEditA7.setText("")
self.lineEditA8.setText("")
def lock_un(self,stat):
self.pushButtonLoadConc.setEnabled(stat)
self.pushButtonPrevious.setEnabled(stat)
self.pushButtonNext.setEnabled(stat)
self.pushButtonExpandSpectra.setEnabled(stat)
self.comboBoxMethod.setEnabled(stat)
self.comboBoxCmaps.setEnabled(stat)
self.pushButtonExpandProjection.setEnabled(stat)
self.comboBoxVisualize.setEnabled(stat)
self.pushButtonExpandVisual.setEnabled(stat)
self.pushButtonExpandAve.setEnabled(stat)
self.pushButtonSaveSpectra.setEnabled(stat)
self.pushButtonExpandCluster.setEnabled(stat)
self.pushButtonRefresh.setEnabled(stat)
self.comboBoxMethodCluster.setEnabled(stat)
self.spinBoxNcluster.setEnabled(stat)
self.lineEditAnotClus.setEnabled(stat)
self.pushButtonCluster.setEnabled(stat)
self.pushButtonReduce.setEnabled(stat)
self.comboBoxNormalization.setEnabled(stat)
self.pushButtonSC.setEnabled(stat)
def main():
    """Entry point: parse CLI arguments and run the Qt clustering GUI.

    Exits the process with the Qt event loop's return code, or a non-zero
    status if start-up failed.
    """
    parser = argparse.ArgumentParser(
        description='Graphical application for clustering of MCR-ALS output.')
    args = parser.parse_args()
    # BUG FIX: `res` was only assigned inside the try block, so any exception
    # before app.exec_() made the final sys.exit(res) raise NameError and
    # mask the real failure. Default to a non-zero (failure) exit status.
    res = 1
    try:
        # Reuse an already-running QApplication if one exists.
        app = QApplication.instance()
        if not app:
            app = QApplication(sys.argv)
        window = MyMainWindow()
        window.show()
        res = app.exec_()
    except Exception:
        traceback.print_exc()
        print('Press some key to quit')
        input()  # keep the console open so the traceback can be read
    sys.exit(res)


if __name__ == '__main__':
    main()
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"PyQt5.QtWidgets.QMessageBox",
"PyQt5.QtWidgets.QApplication",
"sys.exit",
"PyQt5.QtWidgets.QApplication.primaryScreen",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"numpy.mod",
"os.walk",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.reshap... | [((38939, 39038), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Graphical application for clustering of MCR-ALS output."""'}), "(description=\n 'Graphical application for clustering of MCR-ALS output.')\n", (38962, 39038), False, 'import argparse\n'), ((39378, 39391), 'sys.exit', 'sys.exit', (['res'], {}), '(res)\n', (39386, 39391), False, 'import sys\n'), ((869, 920), 'pkg_resources.resource_filename', 'resource_filename', (['__name__', '"""mcr/clustering_ui.ui"""'], {}), "(__name__, 'mcr/clustering_ui.ui')\n", (886, 920), False, 'from pkg_resources import resource_filename\n'), ((1071, 1100), 'PyQt5.Qt.qApp.installEventFilter', 'qApp.installEventFilter', (['self'], {}), '(self)\n', (1094, 1100), False, 'from PyQt5.Qt import QMainWindow, qApp\n'), ((2723, 2736), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (2734, 2736), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((4214, 4235), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (4233, 4235), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((4315, 4400), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Open Matrix File"""', '""""""', '"""Matrix File (*.mat)"""'], {}), "(self, 'Open Matrix File', '', 'Matrix File (*.mat)'\n )\n", (4342, 4400), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((7155, 7184), 'numpy.reshape', 'np.reshape', (['self.p', '(z, x, y)'], {}), '(self.p, (z, x, y))\n', (7165, 7184), True, 'import numpy as np\n'), ((8157, 8186), 'numpy.reshape', 'np.reshape', (['self.p', '(z, x, y)'], {}), '(self.p, (z, x, y))\n', (8167, 8186), True, 'import numpy as np\n'), ((12033, 12054), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (12052, 12054), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((12130, 
12235), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Open Matrix File"""', 'sugest', '"""Matrix File (*.csv)"""'], {'options': 'options'}), "(self, 'Open Matrix File', sugest,\n 'Matrix File (*.csv)', options=options)\n", (12157, 12235), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((13049, 13085), 'pandas.read_csv', 'pd.read_csv', (['filepurest'], {'header': 'None'}), '(filepurest, header=None)\n', (13060, 13085), True, 'import pandas as pd\n'), ((13446, 13467), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (13465, 13467), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((13541, 13666), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Open Image File"""', '""""""', '"""Image (*.jpg *.jpeg *.bmp *.png .tif *.tiff)"""'], {'options': 'options'}), "(self, 'Open Image File', '',\n 'Image (*.jpg *.jpeg *.bmp *.png .tif *.tiff)', options=options)\n", (13568, 13666), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((14019, 14048), 'matplotlib.pyplot.close', 'plt.close', (['"""Segmentation Map"""'], {}), "('Segmentation Map')\n", (14028, 14048), True, 'import matplotlib.pyplot as plt\n'), ((14057, 14114), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Segmentation Map"""'], {'tight_layout': "{'pad': 0.5}"}), "('Segmentation Map', tight_layout={'pad': 0.5})\n", (14067, 14114), True, 'import matplotlib.pyplot as plt\n'), ((14121, 14161), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.mapping'], {'cmap': 'self.cmap'}), '(self.mapping, cmap=self.cmap)\n', (14131, 14161), True, 'import matplotlib.pyplot as plt\n'), ((14169, 14183), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (14181, 14183), True, 'import matplotlib.pyplot as plt\n'), ((14192, 14202), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14200, 14202), True, 'import 
matplotlib.pyplot as plt\n'), ((14246, 14283), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['"""Segmentation Map"""'], {}), "('Segmentation Map')\n", (14263, 14283), True, 'import matplotlib.pyplot as plt\n'), ((17910, 17938), 'matplotlib.pyplot.close', 'plt.close', (['"""Average Spectra"""'], {}), "('Average Spectra')\n", (17919, 17938), True, 'import matplotlib.pyplot as plt\n'), ((17953, 18009), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Average Spectra"""'], {'tight_layout': "{'pad': 0.5}"}), "('Average Spectra', tight_layout={'pad': 0.5})\n", (17963, 18009), True, 'import matplotlib.pyplot as plt\n'), ((18142, 18176), 'matplotlib.pyplot.legend', 'plt.legend', (['self.label'], {'loc': '"""best"""'}), "(self.label, loc='best')\n", (18152, 18176), True, 'import matplotlib.pyplot as plt\n'), ((18184, 18211), 'matplotlib.pyplot.show', 'plt.show', (['"""Average Spectra"""'], {}), "('Average Spectra')\n", (18192, 18211), True, 'import matplotlib.pyplot as plt\n'), ((18250, 18286), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['"""Average Spectra"""'], {}), "('Average Spectra')\n", (18267, 18286), True, 'import matplotlib.pyplot as plt\n'), ((18387, 18407), 'matplotlib.pyplot.close', 'plt.close', (['"""Spectra"""'], {}), "('Spectra')\n", (18396, 18407), True, 'import matplotlib.pyplot as plt\n'), ((18416, 18464), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Spectra"""'], {'tight_layout': "{'pad': 0.5}"}), "('Spectra', tight_layout={'pad': 0.5})\n", (18426, 18464), True, 'import matplotlib.pyplot as plt\n'), ((18751, 18781), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavenumber(1/cm)"""'], {}), "('Wavenumber(1/cm)')\n", (18761, 18781), True, 'import matplotlib.pyplot as plt\n'), ((18804, 18840), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absorption(arb. units)"""'], {}), "('Absorption(arb. 
units)')\n", (18814, 18840), True, 'import matplotlib.pyplot as plt\n'), ((18863, 18926), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'direction': '"""in"""', 'length': '(8)', 'width': '(1)'}), "(axis='both', direction='in', length=8, width=1)\n", (18878, 18926), True, 'import matplotlib.pyplot as plt\n'), ((18934, 18977), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""'}), "(axis='both', which='major')\n", (18949, 18977), True, 'import matplotlib.pyplot as plt\n'), ((19000, 19010), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19008, 19010), True, 'import matplotlib.pyplot as plt\n'), ((19053, 19081), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['"""Spectra"""'], {}), "('Spectra')\n", (19070, 19081), True, 'import matplotlib.pyplot as plt\n'), ((19582, 19611), 'matplotlib.pyplot.close', 'plt.close', (['"""Image Projection"""'], {}), "('Image Projection')\n", (19591, 19611), True, 'import matplotlib.pyplot as plt\n'), ((19620, 19677), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Image Projection"""'], {'tight_layout': "{'pad': 0.5}"}), "('Image Projection', tight_layout={'pad': 0.5})\n", (19630, 19677), True, 'import matplotlib.pyplot as plt\n'), ((19758, 19772), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (19770, 19772), True, 'import matplotlib.pyplot as plt\n'), ((19781, 19791), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19789, 19791), True, 'import matplotlib.pyplot as plt\n'), ((19831, 19868), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['"""Image Projection"""'], {}), "('Image Projection')\n", (19848, 19868), True, 'import matplotlib.pyplot as plt\n'), ((24134, 24163), 'matplotlib.pyplot.close', 'plt.close', (['"""Image Projection"""'], {}), "('Image Projection')\n", (24143, 24163), True, 'import matplotlib.pyplot as plt\n'), ((24172, 24229), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Image 
Projection"""'], {'tight_layout': "{'pad': 0.5}"}), "('Image Projection', tight_layout={'pad': 0.5})\n", (24182, 24229), True, 'import matplotlib.pyplot as plt\n'), ((24465, 24475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24473, 24475), True, 'import matplotlib.pyplot as plt\n'), ((24515, 24552), 'matplotlib.pyplot.fignum_exists', 'plt.fignum_exists', (['"""Image Projection"""'], {}), "('Image Projection')\n", (24532, 24552), True, 'import matplotlib.pyplot as plt\n'), ((34569, 34590), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (34588, 34590), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((34664, 34784), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QFileDialog.getSaveFileName', (['self', '"""QFileDialog.getSaveFileName()"""', 'suggested', '"""Excel Files(*.xls)"""'], {'options': 'options'}), "(self, 'QFileDialog.getSaveFileName()',\n suggested, 'Excel Files(*.xls)', options=options)\n", (34691, 34784), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((39101, 39124), 'PyQt5.QtWidgets.QApplication.instance', 'QApplication.instance', ([], {}), '()\n', (39122, 39124), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((3084, 3100), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3093, 3100), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3124), 'PyQt5.Qt.qApp.quit', 'qApp.quit', ([], {}), '()\n', (3122, 3124), False, 'from PyQt5.Qt import QMainWindow, qApp\n'), ((3269, 3290), 'PyQt5.QtWidgets.QFileDialog.Options', 'QFileDialog.Options', ([], {}), '()\n', (3288, 3290), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((3376, 3437), 'PyQt5.QtWidgets.QFileDialog.getExistingDirectory', 'QFileDialog.getExistingDirectory', (['self', '"""Open the input data"""'], {}), "(self, 'Open the input data')\n", (3408, 3437), False, 'from PyQt5.QtWidgets import 
QApplication, QFileDialog, QMessageBox\n'), ((4469, 4491), 'os.path.dirname', 'dirname', (['self.fileName'], {}), '(self.fileName)\n', (4476, 4491), False, 'from os.path import basename, dirname\n'), ((4656, 4669), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (4667, 4669), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((5056, 5115), 'pandas.read_csv', 'pd.read_csv', (["(self.foldername + '//Fileall.csv')"], {'header': 'None'}), "(self.foldername + '//Fileall.csv', header=None)\n", (5067, 5115), True, 'import pandas as pd\n'), ((5329, 5342), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (5340, 5342), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((5685, 5744), 'pandas.read_csv', 'pd.read_csv', (["(self.foldername + '//Fileall.csv')"], {'header': 'None'}), "(self.foldername + '//Fileall.csv', header=None)\n", (5696, 5744), True, 'import pandas as pd\n'), ((5980, 5993), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (5991, 5993), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((8606, 8617), 'os.chdir', 'os.chdir', (['i'], {}), '(i)\n', (8614, 8617), False, 'import os\n'), ((8642, 8660), 'glob.glob', 'glob.glob', (['"""*.mat"""'], {}), "('*.mat')\n", (8651, 8660), False, 'import glob\n'), ((10749, 10778), 'numpy.reshape', 'np.reshape', (['self.p', '(z, x, y)'], {}), '(self.p, (z, x, y))\n', (10759, 10778), True, 'import numpy as np\n'), ((11277, 11301), 'os.path.isfile', 'os.path.isfile', (['filepure'], {}), '(filepure)\n', (11291, 11301), False, 'import os\n'), ((11333, 11346), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (11344, 11346), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((12450, 12486), 'pandas.read_csv', 'pd.read_csv', (['filepurest'], {'header': 'None'}), '(filepurest, header=None)\n', (12461, 12486), True, 'import pandas as pd\n'), ((13732, 13752), 
'matplotlib.pyplot.imread', 'plt.imread', (['fileName'], {}), '(fileName)\n', (13742, 13752), True, 'import matplotlib.pyplot as plt\n'), ((16679, 16711), 'matplotlib.colors.ListedColormap', 'colors.ListedColormap', (['self.clis'], {}), '(self.clis)\n', (16700, 16711), False, 'from matplotlib import colors\n'), ((17402, 17444), 'numpy.mean', 'np.mean', (['self.sp[:, self.inde[jj]]'], {'axis': '(1)'}), '(self.sp[:, self.inde[jj]], axis=1)\n', (17409, 17444), True, 'import numpy as np\n'), ((18530, 18579), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wavenumber', 'self.sp[:, self.index]'], {}), '(self.wavenumber, self.sp[:, self.index])\n', (18538, 18579), True, 'import matplotlib.pyplot as plt\n'), ((18705, 18742), 'matplotlib.pyplot.plot', 'plt.plot', (['self.wavenumber', 'self.datas'], {}), '(self.wavenumber, self.datas)\n', (18713, 18742), True, 'import matplotlib.pyplot as plt\n'), ((19101, 19122), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Spectra"""'], {}), "('Spectra')\n", (19111, 19122), True, 'import matplotlib.pyplot as plt\n'), ((19888, 19918), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Image Projection"""'], {}), "('Image Projection')\n", (19898, 19918), True, 'import matplotlib.pyplot as plt\n'), ((21648, 21671), 'numpy.min', 'np.min', (['self.wavenumber'], {}), '(self.wavenumber)\n', (21654, 21671), True, 'import numpy as np\n'), ((24295, 24315), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.img'], {}), '(self.img)\n', (24305, 24315), True, 'import matplotlib.pyplot as plt\n'), ((24328, 24338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24336, 24338), True, 'import matplotlib.pyplot as plt\n'), ((24442, 24456), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (24454, 24456), True, 'import matplotlib.pyplot as plt\n'), ((24572, 24602), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Image Projection"""'], {}), "('Image Projection')\n", (24582, 24602), True, 'import matplotlib.pyplot as plt\n'), ((34822, 
34848), 'os.path.splitext', 'os.path.splitext', (['filesave'], {}), '(filesave)\n', (34838, 34848), False, 'import os\n'), ((34999, 35044), 'pandas.DataFrame', 'pd.DataFrame', (["{'Wavenumber': self.wavenumber}"], {}), "({'Wavenumber': self.wavenumber})\n", (35011, 35044), True, 'import pandas as pd\n'), ((35066, 35115), 'pandas.DataFrame', 'pd.DataFrame', (['self.spmean.T'], {'columns': '[self.label]'}), '(self.spmean.T, columns=[self.label])\n', (35078, 35115), True, 'import pandas as pd\n'), ((35140, 35189), 'pandas.concat', 'pd.concat', (['[df_save, df_spec]'], {'axis': '(1)', 'sort': '(False)'}), '([df_save, df_spec], axis=1, sort=False)\n', (35149, 35189), True, 'import pandas as pd\n'), ((39163, 39185), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (39175, 39185), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((39296, 39317), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (39315, 39317), False, 'import traceback\n'), ((3677, 3690), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (3688, 3690), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((6695, 6708), 'numpy.mod', 'np.mod', (['xy', 'x'], {}), '(xy, x)\n', (6701, 6708), True, 'import numpy as np\n'), ((7697, 7710), 'numpy.mod', 'np.mod', (['xy', 'y'], {}), '(xy, y)\n', (7703, 7710), True, 'import numpy as np\n'), ((8553, 8572), 'os.walk', 'os.walk', (['foldername'], {}), '(foldername)\n', (8560, 8572), False, 'import os\n'), ((9399, 9412), 'PyQt5.QtWidgets.QMessageBox', 'QMessageBox', ([], {}), '()\n', (9410, 9412), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((18650, 18678), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'self.wavenumv'}), '(x=self.wavenumv)\n', (18661, 18678), True, 'import matplotlib.pyplot as plt\n'), ((26445, 26480), 'numpy.concatenate', 'np.concatenate', (['(new, dumy)'], {'axis': '(1)'}), '((new, dumy), 
axis=1)\n', (26459, 26480), True, 'import numpy as np\n'), ((9982, 10000), 'os.path.basename', 'basename', (['fileName'], {}), '(fileName)\n', (9990, 10000), False, 'from os.path import basename, dirname\n'), ((10332, 10355), 'numpy.min', 'np.min', (['self.wavenumber'], {}), '(self.wavenumber)\n', (10338, 10355), True, 'import numpy as np\n'), ((10403, 10426), 'numpy.max', 'np.max', (['self.wavenumber'], {}), '(self.wavenumber)\n', (10409, 10426), True, 'import numpy as np\n'), ((10545, 10568), 'numpy.min', 'np.min', (['self.wavenumber'], {}), '(self.wavenumber)\n', (10551, 10568), True, 'import numpy as np\n'), ((14619, 14635), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (14633, 14635), False, 'from sklearn.preprocessing import StandardScaler\n'), ((14713, 14753), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'nclus', 'random_state': '(0)'}), '(n_clusters=nclus, random_state=0)\n', (14719, 14753), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((14804, 14853), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 'nclus', 'random_state': '(0)'}), '(n_clusters=nclus, random_state=0)\n', (14819, 14853), False, 'from sklearn.cluster import MiniBatchKMeans, KMeans\n'), ((8341, 8369), 'PyQt5.QtWidgets.QApplication.primaryScreen', 'QApplication.primaryScreen', ([], {}), '()\n', (8367, 8369), False, 'from PyQt5.QtWidgets import QApplication, QFileDialog, QMessageBox\n'), ((11486, 11504), 'os.path.basename', 'basename', (['filepure'], {}), '(filepure)\n', (11494, 11504), False, 'from os.path import basename, dirname\n'), ((21531, 21554), 'numpy.max', 'np.max', (['self.wavenumber'], {}), '(self.wavenumber)\n', (21537, 21554), True, 'import numpy as np\n'), ((21557, 21580), 'numpy.min', 'np.min', (['self.wavenumber'], {}), '(self.wavenumber)\n', (21563, 21580), True, 'import numpy as np\n'), ((25311, 25341), 'collections.Counter', 'collections.Counter', (['self.clis'], {}), '(self.clis)\n', 
(25330, 25341), False, 'import collections\n'), ((25990, 26020), 'collections.Counter', 'collections.Counter', (['self.clis'], {}), '(self.clis)\n', (26009, 26020), False, 'import collections\n'), ((26558, 26589), 'collections.Counter', 'collections.Counter', (['self.label'], {}), '(self.label)\n', (26577, 26589), False, 'import collections\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
from scipy import constants
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def dynamic_press(n_s, v_xyz, specie: str = "i"):
    r"""Computes dynamic pressure.

    Parameters
    ----------
    n_s : xarray.DataArray
        Time series of the number density of the specie.
    v_xyz : xarray.DataArray
        Time series of the bulk velocity of the specie.
    specie : {"i", "e"}, Optional
        Specie. default "i".

    Returns
    -------
    p_dyn : xarray.DataArray
        Time series of the dynamic pressure of the specie.

    Examples
    --------
    >>> from pyrfu import mms, pyrf
    >>> tint = ["2019-09-14T07:54:00.000", "2019-09-14T08:11:00.000"]
    >>> n_i = mms.get_data("Ni_fpi_fast_l2", tint, 1)
    >>> v_xyz_i = mms.get_data("Vi_gse_fpi_fast_l2", tint, 1)
    >>> p = pyrf.dynamic_press(n_i, v_xyz_i, specie="i")

    """
    # Particle mass is selected by the specie flag; anything else is rejected.
    masses = {"i": constants.proton_mass, "e": constants.electron_mass}
    if specie not in masses:
        raise ValueError("Unknown specie")
    mass = masses[specie]

    # p_dyn = n * m * |v|^2, with |v| taken over the component axis (axis 0).
    speed = np.linalg.norm(v_xyz, axis=0)
    return n_s * mass * speed ** 2
| [
"numpy.linalg.norm"
] | [((1528, 1557), 'numpy.linalg.norm', 'np.linalg.norm', (['v_xyz'], {'axis': '(0)'}), '(v_xyz, axis=0)\n', (1542, 1557), True, 'import numpy as np\n')] |
import keras.backend as K
import numpy as np
import pandas as pd
from datetime import time, datetime
import tensorflow as tf
from argparse import ArgumentParser
def get_flops(model):
    """Return the total float-operation count ("flops") of a Keras model.

    Uses the TF1-style profiler on the graph backing the current Keras session.
    """
    meta = tf.RunMetadata()
    profile_opts = tf.profiler.ProfileOptionBuilder.float_operation()
    # Profile the Keras session graph with the float-operation counter.
    profile = tf.profiler.profile(graph=K.get_session().graph,
                                run_meta=meta, cmd='op', options=profile_opts)
    return profile.total_float_ops
def get_df_time_slice(df, hour, minute):
    """Return the rows of ``df`` whose 'date' timestamp falls exactly on hour:minute:00."""
    target = time(hour, minute, 0)
    clock_times = df.date.apply(lambda stamp: stamp.to_pydatetime().time())
    return df[clock_times == target]
def shuffle_x_y(X, y):
    """Shuffle X and y in unison along the first axis, preserving row pairing."""
    order = np.arange(len(X))
    np.random.shuffle(order)  # in-place permutation of the shared row order
    return X[order], y[order]
def split_on_date(df, split_date='2007/1/1'):
    """Chronologically split ``df`` at ``split_date`` ('YYYY/M/D').

    Returns (rows before the split date, rows from the split date on);
    the split date must be present in the 'date' column.
    """
    ordered = df.sort_values('date').reset_index()
    cutoff = datetime.strptime(split_date, '%Y/%m/%d').date()
    # First position whose date equals the cutoff marks the boundary.
    split_pt = min(ordered[ordered['date'] == cutoff].index)
    return ordered.iloc[:split_pt], ordered.iloc[split_pt:]
def set_datetime_index(df, datetime_col='datetime'):
    """Index ``df`` by a DatetimeIndex built from ``datetime_col``.

    Returns ``df`` unchanged when it already carries a DatetimeIndex;
    raises ValueError when the column is missing or unparseable.
    """
    # Guard clause: nothing to do if the index is already datetime-typed.
    if isinstance(df.index, pd.core.indexes.datetimes.DatetimeIndex):
        return df
    try:
        stamps = pd.DatetimeIndex(df[datetime_col])
        return df.set_index(stamps, drop=False)
    except ValueError:
        raise ValueError('{0} is not the correct datetime column name or the column values '
                         'are not formatted correctly (use datetime)'.format(datetime_col))
def get_args():
    """Build the experiment's CLI parser and return the parsed arguments."""
    cli = ArgumentParser()
    cli.add_argument('--add_config', type=str, default=None,
                     help="full path to the yaml file containing the experiment's (hyper)parameters.")
    cli.add_argument('--grid_search', action='store_true')
    cli.add_argument("--observer", type=str, default='mongodb', help="mongodb or file")
    cli.add_argument("--seed", type=int, default=0)
    cli.add_argument("--load", type=str, default=None, help="full path to the model's weights")
    return cli.parse_args()
"datetime.time",
"argparse.ArgumentParser",
"tensorflow.profiler.ProfileOptionBuilder.float_operation",
"pandas.DatetimeIndex",
"datetime.datetime.strptime",
"keras.backend.get_session",
"tensorflow.RunMetadata",
"numpy.arange",
"numpy.random.shuffle"
] | [((200, 216), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (214, 216), True, 'import tensorflow as tf\n'), ((228, 278), 'tensorflow.profiler.ProfileOptionBuilder.float_operation', 'tf.profiler.ProfileOptionBuilder.float_operation', ([], {}), '()\n', (276, 278), True, 'import tensorflow as tf\n'), ((603, 624), 'datetime.time', 'time', (['hour', 'minute', '(0)'], {}), '(hour, minute, 0)\n', (607, 624), False, 'from datetime import time, datetime\n'), ((747, 768), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (756, 768), True, 'import numpy as np\n'), ((773, 796), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (790, 796), True, 'import numpy as np\n'), ((1611, 1627), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1625, 1627), False, 'from argparse import ArgumentParser\n'), ((1225, 1259), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['df[datetime_col]'], {}), '(df[datetime_col])\n', (1241, 1259), True, 'import pandas as pd\n'), ((384, 399), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (397, 399), True, 'import keras.backend as K\n'), ((955, 996), 'datetime.datetime.strptime', 'datetime.strptime', (['split_date', '"""%Y/%m/%d"""'], {}), "(split_date, '%Y/%m/%d')\n", (972, 996), False, 'from datetime import time, datetime\n')] |
import numpy as np
import tqdm
from losses.dsm import dsm_score_estimation
import torch.nn.functional as F
import logging
import torch
import os
import shutil
import tensorboardX
import torch.optim as optim
from torchvision.datasets import MNIST, CIFAR10, FashionMNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Subset
from datasets.celeba import CelebA
from models.refinenet_dilated_baseline import RefineNetDilated
__all__ = ['BaselineRunner']
class BaselineRunner():
    """Trains and samples a score network (RefineNetDilated) with denoising
    score matching (DSM) at a single fixed noise level (sigma=0.01).

    ``args`` supplies filesystem/run options (run, log, doc, image_folder,
    resume_training); ``config`` supplies data/model/optim/training settings.
    """

    def __init__(self, args, config):
        # args: command-line namespace; config: nested experiment configuration.
        self.args = args
        self.config = config

    def get_optimizer(self, parameters):
        """Build the optimizer named in config.optim for ``parameters``.

        Supports 'Adam', 'RMSProp', and 'SGD' (momentum fixed at 0.9);
        raises NotImplementedError for anything else.
        """
        if self.config.optim.optimizer == 'Adam':
            return optim.Adam(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay,
                              betas=(self.config.optim.beta1, 0.999), amsgrad=self.config.optim.amsgrad)
        elif self.config.optim.optimizer == 'RMSProp':
            return optim.RMSprop(parameters, lr=self.config.optim.lr, weight_decay=self.config.optim.weight_decay)
        elif self.config.optim.optimizer == 'SGD':
            return optim.SGD(parameters, lr=self.config.optim.lr, momentum=0.9)
        else:
            raise NotImplementedError('Optimizer {} not understood.'.format(self.config.optim.optimizer))

    def logit_transform(self, image, lam=1e-6):
        """Map image values from [0, 1] to logit space.

        ``lam`` squeezes values into [lam, 1-lam] first so the logit is finite
        at the endpoints. Returns log(x) - log(1 - x) of the squeezed input.
        """
        image = lam + (1 - 2 * lam) * image
        return torch.log(image) - torch.log1p(-image)

    def train(self):
        """Run the DSM training loop.

        Builds the configured dataset (CIFAR10 / MNIST / CELEBA), trains the
        score network, logs to tensorboardX (wiping any previous run at the
        same tb_path), evaluates a held-out DSM loss every 100 steps, and
        checkpoints every config.training.snapshot_freq steps. Returns 0 once
        config.training.n_iters steps have been taken.
        """
        # Train-time transform optionally adds horizontal flips; the test
        # transform never flips.
        if self.config.data.random_flip is False:
            tran_transform = test_transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
        else:
            tran_transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor()
            ])
            test_transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])

        # Dataset selection; CELEBA is center-cropped to 140 before resizing.
        if self.config.data.dataset == 'CIFAR10':
            dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True,
                              transform=tran_transform)
            test_dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10_test'), train=False, download=True,
                                   transform=test_transform)
        elif self.config.data.dataset == 'MNIST':
            dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True,
                            transform=tran_transform)
            test_dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist_test'), train=False, download=True,
                                 transform=test_transform)
        elif self.config.data.dataset == 'CELEBA':
            if self.config.data.random_flip:
                dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train',
                                transform=transforms.Compose([
                                    transforms.CenterCrop(140),
                                    transforms.Resize(self.config.data.image_size),
                                    transforms.RandomHorizontalFlip(),
                                    transforms.ToTensor(),
                                ]), download=True)
            else:
                dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='train',
                                transform=transforms.Compose([
                                    transforms.CenterCrop(140),
                                    transforms.Resize(self.config.data.image_size),
                                    transforms.ToTensor(),
                                ]), download=True)

            test_dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba_test'), split='test',
                                 transform=transforms.Compose([
                                     transforms.CenterCrop(140),
                                     transforms.Resize(self.config.data.image_size),
                                     transforms.ToTensor(),
                                 ]), download=True)

        dataloader = DataLoader(dataset, batch_size=self.config.training.batch_size, shuffle=True, num_workers=4)
        test_loader = DataLoader(test_dataset, batch_size=self.config.training.batch_size, shuffle=True,
                                 num_workers=4, drop_last=True)
        test_iter = iter(test_loader)
        self.config.input_dim = self.config.data.image_size ** 2 * self.config.data.channels

        # Start each run with a clean tensorboard directory.
        tb_path = os.path.join(self.args.run, 'tensorboard', self.args.doc)
        if os.path.exists(tb_path):
            shutil.rmtree(tb_path)

        tb_logger = tensorboardX.SummaryWriter(log_dir=tb_path)
        score = RefineNetDilated(self.config).to(self.config.device)

        score = torch.nn.DataParallel(score)

        optimizer = self.get_optimizer(score.parameters())

        # Resume restores model and optimizer state, but note the step counter
        # below restarts from 0.
        if self.args.resume_training:
            states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'))
            score.load_state_dict(states[0])
            optimizer.load_state_dict(states[1])

        step = 0

        for epoch in range(self.config.training.n_epochs):
            for i, (X, y) in enumerate(dataloader):
                step += 1
                score.train()
                X = X.to(self.config.device)
                # Rescale to [0, 255/256] and add uniform noise in [0, 1/256)
                # (dequantization of 8-bit pixel values).
                X = X / 256. * 255. + torch.rand_like(X) / 256.
                if self.config.data.logit_transform:
                    X = self.logit_transform(X)

                loss = dsm_score_estimation(score, X, sigma=0.01)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                tb_logger.add_scalar('loss', loss, global_step=step)
                logging.info("step: {}, loss: {}".format(step, loss.item()))

                if step >= self.config.training.n_iters:
                    return 0

                # Periodic held-out DSM loss on one test batch; the test
                # iterator is recycled when exhausted.
                if step % 100 == 0:
                    score.eval()
                    try:
                        test_X, test_y = next(test_iter)
                    except StopIteration:
                        test_iter = iter(test_loader)
                        test_X, test_y = next(test_iter)

                    test_X = test_X.to(self.config.device)
                    test_X = test_X / 256. * 255. + torch.rand_like(test_X) / 256.
                    if self.config.data.logit_transform:
                        test_X = self.logit_transform(test_X)

                    with torch.no_grad():
                        test_dsm_loss = dsm_score_estimation(score, test_X, sigma=0.01)
                        tb_logger.add_scalar('test_dsm_loss', test_dsm_loss, global_step=step)

                # Snapshot both a step-tagged checkpoint and the rolling
                # 'checkpoint.pth' used for resume/test.
                if step % self.config.training.snapshot_freq == 0:
                    states = [
                        score.state_dict(),
                        optimizer.state_dict(),
                    ]
                    torch.save(states, os.path.join(self.args.log, 'checkpoint_{}.pth'.format(step)))
                    torch.save(states, os.path.join(self.args.log, 'checkpoint.pth'))

    def Langevin_dynamics(self, x_mod, scorenet, n_steps=1000, step_lr=0.00002):
        """Run ``n_steps`` of Langevin dynamics driven by ``scorenet``.

        Each step does x <- x + step_lr * score(x) + sqrt(2 * step_lr) * z
        with z ~ N(0, I). Returns the list of intermediate states (clamped to
        [0, 1] and moved to CPU), recorded before each update.
        """
        images = []

        with torch.no_grad():
            for _ in range(n_steps):
                images.append(torch.clamp(x_mod, 0.0, 1.0).to('cpu'))
                noise = torch.randn_like(x_mod) * np.sqrt(step_lr * 2)
                grad = scorenet(x_mod)
                x_mod = x_mod + step_lr * grad + noise
                print("modulus of grad components: mean {}, max {}".format(grad.abs().mean(), grad.abs().max()))

        return images

    def test(self):
        """Sample images from the trained score network via Langevin dynamics.

        Loads the rolling checkpoint, starts from uniform noise shaped like one
        data batch (100 samples for MNIST/FashionMNIST/CIFAR10, a fixed
        100x3xHxW tensor for CELEBA), and saves every intermediate state as
        'samples_{i}.pth' into args.image_folder.
        """
        states = torch.load(os.path.join(self.args.log, 'checkpoint.pth'), map_location=self.config.device)
        score = RefineNetDilated(self.config).to(self.config.device)
        score = torch.nn.DataParallel(score)
        score.load_state_dict(states[0])

        if not os.path.exists(self.args.image_folder):
            os.makedirs(self.args.image_folder)

        score.eval()

        if self.config.data.dataset == 'MNIST' or self.config.data.dataset == 'FashionMNIST':
            transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
            if self.config.data.dataset == 'MNIST':
                dataset = MNIST(os.path.join(self.args.run, 'datasets', 'mnist'), train=True, download=True,
                                transform=transform)
            else:
                dataset = FashionMNIST(os.path.join(self.args.run, 'datasets', 'fmnist'), train=True, download=True,
                                       transform=transform)

            dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=4)
            data_iter = iter(dataloader)
            samples, _ = next(data_iter)
            samples = samples.cuda()
            # A data batch is drawn only for its shape; sampling starts from
            # uniform noise of the same shape.
            samples = torch.rand_like(samples)

            all_samples = self.Langevin_dynamics(samples, score, 1000, 0.00002)

            for i, sample in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size,
                                     self.config.data.image_size)

                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)

                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))

        elif self.config.data.dataset == 'CELEBA':
            dataset = CelebA(root=os.path.join(self.args.run, 'datasets', 'celeba'), split='test',
                            transform=transforms.Compose([
                                transforms.CenterCrop(140),
                                transforms.Resize(self.config.data.image_size),
                                transforms.ToTensor(),
                            ]), download=True)
            dataloader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)
            samples, _ = next(iter(dataloader))

            # The drawn batch is discarded; sampling starts from fresh
            # uniform noise of fixed shape 100x3xHxW.
            samples = torch.rand(100, 3, self.config.data.image_size, self.config.data.image_size,
                                device=self.config.device)
            all_samples = self.Langevin_dynamics(samples, score, 1000, 0.00002)

            for i, sample in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size,
                                     self.config.data.image_size)

                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)

                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))

        else:
            # Fallback branch; only CIFAR10 actually builds a dataset here.
            transform = transforms.Compose([
                transforms.Resize(self.config.data.image_size),
                transforms.ToTensor()
            ])
            if self.config.data.dataset == 'CIFAR10':
                dataset = CIFAR10(os.path.join(self.args.run, 'datasets', 'cifar10'), train=True, download=True,
                                  transform=transform)

            dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=4)
            data_iter = iter(dataloader)
            samples, _ = next(data_iter)
            samples = samples.cuda()
            samples = torch.rand_like(samples)

            all_samples = self.Langevin_dynamics(samples, score, 1000, 0.00002)

            for i, sample in enumerate(tqdm.tqdm(all_samples)):
                sample = sample.view(100, self.config.data.channels, self.config.data.image_size,
                                     self.config.data.image_size)

                if self.config.data.logit_transform:
                    sample = torch.sigmoid(sample)

                torch.save(sample, os.path.join(self.args.image_folder, 'samples_{}.pth'.format(i)))
| [
"numpy.sqrt",
"torch.utils.data.DataLoader",
"os.path.exists",
"tensorboardX.SummaryWriter",
"torch.optim.RMSprop",
"torchvision.transforms.ToTensor",
"torch.optim.SGD",
"torch.rand_like",
"torchvision.transforms.RandomHorizontalFlip",
"torch.randn_like",
"losses.dsm.dsm_score_estimation",
"to... | [((4436, 4533), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.config.training.batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset, batch_size=self.config.training.batch_size, shuffle=\n True, num_workers=4)\n', (4446, 4533), False, 'from torch.utils.data import DataLoader, Subset\n'), ((4551, 4668), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'self.config.training.batch_size', 'shuffle': '(True)', 'num_workers': '(4)', 'drop_last': '(True)'}), '(test_dataset, batch_size=self.config.training.batch_size,\n shuffle=True, num_workers=4, drop_last=True)\n', (4561, 4668), False, 'from torch.utils.data import DataLoader, Subset\n'), ((4849, 4906), 'os.path.join', 'os.path.join', (['self.args.run', '"""tensorboard"""', 'self.args.doc'], {}), "(self.args.run, 'tensorboard', self.args.doc)\n", (4861, 4906), False, 'import os\n'), ((4918, 4941), 'os.path.exists', 'os.path.exists', (['tb_path'], {}), '(tb_path)\n', (4932, 4941), False, 'import os\n'), ((4999, 5042), 'tensorboardX.SummaryWriter', 'tensorboardX.SummaryWriter', ([], {'log_dir': 'tb_path'}), '(log_dir=tb_path)\n', (5025, 5042), False, 'import tensorboardX\n'), ((5129, 5157), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['score'], {}), '(score)\n', (5150, 5157), False, 'import torch\n'), ((8182, 8210), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['score'], {}), '(score)\n', (8203, 8210), False, 'import torch\n'), ((718, 895), 'torch.optim.Adam', 'optim.Adam', (['parameters'], {'lr': 'self.config.optim.lr', 'weight_decay': 'self.config.optim.weight_decay', 'betas': '(self.config.optim.beta1, 0.999)', 'amsgrad': 'self.config.optim.amsgrad'}), '(parameters, lr=self.config.optim.lr, weight_decay=self.config.\n optim.weight_decay, betas=(self.config.optim.beta1, 0.999), amsgrad=\n self.config.optim.amsgrad)\n', (728, 895), True, 'import torch.optim as optim\n'), ((1445, 1461), 'torch.log', 'torch.log', (['image'], 
{}), '(image)\n', (1454, 1461), False, 'import torch\n'), ((1464, 1483), 'torch.log1p', 'torch.log1p', (['(-image)'], {}), '(-image)\n', (1475, 1483), False, 'import torch\n'), ((4955, 4977), 'shutil.rmtree', 'shutil.rmtree', (['tb_path'], {}), '(tb_path)\n', (4968, 4977), False, 'import shutil\n'), ((7539, 7554), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7552, 7554), False, 'import torch\n'), ((8017, 8062), 'os.path.join', 'os.path.join', (['self.args.log', '"""checkpoint.pth"""'], {}), "(self.args.log, 'checkpoint.pth')\n", (8029, 8062), False, 'import os\n'), ((8269, 8307), 'os.path.exists', 'os.path.exists', (['self.args.image_folder'], {}), '(self.args.image_folder)\n', (8283, 8307), False, 'import os\n'), ((8321, 8356), 'os.makedirs', 'os.makedirs', (['self.args.image_folder'], {}), '(self.args.image_folder)\n', (8332, 8356), False, 'import os\n'), ((9072, 9136), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(100)', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset, batch_size=100, shuffle=True, num_workers=4)\n', (9082, 9136), False, 'from torch.utils.data import DataLoader, Subset\n'), ((9279, 9303), 'torch.rand_like', 'torch.rand_like', (['samples'], {}), '(samples)\n', (9294, 9303), False, 'import torch\n'), ((990, 1090), 'torch.optim.RMSprop', 'optim.RMSprop', (['parameters'], {'lr': 'self.config.optim.lr', 'weight_decay': 'self.config.optim.weight_decay'}), '(parameters, lr=self.config.optim.lr, weight_decay=self.config\n .optim.weight_decay)\n', (1003, 1090), True, 'import torch.optim as optim\n'), ((2225, 2275), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""cifar10"""'], {}), "(self.args.run, 'datasets', 'cifar10')\n", (2237, 2275), False, 'import os\n'), ((2395, 2450), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""cifar10_test"""'], {}), "(self.args.run, 'datasets', 'cifar10_test')\n", (2407, 2450), False, 'import os\n'), ((5059, 5088), 
'models.refinenet_dilated_baseline.RefineNetDilated', 'RefineNetDilated', (['self.config'], {}), '(self.config)\n', (5075, 5088), False, 'from models.refinenet_dilated_baseline import RefineNetDilated\n'), ((5289, 5334), 'os.path.join', 'os.path.join', (['self.args.log', '"""checkpoint.pth"""'], {}), "(self.args.log, 'checkpoint.pth')\n", (5301, 5334), False, 'import os\n'), ((5851, 5893), 'losses.dsm.dsm_score_estimation', 'dsm_score_estimation', (['score', 'X'], {'sigma': '(0.01)'}), '(score, X, sigma=0.01)\n', (5871, 5893), False, 'from losses.dsm import dsm_score_estimation\n'), ((8113, 8142), 'models.refinenet_dilated_baseline.RefineNetDilated', 'RefineNetDilated', (['self.config'], {}), '(self.config)\n', (8129, 8142), False, 'from models.refinenet_dilated_baseline import RefineNetDilated\n'), ((9424, 9446), 'tqdm.tqdm', 'tqdm.tqdm', (['all_samples'], {}), '(all_samples)\n', (9433, 9446), False, 'import tqdm\n'), ((10303, 10366), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(64)', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset, batch_size=64, shuffle=True, num_workers=4)\n', (10313, 10366), False, 'from torch.utils.data import DataLoader, Subset\n'), ((10438, 10545), 'torch.rand', 'torch.rand', (['(100)', '(3)', 'self.config.data.image_size', 'self.config.data.image_size'], {'device': 'self.config.device'}), '(100, 3, self.config.data.image_size, self.config.data.image_size,\n device=self.config.device)\n', (10448, 10545), False, 'import torch\n'), ((11518, 11582), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(100)', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset, batch_size=100, shuffle=True, num_workers=4)\n', (11528, 11582), False, 'from torch.utils.data import DataLoader, Subset\n'), ((11724, 11748), 'torch.rand_like', 'torch.rand_like', (['samples'], {}), '(samples)\n', (11739, 11748), False, 'import torch\n'), ((1156, 1216), 'torch.optim.SGD', 'optim.SGD', (['parameters'], 
{'lr': 'self.config.optim.lr', 'momentum': '(0.9)'}), '(parameters, lr=self.config.optim.lr, momentum=0.9)\n', (1165, 1216), True, 'import torch.optim as optim\n'), ((1639, 1685), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (1656, 1685), True, 'import torchvision.transforms as transforms\n'), ((1703, 1724), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1722, 1724), True, 'import torchvision.transforms as transforms\n'), ((1820, 1866), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (1837, 1866), True, 'import torchvision.transforms as transforms\n'), ((1884, 1922), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1915, 1922), True, 'import torchvision.transforms as transforms\n'), ((1940, 1961), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1959, 1961), True, 'import torchvision.transforms as transforms\n'), ((2043, 2089), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (2060, 2089), True, 'import torchvision.transforms as transforms\n'), ((2107, 2128), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2126, 2128), True, 'import torchvision.transforms as transforms\n'), ((2619, 2667), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""mnist"""'], {}), "(self.args.run, 'datasets', 'mnist')\n", (2631, 2667), False, 'import os\n'), ((2783, 2836), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""mnist_test"""'], {}), "(self.args.run, 'datasets', 'mnist_test')\n", (2795, 2836), False, 'import os\n'), ((7687, 7710), 'torch.randn_like', 'torch.randn_like', (['x_mod'], {}), '(x_mod)\n', (7703, 7710), False, 'import torch\n'), ((7713, 
7733), 'numpy.sqrt', 'np.sqrt', (['(step_lr * 2)'], {}), '(step_lr * 2)\n', (7720, 7733), True, 'import numpy as np\n'), ((8535, 8581), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (8552, 8581), True, 'import torchvision.transforms as transforms\n'), ((8599, 8620), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8618, 8620), True, 'import torchvision.transforms as transforms\n'), ((8721, 8769), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""mnist"""'], {}), "(self.args.run, 'datasets', 'mnist')\n", (8733, 8769), False, 'import os\n'), ((8908, 8957), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""fmnist"""'], {}), "(self.args.run, 'datasets', 'fmnist')\n", (8920, 8957), False, 'import os\n'), ((9696, 9717), 'torch.sigmoid', 'torch.sigmoid', (['sample'], {}), '(sample)\n', (9709, 9717), False, 'import torch\n'), ((10696, 10718), 'tqdm.tqdm', 'tqdm.tqdm', (['all_samples'], {}), '(all_samples)\n', (10705, 10718), False, 'import tqdm\n'), ((11870, 11892), 'tqdm.tqdm', 'tqdm.tqdm', (['all_samples'], {}), '(all_samples)\n', (11879, 11892), False, 'import tqdm\n'), ((5700, 5718), 'torch.rand_like', 'torch.rand_like', (['X'], {}), '(X)\n', (5715, 5718), False, 'import torch\n'), ((6825, 6840), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6838, 6840), False, 'import torch\n'), ((6882, 6929), 'losses.dsm.dsm_score_estimation', 'dsm_score_estimation', (['score', 'test_X'], {'sigma': '(0.01)'}), '(score, test_X, sigma=0.01)\n', (6902, 6929), False, 'from losses.dsm import dsm_score_estimation\n'), ((7376, 7421), 'os.path.join', 'os.path.join', (['self.args.log', '"""checkpoint.pth"""'], {}), "(self.args.log, 'checkpoint.pth')\n", (7388, 7421), False, 'import os\n'), ((9906, 9955), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""celeba"""'], {}), "(self.args.run, 'datasets', 
'celeba')\n", (9918, 9955), False, 'import os\n'), ((10968, 10989), 'torch.sigmoid', 'torch.sigmoid', (['sample'], {}), '(sample)\n', (10981, 10989), False, 'import torch\n'), ((11168, 11214), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (11185, 11214), True, 'import torchvision.transforms as transforms\n'), ((11232, 11253), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (11251, 11253), True, 'import torchvision.transforms as transforms\n'), ((11358, 11408), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""cifar10"""'], {}), "(self.args.run, 'datasets', 'cifar10')\n", (11370, 11408), False, 'import os\n'), ((12142, 12163), 'torch.sigmoid', 'torch.sigmoid', (['sample'], {}), '(sample)\n', (12155, 12163), False, 'import torch\n'), ((4012, 4066), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""celeba_test"""'], {}), "(self.args.run, 'datasets', 'celeba_test')\n", (4024, 4066), False, 'import os\n'), ((6649, 6672), 'torch.rand_like', 'torch.rand_like', (['test_X'], {}), '(test_X)\n', (6664, 6672), False, 'import torch\n'), ((7623, 7651), 'torch.clamp', 'torch.clamp', (['x_mod', '(0.0)', '(1.0)'], {}), '(x_mod, 0.0, 1.0)\n', (7634, 7651), False, 'import torch\n'), ((3060, 3109), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""celeba"""'], {}), "(self.args.run, 'datasets', 'celeba')\n", (3072, 3109), False, 'import os\n'), ((3580, 3629), 'os.path.join', 'os.path.join', (['self.args.run', '"""datasets"""', '"""celeba"""'], {}), "(self.args.run, 'datasets', 'celeba')\n", (3592, 3629), False, 'import os\n'), ((10064, 10090), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(140)'], {}), '(140)\n', (10085, 10090), True, 'import torchvision.transforms as transforms\n'), ((10125, 10171), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), 
'(self.config.data.image_size)\n', (10142, 10171), True, 'import torchvision.transforms as transforms\n'), ((10206, 10227), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10225, 10227), True, 'import torchvision.transforms as transforms\n'), ((4185, 4211), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(140)'], {}), '(140)\n', (4206, 4211), True, 'import torchvision.transforms as transforms\n'), ((4251, 4297), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (4268, 4297), True, 'import torchvision.transforms as transforms\n'), ((4337, 4358), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4356, 4358), True, 'import torchvision.transforms as transforms\n'), ((3227, 3253), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(140)'], {}), '(140)\n', (3248, 3253), True, 'import torchvision.transforms as transforms\n'), ((3292, 3338), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (3309, 3338), True, 'import torchvision.transforms as transforms\n'), ((3377, 3410), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3408, 3410), True, 'import torchvision.transforms as transforms\n'), ((3449, 3470), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3468, 3470), True, 'import torchvision.transforms as transforms\n'), ((3747, 3773), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(140)'], {}), '(140)\n', (3768, 3773), True, 'import torchvision.transforms as transforms\n'), ((3812, 3858), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.config.data.image_size'], {}), '(self.config.data.image_size)\n', (3829, 3858), True, 'import torchvision.transforms as transforms\n'), ((3897, 3918), 'torchvision.transforms.ToTensor', 
'transforms.ToTensor', ([], {}), '()\n', (3916, 3918), True, 'import torchvision.transforms as transforms\n')] |
""" baseball with negative binomial distribution simulations
This code uses numerical simulations to estimate the winning percentages
for the three teams. The file ftebb.py does exact calculations
"""
import attr
import sys
import pandas as pd
import numpy as np
import requests
import json
import logging
import argparse
MAX_INNING = 99
class Team:
    """A batting lineup modelled by a negative-binomial plate-appearance draw.

    Each inning the number of non-out plate appearances is drawn from a
    negative binomial with 3 failures (outs) and this team's failure
    probability; runs scored is that draw minus ``runs_offset``, floored at 0.
    """

    def __init__(self, success_prob, runs_offset, rng):
        self.success_prob = success_prob
        self.failure_prob = 1 - success_prob
        self.runs_offset = runs_offset
        self.rng = rng

    def valid_rng(self, rng):
        # Prefer an explicitly supplied generator, else the one bound at init.
        return rng or self.rng

    def generate_nonout_pa(self, rng=None):
        """Draw the number of non-out plate appearances for one inning."""
        source = self.valid_rng(rng)
        return source.negative_binomial(3, self.failure_prob)

    def generate_score(self, rng=None):
        """Runs scored in a single inning (never negative)."""
        runs = self.generate_nonout_pa(rng) - self.runs_offset
        if runs < 0:
            return 0
        return runs

    def sim_score(self, rng=None, innings=1):
        """Total runs accumulated over ``innings`` innings."""
        total = 0
        for _ in range(innings):
            total += self.generate_score(rng)
        return total
def _parse_args(args):
    """Parse the command-line options controlling the simulation.

    Parameters
    ----------
    args : list of str
        Argument vector (typically ``sys.argv[1:]``).

    Returns
    -------
    argparse.Namespace
        Holds ``random_seed``, ``max_innings`` and ``number_games``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--random-seed", type=int, default=20190927)
    parser.add_argument("--max-innings", type=int, default=9)
    parser.add_argument("--number-games", type=int, default=10000)
    return parser.parse_args(args)
def sim_game(teamA, teamB, rng, max_inning=None):
    """Play one game inning by inning and return the final line score.

    Extra innings continue past ``max_inning`` until the tie is broken.

    Returns
    -------
    tuple
        ``(runs_teamA, runs_teamB, innings_played)``.
    """
    limit = MAX_INNING if max_inning is None else max_inning
    total_a = 0
    total_b = 0
    inning = 0
    # Keep playing while regulation innings remain, or while the game is tied.
    while inning < limit or total_a == total_b:
        total_a += teamA.sim_score(rng=rng, innings=1)
        total_b += teamB.sim_score(rng=rng, innings=1)
        inning += 1
    return (total_a, total_b, inning)
def team_matchup_df(teamA, teamB, rng, n=10000):
    """Simulate ``n`` games between two teams, one row per game.

    Returns a DataFrame with columns ``scoreA``, ``scoreB`` and ``innings``.
    """
    games = [sim_game(teamA, teamB, rng) for _ in range(n)]
    frame = pd.DataFrame(games)
    return frame.rename(columns={0: "scoreA", 1: "scoreB", 2: "innings"})
def get_result(df):
    """Annotate a matchup frame with outcome columns.

    Adds ``w`` (1 when team B outscored team A), ``i9`` (True when the game
    needed extra innings beyond ``MAX_INNING``) and a constant counter ``c``.
    """
    wins = (df.scoreB > df.scoreA).astype(int)
    extra_innings = df.innings > MAX_INNING
    return df.assign(w=wins, i9=extra_innings, c=1)
def print_result(teamA, teamB, rng, N=1000000):
    """Simulate ``N`` games and print summary statistics to stdout."""
    frame = get_result(team_matchup_df(teamA, teamB, rng, n=N))
    # Per-group means, split by whether the game needed extra innings.
    print(frame.groupby("i9").mean())
    summary = pd.concat((frame.mean(), frame.std()), axis=1, ignore_index=False)
    print(summary.rename(columns={0: "mean", 1: "std"}))
if __name__ == "__main__":
    args = _parse_args(sys.argv[1:])
    rng = np.random.RandomState(args.random_seed)
    N = args.number_games
    # Rebind the module-level default so sim_game picks up the CLI value.
    MAX_INNING = args.max_innings
    team_walk = Team(success_prob=0.4, runs_offset=3, rng=None)
    team_hr = Team(success_prob=0.1, runs_offset=0, rng=None)
    team_double = Team(success_prob=0.2, runs_offset=1, rng=None)
    fmt_str = "*****\n** A: {} - B: {}\n*****"
    # Run every pairwise matchup and report the win statistics.
    matchups = (
        ("walk", team_walk, "double", team_double),
        ("walk", team_walk, "hr", team_hr),
        ("double", team_double, "hr", team_hr),
    )
    for label_a, side_a, label_b, side_b in matchups:
        print(fmt_str.format(label_a, label_b))
        print_result(side_a, side_b, rng, N)
| [
"argparse.ArgumentParser",
"numpy.random.RandomState"
] | [((1036, 1061), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1059, 1061), False, 'import argparse\n'), ((2546, 2585), 'numpy.random.RandomState', 'np.random.RandomState', (['args.random_seed'], {}), '(args.random_seed)\n', (2567, 2585), True, 'import numpy as np\n')] |
from typing import Union
import numpy as np
def sigmoid_der(y: Union[int, float, np.array]) -> Union[int, float, np.array]:
    """Derivative of the logistic sigmoid, in terms of its output ``y``: y * (1 - y)."""
    complement = np.subtract(1.0, y)
    return np.multiply(complement, y)
def tanh_derivative(y: Union[int, float, np.array]) -> Union[int, float, np.array]:
    """Derivative of tanh, in terms of its output ``y``: 1 - y**2."""
    squared = np.square(y)
    return np.subtract(1.0, squared)
def sigmoid(x: Union[int, float, np.array]) -> Union[int, float, np.array]:
    """Logistic sigmoid 1 / (1 + exp(-x)), applied elementwise for arrays."""
    negated = np.negative(x)
    denominator = np.add(1.0, np.exp(negated))
    return np.divide(1.0, denominator)
def relu(x: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Rectified linear unit: elementwise max(x, 0).

    Array input is copied, never mutated; scalar input keeps its numeric type.
    """
    if isinstance(x, np.ndarray):
        clipped = x.copy()
        clipped[clipped < 0] = 0.0
        return clipped
    # Keep the zero literal's type aligned with the scalar input.
    zero = 0.0 if isinstance(x, float) else 0
    if x > zero:
        return x
    return zero
def relu_derivative(y: Union[int, float, np.ndarray]) -> Union[int, float, np.ndarray]:
    """Derivative of ReLU: 1 where ``y > 0``, else 0, elementwise for arrays.

    Fixes two defects in the previous version: the ndarray branch built a
    modified copy (``dx``) but returned the untouched input ``y``, and it never
    zeroed the entries <= 0, so negative inputs leaked through as gradients.
    """
    if isinstance(y, np.ndarray):
        # Fresh gradient array: 0 everywhere, 1 where the activation was positive.
        grad = np.zeros_like(y)
        grad[y > 0] = 1
        return grad
    elif isinstance(y, float):
        return 1.0 if y > 0.0 else 0.0
    else:
        return 1 if y > 0 else 0
class ActivationFunctions:
    """Registry of the supported activation callables.

    NOTE(review): plain Python functions stored as class attributes bind as
    methods when looked up on an *instance*; access these via the class
    (e.g. ``ActivationFunctions.RELU``) — confirm intended usage.
    """
    TANH = np.tanh
    SIGMOID = sigmoid
    RELU = relu
class ActivationFunctionsDerivatives:
    """Registry of derivatives matching ``ActivationFunctions``.

    Each derivative is expressed in terms of the activation's *output* ``y``,
    as is conventional for backpropagation.
    """
    TANH_DERIVATIVE = tanh_derivative
    SIGMOID_DERIVATIVE = sigmoid_der
    RELU_DERIVATIVE = relu_derivative
| [
"numpy.negative",
"numpy.subtract",
"numpy.square"
] | [((149, 168), 'numpy.subtract', 'np.subtract', (['(1.0)', 'y'], {}), '(1.0, y)\n', (160, 168), True, 'import numpy as np\n'), ((287, 299), 'numpy.square', 'np.square', (['y'], {}), '(y)\n', (296, 299), True, 'import numpy as np\n'), ((424, 438), 'numpy.negative', 'np.negative', (['x'], {}), '(x)\n', (435, 438), True, 'import numpy as np\n')] |
# Copyright 2016-2021 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom Callbacks for DeepCell"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import timeit
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
class InferenceTimer(tf.keras.callbacks.Callback):
    """Callback to log inference speed per epoch.

    At the end of every training epoch, a random batch of ``samples`` inputs
    is pushed through ``model.predict`` with this callback attached, and the
    measured per-sample inference time is printed.
    """

    def __init__(self, samples=100):
        # samples: number of random inputs used for the timing batch.
        super(InferenceTimer, self).__init__()
        self._samples = int(samples)
        self._batch_times = []   # wall-clock seconds per predict batch
        self._samples_seen = []  # number of samples in each predict batch
        self._timer = None       # start time of the batch currently in flight

    # NOTE(review): the Keras signature is ``on_predict_begin(self, logs=None)``;
    # the extra ``epoch`` positional receives whatever Keras passes first —
    # confirm against the installed tf.keras version.
    def on_predict_begin(self, epoch, logs=None):
        # Reset accumulators so each predict run is timed independently.
        self._batch_times = []
        self._samples_seen = []

    def on_predict_batch_begin(self, batch, logs=None):
        self._timer = timeit.default_timer()

    def on_predict_batch_end(self, batch, logs=None):
        # Elapsed wall-clock time for this batch.
        t = timeit.default_timer() - self._timer
        self._batch_times.append(t)
        # Fall back to a length-1 array when no outputs are reported in logs.
        outputs = logs.get('outputs', np.empty((1,)))
        if isinstance(self.model.output_shape, list):
            # Multi-output model: count samples from the first output.
            outputs = outputs[0]
        self._samples_seen.append(outputs.shape[0])

    def on_predict_end(self, logs=None):
        total_samples = np.sum(self._samples_seen)
        # Normalize each batch's time by its batch size.
        per_sample = [t / float(s) for t, s in
                      zip(self._batch_times, self._samples_seen)]
        avg = np.mean(per_sample)
        std = np.std(per_sample)
        print('Average inference speed per sample for %s total samples: '
              '%0.5fs ± %0.5fs.' % (total_samples, avg, std))

    def on_epoch_end(self, epoch, logs=None):
        # Random batch matching the model's input shape (batch dim replaced).
        shape = tuple([self._samples] + list(self.model.input_shape[1:]))
        test_batch = np.random.random(shape)
        # Predict with self attached so the on_predict_* hooks above fire.
        self.model.predict(test_batch, callbacks=self)
| [
"numpy.mean",
"numpy.random.random",
"timeit.default_timer",
"numpy.sum",
"numpy.empty",
"numpy.std"
] | [((1988, 2010), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2008, 2010), False, 'import timeit\n'), ((2410, 2436), 'numpy.sum', 'np.sum', (['self._samples_seen'], {}), '(self._samples_seen)\n', (2416, 2436), True, 'import numpy as np\n'), ((2566, 2585), 'numpy.mean', 'np.mean', (['per_sample'], {}), '(per_sample)\n', (2573, 2585), True, 'import numpy as np\n'), ((2600, 2618), 'numpy.std', 'np.std', (['per_sample'], {}), '(per_sample)\n', (2606, 2618), True, 'import numpy as np\n'), ((2898, 2921), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (2914, 2921), True, 'import numpy as np\n'), ((2078, 2100), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2098, 2100), False, 'import timeit\n'), ((2189, 2203), 'numpy.empty', 'np.empty', (['(1,)'], {}), '((1,))\n', (2197, 2203), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" Example script to show QTT capabilities
@author: eendebakpt
"""
#%% Load packages
from imp import reload
import numpy as np
import matplotlib.pyplot as plt
import tempfile
from collections import OrderedDict
import qcodes
from qcodes import MatPlot
import qtt
from qtt.gui.parameterviewer import createParameterWidget
from qtt.algorithms.gatesweep import analyseGateSweep
from qtt.measurements.scans import scanjob_t
from qtt.instrument_drivers.virtual_gates import virtual_gates
from qtt import save_state, load_state
import qtt.measurements.videomode
import qtt.simulation.virtual_dot_array
# Store all measurement data in a throw-away temporary directory.
datadir = tempfile.mkdtemp(prefix='qtt_example')
qcodes.DataSet.default_io = qcodes.DiskIO(datadir)
#%% Create a virtual model for testing
#
# The model resembles the spin-qubit dot setup. The hardware consists of a virtual
# keithley, IVVI racks and a virtual gates object
nr_dots = 3
station = qtt.simulation.virtual_dot_array.initialize(reinit=True, nr_dots=nr_dots, maxelectrons=2)
keithley1 = station.keithley1
keithley3 = station.keithley3
# virtual gates for the model
gates = station.gates
model = station.model
#%% Setup measurement windows
mwindows = qtt.gui.live_plotting.setupMeasurementWindows(station, create_parameter_widget=False)
pv = createParameterWidget([gates, ])
logviewer = qtt.gui.dataviewer.DataViewer()
logviewer.show()
#%% Read out instruments
print('value: %f' % keithley3.readnext())
snapshotdata = station.snapshot()
#%% Simple 1D scan loop
# Sweep the right-most bottom gate while measuring the keithley amplitude.
param_left=station.model.bottomgates[0]
param_right=station.model.bottomgates[-1]
scanjob = scanjob_t({'sweepdata': dict({'param': param_right, 'start': -500, 'end': 1, 'step': .8, 'wait_time': 3e-3}), 'minstrument': ['keithley3.amplitude']})
data1d = qtt.measurements.scans.scan1D(station, scanjob, location=None, verbose=1)
#%% Save the current state of the system to disk
save_state(station)
#%% Print the scanned data
print(data1d.default_parameter_name())
#%% Make a 2D scan
start = -500
scanjob = scanjob_t()
scanjob.add_sweep(param_right, start=start, end=start+400, step=4., wait_time=0.)
scanjob.add_sweep(param_left, start=start, end=start+400, step=5)
scanjob.add_minstrument(['keithley1.amplitude'])
data = qtt.measurements.scans.scan2D(station, scanjob)
# Park the swept gates and remember all gate values for later restore.
gates.set(param_right, -300); gates.set(param_left, -300)
gv=gates.allvalues()
#%% Fit 1D pinch-off scan:
adata = analyseGateSweep(data1d, fig=100)
#%% Fit 2D cross (optional: requires the autotune4dot project)
try:
    from projects.autotune4dot.autotuning import analyse2dot
    qtt.measurements.scans.plotData(data, fig=30)
    pt, resultsfine = analyse2dot(data, fig=300, efig=400, istep=1, verbose=2)
except:
    pass
#%% Make virtual gates
np.set_printoptions(precision=2, suppress=True)
# Cross-capacitance matrix: how each physical plunger couples to each dot.
crosscap_map = OrderedDict((
    ('VP1', OrderedDict((('P1', 1), ('P2', 0.56), ('P3', 0.15)))),
    ('VP2', OrderedDict((('P1', 0.62), ('P2', 1), ('P3', 0.593)))),
    ('VP3', OrderedDict((('P1', 0.14), ('P2', 0.62), ('P3', 1))))
))
virts = virtual_gates(qtt.measurements.scans.instrumentName('vgates'), gates, crosscap_map)
virts.print_matrix()
# Restore gate values, then scan in the virtual-gate coordinate frame.
gates.resetgates(gv, gv, verbose=0)
virts.VP2.set(-60)
cc1= virts.VP1()
cc2=virts.VP2()
r=80
scanjob = scanjob_t({'sweepdata': dict({'param': virts.VP1, 'start': cc1-100, 'end': cc1 + 100, 'step': 4.}), 'minstrument': ['keithley1.amplitude'], 'wait_time': 0.})
scanjob['stepdata'] = dict({'param': virts.VP2, 'start': cc2 - r, 'end': cc2 +r, 'step': 2.})
data = qtt.measurements.scans.scan2D(station, scanjob)
gates.resetgates(gv, gv, verbose=0)
print('virtual and physical gates: ' + ','.join( '%.2f' % x for x in [virts.VP1(),virts.VP2(),virts.VP3(), gates.P1(), gates.P2(), gates.P3() ]) )
# Extend the virtual-gate object with the sensing-dot plungers.
vgates=['vSD1b'] + virts.vgates() + ['vSD1a']
pgates=['SD1b'] + virts.pgates() + ['SD1a']
virts2= qtt.instrument_drivers.virtual_gates.extend_virtual_gates(vgates, pgates, virts, name='vgates')
#%% Send data to powerpoint
print('add copy data to Powerpoint use the following:')
print(' qtt.utilities.tools.addPPT_dataset(data);')
if 0:
    qtt.utilities.tools.addPPT_dataset(data)
#%% Test objects
qtt.instrument_drivers.virtual_gates.test_virtual_gates()
qtt.measurements.scans.test_scan2D()
#%% Start videomode
digitizer=station.sdigitizer
station.awg=station.vawg
print('starting videomode in background...')
gates.P3.increment(40)
# Live 2D scan over two physical plunger gates.
vm = qtt.measurements.videomode.VideoMode(station, ['P1', 'P2'], [160]*2,
                                     minstrument=(digitizer.name,[1,1]), resolution = [96,96],
                                     diff_dir=[None, 'g'], name='physical gates' )
vm.crosshair(True)
vm.stopreadout()
vm.updatebg()
#%%
#gates.P3.increment(-40)
# Live 2D scan in the virtual-gate frame via vector sweeps.
s1=qtt.measurements.scans.create_vectorscan(virts.VP1, 160)
s2=qtt.measurements.scans.create_vectorscan(virts.VP2, 160)
vm = qtt.measurements.videomode.VideoMode(station, {'gates_horz': s1['param'],'gates_vert': s2['param']}, [200,180],
                                     minstrument=(digitizer.name,[1,1]), resolution = [96,96],
                                     diff_dir=[None, 'g'], name='virtual gates' )
vm.crosshair(True)
vm.stopreadout()
vm.updatebg() | [
"qtt.gui.parameterviewer.createParameterWidget",
"projects.autotune4dot.autotuning.analyse2dot",
"qtt.save_state",
"qtt.simulation.virtual_dot_array.initialize",
"qtt.utilities.tools.addPPT_dataset",
"qtt.instrument_drivers.virtual_gates.test_virtual_gates",
"qtt.measurements.scans.create_vectorscan",
... | [((637, 675), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""qtt_example"""'}), "(prefix='qtt_example')\n", (653, 675), False, 'import tempfile\n'), ((704, 726), 'qcodes.DiskIO', 'qcodes.DiskIO', (['datadir'], {}), '(datadir)\n', (717, 726), False, 'import qcodes\n'), ((926, 1019), 'qtt.simulation.virtual_dot_array.initialize', 'qtt.simulation.virtual_dot_array.initialize', ([], {'reinit': '(True)', 'nr_dots': 'nr_dots', 'maxelectrons': '(2)'}), '(reinit=True, nr_dots=nr_dots,\n maxelectrons=2)\n', (969, 1019), False, 'import qtt\n'), ((1196, 1285), 'qtt.gui.live_plotting.setupMeasurementWindows', 'qtt.gui.live_plotting.setupMeasurementWindows', (['station'], {'create_parameter_widget': '(False)'}), '(station,\n create_parameter_widget=False)\n', (1241, 1285), False, 'import qtt\n'), ((1287, 1317), 'qtt.gui.parameterviewer.createParameterWidget', 'createParameterWidget', (['[gates]'], {}), '([gates])\n', (1308, 1317), False, 'from qtt.gui.parameterviewer import createParameterWidget\n'), ((1333, 1364), 'qtt.gui.dataviewer.DataViewer', 'qtt.gui.dataviewer.DataViewer', ([], {}), '()\n', (1362, 1364), False, 'import qtt\n'), ((1763, 1836), 'qtt.measurements.scans.scan1D', 'qtt.measurements.scans.scan1D', (['station', 'scanjob'], {'location': 'None', 'verbose': '(1)'}), '(station, scanjob, location=None, verbose=1)\n', (1792, 1836), False, 'import qtt\n'), ((1889, 1908), 'qtt.save_state', 'save_state', (['station'], {}), '(station)\n', (1899, 1908), False, 'from qtt import save_state, load_state\n'), ((2020, 2031), 'qtt.measurements.scans.scanjob_t', 'scanjob_t', ([], {}), '()\n', (2029, 2031), False, 'from qtt.measurements.scans import scanjob_t\n'), ((2236, 2283), 'qtt.measurements.scans.scan2D', 'qtt.measurements.scans.scan2D', (['station', 'scanjob'], {}), '(station, scanjob)\n', (2265, 2283), False, 'import qtt\n'), ((2401, 2434), 'qtt.algorithms.gatesweep.analyseGateSweep', 'analyseGateSweep', (['data1d'], {'fig': '(100)'}), '(data1d, 
fig=100)\n', (2417, 2434), False, 'from qtt.algorithms.gatesweep import analyseGateSweep\n'), ((2698, 2745), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)', 'suppress': '(True)'}), '(precision=2, suppress=True)\n', (2717, 2745), True, 'import numpy as np\n'), ((3446, 3493), 'qtt.measurements.scans.scan2D', 'qtt.measurements.scans.scan2D', (['station', 'scanjob'], {}), '(station, scanjob)\n', (3475, 3493), False, 'import qtt\n'), ((3778, 3877), 'qtt.instrument_drivers.virtual_gates.extend_virtual_gates', 'qtt.instrument_drivers.virtual_gates.extend_virtual_gates', (['vgates', 'pgates', 'virts'], {'name': '"""vgates"""'}), "(vgates, pgates,\n virts, name='vgates')\n", (3835, 3877), False, 'import qtt\n'), ((4087, 4144), 'qtt.instrument_drivers.virtual_gates.test_virtual_gates', 'qtt.instrument_drivers.virtual_gates.test_virtual_gates', ([], {}), '()\n', (4142, 4144), False, 'import qtt\n'), ((4145, 4181), 'qtt.measurements.scans.test_scan2D', 'qtt.measurements.scans.test_scan2D', ([], {}), '()\n', (4179, 4181), False, 'import qtt\n'), ((4333, 4516), 'qtt.measurements.videomode.VideoMode', 'qtt.measurements.videomode.VideoMode', (['station', "['P1', 'P2']", '([160] * 2)'], {'minstrument': '(digitizer.name, [1, 1])', 'resolution': '[96, 96]', 'diff_dir': "[None, 'g']", 'name': '"""physical gates"""'}), "(station, ['P1', 'P2'], [160] * 2,\n minstrument=(digitizer.name, [1, 1]), resolution=[96, 96], diff_dir=[\n None, 'g'], name='physical gates')\n", (4369, 4516), False, 'import qtt\n'), ((4635, 4691), 'qtt.measurements.scans.create_vectorscan', 'qtt.measurements.scans.create_vectorscan', (['virts.VP1', '(160)'], {}), '(virts.VP1, 160)\n', (4675, 4691), False, 'import qtt\n'), ((4695, 4751), 'qtt.measurements.scans.create_vectorscan', 'qtt.measurements.scans.create_vectorscan', (['virts.VP2', '(160)'], {}), '(virts.VP2, 160)\n', (4735, 4751), False, 'import qtt\n'), ((4757, 4981), 'qtt.measurements.videomode.VideoMode', 
'qtt.measurements.videomode.VideoMode', (['station', "{'gates_horz': s1['param'], 'gates_vert': s2['param']}", '[200, 180]'], {'minstrument': '(digitizer.name, [1, 1])', 'resolution': '[96, 96]', 'diff_dir': "[None, 'g']", 'name': '"""virtual gates"""'}), "(station, {'gates_horz': s1['param'],\n 'gates_vert': s2['param']}, [200, 180], minstrument=(digitizer.name, [1,\n 1]), resolution=[96, 96], diff_dir=[None, 'g'], name='virtual gates')\n", (4793, 4981), False, 'import qtt\n'), ((2523, 2568), 'qtt.measurements.scans.plotData', 'qtt.measurements.scans.plotData', (['data'], {'fig': '(30)'}), '(data, fig=30)\n', (2554, 2568), False, 'import qtt\n'), ((2596, 2652), 'projects.autotune4dot.autotuning.analyse2dot', 'analyse2dot', (['data'], {'fig': '(300)', 'efig': '(400)', 'istep': '(1)', 'verbose': '(2)'}), '(data, fig=300, efig=400, istep=1, verbose=2)\n', (2607, 2652), False, 'from projects.autotune4dot.autotuning import analyse2dot\n'), ((2990, 3037), 'qtt.measurements.scans.instrumentName', 'qtt.measurements.scans.instrumentName', (['"""vgates"""'], {}), "('vgates')\n", (3027, 3037), False, 'import qtt\n'), ((4027, 4067), 'qtt.utilities.tools.addPPT_dataset', 'qtt.utilities.tools.addPPT_dataset', (['data'], {}), '(data)\n', (4061, 4067), False, 'import qtt\n'), ((2784, 2836), 'collections.OrderedDict', 'OrderedDict', (["(('P1', 1), ('P2', 0.56), ('P3', 0.15))"], {}), "((('P1', 1), ('P2', 0.56), ('P3', 0.15)))\n", (2795, 2836), False, 'from collections import OrderedDict\n'), ((2847, 2900), 'collections.OrderedDict', 'OrderedDict', (["(('P1', 0.62), ('P2', 1), ('P3', 0.593))"], {}), "((('P1', 0.62), ('P2', 1), ('P3', 0.593)))\n", (2858, 2900), False, 'from collections import OrderedDict\n'), ((2911, 2963), 'collections.OrderedDict', 'OrderedDict', (["(('P1', 0.14), ('P2', 0.62), ('P3', 1))"], {}), "((('P1', 0.14), ('P2', 0.62), ('P3', 1)))\n", (2922, 2963), False, 'from collections import OrderedDict\n')] |
""" Useful neuroimaging coordinate map makers and utilities """
import numpy as np
from .coordinate_system import CoordSysMaker
from .coordinate_map import CoordMapMaker
from ..transforms.affines import from_matrix_vector
# Axis names for each supported reference space: three spatial axes plus time.
scanner_names = ['scanner-' + label for label in 'xyz'] + ['t']
mni_names = ['mni-' + label for label in 'xyz'] + ['t']
talairach_names = ['talairach-' + label for label in 'xyz'] + ['t']
# Some standard coordinate system makers
voxel_cs = CoordSysMaker('ijkl', 'array')
scanner_cs = CoordSysMaker(scanner_names, 'scanner')
mni_cs = CoordSysMaker(mni_names, 'mni')
talairach_cs = CoordSysMaker(talairach_names, 'talairach')
# Standard coordinate map makers
vox2scanner = CoordMapMaker(voxel_cs, scanner_cs)
vox2mni = CoordMapMaker(voxel_cs, mni_cs)
vox2talairach = CoordMapMaker(voxel_cs, talairach_cs)
# Register these xyzs as known
# known_names maps each spatial axis name (e.g. 'mni-x') to its canonical
# 'x' / 'y' / 'z' letter; used as the default lookup in xyz_order / xyz_affine.
known_names = {}
for _rcs in (scanner_names, mni_names, talairach_names):
    for _name, _coord in zip(_rcs[:3], 'xyz'):
        known_names[_name] = _coord
class SpaceError(Exception):
    """ Base class for errors relating coordinate maps to spaces """
    pass
class SpaceTypeError(SpaceError):
    """ Raised when a coordinate map is not of the expected (affine) type """
    pass
class AxesError(SpaceError):
    """ Raised when expected x, y, z axes are missing from a coordinate system """
    pass
class AffineError(SpaceError):
    """ Raised when an affine cannot be reduced to a valid xyz affine """
    pass
def xyz_affine(coordmap, name2xyz=None):
    """ Return the voxel -> XYZ affine embedded in `coordmap`

    Parameters
    ----------
    coordmap : ``CoordinateMap`` instance
    name2xyz : None or mapping, optional
        Mapping such that ``name2xyz[ax_name]`` returns 'x', 'y' or 'z', and
        raises KeyError for unrecognized axis names.  None (default) selects
        the module-level ``known_names``.

    Returns
    -------
    xyz_aff : (4, 4) array
        Affine taking voxel coordinates to X, Y, Z coordinates.

    Raises
    ------
    SpaceTypeError : if `coordmap` is not an affine coordinate map
    AxesError : if the range of `coordmap` lacks any of the x, y, z axes
    AffineError : if axes dropped from the affine feed into x, y or z
    """
    mapping = known_names if name2xyz is None else name2xyz
    if not hasattr(coordmap, 'affine'):
        raise SpaceTypeError('Need affine coordinate map')
    aff = coordmap.affine
    # Reorder rows so the first three output rows correspond to x, y, z.
    row_order = xyz_order(coordmap.function_range, mapping)
    xyz_rows = aff[row_order[:3]]
    # Columns between the first three (spatial inputs) and the last
    # (translation) belong to dropped input axes; they must not contribute
    # to the x, y, z outputs.
    dropped = xyz_rows[:, 3:-1]
    if not np.allclose(dropped, 0):
        raise AffineError('Dropped dimensions not orthogonal to xyz')
    return from_matrix_vector(xyz_rows[:3, :3], xyz_rows[:3, -1])
def xyz_order(coordsys, name2xyz=None):
    """ Sort order that puts the x, y, z axes of `coordsys` first

    Parameters
    ----------
    coordsys : ``CoordinateSystem`` instance
    name2xyz : None or mapping, optional
        Mapping such that ``name2xyz[ax_name]`` returns 'x', 'y' or 'z', and
        raises KeyError for unrecognized axis names.  None (default) selects
        the module-level ``known_names``.

    Returns
    -------
    xyz_order : list
        Axis indices giving xyz-first ordering; unrecognized axes follow in
        their original relative order.

    Raises
    ------
    AxesError : if any of the x, y, z axes is missing
    """
    mapping = known_names if name2xyz is None else name2xyz
    axis_names = coordsys.coord_names
    n_axes = len(axis_names)
    sort_keys = np.zeros(n_axes, dtype=int)
    for idx, axis_name in enumerate(axis_names):
        try:
            sort_keys[idx] = 'xyz'.index(mapping[axis_name])
        except KeyError:
            # Unknown axes sort after any spatial axis, keeping input order.
            sort_keys[idx] = n_axes + idx
    if not set(sort_keys).issuperset(range(3)):
        raise AxesError("Not all of x, y, z recognized in coordinate map")
    return list(np.argsort(sort_keys))
def is_xyz_affable(coordmap, name2xyz=None):
    """ True if an xyz affine can be extracted from `coordmap`

    Parameters
    ----------
    coordmap : ``CoordinateMap`` instance
        Coordinate map to test
    name2xyz : None or mapping, optional
        Mapping such that ``name2xyz[ax_name]`` returns 'x', 'y' or 'z', and
        raises KeyError for unrecognized axis names.  None (default) selects
        the module default.

    Returns
    -------
    tf : bool
        Whether ``xyz_affine(coordmap, name2xyz)`` succeeds.
    """
    # Delegate to xyz_affine; any failure mode it signals means "not affable".
    try:
        xyz_affine(coordmap, name2xyz)
        return True
    except SpaceError:
        return False
| [
"numpy.argsort",
"numpy.zeros",
"numpy.allclose"
] | [((4397, 4419), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'int'}), '(N, dtype=int)\n', (4405, 4419), True, 'import numpy as np\n'), ((3002, 3028), 'numpy.allclose', 'np.allclose', (['extra_cols', '(0)'], {}), '(extra_cols, 0)\n', (3013, 3028), True, 'import numpy as np\n'), ((4757, 4775), 'numpy.argsort', 'np.argsort', (['axvals'], {}), '(axvals)\n', (4767, 4775), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
from abc import ABC, abstractmethod
import pandas as pd
import json
import bdmodule.bd_utils as bd_utils
#from module.data_loading_fromlocal import read_turkprob
class UserSimulator(ABC):
    """Abstract interface for simulated users that answer attribute queries."""
    @abstractmethod
    def answer(self ):
        """Return the simulated user's answers; must be overridden."""
        pass
class NoisyUser(UserSimulator):
    """Oracle user that reads answers from the ground-truth fact table, but
    with probability ``user_uncertain`` replaces the truthful answer with a
    fair coin flip ("lying").
    """
    def __init__(self, args):
        print('using oracle based user. ')
        self.uncertainty = args['user_uncertain']
    def answer(self, best_ft, batch, mode='test'):
        """Answer one queried attribute per example in the batch."""
        # batch[1] is the per-example fact table, indexed [example, attribute].
        self.qr_fact = batch[1]
        answers = []
        for idx, feature in enumerate(best_ft):
            reply = self.qr_fact[idx, feature]
            # With probability `uncertainty`, a coin flip replaces the oracle.
            if np.random.binomial(1, self.uncertainty):
                reply = 1 if np.random.binomial(1, 0.5) else 0
            answers.append(reply)
        return answers
class Singleexample_User(UserSimulator):
    """Oracle user that answers attribute queries from the per-example fact
    list in the batch, with optional label-level (category) feedback.

    Attribute/label maps come from ``bd_utils``; when ``using_categorical``
    is set, attribute indices are remapped through the categorical/binary
    index translation tables.
    """
    def __init__(self, args):
        print('using oracle based user. ')
        self.uncertainty = args['user_uncertain']
        label2att, att2label = bd_utils.att2label()
        if args['using_categorical']:
            # Remap attribute indices into the reordered (binary-first) space.
            binary_idx, categorical, tag_new2old, tag_old2new = bd_utils.parse_cat_binary()
            self.label2att = {lb:[tag_old2new[i] for i in label2att[lb]] for lb in label2att}
            self.att2label = [att2label[tag_new2old[new]] for new in tag_new2old]
        else:
            self.label2att, self.att2label = label2att, att2label
        self.labellist = list(self.label2att.keys())
    def answer_label(self, best_ft, batch, mode='test'):
        """Answer attribute queries with label-class feedback.

        Returns ``(answer, answered_ft)`` where ``answer[i]`` is 1 if the
        asked attribute holds for example i, 0 if another attribute in the
        same label class holds, and -1 if nothing in that class holds (then
        ``answered_ft[i]`` lists the whole class so all of it can be ruled
        out; otherwise it is just the asked attribute).
        """
        # batch[1] holds, per example, the collection of true attribute indices.
        self.qr_fact = batch[1]
        answer = []
        answered_ft = []
        for i in range(len(best_ft)):
            ft = best_ft[i]
            if ft in self.qr_fact[i]:
                answer.append(1)
                answered_ft.append([ft])
                continue
            # Asked attribute is false; check the rest of its label class.
            label = self.labellist[self.att2label[ft]]
            related_att = self.label2att[label]
            a=0
            for att in related_att :
                a += int( att in self.qr_fact[i])
            if a==0: ## Nothing in the same category holds: reject the whole class
                answer.append(-1)
                answered_ft.append(related_att)
            else: ## Something in the category holds, but the asked attribute is wrong
                answer.append(0)
                answered_ft.append([ft])
        return answer, answered_ft
    def answer_label_cat(self, best_ft, batch, aa, mode='test'):
        """Like ``answer_label`` but also supporting categorical questions.

        Indices below ``aa.nq_bi`` are binary attributes, handled as in
        ``answer_label``; larger indices address a categorical question,
        answered with one true attribute index from that category (chosen at
        random from the overlap) or -1 if none applies.
        """
        self.qr_fact = batch[1]
        # NOTE(review): tgts is stored but not used below - kept for debugging?
        self.tgts = batch[2].cpu().numpy()
        answer = []
        answered_ft = []
        for i in range(len(best_ft)):
            ft = best_ft[i]
            if ft < aa.nq_bi:
                # Binary question: same logic as answer_label.
                if ft in self.qr_fact[i]:
                    answer.append(1)
                    answered_ft.append([ft])
                    continue
                label = self.labellist[self.att2label[ft]]
                related_att = self.label2att[label]
                a=0
                for att in related_att :
                    a += int( att in self.qr_fact[i])
                if a==0:
                    answer.append(-1)
                    answered_ft.append(related_att)
                else:
                    answer.append(0)
                    answered_ft.append([ft])
            else:
                # Categorical question: report one true attribute from the
                # category, or -1 when no category member is true.
                cat = aa.categorical[ ft-aa.nq_bi ]['idx']
                overlap = set(cat) & set(self.qr_fact[i])
                if len(overlap) ==0:
                    a = -1
                else:
                    a = np.random.choice(list(overlap))
                answer.append(a)
                answered_ft.append([ft])
        assert len(answer) == len(answered_ft)
        return answer, answered_ft
    def answer(self, best_ft, batch, mode='test'):
        """Plain binary answers: 1 if the asked attribute is true, else 0."""
        self.qr_fact = batch[1]
        answer = []
        # NOTE(review): answered_ft is populated nowhere and never returned here.
        answered_ft = []
        for i in range(len(best_ft)):
            ft = best_ft[i]
            if ft in self.qr_fact[i]:
                a = 1
            else:
                a =0
            answer.append(a)
        return answer
# class PersonaUser(UserSimulator):
# def __init__(self, aa, args):
# print('using persona based user. ')
# self.args = args
# self.lamda = 1 #args['user_lamda']
# self.uncertainty = args['user_uncertain']
# self.init_user_fromprob(aa)
# #self.init_user_data(aa)
# def init_user_fromprob(self, aa):
# faq_probs = read_turkprob('full/')
# #faq_probs = read_turkprob('sampled/sampled_')
# prob_weight = [1, 1, 1, 1, 1]
# query = aa.queryfile
# datarecords= query.to_dict('records')
# fq_tag_user = np.zeros(aa.gold_table.shape)
# for i in range(len( datarecords)):
# dr = datarecords[i]
# tgttext = dr['faqtext'] if 'faqtext' in dr else dr['faq_original']
# tgt_ids = aa.faqs.index(tgttext)
# if i != tgt_ids:
# print(i)
# faqid = str(dr['faq_id'])
# labeled = [int(faqid in fp) for fp in faq_probs]
# if not 0 in labeled:
# for i in range(len(faq_probs)):
# probdict = faq_probs[i][faqid]
# for tg in probdict.keys():
# if tg in aa.tag_w2i:
# fq_tag_user[tgt_ids, aa.tag_w2i[tg]] = probdict[tg]* prob_weight[i]
# #fq_tag_user[tgt_ids, aa.tag_w2i[tg]] = int(probdict[tg] >=0.4)
# else:
# print('no data')
# taglist = dr['taglist']
# for tg in taglist:
# if tg in aa.tag_w2i:
# fq_tag_user[tgt_ids, aa.tag_w2i[tg]] = 1
# goldtable = aa.gold_table
# self.qtag_belief = self.lamda *fq_tag_user + (1- self.lamda )*goldtable
# def answer(self, best_ft, batch, mode='test'):
# self.tgts = batch[2].cpu().numpy()
# answer = []
# for i in range(len(best_ft)):
# pa = min(1, self.qtag_belief[ self.tgts[i], best_ft[i]])
# a = 1 if np.random.binomial(1, pa) else 0
# '''
# if mode == 'train':
# a = int(pa >0.4)
# else:
# a = 1 if np.random.binomial(1, pa) else 0
# '''
# #if np.random.binomial(1, self.uncertainty):
# # #print('lying')
# # a = 1 if np.random.binomial(1, 0.5) else 0
# answer.append(a)
# return answer
# # def readfold_cvs(self, path ):
# # #fold = self.args['cv_n']
# # #path = 'paf_anno_result/tag_anno_{}_result.csv'.format(fold)
# # tagfile = pd.read_csv(path)
# # tagfile = tagfile.drop(['HITId', 'HITTypeId', 'Title', 'Description', 'Keywords', 'Reward',
# # 'CreationTime', 'MaxAssignments', 'RequesterAnnotation',
# # 'AssignmentDurationInSeconds', 'AutoApprovalDelayInSeconds',
# # 'Expiration', 'NumberOfSimilarHITs', 'LifetimeInSeconds',
# # 'AssignmentId', 'WorkerId', 'AssignmentStatus', 'AcceptTime',
# # 'SubmitTime', 'AutoApprovalTime', 'ApprovalTime', 'RejectionTime',
# # 'RequesterFeedback', 'WorkTimeInSeconds', 'LifetimeApprovalRate',
# # 'Last30DaysApprovalRate', 'Last7DaysApprovalRate', ], 1)
# # tagfile['pos_tag'] = tagfile.apply(lambda x: [x['Input.'+key] if key!='na' else None for key in x['Answer.faq'].split('|')], 1)
# # tags= tagfile.groupby(['Input.faq_id']).apply(lambda x: [tag for cnt in x['pos_tag'].tolist() for tag in cnt]).reset_index()
# # tags.rename(columns = {0:'all_pos_tag'}, inplace = True)
# # turk_cnt = tagfile.groupby(['Input.faq_id']).apply(lambda x: len(x)).reset_index()
# # turk_cnt.rename(columns = {0:'turk_cnt'}, inplace = True)
# # tags = tags.join(turk_cnt.set_index('Input.faq_id'), on='Input.faq_id', how='outer')
# # tags = tags.rename(columns={'Input.faq_id':'faq_id'})
# # return tags
# # def init_user_data(self, aa):
# # #================Reading and merging files ================
# # alltags = []
# # for i in range(5):
# # path = 'paf_anno_result/tag_anno_{}_result.csv'.format(str(i))
# # alltags.append(self.readfold(path))
# # tags_0_5 = pd.concat(alltags)
# # alltags = []
# # for i in range(5):
# # path = 'paf_anno_result/tag_from5_to15_file{}_result.csv'.format(str(i))
# # #print(path)
# # alltags.append(self.readfold(path))
# # tags_5_15 = pd.concat(alltags)
# # tag0_15 = tags_0_5.join(tags_5_15.set_index('faq_id'), on='faq_id',lsuffix='_5', rsuffix='_15', how='outer')
# # tag0_15['all_pos_tag'] = tag0_15['all_pos_tag_5'] +tag0_15['all_pos_tag_15']
# # tag0_15['turk_cnt'] = (tag0_15['turk_cnt_5'] +tag0_15['turk_cnt_15'])/2
# # tags=tag0_15
# # #================Build the belief table ================
# # data_test = aa.queryfile
# # test_tag = data_test.join(tags.set_index('faq_id'), on='faq_id', how='outer').replace(np.nan, 0, regex=True)
# # test_tag_record = test_tag.to_dict('records')
# # aa_to_user_idx={}
# # fq_tag=[]
# # tgt_in_aa = []
# # for i in range(len( test_tag_record )):
# # dr = test_tag_record[i]
# # tgttext = dr['faqtext'] if 'faqtext' in dr else dr['faq_original']
# # tgt_ids = aa.faqs.index(tgttext)
# # tgt_in_aa.append(tgt_ids)
# # aa_to_user_idx[tgt_ids] = i
# # tag_cnt = [0]*len(aa.tag_w2i)
# # turk_cnt = dr['turk_cnt']
# # dr['all_pos_tag'] = [] if dr['all_pos_tag']== 0 else dr['all_pos_tag']
# # while turk_cnt<3:
# # dr['all_pos_tag'] += dr['taglist']
# # turk_cnt +=1
# # print('for faq :{}, get tags from gold table {}'.format(dr['faq_id'], dr['taglist']))
# # for tg in dr['all_pos_tag']:
# # if tg in aa.tag_w2i:
# # tag_cnt[ aa.tag_w2i[tg]] = min(3, tag_cnt[ aa.tag_w2i[tg]] +1)
# # fq_tag.append(tag_cnt)
# # fq_tag = np.array(fq_tag)/3
# # goldtable = aa.gold_table[tgt_in_aa]
# # self.aa_to_user_idx = aa_to_user_idx
# # self.qtag_belief = self.lamda *fq_tag + (1- self.lamda )*goldtable
| [
"bdmodule.bd_utils.att2label",
"numpy.random.binomial",
"bdmodule.bd_utils.parse_cat_binary"
] | [((1040, 1060), 'bdmodule.bd_utils.att2label', 'bd_utils.att2label', ([], {}), '()\n', (1058, 1060), True, 'import bdmodule.bd_utils as bd_utils\n'), ((658, 697), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.uncertainty'], {}), '(1, self.uncertainty)\n', (676, 697), True, 'import numpy as np\n'), ((1164, 1191), 'bdmodule.bd_utils.parse_cat_binary', 'bd_utils.parse_cat_binary', ([], {}), '()\n', (1189, 1191), True, 'import bdmodule.bd_utils as bd_utils\n'), ((757, 783), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (775, 783), True, 'import numpy as np\n')] |
"""
.. module:: test_penalty
:synopsis: Test constrained optimization strategy
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from pySOT import Keane, SyncStrategyPenalty, RBFInterpolant, \
CubicKernel, LinearTail, SymmetricLatinHypercube, CandidateDYCORS
from poap.controller import ThreadController, BasicWorkerThread
import numpy as np
import os.path
import logging
def main():
    """Run a threaded constrained-optimization demo on the 10-D Keane problem.

    Uses pySOT's SyncStrategyPenalty with a cubic RBF surrogate, a symmetric
    Latin hypercube design and DYCORS candidate sampling, evaluated by 4
    worker threads for up to 500 evaluations.  Progress is logged to
    ./logfiles/test_penalty.log and the best penalty-merit solution printed.
    """
    # Start each run with a fresh log file.
    if not os.path.exists("./logfiles"):
        os.makedirs("logfiles")
    if os.path.exists("./logfiles/test_penalty.log"):
        os.remove("./logfiles/test_penalty.log")
    logging.basicConfig(filename="./logfiles/test_penalty.log",
                        level=logging.INFO)
    print("\nNumber of threads: 4")
    print("Maximum number of evaluations: 500")
    print("Sampling method: CandidateDYCORS")
    print("Experimental design: Symmetric Latin Hypercube")
    print("Surrogate: Cubic RBF")
    nthreads = 4
    maxeval = 500
    penalty = 1e6  # weight applied to constraint violations in the merit
    nsamples = nthreads
    data = Keane(dim=10)
    print(data.info)
    # Create a strategy and a controller
    controller = ThreadController()
    controller.strategy = \
        SyncStrategyPenalty(
            worker_id=0, data=data,
            maxeval=maxeval, nsamples=nsamples,
            response_surface=RBFInterpolant(kernel=CubicKernel, tail=LinearTail, maxp=maxeval),
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            exp_design=SymmetricLatinHypercube(dim=data.dim, npts=2*(data.dim+1)),
            sampling_method=CandidateDYCORS(data=data, numcand=100*data.dim),
            penalty=penalty)
    # Launch the threads
    for _ in range(nthreads):
        worker = BasicWorkerThread(controller, data.objfunction)
        controller.launch_worker(worker)
    # Use penalty based merit
    def feasible_merit(record):
        # Merit = objective value + penalty term for constraint violation.
        xx = np.zeros((1, record.params[0].shape[0]))
        xx[0, :] = record.params[0]
        return record.value + controller.strategy.penalty_fun(xx)[0, 0]
    result = controller.run(merit=feasible_merit)
    best, xbest = result.value, result.params[0]
    print('Best value: {0}'.format(best))
    print('Best solution: {0}'.format(
        np.array_str(xbest, max_line_width=np.inf,
                     precision=5, suppress_small=True)))
    print('Feasible: {0}\n'.format(np.max(data.eval_ineq_constraints(xbest)) <= 0.0))
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"poap.controller.BasicWorkerThread",
"pySOT.SymmetricLatinHypercube",
"poap.controller.ThreadController",
"pySOT.Keane",
"numpy.array_str",
"pySOT.CandidateDYCORS",
"numpy.zeros",
"pySOT.RBFInterpolant"
] | [((562, 641), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./logfiles/test_penalty.log"""', 'level': 'logging.INFO'}), "(filename='./logfiles/test_penalty.log', level=logging.INFO)\n", (581, 641), False, 'import logging\n'), ((981, 994), 'pySOT.Keane', 'Keane', ([], {'dim': '(10)'}), '(dim=10)\n', (986, 994), False, 'from pySOT import Keane, SyncStrategyPenalty, RBFInterpolant, CubicKernel, LinearTail, SymmetricLatinHypercube, CandidateDYCORS\n'), ((1075, 1093), 'poap.controller.ThreadController', 'ThreadController', ([], {}), '()\n', (1091, 1093), False, 'from poap.controller import ThreadController, BasicWorkerThread\n'), ((1594, 1641), 'poap.controller.BasicWorkerThread', 'BasicWorkerThread', (['controller', 'data.objfunction'], {}), '(controller, data.objfunction)\n', (1611, 1641), False, 'from poap.controller import ThreadController, BasicWorkerThread\n'), ((1759, 1799), 'numpy.zeros', 'np.zeros', (['(1, record.params[0].shape[0])'], {}), '((1, record.params[0].shape[0]))\n', (1767, 1799), True, 'import numpy as np\n'), ((1264, 1329), 'pySOT.RBFInterpolant', 'RBFInterpolant', ([], {'kernel': 'CubicKernel', 'tail': 'LinearTail', 'maxp': 'maxeval'}), '(kernel=CubicKernel, tail=LinearTail, maxp=maxeval)\n', (1278, 1329), False, 'from pySOT import Keane, SyncStrategyPenalty, RBFInterpolant, CubicKernel, LinearTail, SymmetricLatinHypercube, CandidateDYCORS\n'), ((1354, 1416), 'pySOT.SymmetricLatinHypercube', 'SymmetricLatinHypercube', ([], {'dim': 'data.dim', 'npts': '(2 * (data.dim + 1))'}), '(dim=data.dim, npts=2 * (data.dim + 1))\n', (1377, 1416), False, 'from pySOT import Keane, SyncStrategyPenalty, RBFInterpolant, CubicKernel, LinearTail, SymmetricLatinHypercube, CandidateDYCORS\n'), ((1442, 1492), 'pySOT.CandidateDYCORS', 'CandidateDYCORS', ([], {'data': 'data', 'numcand': '(100 * data.dim)'}), '(data=data, numcand=100 * data.dim)\n', (1457, 1492), False, 'from pySOT import Keane, SyncStrategyPenalty, RBFInterpolant, CubicKernel, 
LinearTail, SymmetricLatinHypercube, CandidateDYCORS\n'), ((2098, 2174), 'numpy.array_str', 'np.array_str', (['xbest'], {'max_line_width': 'np.inf', 'precision': '(5)', 'suppress_small': '(True)'}), '(xbest, max_line_width=np.inf, precision=5, suppress_small=True)\n', (2110, 2174), True, 'import numpy as np\n')] |
"""
coulomb.py
"""
import numpy as np
def coulomb(grid, Za, Zb):
    """Return the external potential from two nuclei with charges Za and Zb.

    `grid` must provide attributes ``a`` (scalar) and coordinate arrays
    ``Xr`` and ``Xa``; the result has the shape of those arrays.
    """
    cosh_xr = np.cosh(grid.Xr)
    cos_xa = np.cos(grid.Xa)
    numerator = (Za + Zb) * cosh_xr - (Za - Zb) * cos_xa
    denominator = cosh_xr ** 2 - cos_xa ** 2
    return -1.0 / grid.a * numerator / denominator
| [
"numpy.cos",
"numpy.cosh"
] | [((277, 293), 'numpy.cosh', 'np.cosh', (['grid.Xr'], {}), '(grid.Xr)\n', (284, 293), True, 'import numpy as np\n'), ((299, 314), 'numpy.cos', 'np.cos', (['grid.Xa'], {}), '(grid.Xa)\n', (305, 314), True, 'import numpy as np\n'), ((185, 201), 'numpy.cosh', 'np.cosh', (['grid.Xr'], {}), '(grid.Xr)\n', (192, 201), True, 'import numpy as np\n'), ((216, 231), 'numpy.cos', 'np.cos', (['grid.Xa'], {}), '(grid.Xa)\n', (222, 231), True, 'import numpy as np\n')] |
import os
import json
import csv
import pickle
from torchtext.vocab import GloVe
from sklearn.decomposition import PCA
import numpy as np
from config_attrib_selection import attrib_selection
# Keep only the first token of each attribute-selection key (e.g. "happy xyz"
# -> "happy"), preserving the selection flag value.
temp = {}
for k,v in attrib_selection.items():
    temp[k.split(" ")[0]] = v
attrib_selection = temp
# Pretrained 300-d GloVe (42B tokens) vectors, cached locally.
glove = GloVe(name="42B", dim=300, cache="./.vector_cache")
# word -> sentiment scores; indices 0 and 2 are differenced below
# (presumably positive minus negative components - TODO confirm).
with open(os.path.join("./", "./senticap/wordform_sentiments.json"), "r") as read_file:
    word_sentiments = json.load(read_file)
wordforms_attribs_tsvpath = "./senticap/constraint_wordforms_attribs_exp.tsv"
wordforms_attribs = {}
# TSV rows: class_name <tab> comma-separated word list.
with open(wordforms_attribs_tsvpath, "r") as wordforms_file:
    reader = csv.DictReader(wordforms_file, delimiter="\t", fieldnames=["class_name", "words"])
    for row in reader:
        word_class = {"counts": 0, "words": {}}
        for word in row["words"].split(","):
            # Constraint words can be "multi-word" (may have more than one tokens).
            # Add all tokens to the vocabulary separately.
            word_class["words"][word] = 0
        wordforms_attribs[row["class_name"]] = word_class
# Attribute head-words with a sentiment score and their selection flag.
wordform_list_all = []
for k,v in attrib_selection.items():
    word = k.split(" ")[0]
    wordform_list_all.append([word, word_sentiments[word][0] - word_sentiments[word][2], v])
wordform_list_selected = [w[0] for w in wordform_list_all if w[2]]
wordform_list_selected_glove = []
for w in wordform_list_selected:
    wordform_list_selected_glove.append(glove[w])
wordform_list_selected_glove = np.stack(wordform_list_selected_glove)
# Sort all head-words by ascending sentiment score.
wordform_list_all = sorted(wordform_list_all, key=lambda item: item[1], reverse=False)
wordform_list_all = [w[0] for w in wordform_list_all]
wordform_list_all_glove = []
for w in wordform_list_all:
    wordform_list_all_glove.append(glove[w])
wordform_list_all_glove = np.stack(wordform_list_all_glove)
# The 10 lowest- / highest-scored words form the PCA fitting set.
wordform_top10_neg = wordform_list_all[:10]
wordform_top10_pos = wordform_list_all[-10:]
wordform_top10_pos_glove = []
wordform_top10_neg_glove = []
for w in wordform_top10_pos:
    wordform_top10_pos_glove.append(glove[w])
for w in wordform_top10_neg:
    wordform_top10_neg_glove.append(glove[w])
wordform_top10_pos_glove = np.stack(wordform_top10_pos_glove)
wordform_top10_neg_glove = np.stack(wordform_top10_neg_glove)
both = np.concatenate((wordform_top10_pos_glove, wordform_top10_neg_glove), axis=0)
# Collect per-class constraint words; "selected" words come from classes with
# a truthy selection flag, excluding head-words already covered above.
word_list_all = []
word_list_all_glove = []
word_list_selected = []
word_list_selected_glove = []
for k, v in wordforms_attribs.items():
    try:
        if attrib_selection[k]:
            word_list_selected.extend(list(wordforms_attribs[k]["words"].keys()))
        word_list_all.extend(list(wordforms_attribs[k]["words"].keys()))
    # NOTE(review): bare except silently drops classes missing from
    # attrib_selection; consider catching KeyError explicitly.
    except:
        pass
word_list_selected = [w for w in word_list_selected if w not in wordform_list_selected]
for w in word_list_all:
    word_list_all_glove.append(glove[w])
for w in word_list_selected:
    word_list_selected_glove.append(glove[w])
word_list_all_glove = np.stack(word_list_all_glove)
word_list_selected_glove = np.stack(word_list_selected_glove)
# Fit PCA on the sentiment-extreme vectors, then project the vocabularies
# into the resulting low-dimensional "senti-GloVe" space.
n_components = 10
pca = PCA(n_components=n_components)
pca.fit(both)
pca_wordform_list_all = pca.transform(wordform_list_all_glove)
pca_wordform_list_selected = pca.transform(wordform_list_selected_glove)
pca_word_list_selected = pca.transform(word_list_selected_glove)
wordlist = wordform_list_all
pca_list = pca_wordform_list_all
# Persist the word -> reduced-vector mapping.
glove_n = dict(zip(wordlist, pca_list))
with open("sentiglove" + str(n_components) + ".pkl", "wb") as f:
    pickle.dump(glove_n, f)
"config_attrib_selection.attrib_selection.items",
"csv.DictReader",
"pickle.dump",
"sklearn.decomposition.PCA",
"os.path.join",
"numpy.stack",
"numpy.concatenate",
"json.load",
"torchtext.vocab.GloVe"
] | [((216, 240), 'config_attrib_selection.attrib_selection.items', 'attrib_selection.items', ([], {}), '()\n', (238, 240), False, 'from config_attrib_selection import attrib_selection\n'), ((305, 356), 'torchtext.vocab.GloVe', 'GloVe', ([], {'name': '"""42B"""', 'dim': '(300)', 'cache': '"""./.vector_cache"""'}), "(name='42B', dim=300, cache='./.vector_cache')\n", (310, 356), False, 'from torchtext.vocab import GloVe\n'), ((1144, 1168), 'config_attrib_selection.attrib_selection.items', 'attrib_selection.items', ([], {}), '()\n', (1166, 1168), False, 'from config_attrib_selection import attrib_selection\n'), ((1514, 1552), 'numpy.stack', 'np.stack', (['wordform_list_selected_glove'], {}), '(wordform_list_selected_glove)\n', (1522, 1552), True, 'import numpy as np\n'), ((1823, 1856), 'numpy.stack', 'np.stack', (['wordform_list_all_glove'], {}), '(wordform_list_all_glove)\n', (1831, 1856), True, 'import numpy as np\n'), ((2186, 2220), 'numpy.stack', 'np.stack', (['wordform_top10_pos_glove'], {}), '(wordform_top10_pos_glove)\n', (2194, 2220), True, 'import numpy as np\n'), ((2248, 2282), 'numpy.stack', 'np.stack', (['wordform_top10_neg_glove'], {}), '(wordform_top10_neg_glove)\n', (2256, 2282), True, 'import numpy as np\n'), ((2290, 2366), 'numpy.concatenate', 'np.concatenate', (['(wordform_top10_pos_glove, wordform_top10_neg_glove)'], {'axis': '(0)'}), '((wordform_top10_pos_glove, wordform_top10_neg_glove), axis=0)\n', (2304, 2366), True, 'import numpy as np\n'), ((2981, 3010), 'numpy.stack', 'np.stack', (['word_list_all_glove'], {}), '(word_list_all_glove)\n', (2989, 3010), True, 'import numpy as np\n'), ((3038, 3072), 'numpy.stack', 'np.stack', (['word_list_selected_glove'], {}), '(word_list_selected_glove)\n', (3046, 3072), True, 'import numpy as np\n'), ((3098, 3128), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (3101, 3128), False, 'from sklearn.decomposition import PCA\n'), ((469, 489), 
'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (478, 489), False, 'import json\n'), ((666, 752), 'csv.DictReader', 'csv.DictReader', (['wordforms_file'], {'delimiter': '"""\t"""', 'fieldnames': "['class_name', 'words']"}), "(wordforms_file, delimiter='\\t', fieldnames=['class_name',\n 'words'])\n", (680, 752), False, 'import csv\n'), ((3517, 3540), 'pickle.dump', 'pickle.dump', (['glove_n', 'f'], {}), '(glove_n, f)\n', (3528, 3540), False, 'import pickle\n'), ((369, 426), 'os.path.join', 'os.path.join', (['"""./"""', '"""./senticap/wordform_sentiments.json"""'], {}), "('./', './senticap/wordform_sentiments.json')\n", (381, 426), False, 'import os\n')] |
import collections
from typing import List
import numpy as np
import torch
from genrl.agents.deep.dqn.base import DQN
def ddqn_q_target(
    agent: DQN, next_states: torch.Tensor, rewards: torch.Tensor, dones: torch.Tensor,
) -> torch.Tensor:
    """Compute the Double Q-learning target.

    The online network picks the greedy next action; the target network
    evaluates it.  Drop-in replacement for ``DQN.get_target_values``.

    Args:
        agent (:obj:`DQN`): The agent
        next_states (:obj:`torch.Tensor`): Next states encountered by the agent
        rewards (:obj:`torch.Tensor`): Rewards received by the agent
        dones (:obj:`torch.Tensor`): Game over status of each environment

    Returns:
        torch.Tensor: Double Q-learning target values
    """
    # Action selection with the online model...
    greedy_actions = torch.argmax(agent.model(next_states), dim=-1).unsqueeze(-1)
    # ...action evaluation with the (slow-moving) target model.
    evaluated_q = agent.target_model(next_states).gather(2, greedy_actions)
    not_done = 1 - dones.unsqueeze(-1)
    return rewards.unsqueeze(-1) + agent.gamma * evaluated_q * not_done
def prioritized_q_loss(agent: DQN, batch: collections.namedtuple):
    """Weighted TD loss for prioritized experience replay.

    Computes an importance-weighted squared TD error, refreshes the replay
    buffer priorities from the per-sample errors, and logs the mean loss.

    Args:
        agent (:obj:`DQN`): The agent
        batch (:obj:`collections.namedtuple`): Replayed batch with ``states``,
            ``actions``, ``next_states``, ``rewards``, ``dones``, ``weights``
            and ``indices`` fields

    Returns:
        torch.Tensor: Scalar loss of the Q-function
    """
    predicted = agent.get_q_values(batch.states, batch.actions)
    targets = agent.get_target_q_values(batch.next_states, batch.rewards, batch.dones)
    # Importance-sampling weights correct the bias of prioritized sampling.
    per_sample_loss = batch.weights * (predicted - targets.detach()) ** 2
    # A small epsilon keeps every transition's priority strictly positive.
    new_priorities = per_sample_loss + 1e-5
    agent.replay_buffer.update_priorities(
        batch.indices, new_priorities.detach().cpu().numpy()
    )
    mean_loss = per_sample_loss.mean()
    agent.logs["value_loss"].append(mean_loss.item())
    return mean_loss
def categorical_greedy_action(agent: DQN, state: torch.Tensor) -> np.ndarray:
    """Greedy action selection for a Categorical (distributional) DQN.

    Args:
        agent (:obj:`DQN`): The agent
        state (:obj:`np.ndarray`): Current state of the environment

    Returns:
        np.ndarray: Greedy action for each environment
    """
    # Atom probabilities per action; shape [1, n_envs, action_dim, num_atoms]
    # after the unsqueeze adds a singleton batch dimension.
    atom_probs = agent.model(state.unsqueeze(0)).detach().numpy()
    # Weight each atom by its support value to recover Q-value contributions.
    support = np.linspace(agent.v_min, agent.v_max, agent.num_atoms)
    weighted = atom_probs * support
    # Expected Q per action is the sum over atoms; take the argmax over the
    # action dimension and drop the singleton batch dimension again.
    return weighted.sum(-1).argmax(axis=-1).squeeze(0)
def categorical_q_values(agent: DQN, states: torch.Tensor, actions: torch.Tensor):
    """Gather the value distributions of the taken actions (Categorical DQN).

    Args:
        agent (:obj:`DQN`): The agent
        states (:obj:`torch.Tensor`): States being replayed
        actions (:obj:`torch.Tensor`): Actions being replayed

    Returns:
        torch.Tensor: Per-atom values of shape [batch_size, n_envs, num_atoms],
        clamped to (0.01, 0.99)
    """
    # Model output: [batch_size, n_envs, action_dim, num_atoms].
    dist = agent.model(states)
    # Broadcast actions from [batch_size, n_envs, 1] up to
    # [batch_size, n_envs, 1, num_atoms] so gather selects one atom vector
    # per taken action.
    action_index = actions.unsqueeze(-1).expand(
        agent.batch_size, agent.env.n_envs, 1, agent.num_atoms
    )
    # Gather along the action dimension, then drop it.
    selected = dist.gather(2, action_index).squeeze(2)
    # Clamp keeps the probabilities bounded away from 0 and 1 for stability.
    return selected.clamp(0.01, 0.99)
def categorical_q_target(
    agent: DQN, next_states: np.ndarray, rewards: List[float], dones: List[bool],
):
    """Projected distribution of target Q-values (Categorical/C51 DQN).

    Greedily selects next actions from the target network's expected values,
    then projects the discounted, shifted support back onto the fixed atom
    support.

    Args:
        agent (:obj:`DQN`): The agent
        next_states (:obj:`torch.Tensor`): Next states encountered by the agent
        rewards (:obj:`torch.Tensor`): Rewards received by the agent
        dones (:obj:`torch.Tensor`): Game over status of each environment

    Returns:
        torch.Tensor: Projected target distribution of shape
        [batch_size, n_envs, num_atoms]
    """
    delta_z = float(agent.v_max - agent.v_min) / (agent.num_atoms - 1)
    support = torch.linspace(agent.v_min, agent.v_max, agent.num_atoms)
    next_q_value_dist = agent.target_model(next_states) * support
    next_actions = torch.argmax(next_q_value_dist.sum(-1), axis=-1)
    next_actions = next_actions[:, :, np.newaxis, np.newaxis]
    next_actions = next_actions.expand(
        agent.batch_size, agent.env.n_envs, 1, agent.num_atoms
    )
    next_q_values = next_q_value_dist.gather(2, next_actions).squeeze(2)
    rewards = rewards.unsqueeze(-1).expand_as(next_q_values)
    dones = dones.unsqueeze(-1).expand_as(next_q_values)
    # Bellman update of each atom (notation follows section 4 of the paper).
    # FIX: the discount factor was hard-coded to 0.99; use the agent's
    # configured gamma, consistent with ddqn_q_target above.
    Tz = rewards + (1 - dones) * agent.gamma * support
    Tz = Tz.clamp(min=agent.v_min, max=agent.v_max)
    # Fractional atom position and its neighbouring atom indices.
    bz = (Tz - agent.v_min) / delta_z
    l = bz.floor().long()
    u = bz.ceil().long()
    # Flattened row offsets so index_add_ can scatter all (batch, env) rows
    # of the projection in a single call.
    offset = (
        torch.linspace(
            0,
            (agent.batch_size * agent.env.n_envs - 1) * agent.num_atoms,
            agent.batch_size * agent.env.n_envs,
        )
        .long()
        .view(agent.batch_size, agent.env.n_envs, 1)
        .expand(agent.batch_size, agent.env.n_envs, agent.num_atoms)
    )
    # Distribute each atom's probability mass to its two nearest support atoms.
    target_q_values = torch.zeros(next_q_values.size())
    target_q_values.view(-1).index_add_(
        0, (l + offset).view(-1), (next_q_values * (u.float() - bz)).view(-1),
    )
    target_q_values.view(-1).index_add_(
        0, (u + offset).view(-1), (next_q_values * (bz - l.float())).view(-1),
    )
    return target_q_values
def categorical_q_loss(agent: DQN, batch: collections.namedtuple):
    """Categorical DQN loss function to calculate the loss of the Q-function

    Computes the cross-entropy between the projected target distribution and
    the log of the predicted distribution.

    Args:
        agent (:obj:`DQN`): The agent
        batch (:obj:`collections.namedtuple` of :obj:`torch.Tensor`): Batch of experiences

    Returns:
        loss (:obj:`torch.Tensor`): Calculated loss of the Q-function
    """
    predicted_dist = agent.get_q_values(batch.states, batch.actions)
    projected_dist = agent.get_target_q_values(
        batch.next_states, batch.rewards, batch.dones
    )
    # Cross-entropy: negative expected log-likelihood under the target distribution
    return -(projected_dist * predicted_dist.log()).sum(1).mean()
| [
"torch.mul",
"numpy.linspace",
"torch.linspace",
"torch.argmax"
] | [((5091, 5148), 'torch.linspace', 'torch.linspace', (['agent.v_min', 'agent.v_max', 'agent.num_atoms'], {}), '(agent.v_min, agent.v_max, agent.num_atoms)\n', (5105, 5148), False, 'import torch\n'), ((2582, 2636), 'numpy.linspace', 'np.linspace', (['agent.v_min', 'agent.v_max', 'agent.num_atoms'], {}), '(agent.v_min, agent.v_max, agent.num_atoms)\n', (2593, 2636), True, 'import numpy as np\n'), ((845, 884), 'torch.argmax', 'torch.argmax', (['next_q_value_dist'], {'dim': '(-1)'}), '(next_q_value_dist, dim=-1)\n', (857, 884), False, 'import torch\n'), ((1159, 1205), 'torch.mul', 'torch.mul', (['max_next_q_target_values', '(1 - dones)'], {}), '(max_next_q_target_values, 1 - dones)\n', (1168, 1205), False, 'import torch\n'), ((5912, 6032), 'torch.linspace', 'torch.linspace', (['(0)', '((agent.batch_size * agent.env.n_envs - 1) * agent.num_atoms)', '(agent.batch_size * agent.env.n_envs)'], {}), '(0, (agent.batch_size * agent.env.n_envs - 1) * agent.\n num_atoms, agent.batch_size * agent.env.n_envs)\n', (5926, 6032), False, 'import torch\n')] |
# Application of different OpenCV filters here
# Interactive webcam demo: each keystroke (see `modes`) selects the filter
# applied to subsequent frames; <Esc> quits the main loop below.
import cv2 # import OpenCV 3 with *CONTRIBUTIONS*
import random
import numpy as np
camera = cv2.VideoCapture(0) # get default camera
window_name = 'My camera'
# Keyboard key -> human-readable mode name; the active mode drives the filter branch
modes = {
    '0': 'Unchanged',   # show unchanged frame
    '1': 'Canny',       # apply Canny edge detection
    '2': 'Threshold',   # adaptive Gaussian thresholding
    '3': 'Harris',      # detect corners in an image
    '4': 'SIFT',        # Scale-Invariant Feature Transform (SIFT) - patented
    '5': 'SURF',        # Speeded-Up Robust Features (SURF) - patented
    '6': 'ORB',         # Oriented FAST and Rotated BRIEF (ORB) - not patented!
    '7': 'BRIEF',       # BRIEF descriptors with the help of CenSurE (STAR) detector
    '8': 'Contours',    # Draw contours and mean colors inside contours
    '9': 'Blur',        # Blur
    'a': 'Motion',      # Motion detection
    'b': 'Background',  # Background substractor (KNN, MOG2 or GMG)
    'c': 'Skin',        # Detect skin tones
    'd': 'OptFlow',     # Lucas Kanade optical flow
    'e': 'Affine1',     # Affine random rotation and shift
    'f': 'Affine2',     # Affine random transformations
    'g': 'Perspective', # Perspective random transformations
    'h': 'Equalize',    # Histogram Equalization
    'i': 'CLAHE',       # CLAHE Contrast Limited Adaptive Histogram Equalization
    'j': 'LAB',         # Increase the contrast of an image (LAB color space + CLAHE)
    'k': 'Pyramid',     # Image pyramid
    'l': 'Laplacian',   # Laplacian gradient filter
    'm': 'Sobel X',     # Sobel / Scharr vertical gradient filter
    'n': 'Sobel Y',     # Sobel / Scharr horizontal gradient filter
    'o': 'Blobs',       # Blob detection
}
# One named constant per mode so the loop can compare `mode == mode_xxx`
mode_unchanged = modes['0']
mode_canny = modes['1']
mode_threshold = modes['2']
mode_harris = modes['3']
mode_sift = modes['4']
mode_surf = modes['5']
mode_orb = modes['6']
mode_brief = modes['7']
mode_contours = modes['8']
mode_blur = modes['9']
mode_motion = modes['a']
mode_bground = modes['b']
mode_skin = modes['c']
mode_optflow = modes['d']
mode_affine1 = modes['e']
mode_affine2 = modes['f']
mode_perspective = modes['g']
mode_equalize = modes['h']
mode_clahe = modes['i']
mode_lab = modes['j']
mode_pyramid = modes['k']
mode_laplacian = modes['l']
mode_sobelx = modes['m']
mode_sobely = modes['n']
mode_blobs = modes['o']
mode = mode_canny # default mode
# Feature detectors are constructed once up front; SIFT/SURF live in the
# xfeatures2d (contrib) module, BRIEF needs a separate keypoint detector (STAR)
algorithms = {
    mode_sift: cv2.xfeatures2d.SIFT_create(),
    mode_surf: cv2.xfeatures2d.SURF_create(4000),
    mode_orb: cv2.ORB_create(),
    mode_brief: [cv2.xfeatures2d.StarDetector_create(),
                 cv2.xfeatures2d.BriefDescriptorExtractor_create()]
}
bs = None        # background subtractor, created lazily in the Background mode
old_gray = None  # previous grayscale frame for optical flow (None = re-init)
rotation = 0     # accumulated rotation angle for Affine1 mode
shift = [0, 0]   # accumulated x/y translation for Affine1 mode
# Source/destination point triples (Affine2) and quads (Perspective);
# the *2/*4 copies random-walk away from the originals each frame
ptrs1 = np.float32([[0,0],[400,0],[0,400]])
ptrs2 = np.copy(ptrs1)
ptrs3 = np.float32([[0,0],[400,0],[0,400],[400,400]])
ptrs4 = np.copy(ptrs3)
detector1 = None # blob detector, created lazily in the Blobs mode
# Main capture loop: grab a frame, apply the currently selected filter,
# display it, then poll the keyboard for a mode change or <Esc>.
while True:
    ok, frame = camera.read() # read frame
    if not ok: continue # skip underlying part, if frame didn't read correctly
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert to grayscale
    if mode == mode_canny:
        frame = cv2.Canny(gray, 100, 200) # Canny edge detection
    if mode == mode_threshold:
        frame = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                      cv2.THRESH_BINARY, 11, 2) # adaptive Gaussian thresholding
    if mode == mode_harris:
        gray = np.float32(gray)
        dst = cv2.cornerHarris(gray, 2, 23, 0.04) # 3rd parameter is odd and must be [3,31]
        frame[dst > 0.01 * dst.max()] = [0, 0, 255] # mark strong corners in red
    if mode in [mode_sift, mode_surf, mode_orb, mode_brief]:
        algorithm = algorithms[mode]
        if mode == mode_brief:
            # BRIEF is descriptor-only: detect with STAR, then describe with BRIEF
            keypoints = algorithm[0].detect(gray, None)
            keypoints, descriptor = algorithm[1].compute(gray, keypoints)
        else:
            keypoints, descriptor = algorithm.detectAndCompute(gray, None)
        frame = cv2.drawKeypoints(image=frame, outImage=frame, keypoints=keypoints,
                                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, color=(51, 163, 236))
    if mode == mode_motion:
        ok, frame2 = camera.read() # read second frame
        if not ok: continue # skip underlying part, if frame didn't read correctly
        gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY) # convert to grayscale
        frame = cv2.absdiff(gray, gray2) # get absolute difference between two frames
    if mode == mode_blur:
        #frame = cv2.GaussianBlur(frame, (29, 29), 0) # Gaussian blur
        #frame = cv2.blur(frame, (29, 29)) # Blur
        #frame = cv2.medianBlur(frame, 29) # Median blur
        frame = cv2.bilateralFilter(frame, 11, 80, 80) # Bilateral filter preserves the edges
    if mode == mode_contours:
        frame2 = frame.copy() # make a copy
        for threshold in [15, 50, 100, 240]: # use various thresholds
            ret, thresh = cv2.threshold(gray, threshold, 255, 0)
            # NOTE(review): 3-value return is the OpenCV 3.x signature;
            # findContours returns only 2 values in OpenCV 4.x — verify version.
            image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            for contour in contours:
                mask = np.zeros(gray.shape, np.uint8) # create empty mask
                cv2.drawContours(mask, [contour], 0, 255, -1) # fill mask with white color
                mean = cv2.mean(frame, mask=mask) # find mean color inside mask
                cv2.drawContours(frame2, [contour], 0, mean, -1) # draw frame with masked mean color
            cv2.drawContours(frame2, contours, -1, (0,0,0), 1) # draw contours with black color
        frame = frame2
    if mode == mode_bground:
        if bs is None: # lazy one-time construction of the subtractor
            bs = cv2.createBackgroundSubtractorKNN(detectShadows=True)
        fgmask = bs.apply(frame)
        frame = frame & cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR) # keep foreground pixels only
    if mode == mode_skin:
        # determine upper and lower HSV limits for (my) skin tones
        lower = np.array([0, 100, 0], dtype="uint8")
        upper = np.array([50, 255, 255], dtype="uint8")
        # switch to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # find mask of pixels within HSV range
        skinMask = cv2.inRange(hsv, lower, upper)
        # denoise
        skinMask = cv2.GaussianBlur(skinMask, (9, 9), 0)
        # kernel for morphology operation
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4, 4))
        # CLOSE (dilate / erode)
        skinMask = cv2.morphologyEx(skinMask, cv2.MORPH_CLOSE, kernel, iterations=3)
        # denoise the mask
        skinMask = cv2.GaussianBlur(skinMask, (9, 9), 0)
        # only display the masked pixels
        frame = cv2.bitwise_and(frame, frame, mask=skinMask)
    if mode == mode_optflow:
        if old_gray is None: # (re)initialise tracking state
            # params for ShiTomasi corner detection
            feature_params = dict(maxCorners=100,
                                  qualityLevel=0.3,
                                  minDistance=7,
                                  blockSize=7)
            # Parameters for lucas kanade optical flow
            lk_params = dict(winSize=(15, 15),
                             maxLevel=2,
                             criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
            # Create some random colors
            color = np.random.randint(0, 255, (100, 3))
            # Take first frame and find corners in it
            old_frame = frame.copy()
            old_gray = gray.copy()
            # NOTE(review): this read's success flag is not checked, unlike the
            # other camera.read() calls — confirm that is intentional.
            ret, frame = camera.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
            # Create a mask image for drawing purposes
            mask = np.zeros_like(old_frame)
        try: # If motion is large this method will fail. Ignore exceptions
            # calculate optical flow
            p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, gray, p0, None, **lk_params)
            # Select good points
            good_new = p1[st == 1]
            good_old = p0[st == 1]
            # draw the tracks
            for i, (new, old) in enumerate(zip(good_new, good_old)):
                a, b = new.ravel()
                c, d = old.ravel()
                mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
                frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            frame = cv2.add(frame, mask)
            # Now update the previous frame and previous points
            old_gray = gray.copy()
            p0 = good_new.reshape(-1, 1, 2)
        except:
            old_gray = None # set optical flow to None if exception occurred
    if mode == mode_affine1:
        rotation += random.choice([-1, 1]) # random rotation anticlockwise/clockwise
        shift[0] += random.choice([-1, 1]) # random shift left/right on 1 pixel
        shift[1] += random.choice([-1, 1]) # random shift up/bottom on 1 pixel
        rows, cols = frame.shape[:2]
        m = cv2.getRotationMatrix2D((cols/2, rows/2), rotation, 1) # rotation matrix
        frame = cv2.warpAffine(frame, m, (cols, rows))
        m = np.float32([[1, 0, shift[0]], [0, 1, shift[1]]]) # translation matrix
        frame = cv2.warpAffine(frame, m, (cols, rows))
    if mode == mode_affine2:
        for ptr in np.nditer(ptrs2, op_flags=['readwrite']):
            ptr += random.choice([-1, 1]) # apply random shift on 1 pixel foreach element
        rows, cols = frame.shape[:2]
        m = cv2.getAffineTransform(ptrs1, ptrs2)
        frame = cv2.warpAffine(frame, m, (cols, rows))
    if mode == mode_perspective:
        for ptr in np.nditer(ptrs4, op_flags=['readwrite']):
            ptr += random.choice([-1, 1]) # apply random shift on 1 pixel foreach element
        rows, cols = frame.shape[:2]
        m = cv2.getPerspectiveTransform(ptrs3, ptrs4)
        frame = cv2.warpPerspective(frame, m, (cols, rows))
    if mode == mode_equalize:
        b, g, r = cv2.split(frame) # split on blue, green and red channels
        b2 = cv2.equalizeHist(b) # apply Histogram Equalization to each channel
        g2 = cv2.equalizeHist(g)
        r2 = cv2.equalizeHist(r)
        frame = cv2.merge((b2,g2,r2)) # merge changed channels to the current frame
    if mode == mode_clahe:
        # clipLimit is 40 by default; tileSize is 8x8 by default
        clahe = cv2.createCLAHE(clipLimit=10., tileGridSize=(8,8))
        b, g, r = cv2.split(frame) # split on blue, green and red channels
        b2 = clahe.apply(b) # apply CLAHE to each channel
        g2 = clahe.apply(g)
        r2 = clahe.apply(r)
        frame = cv2.merge((b2, g2, r2)) # merge changed channels to the current frame
    if mode == mode_lab:
        lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB) # convert image to LAB color model
        l, a, b = cv2.split(lab) # split on l, a, b channels
        clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
        l2 = clahe.apply(l) # apply CLAHE to L-channel
        lab = cv2.merge((l2,a,b)) # merge enhanced L-channel with the a and b channels
        frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    if mode == mode_pyramid:
        # Stack progressively downsampled copies below the original frame
        h, w = frame.shape[:2]
        x, y = 0, int(h+h/2)
        image = np.zeros((y, w, 3), np.uint8) # empty matrix filled with zeros
        image[:h, :w, :3] = frame
        for i in range(8):
            frame = cv2.pyrDown(frame) # halve resolution each step
            h, w = frame.shape[:2]
            image[y-h:y, x:x+w] = frame
            x += w
        frame = image
    if mode == mode_laplacian:
        #frame = cv2.Laplacian(gray, cv2.CV_8U)
        frame = np.uint8(np.absolute(cv2.Laplacian(gray, cv2.CV_64F)))
    if mode == mode_sobelx:
        #frame = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=5)
        #frame = np.uint8(np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)))
        # If ksize=-1, a 3x3 Scharr filter is used which gives better results than 3x3 Sobel filter
        frame = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=-1)
        #frame = np.uint8(np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)))
    if mode == mode_sobely:
        #frame = cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=5)
        #frame = np.uint8(np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)))
        # If ksize=-1, a 3x3 Scharr filter is used which gives better results than 3x3 Sobel filter
        frame = cv2.Sobel(gray, cv2.CV_8U, 0, 1, ksize=-1)
        #frame = np.uint8(np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)))
    if mode == mode_blobs:
        if detector1 is None: # lazy one-time construction of both detectors
            # Setup SimpleBlobDetector parameters
            params = cv2.SimpleBlobDetector_Params()
            params.filterByColor = True
            params.blobColor = 255 # extract light blobs
            params.filterByArea = True
            params.maxArea = 40000
            params.filterByCircularity = True
            params.minCircularity = 0.7 # circularity of a square is 0.785
            # Set up the detector with default parameters.
            detector1 = cv2.SimpleBlobDetector_create(params)
            params.blobColor = 0 # extract dark blobs
            detector2 = cv2.SimpleBlobDetector_create(params)
        # Detect blobs
        keypoints1 = detector1.detect(frame)
        keypoints2 = detector2.detect(frame)
        # Draw detected blobs as green and blue circles
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle
        # corresponds to the size of a blob
        frame2 = cv2.drawKeypoints(frame, keypoints1, np.array([]), (0, 255, 0),
                                   cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        frame = cv2.drawKeypoints(frame2, keypoints2, np.array([]), (255, 0, 0),
                                  cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    # write text on image
    cv2.putText(frame, mode, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (51, 163, 236), 1, cv2.LINE_AA)
    cv2.imshow(window_name, frame) # show frame
    key = cv2.waitKey(1) & 0xff # read keystroke
    if key == 255: continue # skip underlying part, if key hasn't been pressed
    if key == 27: break # <Escape> key pressed, exit from cycle
    for m in modes:
        if key == ord(m): mode = modes[m] # if key coincide, set the appropriate mode
camera.release() # release web camera
cv2.destroyAllWindows()
| [
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"cv2.xfeatures2d.SIFT_create",
"cv2.pyrDown",
"cv2.Laplacian",
"cv2.threshold",
"numpy.zeros_like",
"cv2.xfeatures2d.SURF_create",
"cv2.ORB_create",
"cv2.SimpleBlobDetector_Params",
"cv2.Sobel",
"cv2.xfeatures2d.S... | [((141, 160), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (157, 160), False, 'import cv2\n'), ((2834, 2874), 'numpy.float32', 'np.float32', (['[[0, 0], [400, 0], [0, 400]]'], {}), '([[0, 0], [400, 0], [0, 400]])\n', (2844, 2874), True, 'import numpy as np\n'), ((2878, 2892), 'numpy.copy', 'np.copy', (['ptrs1'], {}), '(ptrs1)\n', (2885, 2892), True, 'import numpy as np\n'), ((2901, 2953), 'numpy.float32', 'np.float32', (['[[0, 0], [400, 0], [0, 400], [400, 400]]'], {}), '([[0, 0], [400, 0], [0, 400], [400, 400]])\n', (2911, 2953), True, 'import numpy as np\n'), ((2955, 2969), 'numpy.copy', 'np.copy', (['ptrs3'], {}), '(ptrs3)\n', (2962, 2969), True, 'import numpy as np\n'), ((14406, 14429), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (14427, 14429), False, 'import cv2\n'), ((2530, 2559), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (2557, 2559), False, 'import cv2\n'), ((2577, 2610), 'cv2.xfeatures2d.SURF_create', 'cv2.xfeatures2d.SURF_create', (['(4000)'], {}), '(4000)\n', (2604, 2610), False, 'import cv2\n'), ((2628, 2644), 'cv2.ORB_create', 'cv2.ORB_create', ([], {}), '()\n', (2642, 2644), False, 'import cv2\n'), ((3135, 3174), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3147, 3174), False, 'import cv2\n'), ((13917, 14018), 'cv2.putText', 'cv2.putText', (['frame', 'mode', '(5, 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', '(51, 163, 236)', '(1)', 'cv2.LINE_AA'], {}), '(frame, mode, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (51, 163,\n 236), 1, cv2.LINE_AA)\n', (13928, 14018), False, 'import cv2\n'), ((14019, 14049), 'cv2.imshow', 'cv2.imshow', (['window_name', 'frame'], {}), '(window_name, frame)\n', (14029, 14049), False, 'import cv2\n'), ((2663, 2700), 'cv2.xfeatures2d.StarDetector_create', 'cv2.xfeatures2d.StarDetector_create', ([], {}), '()\n', (2698, 2700), False, 'import cv2\n'), ((2719, 2768), 
'cv2.xfeatures2d.BriefDescriptorExtractor_create', 'cv2.xfeatures2d.BriefDescriptorExtractor_create', ([], {}), '()\n', (2766, 2768), False, 'import cv2\n'), ((3243, 3268), 'cv2.Canny', 'cv2.Canny', (['gray', '(100)', '(200)'], {}), '(gray, 100, 200)\n', (3252, 3268), False, 'import cv2\n'), ((3340, 3435), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['gray', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(11)', '(2)'], {}), '(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 11, 2)\n', (3361, 3435), False, 'import cv2\n'), ((3546, 3562), 'numpy.float32', 'np.float32', (['gray'], {}), '(gray)\n', (3556, 3562), True, 'import numpy as np\n'), ((3577, 3612), 'cv2.cornerHarris', 'cv2.cornerHarris', (['gray', '(2)', '(23)', '(0.04)'], {}), '(gray, 2, 23, 0.04)\n', (3593, 3612), False, 'import cv2\n'), ((4072, 4216), 'cv2.drawKeypoints', 'cv2.drawKeypoints', ([], {'image': 'frame', 'outImage': 'frame', 'keypoints': 'keypoints', 'flags': 'cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS', 'color': '(51, 163, 236)'}), '(image=frame, outImage=frame, keypoints=keypoints, flags=\n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, color=(51, 163, 236))\n', (4089, 4216), False, 'import cv2\n'), ((4430, 4470), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.COLOR_BGR2GRAY'], {}), '(frame2, cv2.COLOR_BGR2GRAY)\n', (4442, 4470), False, 'import cv2\n'), ((4511, 4535), 'cv2.absdiff', 'cv2.absdiff', (['gray', 'gray2'], {}), '(gray, gray2)\n', (4522, 4535), False, 'import cv2\n'), ((4804, 4842), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['frame', '(11)', '(80)', '(80)'], {}), '(frame, 11, 80, 80)\n', (4823, 4842), False, 'import cv2\n'), ((6037, 6073), 'numpy.array', 'np.array', (['[0, 100, 0]'], {'dtype': '"""uint8"""'}), "([0, 100, 0], dtype='uint8')\n", (6045, 6073), True, 'import numpy as np\n'), ((6090, 6129), 'numpy.array', 'np.array', (['[50, 255, 255]'], {'dtype': '"""uint8"""'}), "([50, 255, 255], dtype='uint8')\n", (6098, 6129), True, 'import 
numpy as np\n'), ((6168, 6206), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (6180, 6206), False, 'import cv2\n'), ((6273, 6303), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower', 'upper'], {}), '(hsv, lower, upper)\n', (6284, 6303), False, 'import cv2\n'), ((6341, 6378), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['skinMask', '(9, 9)', '(0)'], {}), '(skinMask, (9, 9), 0)\n', (6357, 6378), False, 'import cv2\n'), ((6438, 6490), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(4, 4)'], {}), '(cv2.MORPH_ELLIPSE, (4, 4))\n', (6463, 6490), False, 'import cv2\n'), ((6543, 6608), 'cv2.morphologyEx', 'cv2.morphologyEx', (['skinMask', 'cv2.MORPH_CLOSE', 'kernel'], {'iterations': '(3)'}), '(skinMask, cv2.MORPH_CLOSE, kernel, iterations=3)\n', (6559, 6608), False, 'import cv2\n'), ((6655, 6692), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['skinMask', '(9, 9)', '(0)'], {}), '(skinMask, (9, 9), 0)\n', (6671, 6692), False, 'import cv2\n'), ((6750, 6794), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'skinMask'}), '(frame, frame, mask=skinMask)\n', (6765, 6794), False, 'import cv2\n'), ((8798, 8820), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (8811, 8820), False, 'import random\n'), ((8884, 8906), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (8897, 8906), False, 'import random\n'), ((8965, 8987), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (8978, 8987), False, 'import random\n'), ((9074, 9132), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', 'rotation', '(1)'], {}), '((cols / 2, rows / 2), rotation, 1)\n', (9097, 9132), False, 'import cv2\n'), ((9164, 9202), 'cv2.warpAffine', 'cv2.warpAffine', (['frame', 'm', '(cols, rows)'], {}), '(frame, m, (cols, rows))\n', (9178, 9202), False, 'import cv2\n'), ((9215, 9263), 'numpy.float32', 'np.float32', (['[[1, 0, shift[0]], [0, 1, 
shift[1]]]'], {}), '([[1, 0, shift[0]], [0, 1, shift[1]]])\n', (9225, 9263), True, 'import numpy as np\n'), ((9302, 9340), 'cv2.warpAffine', 'cv2.warpAffine', (['frame', 'm', '(cols, rows)'], {}), '(frame, m, (cols, rows))\n', (9316, 9340), False, 'import cv2\n'), ((9389, 9429), 'numpy.nditer', 'np.nditer', (['ptrs2'], {'op_flags': "['readwrite']"}), "(ptrs2, op_flags=['readwrite'])\n", (9398, 9429), True, 'import numpy as np\n'), ((9571, 9607), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['ptrs1', 'ptrs2'], {}), '(ptrs1, ptrs2)\n', (9593, 9607), False, 'import cv2\n'), ((9624, 9662), 'cv2.warpAffine', 'cv2.warpAffine', (['frame', 'm', '(cols, rows)'], {}), '(frame, m, (cols, rows))\n', (9638, 9662), False, 'import cv2\n'), ((9715, 9755), 'numpy.nditer', 'np.nditer', (['ptrs4'], {'op_flags': "['readwrite']"}), "(ptrs4, op_flags=['readwrite'])\n", (9724, 9755), True, 'import numpy as np\n'), ((9897, 9938), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['ptrs3', 'ptrs4'], {}), '(ptrs3, ptrs4)\n', (9924, 9938), False, 'import cv2\n'), ((9955, 9998), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame', 'm', '(cols, rows)'], {}), '(frame, m, (cols, rows))\n', (9974, 9998), False, 'import cv2\n'), ((10047, 10063), 'cv2.split', 'cv2.split', (['frame'], {}), '(frame)\n', (10056, 10063), False, 'import cv2\n'), ((10118, 10137), 'cv2.equalizeHist', 'cv2.equalizeHist', (['b'], {}), '(b)\n', (10134, 10137), False, 'import cv2\n'), ((10199, 10218), 'cv2.equalizeHist', 'cv2.equalizeHist', (['g'], {}), '(g)\n', (10215, 10218), False, 'import cv2\n'), ((10232, 10251), 'cv2.equalizeHist', 'cv2.equalizeHist', (['r'], {}), '(r)\n', (10248, 10251), False, 'import cv2\n'), ((10268, 10291), 'cv2.merge', 'cv2.merge', (['(b2, g2, r2)'], {}), '((b2, g2, r2))\n', (10277, 10291), False, 'import cv2\n'), ((10445, 10497), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(10.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=10.0, tileGridSize=(8, 8))\n', 
(10460, 10497), False, 'import cv2\n'), ((10514, 10530), 'cv2.split', 'cv2.split', (['frame'], {}), '(frame)\n', (10523, 10530), False, 'import cv2\n'), ((10703, 10726), 'cv2.merge', 'cv2.merge', (['(b2, g2, r2)'], {}), '((b2, g2, r2))\n', (10712, 10726), False, 'import cv2\n'), ((10813, 10851), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2LAB'], {}), '(frame, cv2.COLOR_BGR2LAB)\n', (10825, 10851), False, 'import cv2\n'), ((10906, 10920), 'cv2.split', 'cv2.split', (['lab'], {}), '(lab)\n', (10915, 10920), False, 'import cv2\n'), ((10966, 11017), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(3.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=3.0, tileGridSize=(8, 8))\n', (10981, 11017), False, 'import cv2\n'), ((11088, 11109), 'cv2.merge', 'cv2.merge', (['(l2, a, b)'], {}), '((l2, a, b))\n', (11097, 11109), False, 'import cv2\n'), ((11178, 11214), 'cv2.cvtColor', 'cv2.cvtColor', (['lab', 'cv2.COLOR_LAB2BGR'], {}), '(lab, cv2.COLOR_LAB2BGR)\n', (11190, 11214), False, 'import cv2\n'), ((11320, 11349), 'numpy.zeros', 'np.zeros', (['(y, w, 3)', 'np.uint8'], {}), '((y, w, 3), np.uint8)\n', (11328, 11349), True, 'import numpy as np\n'), ((12036, 12078), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_8U', '(1)', '(0)'], {'ksize': '(-1)'}), '(gray, cv2.CV_8U, 1, 0, ksize=-1)\n', (12045, 12078), False, 'import cv2\n'), ((12449, 12491), 'cv2.Sobel', 'cv2.Sobel', (['gray', 'cv2.CV_8U', '(0)', '(1)'], {'ksize': '(-1)'}), '(gray, cv2.CV_8U, 0, 1, ksize=-1)\n', (12458, 12491), False, 'import cv2\n'), ((14074, 14088), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14085, 14088), False, 'import cv2\n'), ((5055, 5093), 'cv2.threshold', 'cv2.threshold', (['gray', 'threshold', '(255)', '(0)'], {}), '(gray, threshold, 255, 0)\n', (5068, 5093), False, 'import cv2\n'), ((5135, 5199), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (5151, 5199), False, 
'import cv2\n'), ((5599, 5651), 'cv2.drawContours', 'cv2.drawContours', (['frame2', 'contours', '(-1)', '(0, 0, 0)', '(1)'], {}), '(frame2, contours, -1, (0, 0, 0), 1)\n', (5615, 5651), False, 'import cv2\n'), ((5776, 5829), 'cv2.createBackgroundSubtractorKNN', 'cv2.createBackgroundSubtractorKNN', ([], {'detectShadows': '(True)'}), '(detectShadows=True)\n', (5809, 5829), False, 'import cv2\n'), ((5887, 5927), 'cv2.cvtColor', 'cv2.cvtColor', (['fgmask', 'cv2.COLOR_GRAY2BGR'], {}), '(fgmask, cv2.COLOR_GRAY2BGR)\n', (5899, 5927), False, 'import cv2\n'), ((7405, 7440), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(100, 3)'], {}), '(0, 255, (100, 3))\n', (7422, 7440), True, 'import numpy as np\n'), ((7625, 7664), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (7637, 7664), False, 'import cv2\n'), ((7682, 7744), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['old_gray'], {'mask': 'None'}), '(old_gray, mask=None, **feature_params)\n', (7705, 7744), False, 'import cv2\n'), ((7819, 7843), 'numpy.zeros_like', 'np.zeros_like', (['old_frame'], {}), '(old_frame)\n', (7832, 7843), True, 'import numpy as np\n'), ((7983, 8046), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['old_gray', 'gray', 'p0', 'None'], {}), '(old_gray, gray, p0, None, **lk_params)\n', (8007, 8046), False, 'import cv2\n'), ((8491, 8511), 'cv2.add', 'cv2.add', (['frame', 'mask'], {}), '(frame, mask)\n', (8498, 8511), False, 'import cv2\n'), ((9450, 9472), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9463, 9472), False, 'import random\n'), ((9776, 9798), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9789, 9798), False, 'import random\n'), ((11465, 11483), 'cv2.pyrDown', 'cv2.pyrDown', (['frame'], {}), '(frame)\n', (11476, 11483), False, 'import cv2\n'), ((12704, 12735), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (12733, 12735), 
False, 'import cv2\n'), ((13113, 13150), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (13142, 13150), False, 'import cv2\n'), ((13230, 13267), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (13259, 13267), False, 'import cv2\n'), ((13621, 13633), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13629, 13633), True, 'import numpy as np\n'), ((13781, 13793), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13789, 13793), True, 'import numpy as np\n'), ((5260, 5290), 'numpy.zeros', 'np.zeros', (['gray.shape', 'np.uint8'], {}), '(gray.shape, np.uint8)\n', (5268, 5290), True, 'import numpy as np\n'), ((5328, 5373), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[contour]', '(0)', '(255)', '(-1)'], {}), '(mask, [contour], 0, 255, -1)\n', (5344, 5373), False, 'import cv2\n'), ((5427, 5453), 'cv2.mean', 'cv2.mean', (['frame'], {'mask': 'mask'}), '(frame, mask=mask)\n', (5435, 5453), False, 'import cv2\n'), ((5501, 5549), 'cv2.drawContours', 'cv2.drawContours', (['frame2', '[contour]', '(0)', 'mean', '(-1)'], {}), '(frame2, [contour], 0, mean, -1)\n', (5517, 5549), False, 'import cv2\n'), ((11716, 11747), 'cv2.Laplacian', 'cv2.Laplacian', (['gray', 'cv2.CV_64F'], {}), '(gray, cv2.CV_64F)\n', (11729, 11747), False, 'import cv2\n')] |
__author__ = '<NAME>'
from matplotlib import pyplot as plt
import numpy as np
import scipy.interpolate
import matplotlib as mpl
#--------------------------------
# Functions
#--------------------------------
# ------------------------------------------
# Radial Function Cos
# ------------------------------------------
def cutoffcos(X, Rc):
    """Cosine cutoff function: 0.5 * (cos(pi * X / Rc) + 1) for X < Rc, 0 beyond.

    Args:
        X:  distance(s); scalar or numpy array of any shape
        Rc: cutoff radius

    Returns:
        Cutoff value(s) in [0, 1]; exactly 0 for X >= Rc (cos(pi) = -1 there).

    Fix: the original aliased the input (``Xt = X``) and clamped it element by
    element with a Python loop, silently mutating the caller's array in place.
    ``np.minimum`` produces a new clamped array instead (and also handles
    scalars and multi-dimensional arrays, not just 1-D).
    """
    Xt = np.minimum(X, Rc)  # clamp to Rc without touching the caller's data
    return 0.5 * (np.cos((np.pi * Xt) / Rc) + 1.0)
# ------------------------------------------
# Radial Function Cos
# ------------------------------------------
def radialfunctioncos(X, eta, Rc, Rs):
    """Radial symmetry function: Gaussian centered at Rs, damped by the cosine cutoff."""
    gaussian = np.exp(-eta * (X - Rs) ** 2.0)
    return gaussian * cutoffcos(X, Rc)
# ------------------------------------------
# Radial Function Cos
# ------------------------------------------
def angularradialfunctioncos(X, eta, Rc, Rs):
    """Square root of the radial symmetry function (per-bond factor of an angular term)."""
    gaussian = np.exp(-eta * (X - Rs) ** 2.0)
    return np.sqrt(gaussian * cutoffcos(X, Rc))
# ------------------------------------------
# Radial Function Cos
# ------------------------------------------
def angularradialfunctioncos2(X, Y, eta, Rc, Rs):
    """Radial part of the angular symmetry function for a pair of distances X and Y.

    Gaussian of the mean distance, damped by the geometric mean of both cutoffs.
    """
    gaussian = np.exp(-eta * ((X + Y) / 2.0 - Rs) ** 2)
    damping = np.sqrt(cutoffcos(X, Rc) * cutoffcos(Y, Rc))
    return gaussian * damping
# ------------------------------------------
# Angular Function
# ------------------------------------------
def angularfunction(T, zeta, lam, Ts):
    """Angular symmetry function: 2^(1-zeta)/2 * (1 + lam*cos(T - Ts))^zeta.

    Args:
        T:    angle(s), radians
        zeta: sharpness exponent
        lam:  +1 or -1, selects which angle the peak sits at
        Ts:   angular shift, radians
    """
    base = 1.0 + lam * np.cos(T - Ts)
    return 0.5 * 2.0 ** (1.0 - zeta) * base ** zeta
# ------------------------------------------
# Calculate The Steps for a Radial Dataset-
# ------------------------------------------
def computecutoffdataset(x1, x2, pts, Rc, plt, scolor, slabel):
    """Sample the bare cosine cutoff on [x1, x2] at pts points and plot it."""
    xs = np.linspace(x1, x2, pts, endpoint=True)
    ys = cutoffcos(xs, Rc)
    plt.plot(xs, ys, label=slabel, color=scolor, linewidth=2)
# ------------------------------------------
# Calculate The Steps for a Radial Dataset-
# ------------------------------------------
def computeradialdataset(x1, x2, pts, eta, Rc, Rs, plt, scolor, slabel):
    """Sample one radial symmetry function on [x1, x2] at pts points and plot it."""
    xs = np.linspace(x1, x2, pts, endpoint=True)
    ys = radialfunctioncos(xs, eta, Rc, Rs)
    plt.plot(xs, ys, label=slabel, color=scolor, linewidth=2)
# ------------------------------------------
# Calculate The Steps for a Radial Dataset
# ------------------------------------------
def computeangularradialdataset(x1, x2, pts, eta, Rc, Rs, plt, scolor, slabel):
    """Plot the radial part of the angular function, evaluated on the X == Y diagonal."""
    xs = np.linspace(x1, x2, pts, endpoint=True)
    ys = angularradialfunctioncos2(xs, xs, eta, Rc, Rs)
    plt.plot(xs, ys, label=slabel, color=scolor, linewidth=2)
# ------------------------------------------
# Calculate The Steps for an angular Dataset
# ------------------------------------------
def computeangulardataset(t1, t2, pts, zeta, lam, Ts, plt, scolor, slabel):
    """Sample one angular symmetry function over the angle range [t1, t2] and plot it."""
    thetas = np.linspace(t1, t2, pts, endpoint=True)
    ys = angularfunction(thetas, zeta, lam, Ts)
    plt.plot(thetas, ys, label=slabel, color=scolor, linewidth=2)
# ------------------------------------------
# Cost functions and their comparison graph
# ------------------------------------------
def expcost(X, tau):
    """Exponential cost curve: (2*X/tau) * exp(X^2/tau), i.e. d/dX of exp(X^2/tau)."""
    scaled = (X * X) / tau
    return 2 / tau * X * np.exp(scaled)
def msecost(X):
    """Identity cost curve: returns X unchanged (linear baseline for comparison)."""
    return X
def graphexpcost(t1, t2, pts, tau, plt, scolor, slabel):
    """Plot the exponential cost (for tau and for 0.5) against the linear MSE cost.

    NOTE: ``scolor`` is accepted but unused — the three curves are drawn in
    fixed red/green/blue and all share the same label.
    """
    xs = np.linspace(t1, t2, pts, endpoint=True)
    plt.plot(xs, expcost(xs, tau), label=slabel, color='red', linewidth=2)
    plt.plot(xs, expcost(xs, 0.5), label=slabel, color='green', linewidth=2)
    plt.plot(xs, msecost(xs), label=slabel, color='blue', linewidth=2)
# ------------------------------------------
# Write a data array to a parameter file
# ------------------------------------------
def printdatatofile(f, title, X, N):
    """Write the first N values of X to file handle f as ``title = [v1,v2,...]``.

    Each value is rendered in scientific notation with 7 decimal places;
    the line is terminated with a newline.
    """
    body = ','.join('{:.7e}'.format(X[i]) for i in range(N))
    f.write(title + ' = [' + body + ']\n')
# ------------------------------------------
# Simple Addition Function
# ------------------------------------------
def add(x, y):
    """Return the sum of x and y."""
    return x + y
# ----------------------------------------------------
# Show a 2d Contour Plot of the Angular Env Functions
# ----------------------------------------------------
def show2dcontangulargraph (ShfA,ShfZ,eta,zeta,Rc,func,title):
    """Show a 2D contour plot of the angular environment functions.

    Samples N random points in the square [-Rc, Rc]^2, accumulates the value
    of every (ShfZ, ShfA) shell pair at each point via the binary operator
    ``func`` (e.g. ``add`` or ``max``), then interpolates the scatter onto a
    regular grid and displays it with a colorbar.
    """
    N = 200000
    # Random sample points in [-Rc, Rc] x [-Rc, Rc]
    x, y = 2.0 * Rc * np.random.random((2, N)) - Rc
    #print(x)
    R = np.sqrt(x**2 + y**2)  # distance from the origin
    T = np.arctan2(x,y)      # angle of each sample point
    z = np.zeros(N)
    for i in ShfZ:
        for j in ShfA:
            print( 'ShfZ: ' + str(i) + ' ShfA: ' + str(j) )
            #zt = angularfunction(T,zeta,1.0,i) * angularradialfunctioncos(R,eta,Rc,j) * angularradialfunctioncos(R,eta,Rc,j)
            zt = angularfunction(T,zeta,1.0,i) * angularradialfunctioncos2(R,R,eta,Rc,j)
            #zt = angularradialfunctioncos(R1,R2,eta,Rc,j)
            # NOTE(review): range starts at 1, so sample 0 is never accumulated
            # (z[0] stays 0) — looks like an off-by-one; confirm before fixing.
            for k in range(1,z.shape[0]):
                z[k] = func(z[k],zt[k])
                #print(z[k])
    # Set up a regular grid of interpolation points
    # NOTE(review): grid bounds mix x.min() with y.max() — presumably both
    # axes were meant to span the same range; verify against intended extent.
    xi, yi = np.linspace(x.min(), y.max(), 600), np.linspace(x.min(), y.max(), 600)
    xi, yi = np.meshgrid(xi, yi)
    zi = scipy.interpolate.griddata((x, y), z, (xi, yi), method='linear')
    plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
               extent=[x.min(), y.max(), x.min(), y.max()])
    plt.title(title)
    plt.ylabel('Distance ($\AA$)')
    plt.xlabel('Distance ($\AA$)')
    font = {'family': 'Bitstream Vera Sans',
            'weight': 'normal',
            'size': 18}
    plt.rc('font', **font)
    plt.colorbar()
    plt.show()
# ----------------------------------------------------
# Show a 2d Contour Plot of the Radial Env Functions
# ----------------------------------------------------
def show2dcontradialgraph (ShfR,eta,Rc,func,title):
    """Monte-Carlo sample the radial symmetry functions over a 2D plane
    and display an interpolated contour image.

    ShfR  : radial shift parameters to sweep
    eta   : Gaussian width hyperparameter
    Rc    : cutoff radius; the plot spans [-Rc, Rc] in x and y
    func  : binary point-wise reducer (e.g. add or max)
    title : figure title
    """
    N = 200000
    # random sample points uniformly covering the square [-Rc, Rc]^2
    x, y = 2.0 * Rc * np.random.random((2, N)) - Rc
    print(x)
    R = np.sqrt(x**2 + y**2)
    T = np.arctan2(x,y)
    z = np.zeros(N)
    for j in ShfR:
        # BUGFIX: the original printed str(i), but no 'i' exists in this
        # function's scope (NameError unless a module-level 'i' leaks in)
        print( 'ShfR: ' + str(j) )
        zt = radialfunctioncos(R,eta,Rc,j)
        # NOTE(review): range starts at 1, so z[0] is never reduced --
        # looks like an off-by-one; kept as-is, confirm before changing.
        for k in range(1,z.shape[0]):
            z[k] = func(z[k],zt[k])
    # Set up a regular grid of interpolation points
    xi, yi = np.linspace(x.min(), y.max(), 300), np.linspace(x.min(), y.max(), 300)
    xi, yi = np.meshgrid(xi, yi)
    zi = scipy.interpolate.griddata((x, y), z, (xi, yi), method='linear')
    plt.imshow(zi, vmin=z.min(), vmax=z.max(), origin='lower',
               extent=[x.min(), x.max(), y.min(), y.max()])
    font = {'family': 'Bitstream Vera Sans',
            'weight': 'normal',
            'size': 18}
    plt.rc('font', **font)
    plt.title(title)
    # BUGFIX: raw strings -- '\A' is an invalid escape sequence in a normal
    # string literal (DeprecationWarning, SyntaxError in future Python)
    plt.ylabel(r'Distance ($\AA$)')
    plt.xlabel(r'Distance ($\AA$)')
    plt.colorbar()
    plt.show()
# ****************************************************
#--------------------------------
# Set Parameters
#--------------------------------
#File name
#pf = '/home/jsmith48/scratch/ANI-1x_retrain/train_ens/rHCNO-4.6R_16-3.1A_a4-8.params' # Output filename
#pf = '/home/jsmith48/scratch/roman_tz_train/train/rHCNOSFCl-4.8R_16-3.1A_a4-8.params' # Output filename
#pf = '/home/jsmith48/scratch/auto_dim_al/modeldim/rHCNO-5.2R_16-4.0A_a4-8.params' # Output filename
#pf = '/nh/nest/u/jsmith/Research/gutzwiller_research/train_test/rX-5.0A_16-3.2A_a4-8.params'
#pf = '/nh/nest/u/jsmith/Research/datasets/iso17/train/mol0/rHCO-5.0A_16-3.4A_a4-8.params'
#pf = '/nh/nest/u/jsmith/Research/gutzwiller_research/training-data/model_training/params/rX-2.8R_32-2.0A_a8-8.params'
#pf = '/nh/nest/u/jsmith/Research/train_qm7/train/rHCNOS-5.0R_16-3.4A_a8-8.params'
#pf = '/nh/nest/u/jsmith/scratch/Research/dipole_training/test_ani-1x/train/rHO-5.8R_32-3.5A_a4-8.params'
#pf = '/home/jujuman/Research/rHO-6.0R_32-6.0A_a4-8.params'
# Output filename for the generated descriptor parameter (.params) file
pf = '/home/jujuman/Research/tin_research/rSn-6.0R_32-4.5A_a8-8.params'
Nrr = 32 # Number of shifting radial functions
Na = 1 # Number of atom types
Nar = 8 # Number of shifting angular/radial parameters
Nzt = 8 # Number of angular shifting parameters
# TM flag written verbatim into the params file (meaning not shown here)
TM = 1
Rcr = 6.0 # radial cutoff
Rca = 4.5 # Angular cutoff
# xs: smallest shift value -- presumably the inner radius in Angstroms; confirm
xs = 2.0
#Atyp = '[H,C,N,O,S,F,Cl]'
Atyp = '[Sn]'
EtaR = np.array([64.0]) # Radial eta parameters
EtaA = np.array([16.0]) # Angular/Radial eta parameters
Zeta = np.array([64.0]) # Angular zeta parameters
# ****************************************************
# colormap used to color-code the successive shift curves in the plots
cmap = mpl.cm.brg
#graphexpcost(-2.0,2.0,400,2.0,plt,'red','test')
#plt.show()
#computecutoffdataset(0.0,Rc,1000,Rc,plt,'blue','cutoff function')
#plt.show()
fontsize = 18
#--------------------------------
# Main Program
# (Build Env Params File)
#--------------------------------
# Total radial feature count: one set of Nrr shifts per atom type
Nrt = Nrr * Na
ShfR = np.zeros(Nrr)
#Now instead of multiple etaR we use multiple shifts with a single large EtaR
for i in range(0,Nrr):
    # shifts evenly spaced from xs up to (but not including) Rcr
    stepsize = (Rcr-xs) / float(Nrr)
    step = i * stepsize + xs
    color = i/float(Nrr)
    computeradialdataset(0.0, Rcr, 1000, EtaR[0], Rcr,step, plt, cmap(color), '$R_s$ = '+ "{:.2f}".format(step))
    ShfR[i] = step
plt.title('Radial environment functions (REF) \n' + r"${\eta}$ = " + "{:.2f}".format(EtaR[0]))
plt.ylabel('REF Output')
plt.xlabel('Angstroms')
plt.legend(bbox_to_anchor=(0.8, 0.98), loc=2, borderaxespad=0.)
font = {'family': 'Bitstream Vera Sans',
        'weight': 'normal',
        'size': fontsize}
plt.rc('font', **font)
plt.show()
#Uncomment for pretty contour plots of the radial environments using a sum and then max function
#show2dcontradialgraph(ShfR,EtaR,Rc,add,'Sum Radial Output')
#show2dcontradialgraph(ShfR,EtaR,Rcr,max,'Maximum radial function output')
ShfZ = np.zeros(Nzt)
# Total angular feature count; Na*(Na+1)/2 counts unordered atom-type pairs
Nat = Nar * (Na*(Na+1)/2) * Nzt
for i in range(0,Nzt):
    # angular shifts centered in each of Nzt equal bins of [0, pi]
    stepsize = np.pi / float(Nzt)
    step = i*stepsize+stepsize/2.0
    color = i/float(Nrr)
    computeangulardataset(-np.pi,np.pi,1000,Zeta[0],1.0,step,plt, cmap(color), r"${\theta}_s$ = " + "{:.2f}".format(step))
    ShfZ[i] = step
#for i in range(0,Nzt):
#    stepsize = (2.0 * np.pi) / (float(Nzt))
#    step = i*stepsize
#    stepp = 0
#    if i is 0:
#        stepp = 0.1
#    else:
#        stepp = 1
#    color = i/float(Nrr)
#    computeangulardataset(-np.pi,np.pi,1000,2.0,1.0,step,plt, cmap(color), r"${\lambda}$ = " + "{:.2f}".format(stepp))
#    ShfZ[i] = step
plt.title('Modified Angular Environment Functions (AEF) \n' + r"${\zeta}$ = " + "{:.2f}".format(Zeta[0]))
# plt.title('Original Angular Environment Functions (OAEF) \n' + r"${\zeta}$ = " + "{:.2f}".format(2.0))
plt.ylabel('OAEF Output')
plt.xlabel('Radians')
plt.legend(bbox_to_anchor=(0.7, 0.95), loc=2, borderaxespad=0.)
font = {'family': 'Bitstream Vera Sans',
        'weight': 'normal',
        'size': fontsize}
plt.rc('font', **font)
plt.show()
ShfA = np.zeros(Nar)
for i in range(0,Nar):
    # radial shifts of the angular functions, evenly spaced from xs to Rca
    stepsize = (Rca-xs) / float(Nar)
    step = (i * stepsize + xs)
    color = i/float(Nrr)
    computeangularradialdataset(0.0, Rca, 1000, EtaA[0], Rca,step, plt, cmap(color), r"${R_s}$ = " + "{:.2f}".format(step))
    ShfA[i] = step
plt.title('Angular (Only Radial) Environment Functions (AREF)')
plt.ylabel('AREF Output')
plt.xlabel('Angstroms')
plt.legend(bbox_to_anchor=(0.7, 0.95), loc=2, borderaxespad=0.)
font = {'family': 'Bitstream Vera Sans',
        'weight': 'normal',
        'size': fontsize}
plt.rc('font', **font)
plt.show()
#Uncomment for pretty contour plots of the angular environments using a sum and then max function
#show2dcontangulargraph(ShfA,ShfZ,EtaA[0],Zeta[0],Rca,add,'Sum Angular Output')
#show2dcontangulargraph(ShfA,ShfZ,EtaA[0],Zeta[0],Rca,max,'Maximum angular output')
# Total descriptor vector length: angular plus radial feature counts
Nt = Nat + Nrt
print('Total Environmental Vector Size: ',int(Nt))
# Write every descriptor hyperparameter to the .params file.  A context
# manager replaces the original bare open()/close() pair so the handle
# is released even if one of the writes raises.
with open(pf, 'w') as f:
    f.write('TM = ' + str(TM) + '\n')
    f.write('Rcr = ' + "{:.4e}".format(Rcr) + '\n')
    f.write('Rca = ' + "{:.4e}".format(Rca) + '\n')
    printdatatofile(f,'EtaR',EtaR,EtaR.shape[0])
    printdatatofile(f,'ShfR',ShfR,Nrr)
    printdatatofile(f,'Zeta',Zeta,Zeta.shape[0])
    printdatatofile(f,'ShfZ',ShfZ,Nzt)
    printdatatofile(f,'EtaA',EtaA,EtaA.shape[0])
    printdatatofile(f,'ShfA',ShfA,Nar)
    f.write('Atyp = ' + Atyp + '\n')
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.arctan2",
"matplotlib.pyplot.rc",
"numpy.meshgrid",
"numpy.cos",
"matpl... | [((8074, 8090), 'numpy.array', 'np.array', (['[64.0]'], {}), '([64.0])\n', (8082, 8090), True, 'import numpy as np\n'), ((8122, 8138), 'numpy.array', 'np.array', (['[16.0]'], {}), '([16.0])\n', (8130, 8138), True, 'import numpy as np\n'), ((8178, 8194), 'numpy.array', 'np.array', (['[64.0]'], {}), '([64.0])\n', (8186, 8194), True, 'import numpy as np\n'), ((8594, 8607), 'numpy.zeros', 'np.zeros', (['Nrr'], {}), '(Nrr)\n', (8602, 8607), True, 'import numpy as np\n'), ((9029, 9053), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""REF Output"""'], {}), "('REF Output')\n", (9039, 9053), True, 'from matplotlib import pyplot as plt\n'), ((9054, 9077), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angstroms"""'], {}), "('Angstroms')\n", (9064, 9077), True, 'from matplotlib import pyplot as plt\n'), ((9078, 9142), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.8, 0.98)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(0.8, 0.98), loc=2, borderaxespad=0.0)\n', (9088, 9142), True, 'from matplotlib import pyplot as plt\n'), ((9237, 9259), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (9243, 9259), True, 'from matplotlib import pyplot as plt\n'), ((9260, 9270), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9268, 9270), True, 'from matplotlib import pyplot as plt\n'), ((9513, 9526), 'numpy.zeros', 'np.zeros', (['Nzt'], {}), '(Nzt)\n', (9521, 9526), True, 'import numpy as np\n'), ((10373, 10398), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""OAEF Output"""'], {}), "('OAEF Output')\n", (10383, 10398), True, 'from matplotlib import pyplot as plt\n'), ((10399, 10420), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radians"""'], {}), "('Radians')\n", (10409, 10420), True, 'from matplotlib import pyplot as plt\n'), ((10421, 10485), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.7, 0.95)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(0.7, 0.95), loc=2, 
borderaxespad=0.0)\n', (10431, 10485), True, 'from matplotlib import pyplot as plt\n'), ((10580, 10602), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (10586, 10602), True, 'from matplotlib import pyplot as plt\n'), ((10603, 10613), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10611, 10613), True, 'from matplotlib import pyplot as plt\n'), ((10623, 10636), 'numpy.zeros', 'np.zeros', (['Nar'], {}), '(Nar)\n', (10631, 10636), True, 'import numpy as np\n'), ((10898, 10961), 'matplotlib.pyplot.title', 'plt.title', (['"""Angular (Only Radial) Environment Functions (AREF)"""'], {}), "('Angular (Only Radial) Environment Functions (AREF)')\n", (10907, 10961), True, 'from matplotlib import pyplot as plt\n'), ((10962, 10987), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""AREF Output"""'], {}), "('AREF Output')\n", (10972, 10987), True, 'from matplotlib import pyplot as plt\n'), ((10988, 11011), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angstroms"""'], {}), "('Angstroms')\n", (10998, 11011), True, 'from matplotlib import pyplot as plt\n'), ((11012, 11076), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.7, 0.95)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(0.7, 0.95), loc=2, borderaxespad=0.0)\n', (11022, 11076), True, 'from matplotlib import pyplot as plt\n'), ((11171, 11193), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (11177, 11193), True, 'from matplotlib import pyplot as plt\n'), ((11194, 11204), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11202, 11204), True, 'from matplotlib import pyplot as plt\n'), ((1679, 1718), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'pts'], {'endpoint': '(True)'}), '(x1, x2, pts, endpoint=True)\n', (1690, 1718), True, 'import numpy as np\n'), ((1747, 1802), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'F'], {'label': 'slabel', 'color': 'scolor', 'linewidth': '(2)'}), '(X, F, label=slabel, color=scolor, 
linewidth=2)\n', (1755, 1802), True, 'from matplotlib import pyplot as plt\n'), ((2012, 2051), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'pts'], {'endpoint': '(True)'}), '(x1, x2, pts, endpoint=True)\n', (2023, 2051), True, 'import numpy as np\n'), ((2095, 2150), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'F'], {'label': 'slabel', 'color': 'scolor', 'linewidth': '(2)'}), '(X, F, label=slabel, color=scolor, linewidth=2)\n', (2103, 2150), True, 'from matplotlib import pyplot as plt\n'), ((2366, 2405), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'pts'], {'endpoint': '(True)'}), '(x1, x2, pts, endpoint=True)\n', (2377, 2405), True, 'import numpy as np\n'), ((2459, 2514), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'F'], {'label': 'slabel', 'color': 'scolor', 'linewidth': '(2)'}), '(X, F, label=slabel, color=scolor, linewidth=2)\n', (2467, 2514), True, 'from matplotlib import pyplot as plt\n'), ((2728, 2767), 'numpy.linspace', 'np.linspace', (['t1', 't2', 'pts'], {'endpoint': '(True)'}), '(t1, t2, pts, endpoint=True)\n', (2739, 2767), True, 'import numpy as np\n'), ((2811, 2866), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'F'], {'label': 'slabel', 'color': 'scolor', 'linewidth': '(2)'}), '(T, F, label=slabel, color=scolor, linewidth=2)\n', (2819, 2866), True, 'from matplotlib import pyplot as plt\n'), ((3175, 3214), 'numpy.linspace', 'np.linspace', (['t1', 't2', 'pts'], {'endpoint': '(True)'}), '(t1, t2, pts, endpoint=True)\n', (3186, 3214), True, 'import numpy as np\n'), ((3242, 3296), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'F'], {'label': 'slabel', 'color': '"""red"""', 'linewidth': '(2)'}), "(T, F, label=slabel, color='red', linewidth=2)\n", (3250, 3296), True, 'from matplotlib import pyplot as plt\n'), ((3325, 3381), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'F'], {'label': 'slabel', 'color': '"""green"""', 'linewidth': '(2)'}), "(T, F, label=slabel, color='green', linewidth=2)\n", (3333, 3381), True, 'from matplotlib import pyplot as plt\n'), 
((3406, 3461), 'matplotlib.pyplot.plot', 'plt.plot', (['T', 'G'], {'label': 'slabel', 'color': '"""blue"""', 'linewidth': '(2)'}), "(T, G, label=slabel, color='blue', linewidth=2)\n", (3414, 3461), True, 'from matplotlib import pyplot as plt\n'), ((4314, 4338), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4321, 4338), True, 'import numpy as np\n'), ((4343, 4359), 'numpy.arctan2', 'np.arctan2', (['x', 'y'], {}), '(x, y)\n', (4353, 4359), True, 'import numpy as np\n'), ((4368, 4379), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4376, 4379), True, 'import numpy as np\n'), ((5019, 5038), 'numpy.meshgrid', 'np.meshgrid', (['xi', 'yi'], {}), '(xi, yi)\n', (5030, 5038), True, 'import numpy as np\n'), ((5239, 5255), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (5248, 5255), True, 'from matplotlib import pyplot as plt\n'), ((5260, 5291), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance ($\\\\AA$)"""'], {}), "('Distance ($\\\\AA$)')\n", (5270, 5291), True, 'from matplotlib import pyplot as plt\n'), ((5295, 5326), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance ($\\\\AA$)"""'], {}), "('Distance ($\\\\AA$)')\n", (5305, 5326), True, 'from matplotlib import pyplot as plt\n'), ((5432, 5454), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (5438, 5454), True, 'from matplotlib import pyplot as plt\n'), ((5460, 5474), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5472, 5474), True, 'from matplotlib import pyplot as plt\n'), ((5479, 5489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5487, 5489), True, 'from matplotlib import pyplot as plt\n'), ((5796, 5820), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (5803, 5820), True, 'import numpy as np\n'), ((5825, 5841), 'numpy.arctan2', 'np.arctan2', (['x', 'y'], {}), '(x, y)\n', (5835, 5841), True, 'import numpy as np\n'), ((5850, 5861), 'numpy.zeros', 'np.zeros', 
(['N'], {}), '(N)\n', (5858, 5861), True, 'import numpy as np\n'), ((6206, 6225), 'numpy.meshgrid', 'np.meshgrid', (['xi', 'yi'], {}), '(xi, yi)\n', (6217, 6225), True, 'import numpy as np\n'), ((6527, 6549), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (6533, 6549), True, 'from matplotlib import pyplot as plt\n'), ((6555, 6571), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6564, 6571), True, 'from matplotlib import pyplot as plt\n'), ((6576, 6607), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distance ($\\\\AA$)"""'], {}), "('Distance ($\\\\AA$)')\n", (6586, 6607), True, 'from matplotlib import pyplot as plt\n'), ((6611, 6642), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Distance ($\\\\AA$)"""'], {}), "('Distance ($\\\\AA$)')\n", (6621, 6642), True, 'from matplotlib import pyplot as plt\n'), ((6647, 6661), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6659, 6661), True, 'from matplotlib import pyplot as plt\n'), ((6666, 6676), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6674, 6676), True, 'from matplotlib import pyplot as plt\n'), ((671, 701), 'numpy.exp', 'np.exp', (['(-eta * (X - Rs) ** 2.0)'], {}), '(-eta * (X - Rs) ** 2.0)\n', (677, 701), True, 'import numpy as np\n'), ((1141, 1181), 'numpy.exp', 'np.exp', (['(-eta * ((X + Y) / 2.0 - Rs) ** 2)'], {}), '(-eta * ((X + Y) / 2.0 - Rs) ** 2)\n', (1147, 1181), True, 'import numpy as np\n'), ((3043, 3062), 'numpy.exp', 'np.exp', (['(X * X / tau)'], {}), '(X * X / tau)\n', (3049, 3062), True, 'import numpy as np\n'), ((474, 497), 'numpy.cos', 'np.cos', (['(np.pi * Xt / Rc)'], {}), '(np.pi * Xt / Rc)\n', (480, 497), True, 'import numpy as np\n'), ((908, 938), 'numpy.exp', 'np.exp', (['(-eta * (X - Rs) ** 2.0)'], {}), '(-eta * (X - Rs) ** 2.0)\n', (914, 938), True, 'import numpy as np\n'), ((4260, 4284), 'numpy.random.random', 'np.random.random', (['(2, N)'], {}), '((2, N))\n', (4276, 4284), True, 'import numpy as np\n'), 
((5743, 5767), 'numpy.random.random', 'np.random.random', (['(2, N)'], {}), '((2, N))\n', (5759, 5767), True, 'import numpy as np\n'), ((1443, 1457), 'numpy.cos', 'np.cos', (['(T - Ts)'], {}), '(T - Ts)\n', (1449, 1457), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.runtime import Core, Model, Tensor, PartialShape, Type
from openvino.runtime import opset8 as opset
from openvino.runtime.op import Constant, Parameter, tensor_iterator
from openvino.runtime.passes import Manager
from openvino.runtime.utils.types import get_dtype
import openvino as ov
import numpy as np
import sys
import os, errno
import struct
import argparse
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
class Colors:
    """ ANSI color codes """
    # Normal-intensity foreground colors (SGR 0;3x)
    BLACK = "\033[0;30m"
    RED = "\033[0;31m"
    GREEN = "\033[0;32m"
    BROWN = "\033[0;33m"
    BLUE = "\033[0;34m"
    PURPLE = "\033[0;35m"
    CYAN = "\033[0;36m"
    LIGHT_GRAY = "\033[0;37m"
    # Bright-intensity foreground colors (SGR 1;3x)
    DARK_GRAY = "\033[1;30m"
    LIGHT_RED = "\033[1;31m"
    LIGHT_GREEN = "\033[1;32m"
    YELLOW = "\033[1;33m"
    LIGHT_BLUE = "\033[1;34m"
    LIGHT_PURPLE = "\033[1;35m"
    LIGHT_CYAN = "\033[1;36m"
    LIGHT_WHITE = "\033[1;37m"
    # Text attributes
    BOLD = "\033[1m"
    FAINT = "\033[2m"
    ITALIC = "\033[3m"
    UNDERLINE = "\033[4m"
    BLINK = "\033[5m"
    NEGATIVE = "\033[7m"
    CROSSED = "\033[9m"
    # Reset all attributes back to the terminal default
    END = "\033[0m"
def mkdirp(d):
    """Create directory *d* (and any missing parents), like ``mkdir -p``.

    Idempotent: an already-existing directory is not an error.  Uses the
    stdlib ``exist_ok`` flag instead of the manual errno.EEXIST check.
    (Unlike the old code, this still raises if *d* exists but is a file,
    which is the safer behavior.)
    """
    os.makedirs(d, exist_ok=True)
def fill_tensors_with_random(input):
    """Build a Tensor matching *input*'s element type and shape, filled
    with reproducible uniform random data (fixed seed 0).

    Boolean inputs draw from [0, 1]; all other inputs draw from the
    uint8 value range so the data is valid for any integer width.
    """
    dtype = get_dtype(input.get_element_type())
    # BUGFIX: np.bool (a deprecated alias of the builtin bool) was removed
    # in NumPy 1.24; compare against bool directly -- same semantics.
    rand_min, rand_max = (0, 1) if dtype == bool else (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
    # np.random.uniform excludes high: add 1 to have it generated
    if np.dtype(dtype).kind in ['i', 'u', 'b']:
        rand_max += 1
    # seeded generator so every run produces identical inputs (dumps compare)
    rs = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(0)))
    shape = input.get_shape()
    a = rs.uniform(rand_min, rand_max, list(shape)).astype(dtype)
    return Tensor(a)
def fill_tensors_from_image(input, input_file):
    """Load a .npz/.npy archive and wrap its first array as a Tensor
    cast to *input*'s element type and reshaped to *input*'s shape.

    Every array in the archive is printed for debugging, but only the
    first one is used.
    """
    dtype = get_dtype(input.get_element_type())
    shape = input.get_shape()
    data = np.load(input_file, allow_pickle=True)
    for key in data.files:
        print(key)
        print(data[key])
    first_array = data[data.files[0]]
    return Tensor(first_array.astype(dtype).reshape(shape))
class IEB:
    """Reader/writer for the IEB binary tensor-dump format.

    An .ieb file is a packed fixed-size header (magic, version, precision
    code, up to 7 dims, data offset/size, scaling info) followed by the
    raw tensor bytes.  ``IEB.dump`` writes one; ``IEB(path)`` reads one
    and exposes the tensor as ``self.value`` (numpy array).
    """
    # maps IEB precision code -> (numpy dtype, element size in bytes)
    precision_table = {
        10:(np.float32, 4),
        40:(np.uint8, 1),
        50:(np.int8, 1),
        70:(np.int32, 4),
        74:(np.uint32, 4),
        72:(np.int64, 8),
        73:(np.uint64, 8)
    }
    @classmethod
    def dump(cls, ieb_file, nparray):
        """Serialize *nparray* into *ieb_file* using the IEB header layout."""
        # example header: b'IEB0', 256, 10, 4, 1, 32, 1104, 1104, 0, 0, 0, 255, 0, 0, 0, 72, 156008448, 0, 0
        fmt = "@4sHBB7IB3BLLLL"
        magic, ver = b'IEB0', 256
        # find the IEB precision code for this dtype; asserts if unsupported
        precision = -1
        for k,v in IEB.precision_table.items():
            if (v[0] == nparray.dtype):
                precision = k
        assert(precision >= 0)
        ndims = len(nparray.shape)
        # header always carries 7 dims; unused trailing entries stay 0
        dims = [0 for _ in range(7)]
        for i, s in enumerate(nparray.shape):
            dims[i] = s
        scaling_axis = 255
        reserved = [0,0,0]
        # payload starts immediately after the fixed-size header
        data_offset = struct.calcsize(fmt)
        data_size = np.prod(nparray.shape) * nparray.itemsize
        scaling_data_offset = 0
        scaling_data_size = 0
        header = struct.pack(fmt, magic, ver, precision, ndims,
                    dims[0], dims[1], dims[2], dims[3], dims[4], dims[5], dims[6],
                    scaling_axis, reserved[0], reserved[1], reserved[2],
                    data_offset, data_size, scaling_data_offset, scaling_data_size)
        with open(ieb_file,"wb") as f:
            f.write(header)
            f.write(nparray.tobytes())
        return
    def __init__(self, ieb_file) -> None:
        """Parse *ieb_file*; the tensor is available as ``self.value``."""
        with open(ieb_file,"rb") as f:
            data = f.read() # bytes
            # same format string as dump() above
            header = struct.unpack_from("@4sHBB7IB3BLLLL", data, offset=0)
            # print(header, len(header))
            (self.magic, self.ver, self.precision, self.ndims,
            self.dims0, self.dims1, self.dims2, self.dims3, self.dims4, self.dims5, self.dims6,
            self.scaling_axis,
            self.reserved0, self.reserved1, self.reserved2,
            self.data_offset, self.data_size, self.scaling_data_offset, self.scaling_data_size) = header
            (dtype, type_size, ) = IEB.precision_table[self.precision]
            count = self.data_size//type_size
            # recover the data as numpy array
            self.dims = np.array([self.dims0, self.dims1, self.dims2, self.dims3, self.dims4, self.dims5, self.dims6])
            self.dims = self.dims[0:self.ndims]
            self.value = np.frombuffer(data, dtype = dtype, count=count, offset=self.data_offset)
            self.value = np.reshape(self.value, self.dims)
            # self.values = struct.unpack_from(f"@{count}{stype}", data, offset=self.data_offset)
            # print(self.values.shape, self.values.dtype)
        pass
class DumpIndex:
    """Lightweight record describing one dumped-tensor index entry."""
    def __init__(self, args) -> None:
        # args is a 6-tuple in this fixed order
        exec_index, name, original_layers, tag, itag, ieb_file = args
        self.ExecIndex = exec_index
        self.Name = name
        self.OriginalLayers = original_layers
        self.tag = tag
        self.itag = itag
        self.ieb_file = ieb_file
def dump_tensors(core, model, dump_dir = "./cpu_dump", dump_ports="OUT", device_target="CPU"):
    """Compile *model*, run one inference with seeded random inputs, and
    dump per-node tensors plus final results as .ieb files in *dump_dir*.

    The CPU plugin's blob-dump behavior is controlled via OV_CPU_BLOB_DUMP_*
    environment variables, which must be set before compile_model().
    dump_ports selects which node ports to dump ("OUT" or "ALL").
    The executable (runtime) graph is also serialized next to the script.
    """
    os.environ["OV_CPU_BLOB_DUMP_DIR"] = dump_dir
    os.environ["OV_CPU_BLOB_DUMP_FORMAT"] = "BIN"
    os.environ["OV_CPU_BLOB_DUMP_NODE_PORTS"] = dump_ports
    mkdirp(dump_dir)
    # single-stream, single-thread, f32 config so dumps are deterministic
    # and comparable across runs/devices
    device_config = {"PERF_COUNT": "NO",
                     "AFFINITY": "CORE",
                     "PERFORMANCE_HINT_NUM_REQUESTS":0,
                     "PERFORMANCE_HINT":"",
                     "INFERENCE_PRECISION_HINT": "f32",
                     "NUM_STREAMS":1,
                     "INFERENCE_NUM_THREADS":1}
    print("compiling model with {}".format(device_config))
    exec_net = core.compile_model(model, device_target, device_config)
    req = exec_net.create_infer_request()
    print("fill input with random data:")
    inputs={}
    for i in exec_net.inputs:
        inputs[i] = fill_tensors_with_random(i)
        print(f"  {i}")
    print("infer with dump..")
    result = req.infer(inputs)
    # dump result as ieb, so even no dump_ports, you can still know
    # final correctness
    print("Dump result as ieb...")
    # large starting id keeps result files sorted after all node dumps
    result_exec_id = 999900
    for out, value in result.items():
        # sanitize tensor names so they are safe as file names
        names = [name.replace(":","_").replace("/","_") for name in out.names]
        names.sort()
        ieb_name = os.path.join(dump_dir, "#{}_{}.ieb".format(result_exec_id, "~".join(names)))
        print("  {}..".format(ieb_name))
        IEB.dump(ieb_name, value)
        result_exec_id += 1
    runtime_func = exec_net.get_runtime_model()
    # derive the serialized exec-graph name from the last path component
    base_name = dump_dir.split('/')
    base_name = base_name[-1].split('\\')
    xml_path = f"{base_name[-1]}.xml"
    bin_path = f"{base_name[-1]}.bin"
    pass_manager = Manager()
    pass_manager.register_pass("Serialize", xml_path=xml_path, bin_path=bin_path)
    pass_manager.run_passes(runtime_func)
    print(f"{device_target} Runtime model (exec_graph) is serialized to {xml_path}.")
def visualize_diff_abs(diff_abs):
    """Interactively browse an absolute-difference tensor channel by channel.

    The tensor is flattened to (channels, H, W); a vertical slider (or the
    up/down arrow keys, which jump to the next channel with a nonzero diff)
    selects the channel, and Escape exits the program.
    """
    vis_abs = diff_abs
    cur_shape = diff_abs.shape
    # collapse any leading dims so the view is always (C, H, W)
    if len(vis_abs.shape) > 3:
        vis_abs = vis_abs.reshape(-1,cur_shape[-2],cur_shape[-1])
    fig, ax = plt.subplots()
    # first channel with diff
    for cur_channel in range(0, vis_abs.shape[0]):
        diff_img = vis_abs[cur_channel,:,:]
        if np.amax(diff_img) > 1e-8:
            break
    im = ax.imshow(vis_abs[cur_channel,:,:])
    def update_channel(val):
        # slider callback: display channel `val` and record it as current
        nonlocal cur_channel
        val = int(val)
        cur_channel = val
        diff_img = vis_abs[val,:,:]
        max_diff = np.amax(diff_img)
        ax.set_title(" channel:{} shape:{} Max diff: {:.8f}".format(
            val, diff_img.shape, np.amax(diff_img)))
        # normalize intensity
        im.set_data(diff_img * 255 / max_diff)
        fig.canvas.draw_idle()
    update_channel(cur_channel)
    ax_ch_slider = plt.axes([0.1, 0.25, 0.0225, 0.63])
    ch_slider = Slider(
        ax=ax_ch_slider,
        label="Channels",
        valmin=0,
        valmax=vis_abs.shape[0],
        valinit=0,
        valstep=1,
        orientation="vertical"
    )
    ch_slider.on_changed(update_channel)
    def on_press(event):
        # keyboard navigation: Esc quits, up/down seek next/prev dirty channel
        # print('press', event.key, 'cur_channel', cur_channel)
        sys.stdout.flush()
        if event.key == 'escape':
            print("escape key detected, exit.")
            sys.exit(1)
        if event.key == 'up':
            for c in range(cur_channel+1, vis_abs.shape[0]):
                diff_img = vis_abs[c,:,:]
                if np.amax(diff_img) > 1e-8:
                    ch_slider.set_val(c)
                    break
        if event.key == 'down':
            for c in range(cur_channel-1, -1, -1):
                diff_img = vis_abs[c,:,:]
                if np.amax(diff_img) > 1e-8:
                    ch_slider.set_val(c)
                    break
    fig.canvas.mpl_connect('key_press_event', on_press)
    plt.show()
def compare_dumps(model, atol, rtol, visualize, dump_dir1, dump_dir2):
    """Compare two dump folders produced by dump_tensors().

    Every .ieb file present (matched by name) in both folders is checked
    with numpy.allclose(atol, rtol).  Mismatches print min/max/mean/std
    diff statistics (model outputs are underlined); files missing from
    *dump_dir2* are reported as skipped.  Ends with a per-dtype summary
    of the worst absolute error seen.
    """
    # tensor names corresponding to model outputs, used to highlight them
    output_tensors = []
    for out in model.outputs:
        for oname in out.get_names():
            output_tensors.append(oname.split(":")[0])
    def is_output(name):
        for tag in output_tensors:
            if tag in name:
                return True
        return False
    def get_sorted_ied_list(dir):
        # collect (exec_id, name, file_name) tuples sorted by execution order;
        # file names look like "#<exec_id>_<name>.ieb"
        iebs = []
        for file_name in os.listdir(dir):
            if file_name.endswith(".ieb"):
                k = file_name.find("_")
                id = int(file_name[1:k])
                name = file_name[k:]
                iebs.append((id, name, file_name))
        return sorted(iebs, key=lambda item:item[0])
    ieb_list1 = get_sorted_ied_list(dump_dir1)
    ieb_list2 = get_sorted_ied_list(dump_dir2)
    def get_match_ieb_file2(f1):
        # match by name (index 1), ignoring the exec id which may differ
        for f2 in ieb_list2:
            if f1[1] == f2[1]:
                return f2
        return None
    MAX_atol = {}  # worst absolute error seen so far, keyed by dtype
    for f1 in ieb_list1:
        f2 = get_match_ieb_file2(f1)
        if not f2:
            print("{}[ SKIPPED ]: not found {} in {} {}".format(Colors.YELLOW, f1[-1], dump_dir2, Colors.END))
            continue
        ieb_file1 = f1[-1]
        ieb_file2 = f2[-1]
        # compare
        ieb1 = IEB(os.path.join(dump_dir1, ieb_file1))
        ieb2 = IEB(os.path.join(dump_dir2, ieb_file2))
        if "Input_Constant" in ieb_file1 and "Input_Constant" in ieb_file2:
            # BUGFIX: was a plain string literal, so the {braces} printed verbatim
            print(f"Skipped Input_Constant {ieb_file1} vs {ieb_file2}")
            continue
        if not np.allclose(ieb1.value, ieb2.value, atol=atol, rtol=rtol):
            diff_abs = np.abs(ieb1.value.astype('float32') - ieb2.value.astype('float32'))
            # elements violating the allclose criterion: |a-b| >= atol + rtol*|b|
            thresh = atol + rtol * np.abs(ieb2.value)
            idx = np.where(diff_abs >= thresh)
            atol_max = np.amax(diff_abs[idx])
            if ieb1.value.dtype in MAX_atol:
                if MAX_atol[ieb1.value.dtype] < atol_max:
                    MAX_atol[ieb1.value.dtype] = atol_max
            else:
                # BUGFIX: the first failure for a dtype used to record 0,
                # under-reporting the worst error in the final summary
                MAX_atol[ieb1.value.dtype] = atol_max
            prefixERR = Colors.RED
            if is_output(f1[-1]):
                prefixERR += Colors.UNDERLINE  # model outputs stand out
            print("{}[ FAILED ]: {} {} {}".format(prefixERR, f1[-1], f2[-1], Colors.END))
            info = ""
            if (np.prod(diff_abs.shape) < 8):
                # tensors are tiny: print them verbatim
                info = "{} vs {}".format(ieb1.value.reshape(-1), ieb2.value.reshape(-1))
            max_abs = atol_max  # same quantity as np.amax(diff_abs[idx]); reuse
            max_idx = np.where(diff_abs[idx] >= max_abs)
            max_org = np.abs(ieb2.value)[idx][max_idx]
            print("    {} {} ({:.2e} ~ {:.2e}/{:.2e}={:.2e}) @ mean:{:.2e} std:{:.2e}  detail: {}".format(
                        diff_abs.shape, diff_abs.dtype,
                        np.amin(diff_abs[idx]), max_abs,
                        max_org[0], max_abs / (max_org[0] + 0.000001),
                        np.mean(diff_abs[idx]), np.std(diff_abs[idx]), info))
            if (visualize):
                visualize_diff_abs(diff_abs)
        else:
            print("{}[ OK     ]: {} {} {}".format(Colors.GREEN, f1[-1], f2[-1], Colors.END))
        pass
    print("============================================")
    if (len(MAX_atol) == 0):
        print("Pass")
    else:
        for prec in MAX_atol:
            print("Max atol {} : {}".format(prec, MAX_atol[prec]))
def compare_dump_file(ieb_file1, ieb_file2, visualize):
    """Compare two individual .ieb dump files and print diff statistics.

    Mismatched shapes are compared element-wise after flattening both
    tensors.  Optionally opens the interactive diff viewer.
    """
    ieb1 = IEB(ieb_file1)
    ieb2 = IEB(ieb_file2)
    ref = ieb2.value  # reference values used for the relative-error column
    if ieb1.value.shape != ieb2.value.shape :
        print(" Shape mismatch {} != {} , will compare in flatten.".format(ieb1.value.shape, ieb2.value.shape))
        diff_abs = np.abs(ieb1.value.reshape(-1) - ieb2.value.reshape(-1))
        # BUGFIX: index the reference through the same flattening as diff_abs;
        # the original indexed the un-flattened array with flat indices,
        # which mis-indexes (or crashes on) multi-dimensional tensors
        ref = ieb2.value.reshape(-1)
    else:
        diff_abs = np.abs(ieb1.value - ieb2.value)
    max_abs = np.amax(diff_abs)
    max_idx = np.where(diff_abs >= max_abs)
    max_org = np.abs(ref)[max_idx]
    print("  {} {} ({:.2e} ~ {:.2e}/{:.2e}={:.2e}) @ mean:{:.2e} std:{:.2e} ".format(
        diff_abs.shape, diff_abs.dtype,
        np.amin(diff_abs), max_abs,
        max_org[0], max_abs / (max_org[0] + 0.00001),
        np.mean(diff_abs), np.std(diff_abs)))
    if (visualize):
        visualize_diff_abs(diff_abs)
def main():
    """CLI entry point: dump tensors for one folder, or compare two dumps/files."""
    ap = argparse.ArgumentParser("cpu_cross_check")
    ap.add_argument("-m", type=str, default="", required=True, help="Model file path")
    ap.add_argument("-atol", type=float, default=1e-8, help="absolute error")
    ap.add_argument("-rtol", type=float, default=1e-4, help="relative error")
    ap.add_argument("-v", action="store_true", help="visualize error")
    ap.add_argument("-p", "--ports", type=str, default="OUT", help="dump ports: OUT | ALL")
    ap.add_argument("dumps", type=str, default="", nargs="+", help="dump folders or files")
    opts = ap.parse_args()
    print(f"Read model {opts.m}...")
    core = Core()
    model = core.read_model(opts.m)
    dumps = opts.dumps
    # one argument: generate a dump; two: compare folders or single files
    if len(dumps) == 1:
        dump_tensors(core, model, dumps[0], opts.ports)
        return
    assert(len(dumps) == 2)
    if (os.path.isdir(dumps[0])):
        compare_dumps(model, opts.atol, opts.rtol, opts.v, dumps[0], dumps[1])
    else:
        compare_dump_file(dumps[0], dumps[1], opts.v)
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"struct.calcsize",
"numpy.prod",
"numpy.iinfo",
"numpy.array",
"sys.exit",
"matplotlib.widgets.Slider",
"struct.unpack_from",
"numpy.mean",
"os.listdir",
"numpy.reshape",
"argparse.ArgumentParser",
"numpy.where",
"os.path.isdir",
"numpy.frombuffer",
"sys.stdout.flush",
"numpy.dtype",
... | [((1864, 1873), 'openvino.runtime.Tensor', 'Tensor', (['a'], {}), '(a)\n', (1870, 1873), False, 'from openvino.runtime import Core, Model, Tensor, PartialShape, Type\n'), ((2013, 2051), 'numpy.load', 'np.load', (['input_file'], {'allow_pickle': '(True)'}), '(input_file, allow_pickle=True)\n', (2020, 2051), True, 'import numpy as np\n'), ((6746, 6755), 'openvino.runtime.passes.Manager', 'Manager', ([], {}), '()\n', (6753, 6755), False, 'from openvino.runtime.passes import Manager\n'), ((7177, 7191), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7189, 7191), True, 'import matplotlib.pyplot as plt\n'), ((7897, 7932), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.1, 0.25, 0.0225, 0.63]'], {}), '([0.1, 0.25, 0.0225, 0.63])\n', (7905, 7932), True, 'import matplotlib.pyplot as plt\n'), ((7949, 8075), 'matplotlib.widgets.Slider', 'Slider', ([], {'ax': 'ax_ch_slider', 'label': '"""Channels"""', 'valmin': '(0)', 'valmax': 'vis_abs.shape[0]', 'valinit': '(0)', 'valstep': '(1)', 'orientation': '"""vertical"""'}), "(ax=ax_ch_slider, label='Channels', valmin=0, valmax=vis_abs.shape[0],\n valinit=0, valstep=1, orientation='vertical')\n", (7955, 8075), False, 'from matplotlib.widgets import Slider, Button\n'), ((8942, 8952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8950, 8952), True, 'import matplotlib.pyplot as plt\n'), ((12764, 12781), 'numpy.amax', 'np.amax', (['diff_abs'], {}), '(diff_abs)\n', (12771, 12781), True, 'import numpy as np\n'), ((12796, 12825), 'numpy.where', 'np.where', (['(diff_abs >= max_abs)'], {}), '(diff_abs >= max_abs)\n', (12804, 12825), True, 'import numpy as np\n'), ((13232, 13274), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""cpu_cross_check"""'], {}), "('cpu_cross_check')\n", (13255, 13274), False, 'import argparse\n'), ((13877, 13883), 'openvino.runtime.Core', 'Core', ([], {}), '()\n', (13881, 13883), False, 'from openvino.runtime import Core, Model, Tensor, PartialShape, Type\n'), ((1258, 1272), 
'os.makedirs', 'os.makedirs', (['d'], {}), '(d)\n', (1269, 1272), False, 'import os, errno\n'), ((3043, 3063), 'struct.calcsize', 'struct.calcsize', (['fmt'], {}), '(fmt)\n', (3058, 3063), False, 'import struct\n'), ((3205, 3444), 'struct.pack', 'struct.pack', (['fmt', 'magic', 'ver', 'precision', 'ndims', 'dims[0]', 'dims[1]', 'dims[2]', 'dims[3]', 'dims[4]', 'dims[5]', 'dims[6]', 'scaling_axis', 'reserved[0]', 'reserved[1]', 'reserved[2]', 'data_offset', 'data_size', 'scaling_data_offset', 'scaling_data_size'], {}), '(fmt, magic, ver, precision, ndims, dims[0], dims[1], dims[2],\n dims[3], dims[4], dims[5], dims[6], scaling_axis, reserved[0], reserved\n [1], reserved[2], data_offset, data_size, scaling_data_offset,\n scaling_data_size)\n', (3216, 3444), False, 'import struct\n'), ((7582, 7599), 'numpy.amax', 'np.amax', (['diff_img'], {}), '(diff_img)\n', (7589, 7599), True, 'import numpy as np\n'), ((8274, 8292), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8290, 8292), False, 'import sys\n'), ((9389, 9404), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (9399, 9404), False, 'import os, errno\n'), ((12717, 12748), 'numpy.abs', 'np.abs', (['(ieb1.value - ieb2.value)'], {}), '(ieb1.value - ieb2.value)\n', (12723, 12748), True, 'import numpy as np\n'), ((12840, 12858), 'numpy.abs', 'np.abs', (['ieb2.value'], {}), '(ieb2.value)\n', (12846, 12858), True, 'import numpy as np\n'), ((14070, 14098), 'os.path.isdir', 'os.path.isdir', (['args.dumps[0]'], {}), '(args.dumps[0])\n', (14083, 14098), False, 'import os, errno\n'), ((1617, 1632), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (1625, 1632), True, 'import numpy as np\n'), ((1729, 1754), 'numpy.random.SeedSequence', 'np.random.SeedSequence', (['(0)'], {}), '(0)\n', (1751, 1754), True, 'import numpy as np\n'), ((3084, 3106), 'numpy.prod', 'np.prod', (['nparray.shape'], {}), '(nparray.shape)\n', (3091, 3106), True, 'import numpy as np\n'), ((3782, 3835), 'struct.unpack_from', 
'struct.unpack_from', (['"""@4sHBB7IB3BLLLL"""', 'data'], {'offset': '(0)'}), "('@4sHBB7IB3BLLLL', data, offset=0)\n", (3800, 3835), False, 'import struct\n'), ((4433, 4532), 'numpy.array', 'np.array', (['[self.dims0, self.dims1, self.dims2, self.dims3, self.dims4, self.dims5,\n self.dims6]'], {}), '([self.dims0, self.dims1, self.dims2, self.dims3, self.dims4, self.\n dims5, self.dims6])\n', (4441, 4532), True, 'import numpy as np\n'), ((4601, 4671), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'dtype', 'count': 'count', 'offset': 'self.data_offset'}), '(data, dtype=dtype, count=count, offset=self.data_offset)\n', (4614, 4671), True, 'import numpy as np\n'), ((4699, 4732), 'numpy.reshape', 'np.reshape', (['self.value', 'self.dims'], {}), '(self.value, self.dims)\n', (4709, 4732), True, 'import numpy as np\n'), ((7329, 7346), 'numpy.amax', 'np.amax', (['diff_img'], {}), '(diff_img)\n', (7336, 7346), True, 'import numpy as np\n'), ((8387, 8398), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8395, 8398), False, 'import sys\n'), ((10242, 10276), 'os.path.join', 'os.path.join', (['dump_dir1', 'ieb_file1'], {}), '(dump_dir1, ieb_file1)\n', (10254, 10276), False, 'import os, errno\n'), ((10297, 10331), 'os.path.join', 'os.path.join', (['dump_dir2', 'ieb_file2'], {}), '(dump_dir2, ieb_file2)\n', (10309, 10331), False, 'import os, errno\n'), ((10518, 10575), 'numpy.allclose', 'np.allclose', (['ieb1.value', 'ieb2.value'], {'atol': 'atol', 'rtol': 'rtol'}), '(ieb1.value, ieb2.value, atol=atol, rtol=rtol)\n', (10529, 10575), True, 'import numpy as np\n'), ((10740, 10768), 'numpy.where', 'np.where', (['(diff_abs >= thresh)'], {}), '(diff_abs >= thresh)\n', (10748, 10768), True, 'import numpy as np\n'), ((10792, 10814), 'numpy.amax', 'np.amax', (['diff_abs[idx]'], {}), '(diff_abs[idx])\n', (10799, 10814), True, 'import numpy as np\n'), ((11442, 11464), 'numpy.amax', 'np.amax', (['diff_abs[idx]'], {}), '(diff_abs[idx])\n', (11449, 11464), True, 'import numpy as 
np\n'), ((11487, 11521), 'numpy.where', 'np.where', (['(diff_abs[idx] >= max_abs)'], {}), '(diff_abs[idx] >= max_abs)\n', (11495, 11521), True, 'import numpy as np\n'), ((13012, 13029), 'numpy.amin', 'np.amin', (['diff_abs'], {}), '(diff_abs)\n', (13019, 13029), True, 'import numpy as np\n'), ((13110, 13127), 'numpy.mean', 'np.mean', (['diff_abs'], {}), '(diff_abs)\n', (13117, 13127), True, 'import numpy as np\n'), ((13129, 13145), 'numpy.std', 'np.std', (['diff_abs'], {}), '(diff_abs)\n', (13135, 13145), True, 'import numpy as np\n'), ((1496, 1514), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (1504, 1514), True, 'import numpy as np\n'), ((1520, 1538), 'numpy.iinfo', 'np.iinfo', (['np.uint8'], {}), '(np.uint8)\n', (1528, 1538), True, 'import numpy as np\n'), ((7716, 7733), 'numpy.amax', 'np.amax', (['diff_img'], {}), '(diff_img)\n', (7723, 7733), True, 'import numpy as np\n'), ((11288, 11311), 'numpy.prod', 'np.prod', (['diff_abs.shape'], {}), '(diff_abs.shape)\n', (11295, 11311), True, 'import numpy as np\n'), ((8551, 8568), 'numpy.amax', 'np.amax', (['diff_img'], {}), '(diff_img)\n', (8558, 8568), True, 'import numpy as np\n'), ((8788, 8805), 'numpy.amax', 'np.amax', (['diff_img'], {}), '(diff_img)\n', (8795, 8805), True, 'import numpy as np\n'), ((10703, 10721), 'numpy.abs', 'np.abs', (['ieb2.value'], {}), '(ieb2.value)\n', (10709, 10721), True, 'import numpy as np\n'), ((11544, 11562), 'numpy.abs', 'np.abs', (['ieb2.value'], {}), '(ieb2.value)\n', (11550, 11562), True, 'import numpy as np\n'), ((11755, 11777), 'numpy.amin', 'np.amin', (['diff_abs[idx]'], {}), '(diff_abs[idx])\n', (11762, 11777), True, 'import numpy as np\n'), ((11875, 11897), 'numpy.mean', 'np.mean', (['diff_abs[idx]'], {}), '(diff_abs[idx])\n', (11882, 11897), True, 'import numpy as np\n'), ((11899, 11920), 'numpy.std', 'np.std', (['diff_abs[idx]'], {}), '(diff_abs[idx])\n', (11905, 11920), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from math import factorial
from astar import astar, create_grid, Node
from dijkstras import shortest_path_mat
from scipy import ndimage
def plot_fr_field_2d(f,delay=None):
	"""Plot each neuron's 2D firing-rate map.

	Neurons with no activity (max <= 0) are skipped.  If *delay* is given,
	each figure is flashed non-blocking for *delay* seconds; otherwise each
	map is saved to 'occfr<i>.png' instead of being shown.
	"""
	for i in range(len(f)):
		# np.max already reduces over every axis; the original nested
		# np.max(np.max(...)) call was redundant.
		maxval = np.max(f[i])
		if maxval > 0:
			plt.title("Neuron %d" % i)
			plt.imshow(f[i],origin='lower')
			if delay:
				plt.show(block=False)
				plt.pause(delay)
			else:
				#plt.show()
				plt.savefig('occfr%d.png' % i)
		plt.close()
def plot_fr_field_1d(f,delay=None):
	"""Plot each neuron's 1D firing-rate profile, normalised to its peak.

	Silent neurons (max <= 0) are skipped.  With *delay* each plot is
	flashed non-blocking for that many seconds; without it, plotting
	blocks until the window is closed.
	"""
	for idx, rates in enumerate(f):
		peak = np.max(rates)
		if peak > 0:
			plt.title("Neuron %d" % idx)
			plt.plot(rates/peak)
			if delay:
				plt.show(block=False)
				plt.pause(delay)
				plt.cla()
			else:
				plt.show()
		plt.close()
def find_closest(A, targets):
	"""For each target value, return the index of the closest entry in the
	sorted array *A* (ties go to the right neighbour)."""
	# candidate insertion points, clamped so both neighbours exist
	cand = np.clip(A.searchsorted(targets), 1, len(A)-1)
	lo = A[cand-1]
	hi = A[cand]
	# step back one index wherever the left neighbour is strictly closer
	return cand - (targets - lo < hi - targets)
def sum_neighbours(M,i,j):
	"""Sum the 4-connected neighbours of cell (row j, column i) of 2D array M,
	skipping neighbours that fall outside the array."""
	(h,w) = M.shape
	total = 0
	# left, right, up, down -- same accumulation order as reading the cell list
	for dj, di in ((0, -1), (0, 1), (-1, 0), (1, 0)):
		nj, ni = j + dj, i + di
		if 0 <= nj < h and 0 <= ni < w:
			total += M[nj, ni]
	return total
def matmax(M):
	"""Return (max value, np.array([row, col]) of the first maximum) of a 2D array.

	For an empty array this returns (-inf, [-1, -1]), matching the original
	scanning implementation's fall-through behaviour.
	"""
	if M.size == 0:
		return -np.inf, [-1, -1]
	# np.argmax scans in row-major order, which matches the original nested
	# loop's first-strictly-greater tie-breaking (first occurrence wins),
	# and replaces the O(h*w) Python-level double loop with a C-level scan.
	idx = np.unravel_index(np.argmax(M), M.shape)
	return M[idx], np.array(idx)
def vecmax(vec):
	"""Return the maximum of *vec* together with the index of its first occurrence."""
	peak = np.max(vec)
	peak_at = np.argmax(vec)
	return peak, peak_at
class Decoder:
	"""Bayesian position decoder for place-cell recordings.

	``pos`` rows look like [t, x, y] (inferred from the column usage below --
	column 0 is compared against time intervals, columns 1:3 are binned
	spatially; confirm against the caller).  ``spk`` is a list of per-neuron
	spike-time arrays.  The constructor builds an occupancy prior over a
	discretised 2D map; if ``lin_point`` is given, positions are additionally
	linearised to 1D via shortest-path distance from that point.
	"""
	def __init__(self,pos,spk,spatial_bin_length,lin_point=None):
		self.pos = pos
		self.spk = spk
		# discretisation parameters
		#self.map_dimensions = np.array([17,31])
		#map_size = np.array([max(self.pos[:,2]),max(self.pos[:,1])])
		#self.spatial_bin_size = map_size/(self.map_dimensions-1)
		self.spatial_bin_length = spatial_bin_length
		self.spatial_bin_size = np.array([spatial_bin_length, self.spatial_bin_length])
		map_size = np.array([max(self.pos[:,2]),max(self.pos[:,1])])
		self.map_dimensions = np.ceil((map_size/self.spatial_bin_size)+1).astype(int)
		print(self.map_dimensions)
		# calculate prior and determine tranversable areas of map
		self.p_x = self.occ_mat(self.pos,1) # prior probability (occupancy normalised to probability)
		posmask = self.p_x > 0 # ah pos-bin that was accessed marked accessible
		# a bin counts as accessible if it was visited, or if more than 2 of
		# its 4-connected neighbours were visited (fills small gaps)
		self.accmask = np.array(
			[[posmask[j,i] or sum_neighbours(posmask,i,j)>2 for i in range(posmask.shape[1])] for j in range(posmask.shape[0])]
		)
		# only consider the biggest island (to prevent invalid path in shortest path calculation)
		labeled_array, num_features = ndimage.label(self.accmask, np.ones((3,3)))
		label_counts = np.array([np.sum(labeled_array==i) for i in range(1,num_features+1)])
		self.accmask = (labeled_array==np.argmax(label_counts)+1).astype(int)
		#plt.imshow(self.accmask)
		#plt.show()
		# convert spatial inform to 1D if linearisation function has been provided
		if lin_point is not None:
			self.dist1d = np.round(shortest_path_mat(self.accmask,lin_point))#.astype(int)
			self.lim1d = np.nanmax(self.dist1d[np.isfinite(self.dist1d)]).astype(int)+1
			self.p_x1d = self.occ_vec(self.pos,1) # prior probability (occupancy normalised to probability)
		'''
		for r in range(self.dist1d.shape[0]):
			for c in range(self.dist1d.shape[1]):
				if np.isnan(self.dist1d[r,c]):
					print('.',end=' ')
				elif np.isinf(self.dist1d[r,c]):
					print('X',end=' ')
				else:
					print('%.0f' % (self.dist1d[r,c]/10),end=' ')
			print()
		print(self.occ_vec(self.pos))
		print(self.p_x1d)
		plt.imshow(self.dist1d,origin='lower')
		plt.show()
		'''
	# ===========================
	# === AUXILIARY FUNCTIONS ===
	# ===========================
	# get positions within interval
	def get_pos(self,interval):
		return self.pos[np.logical_and(interval[0]<=self.pos[:,0], self.pos[:,0]<=interval[1]),:]
	# get spike times within interval
	def get_spike_times(self,i,interval):
		return self.spk[i][np.logical_and(interval[0]<=self.spk[i], self.spk[i]<=interval[1])]
	# get number of spikes for each neuron within interval
	def get_n(self,interval):
		return np.array([self.get_spike_times(i,interval).size for i in range(len(self.spk))])
	# maintains input times but uses nearest positions
	def approx_pos_at_time(self,times):
		return np.append(np.array([times]).T, self.pos[find_closest(self.pos[:,0], times), 1:3], axis=1)
	# uses times associated with nearest positions
	def nearest_pos_at_time(self,times):
		return self.pos[find_closest(self.pos[:,0], times),:]
	# convert position to 2D co-ordinate
	def pos_to_x(self,pos):
		# swap (x, y) columns to (row, col) order before binning
		pos_r = np.append([pos[:,1]],[pos[:,0]],axis=0).T
		return np.round(pos_r/self.spatial_bin_size).astype(int)
	# convert 2D co-ordinate to 1D co-ordinate
	def x_to_x1d(self,xs):
		if xs.ndim == 2:
			return np.array(list(map(lambda x: self.dist1d[tuple(x)], xs)))
		else: #xs.ndim == 1
			return self.dist1d[tuple(xs)]
	# convert position to 1D co-ordinate
	def pos_to_x1d(self,pos):
		return self.x_to_x1d(self.pos_to_x(pos))
	# 2D occupancy map
	def occ_mat(self,pos,a=None):
		# counts position samples per spatial bin; if *a* is given the counts
		# are rescaled so the whole map sums to *a*
		bin_pos = self.pos_to_x(pos[:,1:3])
		occ = np.zeros(self.map_dimensions)
		for j in range(0,self.map_dimensions[0]):
			for i in range(0,self.map_dimensions[1]):
				occ[j,i] = np.sum(np.all(bin_pos == [j,i],axis=1))
		if a != None:
			occ = (a/np.sum(np.sum(occ)))*occ
		occ[np.isnan(occ)] = 0
		return occ
	# 1D occupancy map
	def occ_vec(self,pos,a=None):
		# collapses the 2D occupancy map onto linearised-distance bins
		rows,cols = self.dist1d.shape
		occ = self.occ_mat(pos)
		vec = np.array([
			sum([occ[r,c] for r in range(rows) for c in range(cols) if self.dist1d[r,c]==dist])
			for dist in range(self.lim1d)
		])
		if a != None:
			vec = (a/np.sum(vec))*vec
		vec[np.isnan(vec)] = 0
		return vec
	# =====================================
	# === PARAMETER GENERATOR FUNCTIONS ===
	# =====================================
	# returns occupancy normalised 2D fire rate map for each neuron
	def calc_f_2d(self,interval):
		# calculate (approximate) occupancy (total time spent in location bins)
		print("calculating 2D occupancy map...", end="")
		occ = self.occ_mat(self.get_pos(interval),interval[1]-interval[0]) # count no. ticks in each pos-bin & norm. by total dur.
		posmask = occ > 0 # ah pos-bin that was accessed marked accessible
		print("COMPLETE.")
		# approximate position of neuron firing
		f = np.empty((len(self.spk),self.map_dimensions[0],self.map_dimensions[1]))
		for i in range(len(self.spk)):
			print("processing neurons: %d / %d\r" % (i, len(self.spk)), end="")
			tspk = self.get_spike_times(i,interval) # get times neuron spiked during interval
			f[i] = self.occ_mat(self.approx_pos_at_time(tspk)) # count number of spikes occuring at each pos-bin
			f[i][posmask] = f[i][posmask] / occ[posmask] # fr = spike count / time spent in each pos-bin
			f[i] = gaussian_filter(f[i],1.0)*self.accmask # blur a little
		print("processing neurons...COMPLETE.")
		print("all done.")
		return f
	# returns occupancy normalised 1D fire rate map for each neuron
	def calc_f_1d(self,interval):
		# calculate (approximate) occupancy (total time spent in location bins)
		print("calculating 1D occupancy map...", end="")
		occ = self.occ_vec(self.get_pos(interval),interval[1]-interval[0]) # count no. ticks in each pos-bin & norm. by total dur.
		posmask = occ > 0
		print("COMPLETE.")
		# approximate position of neuron firing
		f = np.empty((len(self.spk),len(occ)))
		for i in range(len(self.spk)):
			print("processing neurons: %d / %d\r" % (i, len(self.spk)), end="")
			tspk = self.get_spike_times(i,interval) # get times neuron spiked during interval
			f[i] = self.occ_vec(self.approx_pos_at_time(tspk)) # count number of spikes occuring at each pos-bin
			f[i][posmask] = f[i][posmask] / occ[posmask] # count number of spikes occuring at each pos-bin
			f[i] = gaussian_filter(f[i],1.0) # blur a little
		print("processing neurons...COMPLETE.")
		print("all done.")
		return f
	# ===========================
	# === OUTPUT FUNCTIONS 2D ===
	# ===========================
	# per-position likelihood
	def prob_n_given_x(self,n,x,f,tau):
		# Poisson likelihood of spike-count vector n at map cell x over window tau
		# NOTE(review): this iterates range(len(ngtz)) but still indexes n[i]
		# and f[i] directly -- it looks like it was meant to iterate the
		# indices where n > 0; confirm against the 1D variant below.
		xidx = tuple(x)
		ngtz = n[n > 0]
		return np.prod([((tau*f[i][xidx])**n[i]/factorial(n[i]))*np.exp(-tau*f[i][xidx]) for i in range(len(ngtz))])
	# likelihood
	def prob_n_given_X(self,n,f,tau):
		return np.array(
			[[self.prob_n_given_x(n,(j,i),f,tau) for i in range(self.map_dimensions[1])] for j in range(self.map_dimensions[0])]
		)
	# posterior
	def prob_X_given_n(self,n,f,tau):
		# Bayes: posterior = prior * likelihood, renormalised to sum to 1
		prob = self.p_x*self.prob_n_given_X(n,f,tau)
		#prob = self.prob_n_given_X(n,f,tau)
		#prob = self.p_x
		C = 1/np.sum(np.sum(prob)) if np.sum(np.sum(prob)) > 0 else 0
		return C*prob
	# expectation
	def ex_n_given_x(self,x,f,tau):
		xidx = tuple(x)
		return np.array([f[i][xidx]*tau for i in range(len(self.spk))])
	# ===========================
	# === OUTPUT FUNCTIONS 1D ===
	# ===========================
	# per-position likelihood
	def prob_n_given_x1d(self,n,x1d,f,tau):
		# NOTE(review): same range(len(ngtz)) indexing question as the 2D variant
		ngtz = n[n > 0]
		return np.prod([((tau*f[i][x1d])**n[i]/factorial(n[i]))*np.exp(-tau*f[i][x1d]) for i in range(len(ngtz))])
	# likelihood
	def prob_n_given_X1d(self,n,f,tau):
		return np.array(
			[self.prob_n_given_x1d(n,x1d,f,tau) for x1d in range(self.lim1d)]
		)
	# posterior
	def prob_X1d_given_n(self,n,f,tau):
		prob = self.p_x1d*self.prob_n_given_X1d(n,f,tau)
		C = 1/np.sum(prob) if np.sum(prob) > 0 else 0
		return C*prob
	# expectation
	def ex_n_given_x1d(self,x1d,f,tau):
		return np.array([f[i][x1d]*tau for i in range(len(self.spk))])
	# ==================
	# === CONVERSION ===
	# ==================
	# 2D to 1D conversion
	def prob_X2_to_X1(self,p_x):
		# pool 2D probability mass into linearised-distance bins, then renormalise
		prob = np.zeros(self.lim1d)
		rows,cols = p_x.shape
		for r in range(rows):
			for c in range(cols):
				x1d = self.x_to_x1d(np.array([r,c]))
				if not np.isnan(x1d) and np.isfinite(x1d):
					prob[int(x1d)] += p_x[r,c]
		C = 1/np.sum(prob) if np.sum(prob) > 0 else 0
		return C*prob
	# ======================
	# === TEST FUNCTIONS ===
	# ======================
	# returns number of spikes and position within an interval
	def approx_n_and_x(self,interval,time_bin_size):
		num_time_bins = int(np.round((interval[1]-interval[0])/time_bin_size))
		pos = self.get_pos(interval)
		tidx = np.floor((pos[:,0]-np.min(pos[:,0]))/time_bin_size)
		x = self.pos_to_x(np.array([np.mean(pos[tidx==i,1:3],axis=0) for i in range(num_time_bins)]))
		n = np.empty((len(self.spk),num_time_bins))
		for i in range(0,len(self.spk)):
			#print("processing neurons: %d / %d\r" % (i, len(self.spk)), end="")
			tspk = self.get_spike_times(i,interval)
			if tspk.size: # check is not empty
				tidx = np.floor((tspk-np.min(tspk))/time_bin_size)
				n[i] = np.array([np.sum(tidx==i) for i in range(num_time_bins)])
			else:
				n[i] = np.zeros(num_time_bins)
		#print("processing neurons...COMPLETE.")
		#print("all done.")
		return (n, x)
	# picks a random time and associated position within an interval
	def random_t_x(self,interval):
		rand = np.random.uniform(interval[0],interval[1])
		pos = self.nearest_pos_at_time([rand])
		t = pos[0,0]
		x = self.pos_to_x(pos[:,1:3]).flatten()
		return (t,x)
| [
"scipy.ndimage.filters.gaussian_filter",
"numpy.array",
"numpy.isfinite",
"matplotlib.pyplot.imshow",
"numpy.mean",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.min",
"matplotlib.pyplot.cla",
"numpy.round",
"numpy.ceil",
"matplotlib.pyplot.savefig",... | [((563, 574), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (572, 574), True, 'import matplotlib.pyplot as plt\n'), ((854, 865), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (863, 865), True, 'import matplotlib.pyplot as plt\n'), ((651, 663), 'numpy.max', 'np.max', (['f[i]'], {}), '(f[i])\n', (657, 663), True, 'import numpy as np\n'), ((1473, 1484), 'numpy.max', 'np.max', (['vec'], {}), '(vec)\n', (1479, 1484), True, 'import numpy as np\n'), ((1486, 1500), 'numpy.argmax', 'np.argmax', (['vec'], {}), '(vec)\n', (1495, 1500), True, 'import numpy as np\n'), ((1902, 1957), 'numpy.array', 'np.array', (['[spatial_bin_length, self.spatial_bin_length]'], {}), '([spatial_bin_length, self.spatial_bin_length])\n', (1910, 1957), True, 'import numpy as np\n'), ((5333, 5362), 'numpy.zeros', 'np.zeros', (['self.map_dimensions'], {}), '(self.map_dimensions)\n', (5341, 5362), True, 'import numpy as np\n'), ((10161, 10181), 'numpy.zeros', 'np.zeros', (['self.lim1d'], {}), '(self.lim1d)\n', (10169, 10181), True, 'import numpy as np\n'), ((11556, 11599), 'numpy.random.uniform', 'np.random.uniform', (['interval[0]', 'interval[1]'], {}), '(interval[0], interval[1])\n', (11573, 11599), True, 'import numpy as np\n'), ((321, 333), 'numpy.max', 'np.max', (['f[i]'], {}), '(f[i])\n', (327, 333), True, 'import numpy as np\n'), ((360, 386), 'matplotlib.pyplot.title', 'plt.title', (["('Neuron %d' % i)"], {}), "('Neuron %d' % i)\n", (369, 386), True, 'import matplotlib.pyplot as plt\n'), ((393, 425), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f[i]'], {'origin': '"""lower"""'}), "(f[i], origin='lower')\n", (403, 425), True, 'import matplotlib.pyplot as plt\n'), ((689, 715), 'matplotlib.pyplot.title', 'plt.title', (["('Neuron %d' % i)"], {}), "('Neuron %d' % i)\n", (698, 715), True, 'import matplotlib.pyplot as plt\n'), ((722, 745), 'matplotlib.pyplot.plot', 'plt.plot', (['(f[i] / maxval)'], {}), '(f[i] / maxval)\n', (730, 745), True, 
'import matplotlib.pyplot as plt\n'), ((2700, 2715), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2707, 2715), True, 'import numpy as np\n'), ((4128, 4198), 'numpy.logical_and', 'np.logical_and', (['(interval[0] <= self.spk[i])', '(self.spk[i] <= interval[1])'], {}), '(interval[0] <= self.spk[i], self.spk[i] <= interval[1])\n', (4142, 4198), True, 'import numpy as np\n'), ((4791, 4834), 'numpy.append', 'np.append', (['[pos[:, 1]]', '[pos[:, 0]]'], {'axis': '(0)'}), '([pos[:, 1]], [pos[:, 0]], axis=0)\n', (4800, 4834), True, 'import numpy as np\n'), ((5582, 5595), 'numpy.isnan', 'np.isnan', (['occ'], {}), '(occ)\n', (5590, 5595), True, 'import numpy as np\n'), ((5946, 5959), 'numpy.isnan', 'np.isnan', (['vec'], {}), '(vec)\n', (5954, 5959), True, 'import numpy as np\n'), ((8226, 8252), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['f[i]', '(1.0)'], {}), '(f[i], 1.0)\n', (8241, 8252), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((10682, 10735), 'numpy.round', 'np.round', (['((interval[1] - interval[0]) / time_bin_size)'], {}), '((interval[1] - interval[0]) / time_bin_size)\n', (10690, 10735), True, 'import numpy as np\n'), ((449, 470), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (457, 470), True, 'import matplotlib.pyplot as plt\n'), ((473, 489), 'matplotlib.pyplot.pause', 'plt.pause', (['delay'], {}), '(delay)\n', (482, 489), True, 'import matplotlib.pyplot as plt\n'), ((530, 560), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('occfr%d.png' % i)"], {}), "('occfr%d.png' % i)\n", (541, 560), True, 'import matplotlib.pyplot as plt\n'), ((768, 789), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (776, 789), True, 'import matplotlib.pyplot as plt\n'), ((792, 808), 'matplotlib.pyplot.pause', 'plt.pause', (['delay'], {}), '(delay)\n', (801, 808), True, 'import matplotlib.pyplot as plt\n'), ((811, 820), 'matplotlib.pyplot.cla', 'plt.cla', ([], 
{}), '()\n', (818, 820), True, 'import matplotlib.pyplot as plt\n'), ((841, 851), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (849, 851), True, 'import matplotlib.pyplot as plt\n'), ((1406, 1422), 'numpy.array', 'np.array', (['[j, i]'], {}), '([j, i])\n', (1414, 1422), True, 'import numpy as np\n'), ((2049, 2094), 'numpy.ceil', 'np.ceil', (['(map_size / self.spatial_bin_size + 1)'], {}), '(map_size / self.spatial_bin_size + 1)\n', (2056, 2094), True, 'import numpy as np\n'), ((2745, 2771), 'numpy.sum', 'np.sum', (['(labeled_array == i)'], {}), '(labeled_array == i)\n', (2751, 2771), True, 'import numpy as np\n'), ((3064, 3106), 'dijkstras.shortest_path_mat', 'shortest_path_mat', (['self.accmask', 'lin_point'], {}), '(self.accmask, lin_point)\n', (3081, 3106), False, 'from dijkstras import shortest_path_mat\n'), ((3954, 4030), 'numpy.logical_and', 'np.logical_and', (['(interval[0] <= self.pos[:, 0])', '(self.pos[:, 0] <= interval[1])'], {}), '(interval[0] <= self.pos[:, 0], self.pos[:, 0] <= interval[1])\n', (3968, 4030), True, 'import numpy as np\n'), ((4486, 4503), 'numpy.array', 'np.array', (['[times]'], {}), '([times])\n', (4494, 4503), True, 'import numpy as np\n'), ((4844, 4883), 'numpy.round', 'np.round', (['(pos_r / self.spatial_bin_size)'], {}), '(pos_r / self.spatial_bin_size)\n', (4852, 4883), True, 'import numpy as np\n'), ((7163, 7189), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['f[i]', '(1.0)'], {}), '(f[i], 1.0)\n', (7178, 7189), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((9858, 9870), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (9864, 9870), True, 'import numpy as np\n'), ((9842, 9854), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (9848, 9854), True, 'import numpy as np\n'), ((10421, 10433), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (10427, 10433), True, 'import numpy as np\n'), ((10405, 10417), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (10411, 10417), True, 'import 
numpy as np\n'), ((11333, 11356), 'numpy.zeros', 'np.zeros', (['num_time_bins'], {}), '(num_time_bins)\n', (11341, 11356), True, 'import numpy as np\n'), ((5483, 5516), 'numpy.all', 'np.all', (['(bin_pos == [j, i])'], {'axis': '(1)'}), '(bin_pos == [j, i], axis=1)\n', (5489, 5516), True, 'import numpy as np\n'), ((5921, 5932), 'numpy.sum', 'np.sum', (['vec'], {}), '(vec)\n', (5927, 5932), True, 'import numpy as np\n'), ((8635, 8660), 'numpy.exp', 'np.exp', (['(-tau * f[i][xidx])'], {}), '(-tau * f[i][xidx])\n', (8641, 8660), True, 'import numpy as np\n'), ((9092, 9104), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (9098, 9104), True, 'import numpy as np\n'), ((9068, 9080), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (9074, 9080), True, 'import numpy as np\n'), ((9522, 9546), 'numpy.exp', 'np.exp', (['(-tau * f[i][x1d])'], {}), '(-tau * f[i][x1d])\n', (9528, 9546), True, 'import numpy as np\n'), ((10290, 10306), 'numpy.array', 'np.array', (['[r, c]'], {}), '([r, c])\n', (10298, 10306), True, 'import numpy as np\n'), ((10340, 10356), 'numpy.isfinite', 'np.isfinite', (['x1d'], {}), '(x1d)\n', (10351, 10356), True, 'import numpy as np\n'), ((10796, 10813), 'numpy.min', 'np.min', (['pos[:, 0]'], {}), '(pos[:, 0])\n', (10802, 10813), True, 'import numpy as np\n'), ((10861, 10897), 'numpy.mean', 'np.mean', (['pos[tidx == i, 1:3]'], {'axis': '(0)'}), '(pos[tidx == i, 1:3], axis=0)\n', (10868, 10897), True, 'import numpy as np\n'), ((2840, 2863), 'numpy.argmax', 'np.argmax', (['label_counts'], {}), '(label_counts)\n', (2849, 2863), True, 'import numpy as np\n'), ((5556, 5567), 'numpy.sum', 'np.sum', (['occ'], {}), '(occ)\n', (5562, 5567), True, 'import numpy as np\n'), ((8618, 8633), 'math.factorial', 'factorial', (['n[i]'], {}), '(n[i])\n', (8627, 8633), False, 'from math import factorial\n'), ((9505, 9520), 'math.factorial', 'factorial', (['n[i]'], {}), '(n[i])\n', (9514, 9520), False, 'from math import factorial\n'), ((10322, 10335), 'numpy.isnan', 
'np.isnan', (['x1d'], {}), '(x1d)\n', (10330, 10335), True, 'import numpy as np\n'), ((11258, 11275), 'numpy.sum', 'np.sum', (['(tidx == i)'], {}), '(tidx == i)\n', (11264, 11275), True, 'import numpy as np\n'), ((11204, 11216), 'numpy.min', 'np.min', (['tspk'], {}), '(tspk)\n', (11210, 11216), True, 'import numpy as np\n'), ((3161, 3185), 'numpy.isfinite', 'np.isfinite', (['self.dist1d'], {}), '(self.dist1d)\n', (3172, 3185), True, 'import numpy as np\n')] |
import numpy as np
import numpy.ma as ma
from PIL import Image
from dataclasses import dataclass
from skimage.transform import resize
import time
from typing import Tuple
nsamples = 2  # supersampling factor per axis (full-scene anti-aliasing)
width, height = 600, 400  # final output image size in pixels
# virtual width/height for FSAA
vwidth, vheight = width * nsamples, height * nsamples
bg_color = (0.2, 0.7, 0.8)  # RGB background colour, components in [0, 1]
# TODO: np.seterr(all='raise') and work out more numerical issues.
# FINISH all TODO/FIXME LINES
@dataclass
class Sphere:
    center: np.ndarray
    radius: float

    def ray_intersect(self, orig, direction) -> ma.array:
        '''
        Intersect a bundle of rays with this sphere.

        :returns: array of distances (floats), with the non-hits masked out
        '''
        to_center = self.center - orig
        # distance along each ray to its closest approach to the center
        along = v3_dots(to_center, direction)
        # squared distance from the center to that closest-approach point
        perp_sq = v3_dots(to_center, to_center) - along**2
        miss = perp_sq > self.radius**2
        half_chord = ma.sqrt(ma.array(self.radius**2 - perp_sq, mask=miss))
        near = along - half_chord
        far = along + half_chord
        behind = near < 0
        near[behind] = far[behind]  # near hit behind the origin: fall back to far hit
        near.mask |= near < 0       # both hits behind: no intersection
        return ma.array(near)
@dataclass
class Plane:
    corner: np.ndarray
    a: np.ndarray
    b: np.ndarray

    def ray_intersect(self, orig, direction) -> ma.array:
        '''
        Intersect rays with the parallelogram spanned by edge vectors ``a``
        and ``b`` anchored at ``corner``.

        :returns: array of distances (floats), with the non-hits masked out
        '''
        normal = np.cross(self.a, self.b)
        ndotray = ma.array(v3_dots(normal, direction))
        # rays (nearly) parallel to the plane can never hit it
        ndotray.mask = ma.abs(ndotray) < 1e-3
        d = v3_dots(normal, orig)
        # BUG FIX: the ray/plane solution is t = dot(n, corner - orig) / dot(n, dir)
        # = (dot(n, corner) - dot(n, orig)) / ndotray.  The original ADDED
        # d (= dot(n, orig)), which is only correct for rays cast from the
        # origin (0, 0, 0) and was wrong for shadow/reflection/refraction
        # rays, whose origins are surface points.
        t = (v3_dots(normal, self.corner) - d) / ndotray
        t.mask |= t < 0
        pt = orig + t.reshape(-1, 1) * direction
        # TODO: try an alternative to find points in a parallelogram (or plane)
        # if that's easier. Maybe project onto a plane then use a parallelogram
        # as a basis in 2d?
        # https://stackoverflow.com/questions/59128744/raytracing-ray-vs-parallelogram
        # edge 0
        t.mask |= v3_dots(normal, np.cross(self.a, pt - self.corner)) < 0
        # edge 1
        t.mask |= v3_dots(normal, np.cross(pt - self.corner, self.b)) < 0
        # edge 2
        t.mask |= v3_dots(normal, np.cross(pt - self.corner - self.b, self.a)) < 0
        # edge 3
        t.mask |= v3_dots(normal, np.cross(self.b, pt - self.corner - self.a)) < 0
        return t
# FIXME: disgusting globals. Need a proper object manager/kdtree impl.
# Scene geometry: two parallelograms and four spheres.  Material rows in
# cast_rays are selected by list position -- see the FIXME in
# scene_intersect about plane ids overlapping sphere ids.
planes = [Plane(np.array([(-5, -4, -20)]), np.array([(0, 0, 20)]), np.array([(20, 0, 0)])),
          Plane(np.array([(0, 3.2, -9)]), np.array([(0, 1, 0)]), np.array([(1, 0, 0)]))]
spheres = [
    Sphere(np.array((-3, 0, -16)), 2),
    Sphere(np.array((1, -1.5, -12)), 2),
    Sphere(np.array((1.5, -0.5, -18)), 3),
    Sphere(np.array((7, 5, -18)), 4)
]
def normalize(v):
    """Scale every row vector of *v* to unit length."""
    lengths = np.linalg.norm(v, axis=1)
    return v / lengths[:, np.newaxis]
def scene_intersect(orig, dirs) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Find the nearest scene object hit by each ray.

    :param orig: ray origin(s), a (1, 3) or per-ray (n, 3) array
    :param dirs: (n, 3) ray directions
    :returns: (per-ray hit distance (inf on miss),
               surface normals for the HIT rays only (filtered),
               per-ray object id as int8, -1 where nothing was hit)
    """
    obj_dists = np.inf * np.ones((dirs.shape[0]))
    id_map = -1 * np.ones_like(obj_dists)
    N = np.zeros_like(dirs)
    for i, sphere in enumerate(spheres):
        dists = sphere.ray_intersect(orig, dirs)
        # keep only hits that beat the closest hit found so far
        visible_pixels = ~dists.mask & (dists < obj_dists)
        id_map[visible_pixels] = i
        obj_dists[visible_pixels] = dists[visible_pixels]
        # TODO: refactor so we don't compute points in this function as well as
        # in the outer cast_ray function
        points = (orig + dists.reshape((-1, 1)) * dirs)[visible_pixels]
        N[visible_pixels] = normalize(points - sphere.center)
    # FIXME: needs its own id (otherwise it'll overlap with sphere ids when assigning materials)
    for i, plane in enumerate(planes):
        # yuck -- why is this duplicated
        dists = plane.ray_intersect(orig, dirs)
        visible_pixels = ~dists.mask & (dists < obj_dists)
        id_map[visible_pixels] = i
        obj_dists[visible_pixels] = dists[visible_pixels]
        # NOTE(review): every plane gets a hard-coded upward normal here,
        # regardless of its actual cross(a, b) orientation -- confirm this is
        # intended for the non-horizontal plane.
        N[visible_pixels] = (0, 1, 0)
    # TODO: determine if I need this
    id_map[obj_dists > 1000] = -1
    return obj_dists, N[id_map != -1], id_map.astype(np.int8)
def v3_dots(a, b):
    '''
    Row-wise dot products of two stacks of vector3s.
    e.g. v3_dots([v1, v2, v3], [v4, v5, v6]) => [v1 @ v4, v2 @ v5, v3 @ v6]
    Either argument may have a single row, which broadcasts against the other.
    '''
    assert len(a.shape) == len(b.shape) == 2, (a.shape, b.shape)
    assert a.shape[0] == b.shape[0] or (1 in (
        a.shape[0], b.shape[0])), f"can't broadcast a and b: {a.shape=}, {b.shape=}"
    assert a.shape[1] == b.shape[1] == 3, (a.shape, b.shape)
    return (a * b).sum(axis=1)
def reflect(I, N):
    """Mirror the incident vectors *I* about the surface normals *N*."""
    projection = v3_dots(I, N).reshape((-1, 1))
    return I - 2 * N * projection
def refract(I, N, refractive_index):
    '''Snell's law: refract incident directions through surface normals.

    :param I: (n, 3) incident direction vectors
    :param N: (n, 3) surface normals
    :param refractive_index: per-ray index of refraction of the hit material
    :returns: (n, 3) refracted directions, (0, 0, 0) where total internal
              reflection occurs
    '''
    cosi = -np.clip(v3_dots(I, N), -1, 1).reshape((-1, 1))
    etai = np.ones_like(cosi)
    etat = refractive_index.reshape((-1, 1)) * np.ones_like(cosi)
    n = N * np.ones_like(cosi)
    # rays hitting the surface from the inside: flip the normal and swap
    # the two indices of refraction
    swap_mask = cosi < 0
    cosi[swap_mask] = -cosi[swap_mask]
    etai[swap_mask], etat[swap_mask] = etat[swap_mask], etai[swap_mask]
    n[swap_mask.ravel()] = -n[swap_mask.ravel()]
    eta = etai / etat
    k = 1 - eta**2 * (1 - cosi**2)
    # BUG FIX: np.where evaluates both branches, so np.sqrt(k) previously ran
    # on negative k (total internal reflection), producing invalid-value
    # warnings and NaNs that were then discarded -- and would raise outright
    # under np.seterr(all='raise') (see the TODO at the top of the file).
    # Clamp k before the sqrt; the k < 0 lanes are overwritten with (0,0,0)
    # anyway, so the output is unchanged.
    root_k = np.sqrt(np.where(k < 0, 0, k))
    return np.where(k < 0, (0, 0, 0), I * eta + n * (eta * cosi - root_k))
def cast_rays(origs, dirs, lights, n_bounces=3):
    """Shade a bundle of rays against the global scene, recursing for
    reflection and refraction.

    :param origs: (1, 3) or per-ray (n, 3) ray origins
    :param dirs: (n, 3) ray directions
    :param lights: structured array with 'position' and 'intensity' fields
    :param n_bounces: remaining recursion depth; 0 uses bg_color for bounces
    :returns: (n, 3) RGB colour per ray (bg_color where nothing was hit)
    """
    # material table, one row per object id:
    #  ior, albedo (diffuse, specular, reflect, refract), color, spec_exponent
    sphere_materials = np.array(
        [(1.0, (0.6, 0.3, 0.1, 0.0), (0.4, 0.4, 0.3), 50),
         (1.5, (0.0, 0.5, 0.1, 0.8), (0.6, 0.7, 0.8), 125),
         (1.0, (0.9, 0.1, 0.0, 0.0), (0.3, 0.1, 0.1), 10),
         (1.0, (0.0, 10.0, 0.8, 0.0), (1.0, 1.0, 1.0), 1425)
         ],
        dtype=[
            ('ior', 'f4'),
            ('albedo', 'f4', 4),
            ('diffuse_color', 'f4', 3),
            ('specular_exponent', 'f4')])
    dists, N, object_map = scene_intersect(origs, dirs)
    # points are filtered down only to rays that hit anything
    points = (origs + dists.reshape((-1, 1)) * dirs)[object_map != -1]
    hit_object_map = object_map[object_map != -1]
    hit_origs = origs[object_map != -1] if len(origs) > 1 else origs
    hit_dirs = dirs[object_map != -1]
    diffuse_intensity = np.zeros_like(points, dtype=np.float64)
    specular_intensity = np.zeros_like(points, dtype=np.float64)
    for i in range(len(lights)):
        light_dir = normalize(lights['position'][i] - points).reshape((-1, 3))
        light_distance = np.linalg.norm(lights['position'][i] - points, axis=1)
        # offset the shadow-ray origin off the surface to avoid self-intersection
        shadow_orig = np.where((v3_dots(light_dir, N) < 0).reshape((-1, 1)),
                               points - N * 1e-3,
                               points + N * 1e-3)
        shadow_dists, _, shadow_map = scene_intersect(
            shadow_orig, light_dir)
        # NOTE(review): this uses hit_origs, but the shadow distances were
        # measured from shadow_orig -- shadow_orig + dists * light_dir looks
        # like the intended expression; confirm.
        shadow_points = hit_origs + shadow_dists.reshape((-1, 1)) * light_dir
        shadow_mask = (
            (shadow_map != -1) &
            (np.linalg.norm(shadow_points - shadow_orig, axis=1) < light_distance))
        d = v3_dots(light_dir, N)
        diffuse_intensity += np.where((d < 0) | shadow_mask,
                                      0, lights['intensity'][i] * d)[:, np.newaxis]
        spec_exponents = sphere_materials['specular_exponent'][hit_object_map]
        # NOTE(review): specular is gated on (shadow_map == -1) only, while
        # diffuse also requires the occluder to be nearer than the light
        # (shadow_mask); the asymmetry may be unintentional.
        specular_intensity += (
            (shadow_map == -1) *
            lights['intensity'][i] *
            np.clip(v3_dots(reflect(light_dir, N), hit_dirs), 0, None) ** spec_exponents)[:, np.newaxis]
    reflect_dir = normalize(reflect(hit_dirs, N))
    refract_dir = refract(hit_dirs, N, sphere_materials['ior'][hit_object_map])
    # offset secondary-ray origins off the surface, on the outgoing side
    reflect_origs = np.where((v3_dots(reflect_dir, N) < 0)[
        :, np.newaxis], points - N*1e-3, points + N*1e-3)
    refract_origs = np.where((v3_dots(refract_dir, N) < 0)[
        :, np.newaxis], points - N*1e-3, points + N*1e-3)
    if n_bounces == 0:
        refract_colors = reflect_colors = np.ones_like(
            reflect_origs) * bg_color
    else:
        reflect_colors = cast_rays(
            reflect_origs, reflect_dir, lights, n_bounces-1)
        refract_colors = cast_rays(
            refract_origs, refract_dir, lights, n_bounces-1)
    r = np.zeros_like(dirs)
    r[:] = bg_color
    r[object_map != -1] = (
        # diffuse
        sphere_materials['diffuse_color'][hit_object_map]
        * (diffuse_intensity * sphere_materials['albedo'][hit_object_map][:, 0, np.newaxis])
        # specular
        + (np.array([(1, 1, 1)]) * specular_intensity) *
        sphere_materials['albedo'][hit_object_map][:, 1, np.newaxis]
        # reflection
        + reflect_colors *
        sphere_materials['albedo'][hit_object_map][:, 2, np.newaxis]
        # refraction
        + refract_colors *
        sphere_materials['albedo'][hit_object_map][:, 3, np.newaxis]
    )
    return r
def render():
    """Trace the whole frame at the supersampled (virtual) resolution and
    downscale it to the output size.

    :returns: (height, width, 3) float image with components in [0, 1]
    """
    fov = np.pi/3
    framebuffer = np.zeros((vheight * vwidth, 3))
    start = time.time()
    # one normalized view ray per virtual pixel, camera at the origin
    px, py = np.meshgrid(np.arange(vwidth), np.arange(vheight))
    half_tan = np.tan(fov/2)
    xs = (2*(px+0.5) / vwidth - 1) * half_tan * vwidth/vheight
    ys = (2*(py+0.5) / vheight - 1) * half_tan
    zs = -1 * np.ones((vheight, vwidth))
    dirs = normalize(np.dstack((xs, ys, zs)).reshape((-1, 3)))
    orig = np.array([(0, 0, 0)])
    lights = np.array(
        [((-20, 20, 20), 1.5),
         ((30, 50, -25), 1.8),
         ((30, 20, 30), 1.7)],
        dtype=[('position', 'f4', 3), ('intensity', 'f4')])
    framebuffer[:, :] = cast_rays(orig, dirs, lights)
    end = time.time()
    print(f'took {end-start:.2f}s')
    # HACK: not sure if this normalization is technically correct
    max_channel = framebuffer.max(axis=1)
    too_bright = max_channel > 1
    framebuffer[too_bright] /= max_channel[too_bright].reshape((-1, 1))
    framebuffer[framebuffer < 0] = 0
    framebuffer[framebuffer > 1] = 1
    return resize(framebuffer.reshape((vheight, vwidth, 3)), (height, width))
| [
"numpy.ones_like",
"numpy.sqrt",
"numpy.cross",
"numpy.ones",
"numpy.ma.array",
"numpy.tan",
"numpy.where",
"numpy.linalg.norm",
"numpy.sum",
"numpy.array",
"numpy.zeros",
"time.time",
"numpy.ma.abs",
"numpy.zeros_like",
"numpy.arange"
] | [((2838, 2857), 'numpy.zeros_like', 'np.zeros_like', (['dirs'], {}), '(dirs)\n', (2851, 2857), True, 'import numpy as np\n'), ((4347, 4368), 'numpy.sum', 'np.sum', (['(a * b)'], {'axis': '(1)'}), '(a * b, axis=1)\n', (4353, 4368), True, 'import numpy as np\n'), ((4575, 4593), 'numpy.ones_like', 'np.ones_like', (['cosi'], {}), '(cosi)\n', (4587, 4593), True, 'import numpy as np\n'), ((5158, 5491), 'numpy.array', 'np.array', (['[(1.0, (0.6, 0.3, 0.1, 0.0), (0.4, 0.4, 0.3), 50), (1.5, (0.0, 0.5, 0.1, \n 0.8), (0.6, 0.7, 0.8), 125), (1.0, (0.9, 0.1, 0.0, 0.0), (0.3, 0.1, 0.1\n ), 10), (1.0, (0.0, 10.0, 0.8, 0.0), (1.0, 1.0, 1.0), 1425)]'], {'dtype': "[('ior', 'f4'), ('albedo', 'f4', 4), ('diffuse_color', 'f4', 3), (\n 'specular_exponent', 'f4')]"}), "([(1.0, (0.6, 0.3, 0.1, 0.0), (0.4, 0.4, 0.3), 50), (1.5, (0.0, 0.5,\n 0.1, 0.8), (0.6, 0.7, 0.8), 125), (1.0, (0.9, 0.1, 0.0, 0.0), (0.3, 0.1,\n 0.1), 10), (1.0, (0.0, 10.0, 0.8, 0.0), (1.0, 1.0, 1.0), 1425)], dtype=\n [('ior', 'f4'), ('albedo', 'f4', 4), ('diffuse_color', 'f4', 3), (\n 'specular_exponent', 'f4')])\n", (5166, 5491), True, 'import numpy as np\n'), ((5953, 5992), 'numpy.zeros_like', 'np.zeros_like', (['points'], {'dtype': 'np.float64'}), '(points, dtype=np.float64)\n', (5966, 5992), True, 'import numpy as np\n'), ((6018, 6057), 'numpy.zeros_like', 'np.zeros_like', (['points'], {'dtype': 'np.float64'}), '(points, dtype=np.float64)\n', (6031, 6057), True, 'import numpy as np\n'), ((7947, 7966), 'numpy.zeros_like', 'np.zeros_like', (['dirs'], {}), '(dirs)\n', (7960, 7966), True, 'import numpy as np\n'), ((8640, 8671), 'numpy.zeros', 'np.zeros', (['(vheight * vwidth, 3)'], {}), '((vheight * vwidth, 3))\n', (8648, 8671), True, 'import numpy as np\n'), ((8685, 8696), 'time.time', 'time.time', ([], {}), '()\n', (8694, 8696), False, 'import time\n'), ((8991, 9012), 'numpy.array', 'np.array', (['[(0, 0, 0)]'], {}), '([(0, 0, 0)])\n', (8999, 9012), True, 'import numpy as np\n'), ((9026, 9157), 'numpy.array', 
'np.array', (['[((-20, 20, 20), 1.5), ((30, 50, -25), 1.8), ((30, 20, 30), 1.7)]'], {'dtype': "[('position', 'f4', 3), ('intensity', 'f4')]"}), "([((-20, 20, 20), 1.5), ((30, 50, -25), 1.8), ((30, 20, 30), 1.7)],\n dtype=[('position', 'f4', 3), ('intensity', 'f4')])\n", (9034, 9157), True, 'import numpy as np\n'), ((9279, 9290), 'time.time', 'time.time', ([], {}), '()\n', (9288, 9290), False, 'import time\n'), ((1012, 1024), 'numpy.ma.array', 'ma.array', (['t0'], {}), '(t0)\n', (1020, 1024), True, 'import numpy.ma as ma\n'), ((1186, 1210), 'numpy.cross', 'np.cross', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (1194, 1210), True, 'import numpy as np\n'), ((2242, 2267), 'numpy.array', 'np.array', (['[(-5, -4, -20)]'], {}), '([(-5, -4, -20)])\n', (2250, 2267), True, 'import numpy as np\n'), ((2269, 2291), 'numpy.array', 'np.array', (['[(0, 0, 20)]'], {}), '([(0, 0, 20)])\n', (2277, 2291), True, 'import numpy as np\n'), ((2293, 2315), 'numpy.array', 'np.array', (['[(20, 0, 0)]'], {}), '([(20, 0, 0)])\n', (2301, 2315), True, 'import numpy as np\n'), ((2334, 2358), 'numpy.array', 'np.array', (['[(0, 3.2, -9)]'], {}), '([(0, 3.2, -9)])\n', (2342, 2358), True, 'import numpy as np\n'), ((2360, 2381), 'numpy.array', 'np.array', (['[(0, 1, 0)]'], {}), '([(0, 1, 0)])\n', (2368, 2381), True, 'import numpy as np\n'), ((2383, 2404), 'numpy.array', 'np.array', (['[(1, 0, 0)]'], {}), '([(1, 0, 0)])\n', (2391, 2404), True, 'import numpy as np\n'), ((2431, 2453), 'numpy.array', 'np.array', (['(-3, 0, -16)'], {}), '((-3, 0, -16))\n', (2439, 2453), True, 'import numpy as np\n'), ((2470, 2494), 'numpy.array', 'np.array', (['(1, -1.5, -12)'], {}), '((1, -1.5, -12))\n', (2478, 2494), True, 'import numpy as np\n'), ((2511, 2537), 'numpy.array', 'np.array', (['(1.5, -0.5, -18)'], {}), '((1.5, -0.5, -18))\n', (2519, 2537), True, 'import numpy as np\n'), ((2554, 2575), 'numpy.array', 'np.array', (['(7, 5, -18)'], {}), '((7, 5, -18))\n', (2562, 2575), True, 'import numpy as np\n'), 
((2763, 2785), 'numpy.ones', 'np.ones', (['dirs.shape[0]'], {}), '(dirs.shape[0])\n', (2770, 2785), True, 'import numpy as np\n'), ((2806, 2829), 'numpy.ones_like', 'np.ones_like', (['obj_dists'], {}), '(obj_dists)\n', (2818, 2829), True, 'import numpy as np\n'), ((4641, 4659), 'numpy.ones_like', 'np.ones_like', (['cosi'], {}), '(cosi)\n', (4653, 4659), True, 'import numpy as np\n'), ((4672, 4690), 'numpy.ones_like', 'np.ones_like', (['cosi'], {}), '(cosi)\n', (4684, 4690), True, 'import numpy as np\n'), ((6196, 6250), 'numpy.linalg.norm', 'np.linalg.norm', (["(lights['position'][i] - points)"], {'axis': '(1)'}), "(lights['position'][i] - points, axis=1)\n", (6210, 6250), True, 'import numpy as np\n'), ((8721, 8738), 'numpy.arange', 'np.arange', (['vwidth'], {}), '(vwidth)\n', (8730, 8738), True, 'import numpy as np\n'), ((8740, 8758), 'numpy.arange', 'np.arange', (['vheight'], {}), '(vheight)\n', (8749, 8758), True, 'import numpy as np\n'), ((8864, 8879), 'numpy.tan', 'np.tan', (['(fov / 2)'], {}), '(fov / 2)\n', (8870, 8879), True, 'import numpy as np\n'), ((836, 895), 'numpy.ma.array', 'ma.array', (['(self.radius ** 2 - d2)'], {'mask': '(d2 > self.radius ** 2)'}), '(self.radius ** 2 - d2, mask=d2 > self.radius ** 2)\n', (844, 895), True, 'import numpy.ma as ma\n'), ((1290, 1305), 'numpy.ma.abs', 'ma.abs', (['ndotray'], {}), '(ndotray)\n', (1296, 1305), True, 'import numpy.ma as ma\n'), ((2617, 2642), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (2631, 2642), True, 'import numpy as np\n'), ((6804, 6866), 'numpy.where', 'np.where', (['((d < 0) | shadow_mask)', '(0)', "(lights['intensity'][i] * d)"], {}), "((d < 0) | shadow_mask, 0, lights['intensity'][i] * d)\n", (6812, 6866), True, 'import numpy as np\n'), ((7682, 7709), 'numpy.ones_like', 'np.ones_like', (['reflect_origs'], {}), '(reflect_origs)\n', (7694, 7709), True, 'import numpy as np\n'), ((1805, 1839), 'numpy.cross', 'np.cross', (['self.a', '(pt - self.corner)'], {}), 
'(self.a, pt - self.corner)\n', (1813, 1839), True, 'import numpy as np\n'), ((1896, 1930), 'numpy.cross', 'np.cross', (['(pt - self.corner)', 'self.b'], {}), '(pt - self.corner, self.b)\n', (1904, 1930), True, 'import numpy as np\n'), ((1987, 2030), 'numpy.cross', 'np.cross', (['(pt - self.corner - self.b)', 'self.a'], {}), '(pt - self.corner - self.b, self.a)\n', (1995, 2030), True, 'import numpy as np\n'), ((2087, 2130), 'numpy.cross', 'np.cross', (['self.b', '(pt - self.corner - self.a)'], {}), '(self.b, pt - self.corner - self.a)\n', (2095, 2130), True, 'import numpy as np\n'), ((6669, 6720), 'numpy.linalg.norm', 'np.linalg.norm', (['(shadow_points - shadow_orig)'], {'axis': '(1)'}), '(shadow_points - shadow_orig, axis=1)\n', (6683, 6720), True, 'import numpy as np\n'), ((8796, 8811), 'numpy.tan', 'np.tan', (['(fov / 2)'], {}), '(fov / 2)\n', (8802, 8811), True, 'import numpy as np\n'), ((5002, 5012), 'numpy.sqrt', 'np.sqrt', (['k'], {}), '(k)\n', (5009, 5012), True, 'import numpy as np\n'), ((8216, 8237), 'numpy.array', 'np.array', (['[(1, 1, 1)]'], {}), '([(1, 1, 1)])\n', (8224, 8237), True, 'import numpy as np\n'), ((8932, 8958), 'numpy.ones', 'np.ones', (['(vheight, vwidth)'], {}), '((vheight, vwidth))\n', (8939, 8958), True, 'import numpy as np\n')] |
import numpy as np
from colvar.geometry import get_d, get_angle, get_dihedral, get_point_plane
def eval_constant(x, value):
    """Constant collective variable: always *value*, with a zero gradient.

    The gradient array matches the shape of the configuration *x* (float
    dtype, regardless of x's dtype, since np.zeros is used).
    """
    zero_grad = np.zeros(x.shape)
    return value, zero_grad
def eval_cartesian(x, index):
    """Cartesian-coordinate CV: the value is x[index].

    The gradient is the corresponding unit vector (1 at *index*, 0
    elsewhere).
    """
    value = x[index]
    grad = np.zeros(x.shape)
    grad[index] = 1
    return value, grad
def eval_geometric(x, centers, f):
    """Evaluate a geometric CV *f* on the evaluated *centers*.

    The total gradient w.r.t. x is obtained by the chain rule: the
    gradient of f w.r.t. each center is contracted with the centers'
    Jacobian via einsum.
    """
    center_values, center_jacobian = evaluate_schema(x, centers)
    value, grad_wrt_centers = f(*center_values)
    total_grad = np.einsum('ij,ijk->k', grad_wrt_centers, center_jacobian)
    return value, total_grad
def geometric_evaluator(f):
    """Wrap geometry function *f* as an evaluator with the (x, centers) signature."""
    def evaluator(x, centers):
        return eval_geometric(x, centers, f)
    return evaluator
def eval_sigmoid(x, colvar, L, k, x0):
    """Logistic switching function of another CV: L * sigmoid(k * (cv - x0)).

    Returns the scaled sigmoid value and its gradient w.r.t. x
    (chain-ruled through the inner colvar's gradient).
    """
    cv_value, cv_grad = evaluate_schema(x, colvar)
    exp_term = np.exp(k * (cv_value - x0))
    sig = exp_term / (exp_term + 1)
    # d(sigmoid)/d(cv) = k * s * (1 - s)
    dsig = k * sig * (1 - sig)
    return sig * L, cv_grad * dsig * L
def eval_linear(x, colvars, weights, normalize):
    """Weighted linear combination of CVs, optionally normalized by sum(w).

    Both the CVs and the weights are schemas evaluated against *x*; the
    gradient follows the quotient rule when *normalize* is set (the
    denominator and its gradient are constants 1 and 0 otherwise).
    """
    cv_values, cv_jac = evaluate_schema(x, colvars)
    w_values, w_jac = evaluate_schema(x, weights)
    if not normalize:
        sumw, sumw_grad = eval_constant(x, 1)
    else:
        sumw = np.sum(w_values)
        sumw_grad = np.sum(w_jac, axis=0)
    value = w_values.dot(cv_values) / sumw
    # quotient rule: d(w.c / S) = (w.dc + c.dw - (w.c/S) dS) / S
    grad = (w_values.dot(cv_jac) + cv_values.dot(w_jac) -
            value * sumw_grad) / sumw
    return value, grad
# Dispatch table mapping a schema's "type" tag to its evaluator callable.
# Each evaluator returns a (value, gradient) pair for configuration x;
# geometric CVs are wrapped so the chain rule is applied automatically.
EVALUATORS = {"constant": eval_constant,
              "x": eval_cartesian,
              "distance": geometric_evaluator(get_d),
              "angle": geometric_evaluator(get_angle),
              "dihedral": geometric_evaluator(get_dihedral),
              "point_plane": geometric_evaluator(get_point_plane),
              "sigmoid": eval_sigmoid,
              "linear": eval_linear}
def stack_colvars(colvars):
    """Transpose per-CV (value, gradient) pairs into [values, jacobian] arrays."""
    stacked = []
    for component in zip(*colvars):
        stacked.append(np.array(component))
    return stacked
def evaluate_schema(x, schema):
    """Evaluate a CV schema (a dict) or a list of schemas against *x*.

    A dict is dispatched through EVALUATORS on its "type" tag; a list is
    evaluated recursively and stacked into [values, jacobian].
    """
    if not isinstance(schema, list):
        return EVALUATORS[schema["type"]](x, **schema["params"])
    return stack_colvars([evaluate_schema(x, entry) for entry in schema])
| [
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.einsum"
] | [((141, 158), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (149, 158), True, 'import numpy as np\n'), ((233, 250), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (241, 250), True, 'import numpy as np\n'), ((701, 728), 'numpy.exp', 'np.exp', (['(k * (cv_value - x0))'], {}), '(k * (cv_value - x0))\n', (707, 728), True, 'import numpy as np\n'), ((464, 510), 'numpy.einsum', 'np.einsum', (['"""ij,ijk->k"""', 'grad', 'centers_jacobian'], {}), "('ij,ijk->k', grad, centers_jacobian)\n", (473, 510), True, 'import numpy as np\n'), ((1025, 1041), 'numpy.sum', 'np.sum', (['w_values'], {}), '(w_values)\n', (1031, 1041), True, 'import numpy as np\n'), ((1062, 1088), 'numpy.sum', 'np.sum', (['w_jacobian'], {'axis': '(0)'}), '(w_jacobian, axis=0)\n', (1068, 1088), True, 'import numpy as np\n')] |
"""
Class for interfacing with the Primesense RGBD sensor
Author: <NAME>
"""
import logging
import numpy as np
import time
from cv_bridge import CvBridge, CvBridgeError
import rospy
import sensor_msgs.msg
from autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image
from autolab_core.constants import MM_TO_METERS
from .camera_sensor import CameraSensor
class Kinect2BridgedQuality:
    """Kinect quality for bridged mode.

    The value selects the topic namespace used by KinectSensorBridged
    (topics are built as "/kinect2/<quality>/...").
    """
    HD = "hd"  # full-resolution stream
    QUARTER_HD = "qhd"  # quarter-HD stream
    SD = "sd"  # standard-definition stream
class KinectSensorBridged(CameraSensor):
    """Class for interacting with a Kinect v2 RGBD sensor through the kinect
    bridge https://github.com/code-iai/iai_kinect2. This is preferable for
    visualization and debug because the kinect bridge will continuously
    publish image and point cloud info.
    """

    def __init__(
        self,
        quality=Kinect2BridgedQuality.HD,
        frame="kinect2_rgb_optical_frame",
    ):
        """Initialize a Kinect v2 sensor which connects to the
        iai_kinect2 bridge.

        Parameters
        ----------
        quality : :obj:`str`
            The quality (HD, Quarter-HD, SD) of the image data that
            should be subscribed to
        frame : :obj:`str`
            The name of the frame of reference in which the sensor resides.
            If None, this will be set to 'kinect2_rgb_optical_frame'
        """
        # set member vars
        self._frame = frame

        self.topic_image_color = "/kinect2/%s/image_color_rect" % (quality)
        self.topic_image_depth = "/kinect2/%s/image_depth_rect" % (quality)
        self.topic_info_camera = "/kinect2/%s/camera_info" % (quality)

        self._initialized = False
        self._format = None
        self._camera_intr = None
        # Latest frames received from the bridge (None until the first
        # message arrives).
        # BUGFIX: _cur_color_im was previously only ever set inside the
        # color callback, so frames() raised AttributeError when called
        # before the first color message.
        self._cur_color_im = None
        self._cur_depth_im = None
        self._running = False

        self._bridge = CvBridge()

    def __del__(self):
        """Automatically stop the sensor for safety."""
        if self.is_running:
            self.stop()

    def _set_camera_properties(self, msg):
        """Set the camera intrinsics from an info msg."""
        # K is the row-major 3x3 intrinsic matrix from the CameraInfo msg.
        focal_x = msg.K[0]
        focal_y = msg.K[4]
        center_x = msg.K[2]
        center_y = msg.K[5]
        im_height = msg.height
        im_width = msg.width
        self._camera_intr = CameraIntrinsics(
            self._frame,
            focal_x,
            focal_y,
            center_x,
            center_y,
            height=im_height,
            width=im_width,
        )

    def _process_image_msg(self, msg):
        """Process an image message and return a numpy array with the image data.

        Returns
        -------
        :obj:`numpy.ndarray` containing the image in the image message

        Raises
        ------
        CvBridgeError
            If the bridge is not able to convert the image
        """
        encoding = msg.encoding
        try:
            image = self._bridge.imgmsg_to_cv2(msg, encoding)
        except CvBridgeError as e:
            rospy.logerr(e)
            # BUGFIX: previously fell through to `return image` with
            # `image` unbound, masking the conversion error with a
            # NameError; re-raise the real error instead.
            raise
        return image

    def _color_image_callback(self, image_msg):
        """Subscribe to image topic and keep it up to date."""
        color_arr = self._process_image_msg(image_msg)
        # reverse the channel order (BGR -> RGB)
        self._cur_color_im = ColorImage(color_arr[:, :, ::-1], self._frame)

    def _depth_image_callback(self, image_msg):
        """Subscribe to depth image topic and keep it up to date."""
        encoding = image_msg.encoding
        try:
            depth_arr = self._bridge.imgmsg_to_cv2(image_msg, encoding)
        except CvBridgeError as e:
            rospy.logerr(e)
            # BUGFIX: previously execution continued with `depth_arr`
            # unbound, raising NameError inside the ROS callback; keep
            # the last good depth image instead.
            return
        # bridge publishes depth in millimeters; convert to meters
        depth = np.array(depth_arr * MM_TO_METERS, np.float32)
        self._cur_depth_im = DepthImage(depth, self._frame)

    def _camera_info_callback(self, msg):
        """Callback for reading camera info (one-shot: unsubscribes itself)."""
        self._camera_info_sub.unregister()
        self._set_camera_properties(msg)

    @property
    def ir_intrinsics(self):
        """:obj:`CameraIntrinsics` : IR camera intrinsics of Kinect."""
        return self._camera_intr

    @property
    def is_running(self):
        """bool : True if the stream is running, or false otherwise."""
        return self._running

    @property
    def frame(self):
        """:obj:`str` : The reference frame of the sensor."""
        return self._frame

    def start(self):
        """Start the sensor by subscribing to the bridge topics and waiting
        for the first messages and camera intrinsics."""
        # initialize subscribers
        self._image_sub = rospy.Subscriber(
            self.topic_image_color,
            sensor_msgs.msg.Image,
            self._color_image_callback,
        )
        self._depth_sub = rospy.Subscriber(
            self.topic_image_depth,
            sensor_msgs.msg.Image,
            self._depth_image_callback,
        )
        self._camera_info_sub = rospy.Subscriber(
            self.topic_info_camera,
            sensor_msgs.msg.CameraInfo,
            self._camera_info_callback,
        )

        timeout = 10
        try:
            rospy.loginfo("waiting to recieve a message from the Kinect")
            rospy.wait_for_message(
                self.topic_image_color, sensor_msgs.msg.Image, timeout=timeout
            )
            rospy.wait_for_message(
                self.topic_image_depth, sensor_msgs.msg.Image, timeout=timeout
            )
            rospy.wait_for_message(
                self.topic_info_camera,
                sensor_msgs.msg.CameraInfo,
                timeout=timeout,
            )
        except rospy.ROSException as e:
            print("KINECT NOT FOUND")
            rospy.logerr("Kinect topic not found, Kinect not started")
            rospy.logerr(e)
            # NOTE(review): execution intentionally falls through to the
            # wait loop below, preserving the original behavior; this will
            # block forever if the camera never publishes intrinsics.

        while self._camera_intr is None:
            time.sleep(0.1)

        self._running = True

    def stop(self):
        """Stop the sensor and unregister all subscribers.

        Returns
        -------
        bool
            True if the sensor was stopped, False if it was not running.
        """
        # check that everything is running
        if not self._running:
            logging.warning("Kinect not running. Aborting stop")
            return False

        # stop subs
        self._image_sub.unregister()
        self._depth_sub.unregister()
        # BUGFIX: the call parentheses were missing, so the camera-info
        # subscriber was never actually unregistered here.
        self._camera_info_sub.unregister()
        self._running = False
        return True

    def frames(self):
        """Retrieve a new frame from the Kinect and convert it to a ColorImage,
        a DepthImage is always none for this type

        Returns
        -------
        :obj:`tuple` of :obj:`ColorImage`, :obj:`DepthImage`
            The ColorImage and DepthImage of the current frame.

        Raises
        ------
        RuntimeError
            If the Kinect stream is not running.
        """
        # wait for a new image from both callbacks
        while self._cur_depth_im is None or self._cur_color_im is None:
            time.sleep(0.01)

        # read next image and clear the buffers so the next call waits for
        # fresh data
        depth_im = self._cur_depth_im
        color_im = self._cur_color_im

        self._cur_color_im = None
        self._cur_depth_im = None

        # TODO add ir image
        return color_im, depth_im

    def median_depth_img(self, num_img=1, fill_depth=0.0):
        """Collect a series of depth images and return the median of the set.

        Parameters
        ----------
        num_img : int
            The number of consecutive frames to process.
        fill_depth : float
            Value used to replace zero (missing) depth readings in the
            median image.

        Returns
        -------
        :obj:`DepthImage`
            The median DepthImage collected from the frames.
        """
        depths = []

        for _ in range(num_img):
            _, depth, _ = self.frames()
            depths.append(depth)

        median_depth = Image.median_images(depths)
        median_depth.data[median_depth.data == 0.0] = fill_depth
        return median_depth
| [
"rospy.logerr",
"autolab_core.Image.median_images",
"logging.warning",
"rospy.wait_for_message",
"time.sleep",
"autolab_core.CameraIntrinsics",
"cv_bridge.CvBridge",
"numpy.array",
"autolab_core.DepthImage",
"rospy.Subscriber",
"autolab_core.ColorImage",
"rospy.loginfo"
] | [((1822, 1832), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (1830, 1832), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((2265, 2371), 'autolab_core.CameraIntrinsics', 'CameraIntrinsics', (['self._frame', 'focal_x', 'focal_y', 'center_x', 'center_y'], {'height': 'im_height', 'width': 'im_width'}), '(self._frame, focal_x, focal_y, center_x, center_y, height=\n im_height, width=im_width)\n', (2281, 2371), False, 'from autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image\n'), ((3196, 3242), 'autolab_core.ColorImage', 'ColorImage', (['color_arr[:, :, ::-1]', 'self._frame'], {}), '(color_arr[:, :, ::-1], self._frame)\n', (3206, 3242), False, 'from autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image\n'), ((3562, 3608), 'numpy.array', 'np.array', (['(depth_arr * MM_TO_METERS)', 'np.float32'], {}), '(depth_arr * MM_TO_METERS, np.float32)\n', (3570, 3608), True, 'import numpy as np\n'), ((3638, 3668), 'autolab_core.DepthImage', 'DepthImage', (['depth', 'self._frame'], {}), '(depth, self._frame)\n', (3648, 3668), False, 'from autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image\n'), ((4372, 4468), 'rospy.Subscriber', 'rospy.Subscriber', (['self.topic_image_color', 'sensor_msgs.msg.Image', 'self._color_image_callback'], {}), '(self.topic_image_color, sensor_msgs.msg.Image, self.\n _color_image_callback)\n', (4388, 4468), False, 'import rospy\n'), ((4537, 4633), 'rospy.Subscriber', 'rospy.Subscriber', (['self.topic_image_depth', 'sensor_msgs.msg.Image', 'self._depth_image_callback'], {}), '(self.topic_image_depth, sensor_msgs.msg.Image, self.\n _depth_image_callback)\n', (4553, 4633), False, 'import rospy\n'), ((4708, 4809), 'rospy.Subscriber', 'rospy.Subscriber', (['self.topic_info_camera', 'sensor_msgs.msg.CameraInfo', 'self._camera_info_callback'], {}), '(self.topic_info_camera, sensor_msgs.msg.CameraInfo, self.\n _camera_info_callback)\n', (4724, 4809), False, 'import rospy\n'), ((7394, 7421), 
'autolab_core.Image.median_images', 'Image.median_images', (['depths'], {}), '(depths)\n', (7413, 7421), False, 'from autolab_core import CameraIntrinsics, ColorImage, DepthImage, Image\n'), ((4899, 4960), 'rospy.loginfo', 'rospy.loginfo', (['"""waiting to recieve a message from the Kinect"""'], {}), "('waiting to recieve a message from the Kinect')\n", (4912, 4960), False, 'import rospy\n'), ((4973, 5063), 'rospy.wait_for_message', 'rospy.wait_for_message', (['self.topic_image_color', 'sensor_msgs.msg.Image'], {'timeout': 'timeout'}), '(self.topic_image_color, sensor_msgs.msg.Image,\n timeout=timeout)\n', (4995, 5063), False, 'import rospy\n'), ((5102, 5192), 'rospy.wait_for_message', 'rospy.wait_for_message', (['self.topic_image_depth', 'sensor_msgs.msg.Image'], {'timeout': 'timeout'}), '(self.topic_image_depth, sensor_msgs.msg.Image,\n timeout=timeout)\n', (5124, 5192), False, 'import rospy\n'), ((5231, 5326), 'rospy.wait_for_message', 'rospy.wait_for_message', (['self.topic_info_camera', 'sensor_msgs.msg.CameraInfo'], {'timeout': 'timeout'}), '(self.topic_info_camera, sensor_msgs.msg.CameraInfo,\n timeout=timeout)\n', (5253, 5326), False, 'import rospy\n'), ((5617, 5632), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5627, 5632), False, 'import time\n'), ((5799, 5851), 'logging.warning', 'logging.warning', (['"""Kinect not running. Aborting stop"""'], {}), "('Kinect not running. 
Aborting stop')\n", (5814, 5851), False, 'import logging\n'), ((6603, 6619), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (6613, 6619), False, 'import time\n'), ((2964, 2979), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (2976, 2979), False, 'import rospy\n'), ((3530, 3545), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (3542, 3545), False, 'import rospy\n'), ((5476, 5534), 'rospy.logerr', 'rospy.logerr', (['"""Kinect topic not found, Kinect not started"""'], {}), "('Kinect topic not found, Kinect not started')\n", (5488, 5534), False, 'import rospy\n'), ((5547, 5562), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (5559, 5562), False, 'import rospy\n')] |
"""
Cluster
=======
This filter will identify clusters of atoms in the system. It uses a recursive
algorithm to build the clusters using a fixed cut-off. There are options to
calculate the volumes of the clusters and also to draw convex hulls around the
clusters to highlight them.
Parameters are:
.. glossary::
Neighbour radius
When constructing clusters two atoms are said to belong to the same
cluster if their separation is less than this value.
Minimum cluster size
Clusters are only visible if they contain at least this number of atoms.
Maximum cluster size
Clusters are only visible if they contain less than this number of atoms.
Set this parameter to `-1` if you do not want an upper limit on the
cluster size.
Draw convex hulls
Compute and draw a convex hull around each cluster to highlight it.
Hull colour
The colour of the convex hulls, if `Draw convex hulls` is selected.
Hull opacity
The opacity of the convex hulls, if `Draw convex hulls` is selected.
Hide atoms
If `Draw convex hulls` is selected this will make the atoms invisible,
so just the hulls are shown. Cannot be selected at the same time as
`Show atoms inside hulls` or `Show atoms outside hulls`.
Show atoms inside hulls
If `Draw convex hulls` is selected this will make all atoms that fall
within a convex hull visible, regardless of previous filters (i.e. it
acts on the original input lattice). Cannot be selected at the same
time as `Hide atoms` or `Show atoms outside hulls`.
Show atoms outside hulls
If `Draw convex hulls` is selected this will make all atoms that fall
outside the convex hulls visible, regardless of previous filters (i.e. it
acts on the original input lattice). Cannot be selected at the same
time as `Hide atoms` or `Show atoms inside hulls`.
Calculate volumes
Calculate the volumes of the clusters of atoms.
Calculate volumes Voronoi
Sum the Voronoi volumes of the atoms in the cluster in order to calculate
the cluster volume. The Voronoi volumes are computed using the *Voronoi
settings* on the :ref:`voronoi_options_label` page.
Calculate volumes hull
The cluster volume is calculated from the volume of convex hulls of the
set of points in the cluster.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import copy
import numpy as np
from scipy.spatial import Delaunay
from . import base
from .. import clusters
from .. import _clusters
from six.moves import range
class ClusterFilterSettings(base.BaseSettings):
    """
    Settings for the cluster filter.

    Registers all cluster-filter options with their default values.
    """
    def __init__(self):
        super(ClusterFilterSettings, self).__init__()
        # (setting name, default value) pairs registered with the base class
        option_defaults = (
            ("calculateVolumes", False),
            ("calculateVolumesVoro", True),
            ("calculateVolumesHull", False),
            ("hideAtoms", False),
            ("showAtomsInHulls", False),
            ("showAtomsOutHulls", False),
            ("neighbourRadius", 5.0),
            ("hullCol", [0, 0, 1]),
            ("hullOpacity", 0.5),
            ("minClusterSize", 8),
            ("maxClusterSize", -1),
            ("drawConvexHulls", False),
        )
        for name, default in option_defaults:
            self.registerSetting(name, default=default)
class ClusterFilter(base.BaseFilter):
    """
    Cluster filter.

    Groups visible atoms into clusters using a fixed neighbour cut-off,
    optionally restricts visibility to atoms inside/outside the cluster
    convex hulls, and optionally computes cluster volumes.
    """
    def apply(self, filterInput, settings):
        """Apply the filter.

        Builds clusters from the currently visible atoms via the C
        extension, then applies the hull-visibility and volume options.
        Returns a base.FilterResult carrying the cluster list.
        """
        # unpack inputs
        lattice = filterInput.inputState
        NScalars = filterInput.NScalars
        fullScalars = filterInput.fullScalars
        NVectors = filterInput.NVectors
        fullVectors = filterInput.fullVectors
        visibleAtoms = filterInput.visibleAtoms
        PBC = lattice.PBC
        voronoiCalculator = filterInput.voronoiAtoms
        # settings
        minSize = settings.getSetting("minClusterSize")
        maxSize = settings.getSetting("maxClusterSize")
        nebRad = settings.getSetting("neighbourRadius")
        calcVols = settings.getSetting("calculateVolumes")
        self.logger.debug("Cluster size: %d -> %d", minSize, maxSize)
        self.logger.debug("Neighbour radius: %f", nebRad)
        self.logger.debug("Calculating volumes: %r", calcVols)
        # arrays for the cluster calculation
        # atomCluster[i] will hold the cluster id of visible atom i
        atomCluster = np.empty(len(visibleAtoms), np.int32)
        result = np.empty(2, np.int32)
        # call C lib; fills atomCluster and result in place
        _clusters.findClusters(visibleAtoms, lattice.pos, atomCluster, nebRad, lattice.cellDims, PBC,
                       minSize, maxSize, result, NScalars, fullScalars, NVectors, fullVectors)
        # result[0] = number of atoms kept visible, result[1] = cluster count
        NVisible = result[0]
        NClusters = result[1]
        # resize arrays (refcheck=False: arrays are shared with caller)
        visibleAtoms.resize(NVisible, refcheck=False)
        atomCluster.resize(NVisible, refcheck=False)
        # build cluster lists
        clusterList = []
        for i in range(NClusters):
            clusterList.append(clusters.AtomCluster(lattice))
        # add atoms to cluster lists
        # map arbitrary C-side cluster ids to compact 0..NClusters-1 indices
        clusterIndexMapper = {}
        count = 0
        for i in range(NVisible):
            atomIndex = visibleAtoms[i]
            clusterIndex = atomCluster[i]
            if clusterIndex not in clusterIndexMapper:
                clusterIndexMapper[clusterIndex] = count
                count += 1
            clusterListIndex = clusterIndexMapper[clusterIndex]
            clusterList[clusterListIndex].addAtom(atomIndex)
        # show all atoms inside or outside the clusters
        drawHulls = settings.getSetting("drawConvexHulls")
        showInHulls = settings.getSetting("showAtomsInHulls")
        showOutHulls = settings.getSetting("showAtomsOutHulls")
        if drawHulls and (showInHulls or showOutHulls):
            # first we calculate the Delaunay triangulation of each cluster (including periodic images)
            self.logger.debug("Calculating Delaunay triangulations for showing atoms")
            hulls, hullsMap = self.computeDelaunayForClusters(lattice, clusterList, nebRad)
            # for each atom determine whether it lies within a cluster or not
            self.logger.debug("Determining location of atoms (inside or outside clusters)")
            # TODO: write in C
            inClusterMask = np.zeros(lattice.NAtoms, np.int32)
            pos = np.empty((1, 3), np.float64)
            for i in range(lattice.NAtoms):
                pos[0][:] = lattice.atomPos(i)[:]
                for hull, hullMap in zip(hulls, hullsMap):
                    # find_simplex >= 0 means the point lies inside the hull
                    res = hull.find_simplex(pos) >= 0
                    if res[0]:
                        # set the mask to in cluster
                        inClusterMask[i] = 1
                        # add to the cluster if doesn't already belong
                        cluster = clusterList[hullMap]
                        if i not in cluster:
                            cluster.addAtom(i)
                        break
            # make sure all cluster atoms are included
            for cluster in clusterList:
                for index in cluster:
                    inClusterMask[index] = 1
            # make the new visible atoms array, starting with full system
            self.logger.info("Overriding visible atoms based on cluster occupancy")
            visibleAtoms.resize(lattice.NAtoms, refcheck=False)
            visibleMask = 1 if showInHulls else 0
            # TODO: write in C
            numVisible = 0
            for i in range(lattice.NAtoms):
                if inClusterMask[i] == visibleMask:
                    visibleAtoms[numVisible] = i
                    numVisible += 1
            visibleAtoms.resize(numVisible, refcheck=False)
            # TODO: set cluster list to be empty on case of show out
            if showOutHulls:
                clusterList = []
        # calculate volumes
        if calcVols:
            self.logger.debug("Calculating cluster volumes")
            for i, cluster in enumerate(clusterList):
                cluster.calculateVolume(voronoiCalculator, settings)
                volume = cluster.getVolume()
                if volume is not None:
                    self.logger.debug("Cluster %d: volume is %f", i, volume)
                area = cluster.getFacetArea()
                if area is not None:
                    self.logger.debug("Cluster %d: facet area is %f", i, area)
        # hide atoms if required
        if drawHulls and settings.getSetting("hideAtoms"):
            visibleAtoms.resize(0, refcheck=False)
        # result
        result = base.FilterResult()
        result.setClusterList(clusterList)
        return result
    def computeDelaunayForClusters(self, lattice, clusterList, neighbourRadius):
        """Compute Delaunay triangulation for each cluster, including periodic images.

        Returns (hulls, hullClusterMap): the list of Delaunay objects and,
        for each, the index of the cluster it belongs to (a cluster can
        have several hulls due to periodic images).
        """
        # build and store convex hulls for each cluster (unapply PBCs!?)
        self.logger.debug("Computing Delaunay for each hull (unapplying PBCs)")
        cellDims = lattice.cellDims
        hulls = []
        hullClusterMap = []
        for clusterIndex, cluster in enumerate(clusterList):
            # appliedPBCs is filled by the C routine with the boundary
            # crossings that were unapplied
            appliedPBCs = np.zeros(7, np.int32)
            clusterPos = cluster.makeClusterPos()
            _clusters.prepareClusterToDrawHulls(len(cluster), clusterPos, cellDims, np.ones(3, np.int32), appliedPBCs,
                                                neighbourRadius)
            hulls.append(self.makeDelaunay(clusterPos))
            hullClusterMap.append(clusterIndex)
            # handle PBCs here: one extra hull per periodic image
            while max(appliedPBCs) > 0:
                tmpClusterPos = copy.deepcopy(clusterPos)
                clusters.applyPBCsToCluster(tmpClusterPos, cellDims, appliedPBCs)
                hulls.append(self.makeDelaunay(tmpClusterPos))
                hullClusterMap.append(clusterIndex)
        return hulls, hullClusterMap
    def makeDelaunay(self, clusterPos):
        """Calculate Delaunay for the given position.

        *clusterPos* is a flat array of xyz triples; it is reshaped into
        an (N, 3) point array and triangulated with scipy.
        """
        # make pts
        # TODO: C or view
        num = len(clusterPos) // 3
        pts = np.empty((num, 3), np.float64)
        for i in range(num):
            i3 = 3 * i
            pts[i][0] = clusterPos[i3]
            pts[i][1] = clusterPos[i3 + 1]
            pts[i][2] = clusterPos[i3 + 2]
        # make hull
        hull = Delaunay(pts)
        return hull
| [
"six.moves.range",
"copy.deepcopy",
"numpy.ones",
"numpy.zeros",
"numpy.empty",
"scipy.spatial.Delaunay"
] | [((4793, 4814), 'numpy.empty', 'np.empty', (['(2)', 'np.int32'], {}), '(2, np.int32)\n', (4801, 4814), True, 'import numpy as np\n'), ((5339, 5355), 'six.moves.range', 'range', (['NClusters'], {}), '(NClusters)\n', (5344, 5355), False, 'from six.moves import range\n'), ((5532, 5547), 'six.moves.range', 'range', (['NVisible'], {}), '(NVisible)\n', (5537, 5547), False, 'from six.moves import range\n'), ((10707, 10737), 'numpy.empty', 'np.empty', (['(num, 3)', 'np.float64'], {}), '((num, 3), np.float64)\n', (10715, 10737), True, 'import numpy as np\n'), ((10755, 10765), 'six.moves.range', 'range', (['num'], {}), '(num)\n', (10760, 10765), False, 'from six.moves import range\n'), ((10959, 10972), 'scipy.spatial.Delaunay', 'Delaunay', (['pts'], {}), '(pts)\n', (10967, 10972), False, 'from scipy.spatial import Delaunay\n'), ((6752, 6786), 'numpy.zeros', 'np.zeros', (['lattice.NAtoms', 'np.int32'], {}), '(lattice.NAtoms, np.int32)\n', (6760, 6786), True, 'import numpy as np\n'), ((6805, 6833), 'numpy.empty', 'np.empty', (['(1, 3)', 'np.float64'], {}), '((1, 3), np.float64)\n', (6813, 6833), True, 'import numpy as np\n'), ((6855, 6876), 'six.moves.range', 'range', (['lattice.NAtoms'], {}), '(lattice.NAtoms)\n', (6860, 6876), False, 'from six.moves import range\n'), ((8023, 8044), 'six.moves.range', 'range', (['lattice.NAtoms'], {}), '(lattice.NAtoms)\n', (8028, 8044), False, 'from six.moves import range\n'), ((9740, 9761), 'numpy.zeros', 'np.zeros', (['(7)', 'np.int32'], {}), '(7, np.int32)\n', (9748, 9761), True, 'import numpy as np\n'), ((9909, 9929), 'numpy.ones', 'np.ones', (['(3)', 'np.int32'], {}), '(3, np.int32)\n', (9916, 9929), True, 'import numpy as np\n'), ((10242, 10267), 'copy.deepcopy', 'copy.deepcopy', (['clusterPos'], {}), '(clusterPos)\n', (10255, 10267), False, 'import copy\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import cv2, imageio, os
def check_dir(out_dir):
    """Create *out_dir* (including missing parents) if it does not exist.

    Uses os.makedirs(..., exist_ok=True), which avoids the race between
    the previous exists() check and mkdir(), and also supports nested
    paths (os.mkdir failed when the parent directory was missing).
    """
    os.makedirs(out_dir, exist_ok=True)
def imread(img_path, norm=True):
    """Load an image from *img_path*; scale to [0, 1] floats when *norm* is True."""
    img = imageio.imread(img_path)
    if norm:
        return img / 255.
    return img
def imsave(save_path, img):
    """Write *img* to *save_path* (thin wrapper around imageio.imsave)."""
    imageio.imsave(save_path, img)
def mimsave(save_path, imgs, fps=10):
    """Write the frame sequence *imgs* as an animation (e.g. a GIF) at *fps* frames/s."""
    imageio.mimsave(save_path, imgs, fps=fps)
def imresize(img, h, w, method='LINEAR'):
    """Resize *img* to height *h* and width *w*.

    Parameters
    ----------
    img : ndarray
        Image to resize.
    h, w : int
        Target height and width (note cv2.resize takes (w, h)).
    method : str
        'LINEAR' for bilinear or 'NEAREST' for nearest-neighbour.

    Raises
    ------
    ValueError
        For an unknown *method* (previously the function silently
        returned None).
    """
    if method == 'LINEAR':
        return cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
    if method == 'NEAREST':
        return cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST)
    raise ValueError("unsupported resize method: %r" % method)
def center_crop(img):
    """Crop the largest centered square from *img* (first two dims are H, W)."""
    h, w = img.shape[:2]
    if h < w:
        # wide image: trim columns
        start = w // 2 - h // 2
        return img[:, start: start + h]
    # tall (or square) image: trim rows
    start = h // 2 - w // 2
    return img[start: start + w, :]
def imnorm(img):
    """Map pixel values from [0, 1] to [-1, 1]."""
    centered = img - 0.5
    return centered * 2.
def imdenorm(img):
    """Map pixel values from [-1, 1] back to [0, 1]."""
    shifted = img + 1.
    return shifted / 2.
def montage(imgs):
    """Tile N images of shape (N, H, W, C) into a roughly square grid.

    Unused grid cells are left at 1.0 (white background).
    """
    N, H, W, C = imgs.shape
    grid = int(np.ceil(np.sqrt(N)))
    canvas = np.ones((grid * H, grid * W, C))
    for idx, img in enumerate(imgs):
        row, col = divmod(idx, grid)
        canvas[row * H: (row + 1) * H, col * W: (col + 1) * W] = img
    return canvas
def lerp_np(start, end, ratio):
    """Linearly interpolate from *start* to *end*; *ratio* is clipped to [0, 1]."""
    t = np.clip(ratio, 0.0, 1.0)
    return start + (end - start) * t
def ceil(x):
    """Smallest integer >= x, returned as a Python int."""
    rounded_up = np.ceil(x)
    return int(rounded_up)
def floor(x):
    """Largest integer <= x, returned as a Python int."""
    rounded_down = np.floor(x)
    return int(rounded_down)
def get_nonzero_region(img):
    """Bounding box of the nonzero pixels of a 2-D array.

    Returns (y0, y1, y_center, x0, x1, x_center). Falls back to the full
    image extent when *img* is entirely zero.
    """
    non = np.nonzero(img)
    if len(non[0]) == 0 or len(non[1]) == 0:
        # no foreground pixels: use the whole image
        y0, y1 = 0, img.shape[0] - 1
        x0, x1 = 0, img.shape[1] - 1
    else:
        y0, y1 = np.min(non[0]), np.max(non[0])
        x0, x1 = np.min(non[1]), np.max(non[1])
    return y0, y1, (y0 + y1) // 2, x0, x1, (x0 + x1) // 2
def array_to_list(array):
    # Flatten *array* (any shape) and return its elements as a plain Python list.
    return np.reshape(array, [array.size]).tolist() | [
"numpy.clip",
"os.path.exists",
"numpy.ceil",
"numpy.sqrt",
"numpy.ones",
"imageio.imsave",
"numpy.reshape",
"numpy.floor",
"numpy.max",
"os.mkdir",
"numpy.nonzero",
"numpy.min",
"imageio.imread",
"imageio.mimsave",
"cv2.resize"
] | [((187, 211), 'imageio.imread', 'imageio.imread', (['img_path'], {}), '(img_path)\n', (201, 211), False, 'import cv2, imageio, os\n'), ((278, 308), 'imageio.imsave', 'imageio.imsave', (['save_path', 'img'], {}), '(save_path, img)\n', (292, 308), False, 'import cv2, imageio, os\n'), ((349, 390), 'imageio.mimsave', 'imageio.mimsave', (['save_path', 'imgs'], {'fps': 'fps'}), '(save_path, imgs, fps=fps)\n', (364, 390), False, 'import cv2, imageio, os\n'), ((961, 987), 'numpy.ones', 'np.ones', (['(n * H, n * W, C)'], {}), '((n * H, n * W, C))\n', (968, 987), True, 'import numpy as np\n'), ((1311, 1326), 'numpy.nonzero', 'np.nonzero', (['img'], {}), '(img)\n', (1321, 1326), True, 'import numpy as np\n'), ((101, 124), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (115, 124), False, 'import cv2, imageio, os\n'), ((128, 145), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (136, 145), False, 'import cv2, imageio, os\n'), ((467, 522), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(img, (w, h), interpolation=cv2.INTER_LINEAR)\n', (477, 522), False, 'import cv2, imageio, os\n'), ((1222, 1232), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (1229, 1232), True, 'import numpy as np\n'), ((1261, 1272), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (1269, 1272), True, 'import numpy as np\n'), ((1443, 1457), 'numpy.min', 'np.min', (['non[0]'], {}), '(non[0])\n', (1449, 1457), True, 'import numpy as np\n'), ((1465, 1479), 'numpy.max', 'np.max', (['non[0]'], {}), '(non[0])\n', (1471, 1479), True, 'import numpy as np\n'), ((1487, 1501), 'numpy.min', 'np.min', (['non[1]'], {}), '(non[1])\n', (1493, 1501), True, 'import numpy as np\n'), ((1509, 1523), 'numpy.max', 'np.max', (['non[1]'], {}), '(non[1])\n', (1515, 1523), True, 'import numpy as np\n'), ((559, 615), 'cv2.resize', 'cv2.resize', (['img', '(w, h)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img, (w, h), interpolation=cv2.INTER_NEAREST)\n', 
(569, 615), False, 'import cv2, imageio, os\n'), ((937, 947), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (944, 947), True, 'import numpy as np\n'), ((1171, 1195), 'numpy.clip', 'np.clip', (['ratio', '(0.0)', '(1.0)'], {}), '(ratio, 0.0, 1.0)\n', (1178, 1195), True, 'import numpy as np\n'), ((1614, 1645), 'numpy.reshape', 'np.reshape', (['array', '[array.size]'], {}), '(array, [array.size])\n', (1624, 1645), True, 'import numpy as np\n')] |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 733 $"
import random
import numpy
from population import Population
class MPA(Population):
    """The Mulliken population analysis."""

    def __init__(self, *args):
        """Forward all arguments to Population, tagging the log as "MPA"."""
        # Call the __init__ method of the superclass.
        super(MPA, self).__init__(logname="MPA", *args)

    def __str__(self):
        """Return a string representation of the object."""
        # Bug fix: the original returned "MPA of" % (self.data) -- a format
        # string with no %s placeholder, which raises
        # TypeError("not all arguments converted during string formatting").
        return "MPA of %s" % (self.data)

    def __repr__(self):
        """Return a representation of the object."""
        return 'MPA("%s")' % (self.data)

    def calculate(self, indices=None, fupdate=0.05):
        """Perform a Mulliken population analysis.

        Returns False (after logging an error) when a required attribute
        (mocoeffs, an overlap matrix, nbasis, homos) is missing or when
        partitioning fails; True on success.  On success it creates
        self.aoresults (per-spin MO x basis arrays) and self.fragcharges.
        """
        # Do we have the needed attributes in the data object?
        if not hasattr(self.data, "mocoeffs"):
            self.logger.error("Missing mocoeffs")
            return False
        if not (hasattr(self.data, "aooverlaps")
                or hasattr(self.data, "fooverlaps")):
            self.logger.error("Missing overlap matrix")
            return False
        if not hasattr(self.data, "nbasis"):
            self.logger.error("Missing nbasis")
            return False
        if not hasattr(self.data, "homos"):
            self.logger.error("Missing homos")
            return False

        # Determine number of steps, and whether process involves beta orbitals.
        self.logger.info("Creating attribute aoresults: [array[2]]")
        nbasis = self.data.nbasis
        alpha = len(self.data.mocoeffs[0])
        self.aoresults = [numpy.zeros([alpha, nbasis], "d")]
        nstep = alpha
        unrestricted = (len(self.data.mocoeffs) == 2)
        if unrestricted:
            beta = len(self.data.mocoeffs[1])
            self.aoresults.append(numpy.zeros([beta, nbasis], "d"))
            nstep += beta

        # Intialize progress if available.
        if self.progress:
            self.progress.initialize(nstep)

        step = 0
        for spin in range(len(self.data.mocoeffs)):
            for i in range(len(self.data.mocoeffs[spin])):
                # fupdate throttles how often the progress bar is refreshed.
                if self.progress and random.random() < fupdate:
                    self.progress.update(step, "Mulliken Population Analysis")

                # X_{ai} = \sum_b c_{ai} c_{bi} S_{ab}
                #        = c_{ai} \sum_b c_{bi} S_{ab}
                #        = c_{ai} C(i) \cdot S(a)
                # X = C(i) * [C(i) \cdot S]
                # C(i) is 1xn and S is nxn, result of matrix mult is 1xn
                ci = self.data.mocoeffs[spin][i]
                if hasattr(self.data, "aooverlaps"):
                    temp = numpy.dot(ci, self.data.aooverlaps)
                elif hasattr(self.data, "fooverlaps"):
                    temp = numpy.dot(ci, self.data.fooverlaps)
                self.aoresults[spin][i] = numpy.multiply(ci, temp).astype("d")

                step += 1

        if self.progress:
            self.progress.update(nstep, "Done")

        retval = super(MPA, self).partition(indices)
        if not retval:
            self.logger.error("Error in partitioning results")
            return False

        # Create array for mulliken charges.
        self.logger.info("Creating fragcharges: array[1]")
        size = len(self.fragresults[0][0])
        self.fragcharges = numpy.zeros([size], "d")

        # Sum contributions of all occupied orbitals (up to and including
        # the HOMO of each spin).
        for spin in range(len(self.fragresults)):
            for i in range(self.data.homos[spin] + 1):
                temp = numpy.reshape(self.fragresults[spin][i], (size,))
                self.fragcharges = numpy.add(self.fragcharges, temp)

        # Restricted calculations have doubly-occupied orbitals.
        if not unrestricted:
            self.fragcharges = numpy.multiply(self.fragcharges, 2)

        return True
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    import mpa
    doctest.testmod(mpa, verbose=False)
| [
"numpy.multiply",
"numpy.reshape",
"numpy.add",
"numpy.zeros",
"numpy.dot",
"doctest.testmod",
"random.random"
] | [((3897, 3932), 'doctest.testmod', 'doctest.testmod', (['mpa'], {'verbose': '(False)'}), '(mpa, verbose=False)\n', (3912, 3932), False, 'import doctest, mpa\n'), ((3432, 3456), 'numpy.zeros', 'numpy.zeros', (['[size]', '"""d"""'], {}), "([size], 'd')\n", (3443, 3456), False, 'import numpy\n'), ((1685, 1718), 'numpy.zeros', 'numpy.zeros', (['[alpha, nbasis]', '"""d"""'], {}), "([alpha, nbasis], 'd')\n", (1696, 1718), False, 'import numpy\n'), ((3784, 3819), 'numpy.multiply', 'numpy.multiply', (['self.fragcharges', '(2)'], {}), '(self.fragcharges, 2)\n', (3798, 3819), False, 'import numpy\n'), ((1902, 1934), 'numpy.zeros', 'numpy.zeros', (['[beta, nbasis]', '"""d"""'], {}), "([beta, nbasis], 'd')\n", (1913, 1934), False, 'import numpy\n'), ((3596, 3645), 'numpy.reshape', 'numpy.reshape', (['self.fragresults[spin][i]', '(size,)'], {}), '(self.fragresults[spin][i], (size,))\n', (3609, 3645), False, 'import numpy\n'), ((3681, 3714), 'numpy.add', 'numpy.add', (['self.fragcharges', 'temp'], {}), '(self.fragcharges, temp)\n', (3690, 3714), False, 'import numpy\n'), ((2755, 2790), 'numpy.dot', 'numpy.dot', (['ci', 'self.data.aooverlaps'], {}), '(ci, self.data.aooverlaps)\n', (2764, 2790), False, 'import numpy\n'), ((2244, 2259), 'random.random', 'random.random', ([], {}), '()\n', (2257, 2259), False, 'import random\n'), ((2873, 2908), 'numpy.dot', 'numpy.dot', (['ci', 'self.data.fooverlaps'], {}), '(ci, self.data.fooverlaps)\n', (2882, 2908), False, 'import numpy\n'), ((2952, 2976), 'numpy.multiply', 'numpy.multiply', (['ci', 'temp'], {}), '(ci, temp)\n', (2966, 2976), False, 'import numpy\n')] |
import pandas as pd
import os
import numpy as np
from fastprogress import master_bar,progress_bar
from tqdm import tqdm
import argparse
def getArgs():
    """Parse command-line options for the traffic-data aggregation script.

    Returns an argparse.Namespace with input_dir (processed-data root),
    city (dataset city name) and channel (raster channel index).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_dir', type=str, default='/data/data20180901/processed/')
    parser.add_argument('-c', '--city', type=str, default='Berlin')
    parser.add_argument('-ch', '--channel', type=int, default=0)
    return parser.parse_args()
if __name__=='__main__':
# Build per-month and per-(month, weekday) mean rasters for one city/channel.
args = getArgs()
root = args.input_dir
city = args.city
channel = args.channel
# Metadata CSV lists the available dates per city and the train/test split.
meta = pd.read_csv(f'{root}/meta.csv')
meta=meta[meta['set']!='test']
meta=meta[[city,'set']].sort_values(city)
# Dates are stored as YYYYMMDD integers -- derive month and weekday columns.
meta['date']=pd.to_datetime(meta[city],format='%Y%m%d')
meta['month']=meta.date.dt.month
meta['weekday']=meta.date.dt.weekday
for m,df in tqdm(meta.groupby('month')):
d_dict={}
for d in df[city]:
t_arr=[]
# 288 five-minute slots per day; one .npy raster per slot.
for t in tqdm(range(288)):
t=str(t).zfill(3)
# assumes each slot file is a 495x436 raster -- TODO confirm
t_arr.append(np.load(f'{root}/{city}/{d}/{t}/{channel}.npy').reshape(1,495,436))
t_arr=np.concatenate(t_arr).reshape(1,288,495,436)
d_dict[d]=t_arr
# Month-level average over all days of this month.
a=list(d_dict.values())
a=np.concatenate(a)
a=a.mean(0)
for t in tqdm(range(288)):
try:
os.makedirs(f'{root}/{city}/Month/{m}/{str(t).zfill(3)}/')
except:
# NOTE(review): bare except silently ignores all errors, not only
# "directory already exists" -- consider os.makedirs(..., exist_ok=True).
pass
np.save(f'{root}/{city}/Month/{m}/{str(t).zfill(3)}/{channel}.npy',a[t])
# Weekday-level average within this month.
for w,ddf in tqdm(df.groupby('weekday')):
a=[]
for t in ddf[city]:
a.append(d_dict[t])
a = np.concatenate(a)
np.save(f'{root}/{city}/Week/{m}/{w}/{str(t).zfill(3)}/{channel}.npy',a[t]) | [
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.concatenate",
"numpy.load",
"pandas.to_datetime"
] | [((164, 189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (187, 189), False, 'import argparse\n'), ((598, 629), 'pandas.read_csv', 'pd.read_csv', (['f"""{root}/meta.csv"""'], {}), "(f'{root}/meta.csv')\n", (609, 629), True, 'import pandas as pd\n'), ((728, 771), 'pandas.to_datetime', 'pd.to_datetime', (['meta[city]'], {'format': '"""%Y%m%d"""'}), "(meta[city], format='%Y%m%d')\n", (742, 771), True, 'import pandas as pd\n'), ((1268, 1285), 'numpy.concatenate', 'np.concatenate', (['a'], {}), '(a)\n', (1282, 1285), True, 'import numpy as np\n'), ((1710, 1727), 'numpy.concatenate', 'np.concatenate', (['a'], {}), '(a)\n', (1724, 1727), True, 'import numpy as np\n'), ((1153, 1174), 'numpy.concatenate', 'np.concatenate', (['t_arr'], {}), '(t_arr)\n', (1167, 1174), True, 'import numpy as np\n'), ((1067, 1114), 'numpy.load', 'np.load', (['f"""{root}/{city}/{d}/{t}/{channel}.npy"""'], {}), "(f'{root}/{city}/{d}/{t}/{channel}.npy')\n", (1074, 1114), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
#
# In this file test glue layer between Fortran 95 code and module generated by f2py from numpy.
# Here and bellow `native solution` means solution that was obtained with native fortran code
# and in opposite `glue solution` means native solution transmitted through glue layer.
#
# (c) <NAME> <<EMAIL>>, 2016
#
from __future__ import print_function
from nls import nls
from numpy import array, arange, diag, abs, max
from scipy.sparse import diags
from subprocess import Popen, PIPE
VERBOSE = True  # when True, print the mismatching elements in the comparison report
FLOAT_THRESHOLD = 1e-3  # absolute tolerance for element-wise solution comparison
NX, ITERS, ORDER = 200, 10000, 5  # passed to nls.solve_nls -- presumably grid points, iterations, scheme order; confirm against nls docs
DT, DX = 1.0e-6, 1.0e-3  # time step and space step for the solver
def test_make_banded_matrix():
# Placeholder: only announces the test; no checks implemented yet.
print('Test make_banded_matrix():')
def test_banded_matvec():
# Placeholder: only announces the test; no checks implemented yet.
print('Test banded_matvec():')
def test_make_laplacian():
# Compare the banded Laplacian built by the Fortran glue (nls.make_laplacian)
# against a reference 4th-order finite-difference operator assembled here
# with scipy.sparse.diags.
print('Test make_laplacian():')
n, m, h = 8, 8, 0.1
r = arange(0, h * m - 1e-10, h)
# First- and second-derivative 5-point stencils (not yet scaled by h).
D1 = diags((1, -8, 0, 8, -1), (-2, -1, 0, 1, 2), shape=(m, m)).toarray()
D2 = diags((-1, 16, -30, 16, -1), (-2, -1, 0, 1, 2), shape=(m, m)).toarray()
# Boundary corrections at r = 0 (avoid division by zero; one-sided stencil).
r[0] = 1
D1[0, :] = 0
D1[1, 1] += 1
D2[0, :3] = [-60, 64, -4]
D2[1, 1] += -1
# Reference operator: radial Laplacian D2/(24 h^2) + (1/r) D1/(12 h).
D = D2 / (24 * h ** 2) + diag(1.0 / r).dot(D1) / (12 * h)
L = nls.make_laplacian(n, 5, h)
tolerance = 5.0e-5
# Upper diagonals (offsets +2, +1, 0); NOTE: xrange is Python 2 only.
for k in xrange(0, 3):
err = max(abs(diag(D, 2 - k) - L[k, 2 - k:]))
print('Diagonal # ', 2 - k, ': ', err)
if err >= tolerance:
print(diag(D, 2 - k))
print(L[k, 2 - k:])
# Lower diagonals (offsets -1, -2).
for k in xrange(1, 3):
err = max(abs(diag(D, -k) - L[2 + k, :-k]))
print('Diagonal #', -k, ': ', err)
if err >= tolerance:
print(diag(D, -k))
print(L[2 + k, :-k])
def test_runge_kutta():
"""Call runge_kutta() fortran subroutine directly through f2py wrapper layer.
"""
print('Test runge_kutta():')
# Constant initial condition on an 8-point grid.
u0 = array([1, 1, 1, 1, 1, 1, 1, 1])
op = nls.make_laplacian(8, 5, 0.01)
# Arguments presumably: dt, t0, dx, initial state, operator, steps -- confirm
# against the Fortran signature.
u = nls.runge_kutta(0.00001, 0.0, 0.01, u0, op, 10)
print(u)
def get_native_solution():
    """Run the native Fortran solver binary and parse its stdout as floats."""
    proc = Popen(['./solve'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    stdout, _ = proc.communicate()
    return array(list(map(float, stdout.split())))
def get_glue_solution():
    """Solve the NLS equation through the f2py glue layer and return |u|^2."""
    psi = nls.solve_nls(DT, DX, NX, ORDER, ITERS)
    return abs(psi * psi.conj())
def compare_solutions():
    """Compare the native Fortran solution with the glue-layer solution.

    Returns a tuple (is_equal, different): is_equal is True when every
    element agrees within FLOAT_THRESHOLD, and different lists the indices
    of the mismatching elements.
    """
    native = get_native_solution()
    # Bug fix: the original called get_native_solution() a second time here,
    # so the glue layer was never exercised and the test always passed.
    glue = get_glue_solution()
    agreement = abs(native - glue) < FLOAT_THRESHOLD
    # Indices where the two solutions disagree (replaces the py2-only
    # reduce() accumulation with an equivalent comprehension).
    different = [index for index, same in enumerate(agreement) if not same]
    return all(agreement), different
def report_solution_comparison(is_equal, different, native=None, glue=None):
    """Print the outcome of a solution comparison.

    is_equal -- True when the solutions agree element-wise.
    different -- indices of the mismatching elements.
    native, glue -- optional solution arrays; when both are supplied, their
    mismatching elements are printed too (backward-compatible addition).
    """
    print('Equal:', is_equal)
    if VERBOSE and not is_equal:
        print('Next elements are different:')
        print(different)
        # Bug fix: the original referenced globals `native` and `glue` that
        # are not defined anywhere, raising NameError on any mismatch; they
        # are now optional parameters.
        if native is not None and glue is not None:
            print('Native:\n', native[different])
            print('Glue:\n', glue[different])
def test_glue_layer():
    """Run native fortran code and its python bindings in order to compare
    these solutions and test their equality.
    """
    print('Test glue layer:')
    equal, mismatches = compare_solutions()
    report_solution_comparison(equal, mismatches)
def test():
    """Run every test of the glue-layer suite, in order."""
    for case in (test_make_banded_matrix,
                 test_banded_matvec,
                 test_make_laplacian,
                 test_runge_kutta,
                 test_glue_layer):
        case()
if __name__ == '__main__':
# Execute the whole suite when run as a script.
test()
| [
"numpy.abs",
"nls.nls.make_laplacian",
"scipy.sparse.diags",
"subprocess.Popen",
"numpy.diag",
"numpy.array",
"nls.nls.runge_kutta",
"nls.nls.solve_nls",
"numpy.arange"
] | [((846, 873), 'numpy.arange', 'arange', (['(0)', '(h * m - 1e-10)', 'h'], {}), '(0, h * m - 1e-10, h)\n', (852, 873), False, 'from numpy import array, arange, diag, abs, max\n'), ((1200, 1227), 'nls.nls.make_laplacian', 'nls.make_laplacian', (['n', '(5)', 'h'], {}), '(n, 5, h)\n', (1218, 1227), False, 'from nls import nls\n'), ((1852, 1883), 'numpy.array', 'array', (['[1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1])\n', (1857, 1883), False, 'from numpy import array, arange, diag, abs, max\n'), ((1893, 1923), 'nls.nls.make_laplacian', 'nls.make_laplacian', (['(8)', '(5)', '(0.01)'], {}), '(8, 5, 0.01)\n', (1911, 1923), False, 'from nls import nls\n'), ((1932, 1977), 'nls.nls.runge_kutta', 'nls.runge_kutta', (['(1e-05)', '(0.0)', '(0.01)', 'u0', 'op', '(10)'], {}), '(1e-05, 0.0, 0.01, u0, op, 10)\n', (1947, 1977), False, 'from nls import nls\n'), ((2031, 2087), 'subprocess.Popen', 'Popen', (["['./solve']"], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE'}), "(['./solve'], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n", (2036, 2087), False, 'from subprocess import Popen, PIPE\n'), ((2210, 2249), 'nls.nls.solve_nls', 'nls.solve_nls', (['DT', 'DX', 'NX', 'ORDER', 'ITERS'], {}), '(DT, DX, NX, ORDER, ITERS)\n', (2223, 2249), False, 'from nls import nls\n'), ((2390, 2408), 'numpy.abs', 'abs', (['(native - glue)'], {}), '(native - glue)\n', (2393, 2408), False, 'from numpy import array, arange, diag, abs, max\n'), ((883, 940), 'scipy.sparse.diags', 'diags', (['(1, -8, 0, 8, -1)', '(-2, -1, 0, 1, 2)'], {'shape': '(m, m)'}), '((1, -8, 0, 8, -1), (-2, -1, 0, 1, 2), shape=(m, m))\n', (888, 940), False, 'from scipy.sparse import diags\n'), ((960, 1021), 'scipy.sparse.diags', 'diags', (['(-1, 16, -30, 16, -1)', '(-2, -1, 0, 1, 2)'], {'shape': '(m, m)'}), '((-1, 16, -30, 16, -1), (-2, -1, 0, 1, 2), shape=(m, m))\n', (965, 1021), False, 'from scipy.sparse import diags\n'), ((1429, 1443), 'numpy.diag', 'diag', (['D', '(2 - k)'], {}), '(D, 2 - k)\n', (1433, 1443), 
False, 'from numpy import array, arange, diag, abs, max\n'), ((1649, 1660), 'numpy.diag', 'diag', (['D', '(-k)'], {}), '(D, -k)\n', (1653, 1660), False, 'from numpy import array, arange, diag, abs, max\n'), ((1158, 1171), 'numpy.diag', 'diag', (['(1.0 / r)'], {}), '(1.0 / r)\n', (1162, 1171), False, 'from numpy import array, arange, diag, abs, max\n'), ((1300, 1314), 'numpy.diag', 'diag', (['D', '(2 - k)'], {}), '(D, 2 - k)\n', (1304, 1314), False, 'from numpy import array, arange, diag, abs, max\n'), ((1526, 1537), 'numpy.diag', 'diag', (['D', '(-k)'], {}), '(D, -k)\n', (1530, 1537), False, 'from numpy import array, arange, diag, abs, max\n')] |
#
# Analytics server
#
import pickle
import jsonpickle
import platform
import json
import io
import os
import sys
import pika
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
import datetime
sns.set()
from sklearn.metrics import r2_score, median_absolute_error, mean_absolute_error
from sklearn.metrics import median_absolute_error, mean_squared_error, mean_squared_log_error
from scipy.optimize import minimize
# import statsmodels.tsa.api as smt
# import statsmodels.api as sm
# from tqdm import tqdm_notebook
import warnings
warnings.filterwarnings('ignore')
from itertools import product
from analytics_handler import analytics_handler
rabbitMQHost = os.getenv("RABBITMQ_SERVICE_HOST") or "localhost"
analytics_db = analytics_handler()
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the mean absolute percentage error (MAPE) between two arrays."""
    relative_errors = np.abs((y_true - y_pred) / y_true)
    return np.mean(relative_errors) * 100
hostname = platform.node()
def plot_moving_average(series, field, window=10, plot_intervals=False, scale=1.96, filename='moving_average.png'):
# Plot a rolling-mean trend of `series` (a pandas Series) and return the
# rendered figure as an in-memory PNG (io.BytesIO, seeked to 0).
# NOTE(review): the `filename` parameter is never used in this body.
rolling_mean = series.rolling(window=window).mean()
plt.figure(figsize=(17,8))
plt.title('Moving average - {}\n window size = {}'.format(field, window))
plt.plot(rolling_mean, 'g', label='Rolling mean trend')
#Plot confidence intervals for smoothed values
if plot_intervals:
# Band half-width = MAE + scale * std of the residuals; scale=1.96
# corresponds to a ~95% band under a normal assumption.
mae = mean_absolute_error(series[window:], rolling_mean[window:])
deviation = np.std(series[window:] - rolling_mean[window:])
lower_bound = rolling_mean - (mae + scale * deviation)
upper_bound = rolling_mean + (mae + scale * deviation)
plt.plot(upper_bound, 'r--', label='Upper bound / Lower bound')
plt.plot(lower_bound, 'r--')
plt.plot(series[window:], label='Actual values')
plt.legend(loc='best')
plt.grid(True)
# Serialize the current figure into a PNG byte buffer for the caller.
img_bytes = io.BytesIO()
plt.savefig(img_bytes, format='png')
img_bytes.seek(0)
return img_bytes
def exponential_smoothing(series, alpha):
    """Simple exponential smoothing of `series` with smoothing factor `alpha`.

    Returns a plain list of the same length as the input; the first output
    value equals the first input value.  An empty input yields [].
    """
    smoothed = [series[0]] if len(series) > 0 else []
    for n in range(1, len(series)):
        # s_n = alpha * y_n + (1 - alpha) * s_{n-1}
        smoothed.append(alpha * series[n] + (1 - alpha) * smoothed[n - 1])
    return smoothed
def plot_exponential_smoothing(series, field, alphas=[0.05,0.3], filename='exponential_smoothing.png'):
# Plot the series together with its exponential smoothing at each alpha and
# return the figure as an in-memory PNG (io.BytesIO, seeked to 0).
# NOTE(review): `alphas` is a mutable default argument (harmless here since
# it is never mutated) and `filename` is never used.
plt.figure(figsize=(17, 8))
for alpha in alphas:
plt.plot(exponential_smoothing(series, alpha), label="Alpha {}".format(alpha))
plt.plot(series.values, "c", label = "Actual")
plt.legend(loc="best")
plt.axis('tight')
plt.title('Exponential Smoothing - {}'.format(field))
plt.grid(True)
# Serialize the current figure into a PNG byte buffer for the caller.
img_bytes = io.BytesIO()
plt.savefig(img_bytes, format='png')
img_bytes.seek(0)
return img_bytes
def double_exponential_smoothing(series, alpha, beta):
    """Holt's linear (double exponential) smoothing with a one-step forecast.

    alpha smooths the level, beta smooths the trend.  Returns a list one
    element longer than the input: the final entry is the forecast.
    Requires at least two input values (series[1] is read on the first pass).
    """
    result = [series[0]]
    for n in range(1, len(series) + 1):
        if n == 1:
            # Initial level and trend from the first two observations.
            level, trend = series[0], series[1] - series[0]
        # Past the end of the data, forecast from the previous output.
        value = result[-1] if n >= len(series) else series[n]
        previous_level = level
        level = alpha * value + (1 - alpha) * (level + trend)
        trend = beta * (level - previous_level) + (1 - beta) * trend
        result.append(level + trend)
    return result
def plot_double_exponential_smoothing(series, field, alphas=[0.9,0.02], betas=[0.9,0.02], filename='double_exponential_smoothing.png'):
# Plot the series together with Holt smoothing for every (alpha, beta)
# combination and return the figure as an in-memory PNG (io.BytesIO).
# NOTE(review): `alphas`/`betas` are mutable defaults (never mutated) and
# `filename` is never used.
plt.figure(figsize=(17, 8))
for alpha in alphas:
for beta in betas:
plt.plot(double_exponential_smoothing(series, alpha, beta), label="Alpha {}, beta {}".format(alpha, beta))
plt.plot(series.values, label = "Actual")
plt.legend(loc="best")
plt.axis('tight')
plt.title('Double Exponential Smoothing - {}'.format(field))
plt.grid(True)
# Serialize the current figure into a PNG byte buffer for the caller.
img_bytes = io.BytesIO()
plt.savefig(img_bytes, format='png')
img_bytes.seek(0)
return img_bytes
# def tsplot(y, field, lags=30, figsize=(12, 7), syle='bmh', filename='ts_plot.png'):
# if not isinstance(y, pd.Series):
# y = pd.Series(y)
# with plt.style.context(style='bmh'):
# fig = plt.figure(figsize=figsize)
# layout = (2,2)
# ts_ax = plt.subplot2grid(layout, (0,0), colspan=2)
# acf_ax = plt.subplot2grid(layout, (1,0))
# pacf_ax = plt.subplot2grid(layout, (1,1))
# y.plot(ax=ts_ax)
# p_value = sm.tsa.stattools.adfuller(y)[1]
# ts_ax.set_title('Time Series Analysis Plots - {}\n Dickey-Fuller: p={0:.5f}'.format(field, p_value))
# smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)
# smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)
# plt.tight_layout()
# plt.savefig(filename)
# with Image.open(filename) as image:
# img_bytes = io.BytesIO(image)
# return img_bytes
# def optimize_SARIMA(y, parameters_list, d, D, s):
# """
# Return dataframe with parameters and corresponding AIC
# parameters_list - list with (p, q, P, Q) tuples
# d - integration order
# D - seasonal integration order
# s - length of season
# """
# results = []
# best_aic = float('inf')
# for param in tqdm_notebook(parameters_list):
# try: model = sm.tsa.statespace.SARIMAX(y, order=(param[0], d, param[1]),
# seasonal_order=(param[2], D, param[3], s)).fit(disp=-1)
# except:
# continue
# aic = model.aic
# #Save best model, AIC and parameters
# if aic < best_aic:
# best_model = model
# best_aic = aic
# best_param = param
# results.append([param, model.aic])
# result_table = pd.DataFrame(results)
# result_table.columns = ['parameters', 'aic']
# #Sort in ascending order, lower AIC is better
# result_table = result_table.sort_values(by='aic', ascending=True).reset_index(drop=True)
# return result_table
def receive():
# Consume analytics jobs from the 'toAnalytics' direct exchange and, for
# each requested field, render the requested plot; the resulting PNG
# buffers are stored per job id.  Blocks forever in start_consuming().
    rabbitMQ = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitMQHost))
    rabbitMQChannel = rabbitMQ.channel()
    rabbitMQChannel.exchange_declare(exchange='toAnalytics',exchange_type='direct')
# Exclusive, auto-named queue bound to routing key 'data'.
    result = rabbitMQChannel.queue_declare(queue='', exclusive=True)
    queue_name = result.method.queue
    rabbitMQChannel.queue_bind(exchange='toAnalytics', queue=queue_name, routing_key='data')
    # rabbitMQChannel.queue_declare(queue="queue_toAnalytics", durable=True)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    def callback(ch, method, properties, body):
# NOTE(review): pickle.load expects a file-like object, but
# jsonpickle.decode returns a Python object -- verify this actually
# deserializes; also unpickling message bodies is unsafe for
# untrusted input.
        unpickled = pickle.load(jsonpickle.decode(body))
        sendLogs('{} - ANALYTICS {}- Received job for analytics {} at RabbitMQ Host-{}'.format(datetime.datetime.now(), hostname, unpickled, rabbitMQHost))
# Job payload: id, dataframe, operation name and its parameters.
        jobid = unpickled['job_id']
        df = unpickled['data']
        operation = unpickled['op']
        params = unpickled['params']
        fieldset = params['fields']
        result = []
# One plot per requested field (columns of df, by position).
        for i in range(len(fieldset)):
            if(operation == 'moving_average'):
                result.append(plot_moving_average(df.iloc[:, i], fieldset[i], window=int(params['window'])))
            if(operation == 'exponential_smoothing'):
                result.append(plot_exponential_smoothing(df.iloc[:, i], fieldset[i], alphas =[params['alpha1'], params['alpha2']]))
            if(operation == 'double_exponential_smoothing'):
                result.append(plot_double_exponential_smoothing(df.iloc[:, i], fieldset[i] ,alphas=[params['alpha1'], params['alpha2']], betas=[params['beta1'], params['beta2']]))
            if(operation == 'ts_plot'):
# NOTE(review): tsplot is only defined in commented-out code above,
# so this branch would raise NameError if triggered -- confirm.
                result.append(tsplot(df.iloc[:, i], fieldset[i], lags=params['lags']))
        # print(result)
# Persist the list of PNG buffers keyed by job id.
        analytics_db.jobid_result_db.set(jobid,jsonpickle.encode(result))
        # ch.basic_ack(delivery_tag=method.delivery_tag)
        # if(operation == 'sarima_stats'):
        #     ps = range(0, 4)
        #     d = 1
        #     qs = range(0, 4)
        #     Ps = range(0, 4)
        #     D = 1
        #     Qs = range(0, 4)
        #     s = 4
        #     #Create a list with all possible combinations of parameters
        #     parameters = product(ps, qs, Ps, Qs)
        #     parameters_list = list(parameters)
        #     result_table = optimize_SARIMA(df.iloc[:, i], parameters_list, d, D, s)
        #     p, q, P, Q = result_table.parameters[0]
        #     best_model = sm.tsa.statespace.SARIMAX(df.iloc[:, i], order=(p, d, q),
        #     seasonal_order=(P, D, Q, s)).fit(disp=-1)
        #     print(best_model.summary())
        #     print(best_model.predict(start=df.iloc[:, i].shape[0], end=df.iloc[:, i].shape[0] + 5))
        #     print(mean_absolute_percentage_error(df.iloc[:, i][s+d:], best_model.fittedvalues[s+d:]))
# Deliver one message at a time; messages are auto-acknowledged.
    rabbitMQChannel.basic_qos(prefetch_count=1)
    rabbitMQChannel.basic_consume(queue=queue_name, on_message_callback=callback, auto_ack=True)
    # rabbitMQChannel.basic_consume(queue="queue_toAnalytics", on_message_callback=callback)
    rabbitMQChannel.start_consuming()
    print("done")
def sendLogs(logdata):
    """Publish one log line to the 'logs' direct exchange on RabbitMQ."""
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host=rabbitMQHost))
    channel = connection.channel()
    channel.exchange_declare(exchange='logs', exchange_type='direct')
    channel.basic_publish(exchange='logs', routing_key='logdata', body=logdata)
    connection.close()
if __name__ == '__main__':
# Run the consumer loop; exit cleanly on Ctrl+C.
    try:
        receive()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
os._exit(0) | [
"matplotlib.pyplot.grid",
"platform.node",
"io.BytesIO",
"sys.exit",
"seaborn.set",
"matplotlib.pyplot.plot",
"jsonpickle.decode",
"analytics_handler.analytics_handler",
"matplotlib.pyplot.axis",
"sklearn.metrics.mean_absolute_error",
"numpy.abs",
"matplotlib.pyplot.savefig",
"pika.Connectio... | [((258, 267), 'seaborn.set', 'sns.set', ([], {}), '()\n', (265, 267), True, 'import seaborn as sns\n'), ((598, 631), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (621, 631), False, 'import warnings\n'), ((791, 810), 'analytics_handler.analytics_handler', 'analytics_handler', ([], {}), '()\n', (808, 810), False, 'from analytics_handler import analytics_handler\n'), ((937, 952), 'platform.node', 'platform.node', ([], {}), '()\n', (950, 952), False, 'import platform\n'), ((726, 760), 'os.getenv', 'os.getenv', (['"""RABBITMQ_SERVICE_HOST"""'], {}), "('RABBITMQ_SERVICE_HOST')\n", (735, 760), False, 'import os\n'), ((1136, 1163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(17, 8)'}), '(figsize=(17, 8))\n', (1146, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1300), 'matplotlib.pyplot.plot', 'plt.plot', (['rolling_mean', '"""g"""'], {'label': '"""Rolling mean trend"""'}), "(rolling_mean, 'g', label='Rolling mean trend')\n", (1253, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1822), 'matplotlib.pyplot.plot', 'plt.plot', (['series[window:]'], {'label': '"""Actual values"""'}), "(series[window:], label='Actual values')\n", (1782, 1822), True, 'import matplotlib.pyplot as plt\n'), ((1827, 1849), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1837, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1868), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1862, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1897), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1895, 1897), False, 'import io\n'), ((1902, 1938), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_bytes'], {'format': '"""png"""'}), "(img_bytes, format='png')\n", (1913, 1938), True, 'import matplotlib.pyplot as plt\n'), ((2340, 2367), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(17, 8)'}), '(figsize=(17, 
8))\n', (2350, 2367), True, 'import matplotlib.pyplot as plt\n'), ((2484, 2528), 'matplotlib.pyplot.plot', 'plt.plot', (['series.values', '"""c"""'], {'label': '"""Actual"""'}), "(series.values, 'c', label='Actual')\n", (2492, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2535, 2557), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2545, 2557), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2579), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (2570, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2642, 2656), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2650, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2673, 2685), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2683, 2685), False, 'import io\n'), ((2690, 2726), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img_bytes'], {'format': '"""png"""'}), "(img_bytes, format='png')\n", (2701, 2726), True, 'import matplotlib.pyplot as plt\n'), ((3434, 3461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(17, 8)'}), '(figsize=(17, 8))\n', (3444, 3461), True, 'import matplotlib.pyplot as plt\n'), ((3637, 3676), 'matplotlib.pyplot.plot', 'plt.plot', (['series.values'], {'label': '"""Actual"""'}), "(series.values, label='Actual')\n", (3645, 3676), True, 'import matplotlib.pyplot as plt\n'), ((3683, 3705), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3693, 3705), True, 'import matplotlib.pyplot as plt\n'), ((3710, 3727), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (3718, 3727), True, 'import matplotlib.pyplot as plt\n'), ((3797, 3811), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3805, 3811), True, 'import matplotlib.pyplot as plt\n'), ((3828, 3840), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3838, 3840), False, 'import io\n'), ((3845, 3881), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['img_bytes'], {'format': '"""png"""'}), "(img_bytes, format='png')\n", (3856, 3881), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1453), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['series[window:]', 'rolling_mean[window:]'], {}), '(series[window:], rolling_mean[window:])\n', (1413, 1453), False, 'from sklearn.metrics import r2_score, median_absolute_error, mean_absolute_error\n'), ((1474, 1521), 'numpy.std', 'np.std', (['(series[window:] - rolling_mean[window:])'], {}), '(series[window:] - rolling_mean[window:])\n', (1480, 1521), True, 'import numpy as np\n'), ((1656, 1719), 'matplotlib.pyplot.plot', 'plt.plot', (['upper_bound', '"""r--"""'], {'label': '"""Upper bound / Lower bound"""'}), "(upper_bound, 'r--', label='Upper bound / Lower bound')\n", (1664, 1719), True, 'import matplotlib.pyplot as plt\n'), ((1728, 1756), 'matplotlib.pyplot.plot', 'plt.plot', (['lower_bound', '"""r--"""'], {}), "(lower_bound, 'r--')\n", (1736, 1756), True, 'import matplotlib.pyplot as plt\n'), ((6127, 6171), 'pika.ConnectionParameters', 'pika.ConnectionParameters', ([], {'host': 'rabbitMQHost'}), '(host=rabbitMQHost)\n', (6152, 6171), False, 'import pika\n'), ((9415, 9459), 'pika.ConnectionParameters', 'pika.ConnectionParameters', ([], {'host': 'rabbitMQHost'}), '(host=rabbitMQHost)\n', (9440, 9459), False, 'import pika\n'), ((883, 917), 'numpy.abs', 'np.abs', (['((y_true - y_pred) / y_true)'], {}), '((y_true - y_pred) / y_true)\n', (889, 917), True, 'import numpy as np\n'), ((6722, 6745), 'jsonpickle.decode', 'jsonpickle.decode', (['body'], {}), '(body)\n', (6739, 6745), False, 'import jsonpickle\n'), ((7921, 7946), 'jsonpickle.encode', 'jsonpickle.encode', (['result'], {}), '(result)\n', (7938, 7946), False, 'import jsonpickle\n'), ((6842, 6865), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6863, 6865), False, 'import datetime\n'), ((9910, 9921), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9918, 9921), False, 'import 
sys\n'), ((9961, 9972), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (9969, 9972), False, 'import os\n')] |
import numpy as np
import math
import random
import h5py
A=0.01
class act:
    """Activation functions, used as a plain namespace: act.sigmoid(x) etc.

    All functions accept scalars or numpy arrays; leaky_relu/elu use the
    module-level slope constant A.
    """
    def sigmoid(x):
        # Logistic sigmoid.
        return 1.0 / (1.0 + np.exp(-x))
    def tanh(x):
        # Hyperbolic tangent, written out via exponentials.
        return (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))
    def none(x):
        # Identity (no activation).
        return x
    def relu(x):
        # Rectified linear unit: x for positive inputs, 0 otherwise.
        return (x > 0) * x
    def leaky_relu(x):
        # Slope A on the non-positive side.
        return np.where(x > 0, x, A * x)
    def elu(x):
        # Exponential linear unit with scale A on the non-positive side.
        return np.where(x > 0, x, A * (np.exp(x) - 1))
class der:
    """Derivatives of the activation functions in `act`, same naming."""
    def sigmoid(x):
        # d/dx sigmoid = sigmoid * (1 - sigmoid).
        s = act.sigmoid(x)
        return s * (1 - s)
    def tanh(x):
        # d/dx tanh = 1 - tanh^2.
        return 1 - math.pow(act.tanh(x), 2)
    def none(x):
        # Identity has unit slope everywhere.
        return 1
    def relu(x):
        # 1 for positive inputs, 0 otherwise.
        return (x > 0) * 1
    def leaky_relu(x):
        # Slope A on the non-positive side, 1 elsewhere.
        return np.where(x <= 0, A, 1)
    def elu(x):
        # Slope A*exp(x) on the non-positive side, 1 elsewhere.
        return np.where(x > 0, 1, A * np.exp(x))
class loss:
    """Loss functions; each takes (true values, fitted values)."""
    def ms(tru, fed):
        # Half of the summed squared error (gradient is simply fed - tru).
        return loss.square(tru, fed) / 2
    def square(tru, fed):
        # Summed squared error.
        return np.sum((fed - tru) ** 2)
class tensor:
    """Minimal wrapper around a numpy array used by the bp network."""
    def __init__(self, x: np):
        # Backing array (stored by reference, not copied).
        self.a = x
    def lens(self):
        """Number of elements along the first axis."""
        return len(self.a)
    def set(self, x, y):
        """Assign value y at index x."""
        self.a[x] = y
    def get(self, x):
        """Return the element at index x."""
        return self.a[x]
    def gets(self):
        """Return the underlying array itself (not a copy)."""
        return self.a
class bp:
# A small fully-connected feed-forward network with hand-rolled
# backpropagation.  Inputs and outputs are wrapped in `tensor` objects.
def __init__(self,sizes,ders,acts,insizes=1,los=loss.ms):# constructor
self.size=sizes# layer sizes of the network
self.derx=ders# per-layer derivative functions
self.actx=acts# per-layer activation functions
self.losx=los# loss function
self.w=np.random.rand(len(sizes),max(sizes),len(sizes),max(sizes),insizes)# weights
self.b=np.random.rand(len(sizes),max(sizes),len(sizes),max(sizes),insizes)# biases
self.insize=insizes# trailing dimension of the weight/bias arrays
def getw(self,x,y,xx,yy):# fetch one weight entry
return self.w[x,y,xx,yy]
def getb(self,x,y,xx,yy):# fetch one bias entry
return self.b[x,y,xx,yy]
def getws(self):# full weight array
return self.w
def getbs(self):# full bias array
return self.b
def los(self,input,out):# loss of the network on one sample
a=[]# raw arrays extracted from the target tensors
for i in out:
a.append(i.gets())
a=np.array(a)
return self.losx(a,self.feedforward(input))
def feedforward(self,input):# forward pass
ass=input# alias of input
a=[[]]# per-layer activation outputs
ab=[[]]# per-layer derivative outputs
a.append([])
ab.append([])
for i in ass:
a[0].append(i.gets())
ab[0].append(i.gets())
for i in range(len(self.size)-1):
a.append([])
ab.append([])
for j in range(self.size[i+1]):
jj=np.array(0)# activation accumulator
js=np.array(0)# derivative accumulator
# NOTE(review): the loop variable below shadows the `js` accumulator
# just initialised, so the derivative sum actually starts from the
# loop index -- looks like a bug; confirm intent.
for js in range(self.size[i]):
jj=jj+self.actx[i]((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
js=js+self.derx[i]((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
a[i+1].append(jj)
ab[i+1].append(js)
self.feed=np.array(a)# cached activations for backprop
self.feeds=np.array(ab)# cached derivatives for backprop
return np.array(a[len(a)-2])
def feedforwards(self,input):# forward pass, multi-output (tensor) variant
ass=input
a=[[]]
ab=[[]]
a.append([])
ab.append([])
for i in ass:
a[0].append(i.gets())
ab[0].append(i.gets())
for i in range(len(self.size)-1):
a.append([])
ab.append([])
for j in range(self.size[i+1]):
jj=np.array(0)
js=np.array(0)
# NOTE(review): unlike feedforward(), actx/derx are called without the
# layer index [i] here -- inconsistent; confirm which form is intended.
for js in range(self.size[i]):
jj=jj+self.actx((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
js=js+self.derx((a[i][js]*self.w[i][js][i+1][j])+self.b[i][js][i+1][j])
a[i+1].append(jj)
ab[i+1].append(js)
self.feed=np.array(a)
self.feeds=np.array(ab)
ret=[]
# Wrap each output row back into a tensor object.
for i in a[len(a)-2]:
ret.append(tensor(np.array(i)))
return ret
def op(self,input,out,eta):# optimize over a batch with learning rate eta
for i in range(len(input)):
self.feedforward(input[i])
self.updatew(input,self.back(out[i]),eta)
def back(self,out):# backward pass: accumulate per-layer deltas
b=np.zeros((len(self.size),max(self.size),self.insize))
out=np.array(out)
ij=len(self.size)-1# index of the last layer
ib=0
for i in out:
# Output-layer delta: -(activation - target) * derivative.
a1=np.array(self.feed[len(self.feed)-2])
a2=np.array(i.gets())
a3=np.array(self.feeds[len(self.feed)-2])
a5=-(a1-a2)*a3
for ix in range(len(a5)):
b[0][ix]=a5[ix][0]
ib=ib+1
# Propagate the output delta one layer back through weights and biases.
for i in range(self.size[len(self.size)-1]):
for j in range(self.size[len(self.size)-2]):
b[1][i+j-1]=b[0][i]*self.w[len(self.w)-1][j][len(self.w)-2][i]+self.b[len(self.w)-1][j][len(self.w)-2][i]
# Walk the remaining layers from back to front.
for i in range(len(self.size)-1):
for j in range(self.size[ij-i]*self.size[ij-i-1]):
b1=b[i+1][int(j/self.size[ij-i-1])]
b3=self.feeds[ij-i][int(j/self.size[ij-i-1])]
b[i+1][int(j/self.size[ij-i-1])]=np.array(b1*b3)
for js in range(self.size[ij-i-1]):
b[i+1][js]=(b[i+1][int(j/self.size[ij-i-1])]*self.w[ij-i][js][ij-i-1][int(j/self.size[ij-i-1])-1]+self.b[ij-i][js][ij-i-1][int(j/self.size[ij-i-1])-1])
return b
def updatew(self,input,bs,eta):# apply weight/bias updates from the deltas
for i in range(len(self.size)-1):
for j in range(self.size[i]):
for js in range(self.size[i+1]):
self.w[i][j][i+1][js]=np.array(self.w[i][j][i+1][js])+np.array(self.feed[i][j]*bs[i+1][js]*eta)
self.b[i][j][i+1][js]=np.array(self.b[i][j][i+1][js])-np.array(eta*bs[i+1][js])
# | [
"numpy.where",
"numpy.sum",
"numpy.array",
"numpy.exp"
] | [((334, 360), 'numpy.where', 'np.where', (['(x <= 0)', '(x * A)', 'x'], {}), '(x <= 0, x * A, x)\n', (342, 360), True, 'import numpy as np\n'), ((686, 707), 'numpy.where', 'np.where', (['(x > 0)', '(1)', 'A'], {}), '(x > 0, 1, A)\n', (694, 707), True, 'import numpy as np\n'), ((888, 912), 'numpy.sum', 'np.sum', (['((fed - tru) ** 2)'], {}), '((fed - tru) ** 2)\n', (894, 912), True, 'import numpy as np\n'), ((1901, 1912), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1909, 1912), True, 'import numpy as np\n'), ((2780, 2791), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2788, 2791), True, 'import numpy as np\n'), ((2812, 2824), 'numpy.array', 'np.array', (['ab'], {}), '(ab)\n', (2820, 2824), True, 'import numpy as np\n'), ((3637, 3648), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (3645, 3648), True, 'import numpy as np\n'), ((3669, 3681), 'numpy.array', 'np.array', (['ab'], {}), '(ab)\n', (3677, 3681), True, 'import numpy as np\n'), ((4063, 4076), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (4071, 4076), True, 'import numpy as np\n'), ((827, 851), 'numpy.sum', 'np.sum', (['((fed - tru) ** 2)'], {}), '((fed - tru) ** 2)\n', (833, 851), True, 'import numpy as np\n'), ((124, 134), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (130, 134), True, 'import numpy as np\n'), ((169, 178), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (175, 178), True, 'import numpy as np\n'), ((179, 189), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (185, 189), True, 'import numpy as np\n'), ((192, 201), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (198, 201), True, 'import numpy as np\n'), ((202, 212), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (208, 212), True, 'import numpy as np\n'), ((756, 765), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (762, 765), True, 'import numpy as np\n'), ((2398, 2409), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (2406, 2409), True, 'import numpy as np\n'), ((2435, 2446), 'numpy.array', 'np.array', (['(0)'], {}), 
'(0)\n', (2443, 2446), True, 'import numpy as np\n'), ((3269, 3280), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3277, 3280), True, 'import numpy as np\n'), ((3301, 3312), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3309, 3312), True, 'import numpy as np\n'), ((4901, 4918), 'numpy.array', 'np.array', (['(b1 * b3)'], {}), '(b1 * b3)\n', (4909, 4918), True, 'import numpy as np\n'), ((408, 417), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (414, 417), True, 'import numpy as np\n'), ((3760, 3771), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3768, 3771), True, 'import numpy as np\n'), ((5376, 5409), 'numpy.array', 'np.array', (['self.w[i][j][i + 1][js]'], {}), '(self.w[i][j][i + 1][js])\n', (5384, 5409), True, 'import numpy as np\n'), ((5408, 5455), 'numpy.array', 'np.array', (['(self.feed[i][j] * bs[i + 1][js] * eta)'], {}), '(self.feed[i][j] * bs[i + 1][js] * eta)\n', (5416, 5455), True, 'import numpy as np\n'), ((5492, 5525), 'numpy.array', 'np.array', (['self.b[i][j][i + 1][js]'], {}), '(self.b[i][j][i + 1][js])\n', (5500, 5525), True, 'import numpy as np\n'), ((5524, 5553), 'numpy.array', 'np.array', (['(eta * bs[i + 1][js])'], {}), '(eta * bs[i + 1][js])\n', (5532, 5553), True, 'import numpy as np\n')] |
import unittest
from theano import theano, tensor as tt
import numpy as np
import pymc3 as pm
from pymc3.distributions import HalfCauchy, Normal
from pymc3 import Potential, Deterministic
from pymc3.theanof import generator
class NewModel(pm.Model):
    """A class-based pymc3 model used to test model nesting and name prefixing.

    Creates two free variables (``v1``, ``v2``), one Deterministic (``d``)
    and one Potential (``p``) on construction.
    """

    def __init__(self, name='', model=None):
        super(NewModel, self).__init__(name, model)
        # Entering __init__ of a pm.Model subclass pushes `self` onto the
        # model-context stack, so the active context must be this instance.
        assert pm.modelcontext(None) is self
        # 1) init variables with Var method
        self.Var('v1', pm.Normal.dist())
        self.v2 = pm.Normal('v2', mu=0, sd=1)
        # 2) Potentials and Deterministic variables with method too
        # be sure that names will not overlap with other same models
        pm.Deterministic('d', tt.constant(1))
        pm.Potential('p', tt.constant(1))
class DocstringModel(pm.Model):
    """A class-based pymc3 model mirroring the Model docstring example.

    Defines three Normal variables (``v1``-``v3``, with ``v3`` given a
    HalfCauchy-distributed sd), one Deterministic (``v3_sq``) and one
    Potential (``p1``).

    Args:
        mean: mean for the Normal variables (default 0).
        sd: standard deviation for ``v1``/``v2`` (default 1).
        name: optional prefix for all variable names.
        model: optional parent model; None means use/create the context model.
    """

    def __init__(self, mean=0, sd=1, name='', model=None):
        super(DocstringModel, self).__init__(name, model)
        self.Var('v1', Normal.dist(mu=mean, sd=sd))
        Normal('v2', mu=mean, sd=sd)
        # v3's sd is itself a random variable, adding a nested RV ('sd').
        Normal('v3', mu=mean, sd=HalfCauchy('sd', beta=10, testval=1.))
        Deterministic('v3_sq', self.v3 ** 2)
        Potential('p1', tt.constant(1))
class TestBaseModel(unittest.TestCase):
    """Tests for attribute handling and variable propagation in nested pymc3 models."""

    def test_setattr_properly_works(self):
        """Variables created in a named submodel register on both submodel and parent."""
        with pm.Model() as model:
            pm.Normal('v1')
            self.assertEqual(len(model.vars), 1)
            with pm.Model('sub') as submodel:
                submodel.Var('v1', pm.Normal.dist())
                self.assertTrue(hasattr(submodel, 'v1'))
                self.assertEqual(len(submodel.vars), 1)
            # the submodel's variable is also counted by the parent
            self.assertEqual(len(model.vars), 2)
            with submodel:
                submodel.Var('v2', pm.Normal.dist())
                self.assertTrue(hasattr(submodel, 'v2'))
                self.assertEqual(len(submodel.vars), 2)
            self.assertEqual(len(model.vars), 3)

    def test_context_passes_vars_to_parent_model(self):
        """Class-based submodels hand their (prefixed) variables up to the enclosing model."""
        with pm.Model() as model:
            # a set of variables is created
            NewModel()
            # another set of variables are created but with prefix 'another'
            usermodel2 = NewModel(name='another')
            # you can enter in a context with submodel
            with usermodel2:
                usermodel2.Var('v3', pm.Normal.dist())
                pm.Normal('v4')
                # this variable is created in parent model too
            self.assertIn('another_v2', model.named_vars)
            self.assertIn('another_v3', model.named_vars)
            self.assertIn('another_v3', usermodel2.named_vars)
            self.assertIn('another_v4', model.named_vars)
            self.assertIn('another_v4', usermodel2.named_vars)
            self.assertTrue(hasattr(usermodel2, 'v3'))
            self.assertTrue(hasattr(usermodel2, 'v2'))
            self.assertTrue(hasattr(usermodel2, 'v4'))
            # When you create a class based model you should follow some rules
            with model:
                m = NewModel('one_more')
            # the same object is reachable under its local and prefixed names
            self.assertTrue(m.d is model['one_more_d'])
            self.assertTrue(m['d'] is model['one_more_d'])
            self.assertTrue(m['one_more_d'] is model['one_more_d'])
class TestNested(unittest.TestCase):
    """Tests for nesting, naming, duplicate detection, and root tracking of pymc3 models."""

    def test_nest_context_works(self):
        """Entering a nested model makes it the active context; the parent collects its variables."""
        with pm.Model() as m:
            new = NewModel()
            with new:
                # inside the inner `with`, the submodel is the context
                self.assertTrue(
                    pm.modelcontext(None) is new
                )
            # after leaving it, the enclosing model is the context again
            self.assertTrue(
                pm.modelcontext(None) is m
            )
            self.assertIn('v1', m.named_vars)
            self.assertIn('v2', m.named_vars)

    def test_named_context(self):
        """A named submodel prefixes its variables in the parent's namespace."""
        with pm.Model() as m:
            NewModel(name='new')
        self.assertIn('new_v1', m.named_vars)
        self.assertIn('new_v2', m.named_vars)

    def test_docstring_example1(self):
        """An unnamed DocstringModel exposes its variables without a prefix."""
        usage1 = DocstringModel()
        self.assertIn('v1', usage1.named_vars)
        self.assertIn('v2', usage1.named_vars)
        self.assertIn('v3', usage1.named_vars)
        self.assertIn('v3_sq', usage1.named_vars)
        # BUG FIX: this was `assertTrue(len(...), 1)`, which treats the 1 as
        # the failure message and passes for any non-empty list. assertEqual
        # actually verifies that exactly one potential was registered.
        self.assertEqual(len(usage1.potentials), 1)

    def test_docstring_example2(self):
        """A named DocstringModel exposes all of its variables under the prefix."""
        with pm.Model() as model:
            DocstringModel(name='prefix')
        self.assertIn('prefix_v1', model.named_vars)
        self.assertIn('prefix_v2', model.named_vars)
        self.assertIn('prefix_v3', model.named_vars)
        self.assertIn('prefix_v3_sq', model.named_vars)
        # BUG FIX: same assertTrue(x, msg) misuse as above — use assertEqual.
        self.assertEqual(len(model.potentials), 1)

    def test_duplicates_detection(self):
        """Creating two submodels with the same prefix raises ValueError."""
        with pm.Model():
            DocstringModel(name='prefix')
            self.assertRaises(ValueError, DocstringModel, name='prefix')

    def test_model_root(self):
        """`root` always points at the outermost model, even for nested models."""
        with pm.Model() as model:
            self.assertTrue(model is model.root)
            with pm.Model() as sub:
                self.assertTrue(model is sub.root)
class TestScaling(unittest.TestCase):
    """Tests for total_size-based density scaling of observed variables."""

    def test_density_scaling(self):
        """Doubling total_size at fixed observed data doubles the model logp."""
        with pm.Model() as model1:
            Normal('n', observed=[[1]], total_size=1)
            p1 = theano.function([], model1.logpt)

        with pm.Model() as model2:
            Normal('n', observed=[[1]], total_size=2)
            p2 = theano.function([], model2.logpt)
        self.assertEqual(p1() * 2, p2())

    # NOTE(review): method name misspells "generator"; kept as-is because
    # renaming would change the public test identifier.
    def test_density_scaling_with_genarator(self):
        """Generators of different minibatch sizes but equal total_size yield equal densities."""
        # We have different size generators

        def gen1():
            i = 0
            while True:
                yield np.ones((10, 100)) * i
                i += 1

        def gen2():
            i = 0
            while True:
                yield np.ones((20, 100)) * i
                i += 1

        # We have same size models
        with pm.Model() as model1:
            Normal('n', observed=gen1(), total_size=100)
            p1 = theano.function([], model1.logpt)

        with pm.Model() as model2:
            gen_var = generator(gen2())
            Normal('n', observed=gen_var, total_size=100)
            p2 = theano.function([], model2.logpt)

        # We want densities to be equal
        for _ in range(10):
            np.testing.assert_almost_equal(p1(), p2())
# Done
| [
"pymc3.Deterministic",
"theano.tensor.constant",
"pymc3.distributions.Normal.dist",
"pymc3.Normal.dist",
"pymc3.distributions.HalfCauchy",
"numpy.ones",
"pymc3.modelcontext",
"pymc3.distributions.Normal",
"theano.theano.function",
"pymc3.Model",
"pymc3.Normal"
] | [((497, 524), 'pymc3.Normal', 'pm.Normal', (['"""v2"""'], {'mu': '(0)', 'sd': '(1)'}), "('v2', mu=0, sd=1)\n", (506, 524), True, 'import pymc3 as pm\n'), ((961, 989), 'pymc3.distributions.Normal', 'Normal', (['"""v2"""'], {'mu': 'mean', 'sd': 'sd'}), "('v2', mu=mean, sd=sd)\n", (967, 989), False, 'from pymc3.distributions import HalfCauchy, Normal\n'), ((1070, 1106), 'pymc3.Deterministic', 'Deterministic', (['"""v3_sq"""', '(self.v3 ** 2)'], {}), "('v3_sq', self.v3 ** 2)\n", (1083, 1106), False, 'from pymc3 import Potential, Deterministic\n'), ((364, 385), 'pymc3.modelcontext', 'pm.modelcontext', (['None'], {}), '(None)\n', (379, 385), True, 'import pymc3 as pm\n'), ((461, 477), 'pymc3.Normal.dist', 'pm.Normal.dist', ([], {}), '()\n', (475, 477), True, 'import pymc3 as pm\n'), ((692, 706), 'theano.tensor.constant', 'tt.constant', (['(1)'], {}), '(1)\n', (703, 706), True, 'from theano import theano, tensor as tt\n'), ((734, 748), 'theano.tensor.constant', 'tt.constant', (['(1)'], {}), '(1)\n', (745, 748), True, 'from theano import theano, tensor as tt\n'), ((924, 951), 'pymc3.distributions.Normal.dist', 'Normal.dist', ([], {'mu': 'mean', 'sd': 'sd'}), '(mu=mean, sd=sd)\n', (935, 951), False, 'from pymc3.distributions import HalfCauchy, Normal\n'), ((1131, 1145), 'theano.tensor.constant', 'tt.constant', (['(1)'], {}), '(1)\n', (1142, 1145), True, 'from theano import theano, tensor as tt\n'), ((1245, 1255), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (1253, 1255), True, 'import pymc3 as pm\n'), ((1278, 1293), 'pymc3.Normal', 'pm.Normal', (['"""v1"""'], {}), "('v1')\n", (1287, 1293), True, 'import pymc3 as pm\n'), ((1916, 1926), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (1924, 1926), True, 'import pymc3 as pm\n'), ((3192, 3202), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (3200, 3202), True, 'import pymc3 as pm\n'), ((3578, 3588), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (3586, 3588), True, 'import pymc3 as pm\n'), ((4089, 4099), 'pymc3.Model', 'pm.Model', 
([], {}), '()\n', (4097, 4099), True, 'import pymc3 as pm\n'), ((4472, 4482), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (4480, 4482), True, 'import pymc3 as pm\n'), ((4644, 4654), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (4652, 4654), True, 'import pymc3 as pm\n'), ((4890, 4900), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (4898, 4900), True, 'import pymc3 as pm\n'), ((4924, 4965), 'pymc3.distributions.Normal', 'Normal', (['"""n"""'], {'observed': '[[1]]', 'total_size': '(1)'}), "('n', observed=[[1]], total_size=1)\n", (4930, 4965), False, 'from pymc3.distributions import HalfCauchy, Normal\n'), ((4983, 5016), 'theano.theano.function', 'theano.function', (['[]', 'model1.logpt'], {}), '([], model1.logpt)\n', (4998, 5016), False, 'from theano import theano, tensor as tt\n'), ((5031, 5041), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5039, 5041), True, 'import pymc3 as pm\n'), ((5065, 5106), 'pymc3.distributions.Normal', 'Normal', (['"""n"""'], {'observed': '[[1]]', 'total_size': '(2)'}), "('n', observed=[[1]], total_size=2)\n", (5071, 5106), False, 'from pymc3.distributions import HalfCauchy, Normal\n'), ((5124, 5157), 'theano.theano.function', 'theano.function', (['[]', 'model2.logpt'], {}), '([], model2.logpt)\n', (5139, 5157), False, 'from theano import theano, tensor as tt\n'), ((5605, 5615), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5613, 5615), True, 'import pymc3 as pm\n'), ((5701, 5734), 'theano.theano.function', 'theano.function', (['[]', 'model1.logpt'], {}), '([], model1.logpt)\n', (5716, 5734), False, 'from theano import theano, tensor as tt\n'), ((5749, 5759), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (5757, 5759), True, 'import pymc3 as pm\n'), ((5823, 5868), 'pymc3.distributions.Normal', 'Normal', (['"""n"""'], {'observed': 'gen_var', 'total_size': '(100)'}), "('n', observed=gen_var, total_size=100)\n", (5829, 5868), False, 'from pymc3.distributions import HalfCauchy, Normal\n'), ((5886, 5919), 'theano.theano.function', 
'theano.function', (['[]', 'model2.logpt'], {}), '([], model2.logpt)\n', (5901, 5919), False, 'from theano import theano, tensor as tt\n'), ((1023, 1061), 'pymc3.distributions.HalfCauchy', 'HalfCauchy', (['"""sd"""'], {'beta': '(10)', 'testval': '(1.0)'}), "('sd', beta=10, testval=1.0)\n", (1033, 1061), False, 'from pymc3.distributions import HalfCauchy, Normal\n'), ((1360, 1375), 'pymc3.Model', 'pm.Model', (['"""sub"""'], {}), "('sub')\n", (1368, 1375), True, 'import pymc3 as pm\n'), ((2286, 2301), 'pymc3.Normal', 'pm.Normal', (['"""v4"""'], {}), "('v4')\n", (2295, 2301), True, 'import pymc3 as pm\n'), ((4731, 4741), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (4739, 4741), True, 'import pymc3 as pm\n'), ((1424, 1440), 'pymc3.Normal.dist', 'pm.Normal.dist', ([], {}), '()\n', (1438, 1440), True, 'import pymc3 as pm\n'), ((1666, 1682), 'pymc3.Normal.dist', 'pm.Normal.dist', ([], {}), '()\n', (1680, 1682), True, 'import pymc3 as pm\n'), ((2252, 2268), 'pymc3.Normal.dist', 'pm.Normal.dist', ([], {}), '()\n', (2266, 2268), True, 'import pymc3 as pm\n'), ((3405, 3426), 'pymc3.modelcontext', 'pm.modelcontext', (['None'], {}), '(None)\n', (3420, 3426), True, 'import pymc3 as pm\n'), ((3313, 3334), 'pymc3.modelcontext', 'pm.modelcontext', (['None'], {}), '(None)\n', (3328, 3334), True, 'import pymc3 as pm\n'), ((5379, 5397), 'numpy.ones', 'np.ones', (['(10, 100)'], {}), '((10, 100))\n', (5386, 5397), True, 'import numpy as np\n'), ((5510, 5528), 'numpy.ones', 'np.ones', (['(20, 100)'], {}), '((20, 100))\n', (5517, 5528), True, 'import numpy as np\n')] |
"""
Test SharedMutationJoiningSolver in Cassiopeia.solver.
"""
import unittest
from functools import partial
from typing import Dict
import itertools
import networkx as nx
import numba
import numpy as np
import pandas as pd
import scipy
from cassiopeia.data.CassiopeiaTree import CassiopeiaTree
from cassiopeia.data import utilities as data_utilities
from cassiopeia.solver.SharedMutationJoiningSolver import (
SharedMutationJoiningSolver,
SharedMutationJoiningSolverWarning,
)
from cassiopeia.solver import dissimilarity_functions
from cassiopeia.solver import solver_utilities
def find_triplet_structure(triplet, T):
    """Classify which pair of a leaf triplet is most closely related in tree T.

    Args:
        triplet: Sequence of three leaf names (a, b, c).
        T: A networkx DiGraph (rooted tree) containing the three leaves.

    Returns:
        "ab", "ac", or "bc" naming the pair that shares strictly more
        ancestors than either other pair, or "-" when no pair strictly
        dominates (a tie, e.g. a polytomy).
    """
    a, b, c = triplet[0], triplet[1], triplet[2]
    # Build each ancestor set once; the original materialized throwaway
    # lists and re-converted them to sets on every intersection.
    a_ancestors = set(nx.ancestors(T, a))
    b_ancestors = set(nx.ancestors(T, b))
    c_ancestors = set(nx.ancestors(T, c))
    ab_common = len(a_ancestors & b_ancestors)
    ac_common = len(a_ancestors & c_ancestors)
    bc_common = len(b_ancestors & c_ancestors)
    structure = "-"
    if ab_common > bc_common and ab_common > ac_common:
        structure = "ab"
    elif ac_common > bc_common and ac_common > ab_common:
        structure = "ac"
    elif bc_common > ab_common and bc_common > ac_common:
        structure = "bc"
    return structure
class TestSharedMutationJoiningSolver(unittest.TestCase):
def setUp(self):
# --------------------- General NJ ---------------------
cm = pd.DataFrame.from_dict(
{
"a": [0, 1, 2],
"b": [1, 1, 2],
"c": [2, 2, 2],
"d": [1, 1, 1],
"e": [0, 0, 0],
},
orient="index",
columns=["x1", "x2", "x3"],
)
delta = pd.DataFrame.from_dict(
{
"a": [0, 2, 1, 1, 0],
"b": [2, 0, 1, 2, 0],
"c": [1, 1, 0, 0, 0],
"d": [1, 2, 0, 0, 0],
"e": [0, 0, 0, 0, 0],
},
orient="index",
columns=["a", "b", "c", "d", "e"],
)
self.basic_similarity_map = delta
self.basic_tree = CassiopeiaTree(
character_matrix=cm, dissimilarity_map=delta
)
self.smj_solver = SharedMutationJoiningSolver(
similarity_function=dissimilarity_functions.hamming_similarity_without_missing
)
self.smj_solver_no_numba = SharedMutationJoiningSolver(
similarity_function=partial(
dissimilarity_functions.cluster_dissimilarity,
dissimilarity_functions.hamming_similarity_without_missing,
)
)
# ---------------- Lineage Tracing NJ ----------------
pp_cm = pd.DataFrame.from_dict(
{
"a": [1, 2, 2],
"b": [1, 2, 1],
"c": [1, 2, 0],
"d": [2, 0, 0],
"e": [2, 0, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
self.pp_tree = CassiopeiaTree(character_matrix=pp_cm)
self.smj_solver_pp = SharedMutationJoiningSolver(
similarity_function=dissimilarity_functions.hamming_similarity_without_missing
)
# ------------- CM with Duplicates and Missing Data -----------------------
duplicates_cm = pd.DataFrame.from_dict(
{
"a": [1, -1, 0],
"b": [2, -1, 2],
"c": [2, 0, 2],
"d": [2, 0, -1],
"e": [2, 0, 2],
"f": [2, -1, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
self.duplicate_tree = CassiopeiaTree(character_matrix=duplicates_cm)
# ------------- Hamming similarity with weights ------------
priors = {0: {1: 0.5, 2: 0.5}, 1: {1: 0.2, 2: 0.8}, 2: {1: 0.9, 2: 0.1}}
self.pp_tree_priors = CassiopeiaTree(
character_matrix=pp_cm, priors=priors
)
self.smj_solver_modified_pp = SharedMutationJoiningSolver(
similarity_function=dissimilarity_functions.hamming_similarity_without_missing
)
def test_init(self):
# This should numbaize
solver = SharedMutationJoiningSolver(
similarity_function=dissimilarity_functions.hamming_similarity_without_missing
)
self.assertTrue(
isinstance(
solver.nb_similarity_function, numba.core.registry.CPUDispatcher
)
)
self.assertTrue(
isinstance(
solver._SharedMutationJoiningSolver__update_similarity_map,
numba.core.registry.CPUDispatcher,
)
)
# This shouldn't numbaize
with self.assertWarns(SharedMutationJoiningSolverWarning):
solver = SharedMutationJoiningSolver(
similarity_function=partial(
dissimilarity_functions.cluster_dissimilarity,
dissimilarity_functions.hamming_similarity_without_missing,
)
)
self.assertFalse(
isinstance(
solver.nb_similarity_function,
numba.core.registry.CPUDispatcher,
)
)
self.assertFalse(
isinstance(
solver._SharedMutationJoiningSolver__update_similarity_map,
numba.core.registry.CPUDispatcher,
)
)
def test_find_cherry(self):
cherry = self.smj_solver.find_cherry(self.basic_similarity_map.values)
delta = self.basic_similarity_map
node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])
self.assertIn((node_i, node_j), [("a", "b"), ("b", "a")])
def test_create_similarity_map(self):
character_matrix = self.pp_tree_priors.character_matrix.copy()
weights = solver_utilities.transform_priors(
self.pp_tree_priors.priors, "negative_log"
)
similarity_map = data_utilities.compute_dissimilarity_map(
character_matrix.to_numpy(),
character_matrix.shape[0],
dissimilarity_functions.hamming_similarity_without_missing,
weights,
self.pp_tree_priors.missing_state_indicator,
)
similarity_map = scipy.spatial.distance.squareform(similarity_map)
similarity_map = pd.DataFrame(
similarity_map,
index=character_matrix.index,
columns=character_matrix.index,
)
expected_similarity = -np.log(0.5) - np.log(0.8)
self.assertEqual(similarity_map.loc["a", "b"], expected_similarity)
expected_similarity = -np.log(0.1)
self.assertEqual(similarity_map.loc["a", "e"], expected_similarity)
def test_update_similarity_map_and_character_matrix(self):
nb_similarity = numba.jit(
dissimilarity_functions.hamming_similarity_without_missing,
nopython=True,
)
nb_weights = numba.typed.Dict.empty(
numba.types.int64,
numba.types.DictType(numba.types.int64, numba.types.float64),
)
cm = self.basic_tree.character_matrix.copy()
delta = self.basic_similarity_map
cherry = self.smj_solver.find_cherry(delta.values)
node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])
delta = self.smj_solver.update_similarity_map_and_character_matrix(
cm, nb_similarity, delta, (node_i, node_j), "ab", weights=nb_weights
)
expected_delta = pd.DataFrame.from_dict(
{
"ab": [0, 1, 1, 0],
"c": [1, 0, 0, 0],
"d": [1, 0, 0, 0],
"e": [0, 0, 0, 0],
},
orient="index",
columns=["ab", "c", "d", "e"],
)
for sample in expected_delta.index:
for sample2 in expected_delta.index:
self.assertEqual(
delta.loc[sample, sample2],
expected_delta.loc[sample, sample2],
)
cherry = self.smj_solver.find_cherry(delta.values)
node_i, node_j = (delta.index[cherry[0]], delta.index[cherry[1]])
delta = self.smj_solver.update_similarity_map_and_character_matrix(
cm,
nb_similarity,
delta,
(node_i, node_j),
"abc",
weights=nb_weights,
)
expected_delta = pd.DataFrame.from_dict(
{"abc": [0, 0, 0], "d": [0, 0, 0], "e": [0, 0, 0]},
orient="index",
columns=["abc", "d", "e"],
)
for sample in expected_delta.index:
for sample2 in expected_delta.index:
self.assertEqual(
delta.loc[sample, sample2],
expected_delta.loc[sample, sample2],
)
expected_cm = pd.DataFrame.from_dict(
{"abc": [0, 0, 2], "d": [1, 1, 1], "e": [0, 0, 0]},
orient="index",
columns=["x1", "x2", "x3"],
)
for sample in expected_cm.index:
for col in expected_cm.columns:
self.assertEqual(
cm.loc[sample, col], expected_cm.loc[sample, col]
)
def test_basic_solver(self):
self.smj_solver.solve(self.basic_tree)
# test that the dissimilarity map and character matrix were not altered
cm = pd.DataFrame.from_dict(
{
"a": [0, 1, 2],
"b": [1, 1, 2],
"c": [2, 2, 2],
"d": [1, 1, 1],
"e": [0, 0, 0],
},
orient="index",
columns=["x1", "x2", "x3"],
)
for i in self.basic_similarity_map.index:
for j in self.basic_similarity_map.columns:
self.assertEqual(
self.basic_similarity_map.loc[i, j],
self.basic_tree.get_dissimilarity_map().loc[i, j],
)
for i in self.basic_tree.character_matrix.index:
for j in self.basic_tree.character_matrix.columns:
self.assertEqual(
cm.loc[i, j], self.basic_tree.character_matrix.loc[i, j]
)
# test leaves exist in tree
_leaves = self.basic_tree.leaves
self.assertEqual(len(_leaves), self.basic_similarity_map.shape[0])
for _leaf in _leaves:
self.assertIn(_leaf, self.basic_similarity_map.index.values)
# test for expected number of edges
edges = list(self.basic_tree.edges)
self.assertEqual(len(edges), 8)
# test relationships between samples
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("5", "a"),
("5", "b"),
("6", "5"),
("6", "c"),
("7", "d"),
("7", "e"),
("8", "6"),
("8", "7"),
]
)
observed_tree = self.basic_tree.get_tree_topology()
triplets = itertools.combinations(["a", "b", "c", "d", "e"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, observed_tree)
self.assertEqual(expected_triplet, observed_triplet)
# compare tree distances
observed_tree = observed_tree.to_undirected()
expected_tree = expected_tree.to_undirected()
for i in range(len(_leaves)):
sample1 = _leaves[i]
for j in range(i + 1, len(_leaves)):
sample2 = _leaves[j]
self.assertEqual(
nx.shortest_path_length(observed_tree, sample1, sample2),
nx.shortest_path_length(expected_tree, sample1, sample2),
)
def test_solver_no_numba(self):
self.smj_solver_no_numba.solve(self.basic_tree)
# test that the dissimilarity map and character matrix were not altered
cm = pd.DataFrame.from_dict(
{
"a": [0, 1, 2],
"b": [1, 1, 2],
"c": [2, 2, 2],
"d": [1, 1, 1],
"e": [0, 0, 0],
},
orient="index",
columns=["x1", "x2", "x3"],
)
for i in self.basic_similarity_map.index:
for j in self.basic_similarity_map.columns:
self.assertEqual(
self.basic_similarity_map.loc[i, j],
self.basic_tree.get_dissimilarity_map().loc[i, j],
)
for i in self.basic_tree.character_matrix.index:
for j in self.basic_tree.character_matrix.columns:
self.assertEqual(
cm.loc[i, j], self.basic_tree.character_matrix.loc[i, j]
)
# test leaves exist in tree
_leaves = self.basic_tree.leaves
self.assertEqual(len(_leaves), self.basic_similarity_map.shape[0])
for _leaf in _leaves:
self.assertIn(_leaf, self.basic_similarity_map.index.values)
# test for expected number of edges
edges = list(self.basic_tree.edges)
self.assertEqual(len(edges), 8)
# test relationships between samples
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("5", "a"),
("5", "b"),
("6", "5"),
("6", "c"),
("7", "d"),
("7", "e"),
("8", "6"),
("8", "7"),
]
)
observed_tree = self.basic_tree.get_tree_topology()
triplets = itertools.combinations(["a", "b", "c", "d", "e"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, observed_tree)
self.assertEqual(expected_triplet, observed_triplet)
# compare tree distances
observed_tree = observed_tree.to_undirected()
expected_tree = expected_tree.to_undirected()
for i in range(len(_leaves)):
sample1 = _leaves[i]
for j in range(i + 1, len(_leaves)):
sample2 = _leaves[j]
self.assertEqual(
nx.shortest_path_length(observed_tree, sample1, sample2),
nx.shortest_path_length(expected_tree, sample1, sample2),
)
def test_smj_solver_weights(self):
self.smj_solver_modified_pp.solve(self.pp_tree_priors)
observed_tree = self.pp_tree_priors.get_tree_topology()
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("5", "a"),
("5", "e"),
("6", "b"),
("6", "c"),
("7", "5"),
("7", "d"),
("8", "6"),
("8", "7"),
]
)
triplets = itertools.combinations(["a", "b", "c", "d", "e"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, observed_tree)
self.assertEqual(expected_triplet, observed_triplet)
self.smj_solver_pp.solve(self.pp_tree, collapse_mutationless_edges=True)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("5", "a"),
("5", "e"),
("6", "b"),
("6", "c"),
("8", "5"),
("8", "d"),
("8", "6"),
]
)
def test_pp_solver(self):
self.smj_solver_pp.solve(self.pp_tree)
observed_tree = self.pp_tree.get_tree_topology()
pp_cm = pd.DataFrame.from_dict(
{
"a": [1, 2, 2],
"b": [1, 2, 1],
"c": [1, 2, 0],
"d": [2, 0, 0],
"e": [2, 0, 2],
},
orient="index",
columns=["x1", "x2", "x3"],
)
self.assertIsNone(self.pp_tree.get_dissimilarity_map())
for i in self.pp_tree.character_matrix.index:
for j in self.pp_tree.character_matrix.columns:
self.assertEqual(
pp_cm.loc[i, j], self.pp_tree.character_matrix.loc[i, j]
)
expected_tree = nx.DiGraph()
expected_tree.add_edges_from(
[
("5", "a"),
("5", "b"),
("6", "5"),
("6", "c"),
("7", "d"),
("7", "e"),
("8", "6"),
("8", "7"),
]
)
triplets = itertools.combinations(["a", "b", "c", "d", "e"], 3)
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, observed_tree)
self.assertEqual(expected_triplet, observed_triplet)
self.smj_solver_pp.solve(self.pp_tree, collapse_mutationless_edges=True)
observed_tree = self.pp_tree.get_tree_topology()
for triplet in triplets:
expected_triplet = find_triplet_structure(triplet, expected_tree)
observed_triplet = find_triplet_structure(triplet, observed_tree)
self.assertEqual(expected_triplet, observed_triplet)
def test_duplicate(self):
    """Verify reconstruction when missing data breaks up a duplicate pair.

    With missing entries ignored, samples that would otherwise be
    duplicates attach at different points of the tree; the observed
    topology is compared triplet-by-triplet against the expected one.
    """
    self.smj_solver_pp.solve(self.duplicate_tree)
    reconstructed = self.duplicate_tree.get_tree_topology()

    # Expected topology for the duplicate-tree input.
    reference = nx.DiGraph()
    reference.add_edges_from(
        [
            ("5", "b"),
            ("5", "c"),
            ("6", "e"),
            ("6", "f"),
            ("7", "5"),
            ("7", "6"),
            ("8", "7"),
            ("8", "d"),
            ("9", "8"),
            ("9", "a"),
        ]
    )

    leaves = ["a", "b", "c", "d", "e", "f"]
    for trio in itertools.combinations(leaves, 3):
        self.assertEqual(
            find_triplet_structure(trio, reference),
            find_triplet_structure(trio, reconstructed),
        )
# Run all test cases in this module with the unittest CLI runner when the
# file is executed directly (rather than imported by a test collector).
if __name__ == "__main__":
    unittest.main()
| [
"cassiopeia.solver.solver_utilities.transform_priors",
"cassiopeia.data.CassiopeiaTree.CassiopeiaTree",
"scipy.spatial.distance.squareform",
"numba.types.DictType",
"pandas.DataFrame",
"cassiopeia.solver.SharedMutationJoiningSolver.SharedMutationJoiningSolver",
"networkx.DiGraph",
"numpy.log",
"netw... | [((18834, 18849), 'unittest.main', 'unittest.main', ([], {}), '()\n', (18847, 18849), False, 'import unittest\n'), ((1467, 1619), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [0, 1, 2], 'b': [1, 1, 2], 'c': [2, 2, 2], 'd': [1, 1, 1], 'e': [0, 0, 0]\n }"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'a': [0, 1, 2], 'b': [1, 1, 2], 'c': [2, 2, 2], 'd':\n [1, 1, 1], 'e': [0, 0, 0]}, orient='index', columns=['x1', 'x2', 'x3'])\n", (1489, 1619), True, 'import pandas as pd\n'), ((1775, 1969), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [0, 2, 1, 1, 0], 'b': [2, 0, 1, 2, 0], 'c': [1, 1, 0, 0, 0], 'd': [1,\n 2, 0, 0, 0], 'e': [0, 0, 0, 0, 0]}"], {'orient': '"""index"""', 'columns': "['a', 'b', 'c', 'd', 'e']"}), "({'a': [0, 2, 1, 1, 0], 'b': [2, 0, 1, 2, 0], 'c': [1,\n 1, 0, 0, 0], 'd': [1, 2, 0, 0, 0], 'e': [0, 0, 0, 0, 0]}, orient=\n 'index', columns=['a', 'b', 'c', 'd', 'e'])\n", (1797, 1969), True, 'import pandas as pd\n'), ((2172, 2232), 'cassiopeia.data.CassiopeiaTree.CassiopeiaTree', 'CassiopeiaTree', ([], {'character_matrix': 'cm', 'dissimilarity_map': 'delta'}), '(character_matrix=cm, dissimilarity_map=delta)\n', (2186, 2232), False, 'from cassiopeia.data.CassiopeiaTree import CassiopeiaTree\n'), ((2282, 2394), 'cassiopeia.solver.SharedMutationJoiningSolver.SharedMutationJoiningSolver', 'SharedMutationJoiningSolver', ([], {'similarity_function': 'dissimilarity_functions.hamming_similarity_without_missing'}), '(similarity_function=dissimilarity_functions.\n hamming_similarity_without_missing)\n', (2309, 2394), False, 'from cassiopeia.solver.SharedMutationJoiningSolver import SharedMutationJoiningSolver, SharedMutationJoiningSolverWarning\n'), ((2761, 2913), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [1, 2, 2], 'b': [1, 2, 1], 'c': [1, 2, 0], 'd': [2, 0, 0], 'e': [2, 0, 2]\n }"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'a': [1, 2, 2], 'b': [1, 2, 1], 
'c': [1, 2, 0], 'd':\n [2, 0, 0], 'e': [2, 0, 2]}, orient='index', columns=['x1', 'x2', 'x3'])\n", (2783, 2913), True, 'import pandas as pd\n'), ((3076, 3114), 'cassiopeia.data.CassiopeiaTree.CassiopeiaTree', 'CassiopeiaTree', ([], {'character_matrix': 'pp_cm'}), '(character_matrix=pp_cm)\n', (3090, 3114), False, 'from cassiopeia.data.CassiopeiaTree import CassiopeiaTree\n'), ((3145, 3257), 'cassiopeia.solver.SharedMutationJoiningSolver.SharedMutationJoiningSolver', 'SharedMutationJoiningSolver', ([], {'similarity_function': 'dissimilarity_functions.hamming_similarity_without_missing'}), '(similarity_function=dissimilarity_functions.\n hamming_similarity_without_missing)\n', (3172, 3257), False, 'from cassiopeia.solver.SharedMutationJoiningSolver import SharedMutationJoiningSolver, SharedMutationJoiningSolverWarning\n'), ((3384, 3560), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [1, -1, 0], 'b': [2, -1, 2], 'c': [2, 0, 2], 'd': [2, 0, -1], 'e': [2,\n 0, 2], 'f': [2, -1, 2]}"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'a': [1, -1, 0], 'b': [2, -1, 2], 'c': [2, 0, 2],\n 'd': [2, 0, -1], 'e': [2, 0, 2], 'f': [2, -1, 2]}, orient='index',\n columns=['x1', 'x2', 'x3'])\n", (3406, 3560), True, 'import pandas as pd\n'), ((3742, 3788), 'cassiopeia.data.CassiopeiaTree.CassiopeiaTree', 'CassiopeiaTree', ([], {'character_matrix': 'duplicates_cm'}), '(character_matrix=duplicates_cm)\n', (3756, 3788), False, 'from cassiopeia.data.CassiopeiaTree import CassiopeiaTree\n'), ((3970, 4023), 'cassiopeia.data.CassiopeiaTree.CassiopeiaTree', 'CassiopeiaTree', ([], {'character_matrix': 'pp_cm', 'priors': 'priors'}), '(character_matrix=pp_cm, priors=priors)\n', (3984, 4023), False, 'from cassiopeia.data.CassiopeiaTree import CassiopeiaTree\n'), ((4084, 4196), 'cassiopeia.solver.SharedMutationJoiningSolver.SharedMutationJoiningSolver', 'SharedMutationJoiningSolver', ([], {'similarity_function': 
'dissimilarity_functions.hamming_similarity_without_missing'}), '(similarity_function=dissimilarity_functions.\n hamming_similarity_without_missing)\n', (4111, 4196), False, 'from cassiopeia.solver.SharedMutationJoiningSolver import SharedMutationJoiningSolver, SharedMutationJoiningSolverWarning\n'), ((4288, 4400), 'cassiopeia.solver.SharedMutationJoiningSolver.SharedMutationJoiningSolver', 'SharedMutationJoiningSolver', ([], {'similarity_function': 'dissimilarity_functions.hamming_similarity_without_missing'}), '(similarity_function=dissimilarity_functions.\n hamming_similarity_without_missing)\n', (4315, 4400), False, 'from cassiopeia.solver.SharedMutationJoiningSolver import SharedMutationJoiningSolver, SharedMutationJoiningSolverWarning\n'), ((5996, 6073), 'cassiopeia.solver.solver_utilities.transform_priors', 'solver_utilities.transform_priors', (['self.pp_tree_priors.priors', '"""negative_log"""'], {}), "(self.pp_tree_priors.priors, 'negative_log')\n", (6029, 6073), False, 'from cassiopeia.solver import solver_utilities\n'), ((6430, 6479), 'scipy.spatial.distance.squareform', 'scipy.spatial.distance.squareform', (['similarity_map'], {}), '(similarity_map)\n', (6463, 6479), False, 'import scipy\n'), ((6506, 6601), 'pandas.DataFrame', 'pd.DataFrame', (['similarity_map'], {'index': 'character_matrix.index', 'columns': 'character_matrix.index'}), '(similarity_map, index=character_matrix.index, columns=\n character_matrix.index)\n', (6518, 6601), True, 'import pandas as pd\n'), ((6985, 7073), 'numba.jit', 'numba.jit', (['dissimilarity_functions.hamming_similarity_without_missing'], {'nopython': '(True)'}), '(dissimilarity_functions.hamming_similarity_without_missing,\n nopython=True)\n', (6994, 7073), False, 'import numba\n'), ((7689, 7842), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'ab': [0, 1, 1, 0], 'c': [1, 0, 0, 0], 'd': [1, 0, 0, 0], 'e': [0, 0, 0, 0]}"], {'orient': '"""index"""', 'columns': "['ab', 'c', 'd', 'e']"}), "({'ab': [0, 1, 1, 0], 
'c': [1, 0, 0, 0], 'd': [1, 0, \n 0, 0], 'e': [0, 0, 0, 0]}, orient='index', columns=['ab', 'c', 'd', 'e'])\n", (7711, 7842), True, 'import pandas as pd\n'), ((8605, 8726), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'abc': [0, 0, 0], 'd': [0, 0, 0], 'e': [0, 0, 0]}"], {'orient': '"""index"""', 'columns': "['abc', 'd', 'e']"}), "({'abc': [0, 0, 0], 'd': [0, 0, 0], 'e': [0, 0, 0]},\n orient='index', columns=['abc', 'd', 'e'])\n", (8627, 8726), True, 'import pandas as pd\n'), ((9044, 9166), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'abc': [0, 0, 2], 'd': [1, 1, 1], 'e': [0, 0, 0]}"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'abc': [0, 0, 2], 'd': [1, 1, 1], 'e': [0, 0, 0]},\n orient='index', columns=['x1', 'x2', 'x3'])\n", (9066, 9166), True, 'import pandas as pd\n'), ((9593, 9745), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [0, 1, 2], 'b': [1, 1, 2], 'c': [2, 2, 2], 'd': [1, 1, 1], 'e': [0, 0, 0]\n }"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'a': [0, 1, 2], 'b': [1, 1, 2], 'c': [2, 2, 2], 'd':\n [1, 1, 1], 'e': [0, 0, 0]}, orient='index', columns=['x1', 'x2', 'x3'])\n", (9615, 9745), True, 'import pandas as pd\n'), ((10875, 10887), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10885, 10887), True, 'import networkx as nx\n'), ((11268, 11320), 'itertools.combinations', 'itertools.combinations', (["['a', 'b', 'c', 'd', 'e']", '(3)'], {}), "(['a', 'b', 'c', 'd', 'e'], 3)\n", (11290, 11320), False, 'import itertools\n'), ((12270, 12422), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [0, 1, 2], 'b': [1, 1, 2], 'c': [2, 2, 2], 'd': [1, 1, 1], 'e': [0, 0, 0]\n }"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'a': [0, 1, 2], 'b': [1, 1, 2], 'c': [2, 2, 2], 'd':\n [1, 1, 1], 'e': [0, 0, 0]}, orient='index', columns=['x1', 'x2', 'x3'])\n", (12292, 12422), True, 'import pandas as pd\n'), ((13552, 13564), 'networkx.DiGraph', 
'nx.DiGraph', ([], {}), '()\n', (13562, 13564), True, 'import networkx as nx\n'), ((13945, 13997), 'itertools.combinations', 'itertools.combinations', (["['a', 'b', 'c', 'd', 'e']", '(3)'], {}), "(['a', 'b', 'c', 'd', 'e'], 3)\n", (13967, 13997), False, 'import itertools\n'), ((14952, 14964), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (14962, 14964), True, 'import networkx as nx\n'), ((15285, 15337), 'itertools.combinations', 'itertools.combinations', (["['a', 'b', 'c', 'd', 'e']", '(3)'], {}), "(['a', 'b', 'c', 'd', 'e'], 3)\n", (15307, 15337), False, 'import itertools\n'), ((15698, 15710), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (15708, 15710), True, 'import networkx as nx\n'), ((16135, 16287), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'a': [1, 2, 2], 'b': [1, 2, 1], 'c': [1, 2, 0], 'd': [2, 0, 0], 'e': [2, 0, 2]\n }"], {'orient': '"""index"""', 'columns': "['x1', 'x2', 'x3']"}), "({'a': [1, 2, 2], 'b': [1, 2, 1], 'c': [1, 2, 0], 'd':\n [2, 0, 0], 'e': [2, 0, 2]}, orient='index', columns=['x1', 'x2', 'x3'])\n", (16157, 16287), True, 'import pandas as pd\n'), ((16758, 16770), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (16768, 16770), True, 'import networkx as nx\n'), ((17091, 17143), 'itertools.combinations', 'itertools.combinations', (["['a', 'b', 'c', 'd', 'e']", '(3)'], {}), "(['a', 'b', 'c', 'd', 'e'], 3)\n", (17113, 17143), False, 'import itertools\n'), ((18101, 18113), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (18111, 18113), True, 'import networkx as nx\n'), ((18489, 18546), 'itertools.combinations', 'itertools.combinations', (["['a', 'b', 'c', 'd', 'e', 'f']", '(3)'], {}), "(['a', 'b', 'c', 'd', 'e', 'f'], 3)\n", (18511, 18546), False, 'import itertools\n'), ((716, 734), 'networkx.ancestors', 'nx.ancestors', (['T', 'a'], {}), '(T, a)\n', (728, 734), True, 'import networkx as nx\n'), ((772, 790), 'networkx.ancestors', 'nx.ancestors', (['T', 'b'], {}), '(T, b)\n', (784, 790), True, 'import networkx as 
nx\n'), ((828, 846), 'networkx.ancestors', 'nx.ancestors', (['T', 'c'], {}), '(T, c)\n', (840, 846), True, 'import networkx as nx\n'), ((6690, 6701), 'numpy.log', 'np.log', (['(0.8)'], {}), '(0.8)\n', (6696, 6701), True, 'import numpy as np\n'), ((6809, 6820), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (6815, 6820), True, 'import numpy as np\n'), ((7193, 7253), 'numba.types.DictType', 'numba.types.DictType', (['numba.types.int64', 'numba.types.float64'], {}), '(numba.types.int64, numba.types.float64)\n', (7213, 7253), False, 'import numba\n'), ((2508, 2626), 'functools.partial', 'partial', (['dissimilarity_functions.cluster_dissimilarity', 'dissimilarity_functions.hamming_similarity_without_missing'], {}), '(dissimilarity_functions.cluster_dissimilarity,\n dissimilarity_functions.hamming_similarity_without_missing)\n', (2515, 2626), False, 'from functools import partial\n'), ((6676, 6687), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (6682, 6687), True, 'import numpy as np\n'), ((4960, 5078), 'functools.partial', 'partial', (['dissimilarity_functions.cluster_dissimilarity', 'dissimilarity_functions.hamming_similarity_without_missing'], {}), '(dissimilarity_functions.cluster_dissimilarity,\n dissimilarity_functions.hamming_similarity_without_missing)\n', (4967, 5078), False, 'from functools import partial\n'), ((11929, 11985), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['observed_tree', 'sample1', 'sample2'], {}), '(observed_tree, sample1, sample2)\n', (11952, 11985), True, 'import networkx as nx\n'), ((12007, 12063), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['expected_tree', 'sample1', 'sample2'], {}), '(expected_tree, sample1, sample2)\n', (12030, 12063), True, 'import networkx as nx\n'), ((14606, 14662), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['observed_tree', 'sample1', 'sample2'], {}), '(observed_tree, sample1, sample2)\n', (14629, 14662), True, 'import networkx as nx\n'), ((14684, 
14740), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['expected_tree', 'sample1', 'sample2'], {}), '(expected_tree, sample1, sample2)\n', (14707, 14740), True, 'import networkx as nx\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.