using SymbolicLP
using GLPK
# Soft constraints
lpb = LPBlock([:x])
addconstraints(lpb, :(soft(x < -3, 1)))
z, x, flag = lpsolve(lpb)
@assert x[1] <= -3.0
lpb = LPBlock([:x])
addconstraints(lpb, :(soft(x > 3, 1)))
z, x, flag = lpsolve(lpb)
@assert x[1] >= 3.0
lpb = LPBlock([:x])
addconstraints(lpb, :(soft(x == -5, 1)))
z, x, flag = lpsolve(lpb)
@assert abs(x[1] + 5) < sqrt(eps())
# A simple case
lpb = LPBlock([:left, :middle, :right], Expr[:(left < middle < right)])
addconstraints(lpb,
:(soft(middle-left==2*(right-middle), 5)),
:(left == 0),
:(right == 100))
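# Expected optimum: with left == 0 and right == 100 fixed, the soft constraint
# middle - left == 2*(right - middle) gives middle = 2*(100 - middle),
# i.e. 3*middle = 200, so the solution should be [0, 200/3, 100].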
lpp, chunks = lpparse(Float64, lpb)
lpd = lpeval(lpp)
z, x, flag = lpsolve(lpd)
x = x[chunks[1]]
@show x
@assert all(abs.(x .- [0, 200/3, 100]) .< sqrt(eps()))
# A more complex case: two blocks, with cross-references and a function call
lpb1 = LPBlock([:pigs, :cows, :chickens])
lpb2 = LPBlock([:stalls, :nests])
barndims = [5,10] # in units of stall dimensions
nshelves = 4
nests_per_shelf = 10
addconstraints(lpb1, :(pigs >= 0), :(cows >= 0), :(chickens >= 0))
addconstraints(lpb1, :(pigs+cows<$lpb2[stalls]), :(chickens<$lpb2[nests]))
addconstraints(lpb2, :(nests <= $nshelves*$nests_per_shelf),
:(stalls < prod($barndims)),
:(stalls >= 0),
:(nests >= 0))
addconstraints(lpb1, :(soft(pigs == 2*cows, 7)))
addobjective(lpb1, :(-80*pigs - 100*cows - 10*chickens))
lpp, chunks = lpparse(Float64, lpb1, lpb2)
lpd = lpeval(lpp)
z, x, flag = lpsolve(lpd, "Int", 1:5) #, "msg_lev", GLPK.MSG_ALL)
x1 = x[chunks[1]]
x2 = x[chunks[2]]
@show x1
@show x2
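# Mutate a captured parameter and re-evaluate: lpeval(lpp) picks up the new
# barndims value without re-parsing the blocks.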
barndims[2] = 6
lpd = lpeval(lpp)
z, x, flag = lpsolve(lpd, "Int", 1:5)
x1 = x[chunks[1]]
x2 = x[chunks[2]]
@show x1
@show x2
import numpy as np
from Scripts.PlayersProperties import defaultProperties
from Scripts.regression import Regression
import os
import sys
import time
from copy import deepcopy
class network:
    """Simulates ball and player movement for a simple 2D football game."""
def __init__(self):
self.count = 0
self.updateScore = False
self.TeamAScore = 0
self.TeamBScore = 0
self.game_status = ''
self.playersListA = ['PlayerAKeeper', 'PlayerA1', 'PlayerA2', 'PlayerA3', 'PlayerA4', 'PlayerA5',
'PlayerA6', 'PlayerA7', 'PlayerA8', 'PlayerA9', 'PlayerA10']
self.playersListB = ['PlayerBKeeper', 'PlayerB1', 'PlayerB2', 'PlayerB3', 'PlayerB4', 'PlayerB5',
'PlayerB6', 'PlayerB7', 'PlayerB8', 'PlayerB9', 'PlayerB10']
DefaultCoordinates = {
'ball': (600, 350), 'PlayerAKeeper': (40, 330), 'PlayerA1': (140, 100),
'PlayerA2': (140, 250), 'PlayerA3': (140, 400), 'PlayerA4': (140, 550),
'PlayerA5': (340, 150), 'PlayerA6': (340, 300), 'PlayerA7': (340, 450),
'PlayerA8': (500, 350), 'PlayerA9': (550, 300), 'PlayerA10': (550, 400),
'PlayerB1': (1030, 100), 'PlayerB2': (1030, 250), 'PlayerB3': (1030, 400),
'PlayerB4': (1030, 550), 'PlayerB5': (830, 150), 'PlayerB6': (830, 300),
'PlayerB7': (830, 450), 'PlayerB8': (750, 350), 'PlayerB9': (650, 300),
'PlayerB10': (650, 400), 'PlayerBKeeper': (1130, 330)
}
self.superDict = {}
# initialize ball
self.superDict['ball'] = defaultProperties()
self.superDict['ball'].setCoordinates(*DefaultCoordinates['ball'])
# initialize playersA
for k in self.playersListA:
self.superDict[str(k)] = defaultProperties()
self.superDict[str(k)].setCoordinates(*DefaultCoordinates[str(k)])
# initialize playersB
for k in self.playersListB:
self.superDict[str(k)] = deepcopy(defaultProperties())
self.superDict[str(k)].setCoordinates(*DefaultCoordinates[str(k)])
    def sortdata(self, dictData):
        # Return the dict keys ordered by ascending value.
        return sorted(dictData, key=dictData.get)
    def StraightLineDistance(self, x1, x2, y1, y2):
        # returns the Euclidean distance between (x1, y1) and (x2, y2)
        return np.hypot(x1 - x2, y1 - y2)
    def ComputeGoal(self):
        # get both keepers' locations
        x1, y1 = self.GetplayersLocation(defaultPlayer='PlayerAKeeper')
        x2, y2 = self.GetplayersLocation(defaultPlayer='PlayerBKeeper')
        ballx, bally = self.superDict['ball'].GetlastLocation()
        GoalStatus = 'Nil'
        if 20 <= ballx <= 40:
            # ball is inside team A's goal area
            if (bally > y1 + 20 or bally < y1 - 20) and 250 < bally < 450:
                self.game_status = 'Goal!!!!'
                # increment team score
                self.TeamBScore += 1
                out = True
            else:
                GoalStatus = 'PlayerAKeeper'
                out = False
        elif 1130 <= ballx <= 1180:
            # ball is inside team B's goal area
            if (bally > y2 + 20 or bally < y2 - 20) and 250 < bally < 450:
                self.game_status = 'Goal!!!!'
                # increment team score
                self.TeamAScore += 1
                out = True
            else:
                GoalStatus = 'PlayerBKeeper'
                out = False
        else:
            out = False
        return out, GoalStatus
    def GetPostBoundary(self, keeper='PlayerBKeeper'):
        # Pick a random target point inside the given keeper's goal mouth.
        ytarget = np.random.randint(250, 450)
        xtarget = 1160 if keeper == 'PlayerBKeeper' else 20
        return xtarget, ytarget
def AttemptScore(self, player, keeper, XDict, YDict):
xtarget, ytarget = self.GetPostBoundary(keeper=keeper)
xsource, ysource = self.GetplayersLocation(defaultPlayer=player)
xmoves, ymoves = self.linearRegression(
xsource, ysource, xtarget, ytarget)
# set balls trajectory
self.setplayersLocation(coordinates=(
xmoves, ymoves), defaultPlayer='ball')
        # decide whether the keeper should catch the ball or lose it
        keeperStatus = np.random.choice([True, False], p=[0.7, 0.3])
        if keeperStatus:
# set Keeper's movement to follow the ball
if keeper in self.playersListA:
self.superDict['PlayerAKeeper'].listY = [ymoves[-1]-10, ymoves[-1]-5,
ymoves[-1]+10, ymoves[-1]+5, ymoves[-1]]*10
self.superDict['PlayerAKeeper'].listX = [xmoves[-1]]*50
self.game_status = "Awesome!!! Goalkeeper caught the ball."
else:
self.superDict['PlayerBKeeper'].listY = [ymoves[-1]-10, ymoves[-1]-5,
ymoves[-1]+10, ymoves[-1]+5, ymoves[-1]]*10
self.superDict['PlayerBKeeper'].listX = [xmoves[-1]]*50
self.game_status = "Awesome!!! Goalkeeper caught the ball."
else:
currentPositionX, currentPositionY = self.GetplayersLocation(
defaultPlayer=keeper)
xbound, ybound = self.GetplayersBoundary(keeper)
xmoves, ymoves = self.linearRegression(currentPositionX, currentPositionY,
xbound, ybound)
self.setplayersLocation((xmoves, ymoves), defaultPlayer=keeper)
if keeper in list(XDict.keys()):
XDict.pop(keeper)
else:
YDict.pop(keeper)
# Re-position other players
for playee in XDict.keys():
currentPositionX, currentPositionY = XDict[playee]
xbound, ybound = self.GetplayersBoundary(playee)
xmoves, ymoves = self.linearRegression(currentPositionX, currentPositionY,
xbound, ybound)
self.setplayersLocation((xmoves, ymoves), defaultPlayer=playee)
for playee in YDict.keys():
currentPositionX, currentPositionY = YDict[playee]
xbound, ybound = self.GetplayersBoundary(playee)
xmoves, ymoves = self.linearRegression(currentPositionX, currentPositionY,
xbound, ybound)
self.setplayersLocation((xmoves, ymoves), defaultPlayer=playee)
return
def Attack(self, OpposingPlayers):
"""
Return the next player in the opposing team close enough to get the ball from the
current team.
"""
playee = 'A'
Status = False
# Get the current ball location
count = self.count
currentX, currentY = self.superDict['ball'].listX[count], self.superDict['ball'].listY[count]
EstimatedDistance = {}
for player in OpposingPlayers.keys():
# find Euclidean Distance from ball
playeeCoordinate = OpposingPlayers[player]
EstimatedDistance[player] = self.StraightLineDistance(playeeCoordinate[0],
currentX, playeeCoordinate[1], currentY)
sortedKey = self.sortdata(EstimatedDistance)[0]
if EstimatedDistance[sortedKey] <= 50:
Status = True
playee = sortedKey
else:
Status = False
playee = 'A'
return playee, Status
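    # Usage sketch (hypothetical coordinates; `net` is a network() instance):
    # the nearest opposing player within 50 px of the ball takes possession.
    #
    #   opponents = {'PlayerB8': (610, 355), 'PlayerB9': (900, 200)}
    #   playee, status = net.Attack(opponents)  # e.g. ('PlayerB8', True)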
def TagPlayers(self, XDict, YDict):
"""
        YDict holds the source players and XDict the target players;
        each YDict (source) player tags the nearest free XDict (target)
        player, goalkeepers excluded.
"""
SourcePlayers = list(YDict.keys())
TargetPlayers = list(XDict.keys())
# remove goal keepers
# goalkeepers are always first on the list
TargetPlayers.remove(TargetPlayers[0])
# goalkeepers are always first on the list
SourcePlayers.remove(SourcePlayers[0])
        # Tag each source player to a target player based on nearest neighbours
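        # (greedy matching: each source player claims its closest remaining
        # target, so players earlier in SourcePlayers get priority)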
DictTags = {}
for k in SourcePlayers:
EstimatedDistance = {}
for j in TargetPlayers:
# get source location
xsource, ysource = self.GetplayersLocation(k)
# get target location
xtarget, ytarget = self.GetplayersLocation(j)
EstimatedDistance[j] = self.StraightLineDistance(
xsource, xtarget, ysource, ytarget)
FirstKey = self.sortdata(EstimatedDistance)[0]
DictTags[k] = FirstKey
# remove the chosen key from this data.
TargetPlayers.remove(FirstKey)
# reset movement of source players to target players
for keys in DictTags.keys():
value = DictTags[keys]
# get source location
xsource, ysource = self.GetplayersLocation(keys)
xtarget, ytarget = self.GetplayersLocation(value)
xmoves, ymoves = self.linearRegression(
xsource, ysource, xtarget, ytarget)
# reposition the ball in front of the player
self.setplayersLocation(coordinates=(
xmoves, ymoves), defaultPlayer=keys)
return
def LikePairsNearestNeighbor(self, playername, sourcePlayerCoordinate, SourcedictDataX, SourcedictDataY,
milestone='short', conditionedValue=30):
"""
        SourcedictDataX: coordinates of every source player, keyed by name.
        SourcedictDataY: coordinates of every target player, keyed by name.
        playername: name of the current player.
        sourcePlayerCoordinate: current coordinate of the current player.
        milestone: either a long pass, a short pass, or a cross.
        conditionedValue: give the ball to a player who is free of any
        opponent within a distance of 30.
"""
EstimatedDistance = {}
for playee in SourcedictDataX.keys():
playeeCoordinate = SourcedictDataX[playee]
EstimatedDistance[playee] = self.StraightLineDistance(
sourcePlayerCoordinate[0], playeeCoordinate[0], sourcePlayerCoordinate[1], playeeCoordinate[1])
sortedKeys = self.sortdata(EstimatedDistance)
sortedKeys.remove(playername)
nextplayer = np.random.choice(sortedKeys)
return nextplayer
    def repositionBall(self, xmove, ymove):
        # Offset every coordinate by one pixel.
        x = [i + 1 for i in xmove]
        y = [j + 1 for j in ymove]
        return x, y
def AttemptGoal(self, playername, sourcePlayerCoordinate, SourcedictDataX, SourcedictDataY):
# Attempt Shot
# get opposing keeper, target Team.
if playername in self.playersListA:
goalKeeper = 'PlayerBKeeper'
else:
goalKeeper = 'PlayerAKeeper'
self.game_status = playername + \
' is Attempting to make Shot, goalkeeper is ' + goalKeeper
self.AttemptScore(playername, goalKeeper,
SourcedictDataX, SourcedictDataY)
return
def updateposition(self, playername, sourcePlayerCoordinate, SourcedictDataX, SourcedictDataY):
# get the next player
outplayer = self.LikePairsNearestNeighbor(
playername, sourcePlayerCoordinate, SourcedictDataX, SourcedictDataY)
# update straight line movement of the ball from source to nextplayer
xbound, ybound = SourcedictDataX[outplayer]
xsource, ysource = self.superDict['ball'].GetlastLocation()
xmoves, ymoves = self.linearRegression(
xsource, ysource, xbound, ybound)
# reposition the ball in front of the player
self.setplayersLocation(coordinates=(
xmoves, ymoves), defaultPlayer='ball')
self.setplayersLocation(coordinates=(
[xbound]*50, [ybound]*50), defaultPlayer=outplayer)
        # remove the receiving player (outplayer) before repositioning the rest
SourcedDictDataX_copy = SourcedictDataX.copy()
SourcedDictDataX_copy.pop(outplayer)
        # update the positions of all remaining players, including playername:
        # 1. get their current position from SourcedictDataX / SourcedictDataY
        # 2. estimate a straight-line move to their new position (within the
        #    specified boundary)
for playee in SourcedDictDataX_copy.keys():
currentPositionX, currentPositionY = SourcedDictDataX_copy[playee]
xbound, ybound = self.GetplayersBoundary(playee)
xmoves, ymoves = self.linearRegression(currentPositionX, currentPositionY,
xbound, ybound)
self.setplayersLocation((xmoves, ymoves), defaultPlayer=playee)
        # reset the opposing keeper's position
currentPositionX, currentPositionY = SourcedictDataY[list(
SourcedictDataY.keys())[0]]
xbound, ybound = self.GetplayersBoundary(
list(SourcedictDataY.keys())[0])
xmoves, ymoves = self.linearRegression(currentPositionX, currentPositionY,
xbound, ybound)
self.setplayersLocation(
(xmoves, ymoves), defaultPlayer=list(SourcedictDataY.keys())[0])
# for the opposing players, reset their positions to attack.
self.TagPlayers(SourcedictDataX, SourcedictDataY)
return outplayer, playername
def linearRegression(self, x1, y1, x2, y2):
reg = Regression()
xmove, ymove = reg.network(x1, y1, x2, y2)
return xmove, ymove
    def setplayersLocation(self, coordinates, defaultPlayer='ball'):
        """
        x and y are lists of 50 positions for the given player;
        updates the stored trajectory of the player passed in as defaultPlayer.
        """
        x, y = coordinates
        if defaultPlayer in self.superDict:
            self.superDict[defaultPlayer].listX = x
            self.superDict[defaultPlayer].listY = y
    def GetplayersLocation(self, defaultPlayer='ball'):
        """
        Return the current (x, y) position of the player passed in as
        defaultPlayer.
        """
        x = self.superDict[defaultPlayer].positionx
        y = self.superDict[defaultPlayer].positiony
        return x, y
def GetplayersBoundary(self, defaultPlayer='ball'):
"""
        Get the movement boundaries of the keepers, ball, defenders,
        strikers and midfielders.
"""
# Determine factor, range of factor is 0 - 1
factor = 1
factor2 = 1
stat = False
# get ball's coordinate
xball, yball = self.GetplayersLocation('ball')
if yball < 330:
stat = False
# within player's A post
factor = 0.2
factor2 = 1
elif yball > 330 and yball < 660:
stat = True
factor = 1
factor2 = 1
else:
stat = False
factor = 0.7
factor2 = 1/factor
if defaultPlayer in ['PlayerA1', 'PlayerA2', 'PlayerB9']:
x1, x2, y1, y2 = 150*factor, 600*factor*factor2, 330, 660
elif defaultPlayer in ['PlayerA3', 'PlayerA4', 'PlayerB10']:
x1, x2, y1, y2 = 150*factor, 600*factor*factor2, 20, 330
elif defaultPlayer == 'PlayerB8':
x1, x2, y1, y2 = 150*factor, 600*factor*factor2, 275, 345
elif defaultPlayer in ['PlayerA5', 'PlayerB5']:
x1, x2, y1, y2 = 400*factor, 900*factor*factor2, 330, 660
elif defaultPlayer in ['PlayerA6', 'PlayerB6']:
x1, x2, y1, y2 = 400*factor, 900*factor*factor2, 275, 345
elif defaultPlayer in ['PlayerA7', 'PlayerB7']:
x1, x2, y1, y2 = 400*factor, 900*factor*factor2, 20, 330
elif defaultPlayer == 'PlayerA8':
x1, x2, y1, y2 = 600*factor, 1000*factor*factor2, 275, 345
elif defaultPlayer == 'PlayerA9':
x1, x2, y1, y2 = 600*factor, 1000*factor*factor2, 330, 660
elif defaultPlayer == 'PlayerA10':
x1, x2, y1, y2 = 600*factor, 1000*factor*factor2, 20, 330
elif defaultPlayer in ['PlayerB1', 'PlayerB2']:
x1, x2, y1, y2 = 600*factor, 1000*factor*factor2, 330, 660
elif defaultPlayer in ['PlayerB3', 'PlayerB4']:
x1, x2, y1, y2 = 600*factor, 1000*factor*factor2, 20, 330
elif defaultPlayer == 'PlayerAKeeper':
x1, x2, y1, y2 = 20, 220, 200, 500
elif defaultPlayer == 'PlayerBKeeper':
x1, x2, y1, y2 = 1000, 1100, 200, 500
else:
x1, x2, y1, y2 = 20, 1160, 20, 660
# Get player's location to minimize movement around the field
xloc, yloc = self.GetplayersLocation(defaultPlayer)
        xchoice = np.random.randint(int(x1), int(x2))
        ychoice = np.random.randint(int(y1), int(y2))
        # ensure the new x target isn't more than 100 px from the current x
        if xloc > xchoice:
            if np.abs(xloc - xchoice) > 100:
                xchoice = xloc - 100
        else:
            if np.abs(xloc - xchoice) > 100:
                xchoice = xloc + 100
        # return a random coordinate between x1 and x2, likewise y1 and y2
        return xchoice, ychoice
import os
import tifffile
import numpy as np
from copy import copy
from merfishdecoder.util import imagereader
from merfishdecoder.core import dataset
class Frame(object):
def __init__(self,
dataSet,
fov: int = None,
zpos: float = None,
readoutName: str = None):
"""
Create a new Frame object for a specific MERFISH dataset.
        Args:
            dataSet: a MERFISH dataset object.
            fov: An integer indicating the field of view.
            zpos: A float indicating the z position of the frame.
            readoutName: A string identifying the frame, also known
                as the frame name.
"""
self._dataSet = dataSet;
self._fiducial = None;
self._img = None;
self._zpos = zpos;
self._fov = fov;
self._readoutName = readoutName;
bit_i = self._dataSet.dataOrganization.data["channelName"][
int(np.where(self._dataSet.dataOrganization.data["readoutName"] == readoutName)[0])];
self._fiducial_file_name = \
self._dataSet.dataOrganization.get_fiducial_filename(
self._dataSet.dataOrganization.get_data_channel_index(bit_i),
fov);
self._fiducial_frame_index = self._dataSet.dataOrganization.get_fiducial_frame_index(
self._dataSet.dataOrganization.get_data_channel_index(bit_i));
self._image_file_name = self._dataSet.dataOrganization.get_image_filename(
self._dataSet.dataOrganization.get_data_channel_index(bit_i),
fov);
self._image_frame_index = self._dataSet.dataOrganization.get_image_frame_index(
self._dataSet.dataOrganization.get_data_channel_index(bit_i),
zpos);
self._image_color = self._dataSet.dataOrganization.get_data_channel_color(
self._dataSet.dataOrganization.get_data_channel_index(bit_i));
def get_image_color(self):
"""
Get frame color
"""
return self._image_color
def load_readout_raw_image(self):
"""
Load readout raw image
"""
movie = imagereader.infer_reader(
self._dataSet.rawDataPortal.open_file(self._image_file_name));
img = movie.load_frame(
self._image_frame_index)
if self._dataSet.transpose:
img = np.transpose(img)
if self._dataSet.flipHorizontal:
img = np.flip(img, axis=1)
if self._dataSet.flipVertical:
img = np.flip(img, axis=0)
self._img = img.copy()
movie.close()
def get_readout_image(self):
"""
Get readout image
"""
return self._img
def load_fiducial_raw_image(self):
"""
Load fiducial raw image
"""
movie = imagereader.infer_reader(
self._dataSet.rawDataPortal.open_file(
self._fiducial_file_name));
img = movie.load_frame(
self._fiducial_frame_index)
if self._dataSet.transpose:
img = np.transpose(img)
if self._dataSet.flipHorizontal:
img = np.flip(img, axis=1)
if self._dataSet.flipVertical:
img = np.flip(img, axis=0)
self._fiducial = img.copy()
movie.close()
def get_fiducial_image(self):
"""
Get fiducial image
"""
return self._fiducial
def del_readout_raw_image(self):
"""
Remove readout raw image
"""
del self._img
self._img = None
def del_fiducial_raw_image(self):
"""
Remove fiducial raw image
"""
del self._fiducial
        self._fiducial = None
class Zplane(object):
"""
    Purpose:
        A Zplane object stores all the frames/images
        from the same z position.
    Args:
        fov: an integer indicating the field of view that the
            current zplane belongs to.
        zpos: z position.
        readoutNames: a list of strings indicating the frames
            to be included.
    Returns:
        A Zplane object.
"""
def __init__(self,
dataSetName: str = None,
fov: int = None,
zpos: float = None):
"""
Create a new Zplane object from a MERFISH dataset.
Args:
dataSetName: MERFISH dataset name.
fov: An integer indicates the field of view.
zpos: A float number indicates the zpos of the frame.
"""
self._dataSet = dataset.MERFISHDataSet(
dataDirectoryName = dataSetName);
readoutNames = \
self._dataSet.dataOrganization.data["readoutName"]
self._frames = dict(zip(readoutNames,
[ Frame(self._dataSet, fov = fov,
zpos = zpos, readoutName = x) \
for x in readoutNames ]))
self._fov = fov
self._zpos = zpos
os.chdir(self._dataSet.analysisPath)
def get_data_path(self) -> str:
"""
Get data path
"""
return self._dataSet.rawDataPath
def get_analysis_path(self) -> str:
"""
Get analysis path
"""
return self._dataSet.analysisPath
def get_stage_position(self) -> tuple:
"""
Get stage position for current fov
"""
return self._dataSet.get_stage_positions().iloc[self._fov]
def get_image_size(self) -> tuple:
"""
        Get the pixel dimensions of a single image frame
"""
return tuple(self._dataSet.get_image_dimensions())
def get_film_size(self) -> tuple:
"""
        Get the shape of the full frame stack: (number of frames, M, N)
"""
(M, N) = self.get_image_size();
return (len(self._frames), M, N);
def get_microns_per_pixel(self) -> tuple:
"""
Get micron per pixel
"""
return self._dataSet.micronsPerPixel
def get_fov(self) -> int:
"""
Get field of view
"""
return self._fov
def get_z_position(self) -> float:
"""
Get z positions
"""
return self._zpos
def get_codebook(self):
"""
        Get the codebook of the dataset.
"""
return self._dataSet.get_codebook()
def get_readout_name(self) -> list:
"""
Get readout name for each frame.
"""
return list(self._frames.keys())
def get_frame_by_readout_name(self,
readoutName: str = None
) -> Frame:
"""
Get a frame object by readout name.
"""
return self._frames[readoutName]
def get_image_color(self,
readoutNames: list = None
) -> list:
"""
Get color for each readout
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
return [ self.get_frame_by_readout_name(rn).get_image_color() \
for rn in readoutNames ]
def get_bit_name(self) -> list:
"""
Get readout names for encoding frames.
"""
return self.get_codebook().get_bit_names()
def get_feature_name(self) -> list:
"""
Get readout names for feature frames.
"""
return list(set(self.get_readout_name()) - set(self.get_bit_name()))
def get_bit_count(self) -> int:
"""
        Get the number of bits in the codebook.
"""
return self.get_codebook().get_bit_count()
def get_image_file_name(self) -> list:
"""
Get image file name for each frame.
"""
        return [ frame._image_file_name for frame in self._frames.values() ]
def get_frames(self,
readoutNames: list = None
) -> Frame:
"""
Get multiple frames by readout names
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
return [ self.get_frame_by_readout_name(rn) \
for rn in readoutNames ]
def del_frames(self, readoutNames: list = None):
"""
Remove a frame based on the readout name.
If readoutNames is None, all frames will be deleted.
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
for rn in readoutNames:
self._frames.pop(rn, None)
def load_readout_image_from_readout_name(self,
readoutName: str = None):
"""
Load readout image from a selected readout name.
"""
self._frames[readoutName].load_readout_raw_image();
def load_readout_images(self,
readoutNames: list = None):
"""
Load readout images from a list readout names.
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
for rn in readoutNames:
self.load_readout_image_from_readout_name(rn)
def get_readout_image_from_readout_name(self,
readoutName: str
) -> np.ndarray:
"""
Get a readout image from a readout name.
"""
return self._frames[
readoutName
].get_readout_image()
def get_readout_images(self,
readoutNames: list=None):
"""
Get readout images from a list of readout names
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
return np.array([
self.get_readout_image_from_readout_name(rn) \
for rn in readoutNames ])
def load_fiducial_image_from_readout_name(self,
readoutName: str = None):
"""
Load fiducial image from readout name.
"""
self._frames[readoutName].load_fiducial_raw_image();
def load_fiducial_images(self,
readoutNames: list = None):
"""
Load fiducial images from a list of readout names.
"""
        readoutNames = self.get_readout_name() \
            if readoutNames is None else readoutNames
for rn in readoutNames:
self.load_fiducial_image_from_readout_name(rn)
def get_fiducial_image_from_readout_name(self,
readoutName: str = None
) -> np.ndarray:
"""
Get a fiducial image from a given readout name.
"""
return self._frames[
readoutName].get_fiducial_image()
def get_fiducial_images(self,
readoutNames: list= None
) -> np.ndarray:
"""
Get fiducial images from a list of readout names.
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
return np.array([
self.get_fiducial_image_from_readout_name(rn) \
for rn in readoutNames ])
def del_readout_image_from_readout_name(self,
readoutName: str = None):
"""
delete readout image from a readout name.
"""
self._frames[readoutName].del_readout_raw_image();
def del_readout_images(self,
readoutNames: list = None):
"""
Delete readout images from a list readout names.
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
for rn in readoutNames:
self.del_readout_image_from_readout_name(rn)
def del_fiducial_image_from_readout_name(self,
readoutName: str = None):
"""
Delete a fiducial raw image from a readout name.
"""
self._frames[readoutName].del_fiducial_raw_image();
def del_fiducial_images(self,
readoutNames: list = None):
"""
delete fiducial raw image from readout name.
"""
readoutNames = self.get_readout_name() \
if readoutNames is None else readoutNames
for rn in readoutNames:
self.del_fiducial_image_from_readout_name(rn);
def save_readout_images(self,
fileName: str = None,
readoutNames: list = None):
"""
Save readout images into a tif file
"""
tifffile.imwrite(file = fileName,
data = self.get_readout_images(
self.get_readout_name() \
if readoutNames is None else \
readoutNames).astype(np.uint16))
def save_fiducial_images(self,
fileName: str = None,
readoutNames: list = None):
"""
Save fiducial images into a tif file
"""
tifffile.imwrite(file = fileName,
data = self.get_fiducial_images(
self.get_readout_name() \
if readoutNames is None else \
readoutNames).astype(np.uint16))
def get_chromatic_aberration_profile(self):
"""
Get chromatic abberation correction profiles.
"""
return \
self._dataSet.get_chromatic_aberration_profile()
def load_warped_images(self, filename):
"""
Load warped images.
"""
movie = tifffile.imread(filename)
frameNames = self.get_readout_name()
for fn in frameNames:
self._frames[fn]._img = \
movie[frameNames.index(fn)].copy()
del movie
def load_processed_images(self, filename):
"""
Load processed images.
"""
        if filename.endswith("npz"):
            movie = np.load(filename)["arr_0"]
        elif filename.endswith('.npy'):
            # .npy files store the array directly, without an "arr_0" key
            movie = np.load(filename)
        elif filename.endswith('.tif'):
            movie = tifffile.imread(filename)
frameNames = self.get_bit_name()
for fn in frameNames:
self._frames[fn]._img = \
movie[frameNames.index(fn)].copy()
del movie
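# Usage sketch (hypothetical dataset name and parameters): build a Zplane,
# load the readout images for every frame, and save them as a 16-bit tif.
#
#   zp = Zplane(dataSetName="MERFISH_demo", fov=0, zpos=3.0)
#   zp.load_readout_images()
#   zp.save_readout_images(fileName="fov0_z3.tif")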
#include <szen/System/Config.hpp>
#include <szen/System/ErrorStream.hpp>
#include <json/json.h>
#include <boost/algorithm/string.hpp>
//#include <boost/filesystem.hpp>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <memory>
/////////////////////////////////////////////////
// DEFAULTS
#define DEFAULT_FULLSCREEN 0
#define DEFAULT_RESOLUTION_FULLSCREEN Window::getOptimalResolution(true)
#define DEFAULT_RESOLUTION_WINDOWED Window::getOptimalResolution(false)
#define DEFAULT_VOLUME_SOUND 90.f
#define DEFAULT_VOLUME_MUSIC 50.f
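// Shape of the settings.cfg written by resetSettings() (values below are
// illustrative; the actual resolutions come from Window::getOptimalResolution):
//
// {
//    "fullscreen" : 0,
//    "resolution" : { "full" : [1920, 1080], "window" : [1280, 720] },
//    "volume" : { "music" : 50.0, "sound" : 90.0 }
// }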
/////////////////////////////////////////////////
namespace
{
const std::string m_settingsFile = "settings.cfg";
std::unique_ptr<json::Value> m_settings;
bool exists(const std::string& path)
{
std::ifstream file(path);
return file.good();
}
}
#define _s (*m_settings)
using namespace sz;
////////////////////////////////////////////////////
void setResolutionSettings(int set = -1)
{
if(set == -1 || set == 0)
{
sf::VideoMode window = DEFAULT_RESOLUTION_WINDOWED;
_s["resolution"]["window"][0U] = window.width;
_s["resolution"]["window"][1U] = window.height;
}
if(set == -1 || set == 1)
{
sf::VideoMode full = DEFAULT_RESOLUTION_FULLSCREEN;
_s["resolution"]["full"][0U] = full.width;
_s["resolution"]["full"][1U] = full.height;
}
}
////////////////////////////////////////////////////
void resetSettings()
{
_s.clear();
_s["fullscreen"] = DEFAULT_FULLSCREEN;
setResolutionSettings();
_s["volume"]["sound"] = DEFAULT_VOLUME_SOUND;
_s["volume"]["music"] = DEFAULT_VOLUME_MUSIC;
Config::saveSettings();
}
////////////////////////////////////////////////////
bool hasKey(const std::string& key)
{
return m_settings->isMember(key);
}
////////////////////////////////////////////////////
void checkKeys()
{
//////////////////////////////////////////////
// Check screen keys
if(!hasKey("fullscreen")) _s["fullscreen"] = DEFAULT_FULLSCREEN;
if(!hasKey("resolution"))
{
setResolutionSettings();
}
else
{
if(!_s["resolution"].isMember("window")) setResolutionSettings(0);
if(!_s["resolution"].isMember("full")) setResolutionSettings(1);
}
//////////////////////////////////////////////
// Check audio keys
if(!hasKey("volume"))
{
_s["volume"]["sound"] = DEFAULT_VOLUME_SOUND;
_s["volume"]["music"] = DEFAULT_VOLUME_MUSIC;
}
else
{
if(!_s["volume"].isMember("music"))
{
_s["volume"]["music"] = DEFAULT_VOLUME_MUSIC;
}
else
{
_s["volume"]["music"] = std::max(0.f, std::min(100.f, (float)_s["volume"]["music"].asDouble()));
}
if(!_s["volume"].isMember("sound"))
{
_s["volume"]["sound"] = DEFAULT_VOLUME_SOUND;
}
else
{
_s["volume"]["sound"] = std::max(0.f, std::min(100.f, (float)_s["volume"]["sound"].asDouble()));
}
}
}
////////////////////////////////////////////////////
void openSettings()
{
std::ifstream file(m_settingsFile);
json::Reader reader;
if(!reader.parse(file, *m_settings))
{
szerr << "Parse error in settings file, resetting to defaults." << ErrorStream::error;
resetSettings();
}
else
{
checkKeys();
}
}
////////////////////////////////////////////////////
void Config::saveSettings()
{
// Attempt to open settings file
std::ofstream file(m_settingsFile, std::ios_base::trunc);
if(!file.is_open())
{
szerr << "Unable to open settings file '" << m_settingsFile << "' for writing!" << ErrorStream::error;
return;
}
// Write config to file
json::StyledWriter writer;
file << writer.write(*m_settings);
file.close();
}
////////////////////////////////////////////////////
void Config::initSettingsFile()
{
m_settings.reset(new json::Value);
// If settings file already exists don't do anything
if(exists(m_settingsFile))
{
openSettings();
}
else
{
resetSettings();
}
}
////////////////////////////////////////////////////
void Config::closeSettings()
{
saveSettings();
m_settings.reset(); // reset (not release) so the json::Value is destroyed
}
////////////////////////////////////////////////////
void Config::setScreenMode(uint32 value)
{
if(value >= 3) value = 0;
_s["fullscreen"] = value;
saveSettings();
}
////////////////////////////////////////////////////
bool Config::isFullscreen()
{
return getScreenMode() >= 1;
}
////////////////////////////////////////////////////
uint32 Config::getScreenMode()
{
return _s["fullscreen"].asUInt();
}
////////////////////////////////////////////////////
uint32 Config::getSystemWindowMode()
{
switch(getScreenMode())
{
case 0: return sf::Style::Close; // Windowed
case 1: return sf::Style::Fullscreen; // Normal fullscreen
case 2: return sf::Style::None; // Windowed fullscreen
}
return sf::Style::Close;
}
////////////////////////////////////////////////////
sf::VideoMode Config::getVideoMode()
{
return getVideoMode(getScreenMode());
}
////////////////////////////////////////////////////
bool checkAspect(sf::VideoMode mode)
{
sf::VideoMode optimal = sz::Window::getOptimalResolution(Config::isFullscreen());
float optimalAspect = optimal.width / static_cast<float>(optimal.height);
float userAspect = mode.width / static_cast<float>(mode.height);
return fabs(optimalAspect - userAspect) <= 0.001f;
}
////////////////////////////////////////////////////
sf::VideoMode Config::getVideoMode(const uint32 mode)
{
sf::VideoMode result;
switch(mode)
{
////////////////////////////////////////////
// Windowed
case 0:
{
result = sf::VideoMode(
_s["resolution"]["window"][0U].asUInt(),
_s["resolution"]["window"][1U].asUInt()
);
break;
}
////////////////////////////////////////////
// Fullscreen
case 1:
{
result = sf::VideoMode(
_s["resolution"]["full"][0U].asUInt(),
_s["resolution"]["full"][1U].asUInt()
);
break;
}
////////////////////////////////////////////
// Windowed fullscreen
case 2:
{
result = sz::Window::getOptimalResolution(true);
break;
}
}
return result;
}
////////////////////////////////////////////////////
void Config::setVideoMode(const bool isFull, sf::VideoMode mode)
{
if(isFull)
{
_s["resolution"]["full"][0U] = mode.width;
_s["resolution"]["full"][1U] = mode.height;
}
else
{
_s["resolution"]["window"][0U] = mode.width;
_s["resolution"]["window"][1U] = mode.height;
}
}
////////////////////////////////////////////////////
float Config::getSoundVolume()
{
return static_cast<float>(_s["volume"]["sound"].asDouble());
}
////////////////////////////////////////////////////
void Config::setSoundVolume(float value)
{
_s["volume"]["sound"] = std::max(0.f, std::min(100.f, value));
}
////////////////////////////////////////////////////
float Config::getMusicVolume()
{
return static_cast<float>(_s["volume"]["music"].asDouble());
}
////////////////////////////////////////////////////
void Config::setMusicVolume(float value)
{
_s["volume"]["music"] = std::max(0.f, std::min(100.f, value));
}
/*
* This file is open source software, licensed to you under the terms
* of the Apache License, Version 2.0 (the "License"). See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. You may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (C) 2020 ScyllaDB
*/
#pragma once
#include "fs/bitwise.hh"
#include "fs/cluster.hh"
#include "fs/inode.hh"
#include "fs/inode_info.hh"
#include "fs/metadata_disk_entries.hh"
#include "fs/metadata_to_disk_buffer.hh"
#include "fs/units.hh"
#include "fs/metadata_log.hh"
#include "seastar/core/do_with.hh"
#include "seastar/core/future-util.hh"
#include "seastar/core/future.hh"
#include "seastar/core/temporary_buffer.hh"
#include <boost/crc.hpp>
#include <cstddef>
#include <cstring>
#include <unordered_set>
#include <variant>
namespace seastar::fs {
// Sequential reader over an in-memory byte buffer: tracks the current read
// position, supports alignment and position checkpointing, and can accumulate
// a CRC over a span of bytes without copying them out.
class data_reader {
const uint8_t* _data = nullptr;
size_t _size = 0;
size_t _pos = 0;
size_t _last_checkpointed_pos = 0;
public:
data_reader() = default;
data_reader(const uint8_t* data, size_t size) : _data(data), _size(size) {}
size_t curr_pos() const noexcept { return _pos; }
size_t last_checkpointed_pos() const noexcept { return _last_checkpointed_pos; }
size_t bytes_left() const noexcept { return _size - _pos; }
void align_curr_pos(size_t alignment) noexcept { _pos = round_up_to_multiple_of_power_of_2(_pos, alignment); }
void checkpoint_curr_pos() noexcept { _last_checkpointed_pos = _pos; }
// Returns whether the reading was successful
bool read(void* destination, size_t size);
// Returns whether the reading was successful
template<class T>
bool read_entry(T& entry) noexcept {
return read(&entry, sizeof(entry));
}
// Returns whether the reading was successful
bool read_string(std::string& str, size_t size);
std::optional<temporary_buffer<uint8_t>> read_tmp_buff(size_t size);
// Returns whether the processing was successful
bool process_crc_without_reading(boost::crc_32_type& crc, size_t size);
std::optional<data_reader> extract(size_t size);
};
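// Usage sketch (illustrative; `some_entry` stands in for any trivially
// copyable on-disk entry type):
//
//   data_reader reader(buffer, buffer_size);
//   some_entry entry;
//   if (reader.read_entry(entry)) {
//       reader.align_curr_pos(alignof(some_entry));
//       reader.checkpoint_curr_pos();
//   }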
class metadata_log_bootstrap {
metadata_log& _metadata_log;
cluster_range _available_clusters;
std::unordered_set<cluster_id_t> _taken_clusters;
std::optional<cluster_id_t> _next_cluster;
temporary_buffer<uint8_t> _curr_cluster_data;
data_reader _curr_cluster;
data_reader _curr_checkpoint;
metadata_log_bootstrap(metadata_log& metadata_log, cluster_range available_clusters);
future<> bootstrap(cluster_id_t first_metadata_cluster_id, fs_shard_id_t fs_shards_pool_size,
fs_shard_id_t fs_shard_id);
future<> bootstrap_cluster(cluster_id_t curr_cluster);
static auto invalid_entry_exception() {
return make_exception_future<>(std::runtime_error("Invalid metadata log entry"));
}
future<> bootstrap_read_cluster();
// Returns whether reading and checking was successful
bool read_and_check_checkpoint();
future<> bootstrap_checkpointed_data();
future<> bootstrap_next_metadata_cluster();
bool inode_exists(inode_t inode);
future<> bootstrap_create_inode();
future<> bootstrap_delete_inode();
future<> bootstrap_small_write();
future<> bootstrap_medium_write();
future<> bootstrap_large_write();
future<> bootstrap_large_write_without_mtime();
future<> bootstrap_truncate();
future<> bootstrap_add_dir_entry();
future<> bootstrap_create_inode_as_dir_entry();
future<> bootstrap_delete_dir_entry();
future<> bootstrap_delete_inode_and_dir_entry();
public:
static future<> bootstrap(metadata_log& metadata_log, inode_t root_dir, cluster_id_t first_metadata_cluster_id,
cluster_range available_clusters, fs_shard_id_t fs_shards_pool_size, fs_shard_id_t fs_shard_id);
};
} // namespace seastar::fs
#!/usr/bin/python
# Copyright (c) 2019 Matthew Earl, Paulo Jarschel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This is Matthew Earl's code turned into a library to simplify the code usage by the
FaceswapBot script, and to make it easier for other people to use it as they see fit.
"""
import os
import cv2
import dlib
import numpy as np
import urllib.request
class FaceSwapLib:
# Definitions
script_dir = os.path.dirname(os.path.realpath(__file__))
PREDICTOR_PATH = script_dir + "/shape_predictor_68_face_landmarks.dat"
FEATHER_AMOUNT = 11
FACE_POINTS = list(range(17, 68))
MOUTH_POINTS = list(range(48, 61))
RIGHT_BROW_POINTS = list(range(17, 22))
LEFT_BROW_POINTS = list(range(22, 27))
RIGHT_EYE_POINTS = list(range(36, 42))
LEFT_EYE_POINTS = list(range(42, 48))
NOSE_POINTS = list(range(27, 35))
JAW_POINTS = list(range(0, 17))
# Points used to line up the images.
ALIGN_POINTS = (LEFT_BROW_POINTS + RIGHT_EYE_POINTS + LEFT_EYE_POINTS
+ RIGHT_BROW_POINTS + NOSE_POINTS + MOUTH_POINTS)
# Points from the second image to overlay on the first. The convex hull of each
# element will be overlaid.
OVERLAY_POINTS = [
LEFT_EYE_POINTS + RIGHT_EYE_POINTS + LEFT_BROW_POINTS + RIGHT_BROW_POINTS,
NOSE_POINTS + MOUTH_POINTS,
]
# Amount of blur to use during colour correction, as a fraction of the
# pupillary distance.
COLOUR_CORRECT_BLUR_FRAC = 0.6
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
    def __init__(self):
        pass
def read_im_from_url(self, url):
req = urllib.request.urlopen(url)
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
im = cv2.imdecode(arr, cv2.IMREAD_COLOR)
return im
def read_im_from_file(self, fname):
im = cv2.imread(fname, cv2.IMREAD_COLOR)
return im
def get_landmarks(self, im):
rects = self.detector(im, 1)
if len(rects) == 0:
return False
else:
face = np.random.randint(len(rects))
return np.matrix([[p.x, p.y] for p in self.predictor(im, rects[face]).parts()])
    def annotate_landmarks(self, im, landmarks):
im = im.copy()
for idx, point in enumerate(landmarks):
pos = (point[0, 0], point[0, 1])
cv2.putText(im, str(idx), pos,
fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
fontScale=0.4,
color=(0, 0, 255))
cv2.circle(im, pos, 3, color=(0, 255, 255))
return im
def draw_convex_hull(self, im, points, color):
points = cv2.convexHull(points)
cv2.fillConvexPoly(im, points, color=color)
def get_face_mask(self, im, landmarks):
im = np.zeros(im.shape[:2], dtype=np.float64)
for group in self.OVERLAY_POINTS:
self.draw_convex_hull(im, landmarks[group], color=1)
im = np.array([im, im, im]).transpose((1, 2, 0))
im = (cv2.GaussianBlur(im, (self.FEATHER_AMOUNT, self.FEATHER_AMOUNT), 0) > 0) * 1.0
im = cv2.GaussianBlur(im, (self.FEATHER_AMOUNT, self.FEATHER_AMOUNT), 0)
return im
def transformation_from_points(self, points1, points2):
"""
Return an affine transformation [s * R | T] such that:
sum ||s*R*p1,i + T - p2,i||^2
is minimized.
"""
# Solve the procrustes problem by subtracting centroids, scaling by the
# standard deviation, and then using the SVD to calculate the rotation. See
# the following for more details:
# https://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
points1 = points1.astype(np.float64)
points2 = points2.astype(np.float64)
c1 = np.mean(points1, axis=0)
c2 = np.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = np.std(points1)
s2 = np.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = np.linalg.svd(points1.T * points2)
# The R we seek is in fact the transpose of the one given by U * Vt. This
# is because the above formulation assumes the matrix goes on the right
# (with row vectors) where as our solution requires the matrix to be on the
# left (with column vectors).
R = (U * Vt).T
return np.vstack([np.hstack(((s2 / s1) * R, c2.T - (s2 / s1) * R * c1.T)),
np.matrix([0., 0., 1.])])
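    # Sanity-check sketch: applying the returned 3x3 matrix to points1 in
    # homogeneous coordinates should approximately reproduce points2 whenever
    # the two landmark sets differ only by a similarity transform.
    #
    #   M = self.transformation_from_points(p1, p2)
    #   p1h = np.hstack([p1, np.ones((p1.shape[0], 1))])
    #   p2_est = (M[:2] * p1h.T).T   # close to p2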
def warp_im(self, im, M, dshape):
output_im = np.zeros(dshape, dtype=im.dtype)
cv2.warpAffine(im,
M[:2],
(dshape[1], dshape[0]),
dst=output_im,
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output_im
def correct_colours(self, im1, im2, landmarks1):
blur_amount = self.COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm(
np.mean(landmarks1[self.LEFT_EYE_POINTS], axis=0)
- np.mean(landmarks1[self.RIGHT_EYE_POINTS], axis=0))
blur_amount = int(blur_amount)
if blur_amount % 2 == 0:
blur_amount += 1
im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
# Avoid divide-by-zero errors.
im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
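        # Scaling im2 by im1_blur / im2_blur matches im2's low-frequency
        # colour to im1's while preserving im2's high-frequency detail.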
return (im2.astype(np.float64) * im1_blur.astype(np.float64)
/ im2_blur.astype(np.float64))
def swap(self, img1, img2, lm1, lm2):
M = self.transformation_from_points(lm1[self.ALIGN_POINTS], lm2[self.ALIGN_POINTS])
mask = self.get_face_mask(img2, lm2)
warped_mask = self.warp_im(mask, M, img1.shape)
combined_mask = np.max([self.get_face_mask(img1, lm1), warped_mask], axis=0)
warped_im2 = self.warp_im(img2, M, img1.shape)
warped_corrected_im2 = self.correct_colours(img1, warped_im2, lm1)
output_im = img1 * (1.0 - combined_mask) + warped_corrected_im2 * combined_mask
return output_im
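    # End-to-end usage sketch (hypothetical file names):
    #
    #   fs = FaceSwapLib()
    #   im1 = fs.read_im_from_file("head.jpg")
    #   im2 = fs.read_im_from_file("face.jpg")
    #   lm1, lm2 = fs.get_landmarks(im1), fs.get_landmarks(im2)
    #   output = fs.swap(im1, im2, lm1, lm2)  # im2's face blended onto im1
    #   cv2.imwrite("swapped.jpg", output)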
def get_one_image(self, im1, im2):
# Make images roughly the same height or width
w1 = len(im1[0])
h1 = len(im1)
w2 = len(im2[0])
h2 = len(im2)
if h1*w1 > h2*w2:
ratio = np.min([h1/h2, w1/w2])
im2 = cv2.resize(im2, (int(w2*ratio), int(h2*ratio)))
w2 = len(im2[0])
h2 = len(im2)
else:
ratio = np.min([h2/h1, w2/w1])
im1 = cv2.resize(im1, (int(w1*ratio), int(h1*ratio)))
w1 = len(im1[0])
h1 = len(im1)
# Join the two of them, vertically if the larger is too wide, horizontally
# if larger is too high
padding = 0
imf = np.zeros([128, 128, 3])
if np.max([w1, w2]) >= np.max([h1, h2]):
wt = np.max([w1, w2])
ht = h1 + h2 + padding
leftover1 = int(np.abs(wt - im1.shape[1])/2)
leftover2 = int(np.abs(wt - im2.shape[1])/2)
imf = np.zeros((ht, wt, 3), dtype=np.uint8)
imf[:im1.shape[0], leftover1:(im1.shape[1] + leftover1), :] = im1
imf[(im1.shape[0] + padding):, leftover2:(im2.shape[1] + leftover2), :] = im2
else:
wt = w1 + w2 + padding
ht = np.max([h1, h2])
leftover1 = int(np.abs(ht - im1.shape[0])/2)
leftover2 = int(np.abs(ht - im2.shape[0])/2)
imf = np.zeros((ht, wt, 3), dtype=np.uint8)
imf[leftover1:(im1.shape[0] + leftover1), :im1.shape[1], :] = im1
imf[leftover2:(im2.shape[0] + leftover2), (im1.shape[1] + padding):, :] = im2
return imf
\documentclass[12pt,a4paper]{article}
%\title{Lab6-Lucene3 (custom Similarity)}
\usepackage{ctex}
\usepackage{amsmath,amscd,amsbsy,amssymb,latexsym,url,bm,amsthm}
\usepackage{epsfig,graphicx,subfigure}
\usepackage{enumitem,balance}
\usepackage{wrapfig}
\usepackage{mathrsfs,euscript}
\usepackage[usenames]{xcolor}
\usepackage{hyperref}
\usepackage[vlined,ruled,commentsnumbered,linesnumbered]{algorithm2e}
\usepackage{float}
\usepackage{geometry}
\usepackage{listings}
\geometry{a4paper,scale=0.8}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
% --- Python code template ---
% Default fixed font does not support bold face
\DeclareFixedFont{\ttb}{T1}{txtt}{bx}{n}{12} % for bold
\DeclareFixedFont{\ttm}{T1}{txtt}{m}{n}{12} % for normal
% Custom colors
\usepackage{color}
\definecolor{deepblue}{rgb}{0,0,0.5}
\definecolor{deepred}{rgb}{0.6,0,0}
\definecolor{deepgreen}{rgb}{0,0.5,0}
% Python style for highlighting
\newcommand\pythonstyle{\lstset{
language=Python,
basicstyle=\ttm,
morekeywords={self}, % Add keywords here
keywordstyle=\ttb\color{deepblue},
emph={MyClass,__init__}, % Custom highlighting
emphstyle=\ttb\color{deepred}, % Custom highlighting style
stringstyle=\color{deepgreen},
frame=tb, % Any extra options here
showstringspaces=false
}}
% Python environment
\lstnewenvironment{python}[1][]
{
\pythonstyle
\lstset{#1}
}
{}
% Python for external files
\newcommand\pythonexternal[2][]{{
\pythonstyle
\lstinputlisting[#1]{#2}}}
% Python for inline
\newcommand\pythoninline[1]{{\pythonstyle\lstinline!#1!}}
% --- Python code template ---
\newcommand{\vect}[1]{\boldsymbol{#1}}
\newcommand{\ve}[1]{\boldsymbol{#1}}
\newcommand{\vve}[1]{\boldsymbol{#1}}
\DeclareMathOperator{\st}{s.t.}
% Macros
\newcommand{\mb}{\mathbb}
%\title{Homework\quad 1}
%\date{2021.11}
%\author{孙济宸\quad \quad Student ID: 520030910016 \quad \quad Class: F2003003}
\begin{document}
%\maketitle
Homework 2\\
DDL: March 2\\
孙济宸 \quad 520030910016
\section{}
\subsection{a}
$$H(X) - H(X|Y) = \mb{E}[-\log p(X)] - \mb{E}[-\log p(X|Y)]$$
$$= \mb{E}[-\log p(X)] + \mb{E}\left[\log \frac{p(X,Y)}{p(Y)}\right] = \mb{E}\left[\log \frac{p(X,Y)}{p(X)p(Y)}\right] = I(X;Y)$$
\subsection{b}
$$H(X) + H(Y) - H(X,Y) = \mb{E}[-\log p(X)] + \mb{E}[-\log p(Y)] + \mb{E}[\log p(X,Y)]$$
$$= \mb{E}\left[\log \frac{p(X,Y)}{p(X)p(Y)}\right] = I(X;Y)$$
\subsection{c}
$$H(X|Z) + H(Y|Z) - H(X,Y|Z) = \mb{E}[-\log p(X|Z)] + \mb{E}[-\log p(Y|Z)] + \mb{E}[\log p(X,Y|Z)]$$
$$= \mb{E}\left[\log \frac{p(X,Y|Z)}{p(X|Z)p(Y|Z)}\right] = I(X;Y|Z)$$
\section{}
\subsection{a}\noindent
if:
$$p(x)= q(x)$$
$$D(p||q) = \sum_{x\in X} p(x)\log \frac{p(x)}{q(x)}=0$$
only if:
$$D(p||q) = -\sum_{x\in X} p(x)\log \frac{q(x)}{p(x)} \geq \sum_{x\in X} p(x)\left(1-\frac{q(x)}{p(x)}\right) = \sum_{x\in X} p(x) - \sum_{x\in X} q(x) = 0,$$
using $-\log t \geq 1 - t$, with equality only if $p(x) = q(x)$ for all $x\in X$.
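For instance, with $p=(\tfrac12,\tfrac12)$ and $q=(\tfrac14,\tfrac34)$,
$$D(p||q) = \tfrac12\log \frac{1/2}{1/4} + \tfrac12\log \frac{1/2}{3/4} = \tfrac12\log \frac{4}{3} > 0,$$
consistent with equality holding only when $p=q$.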
\subsection{b}\noindent
if:
$X$ and $Y$ are independent,
$$I(X;Y) = H(X) - H(X|Y)= H(X) - H(X) = 0$$
only if:
$$I(X;Y) = D(p(x,y)||p(x)p(y)) = 0$$
From (a), we know $p(x,y)=p(x)p(y)$, i.e. $X$ and $Y$ are independent.
\subsection{c}\noindent
if:
$$p(y|x)= q(y|x), \quad \forall\, x,y \ \st\ p(x)>0$$
$$D(p(y|x)||q(y|x)) = \sum_{x\in X} p(x) \sum_{y\in Y} p(y|x)\log \frac{p(y|x)}{q(y|x)}=0$$
only if:
$$D(p(y|x)||q(y|x)) = -\sum_{x\in X} p(x) \sum_{y\in Y} p(y|x)\log \frac{q(y|x)}{p(y|x)} $$
$$ \geq \sum_{x\in X} p(x) \sum_{y\in Y} p(y|x)(1- \frac{q(y|x)}{p(y|x)} ) = \sum_{x\in X} p(x)[ \sum_{y\in Y} p(y|x) - \sum_{y\in Y} {q(y|x)} ] = 0 $$
with equality only if $p(y|x)=q(y|x)$ for all $p(x)>0$.
\subsection{d}\noindent
if:
$X$ and $Y$ are conditionally independent given $Z$,
$$I(X;Y|Z) = H(X|Z) - H(X|Y,Z)= H(X|Z) - H(X|Z) = 0$$
only if:
$$I(X;Y|Z) = D(p(x,y|z)||p(x|z)p(y|z)) = 0$$
By the equality condition in (c), $p(x,y|z)=p(x|z)p(y|z)$, i.e. $X$ and $Y$ are conditionally independent given $Z$.
\end{document}
using Pkg
Pkg.activate(joinpath(@__DIR__, ".."))
using FoodCachingPlotting, Stipple, StippleUI, StipplePlotly, Genie, DataFrames,
Random, PlotlyJS, FoodCachingExperiments, FoodCachingModels
import FoodCachingExperiments: EXPERIMENTS, CLAYTON0103_EXPERIMENTS
const M = FoodCachingModels
Base.@kwdef mutable struct Model <: ReactiveModel
plot::R{Bool} = false
birdid::R{Int} = 0
birdid_options::R{Vector{Int}} = []
imgurl::R{String} = ""
filter::R{String} = ""
seed::R{String} = "random"
models::Vector{Any} = []
list::R{String} = ""
layout::R{Any} = nothing
plot_data::R{Any} = nothing
dist_layout::R{Any} = nothing
dist_plots::R{Any} = nothing
summary1_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary1_plots::R{Any} = nothing
summary2_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary2_plots::R{Any} = nothing
summary3_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary3_plots::R{Any} = nothing
summary4_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary4_plots::R{Any} = nothing
summary5_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary5_plots::R{Any} = nothing
summary6_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary6_plots::R{Any} = nothing
summary7_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary7_plots::R{Any} = nothing
summary8_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary8_plots::R{Any} = nothing
summary9_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary9_plots::R{Any} = nothing
summary10_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary10_plots::R{Any} = nothing
summary11_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary11_plots::R{Any} = nothing
summary12_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary12_plots::R{Any} = nothing
summary13_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary13_plots::R{Any} = nothing
summary14_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary14_plots::R{Any} = nothing
summary15_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary15_plots::R{Any} = nothing
summary16_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary16_plots::R{Any} = nothing
summary17_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary17_plots::R{Any} = nothing
summary18_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary18_plots::R{Any} = nothing
summary19_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary19_plots::R{Any} = nothing
summary20_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary20_plots::R{Any} = nothing
summary21_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary21_plots::R{Any} = nothing
summary22_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary22_plots::R{Any} = nothing
summary23_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary23_plots::R{Any} = nothing
summary24_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary24_plots::R{Any} = nothing
summary25_layout::R{Any} = PlotlyJS.Layout(height = 10, width = 10)
summary25_plots::R{Any} = nothing
experiment::R{String} = ""
datadir::R{String} = joinpath(@__DIR__, "..", "data")
files::R{Vector{String}} = []
opt_experiment::Vector{String} = string.(setdiff(collect(keys(EXPERIMENTS)),
CLAYTON0103_EXPERIMENTS))
model::R{String} = ""
opt_models::Vector{String} = ["Baseline", "MotivationalControl",
"EpisodicLikeMemory", "PlasticCaching",
"ReplayAndPlan"]
commit::R{String} = ""
opt_commit::R{Vector{String}} = [""]
id::R{String} = ""
opt_id::R{Vector{String}} = [""]
end
function updatefiles(model, dir)
if ispath(dir)
model.files[] = readdir(dir)
end
end
function runmodel(m, experiment, trackedfields, seed)
p = M.parameters(M.Model{:euler};
m.p.fixed...,
tracker = () -> M.Tracker(trackedfields = trackedfields))
m = M.Population(m.m, m.s, m.l, m.u, p, m.dist, M.init(p))
Random.seed!(seed)
ms = rand(m, nbirds(experiment))
data = run!(experiment, ms)
ms, data
end
function default_trackedfields(p)
pd = Dict(p...)
res = [(:cage, :cacheableitems),
(:cage, :eatableitems),
(:cage, :ismdpresent),
(:cage, :trays),
:actions]
if pd[:agent] ∈ [M.SpecSatAgent, M.PlasticCachingAgent]
push!(res, (:agent, :hungermodel, :hunger))
end
# if haskey(pd, :cacheparams) && pd[:cacheparams] <: M.PlanningAgentParams
# push!(res, (:agent, :cacheparams, :current))
# push!(res, (:agent, :cacheparams, :memory))
# end
if haskey(pd, :cacheparams) && pd[:cacheparams] <: M.PlasticCachingAgentParams
push!(res, (:agent, :cacheparams, :trayweights))
end
res
end
function parse_seed(seed)
seed == "best" && return 0
seed == "random" && return UInt(time_ns())
parse(UInt, seed)
end
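# Illustrative behaviour of parse_seed (comment added for clarity, not in the
# original script): parse_seed("best") == 0, parse_seed("random") returns a
# time-based UInt, and parse_seed("42") == UInt(42).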
stringify(x) = x == "" ? "" : "_$x"
function setfilename()
f = "$(model.model[])_$(model.experiment[])$(stringify(model.id[]))$(stringify(model.commit[])).bson.zstd"
if f ∈ model.files[]
model.list[] = f
end
end
function plot_bird(ms, id)
println("plotting bird $id")
tmp = FoodCachingPlotting.plotexperiment(ms[id])
model.layout[] = tmp.plot.layout
model.plot_data[] = tmp.plot.data
end
function titles(m)
vcat([fill(string(k), M.NestedStructInitialiser.free_param_length(v))
for (k, v) in m.p.free]...)
end
function plot_dists(m; x = 0:.01:1)
n = length(m.m)
n1 = ceil(Int, sqrt(n))
n2 = ceil(Int, n / n1)
xaxis = Dict([Symbol("xaxis", i) => attr(automargin = false,
title = attr(text = n, standoff = 0))
for (i, n) in enumerate(titles(m))]...)
model.dist_layout[] = PlotlyJS.Layout(; grid = attr(ygap = .6,
rows = n1,
columns = n2,
pattern = "independent"),
showlegend = false,
height = 900,
xaxis...)
model.dist_plots[] = [PlotData(x = (u - l) * x .+ l,
y = M.Distributions.pdf(m.dist(d, s), x),
yaxis = "y$i", xaxis = "x$i")
for (i, u, l, d, s) in zip(1:length(m.u), m.u, m.l, m.m, m.s)]
end
function plot_summary(e, data)
summary_plots = plot_compare(e, FoodCachingExperiments.summarize(e, data))
for i in 1:25
if i <= length(summary_plots)
getproperty(model, Symbol("summary$(i)_plots"))[] = summary_plots[i]
getproperty(model, Symbol("summary$(i)_layout"))[] = PlotlyJS.Layout(height = 300, width = 600)
else
getproperty(model, Symbol("summary$(i)_layout"))[] = PlotlyJS.Layout(height = 10, width = 10)
end
end
end
model = Stipple.init(Model())
updatefiles(model, model.datadir[])
on(model.birdid) do id
isempty(id) || plot_bird(model.models, id)
end
on(model.datadir) do dir
updatefiles(model, dir)
end
on(model.plot) do plot
println("plot changed $plot")
plot || return
setfilename()
f = model.list[]
println("Loading $f ...")
m = load(joinpath(model.datadir[], f))
plot_dists(m)
println("loaded")
e = Symbol(model.experiment[])
println("running")
ms, data = runmodel(m, e,
default_trackedfields(m.p.fixed),
parse_seed(model.seed[]))
model.models = ms
plot_summary(e, data)
model.birdid_options[] = 1:length(ms)
model.birdid[] = 1
model.plot[] = false
end
function ui()
page(vm(model), class = "container", [input("", @bind(:datadir),
style = "width: 500px"),
row(class = "st-br",
[cell(class = "st-module", [
h6("model"),
quasar(:select, "", @bind(:model), options = :opt_models)]),
cell(class = "st-module", [
h6("experiment"),
quasar(:select, "", @bind(:experiment), options = :opt_experiment)]),
cell(class = "st-module", [
h6("commit"),
quasar(:select, "", @bind(:commit), options = :opt_commit)]),
cell(class = "st-module", [
h6("id"),
quasar(:select, "", @bind(:id), options = :opt_id)]),
cell(class = "st-module", [
h6("bird"),
quasar(:select, "", @bind(:birdid), options = :birdid_options)]),
cell(class = "st-module", [
h6("seed"), "<br>",
input("", @bind(:seed), style = "width: 75px")]),
row(class = "justify-end",
btn("plot", @click("plot = true"),
style = "background: #FF0000; color: white")
),
]),
row(cell(class = "st-module", [
StipplePlotly.plot(:plot_data, layout = :layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary1_plots, layout = :summary1_layout),
StipplePlotly.plot(:summary2_plots, layout = :summary2_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary3_plots, layout = :summary3_layout),
StipplePlotly.plot(:summary4_plots, layout = :summary4_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary5_plots, layout = :summary5_layout),
StipplePlotly.plot(:summary6_plots, layout = :summary6_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary7_plots, layout = :summary7_layout),
StipplePlotly.plot(:summary8_plots, layout = :summary8_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary9_plots, layout = :summary9_layout),
StipplePlotly.plot(:summary10_plots, layout = :summary10_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary11_plots, layout = :summary11_layout),
StipplePlotly.plot(:summary12_plots, layout = :summary12_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary13_plots, layout = :summary13_layout),
StipplePlotly.plot(:summary14_plots, layout = :summary14_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary15_plots, layout = :summary15_layout),
StipplePlotly.plot(:summary16_plots, layout = :summary16_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary17_plots, layout = :summary17_layout),
StipplePlotly.plot(:summary18_plots, layout = :summary18_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary19_plots, layout = :summary19_layout),
StipplePlotly.plot(:summary20_plots, layout = :summary20_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary21_plots, layout = :summary21_layout),
StipplePlotly.plot(:summary22_plots, layout = :summary22_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary23_plots, layout = :summary23_layout),
StipplePlotly.plot(:summary24_plots, layout = :summary24_layout)
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:summary25_plots, layout = :summary25_layout),
])),
row(cell(class = "st-module", [
StipplePlotly.plot(:dist_plots, layout = :dist_layout)
])),
]) |> html
end
route("/", ui)
Genie.config.server_host = "127.0.0.1"
up()
|
{"hexsha": "a1ef5a3bf9dac1de7df61c98286061c86dfe44f9", "size": 12582, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/sim_dashboard.jl", "max_stars_repo_name": "jbrea/FoodCachingPlotting.jl", "max_stars_repo_head_hexsha": "0f95c0b93f507c16ca52f764c1b6239bff26b523", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/sim_dashboard.jl", "max_issues_repo_name": "jbrea/FoodCachingPlotting.jl", "max_issues_repo_head_hexsha": "0f95c0b93f507c16ca52f764c1b6239bff26b523", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/sim_dashboard.jl", "max_forks_repo_name": "jbrea/FoodCachingPlotting.jl", "max_forks_repo_head_hexsha": "0f95c0b93f507c16ca52f764c1b6239bff26b523", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2524590164, "max_line_length": 110, "alphanum_fraction": 0.5701001431, "num_tokens": 3408}
|
import numpy as np
class MultinomialNaiveBayes:
def __init__(self):
self.initialize()
def initialize(self):
self.feature_probs = []
self.prior = []
self.classes = []
self.class_count = []
self.fitted = False
self.feature_count = []
self.n_features = 0
self.coef = []
self.intercept = []
def fit(self, X, y, alpha=1):
"""
given an input x and a label vector x, this function trains
the model calculating the prior and the feature probabilities
to predict future instances
"""
if self.fitted is True:
self.initialize()
X = np.array(X)
y = np.array(y)
classes, counts = np.unique(y, return_counts=True)
self.n_features = y.shape[0]
self.prior = np.log(counts/self.n_features)
self.class_count = counts
self.classes = classes
for cl in self.classes:
feature_count = X[y == cl].sum(axis=0)
added_alpha = (X.shape[1] * alpha)
class_total_sum = feature_count.sum()
feature_prob = np.log((feature_count + alpha) /
(added_alpha + class_total_sum))
self.feature_probs.append(feature_prob)
self.feature_count.append(feature_count)
if len(self.classes) == 2:
self.coef = np.array([self.feature_probs[-1]])
self.intercept = np.array([self.prior[-1]])
else:
self.coef = self.feature_probs
self.intercept = self.prior
self.fitted = True
def predict_probs(self, X):
"""
given an input x the function outputs a matrix with one
column for every class known to the classifier and predicts
the probability of every instance in x to be assigned to each class
"""
X = np.array(X)
results = []
for i in range(len(self.classes)):
res = (self.feature_probs[i] * X).sum(axis=1) + self.prior[i]
results.append(res)
results = np.column_stack(results)
return results
def predict(self, X):
"""
This function calls the predict_probs() function and translate
the probabilities in actual labels returning the label corresponding
to the highest probability
"""
X = np.array(X)
results = self.predict_probs(X)
return self.classes[np.argmax(results, axis=1)]
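# Illustrative smoke test (assumed example, not part of the original file):
# predict_probs computes the unnormalized log-posterior
# log P(c) + sum_w x_w * log P(w | c) for each class c, and predict takes the
# argmax over classes. For instance:
#   nb = MultinomialNaiveBayes()
#   nb.fit([[2, 1], [0, 3]], [0, 1])
#   assert nb.predict([[3, 0]])[0] == 0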
if __name__ == "__main__":
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
data = load_iris()
X = data["data"]
y = data["target"]
splitted = train_test_split(X, y, test_size=0.33, random_state=42)
X_train, X_test, y_train, y_test = splitted
nb = MultinomialNaiveBayes()
nb.fit(X_train, y_train)
y_pred = nb.predict(X_test)
print(confusion_matrix(y_test, y_pred))
|
{"hexsha": "36fd7ce57023605447571784303b59a407bbc7b4", "size": 3012, "ext": "py", "lang": "Python", "max_stars_repo_path": "MNB.py", "max_stars_repo_name": "sebag90/spam_filter", "max_stars_repo_head_hexsha": "455309c02b6fa921b58111754a7fd2f3dd13271f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MNB.py", "max_issues_repo_name": "sebag90/spam_filter", "max_issues_repo_head_hexsha": "455309c02b6fa921b58111754a7fd2f3dd13271f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MNB.py", "max_forks_repo_name": "sebag90/spam_filter", "max_forks_repo_head_hexsha": "455309c02b6fa921b58111754a7fd2f3dd13271f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4242424242, "max_line_length": 76, "alphanum_fraction": 0.5949535193, "include": true, "reason": "import numpy", "num_tokens": 670}
|
import numpy as np
# Write a function that takes as input a list of numbers, and returns
# the list of values given by the softmax function.
def softmax(L):
    L = np.asarray(L, dtype=float)
    L = L - np.max(L)  # shift for numerical stability; does not change the result
    soft_max = []
    denom = np.sum(np.exp(L))
    for i in range(len(L)):
        soft_max.append(np.exp(L[i])/denom)
    return soft_max
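# Example (illustrative): softmax([1.0, 2.0, 3.0]) ~ [0.090, 0.245, 0.665];
# the returned values are positive and sum to 1.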
|
{"hexsha": "3ff8273be79ed673c11af379da98afd726453d62", "size": 298, "ext": "py", "lang": "Python", "max_stars_repo_path": "2_15_softmax.py", "max_stars_repo_name": "thejammerr/Deep-Learning-with-PyTorch", "max_stars_repo_head_hexsha": "393f7cee870ddb25b2e82457f102c3cc8fb9ef42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2_15_softmax.py", "max_issues_repo_name": "thejammerr/Deep-Learning-with-PyTorch", "max_issues_repo_head_hexsha": "393f7cee870ddb25b2e82457f102c3cc8fb9ef42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2_15_softmax.py", "max_forks_repo_name": "thejammerr/Deep-Learning-with-PyTorch", "max_forks_repo_head_hexsha": "393f7cee870ddb25b2e82457f102c3cc8fb9ef42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0909090909, "max_line_length": 69, "alphanum_fraction": 0.6677852349, "include": true, "reason": "import numpy", "num_tokens": 75}
|
fir_filter_sim/dspba_library_package.vhd
fir_filter_sim/dspba_library.vhd
fir_filter_sim/auk_dspip_math_pkg_hpfir.vhd
fir_filter_sim/auk_dspip_lib_pkg_hpfir.vhd
fir_filter_sim/auk_dspip_avalon_streaming_controller_hpfir.vhd
fir_filter_sim/auk_dspip_avalon_streaming_sink_hpfir.vhd
fir_filter_sim/auk_dspip_avalon_streaming_source_hpfir.vhd
fir_filter_sim/auk_dspip_roundsat_hpfir.vhd
fir_filter_sim/altera_avalon_sc_fifo.v
fir_filter_sim/fir_filter_rtl_core.vhd
fir_filter_sim/fir_filter_ast.vhd
fir_filter_sim/fir_filter.vhd
fir_filter_sim/fir_filter_nativelink.tcl
fir_filter_sim/fir_filter_msim.tcl
fir_filter_sim/fir_filter_tb.vhd
fir_filter_sim/fir_filter_mlab.m
fir_filter_sim/fir_filter_model.m
fir_filter_sim/fir_filter_coef_int.txt
fir_filter_sim/fir_filter_input.txt
fir_filter_sim/fir_filter_param.txt
|
{"hexsha": "785f4065feb8dc2a1ec575b9582e41a6404ee80f", "size": 813, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "de1soc/hw/ip/source/fir_filter_ip/fir_filter_sim.f", "max_stars_repo_name": "Seba-P/signal-recon-project", "max_stars_repo_head_hexsha": "73951b32bff79ab65af3a909ded933b0deb1d52e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "de1soc/hw/ip/source/fir_filter_ip/fir_filter_sim.f", "max_issues_repo_name": "Seba-P/signal-recon-project", "max_issues_repo_head_hexsha": "73951b32bff79ab65af3a909ded933b0deb1d52e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "de1soc/hw/ip/source/fir_filter_ip/fir_filter_sim.f", "max_forks_repo_name": "Seba-P/signal-recon-project", "max_forks_repo_head_hexsha": "73951b32bff79ab65af3a909ded933b0deb1d52e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7142857143, "max_line_length": 62, "alphanum_fraction": 0.926199262, "num_tokens": 260}
|
import networkx as nx
import numpy as np
from HE2_ABC import Root
import HE2_tools as tools
from HE2_Solver import HE2_Solver
def evalute_network_fluids_wo_root(_G, x_dict):
assert not (Root in _G.nodes)
for u, v in _G.edges:
assert x_dict[(u, v)] >= 0
G = nx.DiGraph(_G)
for (u, v) in x_dict:
if x_dict[(u, v)] == 0:
G.remove_edge(u, v)
for node in _G.nodes:
if len(G[node]) == 0:
G.remove_node(node)
nodes = list(G.nodes)
edges = list(G.edges)
EN = len(edges)
N = len(nodes)
x = np.array([x_dict[e] for e in edges])
A = -1 * nx.incidence_matrix(G, nodelist=nodes, edgelist=edges, oriented=True).toarray()
Q = np.matmul(A, x)
var_idx = {e:i for i, e in enumerate(edges)}
sinks = [n for i, n in enumerate(nodes) if Q[i] < 0]
var_idx.update({n:i+EN for i, n in enumerate(sinks)})
M = len(var_idx)
mx1 = np.zeros((N, M)) # the first partition of matrix is for 1stCL
    mx2 = np.zeros((M-N, M)) # the second partition dictates the condition that all fluids leaving one node must be the same
i2 = 0
for i, node in enumerate(nodes):
these_vars_are_the_same = []
if Q[i] < 0: # n is sink, so there is an unknown (equation variable) for outbound flow
j = var_idx[node] # get the unknown index in matrix
mx1[i, j] = Q[i]
these_vars_are_the_same += [j] # remember the unknown for a while
for u, v in G.in_edges(node): # inlet flows
j = var_idx[(u, v)]
mx1[i, j] = x_dict[(u, v)]
for u, v in G.out_edges(node): # all the outlet flows are the same
j = var_idx[(u, v)]
mx1[i, j] = -x_dict[(u, v)]
            these_vars_are_the_same += [j]  # so we have to remember this unknown too
if Q[i] > 0:
mx1[i] /= Q[i] # cause we need only ones and zeros on the right side
if len(these_vars_are_the_same) > 1: # if there are more than one remembered outlet flow
for fl1, fl2 in zip(these_vars_are_the_same[1:], these_vars_are_the_same[:-1]):
                mx2[i2, fl1] = 1    # we have to write an equation into the matrix stating that these outlet fluids are equal
mx2[i2, fl2] = -1
i2 += 1
mx = np.append(mx1, mx2, axis=0)
assert mx.shape == (M, M)
mx_inv = np.linalg.inv(mx)
rez_mx = np.zeros((M, N))
srcs = []
S = 0
for i, n in enumerate(nodes):
if Q[i] > 0:
rez_mx[:, S] = mx_inv[:,i]
srcs += [n]
S += 1
rez = {}
for k, i in var_idx.items():
rez[k] = rez_mx[i, :S]
np.testing.assert_almost_equal(abs(sum(rez[k])), 1)
return rez, srcs
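# Note (added comment, not in the original file): the returned dict maps every
# edge (and every sink node) to a vector of length len(srcs); entry j is the
# share of the flow on that edge that originates from source srcs[j], so the
# entries of each vector sum to 1 in absolute value (checked by the assert above).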
if __name__ == '__main__':
G0, nodes = tools.generate_random_net_v0(N=7, E=9, SNK=2, randseed=424242)
solver = HE2_Solver(G0)
solver.solve()
x_dict = dict()
G = nx.DiGraph()
G.add_nodes_from(G0)
for u, v in G0.edges:
x = G0[u][v]['obj'].result['x']
if x < 0:
x_dict[(v,u)] = -x
G.add_edge(v, u)
else:
x_dict[(u, v)] = x
G.add_edge(u, v)
rez = evalute_network_fluids_wo_root(G, x_dict)
print(rez)
|
{"hexsha": "11fad87f99192d91da6650adaa0c9c057084de53", "size": 3250, "ext": "py", "lang": "Python", "max_stars_repo_path": "KirchhoffSolver/code/HE2_MixFluids.py", "max_stars_repo_name": "khabibullinra/unifloc", "max_stars_repo_head_hexsha": "7338c12788e3f3340bf8d1cb1db15d0471b62434", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-02-05T20:02:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-24T13:18:31.000Z", "max_issues_repo_path": "KirchhoffSolver/code/HE2_MixFluids.py", "max_issues_repo_name": "unifloc/unifloc_py", "max_issues_repo_head_hexsha": "7338c12788e3f3340bf8d1cb1db15d0471b62434", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2017-09-29T15:14:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T07:17:22.000Z", "max_forks_repo_path": "KirchhoffSolver/code/HE2_MixFluids.py", "max_forks_repo_name": "unifloc/unifloc", "max_forks_repo_head_hexsha": "7338c12788e3f3340bf8d1cb1db15d0471b62434", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-31T16:14:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-31T06:24:29.000Z", "avg_line_length": 32.8282828283, "max_line_length": 124, "alphanum_fraction": 0.5618461538, "include": true, "reason": "import numpy,import networkx", "num_tokens": 997}
|
import numpy
import sympy
def max_coeff(poly):
""" Computes the maximum width of a polynomial's coefficients.
Arguments:
poly -- the rational polynomial to analyze
"""
dom = poly.domain
if isinstance(dom, sympy.polys.domains.IntegerRing):
# Finds maximal element.
return numpy.max(poly.all_coeffs())
elif isinstance(dom, sympy.polys.domains.FiniteField):
# Finds maximal element modulo the field order (handles negatives).
return numpy.max(numpy.mod(poly.all_coeffs(), dom.characteristic()))
elif isinstance(dom, sympy.polys.domains.RationalField):
# Considers both numerator and denominator (coeff=p/q).
max_coeff = 0
for coeff in poly.all_coeffs():
max_coeff = max(max_coeff, abs(coeff.p), coeff.q)
return max_coeff
def count_zeros(poly):
""" Computes the number of zero coefficients in a polynomial.
Arguments:
poly -- the rational polynomial to analyze
"""
dom = poly.domain
if isinstance(dom, sympy.polys.domains.IntegerRing) or isinstance(dom, sympy.polys.domains.FiniteField):
# Finds all zero elements.
return len(poly.all_coeffs()) - numpy.count_nonzero(poly.all_coeffs())
elif isinstance(dom, sympy.polys.domains.RationalField):
# Considers both numerator and denominator (coeff=p/q).
zeros = 0
for coeff in poly.all_coeffs():
if coeff.p == 0:
zeros = zeros + 1
return zeros
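# Illustrative usage (assumed example, not part of the original file):
#   p = sympy.Poly([3, 0, -7, 1], sympy.Symbol('x'))  # 3x^3 - 7x + 1 over ZZ
#   max_coeff(p)   -> 3  (note: the maximum, not the maximum absolute value)
#   count_zeros(p) -> 1  (the zero x^2 coefficient)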
|
{"hexsha": "d24cf8edc6290bf43395888943f449d1dd8620f8", "size": 1502, "ext": "py", "lang": "Python", "max_stars_repo_path": "symbaudio/utils/poly.py", "max_stars_repo_name": "ScottWe/symbaudio", "max_stars_repo_head_hexsha": "4797322acf340553cfd71ac3d63d62254735320e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "symbaudio/utils/poly.py", "max_issues_repo_name": "ScottWe/symbaudio", "max_issues_repo_head_hexsha": "4797322acf340553cfd71ac3d63d62254735320e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "symbaudio/utils/poly.py", "max_forks_repo_name": "ScottWe/symbaudio", "max_forks_repo_head_hexsha": "4797322acf340553cfd71ac3d63d62254735320e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7619047619, "max_line_length": 108, "alphanum_fraction": 0.6617842876, "include": true, "reason": "import numpy,import sympy", "num_tokens": 327}
|
[STATEMENT]
lemma nonpos_Ints_cases:
assumes "x \<in> \<int>\<^sub>\<le>\<^sub>0"
obtains n where "x = of_int n" "n \<le> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>n. \<lbrakk>x = of_int n; n \<le> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<in> \<int>\<^sub>\<le>\<^sub>0
goal (1 subgoal):
1. (\<And>n. \<lbrakk>x = of_int n; n \<le> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding nonpos_Ints_def
[PROOF STATE]
proof (prove)
using this:
x \<in> {of_int n |n. n \<le> 0}
goal (1 subgoal):
1. (\<And>n. \<lbrakk>x = of_int n; n \<le> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto elim!: Ints_cases)
|
{"llama_tokens": 340, "file": null, "length": 3}
|
struct BernoulliArm <: BanditArm  # `struct` replaces the pre-1.0 `type` keyword
p::Float64
end
function draw(arm::BernoulliArm)
if rand() > arm.p
0
else
1
end
end
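# Illustrative usage (assumes the abstract type `BanditArm` is defined elsewhere
# in the package): draw(BernoulliArm(0.3)) returns 1 with probability 0.3 and
# 0 otherwise.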
|
{"hexsha": "af01a24880417b589f18dc9ae45c892919b4abf7", "size": 131, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/arms/bernoulli.jl", "max_stars_repo_name": "rmminusrslash/BanditsBook", "max_stars_repo_head_hexsha": "b477b5a51c719f3725e0c0f05cb0ff7267bfc2f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 713, "max_stars_repo_stars_event_min_datetime": "2015-01-03T03:50:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T11:11:44.000Z", "max_issues_repo_path": "julia/arms/bernoulli.jl", "max_issues_repo_name": "rmminusrslash/BanditsBook", "max_issues_repo_head_hexsha": "b477b5a51c719f3725e0c0f05cb0ff7267bfc2f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2016-01-07T18:58:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T14:27:06.000Z", "max_forks_repo_path": "julia/arms/bernoulli.jl", "max_forks_repo_name": "rmminusrslash/BanditsBook", "max_forks_repo_head_hexsha": "b477b5a51c719f3725e0c0f05cb0ff7267bfc2f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 226, "max_forks_repo_forks_event_min_datetime": "2015-01-02T11:27:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T04:09:43.000Z", "avg_line_length": 10.9166666667, "max_line_length": 32, "alphanum_fraction": 0.6488549618, "num_tokens": 52}
|
from configuration import configuration
from transformers import BertForMaskedLM, BertTokenizer, AutoConfig
import numpy as np
import torch
import argparse
from bias_utils import collate, how_many_tokens, find_mask_token
import pandas as pd
from model import Aligned_BERT
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--language', type=str, required=False, default='ko')
parser.add_argument('--custom_model_path', type=str, default=None)
parser.add_argument('--num', default=0)
args = parser.parse_args()
use_pretrained = True if args.custom_model_path is None else False
nationality = configuration[args.language]['nationality']
bert_model = configuration[args.language]['bert_model']
template_path = configuration[args.language]['template_path']
occ_path = configuration[args.language]['occ_path']
MSK = configuration[args.language]['MSK']
en_nationality = configuration['en']['nationality']
tokenizer = BertTokenizer.from_pretrained(bert_model)
MSK = tokenizer.mask_token_id
config = AutoConfig.from_pretrained(bert_model)
device = torch.device("cuda:"+str(args.num))
if args.custom_model_path:
print("Model Loading!")
model = torch.load(args.custom_model_path, map_location=device)
else:
print("Using pretrained model!")
model = BertForMaskedLM.from_pretrained(bert_model)
if args.language == 'en':
from pattern3.en import pluralize, singularize
elif args.language == 'de':
from pattern3.de import pluralize, singularize
elif args.language == 'es':
from pattern3.es import pluralize, singularize
else:
pass
model.eval()
model.to(device)
# Occupation Loading
with open(occ_path, 'r') as f:
tt = f.readlines()
occ = []
for i in range(len(tt)):
occ.append(tt[i].rstrip())
print("Occupations loading complete!")
# Loading Templates
with open(template_path, 'r') as f:
tt = f.readlines()
templates = []
for i in range(len(tt)):
templates.append(tt[i].rstrip())
print("Templates loading complete!")
def log_probability_for_single_sentence(model, tokenizer,
template, attr, nation_dict, last=False, use_pretrained=False):
col_dict = collate(en_nationality, nationality)
vocab = tokenizer.get_vocab()
    softmax = torch.nn.Softmax(dim=-1)  # explicit dim avoids the deprecated implicit-dim behaviour
results = []
attribute_num = len(tokenizer.tokenize(attr))
for number in nation_dict.keys():
nations = nation_dict[number]
how_many = int(number)
target_mask = ' '.join(['[MASK]' for _ in range(how_many)])
attribute_mask = ' '.join(['[MASK]' for _ in range(attribute_num)])
if '[AAA]' in template:
sentence = template.replace('[TTT]', target_mask).replace('[AAA]', attr)
prior_sentence = template.replace('[TTT]', target_mask).replace('[AAA]', attribute_mask)
else:
sentence = template.replace('[TTT]', target_mask).replace('[AAAs]', pluralize(attr))
prior_sentence = template.replace('[TTT]', target_mask).replace('[AAAs]', attribute_mask)
input_ids = tokenizer(sentence, return_tensors='pt').to(device)
if not use_pretrained:
target_prob = model(**input_ids).to(device)
else:
target_prob = model(**input_ids)[0].to(device)
prior_input_ids = tokenizer(prior_sentence, return_tensors='pt').to(device)
if not use_pretrained:
prior_prob = model(**prior_input_ids).to(device)
else:
prior_prob = model(**prior_input_ids)[0].to(device)
masked_tokens = find_mask_token(tokenizer, sentence, how_many, MSK)
masked_tokens_prior = find_mask_token(tokenizer, prior_sentence, how_many, MSK, last)
logits = []
prior_logits = []
for mask in masked_tokens:
logits.append(softmax(target_prob[0][mask]).detach())
for mask in masked_tokens_prior:
prior_logits.append(softmax(prior_prob[0][mask]).detach())
for nat in nations:
ddf = [col_dict[nat]]
nat_logit = 1.0
nat_prior_logit = 1.0
for token in tokenizer.tokenize(nat):
for logit in logits:
nat_logit *= float(logit[vocab[token]].item())
for prior_logit in prior_logits:
nat_prior_logit *= float(prior_logit[vocab[token]].item())
ddf.append(np.log(float(nat_logit / nat_prior_logit)))
results.append(np.array(ddf))
return pd.DataFrame(results, columns=['nationality', 'normalized_prob'], dtype=(float)).sort_values(
"normalized_prob", ascending=False)
def log_probability_for_single_sentence_multiple_attr(model, tokenizer,
template, occ, nation_dict, use_pretrained=False):
last = False
if template.find('[TTT]') > template.find('[AAA]') and template.find('[TTT]') > template.find('[AAAs]'):
last = True
mean_scores = []
var_scores = []
std_scores = []
for attr in occ:
ret_df = log_probability_for_single_sentence(model, tokenizer,
template, attr, nation_dict, last, use_pretrained)
mean_scores.append(ret_df['normalized_prob'].mean())
var_scores.append(ret_df['normalized_prob'].var())
std_scores.append(ret_df['normalized_prob'].std())
mean_scores = np.array(mean_scores)
var_scores = np.array(var_scores)
std_scores = np.array(std_scores)
return mean_scores, var_scores, std_scores
def log_probability_for_multiple_sentence(model, tokenizer, templates, occ, use_pretrained=False):
nation_dict = how_many_tokens(nationality, tokenizer)
total_mean = []
total_var = []
total_std = []
for template in tqdm(templates):
m, v, s = log_probability_for_single_sentence_multiple_attr(model, tokenizer,
template, occ, nation_dict, use_pretrained)
total_mean.append(m.mean())
total_var.append(v.mean())
total_std.append(s.mean())
return total_mean, total_var, total_std
total_mean, total_var, total_std = log_probability_for_multiple_sentence(model, tokenizer, templates, occ, use_pretrained=use_pretrained)
if use_pretrained:
print("CB score of {} in {} : {}".format(bert_model, args.language, np.array(total_var).mean()))
else:
print("CB score of {} (from weights {}) in {}: {}".format(bert_model, args.custom_model_path, args.language, np.array(total_var).mean()))
|
{"hexsha": "0d85fdee1656b7adba1d101d4f043dea5b6188b1", "size": 6574, "ext": "py", "lang": "Python", "max_stars_repo_path": "score.py", "max_stars_repo_name": "jaimeenahn/ethnic_bias", "max_stars_repo_head_hexsha": "a115eb7c3af7daf95309f3b4c1c7a564eb556190", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-03T08:17:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T05:53:43.000Z", "max_issues_repo_path": "score.py", "max_issues_repo_name": "jaimeenahn/ethnic_bias", "max_issues_repo_head_hexsha": "a115eb7c3af7daf95309f3b4c1c7a564eb556190", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-21T10:28:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T10:28:59.000Z", "max_forks_repo_path": "score.py", "max_forks_repo_name": "jaimeenahn/ethnic_bias", "max_forks_repo_head_hexsha": "a115eb7c3af7daf95309f3b4c1c7a564eb556190", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4188481675, "max_line_length": 141, "alphanum_fraction": 0.6659567995, "include": true, "reason": "import numpy", "num_tokens": 1416}
|
"""
Utilities for working with rating matrices.
"""
from collections import namedtuple
import logging
import warnings
import pandas as pd
import numpy as np
import scipy.sparse as sps
import numba as n
from numba import njit, jitclass, prange
from .util.array import swap
_logger = logging.getLogger(__name__)
RatingMatrix = namedtuple('RatingMatrix', ['matrix', 'users', 'items'])
RatingMatrix.__doc__ = """
A rating matrix with associated indices.
Attributes:
matrix(CSR or scipy.sparse.csr_matrix):
The rating matrix, with users on rows and items on columns.
users(pandas.Index): mapping from user IDs to row numbers.
items(pandas.Index): mapping from item IDs to column numbers.
"""
def mkl_ops():
"""
Import and return the MKL operations module. This is only for internal use.
"""
try:
from . import _mkl_ops
if _mkl_ops.clib:
return _mkl_ops
else:
return None
except ImportError:
return None
def _csr_delegate(name):
def func(self):
return getattr(self.N, name)
return property(func)
class _CSR:
"""
Internal implementation class for :py:class:`CSR`. If you work with CSRs from Numba,
you will use this.
Note that the ``values`` array is always present (unlike the Python shim), but is
zero-length if no values are present. This eases Numba type-checking.
"""
def __init__(self, nrows, ncols, nnz, ptrs, inds, vals):
self.nrows = nrows
self.ncols = ncols
self.nnz = nnz
self.rowptrs = ptrs
self.colinds = inds
if vals is not None:
self.values = vals
else:
self.values = np.zeros(0)
def subset_rows(self, begin, end):
st = self.rowptrs[begin]
ed = self.rowptrs[end]
rps = self.rowptrs[begin:(end+1)] - st
cis = self.colinds[st:ed]
if self.values.size == 0:
vs = self.values
else:
vs = self.values[st:ed]
return _CSR(end - begin, self.ncols, ed - st, rps, cis, vs)
def row(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row + 1]
v = np.zeros(self.ncols)
cols = self.colinds[sp:ep]
if self.values.size == 0:
v[cols] = 1
else:
v[cols] = self.values[sp:ep]
return v
def row_extent(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row+1]
return (sp, ep)
def row_cs(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row + 1]
return self.colinds[sp:ep]
def row_vs(self, row):
sp = self.rowptrs[row]
ep = self.rowptrs[row + 1]
if self.values.size == 0:
return np.full(ep - sp, 1.0)
else:
return self.values[sp:ep]
def rowinds(self):
ris = np.zeros(self.nnz, np.intc)
for i in range(self.nrows):
sp, ep = self.row_extent(i)
ris[sp:ep] = i
return ris
_CSR64 = type('_CSR64', _CSR.__bases__, dict(_CSR.__dict__))
_CSR = jitclass({
'nrows': n.intc,
'ncols': n.intc,
'nnz': n.intc,
'rowptrs': n.intc[::1],
'colinds': n.intc[::1],
'values': n.float64[::1]
})(_CSR)
_CSR64 = jitclass({
'nrows': n.intc,
'ncols': n.intc,
'nnz': n.int64,
'rowptrs': n.int64[::1],
'colinds': n.intc[::1],
'values': n.float64[::1]
})(_CSR64)
class CSR:
"""
Simple compressed sparse row matrix. This is like :py:class:`scipy.sparse.csr_matrix`, with
a couple of useful differences:
* It is backed by a Numba jitclass, so it can be directly used from Numba-optimized functions.
* The value array is optional, for cases in which only the matrix structure is required.
* The value array, if present, is always double-precision.
You generally don't want to create this class yourself with the constructor. Instead, use one
of its class methods.
If you need to pass an instance off to a Numba-compiled function, use :py:attr:`N`::
_some_numba_fun(csr.N)
We use the indirection between this and the Numba jitclass so that the main CSR implementation
can be pickled, and so that we can have class and instance methods that are not compatible with
jitclass but which are useful from interpreted code.
Attributes:
N(_CSR): the Numba jitclass backing (has the same attributes and most methods).
nrows(int): the number of rows.
ncols(int): the number of columns.
nnz(int): the number of entries.
rowptrs(numpy.ndarray): the row pointers.
colinds(numpy.ndarray): the column indices.
values(numpy.ndarray): the values
"""
__slots__ = ['N']
def __init__(self, nrows=None, ncols=None, nnz=None, ptrs=None, inds=None, vals=None, N=None):
if N is not None:
self.N = N
else:
if ptrs.dtype == np.int64:
self.N = _CSR64(nrows, ncols, nnz, ptrs, inds, vals)
else:
self.N = _CSR(nrows, ncols, nnz, ptrs, inds, vals)
@classmethod
def empty(cls, shape, row_nnzs, *, rpdtype=np.intc):
"""
Create an empty CSR matrix.
Args:
shape(tuple): the array shape (rows,cols)
row_nnzs(array-like): the number of nonzero entries for each row
"""
nrows, ncols = shape
assert len(row_nnzs) == nrows
nnz = np.sum(row_nnzs, dtype=np.int64)
rowptrs = np.zeros(nrows + 1, dtype=rpdtype)
rowptrs[1:] = np.cumsum(row_nnzs, dtype=rpdtype)
colinds = np.full(nnz, -1, dtype=np.intc)
values = np.full(nnz, np.nan)
return cls(nrows, ncols, nnz, rowptrs, colinds, values)
@classmethod
def from_coo(cls, rows, cols, vals, shape=None, rpdtype=np.intc):
"""
Create a CSR matrix from data in COO format.
Args:
rows(array-like): the row indices.
cols(array-like): the column indices.
vals(array-like): the data values; can be ``None``.
shape(tuple): the array shape, or ``None`` to infer from row & column indices.
"""
if shape is not None:
nrows, ncols = shape
assert np.max(rows) < nrows
assert np.max(cols) < ncols
else:
nrows = np.max(rows) + 1
ncols = np.max(cols) + 1
nnz = len(rows)
assert len(cols) == nnz
assert vals is None or len(vals) == nnz
rowptrs = np.zeros(nrows + 1, dtype=rpdtype)
align = np.full(nnz, -1, dtype=rpdtype)
_csr_align(rows, nrows, rowptrs, align)
cols = cols[align].copy()
vals = vals[align].copy() if vals is not None else None
return cls(nrows, ncols, nnz, rowptrs, cols, vals)
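    # Illustrative example (added comment, not part of the original file):
    #   CSR.from_coo(np.array([0, 0, 1]), np.array([0, 2, 1]),
    #                np.array([1.0, 2.0, 3.0]), shape=(2, 3))
    # yields rowptrs == [0, 2, 3], colinds == [0, 2, 1], values == [1., 2., 3.].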
@classmethod
def from_scipy(cls, mat, copy=True):
"""
Convert a scipy sparse matrix to an internal CSR.
Args:
mat(scipy.sparse.spmatrix): a SciPy sparse matrix.
copy(bool): if ``False``, reuse the SciPy storage if possible.
Returns:
CSR: a CSR matrix.
"""
if not sps.isspmatrix_csr(mat):
mat = mat.tocsr(copy=copy)
rp = np.require(mat.indptr, np.intc, 'C')
if copy and rp is mat.indptr:
rp = rp.copy()
cs = np.require(mat.indices, np.intc, 'C')
if copy and cs is mat.indices:
cs = cs.copy()
vs = mat.data.copy() if copy else mat.data
return cls(mat.shape[0], mat.shape[1], mat.nnz, rp, cs, vs)
def to_scipy(self):
"""
Convert a CSR matrix to a SciPy :py:class:`scipy.sparse.csr_matrix`. Avoids copying
if possible.
Args:
self(CSR): A CSR matrix.
Returns:
scipy.sparse.csr_matrix:
A SciPy sparse matrix with the same data.
"""
values = self.values
if values is None:
values = np.full(self.nnz, 1.0)
return sps.csr_matrix((values, self.colinds, self.rowptrs), shape=(self.nrows, self.ncols))
nrows = _csr_delegate('nrows')
ncols = _csr_delegate('ncols')
nnz = _csr_delegate('nnz')
rowptrs = _csr_delegate('rowptrs')
colinds = _csr_delegate('colinds')
@property
def values(self):
if self.N.values.size:
return self.N.values
else:
return None
@values.setter
def values(self, vs: np.ndarray):
if vs is not None:
if not isinstance(vs, np.ndarray):
raise TypeError('values not an ndarray')
if vs.ndim != 1:
                raise ValueError('values has {} dimensions, expected 1'.format(vs.ndim))
if vs.shape[0] < self.nnz:
s = 'values has only {} entries (expected at least {})'
raise ValueError(s.format(vs.shape[0], self.nnz))
vs = vs[:self.nnz]
vs = np.require(vs, 'f8')
self.N.values = vs
else:
self.N.values = np.zeros(0)
def subset_rows(self, begin, end):
"""
Subset the rows in this matrix.
"""
return CSR(N=self.N.subset_rows(begin, end))
def rowinds(self) -> np.ndarray:
"""
Get the row indices from this array. Combined with :py:attr:`colinds` and
:py:attr:`values`, this can form a COO-format sparse matrix.
.. note:: This method is not available from Numba.
"""
return self.N.rowinds()
def row(self, row):
"""
Return a row of this matrix as a dense ndarray.
Args:
row(int): the row index.
Returns:
numpy.ndarray: the row, with 0s in the place of missing values.
"""
return self.N.row(row)
def row_extent(self, row):
"""
Get the extent of a row in the underlying column index and value arrays.
Args:
row(int): the row index.
Returns:
tuple: ``(s, e)``, where the row occupies positions :math:`[s, e)` in the
CSR data.
"""
return self.N.row_extent(row)
def row_cs(self, row):
"""
        Get the column indices for the stored values of a row.
"""
return self.N.row_cs(row)
def row_vs(self, row):
"""
Get the stored values of a row.
"""
return self.N.row_vs(row)
def row_nnzs(self):
"""
Get a vector of the number of nonzero entries in each row.
.. note:: This method is not available from Numba.
Returns:
numpy.ndarray: the number of nonzero entries in each row.
"""
return np.diff(self.rowptrs)
def sort_values(self):
"""
Sort CSR rows in nonincreasing order by value.
.. note:: This method is not available from Numba.
"""
_csr_sort(self.nrows, self.rowptrs, self.colinds, self.values)
def normalize_rows(self, normalization):
"""
Normalize the rows of the matrix.
.. note:: The normalization *ignores* missing values instead of treating
them as 0.
.. note:: This method is not available from Numba.
Args:
normalization(str):
The normalization to perform. Can be one of:
* ``'center'`` - center rows about the mean
* ``'unit'`` - convert rows to a unit vector
Returns:
numpy.ndarray:
The normalization values for each row.
"""
if normalization == 'center':
return _center_rows(self.N)
elif normalization == 'unit':
return _unit_rows(self.N)
else:
raise ValueError('unknown normalization: ' + normalization)
def transpose(self, values=True):
"""
Transpose a CSR matrix.
.. note:: This method is not available from Numba.
Args:
values(bool): whether to include the values in the transpose.
Returns:
CSR: the transpose of this matrix (or, equivalently, this matrix in CSC format).
"""
n_rows = self.rowinds()
rows = self.colinds
n_vs = self.values if values else None
if n_vs is not None:
n_vs = n_vs.copy()
rowptrs = _csr_align_inplace((self.ncols, self.nrows), rows, n_rows, n_vs)
if self.rowptrs.dtype == np.int32:
rowptrs = rowptrs.astype(np.int32)
return CSR(self.ncols, self.nrows, self.nnz, rowptrs, n_rows, n_vs)
def filter_nnzs(self, filt):
"""
Filter the values along the full NNZ axis.
Args:
filt(ndarray):
a logical array of length :attr:`nnz` that indicates the values to keep.
Returns:
CSR: The filtered sparse matrix.
"""
if len(filt) != self.nnz:
raise ValueError('filter has length %d, expected %d' % (len(filt), self.nnz))
rps2 = np.zeros_like(self.rowptrs)
for i in range(self.nrows):
sp, ep = self.row_extent(i)
rlen = np.sum(filt[sp:ep])
rps2[i+1] = rps2[i] + rlen
nnz2 = rps2[-1]
assert nnz2 == np.sum(filt)
cis2 = self.colinds[filt]
vs = self.values
vs2 = None if vs is None else vs[filt]
return CSR(self.nrows, self.ncols, nnz2, rps2, cis2, vs2)
def __repr__(self):
return '<CSR {}x{} ({} nnz)>'.format(self.nrows, self.ncols, self.nnz)
def __getstate__(self):
return dict(shape=(self.nrows, self.ncols), nnz=self.nnz,
rowptrs=self.rowptrs, colinds=self.colinds, values=self.values)
def __setstate__(self, state):
nrows, ncols = state['shape']
nnz = state['nnz']
rps = state['rowptrs']
cis = state['colinds']
vs = state['values']
if rps.dtype == np.int64:
self.N = _CSR64(nrows, ncols, nnz, rps, cis, vs)
else:
self.N = _CSR(nrows, ncols, nnz, rps, cis, vs)
@njit(n.void(n.intc, n.intc[:], n.intc[:], n.double[:]),
parallel=True, nogil=True)
def _csr_sort(nrows, rowptrs, colinds, values):
assert len(rowptrs) > nrows
for i in prange(nrows):
sp = rowptrs[i]
ep = rowptrs[i+1]
if ep > sp:
ord = np.argsort(values[sp:ep])
ord = ord[::-1]
colinds[sp:ep] = colinds[sp + ord]
values[sp:ep] = values[sp + ord]
@njit(nogil=True)
def _center_rows(csr: _CSR):
means = np.zeros(csr.nrows)
for i in range(csr.nrows):
sp, ep = csr.row_extent(i)
if sp == ep:
continue # empty row
vs = csr.row_vs(i)
m = np.mean(vs)
means[i] = m
csr.values[sp:ep] -= m
return means
@njit(nogil=True)
def _unit_rows(csr: _CSR):
norms = np.zeros(csr.nrows)
for i in range(csr.nrows):
sp, ep = csr.row_extent(i)
if sp == ep:
continue # empty row
vs = csr.row_vs(i)
m = np.linalg.norm(vs)
norms[i] = m
csr.values[sp:ep] /= m
return norms
@njit(nogil=True)
def _csr_align(rowinds, nrows, rowptrs, align):
rcts = np.zeros(nrows, dtype=rowptrs.dtype)
for r in rowinds:
rcts[r] += 1
rowptrs[1:] = np.cumsum(rcts)
rpos = rowptrs[:-1].copy()
for i in range(len(rowinds)):
row = rowinds[i]
pos = rpos[row]
align[pos] = i
rpos[row] += 1
@njit(nogil=True)
def _csr_align_inplace(shape, rows, cols, vals):
"""
Align COO data in-place for a CSR matrix.
Args:
shape: the matrix shape
rows: the matrix row indices (not modified)
cols: the matrix column indices (**modified**)
vals: the matrix values (**modified**)
Returns:
the CSR row pointers
"""
nrows, ncols = shape
nnz = len(rows)
rps = np.zeros(nrows + 1, np.int64)
for i in range(nnz):
rps[rows[i] + 1] += 1
for i in range(nrows):
rps[i+1] += rps[i]
rci = rps[:nrows].copy()
pos = 0
row = 0
rend = rps[1]
while pos < nnz:
r = rows[pos]
# swap until we have something in place
while r != row:
tgt = rci[r]
# swap with the target position
swap(cols, pos, tgt)
if vals is not None:
swap(vals, pos, tgt)
# update the target start pointer
rci[r] += 1
# update the loop check
r = rows[tgt]
# now the current entry in the arrays is good
# we need to advance to the next entry
if pos < rend: # keep going in this row
pos += 1
rci[row] += 1
# skip finished rows
while pos == rend and pos < nnz:
row += 1
pos = rci[row]
rend = rps[row+1]
return rps
@njit
def _empty_csr(nrows, ncols, sizes):
nnz = np.sum(sizes)
rowptrs = np.zeros(nrows + 1, dtype=np.intc)
for i in range(nrows):
rowptrs[i+1] = rowptrs[i] + sizes[i]
colinds = np.full(nnz, -1, dtype=np.intc)
values = np.full(nnz, np.nan)
return _CSR(nrows, ncols, nnz, rowptrs, colinds, values)
def sparse_ratings(ratings, scipy=False):
"""
Convert a rating table to a sparse matrix of ratings.
Args:
ratings(pandas.DataFrame): a data table of (user, item, rating) triples.
scipy: if ``True``, return a SciPy matrix instead of :py:class:`CSR`.
Returns:
RatingMatrix:
a named tuple containing the sparse matrix, user index, and item index.
"""
uidx = pd.Index(ratings.user.unique(), name='user')
iidx = pd.Index(ratings.item.unique(), name='item')
_logger.debug('creating matrix with %d ratings for %d items by %d users',
len(ratings), len(iidx), len(uidx))
row_ind = uidx.get_indexer(ratings.user).astype(np.intc)
col_ind = iidx.get_indexer(ratings.item).astype(np.intc)
if 'rating' in ratings.columns:
vals = np.require(ratings.rating.values, np.float64)
else:
vals = None
matrix = CSR.from_coo(row_ind, col_ind, vals, (len(uidx), len(iidx)))
if scipy:
matrix = matrix.to_scipy()
return RatingMatrix(matrix, uidx, iidx)
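# Illustrative usage (assumed example, not part of the original file):
#   ratings = pd.DataFrame({'user': [1, 1, 2], 'item': [7, 9, 7],
#                           'rating': [4.0, 3.0, 5.0]})
#   rmat = sparse_ratings(ratings)
#   # rmat.matrix is a 2x2 CSR; rmat.users/rmat.items map IDs to rows/columns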
|
{"hexsha": "0d2e1649a09b33ea74b9c1962e04b235d1f65cb6", "size": 18437, "ext": "py", "lang": "Python", "max_stars_repo_path": "lenskit/matrix.py", "max_stars_repo_name": "teej/lkpy", "max_stars_repo_head_hexsha": "06929c652319e5dede8d045a308a9721f03ab8a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lenskit/matrix.py", "max_issues_repo_name": "teej/lkpy", "max_issues_repo_head_hexsha": "06929c652319e5dede8d045a308a9721f03ab8a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lenskit/matrix.py", "max_forks_repo_name": "teej/lkpy", "max_forks_repo_head_hexsha": "06929c652319e5dede8d045a308a9721f03ab8a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8981191223, "max_line_length": 99, "alphanum_fraction": 0.5667950317, "include": true, "reason": "import numpy,import scipy,import numba,from numba", "num_tokens": 4816}
|
[STATEMENT]
lemma primepow_even_imp_primepow:
assumes "primepow_even n"
shows "primepow n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. primepow n
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. primepow n
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
primepow_even n
[PROOF STEP]
obtain p k where "1 \<le> k" "prime p" "n = p ^ (2 * k)"
[PROOF STATE]
proof (prove)
using this:
primepow_even n
goal (1 subgoal):
1. (\<And>k p. \<lbrakk>1 \<le> k; prime p; n = p ^ (2 * k)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding primepow_even_def
[PROOF STATE]
proof (prove)
using this:
\<exists>p k. 1 \<le> k \<and> prime p \<and> n = p ^ (2 * k)
goal (1 subgoal):
1. (\<And>k p. \<lbrakk>1 \<le> k; prime p; n = p ^ (2 * k)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
1 \<le> k
prime p
n = p ^ (2 * k)
goal (1 subgoal):
1. primepow n
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
1 \<le> k
prime p
n = p ^ (2 * k)
goal (1 subgoal):
1. primepow n
[PROOF STEP]
from \<open>1 \<le> k\<close>
[PROOF STATE]
proof (chain)
picking this:
1 \<le> k
[PROOF STEP]
have "2 * k > 0"
[PROOF STATE]
proof (prove)
using this:
1 \<le> k
goal (1 subgoal):
1. 0 < 2 * k
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < 2 * k
goal (1 subgoal):
1. primepow n
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
1 \<le> k
prime p
n = p ^ (2 * k)
0 < 2 * k
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
1 \<le> k
prime p
n = p ^ (2 * k)
0 < 2 * k
goal (1 subgoal):
1. primepow n
[PROOF STEP]
unfolding primepow_def
[PROOF STATE]
proof (prove)
using this:
1 \<le> k
prime p
n = p ^ (2 * k)
0 < 2 * k
goal (1 subgoal):
1. \<exists>p k. prime p \<and> 0 < k \<and> n = p ^ k
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
primepow n
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 914, "file": "Bertrands_Postulate_Bertrand", "length": 14}
|
import segmentation_models_pytorch as smp
import numpy as np
import torch
from torchvision import datasets, transforms
import torchvision.models as models
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, random_split
from torchvision.transforms import ToTensor
from torch.utils.data import Subset
from sklearn.model_selection import train_test_split
from torch import optim
import matplotlib.pyplot as plt
import matplotlib
import random
from tqdm import tqdm
from pathlib import Path
from data_loading import BasicDataset
from evaluate import evaluate
from dice_score import dice_loss
#def train_val_dataset(dataset, val_split=0.25):
# shuffled_dataset = random.shuffle(list(range(len(dataset))))
# train_idx, val_idx = train_test_split(shuffled_dataset, test_size=val_split)
# datasets = {}
# datasets['train'] = Subset(dataset, train_idx)
# datasets['val'] = Subset(dataset, val_idx)
# return datasets
device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
#print(device)
val_percent = 0.2
batch_size = 10
learning_rate = 0.001
epochs = 50
save_checkpoint = True
#print(torch.cuda.device_count())
'''
seed = 0
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
'''
'''-----The First Model-----'''
model_ft1 = smp.Unet(
encoder_name="inceptionv4", #"resnet34", # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
encoder_weights="imagenet", # use `imagenet` pre-trained weights for encoder initialization
in_channels=1, # model input channels (1 for gray-scale images, 3 for RGB, etc.)
classes=2, # model output channels (number of classes in your dataset)
activation="softmax",
)
#print(model_ft)
ct = 0
for name, param in model_ft1.named_parameters():
ct += 1
print(str(ct) + ' ' + name)
param.requires_grad = False # Freeze these layers
#print(ct)
'''-----The Second Model-----'''
model_ft2 = smp.Unet(
encoder_name="timm-resnest101e", #"resnet34", # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
encoder_weights="imagenet", # use `imagenet` pre-trained weights for encoder initialization
in_channels=1, # model input channels (1 for gray-scale images, 3 for RGB, etc.)
classes=2, # model output channels (number of classes in your dataset)
activation="softmax",
)
ct = 0
for name, param in model_ft2.named_parameters():
ct += 1
    print(str(ct) + ' ' + name)  # Show the layers that contain parameters
param.requires_grad = False # Freeze these layers
'''-----The Third Model-----'''
model_ft3 = smp.MAnet(
encoder_name="resnet152", #"resnet34", # choose encoder, e.g. mobilenet_v2 or efficientnet-b7
encoder_weights="imagenet", # use `imagenet` pre-trained weights for encoder initialization
in_channels=1, # model input channels (1 for gray-scale images, 3 for RGB, etc.)
classes=2, # model output channels (number of classes in your dataset)
activation="softmax",
)
ct = 0
for name, param in model_ft3.named_parameters():
ct += 1
    print(str(ct) + ' ' + name)  # Show the layers that contain parameters
param.requires_grad = False # Freeze these layers
class EnsembledModel(nn.Module):
def __init__(self, model1, model2, model3):
super(EnsembledModel, self).__init__()
self.model1 = model1
self.model2 = model2
self.model3 = model3
self.conv0 = nn.Conv2d(3, 1, kernel_size=(3, 3), stride=(1, 1), padding='same', bias=False)
self.conv1 = nn.Conv2d(3, 1, kernel_size=(3, 3), stride=(1, 1), padding='same', bias=False)
self.conv2 = nn.Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1), padding='same', bias=False)
self.softmax = nn.Softmax(dim = 1)
def forward(self, x):
pred1 = self.model1(x)
pred2 = self.model2(x)
pred3 = self.model3(x)
#pred = (pred1 + pred2 + pred3) / 3
pred_0 = torch.cat((pred1[:, 0:1, :, :], pred2[:, 0:1, :, :], pred3[:, 0:1, :, :]), 1)
pred_1 = torch.cat((pred1[:, 1:2, :, :], pred2[:, 1:2, :, :], pred3[:, 1:2, :, :]), 1)
pred_0 = self.conv0(pred_0)
pred_1 = self.conv1(pred_1)
pred = torch.cat((pred_0, pred_1), 1)
pred = self.conv2(pred)
pred = self.softmax(pred)
return pred
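# Note (added comment, not in the original file): the ensemble stacks the
# per-class probability maps of the three backbones along the channel axis,
# fuses each class map with a 3x3 conv (conv0/conv1), mixes the two fused maps
# with conv2, and renormalizes with a softmax over the class dimension.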
model_ft1.load_state_dict(torch.load("model/MODEL_unet_inceptionv4.pth", map_location=device))
model_ft2.load_state_dict(torch.load("model/MODEL_unet_timm-resnest101e.pth", map_location=device))
model_ft3.load_state_dict(torch.load("model/MODEL_manet_resnet152.pth", map_location=device))
model_ft = EnsembledModel(model_ft1, model_ft2, model_ft3)
model_ft.to(device=device, dtype=torch.float32)
dir_img = Path('../train/data2/cells9')
dir_mask = Path('../train/data2/mask_cells9')
dir_checkpoint = Path('./checkpoints/')
img_scale = 1.0
dataset = BasicDataset(dir_img, dir_mask, img_scale)
#torch.set_printoptions(profile="full")
#print(dataset[0])
#print(len(dataset))
#print(dataset['mask'])
#dataset = datasets.ImageFolder('./data/', transform=transform)
#print(len(dataset))
n_val = int(len(dataset) * val_percent)
n_train = len(dataset) - n_val
train_set, val_set = random_split(dataset, [n_train, n_val], generator=torch.Generator().manual_seed(0))
loader_args = dict(batch_size=batch_size, num_workers=4, pin_memory=True)
train_loader = DataLoader(train_set, shuffle=True, **loader_args)
val_loader = DataLoader(val_set, shuffle=False, drop_last=True, **loader_args)
#img = next(iter(train_loader))
#print(img)
#plt.imshow(img['image'][0][0])
#plt.show()
#plt.imshow(img['mask'][0])
#plt.show()
#print(train_set)
#print(len(train_set))
#print(img['image'].size())
#masks_pred = model_ft(img['image'].to(device))
#optimizer = optim.RMSprop(model_ft.parameters(), lr=learning_rate, weight_decay=1e-8, momentum=0.9)
#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=2) # goal: maximize Dice score
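# A single optimizer over all parameters suffices: the three base networks are
# frozen (requires_grad=False), so only conv0/conv1/conv2 of the ensemble head
# actually receive gradients and updates.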
optimizer = optim.RMSprop(model_ft.parameters(), lr=learning_rate, weight_decay=1e-7, momentum=0.9)
#grad_scaler = torch.cuda.amp.GradScaler(enabled=False)
#grad_scaler2 = torch.cuda.amp.GradScaler(enabled=False)
#grad_scaler3 = torch.cuda.amp.GradScaler(enabled=False)
criterion = nn.CrossEntropyLoss()
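# Note: criterion is kept for reference but unused below; training minimises the Dice loss alone.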
global_step = 0
for epoch in range(epochs):
model_ft.train()
epoch_loss = 0
with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
for batch in train_loader:
images = batch['image']
true_masks = batch['mask']
#true_masks = torch.unsqueeze(true_masks, dim=1)
images = images.to(device=device, dtype=torch.float32)
true_masks = true_masks.to(device=device, dtype=torch.long)
with torch.cuda.amp.autocast(enabled=False):
#print(images.size())
masks_pred = model_ft(images)
#masks_pred = torch.squeeze(masks_pred, dim=1)
#print(masks_pred)
#print(masks_pred.size())
#print(true_masks.size())
#loss = criterion(masks_pred, true_masks) \
# + dice_loss(nn.functional.softmax(masks_pred, dim=1).float(),
# nn.functional.one_hot(true_masks, num_classes = 1).permute(0, 3, 1, 2).float(),
# multiclass=True)
#print(nn.functional.one_hot(true_masks, num_classes = 1).size())
loss = dice_loss(masks_pred.float(),
nn.functional.one_hot(true_masks, num_classes = 2).permute(0, 3, 1, 2).float(),
multiclass=True)
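                # one_hot turns the [B, H, W] integer masks into [B, H, W, 2];
                # permute moves the class axis to position 1 to match masks_pred.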
#loss_local = loss / 3
#print(loss)
#print(loss_local)
'''
print(nn.functional.softmax(masks_pred, dim=1)[0][0].cpu().detach().numpy())
print(nn.functional.softmax(masks_pred, dim=1)[0][1].cpu().detach().numpy())
plt.imshow(nn.functional.softmax(masks_pred, dim=1)[0][1].cpu().detach().numpy())
plt.show()
print(nn.functional.one_hot(true_masks, num_classes = 2).permute(0, 3, 1, 2)[0][0].cpu().detach().numpy())
print(nn.functional.one_hot(true_masks, num_classes = 2).permute(0, 3, 1, 2)[0][1].cpu().detach().numpy())
plt.imshow(nn.functional.one_hot(true_masks, num_classes = 2).permute(0, 3, 1, 2)[0][1].cpu().detach().numpy())
plt.show()
'''
'''
grad_scaler.scale(loss).backward(retain_graph=True)
optimizer1.zero_grad(set_to_none=True)
#grad_scaler1.scale(loss_local).backward(retain_graph=True)
grad_scaler.step(optimizer1)
grad_scaler.update()
optimizer2.zero_grad(set_to_none=True)
#grad_scaler2.scale(loss_local).backward(retain_graph=True)
grad_scaler.step(optimizer2)
grad_scaler.update()
optimizer3.zero_grad(set_to_none=True)
#grad_scaler3.scale(loss_local).backward(retain_graph=True)
grad_scaler.step(optimizer3)
grad_scaler.update()
'''
optimizer.zero_grad(set_to_none=True)
loss.backward()
optimizer.step()
pbar.update(images.shape[0])
global_step += 1
epoch_loss += loss.item()
pbar.set_postfix(**{'loss (batch)': loss.item()})
# Evaluation round
#division_step = (n_train // (10 * batch_size))
division_step = (n_train // (2 * batch_size))
if division_step > 0:
if global_step % division_step == 0:
val_score = evaluate(model_ft, val_loader, device)
print("val_score: " + str(val_score))
if save_checkpoint:
Path(dir_checkpoint).mkdir(parents=True, exist_ok=True)
torch.save(model_ft.state_dict(), str(dir_checkpoint / 'checkpoint_epoch{}.pth'.format(epoch + 1)))
#logging.info(f'Checkpoint {epoch + 1} saved!')
'''
dataloaders = {x:DataLoader(datasets[x], batch_size=1, shuffle = True) for x in ['train','val']}
img, label = next(iter(dataloaders['train']))
'''
'''
#print(test_x[25:26, :, :, :])
#print(test_label[0])
#print(test_x.size())
#print(model.encoder.layer1[0].conv1.weight)
img = test_x[0]
img = img.swapaxes(0,1)
img = img.swapaxes(1,2)
plt.imshow(img)
plt.savefig('input_img.png')
#plt.show()
#model.eval()
print(test_x)
with torch.no_grad():
output = model(test_x)
print(output[0][0])
print(output.size())
out_threshold=0.5
#full_mask = torch.sigmoid(output)[0]
#print(full_mask[0][0])
#print(full_mask[0][0].size())
#plt.imshow((output[0][0] > out_threshold).numpy(), cmap = "gray")
#plt.show()
plt.imshow(output[0][0].numpy(), cmap = "gray")
plt.savefig('output_img.png')
plt.show()
'''
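# Minimal inference sketch (hypothetical: assumes a 1-channel float tensor `img`
# of shape [1, 1, H, W]; channel 1 of the output is the foreground probability):
# model_ft.eval()
# with torch.no_grad():
#     probs = model_ft(img.to(device))   # [1, 2, H, W]
#     mask = probs.argmax(dim=1)         # [1, H, W] hard labels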
|
{"hexsha": "b22242bf29288c2f6930a25d55a207b0600577dc", "size": 12402, "ext": "py", "lang": "Python", "max_stars_repo_path": "ensemble/train.py", "max_stars_repo_name": "RuixinGuo/Image-Segmentation", "max_stars_repo_head_hexsha": "77f4c08f37316ee0f1c9a3bd79b422df81ad4df8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ensemble/train.py", "max_issues_repo_name": "RuixinGuo/Image-Segmentation", "max_issues_repo_head_hexsha": "77f4c08f37316ee0f1c9a3bd79b422df81ad4df8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ensemble/train.py", "max_forks_repo_name": "RuixinGuo/Image-Segmentation", "max_forks_repo_head_hexsha": "77f4c08f37316ee0f1c9a3bd79b422df81ad4df8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0406779661, "max_line_length": 143, "alphanum_fraction": 0.5735365264, "include": true, "reason": "import numpy", "num_tokens": 2798}
|
[STATEMENT]
lemma measurable_liminf [measurable (raw)]:
assumes [measurable]: "\<And>n. A n \<in> sets M"
shows "liminf A \<in> sets M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. liminf A \<in> sets M
[PROOF STEP]
by (subst liminf_SUP_INF, auto)
|
{"llama_tokens": 107, "file": null, "length": 1}
|
import pandas as pd
import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import finterstellar as fs
pd.plotting.deregister_matplotlib_converters()
font = 'NanumSquareRound, AppleGothic, Malgun Gothic, DejaVu Sans'
class Visualize:
today = '(' + pd.to_datetime('today').date().strftime("%y%m%d") + ') '
today_str = pd.to_datetime('today').date().strftime("%Y%m%d")
def __init__(self):
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.grid'] = True
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['grid.linestyle'] = '--'
plt.rcParams['grid.alpha'] = 0.7
plt.rcParams['lines.antialiased'] = True
plt.rcParams['figure.figsize'] = [15.0, 7.0]
plt.rcParams['savefig.dpi'] = 96
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['figure.titlesize'] = 'medium'
def price_view(self, df, b_date, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
for c in cds:
plt.plot(x, df.loc[b_date:, c], label=c)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' price_view.png', bbox_inches='tight')
def index_view(self, df, b_date, cd, size=(15,7), make_file=False):
if isinstance(df.index[0], dt.date):
b_date = fs.check_base_date(df, b_date)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
cds = fs.str_list(cd)
for c in cds:
plt.plot(x, df.loc[b_date:, c] / df.loc[b_date, c] * 100, label=c)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' index_view.png', bbox_inches='tight')
def complex_view(self, df, b_date, cd_a, cd_b, size=(15,7), make_file=False):
cds_a = fs.str_list(cd_a)
cds_b = fs.str_list(cd_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.fill_between(x, df.loc[b_date:, c], 0, facecolor='C'+str(i), alpha=0.3)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+self.today+cds_a[0]+' complex_view.png', bbox_inches='tight')
def multi_line_view(self, df, b_date, cd_a, cd_b, size=(15,7), make_file=False):
cds_a = fs.str_list(cd_a)
cds_b = fs.str_list(cd_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c, alpha=0.7)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+self.today+cds_a[0]+' multi_line_view.png', bbox_inches='tight')
def position_view(self, df, cd, size=(15,1), make_file=False, file_name=''):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
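        # Map position codes to a numeric series: codes ending in 'l' (long) -> +1,
        # codes ending in 's' (short) -> -1, anything else -> 0.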
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.fill_between(x, df['ps'+c], 0, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
plt.legend()
if make_file:
f_name = file_name+'_position_view.png'
plt.savefig('./image/'+f_name, bbox_inches='tight')
def position_view_bar(self, df, cd, size=(15,1), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
x_ticks = self.time_serial(df)
plt.xticks(x_ticks[0], x_ticks[1])
plt.autoscale(True, axis='x')
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.bar(range(x.size), df['ps'+c], width=1, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' position_view.png', bbox_inches='tight')
def pair_trend_index_view(self, df, trd, cd, size=(15,7), make_file=False, file_name=''):
fig, ax1 = plt.subplots(figsize=size)
x = df.index
ax1.fill_between(x, df[cd[1]+' expected']*(1+trd), df[cd[1]+' expected']*(1-trd), facecolor='sienna', alpha=0.2)
ax1.plot(x, df[cd[1]+' expected'], 'sienna', linestyle='--')
ax1.plot(x, df[cd[1]], 'C1', lw=3)
ax2 = ax1.twinx()
ax2.plot(x, df[cd[0]], 'C0', alpha=0.7)
ax1.plot(np.nan, 'C0', label=cd[0])
ax1.legend(loc=0)
if make_file:
f_name = file_name+'_pair_trend_view.png'
plt.savefig('./image/'+f_name, bbox_inches='tight')
return()
def pair_trend_price_view(self, df, trd, cd, size=(15,7), make_file=False):
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df[cd[1]+' expected']*(1+trd), df[cd[1]+' expected']*(1-trd), facecolor='sienna', alpha=0.2)
plt.plot(x, df[cd[1]+' expected'], 'sienna', linestyle='--')
plt.plot(x, df[cd[0]], 'C0')
plt.plot(x, df[cd[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cd[0]+' pair_trend_price_view.png', bbox_inches='tight')
def BB_trend_view(self, df, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df['lb'], df['ub'], facecolor='sienna', alpha=0.2)
plt.plot(x, df['center'], color='sienna', linestyle='--', label='MA')
plt.plot(x, df[cds[0]], color='C0', linestyle='-', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' bb_trend_view.png', bbox_inches='tight')
def futures_basis_view(self, df, threshold, cd, size=(15,7), make_file=False):
cds = fs.str_list(cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.autoscale(True, axis='both')
plt.fill_between(x, df[cds[0]], df[cds[0]]+df['basis'], facecolor='sienna', alpha=0.2)
plt.plot(x, df[cds[0]], 'sienna', linestyle='--')
plt.plot(x, df[cds[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+cds[0]+' futures_basis_view.png', bbox_inches='tight')
def value_at_expiry_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s + pd.Series(value)
plt.plot(x, s, linewidth=3, color='red', label='Synthetic')
else:
for key, value in y.items():
plt.plot(x, value, linewidth=3, color='red', label=key)
step = ( x.max() - x.min() + 1 ) / 4
plt.yticks(np.arange(0-step*2, 0+step*3, step))
plt.ylim(0-step*2, 0+step*2)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+' value_at_expiry_view.png', bbox_inches='tight')
def square_one_to_one_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s + pd.Series(value)
plt.plot(x, s, linewidth=3, color='red', label='Synthetic')
else:
for key, value in y.items():
plt.plot(x, value, linewidth=3, color='red', label=key)
step = ( x.max() - x.min() + 1 ) / 4
plt.yticks(np.arange(0-step*2, 0+step*3, step))
plt.ylim(0-step*2, 0+step*2)
plt.legend()
if make_file:
plt.savefig('./image/'+self.today+' square_one_to_one_view.png', bbox_inches='tight')
def square_free_plot_view(self, x, make_file=False, size=(7,7), **y):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
s = pd.Series(0 for _ in range(len(x)))
if len(y) > 1:
for key, value in y.items():
plt.plot(x, value, linestyle='--', linewidth=1, label=key)
s = s + pd.Series(value)
plt.plot(x, s, linewidth=3, color='red', label='Synthetic')
else:
for key, value in y.items():
plt.plot(x, value, linewidth=3, color='red', label=key)
plt.legend()
if make_file:
            plt.savefig('./image/'+self.today+' square_free_plot_view.png', bbox_inches='tight')
def square_scatter_view(self, x, y, make_file=False, size=(7,7)):
fig, ax = plt.subplots(figsize=size)
        plt.axhline(y=0, color = 'k', linewidth=1) # x-axis
plt.scatter(x, y, linewidth=3, color='red')
plt.legend()
if make_file:
            plt.savefig('./image/'+self.today+' square_scatter_view.png', bbox_inches='tight')
def time_serial(self, df):
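        # Build x-tick positions and labels for a date-indexed frame, choosing a
        # tick frequency that matches the span (yearly for two years or more,
        # down to daily/bar-level frequencies for short windows).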
chart = pd.DataFrame()
chart = df.copy()
chart.reset_index(inplace=True)
sequence = []
xlabels = []
if isinstance(chart.iloc[0, 0], dt.date):
first = chart.iloc[0, 0]
last = chart.iloc[-1, 0]
delta = last - first
if delta.days >= 730:
time_series = pd.date_range(first, last, freq='YS')
elif delta.days >= 365:
time_series = pd.date_range(first, last, freq='QS')
elif delta.days >= 180:
time_series = pd.date_range(first, last, freq='2MS')
elif delta.days >= 90:
time_series = pd.date_range(first, last, freq='MS')
elif delta.days >= 60:
time_series = pd.date_range(first, last, freq='SMS')
elif delta.days >= 30:
time_series = pd.date_range(first, last, freq='5B')
elif delta.days >= 10:
time_series = pd.date_range(first, last, freq='2B')
elif delta.days >= 5:
time_series = pd.date_range(first, last, freq='D')
else:
time_series = chart.iloc[:, 0]
sequence.append(first)
if delta.days >= 180:
xlabels.append(first.strftime('%y.%m.%d'))
else:
xlabels.append(first.strftime('%m.%d'))
for d in time_series:
d = fs.check_base_date(df, d)
s = chart[chart.iloc[:, 0]==d].iloc[0].tolist()
sequence.append(s[0])
l = d.strftime('%y.%m.%d')
if delta.days >= 180:
l = d.strftime('%y.%m.%d')
else:
l = d.strftime('%m.%d')
xlabels.append(l)
sequence.append(last)
if delta.days >= 180:
xlabels.append(last.strftime('%y.%m.%d'))
else:
xlabels.append(last.strftime('%m.%d'))
if sequence[0] == sequence[1]:
del sequence[0]
del xlabels[0]
if sequence[-1] == sequence[-2]:
del sequence[-1]
del xlabels[-1]
return(sequence, xlabels)
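# Minimal usage sketch (hypothetical: assumes a DataFrame `df` of prices indexed
# by date with ticker-named columns; 'AAPL' and 'MSFT' are placeholder codes):
# v = Visualize()
# v.price_view(df, '2020-01-01', ['AAPL', 'MSFT'])
# v.index_view(df, '2020-01-01', ['AAPL', 'MSFT'], make_file=True)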
'''
intraday charting
'''
class VisualizeIntraday:
today = '(' + pd.to_datetime('today').date().strftime("%y%m%d") + ') '
def __init__(self):
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.grid'] = True
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['grid.linestyle'] = '--'
plt.rcParams['grid.alpha'] = 0.7
plt.rcParams['lines.antialiased'] = True
plt.rcParams['figure.figsize'] = [15.0, 7.0]
plt.rcParams['savefig.dpi'] = 96
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['figure.titlesize'] = 'medium'
def price_view(self, df, b_date, s_cd, size=(15,7), make_file=False):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
for c in cds:
plt.plot(x, df.loc[b_date:, c], label=c)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' price_view.png', bbox_inches='tight')
def index_view(self, df, b_date, s_cd, size=(15,7), make_file=False):
fig, ax = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
cds = fs.str_list(s_cd)
for c in cds:
plt.plot(x, df.loc[b_date:, c] / df.loc[b_date, c] * 100, label=c)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
            plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' index_view.png', bbox_inches='tight')
def complex_view(self, df, b_date, cd_set_a, cd_set_b=[], size=(15,7), make_file=False):
cds_a = fs.str_list(cd_set_a)
cds_b = fs.str_list(cd_set_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.fill_between(x, df.loc[b_date:, c], 0, facecolor='C'+str(i), alpha=0.3)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
ax1.set_xticks(np.arange(0, x_length+jump, jump))
ax1.set_xticklabels(xs, rotation=45)
ax2.set_xticks(np.arange(0, x_length+jump, jump))
ax2.set_xticklabels(xs, rotation=45)
ax1.legend(loc=0)
if make_file:
            plt.savefig('./image/'+VisualizeIntraday.today+cds_a[0]+' complex_view.png', bbox_inches='tight')
def multi_line_view(self, df, b_date, cd_set_a, cd_set_b=[], size=(15,7), make_file=False):
cds_a = fs.str_list(cd_set_a)
cds_b = fs.str_list(cd_set_b)
fig, ax1 = plt.subplots(figsize=size)
x = df.loc[b_date:].index
plt.autoscale(True, axis='both')
i = 1
for c in cds_a:
if i==1:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), lw=3, label=c)
else:
ax1.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c)
i += 1
if cds_b:
ax2 = ax1.twinx()
i = 6
for c in cds_b:
ax2.plot(x, df.loc[b_date:, c], color='C'+str(i), label=c, alpha=0.7)
ax1.plot(np.nan, color='C'+str(i), label=c)
i += 1
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
ax1.set_xticks(np.arange(0, x_length+jump, jump))
ax1.set_xticklabels(xs, rotation=45)
ax2.set_xticks(np.arange(0, x_length+jump, jump))
ax2.set_xticklabels(xs, rotation=45)
ax1.legend(loc=0)
if make_file:
            plt.savefig('./image/'+VisualizeIntraday.today+cds_a[0]+' multi_line_view.png', bbox_inches='tight')
def position_view(self, df, s_cd, size=(15,1), make_file=False):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
for c in cds:
df['ps'+c] = 0
df.loc[ df['p '+c] == 'll', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'sl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'zl', ['ps'+c] ] = 1
df.loc[ df['p '+c] == 'ls', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'ss', ['ps'+c] ] = -1
df.loc[ df['p '+c] == 'zs', ['ps'+c] ] = -1
plt.fill_between(x, df['ps'+c], 0, label=c)
plt.yticks([-1, 0, 1], ["Short", "Zero", "Long"])
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' position_view.png', bbox_inches='tight')
def pair_trend_price_view(self, df, thd, s_cd, make_file=False, size=(15,7)):
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df[s_cd[1]+' expected']*(1+thd), df[s_cd[1]+' expected']*(1-thd), facecolor='sienna', alpha=0.2)
plt.plot(x, df[s_cd[1]+' expected'], 'sienna', linestyle='--')
plt.plot(x, df[s_cd[0]], 'C0')
plt.plot(x, df[s_cd[1]], 'C1', lw=3)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+s_cd[0]+' pairs_trend_price_view.png', bbox_inches='tight')
def pair_trend_index_view(self, df, thd, s_cd, make_file=False, size=(15,7)):
fig, ax1 = plt.subplots(figsize=size)
x = df.index
ax1.fill_between(x, df[s_cd[1]+' expected']*(1+thd), df[s_cd[1]+' expected']*(1-thd), facecolor='sienna', alpha=0.2)
ax1.plot(x, df[s_cd[1]+' expected'], 'sienna', linestyle='--')
ax1.plot(x, df[s_cd[1]], 'C1', lw=3)
ax2 = ax1.twinx()
ax2.plot(x, df[s_cd[0]], 'C0', alpha=0.7)
ax1.plot(np.nan, 'C0', label=s_cd[0])
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
ax1.set_xticks(np.arange(0, x_length+jump, jump))
ax1.set_xticklabels(xs, rotation=45)
ax2.set_xticks(np.arange(0, x_length+jump, jump))
ax2.set_xticklabels(xs, rotation=45)
ax1.legend(loc=0)
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+s_cd[0]+' pairs_trend_index_view.png', bbox_inches='tight')
def BB_trend_view(self, sample, sigma, s_cd, make_file=False, size=(15,7)):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = sample.index
plt.fill_between(x, sample['lb'], sample['ub'], facecolor='sienna', alpha=0.2)
plt.plot(x, sample['center'], color='sienna', linestyle='--', label='MA')
plt.plot(x, sample[cds[0]], color='C0', linestyle='-', lw=3)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' bb_trend_view.png', bbox_inches='tight')
def futures_basis_view(self, df, threshold, s_cd, make_file=False, size=(15,7)):
cds = fs.str_list(s_cd)
fig, ax = plt.subplots(figsize=size)
x = df.index
plt.fill_between(x, df[cds[0]], df[cds[0]]+df['basis'], facecolor='sienna', alpha=0.2)
plt.plot(x, df[cds[0]], 'sienna', linestyle='--')
plt.plot(x, df[cds[1]], 'C1', lw=3)
x_length = len(x)
jump = int( x_length / 10 )
xs = list()
for i in range(10):
xs.append(x[jump*i])
xs.append(x[-1])
plt.xticks(np.arange(0, x_length+jump, jump), xs, rotation=45)
plt.legend()
if make_file:
plt.savefig('./image/'+VisualizeIntraday.today+cds[0]+' futures_basis_view.png', bbox_inches='tight')
class Visualize3D():
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def __init__(self):
plt.style.use('fivethirtyeight')
plt.rcParams['font.family'] = font
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['axes.grid'] = True
plt.rcParams['lines.linewidth'] = 1.5
plt.rcParams['grid.linestyle'] = '--'
plt.rcParams['grid.alpha'] = 0.7
plt.rcParams['lines.antialiased'] = True
plt.rcParams['figure.figsize'] = [15.0, 7.0]
plt.rcParams['savefig.dpi'] = 96
plt.rcParams['font.size'] = 12
plt.rcParams['legend.fontsize'] = 'medium'
plt.rcParams['figure.titlesize'] = 'medium'
def surface_view(self, size=(10, 6), **points):
labels = []
values = []
for key, value in points.items():
labels.append(key)
values.append(value)
try:
fig = plt.figure(figsize=size)
            ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
surf = ax.plot_surface(values[0], values[1], values[2], cmap=self.cm.summer, linewidth=1, alpha=0.8)
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
fig.colorbar(surf, shrink=0.5, aspect=5)
except Exception as e:
            print('The number of input values for the x, y, and z axes must match.')
print(e)
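# Minimal usage sketch for the 3D surface plot (hypothetical meshgrid data; the
# keyword names become the axis labels):
# X, Y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
# Visualize3D().surface_view(x=X, y=Y, z=X * Y)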
|
{"hexsha": "92f69018505272edddd06f5d95902f1f4fc37178", "size": 25112, "ext": "py", "lang": "Python", "max_stars_repo_path": "w2/finterstellar/visualization.py", "max_stars_repo_name": "finterstellar/lecture", "max_stars_repo_head_hexsha": "fb14fb1c6a842e2ee2f79b0225ac9f4d11c3ca47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-14T05:53:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-29T03:45:59.000Z", "max_issues_repo_path": "w2/finterstellar/visualization.py", "max_issues_repo_name": "finterstellar/lecture", "max_issues_repo_head_hexsha": "fb14fb1c6a842e2ee2f79b0225ac9f4d11c3ca47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "w2/finterstellar/visualization.py", "max_forks_repo_name": "finterstellar/lecture", "max_forks_repo_head_hexsha": "fb14fb1c6a842e2ee2f79b0225ac9f4d11c3ca47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-03-01T13:50:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T05:47:28.000Z", "avg_line_length": 34.3529411765, "max_line_length": 124, "alphanum_fraction": 0.4947037273, "include": true, "reason": "import numpy", "num_tokens": 6884}
|
(*
File: Arithmetic_Summatory_Asymptotics.thy
Author: Manuel Eberl, TU München
*)
section \<open>Asymptotics of summatory arithmetic functions\<close>
theory Arithmetic_Summatory_Asymptotics
imports
Euler_MacLaurin.Euler_MacLaurin_Landau
Arithmetic_Summatory
Dirichlet_Series_Analysis
Landau_Symbols.Landau_More
begin
subsection \<open>Auxiliary bounds\<close>
lemma sum_inverse_squares_tail_bound:
assumes "d > 0"
shows "summable (\<lambda>n. 1 / (real (Suc n) + d) ^ 2)"
"(\<Sum>n. 1 / (real (Suc n) + d) ^ 2) \<le> 1 / d"
proof -
show *: "summable (\<lambda>n. 1 / (real (Suc n) + d) ^ 2)"
proof (rule summable_comparison_test, intro allI exI impI)
fix n :: nat
from assms show "norm (1 / (real (Suc n) + d) ^ 2) \<le> 1 / real (Suc n) ^ 2"
unfolding norm_divide norm_one norm_power
by (intro divide_left_mono power_mono) simp_all
qed (insert inverse_squares_sums, simp add: sums_iff)
show "(\<Sum>n. 1 / (real (Suc n) + d) ^ 2) \<le> 1 / d"
proof (rule sums_le)
fix n have "1 / (real (Suc n) + d) ^ 2 \<le> 1 / ((real n + d) * (real (Suc n) + d))"
unfolding power2_eq_square using assms
by (intro divide_left_mono mult_mono mult_pos_pos add_nonneg_pos) simp_all
also have "\<dots> = 1 / (real n + d) - 1 / (real (Suc n) + d)"
using assms by (simp add: divide_simps)
finally show "1 / (real (Suc n) + d)\<^sup>2 \<le> 1 / (real n + d) - 1 / (real (Suc n) + d)" .
next
show "(\<lambda>n. 1 / (real (Suc n) + d)\<^sup>2) sums (\<Sum>n. 1 / (real (Suc n) + d)\<^sup>2)"
using * by (simp add: sums_iff)
next
have "(\<lambda>n. 1 / (real n + d) - 1 / (real (Suc n) + d)) sums (1 / (real 0 + d) - 0)"
by (intro telescope_sums' real_tendsto_divide_at_top[OF tendsto_const],
subst add.commute, rule filterlim_tendsto_add_at_top[OF tendsto_const
filterlim_real_sequentially])
thus "(\<lambda>n. 1 / (real n + d) - 1 / (real (Suc n) + d)) sums (1 / d)" by simp
qed
qed
lemma moebius_sum_tail_bound:
assumes "d > 0"
shows "abs (\<Sum>n. moebius_mu (Suc n + d) / real (Suc n + d)^2) \<le> 1 / d" (is "abs ?S \<le> _")
proof -
have *: "summable (\<lambda>n. 1 / (real (Suc n + d))\<^sup>2)"
by (insert sum_inverse_squares_tail_bound(1)[of "real d"] assms, simp_all add: add_ac)
have **: "summable (\<lambda>n. abs (moebius_mu (Suc n + d) / real (Suc n + d)^2))"
proof (rule summable_comparison_test, intro exI allI impI)
fix n :: nat
show "norm (\<bar>moebius_mu (Suc n + d) / (real (Suc n + d))^2\<bar>) \<le>
1 / (real (Suc n + d))^2"
unfolding real_norm_def abs_abs abs_divide power_abs abs_of_nat
by (intro divide_right_mono abs_moebius_mu_le) simp_all
qed (insert *)
from ** have "abs ?S \<le> (\<Sum>n. abs (moebius_mu (Suc n + d) / real (Suc n + d)^2))"
by (rule summable_rabs)
also have "\<dots> \<le> (\<Sum>n. 1 / (real (Suc n) + d) ^ 2)"
proof (intro suminf_le allI)
fix n :: nat
show "abs (moebius_mu (Suc n + d) / (real (Suc n + d))^2) \<le> 1 / (real (Suc n) + real d)^2"
unfolding abs_divide abs_of_nat power_abs of_nat_add [symmetric]
by (intro divide_right_mono abs_moebius_mu_le) simp_all
qed (insert * **, simp_all add: add_ac)
also from assms have "\<dots> \<le> 1 / d" by (intro sum_inverse_squares_tail_bound) simp_all
finally show ?thesis .
qed
lemma sum_upto_inverse_bound:
"sum_upto (\<lambda>i. 1 / real i) x \<ge> 0"
"eventually (\<lambda>x. sum_upto (\<lambda>i. 1 / real i) x \<le> ln x + 13 / 22) at_top"
proof -
show "sum_upto (\<lambda>i. 1 / real i) x \<ge> 0"
by (simp add: sum_upto_def sum_nonneg)
from order_tendstoD(2)[OF euler_mascheroni_LIMSEQ euler_mascheroni_less_13_over_22]
obtain N where N: "\<And>n. n \<ge> N \<Longrightarrow> harm n - ln (real n) < 13 / 22"
unfolding eventually_at_top_linorder by blast
show "eventually (\<lambda>x. sum_upto (\<lambda>i. 1 / real i) x \<le> ln x + 13 / 22) at_top"
using eventually_ge_at_top[of "max (real N) 1"]
proof eventually_elim
case (elim x)
have "sum_upto (\<lambda>i. 1 / real i) x = (\<Sum>i\<in>{0<..nat \<lfloor>x\<rfloor>}. 1 / real i)"
by (simp add: sum_upto_altdef)
also have "\<dots> = harm (nat \<lfloor>x\<rfloor>)"
unfolding harm_def by (intro sum.cong refl) (auto simp: field_simps)
also have "\<dots> \<le> ln (real (nat \<lfloor>x\<rfloor>)) + 13 / 22"
using N[of "nat \<lfloor>x\<rfloor>"] elim by (auto simp: le_nat_iff le_floor_iff)
also have "ln (real (nat \<lfloor>x\<rfloor>)) \<le> ln x" using elim by (subst ln_le_cancel_iff) auto
finally show ?case by - simp
qed
qed
lemma sum_upto_inverse_bigo: "sum_upto (\<lambda>i. 1 / real i) \<in> O(\<lambda>x. ln x)"
proof -
have "eventually (\<lambda>x. norm (sum_upto (\<lambda>i. 1 / real i) x) \<le> 1 * norm (ln x + 13/22)) at_top"
using eventually_ge_at_top[of "1::real"] sum_upto_inverse_bound(2)
by eventually_elim (insert sum_upto_inverse_bound(1), simp_all)
hence "sum_upto (\<lambda>i. 1 / real i) \<in> O(\<lambda>x. ln x + 13/22)"
by (rule bigoI)
also have "(\<lambda>x::real. ln x + 13/22) \<in> O(\<lambda>x. ln x)" by simp
finally show ?thesis .
qed
lemma
defines "G \<equiv> (\<lambda>x::real. (\<Sum>n. moebius_mu (n + Suc (nat \<lfloor>x\<rfloor>)) / (n + Suc (nat \<lfloor>x\<rfloor>))^2) :: real)"
shows moebius_sum_tail_bound': "\<And>t. t \<ge> 2 \<Longrightarrow> \<bar>G t\<bar> \<le> 1 / (t - 1)"
and moebius_sum_tail_bigo: "G \<in> O(\<lambda>t. 1 / t)"
proof -
show "\<bar>G t\<bar> \<le> 1 / (t - 1)" if t: "t \<ge> 2" for t
proof -
from t have "\<bar>G t\<bar> \<le> 1 / real (nat \<lfloor>t\<rfloor>)"
unfolding G_def using moebius_sum_tail_bound[of "nat \<lfloor>t\<rfloor>"] by simp
also have "t \<le> 1 + real_of_int \<lfloor>t\<rfloor>" by linarith
hence "1 / real (nat \<lfloor>t\<rfloor>) \<le> 1 / (t - 1)" using t by (simp add: field_simps)
finally show ?thesis .
qed
hence "G \<in> O(\<lambda>t. 1 / (t - 1))"
by (intro bigoI[of _ 1] eventually_mono[OF eventually_ge_at_top[of "2::real"]]) auto
also have "(\<lambda>t::real. 1 / (t - 1)) \<in> \<Theta>(\<lambda>t. 1 / t)" by simp
finally show "G \<in> O(\<lambda>t. 1 / t)" .
qed
subsection \<open>Summatory totient function\<close>
theorem summatory_totient_asymptotics':
"(\<lambda>x. sum_upto (\<lambda>n. real (totient n)) x) =o (\<lambda>x. 3 / pi\<^sup>2 * x\<^sup>2) +o O(\<lambda>x. x * ln x)"
using summatory_totient_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem summatory_totient_asymptotics'':
"sum_upto (\<lambda>n. real (totient n)) \<sim>[at_top] (\<lambda>x. 3 / pi\<^sup>2 * x\<^sup>2)"
proof -
have "(\<lambda>x. sum_upto (\<lambda>n. real (totient n)) x - 3 / pi\<^sup>2 * x\<^sup>2) \<in> O(\<lambda>x. x * ln x)"
by (rule summatory_totient_asymptotics)
also have "(\<lambda>x. x * ln x) \<in> o(\<lambda>x. 3 / pi ^ 2 * x ^ 2)" by simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
subsection \<open>Asymptotic distribution of squarefree numbers\<close>
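text \<open>
  The number of squarefree integers up to \<open>x\<close> is \<open>6 / pi\<^sup>2 * x + O(sqrt x)\<close>;
  equivalently, the squarefree numbers have natural density \<open>6 / pi\<^sup>2\<close>, that is,
  the reciprocal of \<open>\<zeta>(2)\<close>.
\<close>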
lemma le_sqrt_iff: "x \<ge> 0 \<Longrightarrow> x \<le> sqrt y \<longleftrightarrow> x^2 \<le> y"
using real_sqrt_le_iff[of "x^2" y] by (simp del: real_sqrt_le_iff)
theorem squarefree_asymptotics: "(\<lambda>x. card {n. real n \<le> x \<and> squarefree n} - 6 / pi\<^sup>2 * x) \<in> O(sqrt)"
proof -
define f :: "nat \<Rightarrow> real" where "f = (\<lambda>n. if n = 0 then 0 else 1)"
define g :: "nat \<Rightarrow> real" where "g = dirichlet_prod (ind squarefree) moebius_mu"
interpret g: multiplicative_function g unfolding g_def
by (intro multiplicative_dirichlet_prod squarefree.multiplicative_function_axioms
moebius_mu.multiplicative_function_axioms)
interpret g: multiplicative_function' g "\<lambda>p k. if k = 2 then -1 else 0" "\<lambda>_. 0"
proof
interpret g': multiplicative_dirichlet_prod' "ind squarefree" moebius_mu
"\<lambda>p k. if 1 < k then 0 else 1" "\<lambda>p k. if k = 1 then - 1 else 0" "\<lambda>_. 1" "\<lambda>_. - 1"
by (intro multiplicative_dirichlet_prod'.intro squarefree.multiplicative_function'_axioms
moebius_mu.multiplicative_function'_axioms)
fix p k :: nat assume "prime p" "k > 0"
hence "g (p ^ k) = (\<Sum>i\<in>{0<..<k}. (if Suc 0 < i then 0 else 1) *
(if k - i = Suc 0 then - 1 else 0))"
by (auto simp: g'.prime_power g_def)
also have "\<dots> = (\<Sum>i\<in>{0<..<k}. (if k = 2 then -1 else 0))"
by (intro sum.cong refl) auto
also from \<open>k > 0\<close> have "\<dots> = (if k = 2 then -1 else 0)" by simp
finally show "g (p ^ k) = \<dots>" .
qed simp_all
have mult_g_square: "multiplicative_function (\<lambda>n. g (n ^ 2))"
by standard (simp_all add: power_mult_distrib g.mult_coprime)
have g_square: "g (m ^ 2) = moebius_mu m" for m
using mult_g_square moebius_mu.multiplicative_function_axioms
proof (rule multiplicative_function_eqI)
fix p k :: nat assume *: "prime p" "k > 0"
have "g ((p ^ k) ^ 2) = g (p ^ (2 * k))" by (simp add: power_mult [symmetric] mult_ac)
also from * have "\<dots> = (if k = 1 then -1 else 0)" by (simp add: g.prime_power)
also from * have "\<dots> = moebius_mu (p ^ k)" by (simp add: moebius_mu.prime_power)
finally show "g ((p ^ k) ^ 2) = moebius_mu (p ^ k)" .
qed
have g_nonsquare: "g m = 0" if "\<not>is_square m" for m
proof (cases "m = 0")
case False
from that False obtain p where p: "prime p" "odd (multiplicity p m)"
using is_nth_power_conv_multiplicity_nat[of 2 m] by auto
from p have "multiplicity p m \<noteq> 2" by auto
moreover from p have "p \<in> prime_factors m"
by (auto simp: prime_factors_multiplicity intro!: Nat.gr0I)
ultimately have "(\<Prod>p\<in>prime_factors m. if multiplicity p m = 2 then - 1 else 0 :: real) = 0"
(is "?P = _") by auto
also have "?P = g m" using False by (subst g.prod_prime_factors') auto
finally show ?thesis .
qed auto
have abs_g_le: "abs (g m) \<le> 1" for m
by (cases "is_square m")
(auto simp: g_square g_nonsquare abs_moebius_mu_le elim!: is_nth_powerE)
have fds_g: "fds g = fds_ind squarefree * fds moebius_mu"
by (rule fds_eqI) (simp add: g_def fds_nth_mult)
have "fds g * fds_zeta = fds_ind squarefree * (fds_zeta * fds moebius_mu)"
by (simp add: fds_g mult_ac)
also have "fds_zeta * fds moebius_mu = (1 :: real fds)"
by (rule fds_zeta_times_moebius_mu)
finally have *: "fds_ind squarefree = fds g * fds_zeta" by simp
have ind_squarefree: "ind squarefree = dirichlet_prod g f"
proof
fix n :: nat
from * show "ind squarefree n = dirichlet_prod g f n"
by (cases "n = 0") (simp_all add: fds_eq_iff fds_nth_mult f_def)
qed
define H :: "real \<Rightarrow> real"
where "H = (\<lambda>x. sum_upto (\<lambda>m. g (m^2) * (real_of_int \<lfloor>x / real (m\<^sup>2)\<rfloor> - x / real (m^2))) (sqrt x))"
define J where "J = (\<lambda>x::real. (\<Sum>n. moebius_mu (n + Suc (nat \<lfloor>x\<rfloor>)) / (n + Suc (nat \<lfloor>x\<rfloor>))^2))"
have "eventually (\<lambda>x. norm (H x) \<le> 1 * norm (sqrt x)) at_top"
using eventually_ge_at_top[of "0::real"]
proof eventually_elim
case (elim x)
have "abs (H x) \<le> sum_upto (\<lambda>m. abs (g (m^2) * (real_of_int \<lfloor>x / real (m\<^sup>2)\<rfloor> -
x / real (m^2)))) (sqrt x)" (is "_ \<le> ?S") unfolding H_def sum_upto_def
by (rule sum_abs)
also have "x / (real m)\<^sup>2 - real_of_int \<lfloor>x / (real m)\<^sup>2\<rfloor> \<le> 1" for m by linarith
hence "?S \<le> sum_upto (\<lambda>m. 1 * 1) (sqrt x)" unfolding abs_mult sum_upto_def
by (intro sum_mono mult_mono abs_g_le) simp_all
also have "\<dots> = of_int \<lfloor>sqrt x\<rfloor>" using elim by (simp add: sum_upto_altdef)
also have "\<dots> \<le> sqrt x" by linarith
finally show ?case using elim by simp
qed
hence H_bigo: "H \<in> O(\<lambda>x. sqrt x)" by (rule bigoI)
let ?A = "\<lambda>x. card {n. real n \<le> x \<and> squarefree n}"
have "eventually (\<lambda>x. ?A x - 6 / pi\<^sup>2 * x = (-x) * J (sqrt x) + H x) at_top"
using eventually_ge_at_top[of "0::real"]
proof eventually_elim
fix x :: real assume x: "x \<ge> 0"
have "{n. real n \<le> x \<and> squarefree n} = {n. n > 0 \<and> real n \<le> x \<and> squarefree n}"
by (auto intro!: Nat.gr0I)
also have "card \<dots> = sum_upto (ind squarefree :: nat \<Rightarrow> real) x"
by (rule sum_upto_ind [symmetric])
also have "\<dots> = sum_upto (\<lambda>d. g d * sum_upto f (x / real d)) x" (is "_ = ?S")
unfolding ind_squarefree by (rule sum_upto_dirichlet_prod)
also have "sum f {0<..nat \<lfloor>x / real i\<rfloor>} = of_int \<lfloor>x / real i\<rfloor>" if "i > 0" for i
using x by (simp add: f_def)
hence "?S = sum_upto (\<lambda>d. g d * of_int \<lfloor>x / real d\<rfloor>) x"
unfolding sum_upto_altdef by (intro sum.cong refl) simp_all
also have "\<dots> = sum_upto (\<lambda>m. g (m ^ 2) * of_int \<lfloor>x / real (m ^ 2)\<rfloor>) (sqrt x)"
unfolding sum_upto_def
proof (intro sum.reindex_bij_betw_not_neutral [symmetric])
show "bij_betw power2 ({i. 0 < i \<and> real i \<le> sqrt x} - {})
({i. 0 < i \<and> real i \<le> x} - {i\<in>{0<..nat \<lfloor>x\<rfloor>}. \<not>is_square i})"
by (auto simp: bij_betw_def inj_on_def power_eq_iff_eq_base le_sqrt_iff
is_nth_power_def le_nat_iff le_floor_iff)
qed (auto simp: g_nonsquare)
also have "\<dots> = x * sum_upto (\<lambda>m. g (m ^ 2) / real m ^ 2) (sqrt x) + H x"
by (simp add: H_def sum_upto_def sum.distrib ring_distribs sum_subtractf
sum_distrib_left sum_distrib_right mult_ac)
also have "sum_upto (\<lambda>m. g (m ^ 2) / real m ^ 2) (sqrt x) =
sum_upto (\<lambda>m. moebius_mu m / real m ^ 2) (sqrt x)"
unfolding sum_upto_altdef by (intro sum.cong refl) (simp_all add: g_square)
also have "sum_upto (\<lambda>m. moebius_mu m / (real m)\<^sup>2) (sqrt x) =
(\<Sum>m<Suc (nat \<lfloor>sqrt x\<rfloor>). moebius_mu m / (real m) ^ 2)"
unfolding sum_upto_altdef by (intro sum.mono_neutral_cong_left) auto
also have "\<dots> = (6 / pi^2 - J (sqrt x))"
using sums_split_initial_segment[OF moebius_over_square_sums, of "Suc (nat \<lfloor>sqrt x\<rfloor>)"]
by (auto simp: sums_iff algebra_simps J_def sum_upto_altdef)
finally show "?A x - 6 / pi\<^sup>2 * x = (-x) * J (sqrt x) + H x"
by (simp add: algebra_simps)
qed
hence "(\<lambda>x. ?A x - 6 / pi\<^sup>2 * x) \<in> \<Theta>(\<lambda>x. (-x) * J (sqrt x) + H x)"
by (rule bigthetaI_cong)
also have "(\<lambda>x. (-x) * J (sqrt x) + H x) \<in> O(\<lambda>x. sqrt x)"
proof (intro sum_in_bigo H_bigo)
have "(\<lambda>x. J (sqrt x)) \<in> O(\<lambda>x. 1 / sqrt x)" unfolding J_def
using moebius_sum_tail_bigo sqrt_at_top by (rule landau_o.big.compose)
hence "(\<lambda>x. (-x) * J (sqrt x)) \<in> O(\<lambda>x. x * (1 / sqrt x))"
by (intro landau_o.big.mult) simp_all
also have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
by (intro bigthetaI_cong eventually_mono[OF eventually_gt_at_top[of "0::real"]])
(auto simp: field_simps)
finally show "(\<lambda>x. (-x) * J (sqrt x)) \<in> O(\<lambda>x. sqrt x)" .
qed
finally show ?thesis .
qed
theorem squarefree_asymptotics':
"(\<lambda>x. card {n. real n \<le> x \<and> squarefree n}) =o (\<lambda>x. 6 / pi\<^sup>2 * x) +o O(\<lambda>x. sqrt x)"
using squarefree_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem squarefree_asymptotics'':
"(\<lambda>x. card {n. real n \<le> x \<and> squarefree n}) \<sim>[at_top] (\<lambda>x. 6 / pi\<^sup>2 * x)"
proof -
have "(\<lambda>x. card {n. real n \<le> x \<and> squarefree n} - 6 / pi\<^sup>2 * x) \<in> O(\<lambda>x. sqrt x)"
by (rule squarefree_asymptotics)
also have "(sqrt :: real \<Rightarrow> real) \<in> \<Theta>(\<lambda>x. x powr (1/2))"
by (intro bigthetaI_cong eventually_mono[OF eventually_ge_at_top[of "0::real"]])
(auto simp: powr_half_sqrt)
also have "(\<lambda>x::real. x powr (1/2)) \<in> o(\<lambda>x. 6 / pi ^ 2 * x)" by simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
subsection \<open>The hyperbola method\<close>
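text \<open>
  Dirichlet's hyperbola method expresses the summatory function of a Dirichlet
  product in terms of partial sums up to \<open>sqrt x\<close> only: writing \<open>F\<close> and \<open>G\<close> for
  the summatory functions of \<open>f\<close> and \<open>g\<close>, the sum of \<open>dirichlet_prod f g\<close> up to
  \<open>x\<close> equals the sum of \<open>f n * G (x / n)\<close> plus the sum of \<open>F (x / n) * g n\<close>
  over \<open>n \<le> sqrt x\<close>, minus \<open>F (sqrt x) * G (sqrt x)\<close>. The following lemma
  transfers this identity to big-O estimates.
\<close>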
lemma hyperbola_method_bigo:
fixes f g :: "nat \<Rightarrow> 'a :: real_normed_field"
assumes "(\<lambda>x. sum_upto (\<lambda>n. f n * sum_upto g (x / real n)) (sqrt x) - R x) \<in> O(b)"
assumes "(\<lambda>x. sum_upto (\<lambda>n. sum_upto f (x / real n) * g n) (sqrt x) - S x) \<in> O(b)"
assumes "(\<lambda>x. sum_upto f (sqrt x) * sum_upto g (sqrt x) - T x) \<in> O(b)"
shows "(\<lambda>x. sum_upto (dirichlet_prod f g) x - (R x + S x - T x)) \<in> O(b)"
proof -
let ?A = "\<lambda>x. (sum_upto (\<lambda>n. f n * sum_upto g (x / real n)) (sqrt x) - R x) +
(sum_upto (\<lambda>n. sum_upto f (x / real n) * g n) (sqrt x) - S x) +
(-(sum_upto f (sqrt x) * sum_upto g (sqrt x) - T x))"
have "(\<lambda>x. sum_upto (dirichlet_prod f g) x - (R x + S x - T x)) \<in> \<Theta>(?A)"
by (intro bigthetaI_cong eventually_mono[OF eventually_ge_at_top[of "0::real"]])
(auto simp: hyperbola_method_sqrt)
also from assms have "?A \<in> O(b)"
by (intro sum_in_bigo(1)) (simp_all only: landau_o.big.uminus_in_iff)
finally show ?thesis .
qed
lemma frac_le_1: "frac x \<le> 1"
unfolding frac_def by linarith
lemma ln_minus_ln_floor_bound:
assumes "x \<ge> 2"
shows "ln x - ln (floor x) \<in> {0..<1 / (x - 1)}"
proof -
from assms have "ln (floor x) \<ge> ln (x - 1)" by (subst ln_le_cancel_iff) simp_all
hence "ln x - ln (floor x) \<le> ln ((x - 1) + 1) - ln (x - 1)" by simp
also from assms have "\<dots> < 1 / (x - 1)" by (intro ln_diff_le_inverse) simp_all
finally have "ln x - ln (floor x) < 1 / (x - 1)" by simp
moreover from assms have "ln x \<ge> ln (of_int \<lfloor>x\<rfloor>)" by (subst ln_le_cancel_iff) simp_all
ultimately show ?thesis by simp
qed
lemma ln_minus_ln_floor_bigo:
"(\<lambda>x::real. ln x - ln (floor x)) \<in> O(\<lambda>x. 1 / x)"
proof -
have "eventually (\<lambda>x. norm (ln x - ln (floor x)) \<le> 1 * norm (1 / (x - 1))) at_top"
using eventually_ge_at_top[of "2::real"]
proof eventually_elim
case (elim x)
with ln_minus_ln_floor_bound[OF this] show ?case by auto
qed
hence "(\<lambda>x::real. ln x - ln (floor x)) \<in> O(\<lambda>x. 1 / (x - 1))" by (rule bigoI)
also have "(\<lambda>x::real. 1 / (x - 1)) \<in> O(\<lambda>x. 1 / x)" by simp
finally show ?thesis .
qed
lemma divisor_count_asymptotics_aux:
"(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
proof -
define R where "R = (\<lambda>x. \<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. frac (x / real i))"
define S where "S = (\<lambda>x. ln (real (nat \<lfloor>sqrt x\<rfloor>)) - ln x / 2)"
have R_bound: "R x \<in> {0..sqrt x}" if x: "x \<ge> 0" for x
proof -
have "R x \<le> (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1)" unfolding R_def by (intro sum_mono frac_le_1)
also from x have "\<dots> = of_int \<lfloor>sqrt x\<rfloor>" by simp
also have "\<dots> \<le> sqrt x" by simp
finally have "R x \<le> sqrt x" .
moreover have "R x \<ge> 0" unfolding R_def by (intro sum_nonneg) simp_all
ultimately show ?thesis by simp
qed
have R_bound': "norm (R x) \<le> 1 * norm (sqrt x)" if "x \<ge> 0" for x
using R_bound[OF that] that by simp
have R_bigo: "R \<in> O(sqrt)" using eventually_ge_at_top[of "0::real"]
by (intro bigoI[of _ 1], elim eventually_mono) (rule R_bound')
have "eventually (\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) =
x * harm (nat \<lfloor>sqrt x\<rfloor>) - R x) at_top"
using eventually_ge_at_top[of "0 :: real"]
proof eventually_elim
case (elim x)
have "sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) =
(\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. of_int \<lfloor>x / real i\<rfloor>)" using elim
by (simp add: sum_upto_altdef)
also have "\<dots> = x * (\<Sum>i\<in>{0<..nat \<lfloor>sqrt x\<rfloor>}. 1 / real i) - R x"
by (simp add: sum_subtractf frac_def R_def sum_distrib_left)
also have "{0<..nat \<lfloor>sqrt x\<rfloor>} = {1..nat \<lfloor>sqrt x\<rfloor>}" by auto
also have "(\<Sum>i\<in>\<dots>. 1 / real i) = harm (nat \<lfloor>sqrt x\<rfloor>)" by (simp add: harm_def divide_simps)
finally show ?case .
qed
hence "(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1 :: real) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in>
\<Theta>(\<lambda>x. x * (harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (nat \<lfloor>sqrt x\<rfloor>) + euler_mascheroni)) - R x + x * S x)"
(is "_ \<in> \<Theta>(?A)")
by (intro bigthetaI_cong) (elim eventually_mono, simp_all add: algebra_simps S_def)
also have "?A \<in> O(sqrt)"
proof (intro sum_in_bigo)
have "(\<lambda>x. - S x) \<in> \<Theta>(\<lambda>x. ln (sqrt x) - ln (of_int \<lfloor>sqrt x\<rfloor>))"
by (intro bigthetaI_cong eventually_mono [OF eventually_ge_at_top[of "1::real"]])
(auto simp: S_def ln_sqrt)
also have "(\<lambda>x. ln (sqrt x) - ln (of_int \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / sqrt x)"
by (rule landau_o.big.compose[OF ln_minus_ln_floor_bigo sqrt_at_top])
finally have "(\<lambda>x. x * S x) \<in> O(\<lambda>x. x * (1 / sqrt x))" by (intro landau_o.big.mult) simp_all
also have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::real"]])
(auto simp: field_simps)
finally show "(\<lambda>x. x * S x) \<in> O(sqrt)" .
next
let ?f = "\<lambda>x::real. harm (nat \<lfloor>sqrt x\<rfloor>) - (ln (real (nat \<lfloor>sqrt x\<rfloor>)) + euler_mascheroni)"
have "?f \<in> O(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>))"
proof (rule landau_o.big.compose[of _ _ _ "\<lambda>x. nat \<lfloor>sqrt x\<rfloor>"])
show "filterlim (\<lambda>x::real. nat \<lfloor>sqrt x\<rfloor>) at_top at_top"
by (intro filterlim_compose[OF filterlim_nat_sequentially]
filterlim_compose[OF filterlim_floor_sequentially] sqrt_at_top)
next
show "(\<lambda>a. harm a - (ln (real a) + euler_mascheroni)) \<in> O(\<lambda>a. 1 / real a)"
by (rule harm_expansion_bigo_simple2)
qed
also have "(\<lambda>x. 1 / real (nat \<lfloor>sqrt x\<rfloor>)) \<in> O(\<lambda>x. 1 / (sqrt x - 1))"
proof (rule bigoI[of _ 1], use eventually_ge_at_top[of 2] in eventually_elim)
case (elim x)
have "sqrt x \<le> 1 + real_of_int \<lfloor>sqrt x\<rfloor>" by linarith
with elim show ?case by (simp add: field_simps)
qed
also have "(\<lambda>x::real. 1 / (sqrt x - 1)) \<in> O(\<lambda>x. 1 / sqrt x)"
by (rule landau_o.big.compose[OF _ sqrt_at_top]) simp_all
finally have "(\<lambda>x. x * ?f x) \<in> O(\<lambda>x. x * (1 / sqrt x))"
by (intro landau_o.big.mult landau_o.big_refl)
also have "(\<lambda>x::real. x * (1 / sqrt x)) \<in> \<Theta>(\<lambda>x. sqrt x)"
by (intro bigthetaI_cong eventually_mono[OF eventually_gt_at_top[of "0::real"]])
(auto elim!: eventually_mono simp: field_simps)
finally show "(\<lambda>x. x * ?f x) \<in> O(sqrt)" .
qed fact+
finally show ?thesis .
qed
lemma sum_upto_sqrt_bound:
assumes x: "x \<ge> (0 :: real)"
shows "norm ((sum_upto (\<lambda>_. 1) (sqrt x))\<^sup>2 - x) \<le> 2 * norm (sqrt x)"
proof -
from x have "0 \<le> 2 * sqrt x * (1 - frac (sqrt x)) + frac (sqrt x) ^ 2"
by (intro add_nonneg_nonneg mult_nonneg_nonneg) (simp_all add: frac_le_1)
also from x have "\<dots> = (sqrt x - frac (sqrt x)) ^ 2 - x + 2 * sqrt x"
by (simp add: algebra_simps power2_eq_square)
also have "sqrt x - frac (sqrt x) = of_int \<lfloor>sqrt x\<rfloor>" by (simp add: frac_def)
finally have "(of_int \<lfloor>sqrt x\<rfloor>) ^ 2 - x \<ge> -2 * sqrt x" by (simp add: algebra_simps)
moreover from x have "of_int (\<lfloor>sqrt x\<rfloor>) ^ 2 \<le> sqrt x ^ 2"
by (intro power_mono) simp_all
with x have "of_int (\<lfloor>sqrt x\<rfloor>) ^ 2 - x \<le> 0" by simp
ultimately have "sum_upto (\<lambda>_. 1) (sqrt x) ^ 2 - x \<in> {-2 * sqrt x..0}"
using x by (simp add: sum_upto_altdef)
with x show ?thesis by simp
qed
lemma summatory_divisor_count_asymptotics:
"(\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x -
(x * ln x + (2 * euler_mascheroni - 1) * x)) \<in> O(sqrt)"
proof -
let ?f = "\<lambda>x. x * ln x / 2 + euler_mascheroni * x"
have "(\<lambda>x. sum_upto (dirichlet_prod (\<lambda>_. 1 :: real) (\<lambda>_. 1)) x - (?f x + ?f x - x)) \<in> O(sqrt)"
(is "?g \<in> _")
proof (rule hyperbola_method_bigo)
have "eventually (\<lambda>x::real. norm (sum_upto (\<lambda>_. 1) (sqrt x) ^ 2 - x) \<le>
2 * norm (sqrt x)) at_top"
using eventually_ge_at_top[of "0::real"] by eventually_elim (rule sum_upto_sqrt_bound)
thus "(\<lambda>x::real. sum_upto (\<lambda>_. 1) (sqrt x) * sum_upto (\<lambda>_. 1) (sqrt x) - x) \<in> O(sqrt)"
by (intro bigoI[of _ 2]) (simp_all add: power2_eq_square)
next
show "(\<lambda>x. sum_upto (\<lambda>n. 1 * sum_upto (\<lambda>_. 1) (x / real n)) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
using divisor_count_asymptotics_aux by simp
next
show "(\<lambda>x. sum_upto (\<lambda>n. sum_upto (\<lambda>_. 1) (x / real n) * 1) (sqrt x) -
(x * ln x / 2 + euler_mascheroni * x)) \<in> O(sqrt)"
using divisor_count_asymptotics_aux by simp
qed
also have "divisor_count n = dirichlet_prod (\<lambda>_. 1) (\<lambda>_. 1) n" for n
using fds_divisor_count
by (cases "n = 0") (simp_all add: fds_eq_iff power2_eq_square fds_nth_mult)
hence "?g = (\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x -
(x * ln x + (2 * euler_mascheroni - 1) * x))"
by (intro ext) (simp_all add: algebra_simps dirichlet_prod_def)
finally show ?thesis .
qed
theorem summatory_divisor_count_asymptotics':
"(\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x) =o
(\<lambda>x. x * ln x + (2 * euler_mascheroni - 1) * x) +o O(\<lambda>x. sqrt x)"
using summatory_divisor_count_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem summatory_divisor_count_asymptotics'':
"sum_upto (\<lambda>n. real (divisor_count n)) \<sim>[at_top] (\<lambda>x. x * ln x)"
proof -
have "(\<lambda>x. sum_upto (\<lambda>n. real (divisor_count n)) x -
(x * ln x + (2 * euler_mascheroni - 1) * x)) \<in> O(sqrt)"
by (rule summatory_divisor_count_asymptotics)
also have "sqrt \<in> \<Theta>(\<lambda>x. x powr (1/2))"
by (intro bigthetaI_cong eventually_mono [OF eventually_ge_at_top[of "0::real"]])
(auto elim!: eventually_mono simp: powr_half_sqrt)
also have "(\<lambda>x::real. x powr (1/2)) \<in> o(\<lambda>x. x * ln x + (2 * euler_mascheroni - 1) * x)" by simp
finally have "sum_upto (\<lambda>n. real (divisor_count n)) \<sim>[at_top]
(\<lambda>x. x * ln x + (2 * euler_mascheroni - 1) * x)"
by (simp add: asymp_equiv_altdef)
also have "\<dots> \<sim>[at_top] (\<lambda>x. x * ln x)" by (subst asymp_equiv_add_right) simp_all
finally show ?thesis .
qed
lemma summatory_divisor_eq:
"sum_upto (\<lambda>n. real (divisor_count n)) (real m) = card {(n,d). n \<in> {0<..m} \<and> d dvd n}"
proof -
have "sum_upto (\<lambda>n. real (divisor_count n)) m = card (SIGMA n:{0<..m}. {d. d dvd n})"
unfolding sum_upto_altdef divisor_count_def by (subst card_SigmaI) simp_all
also have "(SIGMA n:{0<..m}. {d. d dvd n}) = {(n,d). n \<in> {0<..m} \<and> d dvd n}" by auto
finally show ?thesis .
qed
context
fixes M :: "nat \<Rightarrow> real"
defines "M \<equiv> \<lambda>m. card {(n,d). n \<in> {0<..m} \<and> d dvd n} / card {0<..m}"
begin
lemma mean_divisor_count_asymptotics:
"(\<lambda>m. M m - (ln m + 2 * euler_mascheroni - 1)) \<in> O(\<lambda>m. 1 / sqrt m)"
proof -
have "(\<lambda>m. M m - (ln m + 2 * euler_mascheroni - 1))
\<in> \<Theta>(\<lambda>m. (sum_upto (\<lambda>n. real (divisor_count n)) (real m) -
(m * ln m + (2 * euler_mascheroni - 1) * m)) / m)" (is "_ \<in> \<Theta>(?f)")
unfolding M_def
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::nat"]])
(auto simp: summatory_divisor_eq field_simps)
also have "?f \<in> O(\<lambda>m. sqrt m / m)"
by (intro landau_o.big.compose[OF _ filterlim_real_sequentially] landau_o.big.divide_right
summatory_divisor_count_asymptotics eventually_at_top_not_equal)
also have "(\<lambda>m::nat. sqrt m / m) \<in> \<Theta>(\<lambda>m. 1 / sqrt m)"
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::nat"]])
(auto simp: field_simps)
finally show ?thesis .
qed
theorem mean_divisor_count_asymptotics':
"M =o (\<lambda>x. ln x + 2 * euler_mascheroni - 1) +o O(\<lambda>x. 1 / sqrt x)"
using mean_divisor_count_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem mean_divisor_count_asymptotics'': "M \<sim>[at_top] ln"
proof -
have "(\<lambda>x. M x - (ln x + 2 * euler_mascheroni - 1)) \<in> O(\<lambda>x. 1 / sqrt x)"
by (rule mean_divisor_count_asymptotics)
also have "(\<lambda>x. 1 / sqrt (real x)) \<in> \<Theta>(\<lambda>x. x powr (-1/2))"
using eventually_gt_at_top[of "0::nat"]
by (intro bigthetaI_cong)
(auto elim!: eventually_mono simp: powr_half_sqrt field_simps powr_minus)
also have "(\<lambda>x::nat. x powr (-1/2)) \<in> o(\<lambda>x. ln x + 2 * euler_mascheroni - 1)"
by (intro smallo_real_nat_transfer) simp_all
finally have "M \<sim>[at_top] (\<lambda>x. ln x + 2 * euler_mascheroni - 1)"
by (simp add: asymp_equiv_altdef)
also have "\<dots> = (\<lambda>x::nat. ln x + (2 * euler_mascheroni - 1))" by (simp add: algebra_simps)
also have "\<dots> \<sim>[at_top] (\<lambda>x::nat. ln x)" by (subst asymp_equiv_add_right) auto
finally show ?thesis .
qed
end
subsection \<open>The asymptotic distribution of coprime pairs\<close>
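text \<open>
  Combining the totient asymptotics with a symmetry argument yields the classic
  result that the probability that two integers drawn uniformly at random from
  \<open>{1..N}\<close> are coprime tends to \<open>6 / pi\<^sup>2\<close> as \<open>N\<close> tends to infinity.
\<close>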
context
fixes A :: "nat \<Rightarrow> (nat \<times> nat) set"
defines "A \<equiv> (\<lambda>N. {(m,n) \<in> {1..N} \<times> {1..N}. coprime m n})"
begin
lemma coprime_pairs_asymptotics:
"(\<lambda>N. real (card (A N)) - 6 / pi\<^sup>2 * (real N)\<^sup>2) \<in> O(\<lambda>N. real N * ln (real N))"
proof -
define C :: "nat \<Rightarrow> (nat \<times> nat) set"
where "C = (\<lambda>N. (\<Union>m\<in>{1..N}. (\<lambda>n. (m,n)) ` totatives m))"
define D :: "nat \<Rightarrow> (nat \<times> nat) set"
where "D = (\<lambda>N. (\<Union>n\<in>{1..N}. (\<lambda>m. (m,n)) ` totatives n))"
have fin: "finite (C N)" "finite (D N)" for N unfolding C_def D_def
by (intro finite_UN_I finite_imageI; simp)+
have *: "card (A N) = 2 * (\<Sum>m\<in>{0<..N}. totient m) - 1" if N: "N > 0" for N
proof -
have "A N = C N \<union> D N"
by (auto simp add: A_def C_def D_def totatives_def image_iff ac_simps)
also have "card \<dots> = card (C N) + card (D N) - card (C N \<inter> D N)"
using card_Un_Int[OF fin[of N]] by arith
also have "C N \<inter> D N = {(1, 1)}" using N by (auto simp: image_iff totatives_def C_def D_def)
also have "D N = (\<lambda>(x,y). (y,x)) ` C N" by (simp add: image_UN image_image C_def D_def)
also have "card \<dots> = card (C N)" by (rule card_image) (simp add: inj_on_def C_def)
also have "card (C N) = (\<Sum>m\<in>{1..N}. card ((\<lambda>n. (m,n)) ` totatives m))"
unfolding C_def by (intro card_UN_disjoint) auto
also have "\<dots> = (\<Sum>m\<in>{1..N}. totient m)" unfolding totient_def
by (subst card_image) (auto simp: inj_on_def)
also have "\<dots> = (\<Sum>m\<in>{0<..N}. totient m)" by (intro sum.cong refl) auto
finally show "card (A N) = 2 * \<dots> - 1" by simp
qed
have **: "(\<Sum>m\<in>{0<..N}. totient m) \<ge> 1" if "N \<ge> 1" for N
proof -
have "1 \<le> N" by fact
also have "N = (\<Sum>m\<in>{0<..N}. 1)" by simp
also have "(\<Sum>m\<in>{0<..N}. 1) \<le> (\<Sum>m\<in>{0<..N}. totient m)"
by (intro sum_mono) (simp_all add: Suc_le_eq)
finally show ?thesis .
qed
have "(\<lambda>N. real (card (A N)) - 6 / pi\<^sup>2 * (real N)\<^sup>2) \<in>
\<Theta>(\<lambda>N. 2 * (sum_upto (\<lambda>m. real (totient m)) (real N) - (3 / pi\<^sup>2 * (real N)\<^sup>2)) - 1)"
(is "_ \<in> \<Theta>(?f)") using * **
by (intro bigthetaI_cong eventually_mono [OF eventually_gt_at_top[of "0::nat"]])
(auto simp: of_nat_diff sum_upto_altdef)
also have "?f \<in> O(\<lambda>N. real N * ln (real N))"
proof (rule landau_o.big.compose[OF _ filterlim_real_sequentially], rule sum_in_bigo)
show " (\<lambda>x. 2 * (sum_upto (\<lambda>m. real (totient m)) x - 3 / pi\<^sup>2 * x\<^sup>2)) \<in> O(\<lambda>x. x * ln x)"
by (subst landau_o.big.cmult_in_iff, simp, rule summatory_totient_asymptotics)
qed simp_all
finally show ?thesis .
qed
theorem coprime_pairs_asymptotics':
"(\<lambda>N. real (card (A N))) =o (\<lambda>N. 6 / pi\<^sup>2 * (real N)\<^sup>2) +o O(\<lambda>N. real N * ln (real N))"
using coprime_pairs_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem coprime_pairs_asymptotics'':
"(\<lambda>N. real (card (A N))) \<sim>[at_top] (\<lambda>N. 6 / pi\<^sup>2 * (real N)\<^sup>2)"
proof -
have "(\<lambda>N. real (card (A N)) - 6 / pi\<^sup>2 * (real N) ^ 2) \<in> O(\<lambda>N. real N * ln (real N))"
by (rule coprime_pairs_asymptotics)
also have "(\<lambda>N. real N * ln (real N)) \<in> o(\<lambda>N. 6 / pi ^ 2 * real N ^ 2)"
by (rule landau_o.small.compose[OF _ filterlim_real_sequentially]) simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
theorem coprime_probability_tendsto:
"(\<lambda>N. card (A N) / card ({1..N} \<times> {1..N})) \<longlonglongrightarrow> 6 / pi\<^sup>2"
proof -
have "(\<lambda>N. 6 / pi ^ 2) \<sim>[at_top] (\<lambda>N. 6 / pi ^ 2 * real N ^ 2 / real N ^ 2)"
using eventually_gt_at_top[of "0::nat"]
by (intro asymp_equiv_refl_ev) (auto elim!: eventually_mono)
also have "\<dots> \<sim>[at_top] (\<lambda>N. real (card (A N)) / real N ^ 2)"
by (intro asymp_equiv_intros asymp_equiv_symI[OF coprime_pairs_asymptotics''])
also have "\<dots> \<sim>[at_top] (\<lambda>N. real (card (A N)) / real (card ({1..N} \<times> {1..N})))"
by (simp add: power2_eq_square)
finally have "\<dots> \<sim>[at_top] (\<lambda>_. 6 / pi ^ 2)" by (simp add: asymp_equiv_sym)
thus ?thesis by (rule asymp_equivD_const)
qed
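text \<open>
  Hence the probability that two numbers drawn uniformly at random from
  $\{1,\ldots,N\}$ are coprime tends to $6/\pi^2 \approx 0.6079$ as
  $N \rightarrow \infty$.
\<close>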
end
subsection \<open>The asymptotics of the number of Farey fractions\<close>
definition farey_fractions :: "nat \<Rightarrow> rat set" where
"farey_fractions N = {q :: rat \<in> {0<..1}. snd (quotient_of q) \<le> int N} "
lemma Fract_eq_coprime:
assumes "Rat.Fract a b = Rat.Fract c d" "b > 0" "d > 0" "coprime a b" "coprime c d"
shows "a = c" "b = d"
proof -
from assms have "a * d = c * b" by (auto simp: eq_rat)
hence "abs (a * d) = abs (c * b)" by (simp only:)
hence "abs a * abs d = abs c * abs b" by (simp only: abs_mult)
also have "?this \<longleftrightarrow> abs a = abs c \<and> d = b"
using assms by (subst coprime_crossproduct_int) simp_all
finally show "b = d" by simp
with \<open>a * d = c * b\<close> and \<open>b > 0\<close> show "a = c" by simp
qed
lemma quotient_of_split:
"P (quotient_of q) = (\<forall>a b. b > 0 \<longrightarrow> coprime a b \<longrightarrow> q = Rat.Fract a b \<longrightarrow> P (a, b))"
by (cases q) (auto simp: quotient_of_Fract dest: Fract_eq_coprime)
lemma quotient_of_split_asm:
"P (Rat.quotient_of q) = (\<not>(\<exists>a b. b > 0 \<and> coprime a b \<and> q = Rat.Fract a b \<and> \<not>P (a, b)))"
using quotient_of_split[of P q] by blast
lemma farey_fractions_bij:
"bij_betw (\<lambda>(a,b). Rat.Fract (int a) (int b))
{(a,b)|a b. 0 < a \<and> a \<le> b \<and> b \<le> N \<and> coprime a b} (farey_fractions N)"
proof (rule bij_betwI[of _ _ _ "\<lambda>q. case quotient_of q of (a, b) \<Rightarrow> (nat a, nat b)"], goal_cases)
case 1
show ?case
by (auto simp: farey_fractions_def Rat.zero_less_Fract_iff Rat.Fract_le_one_iff
Rat.quotient_of_Fract Rat.normalize_def gcd_int_def Let_def)
next
case 2
show ?case
by (auto simp add: farey_fractions_def Rat.Fract_le_one_iff Rat.zero_less_Fract_iff split: prod.splits quotient_of_split_asm)
(simp add: coprime_int_iff [symmetric])
next
case (3 x)
thus ?case by (auto simp: Rat.quotient_of_Fract Rat.normalize_def Let_def gcd_int_def)
next
case (4 x)
thus ?case unfolding farey_fractions_def
by (split quotient_of_split) (auto simp: Rat.zero_less_Fract_iff)
qed
lemma card_farey_fractions: "card (farey_fractions N) = sum totient {0<..N}"
proof -
have "card (farey_fractions N) = card {(a,b)|a b. 0 < a \<and> a \<le> b \<and> b \<le> N \<and> coprime a b}"
using farey_fractions_bij by (rule bij_betw_same_card [symmetric])
also have "{(a,b)|a b. 0 < a \<and> a \<le> b \<and> b \<le> N \<and> coprime a b} =
(\<Union>b\<in>{0<..N}. (\<lambda>a. (a, b)) ` totatives b)"
by (auto simp: totatives_def image_iff)
also have "card \<dots> = (\<Sum>b\<in>{0<..N}. card ((\<lambda>a. (a, b)) ` totatives b))"
by (intro card_UN_disjoint) auto
also have "\<dots> = (\<Sum>b\<in>{0<..N}. totient b)"
unfolding totient_def by (intro sum.cong refl card_image) (auto simp: inj_on_def)
finally show ?thesis .
qed
lemma card_farey_fractions_asymptotics:
"(\<lambda>N. real (card (farey_fractions N)) - 3 / pi\<^sup>2 * (real N)\<^sup>2) \<in> O(\<lambda>N. real N * ln (real N))"
proof -
have "(\<lambda>N. sum_upto (\<lambda>n. real (totient n)) (real N) - 3 / pi\<^sup>2 * (real N)\<^sup>2)
\<in> O(\<lambda>N. real N * ln (real N))" (is "?f \<in> _")
using summatory_totient_asymptotics filterlim_real_sequentially
by (rule landau_o.big.compose)
also have "?f = (\<lambda>N. real (card (farey_fractions N)) - 3 / pi\<^sup>2 * (real N)\<^sup>2)"
by (intro ext) (simp add: sum_upto_altdef card_farey_fractions)
finally show ?thesis .
qed
theorem card_farey_fractions_asymptotics':
"(\<lambda>N. card (farey_fractions N)) =o (\<lambda>N. 3 / pi\<^sup>2 * N^2) +o O(\<lambda>N. N * ln N)"
using card_farey_fractions_asymptotics
by (subst set_minus_plus [symmetric]) (simp_all add: fun_diff_def)
theorem card_farey_fractions_asymptotics'':
"(\<lambda>N. real (card (farey_fractions N))) \<sim>[at_top] (\<lambda>N. 3 / pi\<^sup>2 * (real N)\<^sup>2)"
proof -
have "(\<lambda>N. real (card (farey_fractions N)) - 3 / pi\<^sup>2 * (real N) ^ 2) \<in> O(\<lambda>N. real N * ln (real N))"
by (rule card_farey_fractions_asymptotics)
also have "(\<lambda>N. real N * ln (real N)) \<in> o(\<lambda>N. 3 / pi ^ 2 * real N ^ 2)"
by (rule landau_o.small.compose[OF _ filterlim_real_sequentially]) simp
finally show ?thesis by (simp add: asymp_equiv_altdef)
qed
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Dirichlet_Series/Arithmetic_Summatory_Asymptotics.thy"}
|
''' sky surface brightness models (dark sky, scattered moonlight, and
twilight contributions) used to compute the sky spectrum for given
observing conditions
'''
import os
import pickle
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from itertools import chain, combinations_with_replacement
# -- astropy --
import astropy.units as u
from astropy.time import Time
# -- specsim --
import specsim
from specsim.atmosphere import Moon
# -- feasibgs --
from . import util as UT
def Isky_regression(airmass, moonill, moonalt, moonsep, sunalt, sunsep):
''' Sky surface brightness as a function of airmass, moon parameters, and
sun parameters. The sky surface brightness uses a regression model fit
using BOSS and DESI CMX sky fibers to predict V-band moonlight surface
brightness. This V-band magnitude is then used to scale up the dark time
sky.
:param airmass:
airmass
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:return specsim_wave, Isky:
returns wavelength [Angstrom], sky surface brightness [$10^{-17} erg/cm^{2}/s/\AA/arcsec^2$]
'''
# initialize atmosphere model using hacked version of specsim.atmosphere.initialize
specsim_sky = _specsim_initialize('desi', model='regression')
specsim_wave = specsim_sky._wavelength # Ang
specsim_sky.airmass = airmass
specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi
specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg
specsim_sky.moon.separation_angle = moonsep * u.deg
Isky = specsim_sky.surface_brightness.value
# twilight contribution
if sunalt > -20.:
w_twi, I_twi = _cI_twi(sunalt, sunsep, airmass)
I_twi /= np.pi
I_twi_interp = interp1d(10. * w_twi, I_twi, fill_value='extrapolate')
Isky += np.clip(I_twi_interp(specsim_wave), 0, None)
return specsim_wave, Isky
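# A minimal usage sketch (illustrative parameter values; assumes the specsim
# 'desi' configuration and the regression sky data are installed):
#   wave, Isky = Isky_regression(airmass=1.2, moonill=0.7, moonalt=40.,
#                                moonsep=60., sunalt=-30., sunsep=120.)
# wave is in Angstrom and Isky in 1e-17 erg/cm^2/s/A/arcsec^2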
def Isky_newKS_twi(airmass, moonill, moonalt, moonsep, sunalt, sunsep):
''' Sky surface brightness as a function of airmass, moon parameters, and sun parameters.
The sky surface brightness uses the KS model scaling with coefficients re-fit to match
BOSS sky data and includes a twilight contribution from Parker's thesis.
:param airmass:
airmass
:param moonill:
moon illumination fraction: 0 - 1
:param moonalt:
moon altitude: 0 - 90 deg
:param moonsep:
moon separation angle: 0 - 180 deg
:param sunalt:
sun altitude: 0 - 90 deg
:param sunsep:
sun separation: 0 - 90 deg
:return specsim_wave, Isky:
returns wavelength [Angstrom] and sky surface brightness [$10^{-17} erg/cm^{2}/s/\AA/arcsec^2$]
'''
# initialize atmosphere model using hacked version of specsim.atmosphere.initialize
specsim_sky = _specsim_initialize('desi', model='refit_ks')
specsim_wave = specsim_sky._wavelength # Ang
specsim_sky.airmass = airmass
specsim_sky.moon.moon_phase = np.arccos(2.*moonill - 1)/np.pi
specsim_sky.moon.moon_zenith = (90. - moonalt) * u.deg
specsim_sky.moon.separation_angle = moonsep * u.deg
# updated KS coefficients
specsim_sky.moon.KS_CR = 458173.535128
specsim_sky.moon.KS_CM0 = 5.540103
specsim_sky.moon.KS_CM1 = 178.141045
_sky = specsim_sky._surface_brightness_dict['dark'].copy()
_sky *= specsim_sky.extinction
I_ks_rescale = specsim_sky.surface_brightness
Isky = I_ks_rescale.value
# twilight contribution
if sunalt > -20.:
w_twi, I_twi = _cI_twi(sunalt, sunsep, airmass)
I_twi /= np.pi
I_twi_interp = interp1d(10. * w_twi, I_twi, fill_value='extrapolate')
Isky += np.clip(I_twi_interp(specsim_wave), 0, None)
return specsim_wave, Isky
def Isky_parker(airmass, ecl_lat, gal_lat, gal_lon, tai, sun_alt, sun_sep, moon_phase, moon_ill, moon_alt, moon_sep):
''' Parker's sky model, which is a function of:
:param airmass:
airmass
:param ecl_lat:
ecliptic latitude (used for zodiacal light contribution)
:param gal_lat:
galactic latitude (used for ISL contribution)
:param gal_lon:
galactic longitude (used for ISL contribution)
:param tai:
time in seconds
:param sun_alt:
sun altitude: 0 - 90 deg
:param sun_sep:
sun separation: 0 - 90 deg
:param moon_phase:
lunar phase angle (g = 0 for full moon, 180 for new moon)
:param moon_ill:
moon illumination fraction: 0 - 1
:param moon_alt:
moon altitude: 0 - 90 deg
:param moon_sep:
moon separation angle: 0 - 180 deg
'''
from astroplan import Observer
from astropy.coordinates import EarthLocation
X = airmass # air mass
beta = ecl_lat # ecliptic latitude ( used for zodiacal light contribution )
l = gal_lat # galactic latitude ( used for ISL contribution )
b = gal_lon # galactic longitude ( used for ISL contribution )
_kpno = EarthLocation.of_site('kitt peak')
obs_time = Time(tai/86400., scale='tai', format='mjd', location=_kpno)
mjd = obs_time.mjd
# fractional months ( used for seasonal contribution)
month_frac = obs_time.datetime.month + obs_time.datetime.day/30.
# fractional hour ( used for hourly contribution)
kpno = Observer(_kpno)
sun_rise = kpno.sun_rise_time(obs_time, which='next')
sun_set = kpno.sun_set_time(obs_time, which='previous')
hour = ((obs_time - sun_set).sec)/3600.
hour_frac = hour/((Time(sun_rise, format='mjd') - Time(sun_set,format = 'mjd')).sec/3600.)
alpha = sun_alt # sun altitude
delta = sun_sep # sun separation (separation between the target and the sun's location)
# used for scattered moonlight
g = moon_phase # moon phase
altm = moon_alt
illm = moon_ill
delm = moon_sep
# get coefficients
coeffs = _read_parkerCoeffs()
# sky continuum
_w, _Icont = _parker_Icontinuum(coeffs, X, beta, l, b, mjd, month_frac, hour_frac, alpha, delta, altm, illm, delm, g)
S_continuum = _Icont / np.pi # BOSS has 2 arcsec diameter
# sky emission from the UVES continuum subtraction
w_uves, S_uves = np.loadtxt(''.join([UT.code_dir(), 'dat/sky/UVES_sky_emission.dat']),
unpack=True, usecols=[0,1])
f_uves = interp1d(w_uves, S_uves, bounds_error=False, fill_value='extrapolate')
S_emission = f_uves(_w)
return _w, S_continuum + S_emission
def Isky_parker_radecobs(ra, dec, obs_time):
''' wrapper for Isky_parker, where the input parameters are calculated based
on RA, Dec, and obs_time
'''
from astroplan import download_IERS_A
from astropy.coordinates import EarthLocation, SkyCoord, AltAz, get_sun, get_moon
download_IERS_A()
# target coordinates
coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)
# observed time (UTC)
utc_time = Time(obs_time)
kpno = EarthLocation.of_site('kitt peak')
kpno_altaz = AltAz(obstime=utc_time, location=kpno)
coord_altaz = coord.transform_to(kpno_altaz)
airmass = coord_altaz.secz
ecl_lat = coord.barycentrictrueecliptic.lat.deg
gal_lat = coord.galactic.b.deg # galactic latitude ( used for ISL contribution )
gal_lon = coord.galactic.l.deg # galactic longitude ( used for ISL contribution )
tai = utc_time.tai.mjd * 86400. # TAI in seconds, as Isky_parker expects
# sun altitude (degrees)
sun = get_sun(utc_time)
sun_altaz = sun.transform_to(kpno_altaz)
sun_alt = sun_altaz.alt.deg
# sun separation
sun_sep = sun.separation(coord).deg
# used for scattered moonlight
moon = get_moon(utc_time)
moon_altaz = moon.transform_to(kpno_altaz)
moon_alt = moon_altaz.alt.deg
moon_sep = moon.separation(coord).deg
elongation = sun.separation(moon)
phase = np.arctan2(sun.distance * np.sin(elongation), moon.distance - sun.distance*np.cos(elongation))
moon_phase = phase.value
moon_ill = (1. + np.cos(phase))/2.
return Isky_parker(airmass, ecl_lat, gal_lat, gal_lon, tai, sun_alt, sun_sep, moon_phase, moon_ill, moon_alt, moon_sep)
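# Illustrative call (hypothetical pointing; obs_time is any string accepted
# by astropy.time.Time):
#   wave, Isky = Isky_parker_radecobs(ra=180., dec=30.,
#                                     obs_time='2020-01-15 06:00:00')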
def _specsim_initialize(config, model='regression'):
''' hacked version of specsim.atmosphere.initialize, which initializes the
atmosphere model from configuration parameters.
'''
if specsim.config.is_string(config):
config = specsim.config.load_config(config)
atm_config = config.atmosphere
# Load tabulated data.
surface_brightness_dict = config.load_table(
atm_config.sky, 'surface_brightness', as_dict=True)
extinction_coefficient = config.load_table(
atm_config.extinction, 'extinction_coefficient')
# Initialize an optional atmospheric seeing PSF.
psf_config = getattr(atm_config, 'seeing', None)
if psf_config:
seeing = dict(
fwhm_ref=specsim.config.parse_quantity(psf_config.fwhm_ref),
wlen_ref=specsim.config.parse_quantity(psf_config.wlen_ref),
moffat_beta=float(psf_config.moffat_beta))
else:
seeing = None
# Initialize an optional lunar scattering model.
moon_config = getattr(atm_config, 'moon', None)
if moon_config:
moon_spectrum = config.load_table(moon_config, 'flux')
c = config.get_constants(moon_config,
['moon_zenith', 'separation_angle', 'moon_phase'])
moon = _Moon(
config.wavelength, moon_spectrum, extinction_coefficient,
atm_config.airmass, c['moon_zenith'], c['separation_angle'],
c['moon_phase'], model=model)
else:
moon = None
atmosphere = specsim.atmosphere.Atmosphere(
config.wavelength, surface_brightness_dict, extinction_coefficient,
atm_config.extinct_emission, atm_config.sky.condition,
atm_config.airmass, seeing, moon)
if config.verbose:
print(
"Atmosphere initialized with condition '{0}' from {1}."
.format(atmosphere.condition, atmosphere.condition_names))
if seeing:
print('Seeing is {0} at {1} with Moffat beta {2}.'
.format(seeing['fwhm_ref'], seeing['wlen_ref'],
seeing['moffat_beta']))
if moon:
print(
'Lunar V-band extinction coefficient is {0:.5f}.'
.format(moon.vband_extinction))
return atmosphere
class _Moon(Moon):
''' specimsim.atmosphere.Moon object hacked to work with a Krisciunas & Schaefer (1991)
model with extra free parameters
'''
def __init__(self, wavelength, moon_spectrum, extinction_coefficient,
airmass, moon_zenith, separation_angle, moon_phase,
model='regression'):
# initialize via super function
super().__init__(wavelength, moon_spectrum, extinction_coefficient,
airmass, moon_zenith, separation_angle, moon_phase)
self.model = model
# default KS coefficients
self.KS_CR = 10**5.36 # proportionality constant in the Rayleigh scattering function
# constants for the Mie scattering function term
self.KS_CM0 = 6.15
self.KS_CM1 = 40.
self.KS_M0 = -12.73
self.KS_M1 = 0.026
self.KS_M2 = 4.
def _update(self):
"""Update the model based on the current parameter values.
"""
self._update_required = False
# Calculate the V-band surface brightness of scattered moonlight.
if self.model == 'refit_ks':
self._scattered_V = krisciunas_schaefer_free(
self.obs_zenith, self.moon_zenith, self.separation_angle,
self.moon_phase, self.vband_extinction, self.KS_CR, self.KS_CM0,
self.KS_CM1, self.KS_M0, self.KS_M1, self.KS_M2)
elif self.model == 'regression':
self._scattered_V = _scattered_V_regression(
self.airmass,
0.5 * (np.cos(np.pi * self.moon_phase) + 1.),
90 - self.moon_zenith.value,
self.separation_angle.value) * u.mag / u.arcsec**2
else:
raise NotImplementedError
# Calculate the wavelength-dependent extinction of moonlight
# scattered once into the observed field of view.
scattering_airmass = (
1 - 0.96 * np.sin(self.moon_zenith) ** 2) ** (-0.5)
extinction = (
10 ** (-self._extinction_coefficient * scattering_airmass / 2.5) *
(1 - 10 ** (-self._extinction_coefficient * self.airmass / 2.5)))
self._surface_brightness = self._moon_spectrum * extinction
# Renormalized the extincted spectrum to the correct V-band magnitude.
raw_V = self._vband.get_ab_magnitude(
self._surface_brightness, self._wavelength) * u.mag
area = 1 * u.arcsec ** 2
self._surface_brightness *= 10 ** (
-(self._scattered_V * area - raw_V) / (2.5 * u.mag)) / area
@property
def KS_CR(self):
return self._KS_CR
@KS_CR.setter
def KS_CR(self, ks_cr):
self._KS_CR = ks_cr
self._update_required = True
@property
def KS_CM0(self):
return self._KS_CM0
@KS_CM0.setter
def KS_CM0(self, ks_cm0):
self._KS_CM0 = ks_cm0
self._update_required = True
@property
def KS_CM1(self):
return self._KS_CM1
@KS_CM1.setter
def KS_CM1(self, ks_cm1):
self._KS_CM1 = ks_cm1
self._update_required = True
@property
def KS_M0(self):
return self._KS_M0
@KS_M0.setter
def KS_M0(self, ks_m0):
self._KS_M0 = ks_m0
self._update_required = True
@property
def KS_M1(self):
return self._KS_M1
@KS_M1.setter
def KS_M1(self, ks_m1):
self._KS_M1 = ks_m1
self._update_required = True
@property
def KS_M2(self):
return self._KS_M2
@KS_M2.setter
def KS_M2(self, ks_m2):
self._KS_M2 = ks_m2
self._update_required = True
reg_model_coeffs = np.array([
0.00000000e+00, -1.24246947e-01, -2.19592318e-01, -1.27371956e-02,
4.16108739e-02, -8.96992463e-02, -6.74266151e-01, 2.67170371e-02,
-1.54258481e-02, -3.52318515e-01, -4.12007754e-03, 6.44355466e-02,
2.70616098e-04, -2.52914043e-04, -6.59789181e-04, -1.00704130e-01,
-1.17732794e+00, 1.00074153e-02, 2.02381309e-02, -1.03468867e+00,
7.06332796e-02, 1.80523919e-02, -8.04924203e-04, -8.78033445e-04,
-1.93926394e-04, -6.88153692e-01, -1.34713209e-01, 1.85076523e-03,
5.65520710e-05, -1.30331216e-05, -4.89722809e-04, 2.99858228e-06,
8.39852557e-06, 8.86494950e-06, 4.35592782e-06])
reg_model_intercept = 20.507688847655775
def _scattered_V_regression(airmass, moon_frac, moon_alt, moon_sep):
''' degree-3 polynomial regression fit (35 terms in the 4 input variables)
to the V-band scattered moonlight from BOSS and DESI CMX data.
'''
theta = np.atleast_2d(np.array([airmass, moon_frac, moon_alt, moon_sep]).T)
combs = chain.from_iterable(combinations_with_replacement(range(4), i)
for i in range(0, 4))
theta_transform = np.empty((theta.shape[0], len(reg_model_coeffs)))
for i, comb in enumerate(combs):
theta_transform[:, i] = theta[:, comb].prod(1)
return np.dot(theta_transform, reg_model_coeffs.T) + reg_model_intercept
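# The expansion above enumerates every monomial of total degree 0-3 in the
# 4 inputs (airmass, moon_frac, moon_alt, moon_sep): 1 + 4 + 10 + 20 = 35
# terms, matching the 35 entries of reg_model_coeffs. For example, the index
# combination (0, 1, 1) yields the term airmass * moon_frac**2.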
def krisciunas_schaefer_free(obs_zenith, moon_zenith, separation_angle, moon_phase,
vband_extinction, C_R, C_M0, C_M1, M0, M1, M2):
"""Calculate the scattered moonlight surface brightness in V band.
Based on Krisciunas and Schaefer, "A model of the brightness of moonlight",
PASP, vol. 103, Sept. 1991, p. 1033-1039 (http://dx.doi.org/10.1086/132921).
Equation numbers in the code comments refer to this paper.
Units are required for the angular inputs and the result has units of
surface brightness. With the original Krisciunas & Schaefer coefficients
(C_R = 10**5.36, C_M0 = 6.15, C_M1 = 40., M0 = -12.73, M1 = 0.026,
M2 = 4.) this reproduces the unmodified model, for example:
>>> sb = krisciunas_schaefer_free(20*u.deg, 70*u.deg, 50*u.deg, 0.25, 0.15,
... 10**5.36, 6.15, 40., -12.73, 0.026, 4.)
>>> print(np.round(sb, 3))
19.855 mag / arcsec2
The output is automatically broadcast over input arrays following the usual
numpy rules.
This method has several caveats but the authors find agreement with data at
the 8% - 23% level. See the paper for details.
Parameters
----------
obs_zenith : astropy.units.Quantity
Zenith angle of the observation in angular units.
moon_zenith : astropy.units.Quantity
Zenith angle of the moon in angular units.
separation_angle : astropy.units.Quantity
Opening angle between the observation and moon in angular units.
moon_phase : float
Phase of the moon from 0.0 (full) to 1.0 (new), which can be calculated
as abs((d / D) - 1) where d is the time since the last new moon
and D = 29.5 days is the period between new moons. The corresponding
illumination fraction is ``0.5*(1 + cos(pi * moon_phase))``.
vband_extinction : float
V-band extinction coefficient to use.
C_R, C_M0, C_M1, M0, M1, M2 : float
Free coefficients: C_R scales the Rayleigh term and C_M0, C_M1 the Mie
term of the scattering function (eqn. 21); M0, M1, M2 parametrize the
lunar V-band magnitude (eqn. 9, with M2 in units of 1e-9).
Returns
-------
astropy.units.Quantity
Observed V-band surface brightness of scattered moonlight.
"""
moon_phase = np.asarray(moon_phase)
if np.any((moon_phase < 0) | (moon_phase > 1)):
raise ValueError(
'Invalid moon phase {0}. Expected 0-1.'.format(moon_phase))
# Calculate the V-band magnitude of the moon (eqn. 9).
abs_alpha = 180. * moon_phase
#m = -12.73 + 0.026 * abs_alpha + 4e-9 * abs_alpha ** 4 (default value)
m = M0 + M1 * abs_alpha + M2 * 1e-9 * abs_alpha ** 4
# Calculate the illuminance of the moon outside the atmosphere in
# foot-candles (eqn. 8).
Istar = 10 ** (-0.4 * (m + 16.57))
# Calculate the scattering function (eqn.21).
rho = separation_angle.to(u.deg).value
f_scatter = (C_R * (1.06 + np.cos(separation_angle) ** 2) +
10 ** (C_M0 - rho / C_M1))
# Calculate the scattering airmass along the lines of sight to the
# observation and moon (eqn. 3).
X_obs = (1 - 0.96 * np.sin(obs_zenith) ** 2) ** (-0.5)
X_moon = (1 - 0.96 * np.sin(moon_zenith) ** 2) ** (-0.5)
# Calculate the V-band moon surface brightness in nanoLamberts.
B_moon = (f_scatter * Istar *
10 ** (-0.4 * vband_extinction * X_moon) *
(1 - 10 ** (-0.4 * (vband_extinction * X_obs))))
# Convert from nanoLamberts to mag / arcsec**2 using eqn.19 of
# Garstang, "Model for Artificial Night-Sky Illumination",
# PASP, vol. 98, Mar. 1986, p. 364 (http://dx.doi.org/10.1086/131768)
return ((20.7233 - np.log(B_moon / 34.08)) / 0.92104 *
u.mag / (u.arcsec ** 2))
def _cI_twi(alpha, delta, airmass):
''' twilight contribution to the sky surface brightness
:param alpha:
sun altitude [deg]
:param delta:
sun separation [deg]
:param airmass:
airmass
:return wave:
wavelength [nm] (converted to Angstrom by the callers via a factor of 10)
:return twi:
twilight contribution to the surface brightness
'''
ftwi = os.path.join(UT.dat_dir(), 'sky', 'twilight_coeffs.p')
twi_coeffs = pickle.load(open(ftwi, 'rb'))
twi = (
twi_coeffs['t0'] * np.abs(alpha) + # CT2
twi_coeffs['t1'] * np.abs(alpha)**2 + # CT1
twi_coeffs['t2'] * np.abs(delta)**2 + # CT3
twi_coeffs['t3'] * np.abs(delta) # CT4
) * np.exp(-twi_coeffs['t4'] * airmass) + twi_coeffs['c0']
return twi_coeffs['wave'], np.array(twi)
def _twilight_coeffs():
''' save twilight coefficients from Parker
'''
f = os.path.join(UT.code_dir(), 'dat', 'sky', 'MoonResults.csv')
coeffs = pd.read_csv(f, index_col=0) # pd.DataFrame.from_csv was removed in pandas 1.0
coeffs.columns = [
'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2',
'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I',
't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
'c2', 'c3', 'c4', 'c5', 'c6']
# keep moon models
twi_coeffs = coeffs[coeffs['model'] == 'twilight']
coeffs = coeffs[coeffs['model'] == 'moon']
# order based on wavelengths for convenience
wave_sort = np.argsort(np.array(coeffs['wl']))
twi = {}
twi['wave'] = np.array(coeffs['wl'])[wave_sort]
for k in ['t0', 't1', 't2', 't3', 't4', 'c0']:
twi[k] = np.array(twi_coeffs[k])[wave_sort]
# save to file
ftwi = os.path.join(UT.dat_dir(), 'sky', 'twilight_coeffs.p')
pickle.dump(twi, open(ftwi, 'wb'))
return None
##########################################################################
# contributions to parker's sky surface brightness model
##########################################################################
def _read_parkerCoeffs():
''' read the coefficients of parker's model
'''
f = ''.join([UT.code_dir(), 'dat/sky/MoonResults.csv'])
_coeffs = pd.read_csv(f, index_col=0) # pd.DataFrame.from_csv was removed in pandas 1.0
_coeffs.columns = [
'wl', 'model', 'data_var', 'unexplained_var',' X2', 'rX2',
'c0', 'c_am', 'tau', 'tau2', 'c_zodi', 'c_isl', 'sol', 'I',
't0', 't1', 't2', 't3', 't4', 'm0', 'm1', 'm2', 'm3', 'm4', 'm5', 'm6',
'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
'c2', 'c3', 'c4', 'c5', 'c6'
]
# keep moon models
coeffs = _coeffs[_coeffs['model'] == 'moon']
# order based on wavelengths for convenience
wave_sort = np.argsort(np.array(coeffs['wl']))
for k in coeffs.keys():
coeffs[k] = np.array(coeffs[k])[wave_sort]
return coeffs
def _parker_Icontinuum(coeffs, X, beta, l, b, mjd, month_frac, hour_frac, alpha, delta, altm, illm, delm, g):
''' sky continuum (Fragelius thesis Eq. 4.23)
'''
# airmass contrib.
_Iairmass = coeffs['c_am'] * X
# zodiacal contrib. (func. of ecliptic latitude)
_Izodiacal = coeffs['c_zodi'] * _parker_Izodi(beta)
_Iisl = coeffs['c_isl'] * _parker_Iisl(l, b)
_Isolar_flux = coeffs['sol'] * _parker_Isf(mjd - coeffs['I'])
_Iseasonal = _parker_cI_seas(month_frac, coeffs)
_Ihourly = _parker_cI_hour(hour_frac, coeffs)
_dT = _parker_deltaT(X, coeffs)
# When the sun is above -20 altitude, some of its light will back-scatter
# off the atmosphere into the field of view. (Fragelius thesis Eq. 4.27)
_Itwilight = _parker_cI_twi_exp(alpha, delta, X, coeffs)
# light from the moon that is scattered into our field of view (Fragelius thesis Eq. 4.28, 4.29)
_Imoon = _parker_cI_moon_exp(altm, illm, delm, g, X, coeffs)
_Iadd_continuum = coeffs['c0']
# I_continuum(lambda)
Icont = (_Iairmass + _Izodiacal + _Iisl + _Isolar_flux + _Iseasonal + _Ihourly + _Iadd_continuum) * _dT + _Itwilight + _Imoon
return 10*coeffs['wl'], np.array(Icont)
def _parker_cI_moon_exp(altm, illm, deltam, g, airmass, coeffs):
''' light from the moon that is scattered into our field of view (Fragelius thesis Eq. 4.28, 4.29)
'''
Alambda = _parker_albedo(g, coeffs) # albedo factor
moon = (coeffs['m0'] * altm**2 +
coeffs['m1'] * altm +
coeffs['m2'] * illm**2 +
coeffs['m3'] * illm +
coeffs['m4'] * deltam**2 +
coeffs['m5'] * deltam
) * Alambda * np.exp(-coeffs['m6'] * airmass)
return moon
def _parker_albedo(g, coeffs):
''' albedo, i.e. reflectivity of the moon (Fragelius thesis Eq. 4.28)
g is the lunar phase (g = 0 for full moon and 180 for new moon)
'''
albedo_table = pd.read_csv(''.join([UT.code_dir(), 'dat/sky/albedo_constants.csv']),
delim_whitespace=True)
albedo_constants = {}
for col in list(albedo_table):
line = interp1d(albedo_table['WAVELENGTH'], albedo_table[col],
bounds_error=False, fill_value=0)
albedo_constants[col] = line
p1 = 4.06054
p2 = 12.8802
p3 = -30.5858
p4 = 16.7498
A = []
for i in range(4):
A.append(albedo_constants['a%d'%i](coeffs['wl'])*(g**i))
A.append(albedo_constants['d1'](coeffs['wl']) * np.exp(-g/p1))
A.append(albedo_constants['d2'](coeffs['wl']) * np.exp(-g/p2))
A.append(albedo_constants['d3'](coeffs['wl']) * np.cos((g - p3)/p4))
lnA = np.sum(A, axis=0)
Al = np.exp(lnA)
return Al
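# The parameterisation evaluated above is
#   ln A(wl, g) = sum_{i=0..3} a_i(wl) g^i + d1(wl) exp(-g/p1)
#                 + d2(wl) exp(-g/p2) + d3(wl) cos((g - p3)/p4)
# with g the lunar phase angle (0 = full moon, 180 = new moon) and the
# a_i, d_j coefficients interpolated from the albedo_constants table.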
def _parker_cI_twi_exp(alpha, delta, airmass, coeffs):
''' When the sun is above -20 altitude, some of its light will back-scatter
off the atmosphere into the field of view. (Fragelius thesis Eq. 4.27)
no observations are made when sun is above -14 altitude.
'''
if alpha > -20.:
twi = (
coeffs['t0'] * np.abs(alpha) + # CT2
coeffs['t1'] * alpha**2 + # CT1
coeffs['t2'] * delta**2 + # CT3
coeffs['t3'] * delta # CT4
) * np.exp(-coeffs['t4'] * airmass)
else:
twi = np.zeros(len(coeffs['t0']))
return twi
def _parker_deltaT(airmass, coeffs):
'''effective transmission curve that accounts for the additional extinction
for observing at higher airmass (Fragelius thesis Eq. 4.24)
'''
zen_ext = np.loadtxt(''.join([UT.code_dir(), 'dat/sky/ZenithExtinction-KPNO.dat']))
zen_wave = zen_ext[:,0]/10.
ext = zen_ext[:,1]
zext = interp1d(zen_wave, ext, bounds_error=False, fill_value='extrapolate')
k = zext(coeffs['wl'])
return 1 - (10**(-0.4*k) - 10**(-0.4*k*airmass))
def _parker_cI_hour(hour_frac, coeffs):
''' hourly contribution (Fragelius thesis Eq. 4.26): the night is split
into six bins by fractional hour; bin 0 is the baseline and bins 1-5
carry the coefficients c2-c6.
'''
levels = np.linspace(0, 1, 7)
# clip so that hour_frac very close to 1 lands in the last bin rather
# than indexing past the six bins
idx = min(np.argmin(np.abs(levels - hour_frac)), 5)
_hours = np.zeros(6)
_hours[idx] = 1
for i in range(1,6):
if i == 1:
hours = coeffs['c'+str(i+1)] * _hours[i]
else:
hours += coeffs['c'+str(i+1)] * _hours[i]
return hours
def _parker_cI_seas(month_frac, coeffs):
# Fragelius thesis Eq. 4.25
mm = np.rint(month_frac)
if mm == 13: mm = 1
_months = np.zeros(12)
_months[int(mm-1)] = 1
month_names = ['feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
for i, mon in zip(range(1,12), month_names):
if i == 1:
months = coeffs[mon] * _months[i]
else:
months += coeffs[mon] * _months[i]
return months
def _parker_Isf(mjd):
# solar flux as a function of MJD
solar_data = np.load(''.join([UT.code_dir(), 'dat/sky/solar_flux.npy']))
solar_flux = interp1d(solar_data['MJD'], solar_data['fluxobsflux'], bounds_error=False, fill_value=0)
return solar_flux(mjd)
def _parker_Iisl(gal_lat, gal_long):
# returns float
isl_data = pickle.load(open(''.join([UT.code_dir(), 'dat/sky/isl_map.p']),'rb'))
return isl_data(gal_long, gal_lat)[0]
def _parker_Izodi(ecl_lat):
zodi_data = pickle.load(open(''.join([UT.code_dir(), 'dat/sky/s10_zodi.p']),'rb'))
return zodi_data(np.abs(ecl_lat))
|
{"hexsha": "7931a3722bf89c0a6df78883174ad969b8290e26", "size": 27204, "ext": "py", "lang": "Python", "max_stars_repo_path": "feasibgs/skymodel.py", "max_stars_repo_name": "changhoonhahn/feasiBGS", "max_stars_repo_head_hexsha": "b5f535f12cf64babc9e25bcec75edd45d8668f74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-24T15:02:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-24T15:02:34.000Z", "max_issues_repo_path": "feasibgs/skymodel.py", "max_issues_repo_name": "michaelJwilson/feasiBGS", "max_issues_repo_head_hexsha": "63975b1e60f6f93f3b5020ee51ca565f325b918d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-10-23T16:02:01.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-04T18:53:20.000Z", "max_forks_repo_path": "feasibgs/skymodel.py", "max_forks_repo_name": "michaelJwilson/feasiBGS", "max_forks_repo_head_hexsha": "63975b1e60f6f93f3b5020ee51ca565f325b918d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-12T00:19:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T00:19:41.000Z", "avg_line_length": 36.1274900398, "max_line_length": 129, "alphanum_fraction": 0.6240258785, "include": true, "reason": "import numpy,from scipy,import astropy,from astropy", "num_tokens": 8142}
|
[STATEMENT]
lemma lincomb_0coeffs : "set ms \<subseteq> M \<Longrightarrow> \<forall>s\<in>set rs. s = 0 \<Longrightarrow> rs \<bullet>\<cdot> ms = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>set ms \<subseteq> M; \<forall>s\<in>set rs. s = (0::'r)\<rbrakk> \<Longrightarrow> rs \<bullet>\<cdot> ms = (0::'m)
[PROOF STEP]
using lincomb_Nil lincomb_Cons zero_smult
[PROOF STATE]
proof (prove)
using this:
?rs = [] \<or> ?ms = [] \<Longrightarrow> ?rs \<bullet>\<cdot> ?ms = (0::'m)
(?r # ?rs) \<bullet>\<cdot> (?m # ?ms) = ?r \<cdot> ?m + ?rs \<bullet>\<cdot> ?ms
?m \<in> M \<Longrightarrow> (0::'r) \<cdot> ?m = (0::'m)
goal (1 subgoal):
1. \<lbrakk>set ms \<subseteq> M; \<forall>s\<in>set rs. s = (0::'r)\<rbrakk> \<Longrightarrow> rs \<bullet>\<cdot> ms = (0::'m)
[PROOF STEP]
by (induct rs ms rule: list_induct2') auto
|
{"llama_tokens": 351, "file": "Rep_Fin_Groups_Rep_Fin_Groups", "length": 2}
|
v = read.table(file("results.log"))
t <- data.frame(readers=v[,1], writers=v[,2], distribution=v[,3], variant=v[,4], opss=v[,5], op=v[,7])
library(plyr)
t$writers = as.factor(t$writers)
t$readers = as.numeric(t$readers)
# split the data into tx/readers and tx/refresh
compare_impl = t[grep("evmap-refresh", t$variant, invert = TRUE),]
compare_rate = t[grep("evmap-refresh", t$variant, invert = FALSE),]
compare_rate = rbind(compare_rate, compare_impl[compare_impl$variant == "evmap",])
r = compare_impl[compare_impl$op == "read",]
r <- ddply(r, c("readers", "writers", "distribution", "variant", "op"), summarise, opss = sum(opss))
w = compare_impl[compare_impl$op == "write",]
w <- ddply(w, c("readers", "writers", "distribution", "variant", "op"), summarise, opss = sum(opss))
library(ggplot2)
r$opss = r$opss / 1000000.0
p <- ggplot(data=r, aes(x=readers, y=opss, color=variant))
#p <- p + ylim(c(0, 2500))
p <- p + xlim(c(0, NA))
p <- p + facet_grid(distribution ~ writers, labeller = labeller(writers = label_both))
p <- p + geom_point(size = .4, alpha = .1)
p <- p + geom_line(size = .5)
#p <- p + stat_smooth(size = .5, se = FALSE)
p <- p + xlab("readers") + ylab("M reads/s") + ggtitle("Total reads/s with increasing # of readers")
ggsave('read-throughput.png',plot=p,width=10,height=6)
w$opss = w$opss / 1000000.0
p <- ggplot(data=w, aes(x=readers, y=opss, color=variant))
#p <- p + scale_y_log10(lim=c(1, NA))#5000))
p <- p + facet_grid(distribution ~ writers, labeller = labeller(writers = label_both))
p <- p + geom_point(size = 1, alpha = .2)
p <- p + geom_line(size = .5)
#p <- p + stat_smooth(size = .5, se = FALSE)
#p <- p + coord_cartesian(ylim=c(0,250))
p <- p + xlim(c(0, NA))
p <- p + xlab("readers") + ylab("M writes/s") + ggtitle("Total writes/s with increasing # of readers")
ggsave('write-throughput.png',plot=p,width=10,height=6)
library(scales)
w = compare_rate
w <- w[w$writers == 1,]
w <- w[w$readers == 1,]
w <- w[w$op == "write",]
w$variant = gsub("^evmap$", "evmap-refresh1", w$variant, perl = TRUE)
w$period = as.numeric(gsub("evmap-refresh([\\d]+)", "\\1", w$variant, perl = TRUE))
w$variant = gsub("evmap-refresh[\\d]+", "evmap", w$variant, perl = TRUE)
w$opss = w$opss / 1000000.0
p <- ggplot(data=w, aes(x=period, y=opss, color=distribution))
p <- p + geom_point(size = 1, alpha = .2)
p <- p + geom_line(size = .5)
p <- p + scale_x_continuous(trans="log2",
breaks = trans_breaks("log2", function(x) 2^x),
labels = trans_format("log2", math_format(2^.x)))
p <- p + xlab("refresh every N writes") + ylab("M writes/s") + ggtitle("Total writes/s with decreasing refresh frequency")
ggsave('write-with-refresh.png',plot=p,width=10,height=6)
|
{"hexsha": "5c22e41432979b4fe85a15dcd85a79d3f54ea9eb", "size": 2691, "ext": "r", "lang": "R", "max_stars_repo_path": "benchmark/plot.r", "max_stars_repo_name": "benbromhead/rust-evmap", "max_stars_repo_head_hexsha": "d4c12b8758c7e35145c40856f3d3474befe6ecda", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 244, "max_stars_repo_stars_event_min_datetime": "2020-12-21T02:00:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:09:29.000Z", "max_issues_repo_path": "benchmark/plot.r", "max_issues_repo_name": "aDotInTheVoid/rust-evmap", "max_issues_repo_head_hexsha": "6238a4a6219b3546ceef5fc55996db954571321c", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-12-21T01:43:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-08T18:22:37.000Z", "max_forks_repo_path": "benchmark/plot.r", "max_forks_repo_name": "aDotInTheVoid/rust-evmap", "max_forks_repo_head_hexsha": "6238a4a6219b3546ceef5fc55996db954571321c", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2020-12-21T10:08:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T13:11:00.000Z", "avg_line_length": 42.046875, "max_line_length": 122, "alphanum_fraction": 0.6488294314, "num_tokens": 908}
|
import numpy as np
import os
import glob
import csv
import shutil
import tqdm as tq
from utils import *
# class Sensor(tb.IsDescription):
# number = tb.Int32Col()
# id = tb.StringCol(100)
# lat = tb.Float64Col()
# lon = tb.Float64Col()
rootPath = os.path.expanduser('~/data/storage/cense/confy/')
inputPath = rootPath+'raw/'
dataPath = rootPath+'data/'
localPath = os.path.expanduser('~/drive/experiments/data/local/censeConfy/')
def step(setting, experiment):
nbFrequencyBands = 29
nbVec = 351
sensorInfo = sensor_list('sensorList.csv')
monthPath = ['01_02_2020', '04_05_2020']
if setting.month == 'january':
monthPath = '01_02_2020'
if setting.month == 'march':
monthPath = '04_05_2020'
if setting.period in ['day', 'hour']:
dayLimit = 2
else:
dayLimit = 33
if setting.period == 'hour':
hourLimit = 2
else:
hourLimit = 25
dataId = setting.id(sort=False)
# print(dataPath+dataId)
if not os.path.exists(dataPath):
os.makedirs(dataPath)
#for setting.sensor, s in enumerate(sensorInfo): #tq.tqdm(enumerate(sensorInfo), total=len(sensorInfo)):
# print('Sensor '+str(setting.sensor)+' / '+str(len(sensorInfo)))
fileNames = []
for year in [2019, 2020]:
for month in range(13):
for day in range(dayLimit):
for hour in range(hourLimit):
f = inputPath+monthPath+'/'+sensorInfo[setting.sensor]["sID"]+'/'+str(year)+'/'+str(month)+'/'+str(day)+'/'+str(hour)+'.zip'
# print(f)
if os.path.exists(f):
fileNames.append(f)
fCount = 0
arrayTime = np.zeros((len(fileNames)*6))
arraySpec = np.zeros((len(fileNames)*6, nbVec, nbFrequencyBands))
arrayEnergy = np.zeros((len(fileNames)*6, nbVec, 2))
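# each hourly zip holds six 10-minute snapshots of nbVec = 351 frames, so
# arrayTime collects the snapshot start times, arraySpec the 29-band
# spectra (351 x 29) and arrayEnergy the two energy columns per frame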
# print(fileNames)
for inputFileName in tq.tqdm(fileNames, total=len(fileNames)):
shutil.copy(inputFileName, '/tmp/confy.zip')
os.system('unzip -qq -o -d /tmp /tmp/confy.zip')
csvFileName = os.path.basename(inputFileName).replace('zip', 'csv')
with open('/tmp/'+csvFileName, 'r') as csvfileID:
reader = csv.reader(csvfileID, delimiter=',')
data = np.zeros((nbVec, 32))
rCount = 0
for r, row in enumerate(reader):
if r%4800<nbVec: # every 10 minutes, for 45 seconds
#print(rCount)
data[rCount, :] = np.array([float(s) for s in row])
rCount +=1
elif r%4800==nbVec:
rCount = 0
arrayTime[fCount] = data[0, 0]
arrayEnergy[fCount, :, :] = data[:, 1:3]
arraySpec[fCount, :, :] = data[:, 3:][None]
fCount += 1
#print(arraySpec.shape)
os.remove('/tmp/'+csvFileName)
os.remove('/tmp/confy.zip')
np.save(dataPath+dataId+'_time.npy', arrayTime)
np.save(dataPath+dataId+'_spec.npy', arraySpec)
np.save(dataPath+dataId+'_energy.npy', arrayEnergy)
|
{"hexsha": "beefbb7976e0942ac42b6da2f37d33872701f0d3", "size": 2832, "ext": "py", "lang": "Python", "max_stars_repo_path": "prepareDataNpy.py", "max_stars_repo_name": "mathieulagrange/confy", "max_stars_repo_head_hexsha": "dd6a7e05a19b2d2e465fd781853a90bf743adbcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "prepareDataNpy.py", "max_issues_repo_name": "mathieulagrange/confy", "max_issues_repo_head_hexsha": "dd6a7e05a19b2d2e465fd781853a90bf743adbcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prepareDataNpy.py", "max_forks_repo_name": "mathieulagrange/confy", "max_forks_repo_head_hexsha": "dd6a7e05a19b2d2e465fd781853a90bf743adbcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1208791209, "max_line_length": 134, "alphanum_fraction": 0.6320621469, "include": true, "reason": "import numpy", "num_tokens": 825}
|
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
import pytz
from qstrader.signals.sma import SMASignal
@pytest.mark.parametrize(
'start_dt,lookbacks,prices,expected',
[
(
pd.Timestamp('2019-01-01 14:30:00', tz=pytz.utc),
[6, 12],
[
99.34, 101.87, 98.32, 92.98, 103.87,
104.51, 97.62, 95.22, 96.09, 100.34,
105.14, 107.49, 90.23, 89.43, 87.68
],
[96.71833333333333, 97.55]
)
]
)
def test_sma_signal(start_dt, lookbacks, prices, expected):
"""
Checks that the SMA signal correctly calculates the
simple moving average for various lookbacks.
"""
universe = Mock()
universe.get_assets.return_value = ['EQ:SPY']
sma = SMASignal(start_dt, universe, lookbacks)
for price_idx in range(len(prices)):
sma.append('EQ:SPY', prices[price_idx])
for i, lookback in enumerate(lookbacks):
assert np.isclose(sma('EQ:SPY', lookback), expected[i])
|
{"hexsha": "bc7b7b6b22c09427a3dd303a90f49e353e5e1f4d", "size": 1058, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/signals/test_sma.py", "max_stars_repo_name": "calumrussell/qstrader", "max_stars_repo_head_hexsha": "826d3eeb63b95b9d8587f5e2152c030f2c57bbba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2220, "max_stars_repo_stars_event_min_datetime": "2015-12-05T21:28:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T01:59:38.000Z", "max_issues_repo_path": "tests/unit/signals/test_sma.py", "max_issues_repo_name": "calumrussell/qstrader", "max_issues_repo_head_hexsha": "826d3eeb63b95b9d8587f5e2152c030f2c57bbba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 322, "max_issues_repo_issues_event_min_datetime": "2016-03-14T22:46:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T02:26:28.000Z", "max_forks_repo_path": "tests/unit/signals/test_sma.py", "max_forks_repo_name": "calumrussell/qstrader", "max_forks_repo_head_hexsha": "826d3eeb63b95b9d8587f5e2152c030f2c57bbba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 872, "max_forks_repo_forks_event_min_datetime": "2016-01-10T07:47:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T20:11:54.000Z", "avg_line_length": 26.45, "max_line_length": 63, "alphanum_fraction": 0.6001890359, "include": true, "reason": "import numpy", "num_tokens": 314}
|
################################################################################
#
# config.py (c) Cameron Liang
# University of Chicago
# jwliang@oddjob.uchicago.edu
#
# Read in and define all of the parameters for voigt profile based on a
# config file; see ./scripts/bvp_write_config.py for producing config file.
# or example in ./data/example/
################################################################################
import numpy as np
import os
import re
import sys
import ntpath
from bayesvp.utilities import get_transitions_params, MyParser
class DefineParams:
"""
Read and define fitting parameters from
config file
Attributes:
-----------
lines: array_like
All non-empty, non-comment lines of the config file
spec_path: str
Full path to the spectrum file
chain_short_fname: str
Name of the output mcmc chain (with .npy extension)
self_bvp_test: bool
True if it is a test run
chain_fname: str
output_path: str
mcmc_outputpath: str
output path for the MCMC chain
data_product_path: str
parent directory for data output
data_product_path_files: str
output path for ascii files such as best fits and confidence
levels
data_product_path_plots: str
output path for corner plots, model comparison and etc
nwalkers: int
Number of walkers
nsteps: int
Number of steps for each walker
nthreads: int
Number of parallel threads
wave: array
Selected region of the input spectral data
flux: array
flux of the spectrum
error: array
uncertainty of flux of the input spectrum
wave_begins: array_like
Selected wavelength regions bounds
wave_ends: array_like
Selected wavelength regions bounds
vp_params: array_like
transitions_params_array: array_like
vp_params_type: array_like
Voigt profile model parameter types, i.e [logN, b, z]
vp_params_flags: array_like
indexed flags that indicate the parameter type
priors: array_like
Priors for three types of parameters (logN, b, z)
shape = (3,2)
n_component: int
Number of Voigt components defined for the model
lsf: array_like
Line spread function to be convolved with the model
cont_normalize: bool
True if user choose to include continuum fit
cont_nparams: int
Number of parameters from the polynomial continuum model
cont_prior: array_like
All continuum parameters are limited by +/- this value
"""
def __init__(self,config_fname):
self.config_fname = config_fname
self.config_basename = ntpath.basename(config_fname)
# Read and filter empty lines
all_lines = filter(None,(line.rstrip() for line in open(config_fname)))
# Remove commented lines
self.lines = []
for line in all_lines:
if not line.startswith('#') and not line.startswith('!'):
self.lines.append(line)
########################################################################
# Retrieve MCMC parameters from config file
# --------
# self.spec_path, self.chain_fname, self.nwalkers, self.nsteps,
# self.nthreads
########################################################################
# continuum model preset to false
self.cont_normalize = False
self.cont_nparams = 0
self.cont_prior = 1.0
self.self_bvp_test = False
# Paths and fname strings
for line in self.lines:
line = list(filter(None,line.split(' ')))
if 'spec_path' in line or 'input' in line or 'spectrum' in line:
if line[1] == 'test_path_to_spec':
self.spec_path = (os.path.dirname(os.path.abspath(__file__)) +
'/data/example')
self.self_bvp_test = True
else:
self.spec_path = line[1]
elif 'outpath' in line:
self.short_outpath = line[1]
elif 'output' in line or 'chain' in line:
self.chain_short_fname = line[1]
elif 'continuum' in line or 'contdegree' in line:
self.cont_normalize = True
self.cont_nparams = int(line[1]) + 1 # n_param = poly degree + 1 (offset)
elif 'cont_prior' in line or 'contprior' in line:
if self.cont_normalize and self.cont_nparams>0:
tmp_priors = [float(i) for i in line[1:]]
# reverse direction to match order of cont params
# {a_i} in polynomial a0*x^0 + a1*x^1 + ....
tmp_priors = tmp_priors[::-1]
if len(tmp_priors) == 1:
# all parameters share the same prior
self.cont_prior = np.ones(self.cont_nparams)*tmp_priors
elif len(tmp_priors) == self.cont_nparams:
# each have its unique prior
self.cont_prior = np.array(tmp_priors)
else:
sys.exit('Please enter only 1 continuum prior or match'
' the number of continuum parameters. Exiting program..')
else:
sys.exit('Continuum fit is not set or degree is less than 0.\n')
elif 'mcmc_params' in line or 'mcmc' in line:
self.nwalkers = int(line[1])
self.nsteps = int(line[2])
self.nthreads = int(line[3])
# Default
self.model_selection = 'bic'
self.mcmc_sampler = 'kombine'
# Change keys if defined in config
for key in line[3:]:
if key in ['kombine','emcee']:
self.mcmc_sampler = key
elif key in ['aic','bic','bf']:
self.model_selection = key
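# e.g. a config line "mcmc 100 500 4 kombine bic" would request 100 walkers,
# 500 steps and 4 threads with the kombine sampler and BIC model selection
# (illustrative values)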
########################################################################
# Get the spectral data specified by the config file
# The spectrum file assumes three column of data with
# [wave,flux,error]
# ---------
# self.spec_short_fname, self.spec_fname
# self.wave, self.flux, self.error
########################################################################
for line in self.lines:
if re.search('%%',line):
spec_fname_line = line
spec_data_array = spec_fname_line.split(' ')
self.spec_short_fname = spec_data_array[1]
self.spec_fname = self.spec_path + '/' + spec_data_array[1]
# Select spectral range to fit
if len(spec_data_array[2:]) % 2 != 0:
sys.exit('There is an odd number of wavelengths entered in config file.\n Exiting program...')
else:
self.wave_begins = np.array(spec_data_array[2:][0::2]).astype(float)
self.wave_ends = np.array(spec_data_array[2:][1::2]).astype(float)
for i in range(len(self.wave_begins)):
if self.wave_begins[i] >= self.wave_ends[i]:
sys.exit('Starting wavelength cannot be greater or equal to ending wavelength: (%.3f, %.3f); exiting program...'
% (self.wave_begins[i] ,self.wave_ends[i]))
wave,flux,dflux = np.loadtxt(self.spec_fname,
unpack=True,usecols=[0,1,2])
# Select regions of interests
all_inds = []
for i in range(len(self.wave_begins)):
inds = np.where(np.logical_and(wave>=self.wave_begins[i], wave<=self.wave_ends[i]))[0]
all_inds.append(inds)
all_inds = np.hstack(np.asarray(all_inds))
wave = wave[all_inds]; flux = flux[all_inds]; dflux = dflux[all_inds]
# Remove NaN pixels in flux
inds = np.where((~np.isnan(flux)))
self.wave = wave[inds]; self.flux = flux[inds]; self.dflux = dflux[inds]
# Set negative pixels in flux and error
inds = np.where((self.flux < 0)); self.flux[inds] = 0
inds = np.where((self.dflux < 0)); self.dflux[inds] = 0
if len(self.wave) == 0 or len(self.flux) == 0 or len(self.dflux) == 0:
raise ValueError('No data within specified wavelength range.' \
'Please check config file and spectrum.')
########################################################################
# Get Voigt profile parameters of arbitary number of components
# specified in the config file.
# Uses './data/atom.dat' to read in atomic/transition data with format:
# [atom state rest_wavelength oscillator_strength damping_coeff mass_amu]
# Users can add additional row to the file for new atomic data
# --------
# self.vp_params, self.transitions_params_array, self.vp_params_flags,
# self.vp_params_type, self.n_component
########################################################################
# Lines in config file that contain the component parameters
# i.e atom, state, logN, b, z
component_lines = []
for line in self.lines:
if re.search('%',line):
component_lines.append(line)
component_lines = component_lines[1:]
logNs = []; bs = []; redshifts = []
transitions_params_array = []
for i in range(len(component_lines)):
line = component_lines[i]
line = list(filter(None,line.split(' ')))
atom = line[1]; state = line[2] # To obtain transition data
logNs.append(line[3])
bs.append(line[4])
redshifts.append(line[5])
if line[5][-1].isalpha():
self.redshift = line[5][:-1]
else:
self.redshift = line[5]
transitions_params_array.append([])
# Each component gets a set of all of the transitions data
for j in range(len(self.wave_begins)):
# each wavelength regions gets all of the transitions
try:
temp_params = get_transitions_params(atom,state,self.wave_begins[j],
self.wave_ends[j],float(self.redshift))
transitions_params_array[i].append(temp_params)
except ValueError: raise ValueError('Could not find any transitions of %s%s in wavelength range. ' % (atom,state) +
'Check redshift and wavelength range. Exiting program...')
# Shape = (n_component,n_regions,n_transitions,4)
self.transitions_params_array = np.asarray(transitions_params_array)
self.vp_params = np.transpose(np.array([logNs,bs,redshifts]))
self.n_component = len(component_lines)
# Define the type of each parameter in order to set walker initialization
# ranges and to fix or tie parameters: a lowercase letter appended to a
# value (e.g. the same '15a' logN in two components) ties those entries
# to one shared free parameter, while an uppercase letter fixes the value.
# This assumes the (logN, b, z) pattern; continuum parameters are appended later.
vp_params_type = [None]*len(self.vp_params.flatten())
vp_params_type[::3] = ['logN'] * (len(vp_params_type[::3]))
vp_params_type[1::3] = ['b'] * (len(vp_params_type[1::3]))
vp_params_type[2::3] = ['z'] * (len(vp_params_type[2::3]))
flat_params = self.vp_params.flatten()
flags = np.zeros(len(flat_params))
letters = [None]*len(flat_params)
for i in range(len(flat_params)):
for j in range(len(flat_params[i])):
if flat_params[i][j].isalpha():
letters[i] = flat_params[i][j]
unique_letters = filter(None,list(set(letters)))
n_free_params_counter = 0
for i in range(len(letters)):
if letters[i] == None:
flags[i] = n_free_params_counter
n_free_params_counter += 1
for unique_letter in unique_letters:
inds = [i for i, x in enumerate(letters) if x == unique_letter]
if unique_letter.islower():
flags[inds] = n_free_params_counter
n_free_params_counter += 1
else:
for index in inds:
flags[index] = None
# Model uses these to construct sets of (logN, b, z) for each component
self.vp_params_type = np.array(vp_params_type)
self.vp_params_flags = np.array(flags)
self.n_params = n_free_params_counter
if self.cont_normalize:
self.n_params = self.n_params + self.cont_nparams
# Make directories for data products
if self.self_bvp_test:
# write to the local directory if this is a test, to avoid permission issues in the bayesvp library location
self.output_path = '.' + '/bvp_output'
else:
#self.output_path = self.spec_path + '/bvp_output_z' + str(self.redshift)
if hasattr(self,'short_outpath'):
self.output_path = self.spec_path + '/' + self.short_outpath
else:
self.output_path = self.spec_path + '/bvp_output'
self.mcmc_outputpath = self.output_path + '/chains'
self.data_product_path = self.output_path +'/data_products'
self.data_product_path_files = self.data_product_path + '/ascii'
self.data_product_path_plots = self.data_product_path + '/plots'
self.chain_fname = self.mcmc_outputpath + '/' + self.chain_short_fname
try:
os.makedirs(self.mcmc_outputpath)
except OSError:
if not os.path.isdir(self.mcmc_outputpath):
raise
try:
os.makedirs(self.data_product_path_files)
except OSError:
if not os.path.isdir(self.data_product_path_files):
raise
try:
os.makedirs(self.data_product_path_plots)
except OSError:
if not os.path.isdir(self.data_product_path_plots):
raise
########################################################################
# Determine the LSF by specifying LSF filename with
# 'database' directory under self.spec_path.
# Assumes LSF file contains only 1 column of data
# -----------
# lsf
########################################################################
# Check if LSF is specified in config file
defined_lsf = False
for line in self.lines:
if re.search('lsf',line) or re.search('LSF',line):
lsf_line = line.split(' ')[1:]
defined_lsf = True
if not os.path.isdir(self.spec_path + '/database'):
os.mkdir(self.spec_path + '/database')
sys.exit('Require LSF file to be in %s' % self.spec_path + '/database\n Exiting program...')
break
# Get the LSF function from directory 'database'
if defined_lsf:
if len(lsf_line) == len(self.wave_begins):
self.lsf = []
for lsf_fname in lsf_line:
# assume lsf file has one column
fname = self.spec_path + '/database/' + lsf_fname
self.lsf.append(np.loadtxt(fname))
#print(fname)
elif len(lsf_line) == 1:
for lsf_fname in lsf_line:
# assume lsf file has one column
fname = self.spec_path + '/database/' + lsf_fname
self.lsf = np.loadtxt(fname)
else:
sys.exit('Please check if number of LSF matches wavelength regions. Exiting program...')
else:
# Convolve with LSF = 1
self.lsf = np.ones(len(self.wave_begins))
#######################################################################
# Read priors and use them for walker initialization
#
# format in config file:
# logN min_logN max_logN
# b min_b max_b
# z center_z min_dv max_dv (range defined by range of velocity) [km/s]
# -----------
# self.priors
#######################################################################
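# Illustrative config lines (hypothetical values):
#   logN 10 18
#   b 0 100
#   z 0.3475 300 (the +/- dv form) or z 0.3475 -300 300 (min_dv/max_dv form)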
self.priors = np.zeros((3,2))
for line in self.lines:
line = np.array(line.split(' '))
line = list(filter(None,line))
if 'logN' in line:
if len(line) != 3:
sys.exit('Error! In config file, format for logN prior:\n logN min_logN max_logN\nExiting program...')
self.priors[0] = [float(line[1]),float(line[2])]
if 'b' in line:
if len(line) != 3:
sys.exit('Error! In config file, format for b prior:\n b min_b max_b\nExiting program...')
self.priors[1] = [float(line[1]),float(line[2])]
if 'z' in line:
c = 299792.458 # speed of light [km/s]
if len(line) == 4:
center_z,min_dv,max_dv = float(line[1]),float(line[2]),float(line[3])
if min_dv == max_dv:
min_dv = -min_dv
min_z,max_z = center_z+min_dv/c,center_z+max_dv/c
elif len(line) == 3:
center_z,dv = float(line[1]),float(line[2])
min_z,max_z = center_z-dv/c,center_z+dv/c
else:
sys.exit('Error! In config file, format for z prior:\n z center_z |min_dv| |max_dv|\nor\n' +
' z center_z dv\nThe latter option will use +/- dv [km/s]')
self.priors[2] = [min_z,max_z]
def print_config_params(self):
# First copy the original config file to output path
# but replace the `test_path_to_spec` to actual output path
with open(self.output_path + '/' + self.config_basename,'w') as f_config:
# Paths and fname strings
for line in self.lines:
tmp_line = list(filter(None,line.split(' ')))
if 'spec_path' in tmp_line or 'input' in tmp_line or 'spectrum' in tmp_line:
if tmp_line[1] == 'test_path_to_spec':
cwd = os.getcwd()
print(cwd + self.output_path[1:])
f_config.write('spec_path %s\n' % (cwd + self.output_path[1:]))
else:
f_config.write('%s\n' % line)
# Also copy the spectrum to the output path in the case of a test
import shutil
shutil.copy(self.spec_fname,self.output_path)
f_logging = open(self.output_path + '/config.log','w')
f_logging.write('Config file: %s\n' % self.config_fname)
f_logging.write('Spectrum Path: %s\n' % self.spec_path)
f_logging.write('Spectrum name: %s\n' % self.spec_short_fname)
f_logging.write('Fitting %i components with transitions:\n' % self.n_component)
for i in range(len(self.transitions_params_array)):
for j in range(len(self.transitions_params_array[i])):
if not np.isnan(self.transitions_params_array[i][j]).any():
for k in range(len(self.transitions_params_array[i][j])):
rest_wavelength = self.transitions_params_array[i][j][k][1]
f_logging.write(' Transitions Wavelength: %.3f\n' % rest_wavelength)
else:
                sys.exit('No transitions satisfy the wavelength regime for fitting; check input wavelength boundaries')
        f_logging.write('Selected data wavelength region:\n')
for i in range(len(self.wave_begins)):
f_logging.write(' [%.3f, %.3f]\n' % (self.wave_begins[i],self.wave_ends[i]))
f_logging.write('MCMC Sampler: %s\n' % self.mcmc_sampler)
f_logging.write('Model selection method (if needed): %s\n' % self.model_selection)
f_logging.write('Walkers,steps,threads : %i,%i,%i\n' % (self.nwalkers,self.nsteps,self.nthreads))
f_logging.write('Priors: ')
f_logging.write('logN: [min, max] = [%.3f, %.3f]\n' % (self.priors[0][0],self.priors[0][1]))
f_logging.write('b: [min, max] = [%.3f, %.3f]\n' % (self.priors[1][0],self.priors[1][1]))
f_logging.write('redshift: [min, max] = [%.5f, %.5f]\n' % (self.priors[2][0],self.priors[2][1]))
if self.cont_normalize:
f_logging.write('Continuum polynomial degree: %i\n' % (self.cont_nparams-1))
f_logging.write('Continuum priors with +/- a_i: ')
for i in range(len(self.cont_prior)):
f_logging.write('%f\t' % self.cont_prior[i])
f_logging.write('\n')
f_logging.close()
def main():
parser = MyParser()
parser.add_argument('config_fname',help="full path to config filename", nargs='?')
parser.add_argument("-t", "--test",help="test for reading config file",
action="store_true")
parser.add_argument("-v", "--verbose",help="print config summary to file ",
action="store_true")
if len(sys.argv)==1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
if args.test:
from bayesvp.utilities import get_bayesvp_Dir
path = get_bayesvp_Dir()
config_fname = path + '/data/example/config_OVI.dat'
config_params = DefineParams(config_fname)
config_params.print_config_params()
if args.config_fname:
if os.path.isfile(args.config_fname):
config_params = DefineParams(args.config_fname)
if args.verbose:
config_params.print_config_params()
else:
sys.exit('Config file does not exist:\n %s' % args.config_fname)
if __name__ == '__main__':
sys.exit(main() or 0)
|
{"hexsha": "d7da858356db776daeab06cf2abc56095e22a5c4", "size": 22518, "ext": "py", "lang": "Python", "max_stars_repo_path": "bayesvp/config.py", "max_stars_repo_name": "ais97/bayesvp", "max_stars_repo_head_hexsha": "dedf38dd7236f040301a84e5578f7202a2da07aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bayesvp/config.py", "max_issues_repo_name": "ais97/bayesvp", "max_issues_repo_head_hexsha": "dedf38dd7236f040301a84e5578f7202a2da07aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bayesvp/config.py", "max_forks_repo_name": "ais97/bayesvp", "max_forks_repo_head_hexsha": "dedf38dd7236f040301a84e5578f7202a2da07aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6477272727, "max_line_length": 133, "alphanum_fraction": 0.5401900702, "include": true, "reason": "import numpy", "num_tokens": 4928}
|
import numpy as np
import json
from ageta import *
vae,encoder,decoder=getvaeq()
def clean(q):
    # recursively convert every leaf of a nested dict/list to a string
    # so the Keras layer configs below become JSON-serializable
if type(q) is dict:
for key in q.keys():
q[key]=clean(q[key])
return q
if type(q) is list:
for i in range(len(q)):
q[i]=clean(q[i])
return q
return str(q)
def traf(q):
    # collect each layer's config dict (plus its class name under "type")
    # for all layers of a Keras model
ll=q.layers
ret=[]
for l in ll:
c=l.get_config()
c.update({"type":str(type(l))})
c=clean(c)
ret.append(c)
return ret
with open("model.json","w") as f:
f.write(json.dumps({"encoder":traf(encoder),"decoder":traf(decoder)},indent=2))
|
{"hexsha": "a820386e454abe1ca989a83cdfd00c96f9d64968", "size": 575, "ext": "py", "lang": "Python", "max_stars_repo_path": "not_so_weird/oneoff/exportmodel.py", "max_stars_repo_name": "psorus/anogen", "max_stars_repo_head_hexsha": "86afc22718aded00bbc05e4582fa0a9b6aa3ab25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "not_so_weird/oneoff/exportmodel.py", "max_issues_repo_name": "psorus/anogen", "max_issues_repo_head_hexsha": "86afc22718aded00bbc05e4582fa0a9b6aa3ab25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "not_so_weird/oneoff/exportmodel.py", "max_forks_repo_name": "psorus/anogen", "max_forks_repo_head_hexsha": "86afc22718aded00bbc05e4582fa0a9b6aa3ab25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.6904761905, "max_line_length": 81, "alphanum_fraction": 0.5860869565, "include": true, "reason": "import numpy", "num_tokens": 174}
|
import logging
import time
import numpy as np
import matplotlib.pyplot as pt
from keysight_fpga.sd1.fpga_utils import print_fpga_info
from keysight_fpga.sd1.dig_iq import load_iq_image
from keysight_fpga.qcodes.M3202A_fpga import M3202A_fpga
from core_tools.drivers.M3102A import SD_DIG, OPERATION_MODES
from pulse_lib.base_pulse import pulselib
from core_tools.HVI2.hvi2_schedule_loader import Hvi2ScheduleLoader
import qcodes
# close objects still active since previous run (IPython)
try:
    oldLoader.close_all()
except Exception: pass  # first run: oldLoader is not defined yet
oldLoader = Hvi2ScheduleLoader
try:
    qcodes.Instrument.close_all()
except Exception: pass
def create_pulse_lib(awgs):
pulse = pulselib(backend='M3202A')
# add to pulse_lib
for i, awg in enumerate(awgs):
pulse.add_awgs(awg.name, awg)
# define channels
for ch in range(1,5):
pulse.define_channel(f'{awg.name}_{ch}', awg.name, ch)
# pulse.define_marker(f'{awg.name}_T', awg.name, 0)
pulse.finish_init()
return pulse
awg_slots = [3]
dig_slot = 6
full_scale = 2.0
t_wave = 900_000
t_pulse = 8000
pulse_duration = 100
awg_channel_los = [('AWG3',1,0), ('AWG3',1,1), ('AWG3',3,0), ('AWG3',3,1)]
awg_lo_amps = [600, 200]
awg_lo_freq = [60e6, 190e6]
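# Each awg_channel_los entry is (awg_name, channel, lo_index); lo_index picks
# the amplitude/frequency from awg_lo_amps/awg_lo_freq above (read off the
# config_lo() call in the setup loop below).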
switch_los = False
dig_channel_modes = {1:1, 2:2, 3:2}
t_measure = 1000
t_average = 10
p2decim = 0
lo_f = [0, 0, 60e6, 190e6, 0]
n_rep = 5
awgs = []
for i, slot in enumerate(awg_slots):
awg = M3202A_fpga(f'AWG{slot}', 1, slot)
awg.set_digital_filter_mode(0)
awgs.append(awg)
dig = SD_DIG('DIG1', 1, dig_slot)
load_iq_image(dig.SD_AIN)
print_fpga_info(dig.SD_AIN)
dig.set_operating_mode(OPERATION_MODES.HVI_TRG)
for awg in awgs:
for awg_name, channel, lo in awg_channel_los:
if awg_name == awg.name:
awg.config_lo(channel, lo, not switch_los, awg_lo_freq[lo], awg_lo_amps[lo])
awg.set_lo_mode(channel, True)
## add to pulse lib.
p = create_pulse_lib(awgs)
schedule = Hvi2ScheduleLoader(p, "SingleShot", dig, switch_los=switch_los)
## create waveforms
seg = p.mk_segment()
for awg in awgs:
for ch in [1,2,3,4]:
channel = getattr(seg, f'{awg.name}_{ch}')
channel.wait(t_wave)
channel.add_block(t_pulse, t_pulse+pulse_duration, 800)
seg.add_HVI_marker('dig_trigger_1', t_off=t_pulse-100)
seg.add_HVI_marker('awg_los_on_1', t_off=t_pulse-100)
seg.add_HVI_marker('awg_los_off_1', t_off=t_pulse-100+t_measure)
## create sequencer
sequencer = p.mk_sequence([seg])
sequencer.set_hw_schedule(schedule)
sequencer.n_rep = n_rep
for ch, mode in dig_channel_modes.items():
dig.set_lo(ch, lo_f[ch], 0, input_channel=1)
#config_fpga_debug_log(dig.SD_AIN,
# #change_mask = 0x9F00_8585,
# enable_mask=0xC000_0000,
## enable_mask=0xC038_0505,
## capture_start_mask=0x8800_4141, capture_duration=1
# )
sequencer.upload(index=[0])
for ch, mode in dig_channel_modes.items():
dig.set_channel_acquisition_mode(ch, mode)
dig.set_channel_properties(ch, full_scale)
dig.set_daq_settings(ch, n_rep, t_measure, downsampled_rate=1e9/t_average, power2decimation=p2decim)
sequencer.play(index=[0])
data = dig.measure.get_data()
for awg in awgs:
for awg_name, channel, los in awg_channel_los:
if awg.name == awg_name:
awg.set_lo_mode(channel, False)
#print_fpga_log(dig.SD_AIN)
#for awg in awgs:
# print(f'AWG: {awg.name}')
# print_fpga_log(awg.awg, clock200=True)
dig_data = [None]*4
for ch in dig_channel_modes:
c = ch-1
dig_data[c] = data[c].flatten()
print(f'ch{ch}: {len(dig_data[c])}')
### plots
#colors = ['k', 'b','r', 'c', 'y']
#colors = ['k', 'tab:blue', 'k', 'yellow', 'tomato']
colors = ['k', 'tab:blue', 'tab:orange', 'tab:green', 'tab:red']
# plot direct data
if 0 in dig_channel_modes.values():
pt.figure(5)
pt.clf()
for ch, mode in dig_channel_modes.items():
if mode == 0:
pt.figure(ch)
c = ch-1
t = (np.arange(len(dig_data[c])) + 0.5) * 2
pt.plot(t, dig_data[c])
pt.figure(5)
            pt.plot(t, dig_data[c], '-', ms=4, label=f'direct 500 MSa/s', color=colors[ch])
if 1 in dig_channel_modes.values():
pt.figure(6)
pt.clf()
# plot averages
for ch, mode in dig_channel_modes.items():
if mode == 1:
c = ch-1
t = (np.arange(len(dig_data[c])) + 0.5) * t_average
pt.figure(ch)
pt.plot(t, dig_data[c], '-', label=f'p2d={p2decim}, {1000/t_average} MSa/s')
# pt.ylim(-0.8, 0.8)
pt.legend()
pt.figure(6)
pt.plot(t, dig_data[c], '-', ms=4, color=colors[ch],
label=f'p2d={p2decim}, {1000/t_average} MSa/s')
pt.legend()
# pt.ylim(-0.8, 0.8)
if 2 in dig_channel_modes.values() or 3 in dig_channel_modes.values():
## plot IQ
for ch, mode in dig_channel_modes.items():
if mode in [2,3]:
c = ch-1
t = (np.arange(len(dig_data[c])) + 0.5) * t_average
pt.figure(20)
pt.plot(t, dig_data[c].real, label=f'ch{ch} I')
pt.legend()
pt.figure(10+ch)
pt.plot(t, dig_data[c].real, label=f'ch{ch} I')
if mode == 2:
pt.plot(t, dig_data[c].imag, label=f'ch{ch} Q')
pt.legend()
pt.figure(30)
pt.plot(t, dig_data[c].imag, label=f'ch{ch} Q')
pt.legend()
pt.figure(9)
phase = np.angle(dig_data[c], deg=True)
jitter = (phase - np.average(phase))/360/lo_f[ch] * 1e12
pt.plot(t, jitter, label=f'ch{ch}')
pt.ylabel('[ps]')
pt.title('Jitter')
pt.figure(7)
pt.plot(t, np.abs(dig_data[c]),
label=f'ch{ch} p2d={p2decim}, {1000/t_average} MSa/s')
pt.legend()
pt.title('Amplitude')
pt.figure(8)
pt.plot(t, np.angle(dig_data[c], deg=True),
label=f'ch{ch} p2d={p2decim}, {1000/t_average} MSa/s')
pt.legend()
pt.title('Phase')
schedule.close()
for awg in awgs:
awg.close()
dig.close()
|
{"hexsha": "6789350f3425b15883204101c0fd0a3f3895f015", "size": 6307, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/hvi2/test_single_shot_rf.py", "max_stars_repo_name": "peendebak/core_tools", "max_stars_repo_head_hexsha": "2e43edf0bbc1d7ceb7042559db499535e8f6a076", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-11T09:24:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T09:24:35.000Z", "max_issues_repo_path": "examples/hvi2/test_single_shot_rf.py", "max_issues_repo_name": "peendebak/core_tools", "max_issues_repo_head_hexsha": "2e43edf0bbc1d7ceb7042559db499535e8f6a076", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/hvi2/test_single_shot_rf.py", "max_forks_repo_name": "peendebak/core_tools", "max_forks_repo_head_hexsha": "2e43edf0bbc1d7ceb7042559db499535e8f6a076", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-06T14:31:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T13:57:19.000Z", "avg_line_length": 28.0311111111, "max_line_length": 104, "alphanum_fraction": 0.6050420168, "include": true, "reason": "import numpy", "num_tokens": 1940}
|
module Splines
using EllipsisNotation
include("Utils.jl")
include("Polynomials.jl")
include("Visuals.jl")
import .Polynomials: bernstein, N_basis
import .Visuals: plot_curve, plot_nodes, plot_surface
import .Utils: get_curve_parameters, ∑, dimensions, nnodes
export plot_curve, plot_nodes, plot_surface
export Bezier, BSpline, AbstractSpline, AbstractBezier
export ⊗, ∑
# ---------------------------------------------------------------------------- #
# Abstract types #
# ---------------------------------------------------------------------------- #
abstract type AbstractSpline end
abstract type AbstractBezier <: AbstractSpline end
abstract type AbstractBSpline <: AbstractSpline end
# ---------------------------------- Methods --------------------------------- #
function Base.show(io::IO, s::AbstractSpline)
print(io, "Spline (nodes: $(s.η), N: $(s.N))")
end
"""
Spline(t, v, ...)
Evaluates an N-dimensional spline (with N > 1) at a set of N parameter
values t, v, ..., with each parameter `t ∈ [0, 1]`
"""
function (bez::AbstractSpline)(params...)
if length(params) != bez.N
error("Number of parameters doesnt match the dimensionality of the spline surface")
end
sum((n)->bez.inner_fn[n](params[n]), 1:length(params))
end
# ---------------------------------------------------------------------------- #
# BSPLINES #
# ---------------------------------------------------------------------------- #
struct BSpline <: AbstractBSpline
nodes::AbstractArray # coordinates of nodes (d x N (x M ...))
coordinates::AbstractArray # coordinates of points along the spline
weights::AbstractArray # weights, shape N x M ...
knots::AbstractArray
degree::Int
    η  # shape of the nodes -1 | degree (linear, quadratic, cubic...)
d::Int # dimensionality of the Euclidean space
N::Int # spline dimensionality: 1 - curve, 2 - surface...
inner_fn # a collection of lower dimensionality bezier curves
end
# --------------------------- knots initialization --------------------------- #
"""
uniform(n, d)
Defines n+d+2 uniform knots ∈[0,1] for b-spline interpolation.
For i ∈ 0:n+d+1:
if i ≤ d: k=0
if d+1 ≤ i ≤ n: k=(i-d)/(n+1-d)
else: k = 1
See: https://www.geometrictools.com/Documentation/BSplineCurveLeastSquaresFit.pdf
"""
function uniform(n::Int, d::Int)::AbstractArray
if d > n
@warn "When using B-splines, the degree should be < the number of control nodes"
end
knots = zeros(n+d+1+1) # second 1 is because start from 0
for i in 0:n+d+1
if i ≤ d
knots[i+1] = 0
elseif (d+1) ≤ i ≤ n
knots[i+1] = (i-d)/(n+1-d)
else
knots[i+1] = 1
end
end
return knots
end
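# Worked example (hand-checked against the loop above): for n = 4 control
# nodes and degree d = 2, `uniform(4, 2)` returns the clamped knot vector
#   [0, 0, 0, 1/3, 2/3, 1, 1, 1]
# i.e. d+1 leading zeros, interior knots (i-d)/(n+1-d), and d+1 trailing ones.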
"""
periodic(n, d)
Defines n+d+2 uniformly spaced periodic knots for b-spline interpolation.
See: https://www.geometrictools.com/Documentation/BSplineCurveLeastSquaresFit.pdf
"""
function periodic(n::Int, d::Int)::AbstractArray
if d > n
@warn "When using B-splines, the degree should be < the number of control nodes"
end
map((i)->(i-d)/(n+1-d), 0:n+d+1)
end
# ------------------------------- Constructors ------------------------------- #
"""
    BSpline(nodes::AbstractArray, degree::Int; knots_type::Symbol=:uniform, δt=0.025)::BSpline
Constructor for a (non rational) B-Spline curve (N=1).
"""
function BSpline(nodes::AbstractArray, degree::Int; knots_type::Symbol=:uniform, δt=0.025)::BSpline
weights = ones(size(nodes)...)
return BSpline(nodes, weights, degree; knots_type= knots_type, δt=δt)
end
"""
    BSpline(nodes::AbstractArray, weights::AbstractArray, degree::Int; knots_type::Symbol=:uniform, δt=0.025)::BSpline
Constructor for a rational B-Spline curve (N=1).
"""
function BSpline(nodes::AbstractArray, weights::AbstractArray, degree::Int; knots_type::Symbol=:uniform, δt=0.025)::BSpline
    # get knots: dispatch to the knot-vector constructor named by `knots_type` (:uniform or :periodic)
η = size(nodes, 2) - 1
knots = eval(:($knots_type($η, $degree)))
return BSpline(nodes, weights, knots, degree; δt= δt)
end
"""
    BSpline(nodes::AbstractArray, weights::AbstractArray, knots::AbstractArray, degree::Int; δt=0.025)::BSpline
Constructor for a rational B-Spline given a set of knots
"""
function BSpline(nodes::AbstractArray, weights::AbstractArray, knots::AbstractArray, degree::Int; δt=0.025)::BSpline
# get params
τ, N, η, d = get_curve_parameters(nodes, δt)
# compute coordinates
numerator(i) = N_basis(τ; k=knots, i=i, d=degree)' .* nodes[:, i+1] .* weights[i+1]
denominator(i) = N_basis(τ; k=knots, i=i, d=degree)' .* weights[i+1]
coordinates = ∑(numerator, 0:η) ./ ∑(denominator, 0:η)
return BSpline(
nodes, coordinates, weights, knots, degree, η, d, N, nothing
)
end
"""
BSpline(t)
Function to evaluate a B Spline curve at a parameter `t ∈ [0, 1]`
"""
function (bspline::AbstractBSpline)(t)
    numerator(i) = N_basis(t; k=bspline.knots, i=i, d=bspline.degree)' .* bspline.nodes[:, i+1] .* bspline.weights[i+1]
    denominator(i) = N_basis(t; k=bspline.knots, i=i, d=bspline.degree)' .* bspline.weights[i+1]
∑(numerator, 0:bspline.η) ./ ∑(denominator, 0:bspline.η)
end
# ---------------------------------------------------------------------------- #
# BEZIER #
# ---------------------------------------------------------------------------- #
"""
Bezier
Struct of type Bezier <: AbstractBezier.
Contains all information about an N-dimensional Bezier surface.
For `deg=1` we have a Bezier curve and:
`nodes`: `dxN` array of nodes coordinates
`η`: dimensionality of the nodes (`N-1`)
`coordinates`: d x K points with the coordinates of the Bezier curve.
for `deg>1` the type represents `deg` dimensional Bezier surfaces
"""
struct Bezier <: AbstractBezier
nodes::AbstractArray # coordinates of nodes (d x N (x M ...))
coordinates::AbstractArray # coordinates of points along the spline
weights::AbstractArray # weights, shape N x M ...
    η  # shape of the nodes -1 | degree (linear, quadratic, cubic...)
d::Int # dimensionality of the Euclidean space
N::Int # spline dimensionality: 1 - curve, 2 - surface...
inner_fn # a collection of lower dimensionality bezier curves
end
# ------------------------------- constructors ------------------------------- #
"""
Bezier(nodes; δt=0.05)
Constructor for a degree 1 (`n=1`) `Bezier` curve defined by a set of nodes
"""
function Bezier(nodes::AbstractArray; δt=0.025)::Bezier
weights = ones(size(nodes)...)
return Bezier(nodes, weights; δt=δt)
end
"""
Bezier(nodes::AbstractArray, weights::AbstractArray; δt = 0.025)
Constructor for a degree 1 rational bezier.
"""
function Bezier(nodes::AbstractArray, weights::AbstractArray; δt = 0.025)::Bezier
τ, N, η, d = get_curve_parameters(nodes, δt)
numerator(i) = bernstein(τ; i=i, n=η)' .* nodes[:, i+1] .* weights[i+1]
denominator(i) = bernstein(τ; i=i, n=η)' .* weights[i+1]
coordinates = ∑(numerator, 0:η) ./ ∑(denominator, 0:η)
return Bezier(
nodes, coordinates, weights, η, d, N, nothing
)
end
# ---------------------------- function evaluation --------------------------- #∂
"""
Bezier(t)
Evaluate a `Bezier` curve (N=1) at parameter value `t ∈ [0,1]`
"""
function (bez::AbstractBezier)(t)
numerator(i) = bernstein(t; i=i, n=bez.η)' .* bez.nodes[:, i+1] .* bez.weights[i+1]
denominator(i) = bernstein(t; i=i, n=bez.η)' .* bez.weights[i+1]
∑(numerator, 0:bez.η) ./ ∑(denominator, 0:bez.η)
end
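# The evaluation above is the rational Bézier form
#   B(t) = ∑_i b_{i,η}(t) wᵢ Pᵢ / ∑_i b_{i,η}(t) wᵢ
# with b_{i,η} the Bernstein basis polynomials; with all weights equal to one
# it reduces to the ordinary (polynomial) Bézier curve.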
# ---------------------------------------------------------------------------- #
# Tensor Product #
# ---------------------------------------------------------------------------- #
"""
×(x::AbstractArray, y::AbstractArray)
Cartesian product between two arrays, without the 3 Euclidean coordinates.
Given a N x M... array `x` and a P x Q array `y` it returns an array of shape
N x M... x P x Q... with the product of the two arrays.
"""
function ×(x::AbstractArray, y::AbstractArray)::AbstractArray
if ndims(x) == ndims(y) == 1
return x .* y'
else
# initialize an array of the appropriate shape
N = zeros(size(x)..., size(y)...)
for Ix in CartesianIndices(x), Iy in CartesianIndices(y)
N[Ix, Iy] = x[Ix] .+ y[Iy]
end
end
return N
end
"""
⊗(x::AbstractArray, y::AbstractArray)
Tensor product between two arrays.
Given x as a d x N x M... and y as a d x P x Q...
it returns an array of shape d x N x M .. x P x Q x ...
with the product of the two input arrays.
This is done by iterating over the dimension d using Einstein summation.
Then each N x M... and P x Q... array is repeated/permuted
to create two N x M... x P x Q... arrays, which are then summed.
"""
function ⊗(x::AbstractArray, y::AbstractArray)::AbstractArray
if ndims(x) == ndims(y) == 1
return x .* y'
else
# initialize an array of the appropriate shape
N = zeros(3, dimensions(x) ..., dimensions(y)...)
for Ix in CartesianIndices(x[1, ..]), Iy in CartesianIndices(y[1, ..])
N[:, Ix, Iy] = x[:, Ix] .+ y[:, Iy]
end
end
return N
end
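# Shape sketch (assuming `dimensions` strips the leading Euclidean axis):
# x of size (3, 4) ⊗ y of size (3, 5) allocates a (3, 4, 5) array with
# N[:, i, j] = x[:, i] .+ y[:, j].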
"""
⊗(b1::AbstractBezier, b2::AbstractBezier)::AbstractBezier
Tensor product of two beziers
"""
function ⊗(b1::T, b2::T) where T <: AbstractBezier
# get nodes
nodes = b1.nodes ⊗ b2.nodes
n, m = nnodes(nodes)
# get weights
weights = b1.weights ⊗ b2.weights
# get coordinates
coordinates = b1.coordinates ⊗ b2.coordinates
# concatenate inner functions
    β1 = isnothing(b1.inner_fn) ? [b1] : b1.inner_fn
    β2 = isnothing(b2.inner_fn) ? [b2] : b2.inner_fn
return T(nodes, coordinates, weights, (n, m), b1.d, b1.N+b2.N, hcat(β1, β2))
end
"""
⊗(b1::AbstractBSpline, b2::AbstractBSpline)::AbstractBSpline
Tensor product of two B-splines
"""
function ⊗(b1::T, b2::T) where T <: AbstractBSpline
if b1.degree != b2.degree
@warn "BSpline ⊗ BSpline, '⊗' is only defined for B Splines with the same degree"
end
# get nodes
nodes = b1.nodes ⊗ b2.nodes
n, m = nnodes(nodes)
# get knots
knots = b1.knots × b2.knots
# get weights
weights = b1.weights ⊗ b2.weights
# get coordinates
coordinates = b1.coordinates ⊗ b2.coordinates
# concatenate inner functions
    β1 = isnothing(b1.inner_fn) ? [b1] : b1.inner_fn
    β2 = isnothing(b2.inner_fn) ? [b2] : b2.inner_fn
return T(nodes, coordinates, weights, knots, b1.degree, (n, m), b1.d, b1.N+b2.N, hcat(β1, β2))
end
end
|
{"hexsha": "f88b8af3c568470452293d6c2348dd1270a4d97c", "size": 12160, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Splines/src/Splines.jl", "max_stars_repo_name": "FedeClaudi/Splines", "max_stars_repo_head_hexsha": "a475aa4e423b4efe71363a5833a1709a7d59b6ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-02T16:55:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T16:55:13.000Z", "max_issues_repo_path": "Splines/src/Splines.jl", "max_issues_repo_name": "FedeClaudi/Splines", "max_issues_repo_head_hexsha": "a475aa4e423b4efe71363a5833a1709a7d59b6ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Splines/src/Splines.jl", "max_forks_repo_name": "FedeClaudi/Splines", "max_forks_repo_head_hexsha": "a475aa4e423b4efe71363a5833a1709a7d59b6ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6598240469, "max_line_length": 130, "alphanum_fraction": 0.5106085526, "num_tokens": 3137}
|
### A Pluto.jl notebook ###
# v0.12.21
using Markdown
using InteractiveUtils
# ╔═╡ b6b1b198-7bd8-11eb-10f5-a98dd0a59fcf
begin
cd("..")
using Pkg
Pkg.activate(".")
end
# ╔═╡ 594227e6-7bd9-11eb-3ffb-ab8e6e7b4d95
begin
using ColabINAOE2021
using PyPlot
using SparseArrays
using LinearAlgebra
end
# ╔═╡ 2141105e-7bda-11eb-3519-1d5cff10d945
ion()
# ╔═╡ 6310aeb4-7bd9-11eb-0bd4-81a074081964
# Global Constants
begin
k = 1
N = 32
end;
# ╔═╡ 81e6e088-7bd9-11eb-17a6-03ef56dc026b
# Initial amplitude vector constructors
pint(q) = sparsevec(Dict(q + 1 => 1), N + 1); # Single amplitude point at 0 ≤ q ≤ N
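# e.g. pint(8) is a length-(N+1) sparse vector with a single unit amplitude at q = 8 (0-based).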
# ╔═╡ e3ceb6d0-7bda-11eb-115f-4584602f29df
# Initial amplitude vector
q0 = normalize(pint(8) + pint(24));
# ╔═╡ 203134d6-7bdb-11eb-2df1-374c190cc45b
t_list = range(0, stop = 61, length = 200);
# ╔═╡ b2181d84-7fcf-11eb-2400-23cdf5b8fff7
md"### 1. Circular array"
# ╔═╡ b49a93f8-806b-11eb-2dc1-3f8e020d0f8c
evolc = ColabINAOE2021.Qc(1, t_list, Array(q0));
# ╔═╡ e30513a6-7bdb-11eb-0e31-cd6f16283698
begin
fig_1 = figure(figsize = (10,5))
ax_1 = gca()
ax_1.imshow(evolc)
ax_1.set_xlabel("Evolution time: t")
ax_1.set_ylabel("Element index: n")
ax_1.set_aspect("2")
ax_1.set_xticks([0,33,66,99,132,165,200])
ax_1.set_xticklabels([0,10,20,30,40,50,60])
pcm1 = ax_1.get_children()[10]
cb1 = colorbar(pcm1,orientation="vertical",shrink=0.7,aspect=35,fraction=0.015)
tight_layout()
#savefig("notebooks/circ.png",dpi=150,transparent=true)
end
# ╔═╡ 07fccfdc-7bdc-11eb-1a00-a345a06f8517
sum(abs2.(evolc))
# ╔═╡ ac233d02-7fcf-11eb-1640-c172d67a0cfc
md"### 2. Lineal array"
# ╔═╡ cbc567c2-8095-11eb-0996-05753de896af
evoll = ColabINAOE2021.Ql(1, t_list, Array(q0));
# ╔═╡ fb886f34-8095-11eb-0b4f-e58f8dcb8964
begin
fig_2 = figure(figsize = (10,5))
ax_2 = gca()
ax_2.imshow(evoll)
ax_2.set_xlabel("Evolution time: t")
ax_2.set_ylabel("Element index: n")
ax_2.set_aspect("2")
ax_2.set_xticks([0,33,66,99,132,165,200])
ax_2.set_xticklabels([0,10,20,30,40,50,60])
pcm2 = ax_2.get_children()[10]
cb2 = colorbar(pcm2,orientation="vertical",shrink=0.7,aspect=35,fraction=0.015)
tight_layout()
#savefig("notebooks/lin.png",dpi=150,transparent=true)
end
# ╔═╡ 64040048-8096-11eb-2c6f-ed400b9dbf42
md"### 3. Kravchuk array "
# ╔═╡ 6ec1d92e-8096-11eb-3980-7107878eecb7
evolk = ColabINAOE2021.Qk(t_list, Array(q0));
# ╔═╡ 7f1a6296-8096-11eb-12b1-a14e3a7eb885
begin
fig_3 = figure(figsize = (10,5))
ax_3 = gca()
ax_3.imshow(evolk)
ax_3.set_xlabel("Evolution time: t")
ax_3.set_ylabel("Element index: n")
ax_3.set_aspect("2")
ax_3.set_xticks([0,33,66,99,132,165,200])
ax_3.set_xticklabels([0,10,20,30,40,50,60])
	pcm3 = ax_3.get_children()[10]
cb3 = colorbar(pcm3,orientation="vertical",shrink=0.7,aspect=35,fraction=0.015)
tight_layout()
#savefig("notebooks/krav.png",dpi=150,transparent=true)
end
# ╔═╡ Cell order:
# ╠═b6b1b198-7bd8-11eb-10f5-a98dd0a59fcf
# ╠═594227e6-7bd9-11eb-3ffb-ab8e6e7b4d95
# ╠═2141105e-7bda-11eb-3519-1d5cff10d945
# ╠═6310aeb4-7bd9-11eb-0bd4-81a074081964
# ╠═81e6e088-7bd9-11eb-17a6-03ef56dc026b
# ╠═e3ceb6d0-7bda-11eb-115f-4584602f29df
# ╠═203134d6-7bdb-11eb-2df1-374c190cc45b
# ╟─b2181d84-7fcf-11eb-2400-23cdf5b8fff7
# ╠═b49a93f8-806b-11eb-2dc1-3f8e020d0f8c
# ╠═e30513a6-7bdb-11eb-0e31-cd6f16283698
# ╠═07fccfdc-7bdc-11eb-1a00-a345a06f8517
# ╟─ac233d02-7fcf-11eb-1640-c172d67a0cfc
# ╠═cbc567c2-8095-11eb-0996-05753de896af
# ╠═fb886f34-8095-11eb-0b4f-e58f8dcb8964
# ╟─64040048-8096-11eb-2c6f-ed400b9dbf42
# ╠═6ec1d92e-8096-11eb-3980-7107878eecb7
# ╠═7f1a6296-8096-11eb-12b1-a14e3a7eb885
|
{"hexsha": "53cb8a0c02cf08f4c49f99359bd61aa1eaf3bea4", "size": 3554, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "notebooks/MArrays_Pluto.jl", "max_stars_repo_name": "rurz/ColabINAOE2021", "max_stars_repo_head_hexsha": "539bf6ea662a95706734958663dc5bca278d5298", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/MArrays_Pluto.jl", "max_issues_repo_name": "rurz/ColabINAOE2021", "max_issues_repo_head_hexsha": "539bf6ea662a95706734958663dc5bca278d5298", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/MArrays_Pluto.jl", "max_forks_repo_name": "rurz/ColabINAOE2021", "max_forks_repo_head_hexsha": "539bf6ea662a95706734958663dc5bca278d5298", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7536231884, "max_line_length": 83, "alphanum_fraction": 0.7228474958, "num_tokens": 1764}
|
module Functional
using LinearAlgebra: Matrix, zero, similar
using LinearAlgebra, Printf
# using Roots
# using Optim
using Quadmath
# using ProfileView
# using InteractiveUtils
using ..Discrete
# const Float = Float64
# const Float = BigFloat
const Float = Float128
include("kernel.jl")
include("findmax.jl")
# using Plots
# function plotResidual(basis, proj, gmin, gmax, candidate=nothing, residual=nothing)
# ω = LinRange(gmin, gmax, 1000)
# y = [Residual(basis, proj, w) for w in ω]
# p = plot(ω, y, xlims=(gmin, gmax))
# if isnothing(candidate) == false
# plot!(p, candidate, residual, seriestype=:scatter)
# end
# display(p)
# readline()
# end
mutable struct Basis
Λ::Float
rtol::Float
    N::Int # number of basis functions
grid::Vector{Float} # grid for the basis
residual::Vector{Float} # achieved error by each basis
Q::Matrix{Float} # K = Q*R
proj::Matrix{Float} # the overlap of basis functions <K(g_i), K(g_j)>
candidate::Vector{Float}
candidateResidual::Vector{Float}
function Basis(Λ, rtol)
_Q = Matrix{Float}(undef, (0, 0))
return new(Λ, rtol, 0, [], [], _Q, similar(_Q), [], [])
end
end
function addBasis!(basis, proj, g0::Float)
basis.N += 1
if basis.N == 1
idx = 1
basis.grid = [g0,]
basis.Q = zeros(Float, (basis.N, basis.N))
basis.Q[1, 1] = 1 / sqrt(proj(basis.Λ, g0, g0))
basis.proj = projKernel(basis, proj)
else
idxList = findall(x -> x > g0, basis.grid)
        # if g0 is larger than all existing frequencies, then idxList is empty
idx = length(idxList) == 0 ? basis.N : idxList[1] # the index to insert the new frequency
insert!(basis.grid, idx, g0)
basis.proj = projKernel(basis, proj)
_Q = copy(basis.Q)
basis.Q = zeros(Float, (basis.N, basis.N))
basis.Q[1:idx-1, 1:idx-1] = _Q[1:idx-1, 1:idx-1]
basis.Q[1:idx-1, idx+1:end] = _Q[1:idx-1, idx:end]
basis.Q[idx+1:end, 1:idx-1] = _Q[idx:end, 1:idx-1]
basis.Q[idx+1:end, idx+1:end] = _Q[idx:end, idx:end]
# println(maximum(abs.(GramSchmidt(basis, idx, g0) .- mGramSchmidt(basis, idx, g0))))
basis.Q[idx, :] = mGramSchmidt(basis, idx, g0)
end
scanResidual!(basis, proj, g0, idx)
insert!(basis.residual, idx, maximum(basis.candidateResidual)) # record error after the new grid is added
return idx
end
function scanResidual!(basis, proj, g0, idx)
grids = copy(basis.grid)
if basis.grid[1] > Float(0)
insert!(grids, 1, Float(0))
end
if basis.grid[end] < basis.Λ
append!(grids, basis.Λ)
end
resize!(basis.candidate, length(grids) - 1)
resize!(basis.candidateResidual, length(grids) - 1)
# println(g0, " and ", idx)
# println(grids)
    for i = 1:length(grids)-1 # because of the separation of scales, grids far away from idx are rarely affected
g = findCandidate(basis, proj, grids[i], grids[i+1])
basis.candidate[i] = g
basis.candidateResidual[i] = Residual(basis, proj, g)
end
end
function printCandidate(basis, idx)
lower = (idx == 1) ? 0 : basis.grid[idx-1]
upper = (idx == basis.N) ? basis.Λ : basis.grid[idx+1]
@printf("%3i : ω=%24.8f ∈ (%24.8f, %24.8f) -> error=%24.16g\n", basis.N, basis.grid[idx], lower, upper, basis.residual[idx])
end
function QR(Λ, rtol, proj, g0; N = nothing)
basis = Basis(Λ, rtol)
# println(g0)
for g in g0
idx = addBasis!(basis, proj, Float(g))
# @printf("%3i : ω=%24.8f ∈ (%24.8f, %24.8f) -> error=%24.16g\n", 1, g, 0, Λ, basis.residual[idx])
printCandidate(basis, idx)
end
# @code_warntype Residual(basis, proj, Float(1.0))
# exit(0)
maxResidual, ωi = findmax(basis.candidateResidual)
# plotResidual(basis, proj, Float(0), Float(100), basis.candidate, basis.candidateResidual)
while isnothing(N) ? maxResidual > rtol / 10 : basis.N < N
newω = basis.candidate[ωi]
idx = addBasis!(basis, proj, newω)
printCandidate(basis, idx)
# println(length(basis.grid))
# println(idx)
# lower = (idx == 1) ? 0 : basis.grid[idx - 1]
# upper = (idx == basis.N) ? Λ : basis.grid[idx + 1]
# @printf("%3i : ω=%24.8f ∈ (%24.8f, %24.8f) -> error=%24.16g\n", basis.N, newω, lower, upper, basis.residual[idx])
# println("$(length(freq)) basis: ω=$(Float64(newω)) between ($(Float64(freq[idx - 1])), $(Float64(freq[idx + 1])))")
# plotResidual(basis, proj, Float(0), Float(100), candidate, residual)
maxResidual, ωi = findmax(basis.candidateResidual)
end
testOrthgonal(basis)
# @printf("residual = %.10e, Fnorm/F0 = %.10e\n", residual, residualF(freq, Q, Λ))
@printf("residual = %.10e\n", maximum(basis.candidateResidual))
# plotResidual(basis, proj, Float(0), Float(100), basis.candidate, basis.candidateResidual)
return basis
end
"""
q1=sum_j c_j K_j
q2=sum_k d_k K_k
return <q1, q2> = sum_jk c_j*d_k <K_j, K_k>
"""
projqq(basis, q1::Vector{Float}, q2::Vector{Float}) = q1' * basis.proj * q2
"""
<K(g_i), K(g_j)>
"""
function projKernel(basis, proj)
K = zeros(Float, (basis.N, basis.N))
for i = 1:basis.N
for j = 1:basis.N
K[i, j] = proj(basis.Λ, basis.grid[i], basis.grid[j])
end
end
return K
end
"""
modified Gram-Schmidt process
"""
function mGramSchmidt(basis, idx, g::Float)
qnew = zeros(Float, basis.N)
qnew[idx] = 1
for qi = 1:basis.N
if qi == idx
continue
end
q = basis.Q[qi, :]
qnew -= projqq(basis, q, qnew) .* q # <q, qnew> q
end
return qnew / sqrt(projqq(basis, qnew, qnew))
end
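# Note on the loop above: the inner product between coefficient vectors is
# projqq(basis, q1, q2) = q1' * proj * q2, so each subtraction removes the
# K-weighted projection onto an already-orthonormal q, using the updated qnew
# each time — the numerically stabler "modified" variant of Gram-Schmidt.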
# """
# Gram-Schmidt process
# """
# function GramSchmidt(basis, idx, g::Float)
# q0 = zeros(Float, basis.N)
# q0[idx] = 1
# qnew = copy(q0)
# for qi in 1:basis.N
# if qi == idx
# continue
# end
# q = basis.Q[qi, :]
# qnew -= projqq(basis, q, q0) .* q
# end
# norm = sqrt(projqq(basis, qnew, qnew))
# return qnew / norm
# end
function Residual(basis, proj, g::Float)
# norm2 = proj(g, g) - \sum_i (<qi, K_g>)^2
# qi=\sum_j Q_ij K_j ==> (<qi, K_g>)^2 = (\sum_j Q_ij <K_j, K_g>)^2 = \sum_jk Q_ij*Q_ik <K_j, K_g>*<K_k, Kg>
KK = [proj(basis.Λ, gj, g) for gj in basis.grid]
norm2 = proj(basis.Λ, g, g) - (norm(basis.Q * KK))^2
# norm2 = proj(basis.Λ, g, g)
# for j in 1:basis.N
# norm2 -= basisQ[j, :]
# end
return norm2 < 0 ? Float(0) : sqrt(norm2)
end
function testOrthgonal(basis)
println("testing orthognalization...")
II = basis.Q * basis.proj * basis.Q'
maxerr = maximum(abs.(II - I))
println("Max Orthognalization Error: ", maxerr)
end
"""
function build(dlrGrid, print::Bool = true)
Construct discrete Lehmann representation
#Arguments:
- `dlrGrid`: struct that contains the information to construct the DLR grid. The following entries are required:
Λ: the dimensionless scale β*Euv, rtol: the required relative accuracy, isFermi: fermionic or bosonic, symmetry: particle-hole symmetry/antisymmetry or none
- `print`: print the internal information or not
"""
function build(dlrGrid, print::Bool = true)
print && println("Using the functional algorithm to build DLR ...")
Λ = Float(dlrGrid.Λ)
rtol = Float(dlrGrid.rtol)
symmetry = dlrGrid.symmetry
if symmetry == :ph
print && println("Building ω grid ... ")
ωBasis = QR(Λ, rtol, projPH_ω, [Float(0), Float(Λ)])
ωGrid = ωBasis.grid
rank = ωBasis.N
elseif symmetry == :pha
print && println("Building ω grid ... ")
ωBasis = QR(Λ, rtol, projPHA_ω, [Float(Λ),])
ωGrid = ωBasis.grid
rank = ωBasis.N
else
error("Functional algorithm for the symmetry $symmetry has not yet been implemented!")
# elseif type == :fermi
# println("Building ω grid ... ")
# ωBasis = QR(Λ, rtol, projPH_ω, [Float(0), Float(Λ)])
# ωGrid = vcat(-ωBasis.grid[end:-1:2], ωBasis.grid)
# rank = length(ωGrid)
# println("rank: $rank")
# println("Building τ grid ... ")
# τBasis = tauGrid(ωGrid, rank, Λ, rtol, :fermi)
# # τBasis = QR(Λ / 2, rtol / 10, projPH_τ, Float(0), N=ωBasis.N)
# println("Building n grid ... ")
# nBasis = MatFreqGrid(ωGrid, rank, Λ, :fermi)
end
ωGrid = Float64.(ωGrid)
degree = 128
τ = Discrete.τChebyGrid(dlrGrid, degree, print)
kernel = Discrete.preciseKernelT(dlrGrid, τ, ωGrid, print)
Discrete.testInterpolation(dlrGrid, τ, ωGrid, kernel, print)
τIndex = Discrete.τnQR(kernel, rank, print)
τGrid = sort(τ.grid[τIndex])
nFineGrid, nFermiKernel, nBoseKernel = Discrete.preciseKernelΩn(dlrGrid, ωGrid, print)
nFermiIndex = Discrete.τnQR(nFermiKernel, rank, print)
nFermiGrid = sort(nFineGrid[nFermiIndex])
nBoseIndex = Discrete.τnQR(nBoseKernel, rank, print)
nBoseGrid = sort(nFineGrid[nBoseIndex])
# τGrid = τBasis / Λ
# τGrid = τBasis
# nGrid = nBasis
########### output ############################
# @printf("%5s %32s %32s %8s\n", "index", "real freq", "tau", "ωn")
# for r = 1:rank
# @printf("%5i %32.17g %32.17g %16i\n", r, ωGrid[r], τGrid[r], nGrid[r])
# end
########### output ############################
print && @printf("%5s %32s %32s %11s %11s\n", "index", "real freq", "tau", "fermi ωn", "bose ωn")
for r = 1:rank
print && @printf("%5i %32.17g %32.17g %16i %16i\n", r, ωGrid[r], τGrid[r], nFermiGrid[r], nBoseGrid[r])
end
return ωGrid, τGrid, nFermiGrid, nBoseGrid
# return Dict([(:ω, ωGrid), (:τ, τGrid), (:ωn, nGrid)])
end
end
if abspath(PROGRAM_FILE) == @__FILE__
# freq, Q = findBasis(1.0e-3, Float(100))
# basis = QR(100, 1e-3)
Λ = 1e10
# Λ = 100
# @time ωBasis = QR(Λ, 1e-13, projPH_ω, [Float(0), Float(Λ)])
    @time ωBasis = Functional.QR(Λ, 1e-12, Functional.projPHA_ω, [Functional.Float(Λ),])
# @time τBasis = QR(Λ / 2, 1e-11, projPHA_τ, Float(0), N=ωBasis.N)
# nBasis = MatFreqGrid(ωBasis.grid, ωBasis.N, Λ, :acorr)
# @time basis = QR(100, 1e-10)
# readline()
# basis = QR(100, 1e-3)
end
|
{"hexsha": "f783d4cd0768c4d41e642ffb827fefc789bf23e5", "size": 10301, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/functional/builder.jl", "max_stars_repo_name": "numericalEFT/Lehmann.jl", "max_stars_repo_head_hexsha": "058a505d1bb8590bb20ffd053976798373d977de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-12-16T12:35:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T12:01:51.000Z", "max_issues_repo_path": "src/functional/builder.jl", "max_issues_repo_name": "numericalEFT/Lehmann.jl", "max_issues_repo_head_hexsha": "058a505d1bb8590bb20ffd053976798373d977de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2021-09-23T21:21:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T18:26:52.000Z", "max_forks_repo_path": "src/functional/builder.jl", "max_forks_repo_name": "numericalEFT/Lehmann.jl", "max_forks_repo_head_hexsha": "058a505d1bb8590bb20ffd053976798373d977de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-17T10:10:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T10:10:53.000Z", "avg_line_length": 32.7015873016, "max_line_length": 159, "alphanum_fraction": 0.5917872051, "num_tokens": 3569}
|
function [priormeans,posteriormeans,covmatrix] = gpnddisimPredictRNAOnly(model,predtimes);
% GPASIMPREDICT Compute predictions (means and a covariance matrix)
% of RNA values for the GPASIM model, conditional on the existing
% RNA values in the model but not on any existing POL2 values.
%
% FORMAT
%---------------------------------
% DESC computes predictions for the asynchronous Gaussian
% process single input motif model.
%
% ARG model : the model for which the predictions are computed.
%
% ARG predtimes : the time points where predictions for RNA are needed
%
% RETURN priormeans : the prior mean values of the RNA predictions.
%
% RETURN posteriormeans : the posterior mean values of the RNA
% predictions, conditional on the observed RNA values in the model.
%
% RETURN covmatrix : the covariance matrix between the
% predictions; for example, the diagonal values are the variances
% of each prediction.
%---------------------------------
%
% SEEALSO : gpasimCreate
%
% COPYRIGHT : Jaakko Peltonen, 2011
% GPASIMPREDICT
numGenes=model.numGenes;
if numGenes==0,
error('gpnddisimPredictRNAOnly requires a model that has parameters for RNA modeling\n');
end;
% compute prior means
%pol2priormeans=[1:size(predtimes,1)]'*0;
pol2priormeans=ones(size(predtimes,1),1)*model.simMean;
% Mean for the mRNA is nonconstant over time and depends on the
% B,D,S parameters and on the POL2 mean
Bj=model.B(1);
Dj=model.D(1);
Sj=model.S(1);
if model.use_disimstartmean==1,
disimStartMean=model.disimStartMean(1);
end;
% compute the RNA mean curve
rnapriormeans=[];
tempind1=1;
for k=1:numGenes,
nt=length(predtimes);
rnapriormeans=[rnapriormeans;nan*ones(nt,1)];
if (model.use_disimstartmean==1),
tempt=predtimes-model.delay(k);
I=find(tempt<0);
tempt(I)=0;
rnapriormeans(tempind1:tempind1+nt-1)=...
model.disimStartMean(k)*exp(model.D(k)*(-predtimes)) ...
+(model.B(k)/model.D(k))*(1-exp(-model.D(k)*predtimes)) ...
+(model.simMean*model.S(k)/model.D(k))*(1-exp(-model.D(k)*tempt));
else
tempt=predtimes-model.delay(k);
I=find(tempt<0);
tempt(I)=0;
rnapriormeans(tempind1:tempind1+nt-1)=...
((model.B(k)+model.simMean*model.S(k))/model.D(k))*exp(model.D(k)*(-predtimes))...
+((model.B(k)+model.simMean*model.S(k))/model.D(k))*(1-exp(-model.D(k)*tempt));
end;
tempind1=tempind1+nt;
end;
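% The use_disimstartmean branch above is the closed-form solution of
%   dm/dt = B + S*p(t - delay) - D*m,  m(0) = disimStartMean,  p == simMean:
%   m(t) = m0*exp(-D*t) + (B/D)*(1-exp(-D*t)) + (simMean*S/D)*(1-exp(-D*max(t-delay,0)))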
%if model.use_disimstartmean==1,
% rnapriormeans=(Bj+model.simMean*Sj)/Dj+(disimStartMean-(Bj+model.simMean*Sj)/Dj)*exp(Dj*(-predtimes));
%else
% rnapriormeans=(Bj+model.simMean*Sj)/Dj*ones(size(predtimes));
%end;
if 1,
K_new=kernCompute(model.kern, predtimes);
K_new=K_new(length(predtimes)+1:2*length(predtimes),length(predtimes)+1:2*length(predtimes));
K_new_old=kernCompute(model.kern, predtimes, model.t);
K_new_old=K_new_old(length(predtimes)+1:2*length(predtimes),length(model.t)+1:2*length(model.t));
K_old=model.K(length(model.t)+1:2*length(model.t),length(model.t)+1:2*length(model.t));
K_old_new=K_new_old';
end;
tempm=model.m(length(model.t)+1:2*length(model.t));
priormeans=rnapriormeans;
% debug output of matrix sizes (commented out; uncomment when troubleshooting)
% size(tempm)
% size(K_new_old)
% size(K_old)
% size(priormeans)
posteriormeans=priormeans+K_new_old*(K_old\tempm);
%covmatrix=K_new-K_new_old*inv(K_old)*K_old_new;
covmatrix=K_new-K_new_old*(K_old\K_old_new);
%posteriormeans=real(posteriormeans);
%covmatrix=real(covmatrix);
|
{"author": "SheffieldML", "repo": "GPmat", "sha": "4b5914a38ecbad9fb7a13a3392970bfc28c9d911", "save_path": "github-repos/MATLAB/SheffieldML-GPmat", "path": "github-repos/MATLAB/SheffieldML-GPmat/GPmat-4b5914a38ecbad9fb7a13a3392970bfc28c9d911/gpsim/gpnddisimPredictRNAOnly.m"}
|
# This example is written for the new interface
# This is the full COVID-19 model to be fitted to the RKI data
# see the PPT for details of the model design
import StateModeling as stm
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf  # needed by the non-L-BFGS optimizer branch below
from Corona.LoadData import loadData, preprocessData
from Corona.CoronaModel import CoronaModel, plotTotalCases, plotRaw
#data = loadData(r"COVID-19 Linelist 2020_04_23.xlsx", useThuringia = True, pullData=False)
# data = loadData(r"COVID-19 Linelist 2020_04_30.xlsx", useThuringia = True, pullData=False)
# AllMeasured = preprocessData(AllMeasured)
AllMeasured = loadData(useThuringia = False, pullData = False)
ExampleRegions = ['SK Jena', 'LK Greiz'] # 'SK Gera',
AllMeasured = preprocessData(AllMeasured, ReduceDistricts=ExampleRegions, SumDistricts=False, SumAges=True, SumGender=True)
# plotRaw(AllMeasured)
# M = CoronaDelayModel(AllMeasured, Tmax = AllMeasured['Cases'].shape[0], lossWeight=lossWeights)
M = CoronaModel(AllMeasured)
if False:
mobdat = AllMeasured['mobility']
mobdate = mobdat['date'].to_numpy()
plt.figure('Retail and recreation');plt.plot(mobdat['retail_and_recreation_percent_change_from_baseline'].to_numpy())
offsetDay=0; plt.xticks(range(offsetDay, len(mobdate), 7), [date for date in mobdate[offsetDay:-1:7]], rotation="vertical")
plt.ylabel('Percent Change'); plt.tight_layout()
Tmax = 120
# M.toFit(['r0', 'hr', 'ht0', 'I0'])
# M.toFit(['r0', 'I0'])
M.toFit(['r0', 'h', 'aT0', 'aBase', 'I0', 'd', 'rd', 'T0', 'q']) # 'q',
# M.toFit(['r0'])
# if Cases.shape[-1] > 1:
# M.toFit.append()
if AllMeasured['Cases'].shape[-2] > 1:
M.appendToFit(['Age Border', 'Age Sigma'])
# g = M.getGUI()
lossWeights = {'cases':0.1,'deaths': 0.1}
M.DataDict={}
g = M.getGUI(showResults=M.showSimRes, doFit=M.doFit, Dates = list(AllMeasured['Dates']))
PopSum = np.sum(AllMeasured['Population'])
measured = AllMeasured['Cases'][:, np.newaxis, :, :, :] / PopSum
measuredDead = AllMeasured['Dead'][:, np.newaxis, :, :, :] / PopSum
NIter = 500 # 200
xlim = None # (60,100)
# fittedVars, fittedRes = M.fit({'detected': measured}, Tmax, otype=otype, oparam=oparam, NIter=NIter, verbose=True, lossScale=lossScale)
FitDict = {'cases': measured}
if "Hospitalized" in AllMeasured.keys():
FitDict['hospitalization'] = AllMeasured['Hospitalized'][:, np.newaxis, :, :, :]/ PopSum
FitDict['deaths'] = measuredDead
# SimDict = {'cases': None, 'cumul_cases': None, 'cumul_dead':None}
if True:
simulated = M.simulate('simulated', FitDict, Tmax=Tmax)
M.showResults(title=AllMeasured['Region'], Scale=PopSum, ylabel='occupancy', xlim=xlim, dims=("District"), Dates=AllMeasured['Dates'], legendPlacement='upper right', styles=['.','-','--'])
M.showStates(MinusOne=('S'), dims2d=None, Dates = AllMeasured['Dates'])
if True:
otype = "L-BFGS"
lossScale = 1.0 # 1e4
oparam = {"normFac": 'max'}
else:
lossScale = None
otype = "nesterov" # "adagrad" "adadelta" "SGD" "nesterov" "adam"
learnrate = {"nesterov": 1000.0, "adam": 7e-7}
oparam = {"learning_rate": tf.constant(learnrate[otype], dtype=stm.CalcFloatStr)}
# oparam['noiseModel'] = 'Poisson'
oparam['noiseModel'] = 'Gaussian'
# oparam['noiseModel'] = 'ScaledGaussian' # is buggy? Why the NaNs?
# tf.config.experimental_run_functions_eagerly(True)
fittedVars, fittedRes = M.fit(FitDict, Tmax, otype=otype, oparam=oparam, NIter=NIter, verbose=True, lossScale=lossScale)
# YMax =np.max(np.sum(AllMeasured['Cases']) / sum(AllMeasured['Population']))
# M.showResults(title=AllMeasured['Region'], ylabel='occupancy', xlim=xlim, ylim = [1e-6,YMax], dims=("District"), Dates=AllMeasured['Dates'], legendPlacement='upper right')
M.showResults(title=AllMeasured['Region'], Scale=PopSum, ylabel='occupancy', xlim=xlim, dims=("District"), Dates=AllMeasured['Dates'], legendPlacement='upper right', styles=['.','-','--'])
# plt.ylim(1e-7*PopSum,1e-3*PopSum)
M.showStates(MinusOne=('S'), dims2d=None, Dates = AllMeasured['Dates'], legendPlacement='upper right')
if measured.shape[-2] > 1:
M.showResults(title="Age Distribution", ylabel='occupancy', xlim=xlim, dims=("Age"), Dates=AllMeasured['Dates'], legendPlacement='upper right')
# np.sum(measured[-1,:,:,:],(0,2))*PopSum / Pop # detected per population
# if 'T0' in fittedVars:
# print("mean(T0) = " + str(np.mean(fittedVars['T0'])))
# print("mean(r0) = " + str(np.mean(fittedVars['r0'])))
# print("h = " + str(fittedVars['h']))
# print("aT0 = " + str(fittedVars['aT0']))
# print("aBase = " + str(fittedVars['aBase']))
# print("d = " + str(fittedVars['d']))
# if 'rd' in fittedVars:
# print("rd = " + str(fittedVars['rd']))
# if 'q' in fittedVars:
# print("q = " + str(fittedVars['q']))
M.compareFit(fittedVars=fittedVars)
plotTotalCases(AllMeasured)
if False:
plt.figure("Awareness reduction")
plt.plot(awareness(np.arange(0, 100)))
|
{"hexsha": "edde5f9dc6804dd269fe2c5442240f4d18e04644", "size": 4906, "ext": "py", "lang": "Python", "max_stars_repo_path": "Examples/FitCorona.py", "max_stars_repo_name": "RainerHeintzmann/StateModeling", "max_stars_repo_head_hexsha": "051826783954c8421b0de341ba6241b33c43af79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-05T20:17:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-28T10:36:45.000Z", "max_issues_repo_path": "Examples/FitCorona.py", "max_issues_repo_name": "RainerHeintzmann/StateModeling", "max_issues_repo_head_hexsha": "051826783954c8421b0de341ba6241b33c43af79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:44:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:25:41.000Z", "max_forks_repo_path": "Examples/FitCorona.py", "max_forks_repo_name": "RainerHeintzmann/StateModeling", "max_forks_repo_head_hexsha": "051826783954c8421b0de341ba6241b33c43af79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-02T17:16:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-22T11:30:17.000Z", "avg_line_length": 42.6608695652, "max_line_length": 192, "alphanum_fraction": 0.6858948227, "include": true, "reason": "import numpy", "num_tokens": 1502}
|
import pickle
import numpy as np
import os
import sys
from collections import defaultdict
from tqdm import tqdm
sys.path.append('..')
from config import get_configs, experiment_path, data_path
class Vocab(object):
def __init__(self, size):
self.lexicon = pickle.load(open(os.path.join(data_path, 'lexicon.pkl'), 'rb'))[:size-1]
        # put <unk> at the top of the lexicon;
        # the remaining entries are assumed to already be sorted by frequency
self.lexicon = [('<unk>', 0)] + self.lexicon
self.w2i = {x[0]:i for i, x in enumerate(self.lexicon)}
self.i2w = {v:k for k,v in self.w2i.items()}
print('vocab with size {} loaded'.format(size))
def __len__(self):
return len(self.w2i)
class CharVocab(Vocab):
def __init__(self, size):
super(CharVocab, self).__init__(size)
# build char to index for char RNN
self.c2i = {}
self.c2i['<unk>'] = 0
self.c2i['<eos>'] = 1
for item in self.lexicon[2:]:
word = item[0].split('/')[0]
for c in word:
if c not in self.c2i:
self.c2i[c] = len(self.c2i)
self.i2c = {v: k for k, v in self.c2i.items()}
print('{} chars contained'.format(len(self.c2i)))
#print(sorted([x for x in self.c2i.keys()]))
def __len__(self):
return len(self.c2i)
class Corpus(object):
def __init__(self, vocab, debug=False):
self.vocab = vocab
self.encoded_train = self.encode_corpus('train.txt', debug)
self.encoded_dev = self.encode_corpus('dev.txt', debug)
self.encoded_test = self.encode_corpus('test.txt', debug)
def encode_corpus(self, filename, debug=False):
encoded = []
print('encode corpus: {}'.format(filename))
with open(os.path.join(data_path, filename), 'r', encoding='utf-8') as f:
lines = f.readlines()
if debug:
lines = lines[:1024*100]
for line in tqdm(lines):
words = line.strip().split(' ')
if isinstance(self.vocab, CharVocab):
words = ''.join([word.split('/')[0] for word in words])
encoded += [self.vocab.c2i[x] if x in self.vocab.c2i else self.vocab.c2i['<unk>'] for x in words] \
+ [self.vocab.c2i['<eos>']]
else:
encoded += [self.vocab.w2i[x] if x in self.vocab.w2i else self.vocab.w2i['<unk>'] for x in words] \
+ [self.vocab.w2i['<eos>']]
return encoded
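    # Example with a hypothetical word vocab {'<unk>': 0, 'the/DT': 1, '<eos>': 2}:
    # the line "the/DT foo/NN" encodes to [1, 0, 2] — OOV tokens map to <unk>,
    # and each line is terminated with <eos>.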
'''
Pack the compressed embeddings into a pickle again
'''
def build_compressed_embedding_pkl(name, experiment_id):
    embeddings = []
    weights_dir = os.path.join(experiment_path, str(experiment_id), "weights")
    with open(os.path.join(weights_dir, name), 'r') as f:
        lines = f.readlines()
        for line in lines:
            tokens = line.strip().split()
            v = [float(x) for x in tokens[1:]]
            embeddings.append(v)
    LM = np.array(embeddings)
    pickle.dump(LM, open(os.path.join(weights_dir, "CLM.pkl"), "wb"))
    print('LM size {} dumped'.format(LM.shape))
if __name__ == "__main__":
# test the model
    # build_compressed_embedding_pkl('embedding.txt.comp', experiment_id=0)  # experiment_id value is illustrative
vocab = Vocab(50000)
#vocab = CharVocab(50000)
corpus = Corpus(vocab, debug=True)
print([vocab.i2w[x] for x in corpus.encoded_train][:100])
#print([vocab.i2c[x] for x in corpus.encoded_train][:1000])
|
{"hexsha": "2e2a75ad26814651eca858e2a6ddd4670f996a25", "size": 3628, "ext": "py", "lang": "Python", "max_stars_repo_path": "train/data.py", "max_stars_repo_name": "jiali-ms/JLM", "max_stars_repo_head_hexsha": "ca7325cfcc6ed56469a90e8eca59b1e79e0cba9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 128, "max_stars_repo_stars_event_min_datetime": "2018-01-20T13:46:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-25T05:09:08.000Z", "max_issues_repo_path": "train/data.py", "max_issues_repo_name": "jiali-ms/JLM", "max_issues_repo_head_hexsha": "ca7325cfcc6ed56469a90e8eca59b1e79e0cba9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-03-14T02:25:29.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-14T02:25:29.000Z", "max_forks_repo_path": "train/data.py", "max_forks_repo_name": "jiali-ms/JLM", "max_forks_repo_head_hexsha": "ca7325cfcc6ed56469a90e8eca59b1e79e0cba9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-03-13T16:31:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T18:04:03.000Z", "avg_line_length": 37.0204081633, "max_line_length": 120, "alphanum_fraction": 0.5664277839, "include": true, "reason": "import numpy", "num_tokens": 919}
|
[STATEMENT]
lemma PO_m1_refines_m1a [iff]:
"refines R1a1 med1a1 m1a m1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. refines R1a1 med1a1 m1a m1
[PROOF STEP]
by (rule Refinement_basic) (auto del: subsetI)
|
{"llama_tokens": 104, "file": "Security_Protocol_Refinement_Key_establish_m1_ds", "length": 1}
|
import numpy as np
import pandas as pd
import scipy
from sklearn import metrics
from FPMax import FPMax
from Apriori import Apriori
from scipy.cluster.hierarchy import fcluster
from scipy.cluster.hierarchy import linkage
# MASPC algorithm
class MASPC():
def __init__(self,demographic,dignosisCodes):
self.demographic = demographic
self.dignosisCodes = dignosisCodes
def MAS(self,minSup,minAc,minOv):
# Run FPMax to get MFI
fpmax = FPMax()
fpmax.encode_input([])
fpmax.run(minSup)
# Running Apriori is a preparatory step for getting MFA
apriori = Apriori()
apriori.encode_input([])
apriori.run(minSup)
# This assumes input to be the output of the spmf.jar file
list_1 = []
for i in apriori.decode_output():
if len(i)==3:
list_1.append(i)
# Get MFA
all_con=self.get_all_allconfidence(list_1,fpmax.decode_output(),minAc)
all_con.sort(key=lambda x: x[-1],reverse=True)
all_con_withoutSUP=[]
for i in all_con:
all_con_withoutSUP.append([x for x in i[:len(i)-2]])
all_con_target = []
for i in all_con_withoutSUP:
flag = 0
for j in all_con_target:
if (set(i) & set(j) != set()):
number = 0
for k in self.dignosisCodes:
if ( ( set(k) & (set(i)|set(j)) ) == (set(i)|set(j)) ):
number = number + 1
if number <= minOv:
flag = 1
break
if flag == 0:
all_con_target.append(i)
all_con_target_without1=[]
for i in all_con_target:
if len(i) != 1:
all_con_target_without1.append(i)
# save MFAs
self.MFAs = all_con_target_without1
# Input a list of MFIs
# Return MFIs whose All_confidence is above minAc
def get_all_allconfidence(self,list_1,list_all_max,threshhold):
all_max=[]
for i in list_all_max:
temp_allconfidence = self.allconfidence(list_1,i)
if temp_allconfidence >= threshhold:
i[-1] = temp_allconfidence
all_max.append(i)
return all_max
def allconfidence(self,list_1,list_max):
# Compute All_confidence of an itemset
b=[]
for i in list_max[:len(list_max)-2]:
for j in list_1:
if i==j[0]:
b.append(int(j[2]))
return int(list_max[-1])/max(b)
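    # all_confidence(X) = sup(X) / max_{i in X} sup({i}); e.g. with hypothetical
    # counts sup({a,b}) = 20, sup({a}) = 50, sup({b}) = 40, the value is
    # 20/50 = 0.4 — the minimum confidence over all association rules from X.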
def PC(self,k,method,metric):
        w, h = len(self.MFAs), len(self.dignosisCodes)
all_con_tables_without1=[[0 for x in range(w)] for y in range(h)]
        # project the maximal set of independent frequent patterns onto each patient
for i,j in enumerate(self.dignosisCodes):
temp=set(j)
l=len(temp)
for a,b in enumerate(self.MFAs):
while(set(b)<=temp):
temp=temp.difference(set(b))
all_con_tables_without1[i][a]+=1
# build a dataframe
all_con_part_2_without1=pd.DataFrame(all_con_tables_without1, columns=[str(sublist) for sublist in self.MFAs])
all_con_final_t_without1=self.demographic.join(all_con_part_2_without1)
        # drop patients whose diagnosis codes are not covered by any MFA
all_con_delete_without1= [sum(i) for i in all_con_tables_without1]
all_con_delete_idex_without1=[i for i, e in enumerate(all_con_delete_without1) if e == 0]
all_con_final_t_without1.drop(all_con_delete_idex_without1,inplace=True)
self.binaryData=all_con_final_t_without1
# do clustering
all_con_cos_ave_without1 = linkage(all_con_final_t_without1.values, method, metric)
self.ClusterResult=fcluster(all_con_cos_ave_without1, k, criterion='maxclust')
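# Usage sketch (hypothetical data and parameter values; assumes `demographic`
# is a DataFrame whose rows align with `dignosisCodes`, a list of per-patient
# code lists):
#   maspc = MASPC(demographic_df, diagnosis_lists)
#   maspc.MAS(minSup=0.05, minAc=0.5, minOv=2)          # mine the MFAs
#   maspc.PC(k=5, method='average', metric='cosine')    # cluster patients
#   labels = maspc.ClusterResult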
|
{"hexsha": "1cbfc1362635b7e2fd966e15c95afae959cca98b", "size": 4031, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/MASPC.py", "max_stars_repo_name": "dominicamartinez/clustehr", "max_stars_repo_head_hexsha": "0ce893a666974674fad36591f0156bd720910b4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MASPC.py", "max_issues_repo_name": "dominicamartinez/clustehr", "max_issues_repo_head_hexsha": "0ce893a666974674fad36591f0156bd720910b4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MASPC.py", "max_forks_repo_name": "dominicamartinez/clustehr", "max_forks_repo_head_hexsha": "0ce893a666974674fad36591f0156bd720910b4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0076923077, "max_line_length": 122, "alphanum_fraction": 0.5795088067, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 964}
|
import keras
import librosa
import os
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing import image
os.environ["IMAGEIO_FFMPEG_EXE"] = "/usr/bin/ffmpeg"
import moviepy.editor
class audioPredictions:
def __init__(self, file):
self.file = file
self.path = './models/audioModel.h5'
self.loaded_model = keras.models.load_model(self.path)
def make_predictions(self):
data, sampling_rate = librosa.load(self.file)
mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)
x = np.expand_dims(mfccs, axis=1)
x = np.expand_dims(x, axis=0)
        predictions = self.loaded_model.predict_classes(x)  # Sequential-only API; removed in newer Keras (use np.argmax(model.predict(x), axis=-1) there)
#print("Audio emotion prediction is", " ", self.convert_class_to_emotion(predictions))
return self.convert_class_to_emotion(predictions)
@staticmethod
def convert_class_to_emotion(pred):
label_conversion = {'0': 'neutral',
'1': 'calm',
'2': 'happy',
'3': 'sad',
'4': 'angry',
'5': 'fear',
'6': 'disgusted',
'7': 'surprised'}
for key, value in label_conversion.items():
if int(key) == pred:
label = value
return label
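# Example usage (paths are hypothetical):
#   prediction = audioPredictions(file="./media/audio.wav").make_predictions()
#   print(prediction)  # e.g. 'happy'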
if __name__ == '__main__':
video = "./media/9930.mp4"
vid = moviepy.editor.VideoFileClip(video)
audio = vid.audio
audio.write_audiofile("./media/audio.wav")
# load model
model = model_from_json(open("./models/vidModelConv2Fer.json", "r").read())
# load weights
model.load_weights('./models/vidModelWeightsConv2Fer.h5')
face_haar_cascade = cv2.CascadeClassifier('./models/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(video)
chunks = os.listdir("./audioChunks/")
i=0
fourcc = cv2.VideoWriter_fourcc('X','V','I','D')
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))
out = cv2.VideoWriter('predictions.avi', fourcc, 20,(frame_width,frame_height),True )
font = cv2.FONT_HERSHEY_SIMPLEX
for chunk in chunks:
live_prediction = audioPredictions(file="./audioChunks/" + chunk)
ret, test_img = cap.read() # captures frame and returns boolean value and captured image
if not ret:
continue
gray_img= cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
faces_detected = face_haar_cascade.detectMultiScale(test_img, 1.32, 5)
visual_prediction = ""
audio_prediction = ""
for (x, y, w, h) in faces_detected:
cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=7)
            roi_gray = gray_img[y:y + h, x:x + w]  # crop the face region (rows y:y+h, cols x:x+w)
#test_img = cv2.resize(test_img, (112, 112))
roi_gray = cv2.resize(roi_gray, (48, 48))
img_pixels = image.img_to_array(roi_gray)
img_pixels = np.expand_dims(img_pixels, axis=0)
img_pixels /= 255
predictions = model.predict(img_pixels)
# find max indexed array
max_index = np.argmax(predictions[0])
emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
#emotions = ("neutral","angry","disgusted","scared","happy","sad","surprised")
visual_prediction = emotions[max_index]
audio_prediction = live_prediction.make_predictions()
print("Visual emotion prediction at second " + str(i) +" is :" + visual_prediction)
print("Audio emotion prediction at second " + str(i) +" is :" + audio_prediction)
if visual_prediction == audio_prediction:
print("Multimodal emotion prediction at second " + str(i) +" is :" + audio_prediction)
else:
print("The singles modalities predictions do not match")
resized_img = cv2.resize(test_img, (frame_width,frame_height))
#cv2.putText(test_img, visual_prediction + audio_prediction, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), cv2.LINE_4)
cv2.putText(resized_img,
"Face is " + visual_prediction
+ " and speech is " +audio_prediction,
(50, 50),
font, 1,
(0, 255, 255),
2,
2)
out.write(resized_img)
i=i+1
cap.release()
out.release()
cv2.destroyAllWindows()
|
{"hexsha": "f973da6a1dc403a2e36dff98b491a63e252fefd2", "size": 4599, "ext": "py", "lang": "Python", "max_stars_repo_path": "multimodalDetection/multimodalPredictions.py", "max_stars_repo_name": "Rubik90/TFM_AG", "max_stars_repo_head_hexsha": "5e836245d0704122f2a0d47413e93bf53d966ca0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-16T18:32:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-16T18:32:38.000Z", "max_issues_repo_path": "multimodalDetection/multimodalPredictions.py", "max_issues_repo_name": "Rubik90/TFM_AG", "max_issues_repo_head_hexsha": "5e836245d0704122f2a0d47413e93bf53d966ca0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multimodalDetection/multimodalPredictions.py", "max_forks_repo_name": "Rubik90/TFM_AG", "max_forks_repo_head_hexsha": "5e836245d0704122f2a0d47413e93bf53d966ca0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-16T18:32:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T18:32:39.000Z", "avg_line_length": 34.5789473684, "max_line_length": 130, "alphanum_fraction": 0.6081756904, "include": true, "reason": "import numpy", "num_tokens": 1137}
|
(* Title: HOL/HOLCF/IOA/NTP/Correctness.thy
Author: Tobias Nipkow & Konrad Slind
*)
section \<open>The main correctness proof: Impl implements Spec\<close>
theory Correctness
imports Impl Spec
begin
definition
hom :: "'m impl_state => 'm list" where
"hom s = rq(rec(s)) @ (if rbit(rec s) = sbit(sen s) then sq(sen s)
else tl(sq(sen s)))"
setup \<open>map_theory_claset (fn ctxt => ctxt delSWrapper "split_all_tac")\<close>
lemmas hom_ioas = Spec.ioa_def Spec.trans_def sender_trans_def receiver_trans_def impl_ioas
and impl_asigs = sender_asig_def receiver_asig_def srch_asig_def rsch_asig_def
declare split_paired_All [simp del]
text \<open>
A lemma about restricting the action signature of the implementation
to that of the specification.
\<close>
lemma externals_lemma:
"a\<in>externals(asig_of(Automata.restrict impl_ioa (externals spec_sig))) =
(case a of
S_msg(m) \<Rightarrow> True
| R_msg(m) \<Rightarrow> True
| S_pkt(pkt) \<Rightarrow> False
| R_pkt(pkt) \<Rightarrow> False
| S_ack(b) \<Rightarrow> False
| R_ack(b) \<Rightarrow> False
| C_m_s \<Rightarrow> False
| C_m_r \<Rightarrow> False
| C_r_s \<Rightarrow> False
| C_r_r(m) \<Rightarrow> False)"
apply (simp (no_asm) add: externals_def restrict_def restrict_asig_def Spec.sig_def asig_projections)
apply (induct_tac "a")
apply (simp_all (no_asm) add: actions_def asig_projections)
txt \<open>2\<close>
apply (simp (no_asm) add: impl_ioas)
apply (simp (no_asm) add: impl_asigs)
apply (simp (no_asm) add: asig_of_par asig_comp_def asig_projections)
apply (simp (no_asm) add: "transitions"(1) unfold_renaming)
txt \<open>1\<close>
apply (simp (no_asm) add: impl_ioas)
apply (simp (no_asm) add: impl_asigs)
apply (simp (no_asm) add: asig_of_par asig_comp_def asig_projections)
done
lemmas sels = sbit_def sq_def ssending_def rbit_def rq_def rsending_def
text \<open>Proof of correctness\<close>
lemma ntp_correct:
"is_weak_ref_map hom (Automata.restrict impl_ioa (externals spec_sig)) spec_ioa"
apply (unfold Spec.ioa_def is_weak_ref_map_def)
apply (simp (no_asm) cong del: if_weak_cong split del: if_split add: Correctness.hom_def
cancel_restrict externals_lemma)
apply (rule conjI)
apply (simp (no_asm) add: hom_ioas)
apply (simp (no_asm_simp) add: sels)
apply (rule allI)+
apply (rule imp_conj_lemma)
apply (induct_tac "a")
apply (simp_all (no_asm_simp) add: hom_ioas)
apply (frule inv4)
apply force
apply (frule inv4)
apply (frule inv2)
apply (erule disjE)
apply (simp (no_asm_simp))
apply force
apply (frule inv2)
apply (erule disjE)
apply (frule inv3)
apply (case_tac "sq (sen (s))=[]")
apply (simp add: hom_ioas)
apply (blast dest!: add_leD1 [THEN leD])
apply (rename_tac m, case_tac "m = hd (sq (sen (s)))")
apply force
apply simp
apply (blast dest!: add_leD1 [THEN leD])
apply simp
done
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/HOLCF/IOA/NTP/Correctness.thy"}
|
import numpy as np
class zigzag_tubes(object):
    # Container for per-tube cross-sectional areas and lengths.
    def __init__(self,nr_t,l,area):
        self.A_tot = area*np.ones(nr_t, dtype=np.float64)
        self.l = l*np.ones(nr_t, dtype=np.float64)
|
{"hexsha": "335dcc2295901b6910db423f07c307edff1c7f73", "size": 194, "ext": "py", "lang": "Python", "max_stars_repo_path": "py_dp/simulation/zigzag_tubes.py", "max_stars_repo_name": "amirdel/dispersion-continua", "max_stars_repo_head_hexsha": "2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-23T14:35:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-23T14:35:43.000Z", "max_issues_repo_path": "py_dp/simulation/zigzag_tubes.py", "max_issues_repo_name": "amirdel/dispersion-continua", "max_issues_repo_head_hexsha": "2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py_dp/simulation/zigzag_tubes.py", "max_forks_repo_name": "amirdel/dispersion-continua", "max_forks_repo_head_hexsha": "2e1f7a3fbfcdc0b27c546cb0ae51a628a926ad60", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-23T14:36:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-23T14:36:29.000Z", "avg_line_length": 24.25, "max_line_length": 55, "alphanum_fraction": 0.6443298969, "include": true, "reason": "import numpy", "num_tokens": 55}
|
[STATEMENT]
lemma le_sup_equiv2: "(a \<le> b) = (a \<squnion> b = b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (a \<le> b) = (a \<squnion> b = b)
[PROOF STEP]
by (rule sup1_dual.le_sup_equiv)
|
{"llama_tokens": 99, "file": "PseudoHoops_PseudoHoops", "length": 1}
|
#=
test_collater:
- Julia version: 1.1.0
- Author: bramb
- Date: 2019-03-18
=#
using Test
using HyperCollate,MetaGraphs
include("util.jl")
@testset "collater" begin
@testset "collating 2 xml texts" begin
include("util.jl")
f_xml = """
<text>
<s>Hoe zoet moet nochtans zijn dit <subst><del>werven om</del><add>trachten naar</add></subst> een vrouw,
de ongewisheid vóór de liefelijke toestemming!</s>
</text>
"""
q_xml = """
<text>
<s>Hoe zoet moet nochtans zijn dit <subst><del>werven om</del><add>trachten naar</add></subst> een vrouw !
Die dagen van nerveuze verwachting vóór de liefelijke toestemming.</s>
</text>
"""
collation = Collation()
@test collation.state == needs_witness
add_witness!(collation,"F",f_xml)
@test collation.state == needs_witness
add_witness!(collation,"Q",f_xml)
@test collation.state == ready_to_collate
collate!(collation)
@test collation.state == is_collated
@debug(collation)
dot = to_dot(collation.graph)
_print_dot(dot)
end
@testset "ranking" begin
xml = """
<text><s><subst><del>Dit kwam van een</del><add>De</add></subst> te streng doorgedreven rationalisatie</s></text>
"""
vwg = to_graph(xml)
r = ranking(vwg)
for v in keys(r.by_vertex)
str = get_prop(vwg,v,:text)
@debug("$str : $(r.by_vertex[v])")
end
for rank in sort(collect(keys(r.by_rank)))
@debug("$rank : $(r.by_rank[rank])")
end
end
end
|
{"hexsha": "b2131baa2f1ee8ad6be7c22e9c594f25d9793015", "size": 1683, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_collater.jl", "max_stars_repo_name": "bleekere/HyperCollate.jl", "max_stars_repo_head_hexsha": "3caf51a4817dffa98b44d404639b0aab485eae32", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-28T17:41:41.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-28T17:41:41.000Z", "max_issues_repo_path": "test/test_collater.jl", "max_issues_repo_name": "bleekere/HyperCollate.jl", "max_issues_repo_head_hexsha": "3caf51a4817dffa98b44d404639b0aab485eae32", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_collater.jl", "max_forks_repo_name": "bleekere/HyperCollate.jl", "max_forks_repo_head_hexsha": "3caf51a4817dffa98b44d404639b0aab485eae32", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-28T09:29:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-28T09:29:00.000Z", "avg_line_length": 27.1451612903, "max_line_length": 121, "alphanum_fraction": 0.568627451, "num_tokens": 479}
|
#module LinearPredict
export linPred,linPred2
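# Build an m×m Hermitian Toeplitz matrix from the autocorrelation vector a (indices wrap mod m).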
function Toeplitz(a::Array{Complex{Float64},1})
m=length(a)
T=[(k>=l) ? a[1+mod(k-l,m)] : conj(a[1+mod(l-k,m)]) for k=1:m,l=1:m]
return(T)
end
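# Autocorrelation of signal a for lags 0 .. p-1: c[l+1] = sum_k a[k]*conj(a[k+l]).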
function autoCorr(a::Array{Complex{Float64},1},p::Int64)
c=zeros(Complex{Float64},p)
for l=0:(p-1),k=1:(length(a)-l)
c[l+1]+=a[k]*conj(a[k+l])
end
return( c )
end
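# Forward linear prediction: solve the order-p Toeplitz normal equations for
# the LPC coefficients, then extrapolate the signal a by n samples.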
function linPred(a::Array{Complex{Float64},1},p::Int,n::Int)
lena=length(a)
r=autoCorr(a,p+1)
coeff = conj(reverse(Toeplitz(r[1:p,1]) \ r[2:p+1,1]))
# print(size(coeff))
# print(coeff)
ap=zeros(Complex{Float64},lena+n) ; ap[1:lena]=a
for k=(lena+1):(lena+n)
ap[k] = sum(coeff.*ap[(k-p):(k-1)])
end
return(ap)
end
function linPred2(a::Array{Complex{Float64},2},p::Int,n::Int)
(r,s)=size(a)
ap=zeros(Complex{Float64},r,s+n)
for k=1:r
ap[k,:]=linPred(a[k,:],p,n)
end
return(ap)
end
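# Example (hypothetical signal): extend a decaying complex exponential by 16
# samples using an order-8 predictor:
#   s = ComplexF64.([exp((-0.05 + 2im*pi*0.1)*k) for k in 0:63])
#   sp = linPred(s, 8, 16)   # length(sp) == 80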
#end
|
{"hexsha": "cf38b3951130cde15a6b5f86099ce19c8fa71784", "size": 987, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LinearPredict.jl", "max_stars_repo_name": "marcel-utz/NMR", "max_stars_repo_head_hexsha": "3fddc374a9c3c5127fb3e8e16402fbbb91319215", "max_stars_repo_licenses": ["IJG"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LinearPredict.jl", "max_issues_repo_name": "marcel-utz/NMR", "max_issues_repo_head_hexsha": "3fddc374a9c3c5127fb3e8e16402fbbb91319215", "max_issues_repo_licenses": ["IJG"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-04-02T09:06:34.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-02T09:10:34.000Z", "max_forks_repo_path": "src/LinearPredict.jl", "max_forks_repo_name": "marcel-utz/NMR.jl", "max_forks_repo_head_hexsha": "1b297796aa2191ecbfbe16a666d839730caf38ad", "max_forks_repo_licenses": ["IJG"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9534883721, "max_line_length": 73, "alphanum_fraction": 0.556231003, "num_tokens": 370}
|
\documentclass[conference]{IEEEtran}
\pdfoutput=1
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\usepackage{url}
\usepackage{flushend}
\def\BibTeX{{\rm
B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\begin{document}
\title{The Dark Side of Unikernels for Machine Learning}
\author{
\IEEEauthorblockN{Matthew Leon}
\IEEEauthorblockA{
\textit{Vanderbilt University}\\
Nashville, Tennessee, USA\\
matthew.leon@vanderbilt.edu
}
}
\maketitle
\begin{abstract}
This paper analyzes the shortcomings of unikernels as a method
of deployment for machine learning inferencing applications as
well as provides insights and analysis on future work in this
space. The findings of this paper advocate for a tool to enable
management of dependent libraries in a unikernel to enable a
more ergonomic build process as well as take advantage of the
inherent security and performance benefits of unikernels.
\end{abstract}
\begin{IEEEkeywords}
unikernel, virtualization, xen, kernel samepage merging,
docker, containerization, lightweight operating system, library
operating system, cloud computing
\end{IEEEkeywords}
\section{Introduction}
Virtualization technology is used in datacenters spanning the
whole world to provide availability, scalability, and security to
millions of client workloads. While virtualizing an entire
computer and running everything, including the OS image,
libraries and application code, on top is still popular, many
alternative methods have emerged over the last decade, each with
promises to increase security and improve resource utilization.
There are numerous benefits to improving resource utilization of
the host system. For users, fewer resources used by the host
operating system means more resources available to application
code. For the large corporations hosting public clouds,
maximizing utilization on a small number of machines means lower
costs, as datacenters consume massive amounts of
power~\cite{directenergy}. Recently, containerization technology
has been adopted as a way to reduce the number of virtualization
layers in the modern datacenter, with services like Docker
providing benefits including easier deployment, ensuring
consistency between the development and production environments,
providing limits on resource usage, and sandboxing applications
for better security. Containerization cuts down on costs by
reducing duplication of the operating system—rather than running
several different stacks all virtualized on top of a hypervisor,
a user can run a single operating system and divide up its
resources among several containers. Unikernel is yet another
lightweight virtualization technology increasingly being adopted
in cloud data centers.
\section{Study Goals}
This study aimed to analyze the state of a few different
unikernels and their environments, comparing them to traditional
methods of virtualization in terms of developer experience,
performance, flexibility, security, and feasibility for adoption.
Specifically, the study was conducted through a use case where we
wanted to understand whether it was feasible to deploy a machine
learning-trained image classification inference inside a
Unikernel. To that end, we implemented an image classification
API capable of receiving an image via HTTP and responding with an
inference as to the contents of the image.
\section{Report Organization}
This report first outlines preliminary knowledge about the
differences of unikernels, including major vendors of unikernel
technology as well as an overview of the pertinent differences
from ordinary virtualization solutions. The next section provides
an overview of the work done in the process of evaluating
the maturity of unikernels as a modern, lightweight alternative
to containerization technology. Finally, the paper is concluded
with an analysis of the hurdles that must be addressed before
unikernels are sufficient for modern deployments.
\section{Unikernels In-depth}
\subsection{What are Unikernels?}
Unikernels, on the other hand, focus on the other side of the
playing field from containers. With unikernels, the operating
system is totally eliminated—the application code itself is
augmented with the minimal set of code necessary to interface
with the hypervisor and is then directly run as a bootable image
on top of a hypervisor. The compactness of this system can result
in numerous benefits over containers and fully virtualized Linux
servers. In one study, boot times as low as 50ms were achieved,
as well as lower memory usage and reduced latency due to
zero-copy network implementation~\cite{libos}. The significant
benefits of unikernels are discussed in the next section.
Unikernels can be grouped into two distinct categories. Firstly
are unikernels that function as a library operating system. OSs
in this group, such as IncludeOS~\cite{includeos},
HaLVM~\cite{halvm}, and MirageOS~\cite{mirageos}, cannot run full
executable programs, instead, they are written in and run code in
an augmented runtime environment that implements operating system
functions, such as I/O. The other group of unikernels, such as
RumpRun~\cite{rumprun}, and Nanos~\cite{nanos}, provide
application binaries an entire POSIX-compatible runtime
environment which can run arbitrary ELF executables. In addition
to these runtime environments, several build, orchestration, and
packaging tools are available, such as ops~\cite{nanos},
Unikraft~\cite{unikraft}, and UniK~\cite{unik}. This study
investigates the feasibility and shortcomings of using these
tools to deploy a deep neural network inference solution
available via a web API.\@
\subsection{Benefits of Unikernels}
The single address space architecture of unikernels provides
numerous benefits that are not achievable with conventional
preemptive multitasking operating systems. Firstly, the total
attack surface is much lower with a unikernel. Bratterud, Happe,
and Duncan highlight a 92\% reduction in total bytes of code in a
running unikernel, which they translate to a 92\% smaller attack
surface~\cite{enhancingprivacy}. The lack of a shell prevents an
entire class of vulnerabilities, while a single address space
allows for compile-time address space layout randomization, which
is more performant than the runtime alternative. In addition to
the security implications of a single address space, the removal
of kernel space eliminates time spent in kernel space context
switches as well as scheduling interrupts by the guest OS.\@
Instead, scheduling and load balancing is handled entirely by the
hypervisor.
In terms of load balancing itself, unikernels offer distinct
benefits for web-related tasks, especially due to their startup
time. The unikernel itself being the executable and thus not
requiring file systems to be initialized as well as the small
size the kernel code occupies means that the only boot step
necessary is initializing the network interface. In a hypervisor
environment, this allows the unikernel to be booted in response
to an incoming request in time to handle that request. Such a
fast boot time allows horizontal scaling with the granularity of
individual requests. This instant availability enables
applications such as fog deployment for IoT, which was
investigated by Cozzolino, Ding, and Ott~\cite{fades}. Concurrently
with this research, that work is being applied as infrastructure for
smart-city monitoring of ongoing road hazards~\cite{ecco}.
\section{Insights from our Study}
Supplementary source code materials and motivating examples for
the following findings may be found at~\cite{myghrepo}. Many
simple implementations of image classifications are available on
GitHub, such as~\cite{pytorchapi}. In the goal of evaluating the
effectiveness of unikernels in different environments and
implementations, three different machine learning frameworks were
tested: Tensorflow, PyTorch, and Tensorflow.js (Tensorflow and
Tensorflow.js are included separately as they do not share
bindings to the same underlying library; they are completely
separate implementations in two different languages of the same
API). IncludeOS was used in conjunction with Tensorflow, and
RumpRun and Nanos were both used to test each of PyTorch and
Tensorflow.js.
Our findings revealed that none of the tested solutions were
successful. The shortcomings varied by implementation: Tensorflow and
PyTorch struggled with linking issues inside the unikernel, and
Tensorflow.js struggled
fetching the trained model via URL due to the lack of a DNS
resolver in the unikernel environment. When adding the node.js
extension to Tensorflow.js to allow for loading the model from
within the image, the unikernel struggled due to lack of node-gyp
(a C/C++ native binding) support inside the unikernel. We note
that Tensorflow.js could be extended to support loading from file
without involving node-gyp, but performing large modifications to
the source of the application was out of scope for this study’s
investigation of unikernels as an alternative deployment
environment. PyTorch encountered similar issues as it is an
optimized runtime with most of the deep learning code implemented
in C—the modules for the library were unable to be loaded inside
the unikernel environment.
Seeing as most of the encountered issues were due to the lack of
interoperability between native libraries and interpreted code,
the next approach we took was compiling Tensorflow into an
application compiled with IncludeOS, the library operating system
capable of transforming the C/C++ application it is built with to
an Xen-bootable executable. Unfortunately, linking also became
an issue in this case. The publicly available distributions of
Tensorflow depend on over 10 shared libraries, and IncludeOS must
be built statically, which is not supported (nor possible in an
unsupported fashion) in any version of the library. Copying the
shared libraries into the image from the system used to build
Tensorflow resulted in a bootable system, but the execution
failed due to missing symbols in the outdated version of glibc
used in the host system. No other languages were tested after
these failures, as all languages link to the C library, with the
only exception being the previously mentioned Tensorflow.js
without node.js extensions, which is designed for the browser
environment. We did not explore whether other, smaller toolkits
would have fared better; mlpack~\cite{mlpack} appears to be
a good candidate for future research, as it may allow static
linking~\cite{mlpackcmake}.
\section{Analysis and Possibilities for Future Work}
Unikernels, when compared to a deployment solution using docker
containers or a native Linux virtual machine, still have many
hurdles to overcome before they can claim full parity in terms of
supported use cases. Due to the decades of prevalence of
ecosystems which support dynamic linking as a way to quickly fix
security issues and reduce compiled code duplication across
binaries, even common libraries like Tensorflow do not support
static linking, which is unfortunate news for any application
developer looking to use these libraries in a unikernel. There
are ways to build a static library manually such as by packing
GCC’s object file output with tools such as ar, but these are
steps for build system maintainers rather than application
developers~\cite{ar}. It is this researcher’s opinion that
unikernels would be most benefited by a robust build tool which
handles dependency bundling inside the unikernel environment,
much like Docker’s \textit{build} command or Ansible scripts.
With access to a layered build system, unikernels could provide a
compelling base layer for virtualization due to their lightweight
and secure runtimes; however, dependencies in docker are handled
through Linux distribution archives, which would be lacking in
the environment of a unikernel. Without such tools, the art of
manually packing a static archive for linking or building each
shared library with the correct version of Glibc will remain out
of reach for all but the most skilled devops engineers deploying
in the most demanding situations where significant cost and
performance benefits of unikernels may offset the additional
development work required for deploying the unikernel. The build
tools tested during this study, unik and ops, were both unable to
contend with library dependencies in an efficient manner.
Beyond the deployment itself, there are supplementary
considerations that must be investigated in terms of the
performance implications of unikernels. Docker’s AUFS allows for
something which unikernel images, in their current,
statically-linked form, do not—deduplication of layers. For
example, if the unikernel is being used for a microservice-based
web API, it would not be uncommon for there to be two endpoints
that look very similar from a dependency point of view—endpoints
involved with creating and updating a user’s profile, for
one—which would duplicate all library code in each binary. In a
Docker deployment, the libraries for the operating system would
be shared on disk, as the containers are stored as layers and
extended with each command executed in the Dockerfile. This
benefit extends to memory, as well—different docker containers
descending from the same parent layers are able to share the same
pages in memory due to Kernel Samepage
Merging~\cite{dockerdedup,ksm}. Unikernels, on the other
hand, may be able to share less memory due to differences in how
private pages may be accessed by unikernels sharing a majority of
code, but being compiled with different static dependencies. This
is an area requiring further research to experimentally determine
the extent of the memory saving, and the concept of copy-on-write
deduplication of memory pages is currently subject to security
concerns discovered along with side channel
attacks~\cite{ksm,sidechannelattacks}.
\section{Conclusion}
Unikernels present compelling benefits in terms of performance
and security for deploying applications to the fog or the cloud,
but currently face issues in regards to managing dependencies,
updates, and compatibility with 3rd party libraries. A solution à
la \textit{docker build} for unikernels—providing a method for
dependency management as well as possibly for sharing and
extending images others have made—may provide a more secure and
performant platform for future cloud computing needs.
\vspace{12pt}
\bibliographystyle{IEEEtran}
\bibliography{Leon-the-dark-side-of-unikernels}
\end{document}
|
{"hexsha": "af237ab9e42956276f725b4a8b2b2b153f034d01", "size": 15444, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Leon-the-dark-side-of-unikernels.tex", "max_stars_repo_name": "leonm1/unikernel-research-2020", "max_stars_repo_head_hexsha": "abe09db995668dda8947efbe6a73d302a366a237", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-02T00:14:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-02T00:14:09.000Z", "max_issues_repo_path": "Leon-the-dark-side-of-unikernels.tex", "max_issues_repo_name": "leonm1/unikernel-research-2020", "max_issues_repo_head_hexsha": "abe09db995668dda8947efbe6a73d302a366a237", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-22T11:47:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T11:47:05.000Z", "max_forks_repo_path": "Leon-the-dark-side-of-unikernels.tex", "max_forks_repo_name": "leonm1/unikernel-research-2020", "max_forks_repo_head_hexsha": "abe09db995668dda8947efbe6a73d302a366a237", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.7192429022, "max_line_length": 65, "alphanum_fraction": 0.8216783217, "num_tokens": 3377}
|
function ecephyscache()
ecephys_project_cache.EcephysProjectCache.from_warehouse(manifest=ecephysmanifest)
end
# These will take a while to download
function getsessiontable()
@info "Please wait, this can take a few seconds"
CSV.read(IOBuffer(ecephyscache().get_session_table().to_csv()), DataFrame);
end
export getsessiontable
function getprobes()
CSV.read(IOBuffer(ecephyscache().get_probes().to_csv()), DataFrame);
end
export getprobes
function getchannels()
CSV.read(IOBuffer(ecephyscache().get_channels().to_csv()), DataFrame);
end
export getchannels
function getunits(; filter_by_validity=true, amplitude_cutoff_maximum = 0.1, presence_ratio_minimum = 0.9, isi_violations_maximum = 0.5)
    str = ecephyscache().get_units(filter_by_validity=filter_by_validity,
                amplitude_cutoff_maximum=amplitude_cutoff_maximum,
                presence_ratio_minimum=presence_ratio_minimum,
                isi_violations_maximum=isi_violations_maximum).to_csv()
    CSV.read(IOBuffer(str), DataFrame);
end
export getunits
function getunitanalysismetricsbysessiontype(session_type; filter_by_validity=true, amplitude_cutoff_maximum = 0.1, presence_ratio_minimum = 0.9, isi_violations_maximum = 0.5) # Yeah, that's Python
str = ecephyscache().get_unit_analysis_metrics_by_session_type(session_type,
filter_by_validity=filter_by_validity,
amplitude_cutoff_maximum=amplitude_cutoff_maximum,
presence_ratio_minimum=presence_ratio_minimum,
isi_violations_maximum=isi_violations_maximum).to_csv()
CSV.read(IOBuffer(str), DataFrame);
end
export getunitanalysismetricsbysessiontype
function getallunitmetrics() # This one is really slow
    metrics1 = getunitanalysismetricsbysessiontype("brain_observatory_1.1",
                amplitude_cutoff_maximum = Inf,
                presence_ratio_minimum = -Inf,
                isi_violations_maximum = Inf)
    metrics2 = getunitanalysismetricsbysessiontype("functional_connectivity",
                amplitude_cutoff_maximum = Inf,
                presence_ratio_minimum = -Inf,
                isi_violations_maximum = Inf)
    vcat(metrics1, metrics2)
end
export getallunitmetrics
function getsessiondata(session_id::Int; filter_by_validity=true, amplitude_cutoff_maximum = 0.1, presence_ratio_minimum = 0.9, isi_violations_maximum = 0.5)
ecephyscache().get_session_data(session_id; filter_by_validity=filter_by_validity,
amplitude_cutoff_maximum=amplitude_cutoff_maximum,
presence_ratio_minimum=presence_ratio_minimum,
isi_violations_maximum=isi_violations_maximum)
end
export getsessiondata
abstract type AbstractSession end
struct Session <: AbstractSession
pyObject
end
export Session
Session(session_id::Int; kwargs...) = Session(getsessiondata(session_id; kwargs...))
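# Example usage (the session id below is a placeholder, not verified against the dataset):
#   S = Session(715093703)
#   getprobeids(S)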
getid(S::AbstractSession) = S.pyObject.ecephys_session_id
getprobes(S::AbstractSession) = CSV.read(IOBuffer(S.pyObject.probes.to_csv()), DataFrame)
getprobeids(S::AbstractSession) = getprobes(S)[!, :id]
getchannels(S::AbstractSession) = CSV.read(IOBuffer(S.pyObject.channels.to_csv()), DataFrame)
function getchannels(S::AbstractSession, probeid)
c = getchannels(S)
c = subset(c, :probe_id=>ByRow(==(probeid)))
end
function getprobecoordinates(S::AbstractSession)
c = subset(getchannels(S), :anterior_posterior_ccf_coordinate => ByRow(!ismissing),
:dorsal_ventral_ccf_coordinate => ByRow(!ismissing),
:left_right_ccf_coordinate => ByRow(!ismissing))
x = c[!, :anterior_posterior_ccf_coordinate]
y = c[!, :dorsal_ventral_ccf_coordinate]
z = c[!, :left_right_ccf_coordinate]
return (x, y, z)
end
function getprobecoordinates(S::AbstractSession, probeid)
c = subset(getchannels(S, probeid), :anterior_posterior_ccf_coordinate => ByRow(!ismissing),
:dorsal_ventral_ccf_coordinate => ByRow(!ismissing),
:left_right_ccf_coordinate => ByRow(!ismissing))
x = c[!, :anterior_posterior_ccf_coordinate]
y = c[!, :dorsal_ventral_ccf_coordinate]
z = c[!, :left_right_ccf_coordinate]
return (x, y, z)
end
function getstructureacronyms(channelids::Vector{Int})
channels = getchannels()
acronyms = Vector{Any}(undef, size(channelids))
[acronyms[i] = channels[channels.id.==channelids[i], :ecephys_structure_acronym][1] for i ∈ 1:length(channelids)]
return acronyms
end
function getstructureids(channelids::Vector{Int})
channels = getchannels()
acronyms = Vector{Any}(undef, size(channelids))
[acronyms[i] = channels[channels.id.==channelids[i], :ecephys_structure_id][1] for i ∈ 1:length(channelids)]
return acronyms
end
function getstimuli(S::Session)
str = S.pyObject.stimulus_presentations.to_csv()
CSV.read(IOBuffer(str), DataFrame);
end
function getunitmetrics(session::AbstractSession)
str = session.pyObject.units.to_csv()
CSV.read(IOBuffer(str), DataFrame);
end
function getstimulusname(session::AbstractSession, time::Number; stimulus_table=getstimuli(session))
idx = findlast(stimulus_table.start_time .< time)
if isnothing(idx)
"blank"
else
stimulus_table.stimulus_name[idx]
end
end
getstimulusname(session::AbstractSession, times; stimulus_table=getstimuli(session), kwargs...) = getstimulusname.([session], times; stimulus_table, kwargs...)
function getstimuli(S::Session, stimulusname::String)
stimulus_table = getstimuli(S)
df = subset(stimulus_table, :stimulus_name=>ByRow(==(stimulusname)))
end
function getstimuli(session::Session, times::Union{Tuple, UnitRange, LinRange, Vector})
stimuli = getstimuli(session)
idxs = [findfirst(time .< stimuli.stop_time) for time ∈ times] # Find first frame that ends after each time point
return stimuli[idxs, :]
end
function getepochs(S::Session)
p = S.pyObject.get_stimulus_epochs() # Why is this so slow
CSV.read(IOBuffer(p.to_csv()), DataFrame);
end
function getepochs(S::Session, stimulusname)
epoch_table = getepochs(S)
df = subset(epoch_table, :stimulus_name=>ByRow(==(stimulusname)))
end
|
{"hexsha": "94640706bd54ac1e93082457bb18e90829050dfb", "size": 6517, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/EcephysCache.jl", "max_stars_repo_name": "brendanjohnharris/AllenNeuropixels.jl", "max_stars_repo_head_hexsha": "26156723c95e82cad6ceca2c5550d8790eb3a7bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/EcephysCache.jl", "max_issues_repo_name": "brendanjohnharris/AllenNeuropixels.jl", "max_issues_repo_head_hexsha": "26156723c95e82cad6ceca2c5550d8790eb3a7bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/EcephysCache.jl", "max_forks_repo_name": "brendanjohnharris/AllenNeuropixels.jl", "max_forks_repo_head_hexsha": "26156723c95e82cad6ceca2c5550d8790eb3a7bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.246835443, "max_line_length": 195, "alphanum_fraction": 0.7010894583, "num_tokens": 1537}
|
%
% API Documentation for QSTK
% Module QSTK.qstklearn.hmm
%
% Generated by epydoc 3.0.1
% [Mon Mar 5 00:49:20 2012]
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Module Description %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}|(}
\section{Module QSTK.qstklearn.hmm}
\label{QSTK:qstklearn:hmm}
This package includes code for representing and learning HMM's.
Most of the code in this package was derived from the descriptions provided
in 'A Tutorial on Hidden Markov Models and Selected Applications in Speech
Recognition' by Lawrence Rabiner.
Conventions: The keyword argument elem\_size will be passed in when
creating numpy array objects.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Functions %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Functions}
\label{QSTK:qstklearn:hmm:calcalpha}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.calcalpha \textit{(function)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{calcalpha}(\textit{stateprior}, \textit{transition}, \textit{emission}, \textit{observations}, \textit{numstates}, \textit{elem\_size}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Calculates 'alpha' the forward variable.
The alpha variable is a numpy array indexed by time, then state (TxN).
alpha[t][i] = the probability of being in state 'i' after observing the
first t symbols.
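For reference, the standard forward recursion from Rabiner's tutorial
(the notation below is the tutorial's, not taken from this module's
source) is
\[\alpha_1(i) = \pi_i\, b_i(O_1), \qquad
\alpha_{t+1}(j) = \Big[\sum_{i=1}^{N} \alpha_t(i)\, a_{ij}\Big]\, b_j(O_{t+1}).\]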
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:forwardbackward}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.forwardbackward \textit{(function)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{forwardbackward}(\textit{stateprior}, \textit{transition}, \textit{emission}, \textit{observations}, \textit{numstates}, \textit{elem\_size}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Calculates the probability of a sequence given the HMM.
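In the standard formulation the sequence probability is the forward
variable summed over all states at the final time step:
\[P(O \mid \lambda) = \sum_{i=1}^{N} \alpha_T(i).\]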
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:calcbeta}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.calcbeta \textit{(function)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{calcbeta}(\textit{transition}, \textit{emission}, \textit{observations}, \textit{numstates}, \textit{elem\_size}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Calculates 'beta' the backward variable.
The beta variable is a numpy array indexed by time, then state (TxN).
beta[t][i] = the probability of being in state 'i' and then observing
the symbols from t+1 to the end (T).
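The corresponding standard backward recursion (same assumed notation as
above) is
\[\beta_T(i) = 1, \qquad
\beta_t(i) = \sum_{j=1}^{N} a_{ij}\, b_j(O_{t+1})\, \beta_{t+1}(j).\]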
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:calcxi}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.calcxi \textit{(function)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{calcxi}(\textit{stateprior}, \textit{transition}, \textit{emission}, \textit{observations}, \textit{numstates}, \textit{alpha}={\tt None}, \textit{beta}={\tt None}, \textit{elem\_size}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Calculates 'xi', a joint probability from the 'alpha' and 'beta'
variables.
The xi variable is a numpy array indexed by time, state, and state
(TxNxN). xi[t][i][j] = the probability of being in state 'i' at time
't', and 'j' at time 't+1' given the entire observation sequence.
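In Rabiner's notation (assumed here, not taken from this module's
source) this is
\[\xi_t(i,j) = \frac{\alpha_t(i)\, a_{ij}\, b_j(O_{t+1})\, \beta_{t+1}(j)}{P(O \mid \lambda)}.\]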
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:calcgamma}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.calcgamma \textit{(function)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{calcgamma}(\textit{xi}, \textit{seqlen}, \textit{numstates}, \textit{elem\_size}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Calculates 'gamma' from xi.
Gamma is a (TxN) numpy array, where gamma[t][i] = the probability of
being in state 'i' at time 't' given the full observation sequence.
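Equivalently, gamma marginalizes xi over the successor state:
\[\gamma_t(i) = \sum_{j=1}^{N} \xi_t(i,j).\]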
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:baumwelchstep}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.baumwelchstep \textit{(function)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{baumwelchstep}(\textit{stateprior}, \textit{transition}, \textit{emission}, \textit{observations}, \textit{numstates}, \textit{numsym}, \textit{elem\_size}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Given an HMM model and a sequence of observations, computes the
Baum-Welch update to the parameters using gamma and xi.
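The standard re-estimation formulas (assumed notation, following the
tutorial) are
\[\bar{\pi}_i = \gamma_1(i), \qquad
\bar{a}_{ij} = \frac{\sum_{t=1}^{T-1} \xi_t(i,j)}{\sum_{t=1}^{T-1} \gamma_t(i)}, \qquad
\bar{b}_j(k) = \frac{\sum_{t:\,O_t=k} \gamma_t(j)}{\sum_{t=1}^{T} \gamma_t(j)}.\]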
\setlength{\parskip}{1ex}
\end{boxedminipage}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Variables %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Variables}
\vspace{-1cm}
\hspace{\varindent}\begin{longtable}{|p{\varnamewidth}|p{\vardescrwidth}|l}
\cline{1-2}
\cline{1-2} \centering \textbf{Name} & \centering \textbf{Description}& \\
\cline{1-2}
\endhead\cline{1-2}\multicolumn{3}{r}{\small\textit{continued on next page}}\\\endfoot\cline{1-2}
\endlastfoot\raggedright \_\-\_\-p\-a\-c\-k\-a\-g\-e\-\_\-\_\- & \raggedright \textbf{Value:}
{\tt \texttt{'}\texttt{QSTK.qstklearn}\texttt{'}}&\\
\cline{1-2}
\end{longtable}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Class Description %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.HMMLearner \textit{(class)}|(}
\subsection{Class HMMLearner}
\label{QSTK:qstklearn:hmm:HMMLearner}
\begin{alltt}
A class for modeling and learning HMMs.
This class conveniently wraps the module level functions. Class objects hold 6
data members:
- num\_states number of hidden states in the HMM
- num\_symbols number of possible symbols in the observation
sequence
- precision precision of the numpy.array elements (defaults to
longdouble)
- prior The prior probability of starting in each state
(Nx1 array)
- transition\_matrix The probability of transitioning between each state
(NxN matrix)
- emission\_matrix The probability of each symbol in each state
(NxO matrix)
You can set the 3 matrix parameters as you wish, but make sure the shape of
the arrays matches num\_states and num\_symbols, as these are used internally
Typical usage of this class is to create an HMM with a set number of states
and external symbols, train the HMM using addEvidence(...), and then use
the sequenceProb(...) method to see how well a specific sequence matches
the trained HMM.
\end{alltt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Methods %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Methods}
\label{QSTK:qstklearn:hmm:HMMLearner:__init__}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.HMMLearner \textit{(class)}!QSTK.qstklearn.hmm.HMMLearner.\_\_init\_\_ \textit{(method)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{\_\_init\_\_}(\textit{self}, \textit{num\_states}, \textit{num\_symbols}, \textit{init\_type}={\tt \texttt{'}\texttt{uniform}\texttt{'}}, \textit{precision}={\tt {\textless}type 'numpy.float128'{\textgreater}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Creates a new HMMLearner object with the given number of internal
states, and external symbols.
calls self.reset(init\_type=init\_type)
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:HMMLearner:reset}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.HMMLearner \textit{(class)}!QSTK.qstklearn.hmm.HMMLearner.reset \textit{(method)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{reset}(\textit{self}, \textit{init\_type}={\tt \texttt{'}\texttt{uniform}\texttt{'}})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
\begin{alltt}
Resets the 3 arrays using the given initialization method.
Wipes out the old arrays. You can use this method to change the shape
of the arrays by first changing num\_states and/or num\_symbols, and then
calling this method.
Currently supported initialization methods:
uniform prior, transition, and emission probabilities are all
uniform (default)
\end{alltt}
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:HMMLearner:sequenceProb}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.HMMLearner \textit{(class)}!QSTK.qstklearn.hmm.HMMLearner.sequenceProb \textit{(method)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{sequenceProb}(\textit{self}, \textit{newData})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Returns the probability that this HMM generated the given sequence.
Uses the forward-backward algorithm. If given an array of sequences,
returns a 1D array of probabilities.
\setlength{\parskip}{1ex}
\end{boxedminipage}
\label{QSTK:qstklearn:hmm:HMMLearner:addEvidence}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.HMMLearner \textit{(class)}!QSTK.qstklearn.hmm.HMMLearner.addEvidence \textit{(method)}}
\vspace{0.5ex}
\hspace{.8\funcindent}\begin{boxedminipage}{\funcwidth}
\raggedright \textbf{addEvidence}(\textit{self}, \textit{newData}, \textit{iterations}={\tt 1}, \textit{epsilon}={\tt 0.0})
\vspace{-1.5ex}
\rule{\textwidth}{0.5\fboxrule}
\setlength{\parskip}{2ex}
Updates this HMMs parameters given a new set of observed sequences
using the Baum-Welch algorithm.
newData can either be a single (1D) array of observed symbols, or a 2D
matrix, each row of which is a separate sequence. The Baum-Welch update
is repeated 'iterations' times, or until the sum absolute change in
each matrix is less than the given epsilon. If given multiple
sequences, each sequence is used to update the parameters in order, and
the sum absolute change is calculated once after all the sequences are
processed.
\setlength{\parskip}{1ex}
\end{boxedminipage}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}!QSTK.qstklearn.hmm.HMMLearner \textit{(class)}|)}
\index{QSTK \textit{(package)}!QSTK.qstklearn \textit{(package)}!QSTK.qstklearn.hmm \textit{(module)}|)}
|
{"hexsha": "29e4c4125d59b4f3a19c3fbff601c94d710695a4", "size": 12758, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Docs/pdf/QSTK.qstklearn.hmm-module.tex", "max_stars_repo_name": "elxavicio/QSTK", "max_stars_repo_head_hexsha": "4981506c37227a72404229d5e1e0887f797a5d57", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 339, "max_stars_repo_stars_event_min_datetime": "2015-01-01T10:06:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T23:32:24.000Z", "max_issues_repo_path": "Legacy/Docs/pdf/QSTK.qstklearn.hmm-module.tex", "max_issues_repo_name": "jenniyanjie/QuantSoftwareToolkit", "max_issues_repo_head_hexsha": "0eb2c7a776c259a087fdcac1d3ff883eb0b5516c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2015-01-04T13:12:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-19T11:13:47.000Z", "max_forks_repo_path": "Legacy/Docs/pdf/QSTK.qstklearn.hmm-module.tex", "max_forks_repo_name": "jenniyanjie/QuantSoftwareToolkit", "max_forks_repo_head_hexsha": "0eb2c7a776c259a087fdcac1d3ff883eb0b5516c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 154, "max_forks_repo_forks_event_min_datetime": "2015-01-30T09:41:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-19T02:27:59.000Z", "avg_line_length": 40.1194968553, "max_line_length": 263, "alphanum_fraction": 0.6443799969, "num_tokens": 3867}
|
using ActorInterfaces.Classic
using Test
struct Spawner end
struct SpawnTree
childcount::UInt8
depth::UInt8
end
@ctx function (me::Spawner)(msg::SpawnTree)
if msg.depth > 0
for i = 1:msg.childcount
child = spawn(Spawner())
send(child, SpawnTree(msg.childcount, msg.depth - 1))
end
end
return nothing
end
const TREE_HEIGHT = 15
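# A full binary tree of height h has 2^(h+1) - 1 nodes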
const TREE_SIZE = 2^(TREE_HEIGHT + 1) - 1
function run_spawnertest(lib::ActorLib)
return @testset "ActorInterfaceTests/spawner: Spawning children and sending messages to them" begin
@test ex_actorcount(lib) == 0
root = Spawner()
rootaddr = ex_spawn!(lib, root)
@test ex_actorcount(lib) == 1
ex_send!(lib, rootaddr, SpawnTree(2, TREE_HEIGHT))
println("Building a tree of $TREE_SIZE actors and delivering the same amount of messages")
ex_runtofinish(lib)
@test ex_actorcount(lib) == TREE_SIZE
end
end
|
{"hexsha": "c1769764e7f0af65a7a025079385ccb528eeeee7", "size": 964, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/suite/spawner.jl", "max_stars_repo_name": "JuliaActors/ActorInterfacesTests.jl", "max_stars_repo_head_hexsha": "0edf44b8a6e99f2707e908d430e3c622bc5101b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/suite/spawner.jl", "max_issues_repo_name": "JuliaActors/ActorInterfacesTests.jl", "max_issues_repo_head_hexsha": "0edf44b8a6e99f2707e908d430e3c622bc5101b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-31T17:12:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-31T17:12:08.000Z", "max_forks_repo_path": "src/suite/spawner.jl", "max_forks_repo_name": "JuliaActors/ActorInterfacesTests.jl", "max_forks_repo_head_hexsha": "0edf44b8a6e99f2707e908d430e3c622bc5101b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0540540541, "max_line_length": 103, "alphanum_fraction": 0.6628630705, "num_tokens": 259}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 1 19:22:38 2020
@author: sankha
"""
'''
Compute the following cluster validation indices:
Silhouette index, Dunn index, Davies-Bouldin index
'''
# Import Libraries
import math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import davies_bouldin_score
from sklearn.preprocessing import StandardScaler
# K-Means
def KMeansClustering(X, n_cluster):
model = KMeans(n_clusters = n_cluster)
model.fit(X)
labels = model.predict(X)
return labels
def getSilhoutteIndex(clusters,arr = []):
index = 0
total = 0
for cluster_key in clusters:
this_cluster = clusters[cluster_key]
this_cluster_length = len(this_cluster)
total = total + this_cluster_length
for ele in this_cluster:
sum = 0
            # Calculate a (cohesion) and b (separation) for this point
for other_ele in this_cluster:
# get euclidean dist here between ele and other_ele
dist = 0
for i in range(len(ele)):
if i not in arr:
dist = dist + (ele[i] - other_ele[i]) ** 2
dist = math.sqrt(dist)
sum = sum + dist
if (this_cluster_length == 1):
a = 0
else:
a = sum / (this_cluster_length - 1)
# Getting value of b
b = 999999999
for other_cluster_key in clusters:
if (other_cluster_key == cluster_key):
continue
other_cluster = clusters[other_cluster_key]
other_cluster_length = len(other_cluster)
sum1 = 0
for other_cluster_ele in other_cluster:
dist = 0
for i in range(len(ele)):
if i not in arr:
dist = dist + (ele[i] - other_cluster_ele[i]) ** 2
dist = math.sqrt(dist)
sum1 = sum1 + dist
b = min(b,sum1/other_cluster_length)
cluster_index = 0
if (a != b):
                # silhouette coefficient for this point
cluster_index = ((b-a)/max(a,b))
index = index + cluster_index
index = index / total
return index
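# Worked example (hypothetical numbers): for a point with mean
# intra-cluster distance a = 2.0 and smallest mean distance to another
# cluster b = 5.0, the silhouette coefficient is
# (b - a) / max(a, b) = 3.0 / 5.0 = 0.6.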
# DaviesBouldin Index
def getDBIndex(X,labels):
return davies_bouldin_score(X,labels)
# Helper function for containing attributes of that each np arrays
def getCentroid(cluster):
length = len(cluster)
centroid = []
first = True
for ele in cluster:
if first:
first = False
for i in range(len(ele)):
if np.isreal(ele[i]):
centroid.append(ele[i])
else:
centroid.append(0)
continue
for i in range(len(ele)):
if np.isreal(ele[i]):
centroid[i] = centroid[i] + ele[i]
else:
centroid[i] = centroid[i] + 0
centroid = np.asarray(centroid)
centroid = centroid / length
return centroid
# Dunn Index
def getDunnIndex(clusters,arr=[]):
cluster_in_between = 9999999
for cluster_key in clusters:
examp_temp = getCentroid(clusters[cluster_key])
for cluster_key1 in clusters:
if cluster_key == cluster_key1:
continue
examp_temp1 = getCentroid(clusters[cluster_key1])
dist = 0
for i in range(len(examp_temp)):
if i not in arr:
dist+=(examp_temp[i]-examp_temp1[i])**2
dist = math.sqrt(dist)
if (cluster_in_between > dist):
cluster_in_between = dist
    max_intra_cluster = 0
    for cluster_key in clusters:
        cluster_diameter = 0
        this_cluster = clusters[cluster_key]
        for data in this_cluster:
            for data1 in this_cluster:
                dist = 0
                for i in range(len(data)):
                    if i not in arr:
                        dist+=(data[i]-data1[i])**2
                dist = math.sqrt(dist)
                if (dist > cluster_diameter):
                    cluster_diameter = dist
        # track the largest intra-cluster distance (diameter) across clusters
        if (cluster_diameter > max_intra_cluster):
            max_intra_cluster = cluster_diameter
    return (cluster_in_between/max_intra_cluster)
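# Worked example (hypothetical numbers): with a minimum centroid-to-centroid
# distance of 4.0 between clusters and a maximum cluster diameter of 2.5,
# this Dunn index is 4.0 / 2.5 = 1.6 (larger is better).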
# Main
df = pd.read_csv("iris.csv")
scaler = StandardScaler()
X = np.array(scaler.fit_transform(df.drop(columns=["class"]).astype(float)))
# plt.plot(list(range(1,X.shape[0]+1)), distanceDec)
# plt.show()
labels = KMeansClustering(X,3)
df1 = pd.read_csv("wine.csv")
scaler1 = StandardScaler()
#scaler1 = MinMaxScaler()
X1 = np.array(scaler1.fit_transform(df1.drop(columns=["class"]).astype(float)))
labels1 = KMeansClustering(X1,2)
df2 = pd.read_csv("wdbc.csv")
scaler2 = StandardScaler()
#scaler2 = MinMaxScaler()
X2 = np.array(scaler2.fit_transform(df2.drop(columns=["class"]).astype(float)))
labels2 = KMeansClustering(X2,3)
# Dictionary with key as cluster number and value as a list of points in that cluster (each point is a np array of attributes)
clusters={}
k = 0
for i in labels:
if i in clusters:
clusters[i].append(df.iloc[k].values)
else:
clusters[i] = [df.iloc[k].values]
k = k + 1
clusters1={}
x = 0
for i in labels1:
if i in clusters1:
clusters1[i].append(df1.iloc[x].values)
else:
clusters1[i] = [df1.iloc[x].values]
x = x + 1
clusters2={}
y = 0
for i in labels2:
if i in clusters2:
clusters2[i].append(df2.iloc[y].values)
else:
clusters2[i] = [df2.iloc[y].values]
y = y + 1
# printing results
print("Iris:")
for item in clusters:
print()
print("Cluster ",item)
print("Length: ",len(clusters[item]))
print()
print("BCW:")
for item in clusters1:
print()
print("Cluster ",item)
print("Length: ",len(clusters1[item]))
print()
print("Seeds:")
for item in clusters2:
print()
print("Cluster ",item)
print("Length: ",len(clusters2[item]))
print()
'''
del clusters[-1]
to_remove = []
for i in range(len(labels)):
if labels[i] == -1:
to_remove.append(i)
X = np.delete(X,to_remove,0)
labels = np.delete(labels,to_remove)
del clusters1[-1]
to_remove1 = []
for i in range(len(labels1)):
if labels1[i] == -1:
to_remove1.append(i)
X1 = np.delete(X1,to_remove1,0)
labels1 = np.delete(labels1,to_remove1)
del clusters2[-1]
to_remove2 = []
for i in range(len(labels2)):
if labels2[i] == -1:
to_remove2.append(i)
X2 = np.delete(X2,to_remove2,0)
labels2 = np.delete(labels2,to_remove2)
'''
# printing the indices
print("Different types of Index vales")
print("Silhoutte Index :",getSilhoutteIndex(clusters,[4]))
print("Davies Bouldin Index :",getDBIndex(X,labels))
print("Dunn Index :",getDunnIndex(clusters,[4]))
print()
print("Silhoutte Index :",getSilhoutteIndex(clusters1,[0]))
print("Davies Bouldin Index :",getDBIndex(X1,labels1))
print("Dunn Index :",getDunnIndex(clusters1,[0]))
print()
print("Silhoutte Index :",getSilhoutteIndex(clusters2,[0]))
print("Davies Bouldin Index :",getDBIndex(X2,labels2))
print("Dunn Index :",getDunnIndex(clusters2,[0]))
|
{"hexsha": "c4c25f1c2b09ed3ef5e26ac11206c209123ae760", "size": 7406, "ext": "py", "lang": "Python", "max_stars_repo_path": "2nd.py", "max_stars_repo_name": "Sankha98/Data-Mining", "max_stars_repo_head_hexsha": "05f9e6bd9e88202231bb0a4add51da862f76cdcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2nd.py", "max_issues_repo_name": "Sankha98/Data-Mining", "max_issues_repo_head_hexsha": "05f9e6bd9e88202231bb0a4add51da862f76cdcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2nd.py", "max_forks_repo_name": "Sankha98/Data-Mining", "max_forks_repo_head_hexsha": "05f9e6bd9e88202231bb0a4add51da862f76cdcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8629032258, "max_line_length": 128, "alphanum_fraction": 0.5980286254, "include": true, "reason": "import numpy", "num_tokens": 1866}
|
from openmdao.api import Component, Group, IndepVarComp
import numpy as np
import pyframe3dd.frame3dd as frame3dd
from commonse.utilities import nodal2sectional
from commonse import gravity, eps, Tube, NFREQ
import commonse.UtilizationSupplement as util
import commonse.manufacturing as manufacture
from commonse.WindWaveDrag import AeroHydroLoads, CylinderWindDrag, CylinderWaveDrag
from commonse.environment import WaveBase, PowerWind
from commonse.vertical_cylinder import CylinderDiscretization, CylinderMass
from .map_mooring import NLINES_MAX
def find_nearest(array,value):
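    """Return the index of the entry of array closest to value."""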
return (np.abs(array-value)).argmin()
def ghostNodes(x1, x2, r1, r2):
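    """Return the two points where the segment from x1 to x2 pierces the
    column shells of radius r1 (around x1) and r2 (around x2)."""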
dx = x2 - x1
L = np.sqrt( np.sum( dx**2 ) )
dr1 = ( r1/L) * dx + x1
dr2 = (1.0 - r2/L) * dx + x1
return dr1, dr2
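# Illustrative check of ghostNodes (an editorial example, not original code):
# for nodes 10 m apart along x with shell radii of 1 m and 2 m, the ghost
# points land 1 m and 8 m from x1 along the connecting line:
#   ghostNodes(np.zeros(3), np.array([10.0, 0.0, 0.0]), 1.0, 2.0)
#   -> (array([1., 0., 0.]), array([8., 0., 0.]))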
class FloatingFrame(Component):
"""
OpenMDAO Component class for semisubmersible pontoon / truss structure for floating offshore wind turbines.
Should be tightly coupled with Semi and Mooring classes for full system representation.
"""
def __init__(self, nFull, nFullTow):
super(FloatingFrame,self).__init__()
# Keep Frame3DD data object for easy testing and debugging
self.myframe = None
# Environment
self.add_param('water_density', val=0.0, units='kg/m**3', desc='density of water')
# Material properties
self.add_param('material_density', val=0., units='kg/m**3', desc='density of material')
self.add_param('E', val=0.0, units='Pa', desc='Modulus of elasticity (Youngs) of material')
self.add_param('G', val=0.0, units='Pa', desc='Shear modulus of material')
self.add_param('yield_stress', val=0.0, units='Pa', desc='yield stress of material')
self.add_param('Hs', val=0.0, units='m', desc='wave significant height')
# Base column
self.add_param('main_z_full', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')
        self.add_param('main_d_full', val=np.zeros((nFull,)), units='m', desc='outer diameter at each section node bottom to top (length = nsection + 1)')
        self.add_param('main_t_full', val=np.zeros((nFull-1,)), units='m', desc='shell wall thickness between section nodes bottom to top (length = nsection)')
self.add_param('main_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of main column by section')
self.add_param('main_buckling_length', val=np.zeros((nFull-1,)), units='m', desc='distance between ring stiffeners')
self.add_param('main_displaced_volume', val=np.zeros((nFull-1,)), units='m**3', desc='column volume of water displaced by section')
self.add_param('main_hydrostatic_force', val=np.zeros((nFull-1,)), units='N', desc='Net z-force of hydrostatic pressure by section')
self.add_param('main_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of column buoyancy force')
self.add_param('main_center_of_mass', val=0.0, units='m', desc='z-position of center of column mass')
self.add_param('main_Px', np.zeros(nFull), units='N/m', desc='force per unit length in x-direction on main')
self.add_param('main_Py', np.zeros(nFull), units='N/m', desc='force per unit length in y-direction on main')
self.add_param('main_Pz', np.zeros(nFull), units='N/m', desc='force per unit length in z-direction on main')
self.add_param('main_qdyn', np.zeros(nFull), units='N/m**2', desc='dynamic pressure on main')
self.add_param('main_pontoon_attach_upper', val=0.0, desc='Fraction of main column for upper truss attachment on main column')
        self.add_param('main_pontoon_attach_lower', val=0.0, desc='Fraction of main column for lower truss attachment on main column')
# offset columns
self.add_param('offset_z_full', val=np.zeros((nFull,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')
        self.add_param('offset_d_full', val=np.zeros((nFull,)), units='m', desc='outer diameter at each section node bottom to top (length = nsection + 1)')
        self.add_param('offset_t_full', val=np.zeros((nFull-1,)), units='m', desc='shell wall thickness between section nodes bottom to top (length = nsection)')
self.add_param('offset_mass', val=np.zeros((nFull-1,)), units='kg', desc='mass of offset column by section')
self.add_param('offset_buckling_length', val=np.zeros((nFull-1,)), units='m', desc='distance between ring stiffeners')
self.add_param('offset_displaced_volume', val=np.zeros((nFull-1,)), units='m**3', desc='column volume of water displaced by section')
self.add_param('offset_hydrostatic_force', val=np.zeros((nFull-1,)), units='N', desc='Net z-force of hydrostatic pressure by section')
self.add_param('offset_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of column buoyancy force')
self.add_param('offset_center_of_mass', val=0.0, units='m', desc='z-position of center of column mass')
self.add_param('offset_Px', np.zeros(nFull), units='N/m', desc='force per unit length in x-direction on offset')
self.add_param('offset_Py', np.zeros(nFull), units='N/m', desc='force per unit length in y-direction on offset')
self.add_param('offset_Pz', np.zeros(nFull), units='N/m', desc='force per unit length in z-direction on offset')
self.add_param('offset_qdyn', np.zeros(nFull), units='N/m**2', desc='dynamic pressure on offset')
# Tower
self.add_param('tower_z_full', val=np.zeros((nFullTow,)), units='m', desc='z-coordinates of section nodes (length = nsection+1)')
        self.add_param('tower_d_full', val=np.zeros((nFullTow,)), units='m', desc='outer diameter at each section node bottom to top (length = nsection + 1)')
        self.add_param('tower_t_full', val=np.zeros((nFullTow-1,)), units='m', desc='shell wall thickness between section nodes bottom to top (length = nsection)')
self.add_param('tower_mass_section', val=np.zeros((nFullTow-1,)), units='kg', desc='mass of tower column by section')
self.add_param('tower_buckling_length', 0.0, units='m', desc='buckling length')
self.add_param('tower_center_of_mass', val=0.0, units='m', desc='z-position of center of tower mass')
self.add_param('tower_Px', np.zeros(nFullTow), units='N/m', desc='force per unit length in x-direction on tower')
self.add_param('tower_Py', np.zeros(nFullTow), units='N/m', desc='force per unit length in y-direction on tower')
self.add_param('tower_Pz', np.zeros(nFullTow), units='N/m', desc='force per unit length in z-direction on tower')
self.add_param('tower_qdyn', np.zeros(nFullTow), units='N/m**2', desc='dynamic pressure on tower')
# Semi geometry
self.add_param('radius_to_offset_column', val=0.0, units='m',desc='Distance from main column centerpoint to offset column centerpoint')
self.add_param('number_of_offset_columns', val=3, desc='Number of offset columns evenly spaced around main column')
# Pontoon properties
        self.add_param('pontoon_outer_diameter', val=0.0, units='m',desc='Outer diameter of tubular pontoon that connects offset or main columns')
        self.add_param('pontoon_wall_thickness', val=0.0, units='m',desc='Wall thickness of tubular pontoon that connects offset or main columns')
self.add_param('cross_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the bottom of the central main to the tops of the outer offset columns', pass_by_obj=True)
self.add_param('lower_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central main to the outer offset columns at their bottoms', pass_by_obj=True)
self.add_param('upper_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central main to the outer offset columns at their tops', pass_by_obj=True)
self.add_param('lower_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer offset columns at their bottoms', pass_by_obj=True)
self.add_param('upper_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer offset columns at their tops', pass_by_obj=True)
        self.add_param('outer_cross_pontoons', val=True, desc='Inclusion of cross-brace pontoons between the lower ring and the tops of the outer offset columns', pass_by_obj=True)
# Turbine parameters
        self.add_param('rna_mass', val=0.0, units='kg', desc='mass of rotor-nacelle assembly (RNA)')
self.add_param('rna_cg', val=np.zeros(3), units='m', desc='Location of RNA center of mass relative to tower top')
self.add_param('rna_force', val=np.zeros(3), units='N', desc='Force in xyz-direction on turbine')
        self.add_param('rna_moment', val=np.zeros(3), units='N*m', desc='Moments applied by the RNA at the tower top')
        self.add_param('rna_I', val=np.zeros(6), units='kg*m**2', desc='Mass moments of inertia of the RNA [Ixx Iyy Izz Ixy Ixz Iyz]')
        # Mooring parameters for loading
self.add_param('number_of_mooring_connections', val=3, desc='number of mooring connections on vessel')
self.add_param('mooring_lines_per_connection', val=1, desc='number of mooring lines per connection')
        self.add_param('mooring_neutral_load', val=np.zeros((NLINES_MAX,3)), units='N', desc='xyz-force of each mooring line on the structure')
self.add_param('mooring_stiffness', val=np.zeros((6,6)), units='N/m', desc='Linearized stiffness matrix of mooring system at neutral (no offset) conditions.')
self.add_param('mooring_moments_of_inertia', val=np.zeros(6), units='kg*m**2', desc='mass moment of inertia of mooring system about fairlead-centerline point [xx yy zz xy xz yz]')
self.add_param('fairlead', val=0.0, units='m', desc='Depth below water for mooring line attachment')
self.add_param('fairlead_radius', val=0.0, units='m',desc='Radius from center of structure to fairlead connection points')
self.add_param('fairlead_support_outer_diameter', val=0.0, units='m',desc='fairlead support outer diameter')
self.add_param('fairlead_support_wall_thickness', val=0.0, units='m',desc='fairlead support wall thickness')
# safety factors
self.add_param('gamma_f', 0.0, desc='safety factor on loads')
self.add_param('gamma_m', 0.0, desc='safety factor on materials')
self.add_param('gamma_n', 0.0, desc='safety factor on consequence of failure')
self.add_param('gamma_b', 0.0, desc='buckling safety factor')
self.add_param('gamma_fatigue', 0.0, desc='total safety factor for fatigue')
# Manufacturing
self.add_param('connection_ratio_max', val=0.0, desc='Maximum ratio of pontoon outer diameter to main/offset outer diameter')
# Costing
self.add_param('material_cost_rate', 0.0, units='USD/kg', desc='Raw material cost rate: steel $1.1/kg, aluminum $3.5/kg')
self.add_param('labor_cost_rate', 0.0, units='USD/min', desc='Labor cost rate')
self.add_param('painting_cost_rate', 0.0, units='USD/m/m', desc='Painting / surface finishing cost rate')
# Outputs
self.add_output('pontoon_wave_height_depth_margin', val=np.zeros(2), units='m', desc='Distance between attachment point of pontoons and wave crest- both above and below waterline')
self.add_output('pontoon_cost', val=0.0, units='USD', desc='Cost of pontoon elements and connecting truss')
self.add_output('pontoon_cost_rate', val=0.0, units='USD/t', desc='Cost rate of finished pontoon and truss')
self.add_output('pontoon_mass', val=0.0, units='kg', desc='Mass of pontoon elements and connecting truss')
        self.add_output('pontoon_displacement', val=0.0, units='m**3', desc='Volume of water displaced by submerged pontoon elements')
self.add_output('pontoon_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of pontoon buoyancy force')
self.add_output('pontoon_center_of_mass', val=0.0, units='m', desc='z-position of center of pontoon mass')
self.add_output('top_deflection', 0.0, units='m', desc='Deflection of tower top in yaw-aligned +x direction')
self.add_output('pontoon_stress', val=np.zeros((70,)), desc='Utilization (<1) of von Mises stress by yield stress and safety factor for all pontoon elements')
        self.add_output('main_stress', np.zeros(nFull-1), desc='Von Mises stress utilization along main column at specified locations. Includes safety factor.')
self.add_output('main_stress:axial', np.zeros(nFull-1), desc='Axial stress along main column at specified locations.')
self.add_output('main_stress:shear', np.zeros(nFull-1), desc='Shear stress along main column at specified locations.')
self.add_output('main_stress:hoop', np.zeros(nFull-1), desc='Hoop stress along main column at specified locations.')
        self.add_output('main_stress:hoopStiffen', np.zeros(nFull-1), desc='Hoop stress along main column, including ring-stiffening effects (Eurocode), at specified locations.')
self.add_output('main_shell_buckling', np.zeros(nFull-1), desc='Shell buckling constraint. Should be < 1 for feasibility. Includes safety factors')
self.add_output('main_global_buckling', np.zeros(nFull-1), desc='Global buckling constraint. Should be < 1 for feasibility. Includes safety factors')
        self.add_output('offset_stress', np.zeros(nFull-1), desc='Von Mises stress utilization along offset column at specified locations. Includes safety factor.')
self.add_output('offset_stress:axial', np.zeros(nFull-1), desc='Axial stress along offset column at specified locations.')
self.add_output('offset_stress:shear', np.zeros(nFull-1), desc='Shear stress along offset column at specified locations.')
self.add_output('offset_stress:hoop', np.zeros(nFull-1), desc='Hoop stress along offset column at specified locations.')
        self.add_output('offset_stress:hoopStiffen', np.zeros(nFull-1), desc='Hoop stress along offset column, including ring-stiffening effects (Eurocode), at specified locations.')
self.add_output('offset_shell_buckling', np.zeros(nFull-1), desc='Shell buckling constraint. Should be < 1 for feasibility. Includes safety factors')
self.add_output('offset_global_buckling', np.zeros(nFull-1), desc='Global buckling constraint. Should be < 1 for feasibility. Includes safety factors')
        self.add_output('tower_stress', np.zeros(nFullTow-1), desc='Von Mises stress utilization along tower at specified locations. Includes safety factor.')
self.add_output('tower_stress:axial', np.zeros(nFullTow-1), desc='Axial stress along tower column at specified locations.')
self.add_output('tower_stress:shear', np.zeros(nFullTow-1), desc='Shear stress along tower column at specified locations.')
self.add_output('tower_stress:hoop', np.zeros(nFullTow-1), desc='Hoop stress along tower column at specified locations.')
        self.add_output('tower_stress:hoopStiffen', np.zeros(nFullTow-1), desc='Hoop stress along tower column, including ring-stiffening effects (Eurocode), at specified locations.')
self.add_output('tower_shell_buckling', np.zeros(nFullTow-1), desc='Shell buckling constraint. Should be < 1 for feasibility. Includes safety factors')
self.add_output('tower_global_buckling', np.zeros(nFullTow-1), desc='Global buckling constraint. Should be < 1 for feasibility. Includes safety factors')
        self.add_output('plot_matrix', val=np.array([]), desc='Element endpoint coordinates for plotting the truss', pass_by_obj=True)
self.add_output('main_connection_ratio', val=np.zeros((nFull,)), desc='Ratio of pontoon outer diameter to main outer diameter')
        self.add_output('offset_connection_ratio', val=np.zeros((nFull,)), desc='Ratio of pontoon outer diameter to offset column outer diameter')
        self.add_output('structural_frequencies', np.zeros(NFREQ), units='Hz', desc='First NFREQ natural frequencies')
self.add_output('substructure_mass', val=0.0, units='kg', desc='Mass of substructure elements and connecting truss')
self.add_output('structural_mass', val=0.0, units='kg', desc='Mass of whole turbine except for mooring lines')
self.add_output('total_displacement', val=0.0, units='m**3', desc='Total volume of water displaced by floating turbine (except for mooring lines)')
self.add_output('z_center_of_buoyancy', val=0.0, units='m', desc='z-position of center of buoyancy of whole turbine')
self.add_output('substructure_center_of_mass', val=np.zeros(3), units='m', desc='xyz-position of center of gravity of substructure only')
self.add_output('structure_center_of_mass', val=np.zeros(3), units='m', desc='xyz-position of center of gravity of whole turbine')
self.add_output('total_force', val=np.zeros(3), units='N', desc='Net forces on turbine')
self.add_output('total_moment', val=np.zeros(3), units='N*m', desc='Moments on whole turbine')
# Derivatives
self.deriv_options['type'] = 'fd'
self.deriv_options['form'] = 'central'
self.deriv_options['check_form'] = 'central'
self.deriv_options['step_calc'] = 'relative'
self.deriv_options['step_size'] = 1e-5
def solve_nonlinear(self, params, unknowns, resids):
# Unpack variables
ncolumn = int(params['number_of_offset_columns'])
crossAttachFlag = params['cross_attachment_pontoons']
lowerAttachFlag = params['lower_attachment_pontoons']
upperAttachFlag = params['upper_attachment_pontoons']
lowerRingFlag = params['lower_ring_pontoons']
upperRingFlag = params['upper_ring_pontoons']
outerCrossFlag = params['outer_cross_pontoons']
R_semi = params['radius_to_offset_column'] if ncolumn>0 else 0.0
R_od_pontoon = 0.5*params['pontoon_outer_diameter']
R_od_main = 0.5*params['main_d_full']
R_od_offset = 0.5*params['offset_d_full']
R_od_tower = 0.5*params['tower_d_full']
R_od_fairlead = 0.5*params['fairlead_support_outer_diameter']
t_wall_main = params['main_t_full']
t_wall_offset = params['offset_t_full']
t_wall_pontoon = params['pontoon_wall_thickness']
t_wall_tower = params['tower_t_full']
t_wall_fairlead = params['fairlead_support_wall_thickness']
E = params['E']
G = params['G']
rho = params['material_density']
sigma_y = params['yield_stress']
z_main = params['main_z_full']
z_offset = params['offset_z_full']
z_tower = params['tower_z_full']
z_attach_upper = params['main_pontoon_attach_upper']*(z_main[-1] - z_main[0]) + z_main[0]
z_attach_lower = params['main_pontoon_attach_lower']*(z_main[-1] - z_main[0]) + z_main[0]
z_fairlead = -params['fairlead']
m_main = params['main_mass']
m_offset = params['offset_mass']
m_tower = params['tower_mass_section']
m_rna = params['rna_mass']
F_rna = params['rna_force']
M_rna = params['rna_moment']
I_rna = params['rna_I']
cg_rna = params['rna_cg']
rhoWater = params['water_density']
V_main = params['main_displaced_volume']
V_offset = params['offset_displaced_volume']
F_hydro_main = params['main_hydrostatic_force']
F_hydro_offset = params['offset_hydrostatic_force']
z_cb_main = params['main_center_of_buoyancy']
z_cb_offset = params['offset_center_of_buoyancy']
cg_main = np.r_[0.0, 0.0, params['main_center_of_mass']]
cg_offset = np.r_[0.0, 0.0, params['offset_center_of_mass']]
cg_tower = np.r_[0.0, 0.0, params['tower_center_of_mass']]
n_connect = int(params['number_of_mooring_connections'])
n_lines = int(params['mooring_lines_per_connection'])
K_mooring = np.diag( params['mooring_stiffness'] )
I_mooring = params['mooring_moments_of_inertia']
F_mooring = params['mooring_neutral_load']
R_fairlead = params['fairlead_radius']
gamma_f = params['gamma_f']
gamma_m = params['gamma_m']
gamma_n = params['gamma_n']
gamma_b = params['gamma_b']
gamma_fatigue = params['gamma_fatigue']
# Quick ratio for unknowns
unknowns['main_connection_ratio'] = params['connection_ratio_max'] - R_od_pontoon/R_od_main
unknowns['offset_connection_ratio'] = params['connection_ratio_max'] - R_od_pontoon/R_od_offset
unknowns['pontoon_wave_height_depth_margin'] = np.abs(np.array([z_attach_lower, z_attach_upper])) - np.abs(params['Hs'])
# --- INPUT CHECKS -----
# If something fails, we have to tell the optimizer this design is no good
def bad_input():
unknowns['structural_frequencies'] = 1e30 * np.ones(NFREQ)
unknowns['top_deflection'] = 1e30
unknowns['substructure_mass'] = 1e30
unknowns['structural_mass'] = 1e30
unknowns['total_displacement'] = 1e30
unknowns['z_center_of_buoyancy'] = 0.0
unknowns['substructure_center_of_mass'] = 1e30 * np.ones(3)
unknowns['structure_center_of_mass'] = 1e30 * np.ones(3)
unknowns['total_force'] = 1e30 * np.ones(3)
unknowns['total_moment'] = 1e30 * np.ones(3)
unknowns['tower_stress'] = 1e30 * np.ones(m_tower.shape)
unknowns['tower_shell_buckling'] = 1e30 * np.ones(m_tower.shape)
unknowns['tower_global_buckling'] = 1e30 * np.ones(m_tower.shape)
unknowns['main_stress'] = 1e30 * np.ones(m_main.shape)
unknowns['main_shell_buckling'] = 1e30 * np.ones(m_main.shape)
unknowns['main_global_buckling'] = 1e30 * np.ones(m_main.shape)
unknowns['offset_stress'] = 1e30 * np.ones(m_offset.shape)
unknowns['offset_shell_buckling'] = 1e30 * np.ones(m_offset.shape)
unknowns['offset_global_buckling'] = 1e30 * np.ones(m_offset.shape)
return
        # There is no truss if there are no offset columns
if ncolumn == 0:
crossAttachFlag = lowerAttachFlag = upperAttachFlag = False
lowerRingFlag = upperRingFlag = outerCrossFlag = False
# Must have symmetry for the substructure to work out
if ncolumn in [1, 2] or ncolumn > 7:
bad_input()
return
        # Must have symmetry in mooring loading too
if (ncolumn > 0) and (n_connect > 0) and (ncolumn != n_connect):
bad_input()
return
        # If there are offset columns, must have attachment pontoons (having only ring pontoons doesn't make sense)
if (ncolumn > 0) and (not crossAttachFlag) and (not lowerAttachFlag) and (not upperAttachFlag):
bad_input()
return
        # Must have a lower ring if there are outer cross braces
if (ncolumn > 0) and outerCrossFlag and (not lowerRingFlag):
bad_input()
return
# ---GEOMETRY---
# Compute frustum angles
angle_tower = np.arctan( np.diff(R_od_tower) / np.diff(z_tower) )
angle_main = np.arctan( np.diff(R_od_main) / np.diff(z_main) )
angle_offset = np.arctan( np.diff(R_od_offset) / np.diff(z_offset) )
# ---NODES---
# Add nodes for main column: Using 4 nodes/3 elements per section
# Make sure there is a node at upper and lower attachment points
mainBeginID = 0 + 1
if ncolumn > 0:
idx = find_nearest(z_main, z_attach_lower)
z_main[idx] = z_attach_lower
mainLowerID = idx + 1
idx = find_nearest(z_main, z_attach_upper)
z_main[idx] = z_attach_upper
mainUpperID = idx + 1
mainEndID = z_main.size
freeboard = z_main[-1]
fairleadID = []
# Need mooring attachment point if just running a spar
if ncolumn == 0:
idx = find_nearest(z_main, z_fairlead)
z_main[idx] = z_fairlead
fairleadID.append( idx + 1 )
znode = np.copy( z_main )
xnode = np.zeros(znode.shape)
ynode = np.zeros(znode.shape)
rnode = np.copy( R_od_main)
towerBeginID = mainEndID
myz = np.zeros(len(z_tower)-1)
xnode = np.append(xnode, myz)
ynode = np.append(ynode, myz)
znode = np.append(znode, z_tower[1:] + freeboard )
rnode = np.append(rnode, R_od_tower[1:])
towerEndID = xnode.size
# Create dummy node so that the tower isn't the last in a chain.
# This avoids a Frame3DD bug
dummyID = xnode.size + 1
xnode = np.append(xnode, 0.0)
ynode = np.append(ynode, 0.0)
znode = np.append(znode, znode[-1]+1.0 )
rnode = np.append(rnode, 0.0)
# Get x and y positions of surrounding offset columns
offsetLowerID = []
offsetUpperID = []
offsetx = R_semi * np.cos( np.linspace(0, 2*np.pi, ncolumn+1) )
offsety = R_semi * np.sin( np.linspace(0, 2*np.pi, ncolumn+1) )
offsetx = offsetx[:-1]
offsety = offsety[:-1]
# Add in offset column nodes around the circle, make sure there is a node at the fairlead
idx = find_nearest(z_offset, z_fairlead)
myones = np.ones(z_offset.shape)
for k in range(ncolumn):
offsetLowerID.append( xnode.size + 1 )
fairleadID.append( xnode.size + idx + 1 )
xnode = np.append(xnode, offsetx[k]*myones)
ynode = np.append(ynode, offsety[k]*myones)
znode = np.append(znode, z_offset )
rnode = np.append(rnode, R_od_offset )
offsetUpperID.append( xnode.size )
# Add nodes where mooring lines attach, which may be offset from columns
mooringx = R_fairlead * np.cos( np.linspace(0, 2*np.pi, n_connect+1) )[:-1]
mooringy = R_fairlead * np.sin( np.linspace(0, 2*np.pi, n_connect+1) )[:-1]
mooringID = xnode.size + 1 + np.arange(n_connect, dtype=np.int32)
xnode = np.append(xnode, mooringx)
ynode = np.append(ynode, mooringy)
znode = np.append(znode, z_fairlead*np.ones(n_connect) )
rnode = np.append(rnode, np.zeros(n_connect))
# Add nodes midway around outer ring for cross bracing
if outerCrossFlag and ncolumn > 0:
crossx = 0.5*(offsetx + np.roll(offsetx,1))
crossy = 0.5*(offsety + np.roll(offsety,1))
crossOuterLowerID = xnode.size + np.arange(ncolumn) + 1
crossOuterLowerID = crossOuterLowerID.tolist()
xnode = np.append(xnode, crossx)
ynode = np.append(ynode, crossy)
znode = np.append(znode, z_offset[0]*np.ones(ncolumn))
rnode = np.append(rnode, np.zeros(ncolumn))
#crossOuterUpperID = xnode.size + np.arange(ncolumn) + 1
#xnode = np.append(xnode, crossx)
#ynode = np.append(ynode, crossy)
#znode = np.append(znode, z_offset[-1]*np.ones(ncolumn))
# Create matrix for easy referencing
nodeMat = np.c_[xnode, ynode, znode]
# To aid in wrap-around references
if ncolumn > 0:
offsetLowerID.append( offsetLowerID[0] )
offsetUpperID.append( offsetUpperID[0] )
if outerCrossFlag:
crossOuterLowerID.append( crossOuterLowerID[0] )
# ---ELEMENTS / EDGES---
# To accurately capture pontoon length and stiffness, for each connection we create 2 additional nodes,
# where the pontoon "line" intersects the main and offset shells. Highly stiff "ghost" elements are created
        # from the column centerline to the shell; these are not counted toward the pontoon weight.
# The actual pontoon only extends from shell boundary to shell boundary.
N1 = np.array([], dtype=np.int32)
N2 = np.array([], dtype=np.int32)
gN1 = np.array([], dtype=np.int32)
gN2 = np.array([], dtype=np.int32)
# Lower connection from central main column to offset columns
if lowerAttachFlag:
lowerAttachEID = N1.size + 1
for k in range(ncolumn):
tempID1 = mainLowerID
tempID2 = offsetLowerID[k]
add1, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add1[-1] > z_main[0]) and (add1[-1] < z_main[-1]) ):
tempID1 = xnode.size + 1
xnode = np.append(xnode, add1[0])
ynode = np.append(ynode, add1[1])
znode = np.append(znode, add1[2])
gN1 = np.append(gN1, mainLowerID)
gN2 = np.append(gN2, tempID1)
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetLowerID[k])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
# Upper connection from central main column to offset columns
if upperAttachFlag:
upperAttachEID = N1.size + 1
for k in range(ncolumn):
tempID1 = mainUpperID
tempID2 = offsetUpperID[k]
add1, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add1[-1] > z_main[0]) and (add1[-1] < z_main[-1]) ):
tempID1 = xnode.size + 1
xnode = np.append(xnode, add1[0])
ynode = np.append(ynode, add1[1])
znode = np.append(znode, add1[2])
gN1 = np.append(gN1, mainUpperID)
gN2 = np.append(gN2, tempID1)
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetUpperID[k])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
# Cross braces from lower central main column to upper offset columns
if crossAttachFlag:
crossAttachEID = N1.size + 1
for k in range(ncolumn):
tempID1 = mainLowerID
tempID2 = offsetUpperID[k]
add1, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add1[-1] > z_main[0]) and (add1[-1] < z_main[-1]) ):
tempID1 = xnode.size + 1
xnode = np.append(xnode, add1[0])
ynode = np.append(ynode, add1[1])
znode = np.append(znode, add1[2])
gN1 = np.append(gN1, mainLowerID)
gN2 = np.append(gN2, tempID1)
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetUpperID[k])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
# Will be used later to convert from local member c.s. to global
cross_angle = np.arctan( (z_attach_upper - z_attach_lower) / R_semi )
# Lower ring around offset columns
if lowerRingFlag:
lowerRingEID = N1.size + 1
for k in range(ncolumn):
tempID1 = offsetLowerID[k]
tempID2 = offsetLowerID[k+1]
add1, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add1[-1] > z_offset[0]) and (add1[-1] < z_offset[-1]) ):
tempID1 = xnode.size + 1
xnode = np.append(xnode, add1[0])
ynode = np.append(ynode, add1[1])
znode = np.append(znode, add1[2])
gN1 = np.append(gN1, offsetLowerID[k])
gN2 = np.append(gN2, tempID1)
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetLowerID[k+1])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
# Upper ring around offset columns
if upperRingFlag:
upperRingEID = N1.size + 1
for k in range(ncolumn):
tempID1 = offsetUpperID[k]
tempID2 = offsetUpperID[k+1]
add1, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add1[-1] > z_offset[0]) and (add1[-1] < z_offset[-1]) ):
tempID1 = xnode.size + 1
xnode = np.append(xnode, add1[0])
ynode = np.append(ynode, add1[1])
znode = np.append(znode, add1[2])
gN1 = np.append(gN1, offsetUpperID[k])
gN2 = np.append(gN2, tempID1)
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetUpperID[k+1])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
# Outer cross braces (only one ghost node per connection)
if outerCrossFlag:
outerCrossEID = N1.size + 1
for k in range(ncolumn):
tempID1 = crossOuterLowerID[k]
tempID2 = offsetUpperID[k]
_, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetUpperID[k])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
_, add2 = ghostNodes(nodeMat[crossOuterLowerID[k+1]-1,:], nodeMat[offsetUpperID[k]-1,:], rnode[crossOuterLowerID[k+1]-1], rnode[offsetUpperID[k]-1])
tempID = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetUpperID[k])
gN2 = np.append(gN2, tempID)
N1 = np.append(N1, crossOuterLowerID[k+1] )
N2 = np.append(N2, tempID )
tempID1 = crossOuterLowerID[k+1]
tempID2 = offsetUpperID[k]
_, add2 = ghostNodes(nodeMat[tempID1-1,:], nodeMat[tempID2-1,:], rnode[tempID1-1], rnode[tempID2-1])
if ( (add2[-1] > z_offset[0]) and (add2[-1] < z_offset[-1]) ):
tempID2 = xnode.size + 1
xnode = np.append(xnode, add2[0])
ynode = np.append(ynode, add2[1])
znode = np.append(znode, add2[2])
gN1 = np.append(gN1, offsetUpperID[k])
gN2 = np.append(gN2, tempID2)
# Pontoon connection
N1 = np.append(N1, tempID1 )
N2 = np.append(N2, tempID2 )
# TODO: Parameterize these for upper, lower, cross connections
# Properties for the inner connectors
mytube = Tube(2.0*R_od_pontoon, t_wall_pontoon)
Ax = mytube.Area * np.ones(N1.shape)
As = mytube.Asx * np.ones(N1.shape)
Jx = mytube.J0 * np.ones(N1.shape)
I = mytube.Jxx * np.ones(N1.shape)
S = mytube.S * np.ones(N1.shape)
C = mytube.C * np.ones(N1.shape)
modE = E * np.ones(N1.shape)
modG = G * np.ones(N1.shape)
roll = 0.0 * np.ones(N1.shape)
dens = rho * np.ones(N1.shape)
# Add in fairlead support elements
mooringEID = N1.size + 1
mytube = Tube(2.0*R_od_fairlead, t_wall_fairlead)
for k in range(n_connect):
kfair = 0 if ncolumn==0 else k
add1, _ = ghostNodes(nodeMat[fairleadID[kfair]-1,:], nodeMat[mooringID[k]-1,:], rnode[fairleadID[kfair]-1], rnode[mooringID[k]-1])
tempID = xnode.size + 1
xnode = np.append(xnode, add1[0])
ynode = np.append(ynode, add1[1])
znode = np.append(znode, add1[2])
gN1 = np.append(gN1, fairleadID[kfair])
gN2 = np.append(gN2, tempID)
N1 = np.append(N1, tempID )
N2 = np.append(N2, mooringID[k] )
Ax = np.append(Ax , mytube.Area )
As = np.append(As , mytube.Asx )
Jx = np.append(Jx , mytube.J0 )
I = np.append(I , mytube.Jxx )
S = np.append(S , mytube.S )
C = np.append(C , mytube.C )
modE = np.append(modE, E )
modG = np.append(modG, G )
roll = np.append(roll, 0.0 )
dens = np.append(dens, rho )
        # Now mock up cylindrical columns as truss members even though the long, slender assumption breaks down
        # Use an equivalent density derived from the externally computed section masses so mass is not double counted
# First get geometry in each of the elements
R_od_main,_ = nodal2sectional( R_od_main )
R_od_offset,_ = nodal2sectional( R_od_offset )
R_od_tower,_ = nodal2sectional( R_od_tower )
# Main column
mainEID = N1.size + 1
mytube = Tube(2.0*R_od_main, t_wall_main)
myrange = np.arange(R_od_main.size)
myones = np.ones(myrange.shape)
mydens = m_main / mytube.Area / np.diff(z_main) + eps
N1 = np.append(N1 , myrange + mainBeginID )
N2 = np.append(N2 , myrange + mainBeginID + 1)
Ax = np.append(Ax , mytube.Area )
As = np.append(As , mytube.Asx )
Jx = np.append(Jx , mytube.J0 )
I = np.append(I , mytube.Jxx )
S = np.append(S , mytube.S )
C = np.append(C , mytube.C )
modE = np.append(modE, E*myones )
modG = np.append(modG, G*myones )
roll = np.append(roll, np.zeros(myones.shape) )
dens = np.append(dens, mydens )
# Tower column
towerEID = N1.size + 1
mytube = Tube(2.0*R_od_tower, t_wall_tower)
myrange = np.arange(R_od_tower.size)
myones = np.ones(myrange.shape)
mydens = m_tower / mytube.Area / np.diff(z_tower) + eps
N1 = np.append(N1 , myrange + towerBeginID )
N2 = np.append(N2 , myrange + towerBeginID + 1)
Ax = np.append(Ax , mytube.Area )
As = np.append(As , mytube.Asx )
Jx = np.append(Jx , mytube.J0 )
I = np.append(I , mytube.Jxx )
S = np.append(S , mytube.S )
C = np.append(C , mytube.C )
modE = np.append(modE, E*myones )
modG = np.append(modG, G*myones )
roll = np.append(roll, np.zeros(myones.shape) )
dens = np.append(dens, mydens )
# Dummy element
dummyEID = N1.size + 1
N1 = np.append(N1 , towerEndID )
N2 = np.append(N2 , dummyID )
Ax = np.append(Ax , Ax[-1] )
As = np.append(As , As[-1] )
Jx = np.append(Jx , Jx[-1] )
I = np.append(I , I[-1] )
S = np.append(S , S[-1] )
C = np.append(C , C[-1] )
modE = np.append(modE, 1e20 )
modG = np.append(modG, 1e20 )
roll = np.append(roll, 0.0 )
dens = np.append(dens, 1e-6 )
# Offset column
offsetEID = []
mytube = Tube(2.0*R_od_offset, t_wall_offset)
myrange = np.arange(R_od_offset.size)
myones = np.ones(myrange.shape)
mydens = m_offset / mytube.Area / np.diff(z_offset) + eps
for k in range(ncolumn):
offsetEID.append( N1.size + 1 )
N1 = np.append(N1 , myrange + offsetLowerID[k] )
N2 = np.append(N2 , myrange + offsetLowerID[k] + 1)
Ax = np.append(Ax , mytube.Area )
As = np.append(As , mytube.Asx )
Jx = np.append(Jx , mytube.J0 )
I = np.append(I , mytube.Jxx )
S = np.append(S , mytube.S )
C = np.append(C , mytube.C )
modE = np.append(modE, E*myones )
modG = np.append(modG, G*myones )
roll = np.append(roll, np.zeros(myones.shape) )
dens = np.append(dens, mydens ) # Mass added below
# Ghost elements between centerline nodes and column shells
ghostEID = N1.size + 1
myones = np.ones(gN1.shape)
N1 = np.append(N1 , gN1 )
N2 = np.append(N2 , gN2 )
Ax = np.append(Ax , 1e-1*myones )
As = np.append(As , 1e-1*myones )
Jx = np.append(Jx , 1e-1*myones )
I = np.append(I , 1e-1*myones )
S = np.append(S , 1e-1*myones )
C = np.append(C , 1e-1*myones )
modE = np.append(modE, 1e20*myones )
modG = np.append(modG, 1e20*myones )
roll = np.append(roll, 0.0 *myones )
dens = np.append(dens, 1e-6*myones )
# Create Node Data object
nnode = 1 + np.arange(xnode.size)
myrnode = np.zeros(xnode.shape) # z-spacing too narrow for use of rnodes
nodes = frame3dd.NodeData(nnode, xnode, ynode, znode, myrnode)
nodeMat = np.c_[xnode, ynode, znode]
# Create Element Data object
nelem = 1 + np.arange(N1.size)
elements = frame3dd.ElementData(nelem, N1, N2, Ax, As, As, Jx, I, I, modE, modG, roll, dens)
# Store data for plotting, also handy for operations below
plotMat = np.zeros((mainEID, 3, 2))
myn1 = N1[:mainEID]
myn2 = N2[:mainEID]
plotMat[:,:,0] = nodeMat[myn1-1,:]
plotMat[:,:,1] = nodeMat[myn2-1,:]
unknowns['plot_matrix'] = plotMat
# Compute length and center of gravity for each element for use below
elemL = np.sqrt( np.sum( np.diff(plotMat, axis=2)**2.0, axis=1) ).flatten()
elemCoG = 0.5*np.sum(plotMat, axis=2)
# Get vertical angle as a measure of welding prep difficulty
elemAng = np.arccos( np.diff(plotMat[:,-1,:], axis=-1).flatten() / elemL )
# ---Options object---
shear = True # 1: include shear deformation
geom = False # 1: include geometric stiffness
dx = -1 # x-axis increment for internal forces, -1 to skip
other = frame3dd.Options(shear, geom, dx)
# ---LOAD CASES---
# Extreme loading
gx = 0.0
gy = 0.0
gz = -gravity
load = frame3dd.StaticLoadCase(gx, gy, gz)
# Wind + Wave loading in local main / offset / tower c.s.
Px_main, Py_main, Pz_main = params['main_Pz'], params['main_Py'], -params['main_Px'] # switch to local c.s.
Px_offset, Py_offset, Pz_offset = params['offset_Pz'], params['offset_Py'], -params['offset_Px'] # switch to local c.s.
Px_tower, Py_tower, Pz_tower = params['tower_Pz'], params['tower_Py'], -params['tower_Px'] # switch to local c.s.
epsOff = 1e-5
# Get mass right- offsets, stiffeners, tower, rna, etc.
# Also account for buoyancy loads
# Also apply wind/wave loading as trapezoidal on each element
# NOTE: Loading is in local element coordinates 0-L, x is along element
# Base
nrange = np.arange(R_od_main.size, dtype=np.int32)
EL = mainEID + nrange
Ux = F_hydro_main / np.diff(z_main)
x1 = np.zeros(nrange.shape)
x2 = np.diff(z_main) - epsOff # subtract small number b.c. of precision
wx1, wx2 = Px_main[:-1], Px_main[1:]
wy1, wy2 = Py_main[:-1], Py_main[1:]
wz1, wz2 = Pz_main[:-1], Pz_main[1:]
# Tower
nrange = np.arange(R_od_tower.size, dtype=np.int32)
EL = np.append(EL, towerEID + nrange)
Ux = np.append(Ux, np.zeros(nrange.shape))
x1 = np.append(x1, np.zeros(nrange.shape))
x2 = np.append(x2, np.diff(z_tower) - epsOff)
wx1 = np.append(wx1, Px_tower[:-1])
wx2 = np.append(wx2, Px_tower[1:])
wy1 = np.append(wy1, Py_tower[:-1])
wy2 = np.append(wy2, Py_tower[1:])
wz1 = np.append(wz1, Pz_tower[:-1])
wz2 = np.append(wz2, Pz_tower[1:])
# Buoyancy- offset columns
nrange = np.arange(R_od_offset.size, dtype=np.int32)
for k in range(ncolumn):
EL = np.append(EL, offsetEID[k] + nrange)
Ux = np.append(Ux, F_hydro_offset / np.diff(z_offset) )
x1 = np.append(x1, np.zeros(nrange.shape))
x2 = np.append(x2, np.diff(z_offset) - epsOff)
wx1 = np.append(wx1, Px_offset[:-1])
wx2 = np.append(wx2, Px_offset[1:])
wy1 = np.append(wy1, Py_offset[:-1])
wy2 = np.append(wy2, Py_offset[1:])
wz1 = np.append(wz1, Pz_offset[:-1])
wz2 = np.append(wz2, Pz_offset[1:])
        # Apply the trapezoidal wind/wave loads now that the element enumeration is complete
Uz = Uy = np.zeros(Ux.shape)
xx1 = xy1 = xz1 = x1
xx2 = xy2 = xz2 = x2
load.changeTrapezoidalLoads(EL, xx1, xx2, wx1, wx2, xy1, xy2, wy1, wy2, xz1, xz2, wz1, wz2)
# Buoyancy for fully submerged members
nrange = np.arange(ncolumn, dtype=np.int32)
Frange = np.pi * R_od_pontoon**2 * rhoWater * gravity
F_truss = 0.0
z_cb = np.zeros((3,))
if ncolumn > 0 and znode[offsetLowerID[0]-1] < 0.0:
if lowerAttachFlag:
EL = np.append(EL, lowerAttachEID + nrange)
Ux = np.append(Ux, np.zeros(nrange.shape))
Uy = np.append(Uy, np.zeros(nrange.shape))
Uz = np.append(Uz, Frange * np.ones(nrange.shape))
F_truss += Frange * elemL[lowerAttachEID-1] * ncolumn
z_cb += Frange * elemL[lowerAttachEID-1] * ncolumn * elemCoG[lowerAttachEID-1,:]
if lowerRingFlag:
EL = np.append(EL, lowerRingEID + nrange)
Ux = np.append(Ux, np.zeros(nrange.shape))
Uy = np.append(Uy, np.zeros(nrange.shape))
Uz = np.append(Uz, Frange * np.ones(nrange.shape))
F_truss += Frange * elemL[lowerRingEID-1] * ncolumn
z_cb += Frange * elemL[lowerRingEID-1] * ncolumn * elemCoG[lowerRingEID-1]
if crossAttachFlag:
factor = np.minimum(1.0, (0.0 - z_attach_lower) / (znode[offsetUpperID[0]-1] - z_attach_lower) )
EL = np.append(EL, crossAttachEID + nrange)
Ux = np.append(Ux, factor * Frange * np.sin(cross_angle) * np.ones(nrange.shape))
Uy = np.append(Uy, np.zeros(nrange.shape))
Uz = np.append(Uz, factor * Frange * np.cos(cross_angle) * np.ones(nrange.shape))
F_truss += factor * Frange * elemL[crossAttachEID-1] * ncolumn
z_cb += factor * Frange * elemL[crossAttachEID-1] * ncolumn * elemCoG[crossAttachEID-1,:]
if outerCrossFlag:
factor = np.minimum(1.0, (0.0 - znode[mainLowerID-1]) / (znode[offsetUpperID[0]-1] - znode[mainLowerID-1]) )
# TODO: This one will take a little more math
#EL = np.append(EL, outerCrossEID + np.arange(2*ncolumn, dtype=np.int32))
#Ux = np.append(Ux, np.zeros(nrange.shape))
#Uy = np.append(Uy, np.zeros(nrange.shape))
#Uz = np.append(Uz, factor * Frange * np.ones(nrange.shape))
F_truss += factor * Frange * elemL[outerCrossEID-1] * ncolumn
z_cb += factor * Frange * elemL[outerCrossEID-1] * ncolumn * elemCoG[outerCrossEID-1,:]
if ncolumn > 0 and znode[offsetUpperID[0]-1] < 0.0:
if upperAttachFlag:
EL = np.append(EL, upperAttachEID + nrange)
Ux = np.append(Ux, np.zeros(nrange.shape))
Uy = np.append(Uy, np.zeros(nrange.shape))
Uz = np.append(Uz, Frange * np.ones(nrange.shape))
F_truss += Frange * elemL[upperAttachEID-1] * ncolumn
z_cb += Frange * elemL[upperAttachEID-1] * ncolumn * elemCoG[upperAttachEID-1,:]
if upperRingFlag:
EL = np.append(EL, upperRingEID + nrange)
Ux = np.append(Ux, np.zeros(nrange.shape))
Uy = np.append(Uy, np.zeros(nrange.shape))
Uz = np.append(Uz, Frange * np.ones(nrange.shape))
F_truss += Frange * elemL[upperRingEID-1] * ncolumn
z_cb += Frange * elemL[upperRingEID-1] * ncolumn * elemCoG[upperRingEID-1,:]
# Now do fairlead supports
nrange = np.arange(n_connect, dtype=np.int32)
Frange = np.pi * R_od_fairlead**2 * rhoWater * gravity
EL = np.append(EL, mooringEID + nrange)
Ux = np.append(Ux, np.zeros(nrange.shape))
Uy = np.append(Uy, np.zeros(nrange.shape))
Uz = np.append(Uz, Frange * np.ones(nrange.shape))
F_truss += Frange * elemL[mooringEID-1] * n_connect
z_cb += Frange * elemL[mooringEID-1] * n_connect * elemCoG[mooringEID-1,:]
        # Finally add in all the uniform buoyancy loads
load.changeUniformLoads(EL, Ux, Uy, Uz)
# Point loading for rotor thrust and mooring lines
# Point loads for mooring loading
nnode_connect = len(fairleadID)
nF = np.array(fairleadID, dtype=np.int32)
Fx = np.zeros(nnode_connect)
Fy = np.zeros(nnode_connect)
Fz = np.zeros(nnode_connect)
Mxx = np.zeros(nnode_connect)
Myy = np.zeros(nnode_connect)
Mzz = np.zeros(nnode_connect)
for k in range(n_connect):
iline = 0 if nnode_connect==1 else k
idx = k*n_lines + np.arange(n_lines)
Fx[iline] += F_mooring[idx,0].sum()
Fy[iline] += F_mooring[idx,1].sum()
Fz[iline] += F_mooring[idx,2].sum()
        # Note: extra moment from mass is accounted for below
nF = np.append(nF , towerEndID)
Fx = np.append(Fx , F_rna[0] )
Fy = np.append(Fy , F_rna[1] )
Fz = np.append(Fz , F_rna[2] )
Mxx = np.append(Mxx, M_rna[0] )
Myy = np.append(Myy, M_rna[1] )
Mzz = np.append(Mzz, M_rna[2] )
# Add in all point loads
load.changePointLoads(nF, Fx, Fy, Fz, Mxx, Myy, Mzz)
# ---MASS SUMMARIES---
# Mass summaries now that we've tabulated all of the pontoons
m_substructure = m_main.sum() + ncolumn*m_offset.sum()
if mainEID > 1: # Have some pontoons or fairlead supports
# Buoyancy assembly from incremental calculations above
V_pontoon = F_truss/rhoWater/gravity
z_cb = z_cb[-1] / F_truss if F_truss > 0.0 else 0.0
unknowns['pontoon_displacement'] = V_pontoon
unknowns['pontoon_center_of_buoyancy'] = z_cb
# Sum up mass and compute CofG. Frame3DD does mass, but not CG
ind = mainEID-1
m_total = Ax[:ind] * rho * elemL[:ind]
m_pontoon = m_total.sum() #mass.struct_mass
m_substructure += m_pontoon
cg_pontoon = np.sum( m_total[:,np.newaxis] * elemCoG[:ind,:], axis=0 ) / m_total.sum()
# Compute costs based on "Optimum Design of Steel Structures" by Farkas and Jarmai
# All dimensions for correlations based on mm, not meters.
k_m = params['material_cost_rate'] #1.1 # USD / kg carbon steel plate
k_f = params['labor_cost_rate'] #1.0 # USD / min labor
k_p = params['painting_cost_rate'] #USD / m^2 painting
npont = m_total.size
# Cost Step 1) Cutting and grinding tube ends
theta_g = 3.0 # Difficulty factor
# Cost Step 2) Fillet welds with SMAW (shielded metal arc welding)
# Multiply by 2 for both ends worth of work
theta_w = 3.0 # Difficulty factor
# Labor-based expenses
K_f = k_f * 2 * ( manufacture.steel_tube_cutgrind_time(theta_g, R_od_pontoon, t_wall_pontoon, elemAng[:ind]) +
manufacture.steel_tube_welding_time(theta_w, npont+ncolumn+1, m_substructure, 2*np.pi*R_od_pontoon, t_wall_pontoon) )
# Cost Step 3) Painting
theta_p = 2.0
S_pont = 2.0 * np.pi * R_od_pontoon * elemL[:ind]
K_p = k_p * theta_p * S_pont.sum()
# Material cost
K_m = k_m * m_pontoon
# Total cost
c_pontoon = K_m + K_f + K_p
unknowns['pontoon_mass'] = m_pontoon
unknowns['pontoon_cost'] = c_pontoon
unknowns['pontoon_cost_rate'] = 1e3*c_pontoon/m_pontoon
unknowns['pontoon_center_of_mass'] = cg_pontoon[-1]
else:
V_pontoon = z_cb = m_pontoon = 0.0
cg_pontoon = np.zeros(3)
# Summary of mass and volumes
unknowns['total_displacement'] = V_main.sum() + ncolumn*V_offset.sum() + V_pontoon
unknowns['substructure_mass'] = m_substructure
unknowns['substructure_center_of_mass'] = (ncolumn*m_offset.sum()*cg_offset + m_main.sum()*cg_main +
m_pontoon*cg_pontoon) / unknowns['substructure_mass']
m_total = unknowns['substructure_mass'] + m_rna + m_tower.sum()
unknowns['structural_mass'] = m_total
unknowns['structure_center_of_mass'] = (m_rna*cg_rna + m_tower.sum()*cg_tower +
unknowns['substructure_mass']*unknowns['substructure_center_of_mass']) / m_total
# Find cb (center of buoyancy) for whole system
z_cb = (V_main.sum()*z_cb_main + ncolumn*V_offset.sum()*z_cb_offset + V_pontoon*z_cb) / unknowns['total_displacement']
unknowns['z_center_of_buoyancy'] = z_cb
# ---REACTIONS---
# Find node closest to CG
cg_dist = np.sum( (nodeMat - unknowns['structure_center_of_mass'][np.newaxis,:])**2, axis=1 )
cg_node = np.argmin(cg_dist)
# Free=0, Rigid=inf
rid = np.array([mainBeginID]) #np.array(fairleadID) #np.array([cg_node+1]) #
Rx = np.inf * np.ones(rid.shape)
Ry = np.inf * np.ones(rid.shape)
Rz = np.inf * np.ones(rid.shape)
Rxx = np.inf * np.ones(rid.shape)
Ryy = np.inf * np.ones(rid.shape)
Rzz = np.inf * np.ones(rid.shape)
rid = np.append(rid, fairleadID)
Rx = np.append(Rx, K_mooring[0] /nnode_connect * np.ones(nnode_connect) )
Ry = np.append(Ry, K_mooring[1] /nnode_connect * np.ones(nnode_connect) )
Rz = np.append(Rz, K_mooring[2] /nnode_connect * np.ones(nnode_connect) )
Rxx = np.append(Rxx, K_mooring[3]/nnode_connect * np.ones(nnode_connect) )
Ryy = np.append(Ryy, K_mooring[4]/nnode_connect * np.ones(nnode_connect) )
Rzz = np.append(Rzz, K_mooring[5]/nnode_connect * np.ones(nnode_connect) )
# Get reactions object from frame3dd
reactions = frame3dd.ReactionData(rid, Rx, Ry, Rz, Rxx, Ryy, Rzz, rigid=np.inf)
# ---FRAME3DD INSTANCE---
# Initialize frame3dd object
self.myframe = frame3dd.Frame(nodes, reactions, elements, other)
# Add in extra mass of rna
inode = np.array([towerEndID], dtype=np.int32) # rna
m_extra = np.array([m_rna])
Ixx = np.array([ I_rna[0] ])
Iyy = np.array([ I_rna[1] ])
Izz = np.array([ I_rna[2] ])
Ixy = np.array([ I_rna[3] ])
Ixz = np.array([ I_rna[4] ])
Iyz = np.array([ I_rna[5] ])
rhox = np.array([ cg_rna[0] ])
rhoy = np.array([ cg_rna[1] ])
rhoz = np.array([ cg_rna[2] ])
self.myframe.changeExtraNodeMass(inode, m_extra, Ixx, Iyy, Izz, Ixy, Ixz, Iyz, rhox, rhoy, rhoz, True)
# Store load case into frame 3dd object
self.myframe.addLoadCase(load)
# ---DYNAMIC ANALYSIS---
# This needs to be compared to FAST until I trust it enough to use it.
# Have to test BCs, results, mooring stiffness, mooring mass/MOI, etc
nM = 0 #NFREQ # number of desired dynamic modes of vibration
Mmethod = 1 # 1: subspace Jacobi 2: Stodola
lump = 0 # 0: consistent mass ... 1: lumped mass matrix
tol = 1e-5 # mode shape tolerance
shift = 0.0 # shift value ... for unrestrained or partially restrained structures
#self.myframe.enableDynamics(nM, Mmethod, lump, tol, shift)
# ---DEBUGGING---
#self.myframe.write('debug.3dd') # For debugging
# ---RUN ANALYSIS---
try:
displacements, forces, reactions, internalForces, mass, modal = self.myframe.run()
except:
bad_input()
return
# --OUTPUTS--
nE = nelem.size
iCase = 0
        # natural frequencies - catch NaNs and zeros
temp = np.zeros(NFREQ) #np.array( modal.freq )
temp[np.isnan(temp)] = 0.0
unknowns['structural_frequencies'] = temp + eps
# deflections due to loading (from cylinder top and wind/wave loads)
unknowns['top_deflection'] = displacements.dx[iCase, towerEndID-1] # in yaw-aligned direction
# Find cg (center of gravity) for whole system
F_main = -1.0 * np.array([reactions.Fx.sum(), reactions.Fy.sum(), reactions.Fz.sum()])
M_main = -1.0 * np.array([reactions.Mxx.sum(), reactions.Myy.sum(), reactions.Mzz.sum()])
        r_cg_main = np.array([0.0, 0.0, (znode[mainBeginID-1] - unknowns['structure_center_of_mass'][-1])])
delta = np.cross(r_cg_main, F_main)
unknowns['total_force'] = F_main
unknowns['total_moment'] = M_main + delta
myM= np.cross(np.array([0.0, 0.0, (z_tower[-1] - unknowns['structure_center_of_mass'][-1])]), F_rna)
# shear and bending (convert from local to global c.s.)
Nx = forces.Nx[iCase, 1::2]
Vy = forces.Vy[iCase, 1::2]
Vz = forces.Vz[iCase, 1::2]
Tx = forces.Txx[iCase, 1::2]
My = forces.Myy[iCase, 1::2]
Mz = forces.Mzz[iCase, 1::2]
        # Compute axial and shear stresses in elements given Frame3DD outputs and some geometry data
# Method comes from Section 7.14 of Frame3DD documentation
# http://svn.code.sourceforge.net/p/frame3dd/code/trunk/doc/Frame3DD-manual.html#structuralmodeling
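        # In symbols (restated here for clarity; S is the section modulus and
        # C the torsion shear constant of the tube cross section):
        #   sigma_axial = Nx/Ax - sqrt(Myy^2 + Mzz^2)/S
        #   sigma_shear = sqrt(Vy^2 + Vz^2)/As + Txx/C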
M = np.sqrt(My*My + Mz*Mz)
sigma_ax = Nx/Ax - M/S
sigma_sh = np.sqrt(Vy*Vy + Vz*Vz)/As + Tx/C
# Extract pontoon for stress check
idx = range(mainEID-1)
npon = len(idx)
if len(idx) > 0:
qdyn_pontoon = np.max( np.abs( np.r_[params['main_qdyn'], params['offset_qdyn']] ) )
sigma_ax_pon = sigma_ax[idx]
sigma_sh_pon = sigma_sh[idx]
sigma_h_pon = util.hoopStress(2*R_od_pontoon, t_wall_pontoon, qdyn_pontoon) * np.ones(sigma_ax_pon.shape)
unknowns['pontoon_stress'][:npon] = util.vonMisesStressUtilization(sigma_ax_pon, sigma_h_pon, sigma_sh_pon,
gamma_f*gamma_m*gamma_n, sigma_y)
# Extract tower for Eurocode checks
idx = towerEID-1 + np.arange(R_od_tower.size, dtype=np.int32)
L_reinforced = params['tower_buckling_length'] * np.ones(idx.shape)
sigma_ax_tower = sigma_ax[idx]
sigma_sh_tower = sigma_sh[idx]
qdyn_tower,_ = nodal2sectional( params['tower_qdyn'] )
sigma_h_tower = util.hoopStress(2*R_od_tower, t_wall_tower*np.cos(angle_tower), qdyn_tower)
unknowns['tower_stress:axial'] = sigma_ax_tower
unknowns['tower_stress:shear'] = sigma_sh_tower
unknowns['tower_stress:hoop'] = sigma_h_tower
unknowns['tower_stress:hoopStiffen'] = util.hoopStressEurocode(z_tower, 2*R_od_tower, t_wall_tower, L_reinforced, qdyn_tower)
unknowns['tower_stress'] = util.vonMisesStressUtilization(sigma_ax_tower, sigma_h_tower, sigma_sh_tower,
gamma_f*gamma_m*gamma_n, sigma_y)
sigma_y_vec = sigma_y * np.ones(idx.shape)
unknowns['tower_shell_buckling'] = util.shellBucklingEurocode(2*R_od_tower, t_wall_tower, sigma_ax_tower, sigma_h_tower, sigma_sh_tower,
L_reinforced, modE[idx], sigma_y_vec, gamma_f, gamma_b)
tower_height = z_tower[-1] - z_tower[0]
unknowns['tower_global_buckling'] = util.bucklingGL(2*R_od_tower, t_wall_tower, Nx[idx], M[idx], tower_height, modE[idx], sigma_y_vec, gamma_f, gamma_b)
# Extract main column for Eurocode checks
idx = mainEID-1 + np.arange(R_od_main.size, dtype=np.int32)
L_reinforced = params['main_buckling_length']
sigma_ax_main = sigma_ax[idx]
sigma_sh_main = sigma_sh[idx]
qdyn_main,_ = nodal2sectional( params['main_qdyn'] )
sigma_h_main = util.hoopStress(2*R_od_main, t_wall_main*np.cos(angle_main), qdyn_main)
unknowns['main_stress:axial'] = sigma_ax_main
unknowns['main_stress:shear'] = sigma_sh_main
unknowns['main_stress:hoop'] = sigma_h_main
unknowns['main_stress:hoopStiffen'] = util.hoopStressEurocode(z_main, 2*R_od_main, t_wall_main, L_reinforced, qdyn_main)
unknowns['main_stress'] = util.vonMisesStressUtilization(sigma_ax_main, sigma_h_main, sigma_sh_main,
gamma_f*gamma_m*gamma_n, sigma_y)
sigma_y_vec = sigma_y * np.ones(idx.shape)
unknowns['main_shell_buckling'] = util.shellBucklingEurocode(2*R_od_main, t_wall_main, sigma_ax_main, sigma_h_main, sigma_sh_main,
L_reinforced, modE[idx], sigma_y_vec, gamma_f, gamma_b)
main_height = z_main[-1] - z_main[0]
unknowns['main_global_buckling'] = util.bucklingGL(2*R_od_main, t_wall_main, Nx[idx], M[idx], main_height, modE[idx], sigma_y_vec, gamma_f, gamma_b)
# Extract offset column for Eurocode checks
if ncolumn > 0:
idx = offsetEID[0]-1 + np.arange(R_od_offset.size, dtype=np.int32)
L_reinforced = params['offset_buckling_length']
sigma_ax_offset = sigma_ax[idx]
sigma_sh_offset = sigma_sh[idx]
qdyn_offset,_ = nodal2sectional( params['offset_qdyn'] )
sigma_h_offset = util.hoopStress(2*R_od_offset, t_wall_offset*np.cos(angle_offset), qdyn_offset)
unknowns['offset_stress:axial'] = sigma_ax_offset
unknowns['offset_stress:shear'] = sigma_sh_offset
unknowns['offset_stress:hoop'] = sigma_h_offset
unknowns['offset_stress:hoopStiffen'] = util.hoopStressEurocode(z_offset, 2*R_od_offset, t_wall_offset, L_reinforced, qdyn_offset)
unknowns['offset_stress'] = util.vonMisesStressUtilization(sigma_ax_offset, sigma_h_offset, sigma_sh_offset,
gamma_f*gamma_m*gamma_n, sigma_y)
sigma_y_vec = sigma_y * np.ones(idx.shape)
unknowns['offset_shell_buckling'] = util.shellBucklingEurocode(2*R_od_offset, t_wall_offset, sigma_ax_offset, sigma_h_offset, sigma_sh_offset,
L_reinforced, modE[idx], sigma_y_vec, gamma_f, gamma_b)
offset_height = z_offset[-1] - z_offset[0]
unknowns['offset_global_buckling'] = util.bucklingGL(2*R_od_offset, t_wall_offset, Nx[idx], M[idx], offset_height, modE[idx], sigma_y_vec, gamma_f, gamma_b)
# TODO: FATIGUE
# Base and offset columns get API stress/buckling checked in Column Group because that takes into account stiffeners
class TrussIntegerToBoolean(Component):
def __init__(self):
super(TrussIntegerToBoolean,self).__init__()
self.add_param('cross_attachment_pontoons_int', val=1, desc='Inclusion of pontoons that connect the bottom of the central main to the tops of the outer offset columns')
self.add_param('lower_attachment_pontoons_int', val=1, desc='Inclusion of pontoons that connect the central main to the outer offset columns at their bottoms')
self.add_param('upper_attachment_pontoons_int', val=1, desc='Inclusion of pontoons that connect the central main to the outer offset columns at their tops')
self.add_param('lower_ring_pontoons_int', val=1, desc='Inclusion of pontoons that ring around outer offset columns at their bottoms')
self.add_param('upper_ring_pontoons_int', val=1, desc='Inclusion of pontoons that ring around outer offset columns at their tops')
        self.add_param('outer_cross_pontoons_int', val=1, desc='Inclusion of cross pontoons that brace between adjacent outer offset columns')
self.add_output('cross_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the bottom of the central main to the tops of the outer offset columns', pass_by_obj=True)
self.add_output('lower_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central main to the outer offset columns at their bottoms', pass_by_obj=True)
self.add_output('upper_attachment_pontoons', val=True, desc='Inclusion of pontoons that connect the central main to the outer offset columns at their tops', pass_by_obj=True)
self.add_output('lower_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer offset columns at their bottoms', pass_by_obj=True)
self.add_output('upper_ring_pontoons', val=True, desc='Inclusion of pontoons that ring around outer offset columns at their tops', pass_by_obj=True)
        self.add_output('outer_cross_pontoons', val=True, desc='Inclusion of cross pontoons that brace between adjacent outer offset columns', pass_by_obj=True)
def solve_nonlinear(self, params, unknowns, resids):
unknowns['cross_attachment_pontoons'] = (int(params['cross_attachment_pontoons_int']) == 1)
unknowns['lower_attachment_pontoons'] = (int(params['lower_attachment_pontoons_int']) == 1)
unknowns['upper_attachment_pontoons'] = (int(params['upper_attachment_pontoons_int']) == 1)
unknowns['lower_ring_pontoons'] = (int(params['lower_ring_pontoons_int']) == 1)
unknowns['upper_ring_pontoons'] = (int(params['upper_ring_pontoons_int']) == 1)
unknowns['outer_cross_pontoons'] = (int(params['outer_cross_pontoons_int']) == 1)
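# Minimal usage sketch for TrussIntegerToBoolean (illustrative; assumes a bare
# OpenMDAO 1.x Problem -- the names below are made up for the example):
#
#     prob = Problem(root=Group())
#     prob.root.add('intbool', TrussIntegerToBoolean(), promotes=['*'])
#     prob.setup(check=False)
#     prob['cross_attachment_pontoons_int'] = 0
#     prob.run()
#     assert prob['cross_attachment_pontoons'] == False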
# -----------------
# Assembly
# -----------------
class Loading(Group):
def __init__(self, nFull, nFullTow):
super(Loading, self).__init__()
# Independent variables that are unique to TowerSE
self.add('main_pontoon_attach_lower', IndepVarComp('main_pontoon_attach_lower', 0.0), promotes=['*'])
self.add('main_pontoon_attach_upper', IndepVarComp('main_pontoon_attach_upper', 0.0), promotes=['*'])
self.add('pontoon_outer_diameter', IndepVarComp('pontoon_outer_diameter', 0.0), promotes=['*'])
self.add('pontoon_wall_thickness', IndepVarComp('pontoon_wall_thickness', 0.0), promotes=['*'])
self.add('outer_cross_pontoons_int', IndepVarComp('outer_cross_pontoons_int', 1), promotes=['*'])
self.add('cross_attachment_pontoons_int', IndepVarComp('cross_attachment_pontoons_int', 1), promotes=['*'])
self.add('lower_attachment_pontoons_int', IndepVarComp('lower_attachment_pontoons_int', 1), promotes=['*'])
self.add('upper_attachment_pontoons_int', IndepVarComp('upper_attachment_pontoons_int', 1), promotes=['*'])
self.add('lower_ring_pontoons_int', IndepVarComp('lower_ring_pontoons_int', 1), promotes=['*'])
self.add('upper_ring_pontoons_int', IndepVarComp('upper_ring_pontoons_int', 1), promotes=['*'])
self.add('connection_ratio_max', IndepVarComp('connection_ratio_max', 0.0), promotes=['*'])
self.add('fairlead_support_outer_diameter', IndepVarComp('fairlead_support_outer_diameter', 0.0), promotes=['*'])
self.add('fairlead_support_wall_thickness', IndepVarComp('fairlead_support_wall_thickness', 0.0), promotes=['*'])
# All the components
self.add('loadingWind', PowerWind(nFullTow), promotes=['z0','Uref','shearExp','zref'])
self.add('windLoads', CylinderWindDrag(nFullTow), promotes=['cd_usr','beta'])
self.add('intbool', TrussIntegerToBoolean(), promotes=['*'])
self.add('frame', FloatingFrame(nFull, nFullTow), promotes=['*'])
# Connections for geometry and mass
self.connect('loadingWind.z', ['windLoads.z', 'tower_z_full'])
self.connect('windLoads.d', ['tower_d_full'])
self.connect('loadingWind.U', 'windLoads.U')
# connections to distLoads1
self.connect('windLoads.windLoads_Px', 'tower_Px')
self.connect('windLoads.windLoads_Py', 'tower_Py')
self.connect('windLoads.windLoads_Pz', 'tower_Pz')
self.connect('windLoads.windLoads_qdyn', 'tower_qdyn')
|
{"hexsha": "b813242d5965386cb6046c1f42c22cda8e611be3", "size": 72320, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/floatingse/loading.py", "max_stars_repo_name": "mattEhall/FloatingSE", "max_stars_repo_head_hexsha": "f13e0f38a7742ea00a8f446a9ebf505dcf7acd42", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-27T15:09:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-27T15:09:02.000Z", "max_issues_repo_path": "src/floatingse/loading.py", "max_issues_repo_name": "mattEhall/FloatingSE", "max_issues_repo_head_hexsha": "f13e0f38a7742ea00a8f446a9ebf505dcf7acd42", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-05-17T14:57:05.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-17T14:57:05.000Z", "max_forks_repo_path": "src/floatingse/loading.py", "max_forks_repo_name": "mattEhall/FloatingSE", "max_forks_repo_head_hexsha": "f13e0f38a7742ea00a8f446a9ebf505dcf7acd42", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2015-12-26T01:06:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-08T20:19:57.000Z", "avg_line_length": 57.3968253968, "max_line_length": 194, "alphanum_fraction": 0.6005254425, "include": true, "reason": "import numpy", "num_tokens": 19866}
|
"""
Fits PSPL model with parallax ground-based and satellite data
using EMCEE sampler. We're using photometry of OGLE-2014-BLG-0939 from:
Yee et al. 2015 ApJ 802, 76
https://ui.adsabs.harvard.edu/abs/2015ApJ...802...76Y/abstract
and explore only 1 of 4 degenerate models.
It is similar to example_06_fit_parallax_EMCEE.py
"""
from os.path import join as join
import sys
import numpy as np
try:
import emcee
except ImportError as err:
print(err)
print("\nEMCEE could not be imported.")
print("Get it from: http://dfm.io/emcee/current/user/install/")
print("and re-run the script")
sys.exit(1)
import matplotlib.pyplot as plt
import MulensModel as mm
# Define likelihood functions
def ln_like(theta, event, parameters_to_fit):
"""likelihood function"""
    for (index, parameter) in enumerate(parameters_to_fit):
        setattr(event.model.parameters, parameter, theta[index])
return -0.5 * event.get_chi2()
def ln_prior(theta, parameters_to_fit):
"""priors - we only reject obviously wrong models"""
if theta[parameters_to_fit.index("t_E")] < 0.:
return -np.inf
return 0.0
def ln_prob(theta, event, parameters_to_fit):
"""combines likelihood and priors"""
ln_prior_ = ln_prior(theta, parameters_to_fit)
if not np.isfinite(ln_prior_):
return -np.inf
ln_like_ = ln_like(theta, event, parameters_to_fit)
    # If the source fluxes are negative, the chi2 can evaluate to NaN;
    # treat such models as if they had been rejected by the priors.
if np.isnan(ln_like_):
return -np.inf
return ln_prior_ + ln_like_
# Read the data (note that we do not rescale errorbars here):
dir_ = join(mm.DATA_PATH, "photometry_files", "OB140939")
file_ground = join(dir_, "ob140939_OGLE.dat")
file_spitzer = join(dir_, "ob140939_Spitzer.dat")
data_ground = mm.MulensData(file_name=file_ground,
plot_properties={'label': 'OGLE'})
# Here is the main difference - we provide the ephemeris for Spitzer:
file_spitzer_eph = join(
mm.DATA_PATH, 'ephemeris_files', 'Spitzer_ephemeris_01.dat')
data_spitzer = mm.MulensData(file_name=file_spitzer,
ephemerides_file=file_spitzer_eph,
plot_properties={'label': 'Spitzer'})
# For parallax calculations we need event coordinates:
coords = "17:47:12.25 -21:22:58.7"
# Starting parameters:
params = {
't_0': 2456830., 'u_0': 0.8, 't_E': 25.,
'pi_E_N': 0., 'pi_E_E': 0.,
't_0_par': 2456836.06}
my_model = mm.Model(params, coords=coords)
my_event = mm.Event(datasets=[data_ground, data_spitzer], model=my_model)
# Which parameters do we want to fit?
parameters_to_fit = ["t_0", "u_0", "t_E", "pi_E_N", "pi_E_E"]
# And remember to provide dispersions to draw a starting set of points
sigmas = [0.1, 0.01, 0.1, 0.05, 0.05]
# Initializations for EMCEE
n_dim = len(parameters_to_fit)
n_walkers = 20
n_steps = 1500
n_burn = 500
# Including the set of n_walkers starting points:
start_1 = [params[p] for p in parameters_to_fit]
start = [start_1 + np.random.randn(n_dim) * sigmas
for i in range(n_walkers)]
# Run emcee (this should take about a minute):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, ln_prob, args=(my_event, parameters_to_fit))
sampler.run_mcmc(start, n_steps)
# Remove burn-in samples and reshape:
samples = sampler.chain[:, n_burn:, :].reshape((-1, n_dim))
# Results:
results = np.percentile(samples, [16, 50, 84], axis=0)
print("Fitted parameters:")
fmt = "{:} : {:.5f} {:.5f} {:.5f}"
for (i, p) in enumerate(parameters_to_fit):
r = results[1, i]
print(fmt.format(p, r, results[2, i]-r, r-results[0, i]))
# We extract best model parameters and chi2 from my_event:
print("\nSmallest chi2 model:")
best = [my_event.best_chi2_parameters[p] for p in parameters_to_fit]
print(*["{:.4f}".format(b) if isinstance(b, float) else b.value for b in best])
print("{:.4f}".format(my_event.best_chi2))
for (i, parameter) in enumerate(parameters_to_fit):
setattr(my_event.model.parameters, parameter, best[i])
# In order to make plots, we need a Model instance
# that has satellite ephemeris:
params = my_event.model.parameters.parameters
space_model = mm.Model({**params}, coords=coords,
ephemerides_file=file_spitzer_eph)
# Prepare plots:
my_event.plot_model(subtract_2450000=True)
fluxes = my_event.model.get_ref_fluxes()  # We need this to ensure that fluxes
# are scaled properly.
space_model.plot_lc(subtract_2450000=True,
f_source=fluxes[0], f_blend=fluxes[1])
my_event.plot_data(subtract_2450000=True)
plt.legend()
plt.xlim(6800., 6880.)
plt.figure()
my_event.model.plot_trajectory()
space_model.plot_trajectory()
space_model.plot_caustics(color='black')
plt.axis('equal')
plt.xlim(-1.1, 1.1)
plt.show()
|
{"hexsha": "57f02c0d3bcec16d6b5d386575b1f1467349638b", "size": 4765, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/example_12_fit_satellite_parallax_EMCEE.py", "max_stars_repo_name": "pmehta08/MulensModel", "max_stars_repo_head_hexsha": "261738c445a8d116d09c90e65f6e847cfc8a7ad8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/example_12_fit_satellite_parallax_EMCEE.py", "max_issues_repo_name": "pmehta08/MulensModel", "max_issues_repo_head_hexsha": "261738c445a8d116d09c90e65f6e847cfc8a7ad8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/example_12_fit_satellite_parallax_EMCEE.py", "max_forks_repo_name": "pmehta08/MulensModel", "max_forks_repo_head_hexsha": "261738c445a8d116d09c90e65f6e847cfc8a7ad8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8620689655, "max_line_length": 79, "alphanum_fraction": 0.7038824764, "include": true, "reason": "import numpy", "num_tokens": 1344}
|
(*-------------------------------------------*
| The Dining Mathematicians in CSP-Prover |
| August 2004 |
| December 2004 (modified) |
| November 2005 (modified) |
| April 2006 (modified) |
| May 2016 (modified) |
| |
| Yoshinao Isobe (AIST JAPAN) |
*-------------------------------------------*)
theory DM1_Imp_def
imports CSP_F
begin
(*****************************************************************
         1. defines Imp
 *****************************************************************)
(*********************************************************
ODD and EVEN
*********************************************************)
definition
EVEN :: "int => bool"
where
EVEN_def : "EVEN n == (n mod 2 = 0)"
definition
ODD :: "int => bool"
where
ODD_def : "ODD n == (n mod 2 = 1)"
abbreviation
EVENs :: "int set" ("EVENs")
where
"EVENs == Collect EVEN"
abbreviation
ODDs :: "int set" ("ODDs")
where
"ODDs == Collect ODD"
(*********************************************************
event
*********************************************************)
datatype Event = Eat0 | Back0 | End0 | RD0 int | WR0 int
| Eat1 | Back1 | End1 | RD1 int | WR1 int
| NUM int
lemma expand_Event_fun[simp]:
"ALL Ef Eg. ((Ef::int => Event) = Eg) = (ALL n. Ef n = Eg n)"
apply (simp add: fun_eq_iff)
done
abbreviation CH0 :: "Event set"
where "CH0 == (range RD0) Un (range WR0)"
abbreviation CH1 :: "Event set"
where "CH1 == (range RD1) Un (range WR1)"
abbreviation OBS :: "Event set"
where "OBS == {Eat0, Back0, End0, Eat1, Back1, End1}"
(*********************************************************
function
*********************************************************)
primrec
getInt :: "Event => int"
where
"getInt (Eat0) = 0"
|"getInt (Eat1) = 0"
|"getInt (Back0) = 0"
|"getInt (Back1) = 0"
|"getInt (End0) = 0"
|"getInt (End1) = 0"
|"getInt (RD0 n) = n"
|"getInt (RD1 n) = n"
|"getInt (WR0 n) = n"
|"getInt (WR1 n) = n"
|"getInt (NUM n) = n"
(*********************************************************
Parallel system definition
*********************************************************)
(*
defs FPmode_def [simp]: "FPmode == CMSmode"
*)
overloading FPmode ==
"FPmode :: fpmode"
begin
definition "FPmode == CMSmode"
end
declare FPmode_def [simp]
datatype ImpName = VAR int | TH0 | EAT0 int | TH1 | EAT1 int
primrec
Impfun :: "ImpName => (ImpName, Event) proc"
where
"Impfun (TH0) = RD0 ? n -> IF (EVEN n)
THEN (Eat0 -> $(EAT0 n))
ELSE (Back0 -> $(TH0))"
|"Impfun (TH1) = RD1 ? n -> IF (ODD n)
THEN (Eat1 -> $(EAT1 n))
ELSE (Back1 -> $(TH1))"
|"Impfun (EAT0 n) = End0 -> WR0 ! (n div 2) -> $(TH0)"
|"Impfun (EAT1 n) = End1 -> WR1 ! (3 * n + 1) -> $(TH1)"
|"Impfun (VAR n) = WR0 ? n -> $(VAR n)
[+] WR1 ? n -> $(VAR n)
[+] RD0 ! n -> $(VAR n)
[+] RD1 ! n -> $(VAR n)"
(*
defs (overloaded)
Set_Impfun_def [simp]: "PNfun == Impfun"
*)
overloading Set_Impfun ==
"PNfun :: (ImpName, Event) pnfun"
begin
definition "PNfun == Impfun"
end
declare Set_Impfun_def [simp]
definition
Imp :: "int => (ImpName, Event) proc"
where
Imp_def: "Imp == (%n. ($TH0 |[CH0]| $(VAR n) |[CH1]| $TH1) -- (CH0 Un CH1))"
(*********************************************************
To unfold "range" and "syntactic sugar", ...
*********************************************************)
declare image_iff [simp]
declare inj_on_def [simp]
declare csp_prefix_ss_def [simp]
(*********************************************************
gProc lemmas (routine work)
*********************************************************)
lemma guarded_Imp[simp]:
"guardedfun Impfun"
by (simp add: guardedfun_def, rule allI, induct_tac p, simp_all)+
(*********************************************************
Lemmas
*********************************************************)
(*** int lemmas ***)
lemma int_le_inc: "ALL (n::int) m. n <= m --> n = m | n+1 <= m"
by (auto)
lemma mod_2_not_le: "ALL (n::int). ~(2 <= n mod 2)"
by (simp add: linorder_not_le)
lemma mod_2_or: "ALL (n::int). n mod 2 = 0 | n mod 2 =1"
apply (intro allI)
apply (insert int_le_inc)
apply (drule_tac x="0" in spec)
apply (drule_tac x="n mod 2" in spec)
apply (simp)
apply (erule disjE)
apply (simp)
apply (insert int_le_inc)
apply (drule_tac x="1" in spec)
apply (drule_tac x="n mod 2" in spec)
apply (simp)
done
(*** ODD and EVEN lemmas ***)
lemma EVEN_not_ODD[simp]: "ALL n. EVEN n = (~ ODD n)"
apply (auto simp add: EVEN_def ODD_def )
done
lemma ODD_add_1: "ALL n. (n mod 2 = 1) --> ((n + 1) mod 2 = (0::int))"
apply (simp add: mod_add_eq[THEN sym])
(*
apply (simp add: mod_add_eq) (in Isabelle 2016)
*)
(*
apply (simp add: zmod_zadd_left_eq) (in Isabelle 2008)
*)
done
lemma ODD_EX: "ALL m. ODD m --> (EX n. m = 2 * n + 1)"
apply (intro allI impI)
apply (simp add: ODD_def)
apply (insert ODD_add_1)
apply (drule_tac x="m" in spec)
apply (simp)
apply (simp add: zmod_eq_0_iff)
apply (erule exE)
apply (rule_tac x="q - 1" in exI)
apply (simp)
done
lemma ODD_to_EVEN[simp]: "ODD n ==> EVEN (3 * n + 1)"
apply (insert ODD_EX)
apply (drule_tac x="n" in spec)
apply (simp add: ODD_def EVEN_def)
apply (erule exE)
(*
apply (simp add: zmod_zadd_left_eq)
apply (simp add: zmod_zmult1_eq')
*)
apply (simp add: mod_add_eq)
(* apply (simp add: mod_mult_eq) *)
done
lemma ODD_to_notODD[simp]: "ODD n ==> ~ ODD (3 * n + 1)"
apply (insert ODD_to_EVEN[of n])
apply (simp)
done
(*** range ***)
lemma fold_range: "(EX a. x = f a) = (x : range f)"
apply (auto)
done
(*********************************************************
unfolding & folding process names
*********************************************************)
(*** unfold VAR ***)
lemma VAR:
"$(VAR n)
=F ? x:insert (RD1 n) (insert (RD0 n) ((range WR0) Un (range WR1)))
-> IF (x : (range WR0) | x : (range WR1))
THEN $(VAR (getInt x)) ELSE $(VAR n)"
apply (cspF_unwind_left)
apply (cspF_step_left)+
apply (auto)
apply (cspF_simp)+
done
lemmas VAR_simp = VAR[simplified]
(*
lemmas unfold_Imp_rules = VAR_simp
lemmas fold_Imp_rules = VAR_simp[THEN cspF_sym]
*)
(*** unfold TH0 ***)
lemma TH0:
"($TH0)
=F RD0 ? n -> IF (EVEN n) THEN (Eat0 -> ($(EAT0 n)))
ELSE (Back0 -> ($TH0))"
apply (cspF_unwind_left)
done
lemmas TH0_simp = TH0[simplified]
lemmas unfold_Imp_rules0 = VAR_simp TH0_simp
lemmas fold_Imp_rules0 = VAR_simp[THEN cspF_sym] TH0_simp[THEN cspF_sym]
(*** unfold TH1 ***)
lemma TH1:
"($TH1)
=F RD1 ? n -> IF (ODD n) THEN (Eat1 -> ($(EAT1 n)))
ELSE (Back1 -> ($TH1))"
by (cspF_unwind_left)
lemmas TH1_simp = TH1[simplified]
lemmas unfold_Imp_rules1 = unfold_Imp_rules0 TH1_simp
lemmas fold_Imp_rules1 = fold_Imp_rules0 TH1_simp[THEN cspF_sym]
(*** unfold EAT0 ***)
lemma EAT0:
"($(EAT0 n))
=F ? x:{End0} -> WR0 (n div 2) -> ($TH0)"
by (cspF_unwind_left)
lemmas EAT0_simp = EAT0[simplified]
lemmas unfold_Imp_rules2 = unfold_Imp_rules1 EAT0_simp
lemmas fold_Imp_rules2 = fold_Imp_rules1 EAT0_simp[THEN cspF_sym]
(*** unfold EAT1 ***)
lemma EAT1:
"($(EAT1 n))
=F ? x:{End1} -> WR1 ! (3 * n + 1) -> ($TH1)"
by (cspF_unwind_left)
lemmas EAT1_simp = EAT1[simplified]
lemmas unfold_Imp_rules3 = unfold_Imp_rules2 EAT1_simp
lemmas fold_Imp_rules3 = fold_Imp_rules2 EAT1_simp[THEN cspF_sym]
end
|
{"author": "yoshinao-isobe", "repo": "CSP-Prover", "sha": "806fbe330d7e23279675a2eb351e398cb8a6e0a8", "save_path": "github-repos/isabelle/yoshinao-isobe-CSP-Prover", "path": "github-repos/isabelle/yoshinao-isobe-CSP-Prover/CSP-Prover-806fbe330d7e23279675a2eb351e398cb8a6e0a8/DM/DM1_Imp_def.thy"}
|
import QuantumOptics
const qo = QuantumOptics
using Test, IonSim
using Suppressor
@suppress_err begin
# setup system
C = Ca40(["S-1/2", "D-1/2"])
chain = LinearChain(
ions=[C, C], com_frequencies=(x=2,y=2,z=1), vibrational_modes=(x=[1], y=[], z=[1])
)
T = Trap(configuration=chain)
modes = get_vibrational_modes(chain)
@testset "operators -- VibrationalMode operators" begin
# test creation of VibrationalMode functions by comparison with equiv. QO functions
fb = qo.FockBasis(10)
@test create(modes[1]).data == qo.create(fb).data
@test destroy(modes[1]).data == qo.destroy(fb).data
@test number(modes[1]).data == qo.number(fb).data
n = rand(0:10)
@test fockstate(modes[1], n).data == qo.fockstate(fb, n).data
# displacement operator
α = rand(0:1e-3:7) + im*rand(0:1e-3:7)
@test displace(modes[1], α).data ≈ qo.displace(fb, α).data
fb2 = qo.FockBasis(200)
modes[1].N = 200
i = rand(1:5); j = rand(1:5)
@test displace(modes[1], α, method="analytic").data[i,j] ≈ qo.displace(fb2, α).data[i,j] rtol=1e-3
# test that mean excitation of thermalstate is as expected
modes[1].N = 500
n̄ = abs(2randn())
@test expect(number(modes[1]), thermalstate(modes[1], n̄)) ≈ n̄
@test expect(number(modes[1]), thermalstate(modes[1], n̄, method="analytic")) ≈ n̄
# test coherentstate matches QO results
α = 10*(randn() + im*randn())
    @test coherentstate(modes[1], α).data ≈ qo.coherentstate(qo.FockBasis(modes[1].N), α).data
# test coherenthermalstate
N = 500
modes[1].N = N
n̄ = rand(0:1e-6:10)
@test coherentthermalstate(modes[1], n̄, 0, method="analytic").data ≈ thermalstate(modes[1], n̄).data
@test coherentthermalstate(modes[1], 0, α, method="analytic").data ≈ dm(coherentstate(modes[1], α)).data rtol=1e-3*N^2
@test coherentthermalstate(modes[1], n̄, 0).data ≈ thermalstate(modes[1], n̄).data
@test coherentthermalstate(modes[1], 0, α).data ≈ dm(coherentstate(modes[1], α)).data rtol=1e-3*N^2
# shouldn't be able to have a mean phonon occupation greater than Hilbert space dimension
@test_throws AssertionError coherentthermalstate(modes[1], N+1, 0)
@test_throws AssertionError coherentthermalstate(modes[1], 0, N+1)
@test_throws AssertionError coherentstate(modes[1], N+1)
@test_throws AssertionError thermalstate(modes[1], N+1)
@test_throws AssertionError displace(modes[1], N+1)
end
@testset "operators -- Ion operators" begin
# test that ionstate constructs the appropriate state for a single ion
@test ionstate(C, "S-1/2").data == ionstate(C, 1).data == ComplexF64[1; 0]
@test ionstate(C, "D-1/2").data == ionstate(C, 2).data == ComplexF64[0; 1]
# test ionstate for an IonConfiguration input
@test ionstate(chain, "S-1/2", "D-1/2").data == kron(ComplexF64[0; 1], ComplexF64[1; 0])
@test ionstate(chain, 1, 2).data == kron(ComplexF64[0; 1], ComplexF64[1; 0])
# test ionstate for an Trap input
@test ionstate(T, "S-1/2", "D-1/2").data == kron(ComplexF64[0; 1], ComplexF64[1; 0])
# test sigma(ion::Ion, ψ1::T, ψ2::T) where {T<:Union{String,Int}}
@test sigma(C, "S-1/2", "D-1/2").data == sigma(C, 1, 2).data == ComplexF64[0 1; 0 0]
# test sigma(ion::Ion, ψ1::T<:Union{String,Int})
@test sigma(C, "S-1/2").data == sigma(C, 1).data == ComplexF64[1 0; 0 0]
# test ionprojector for IonConfiguration input
ψ = ionprojector(chain, "S-1/2", "D-1/2", only_ions=true)
@test ψ.data == kron(ComplexF64[0; 1] * ComplexF64[0; 1]', ComplexF64[1; 0] * ComplexF64[1; 0]')
@test ionprojector(chain, "S-1/2", "D-1/2") == ψ ⊗ one(modes[1]) ⊗ one(modes[2])
@test ψ == ionprojector(T, "S-1/2", "D-1/2", only_ions=true)
end
@testset "operators -- internal functions" begin
# test _pf(s, n, m)
s = rand(1:12)
n = rand(1:s)
m = rand(1:s)
v1 = IonSim._pf(s, n, m)
v2 = 1im^n * (-1im)^m * factorial(s) / ((s+1) * √(factorial(m) * factorial(n)))
    @test isapprox(v1, v2)
# test He(n) gives the correct Hermite polynomial for order 10
    @test IonSim._He(10) == [-945, 0, 4725, 0, -3150, 0, 630, 0, -45, 0, 1]
# test fHe(x, He)
    @test IonSim._fHe(1, 10) == sum(IonSim._He(10))
# _alaguerre
L32 = (1/6) * (-2^3 + 3*(2+3)*2^2 - 3*(2+2)*(2+3)*2 + (2+1)*(2+2)*(2+3))
@test IonSim._alaguerre(2, 3, 2) ≈ L32
# _Dnm
ξ = im * exp(2π*im)
d = displace(modes[1], ξ).data
diff = 0.0
for i in 1:100, j in 1:100
diff += abs(d[i, j] - IonSim._Dnm(ξ, i, j))
end
    @test diff < 100  # average entrywise |difference| < 0.01 over the 100×100 block
# Note: displace() is an approximation, whereas _Dnm should not be
end
end # end suppress
|
{"hexsha": "9c57e296379b96bb1d8a8771b4f77ebf786e647b", "size": 4655, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_operators.jl", "max_stars_repo_name": "HaeffnerLab/IonSim.jl", "max_stars_repo_head_hexsha": "884b0196ace4623e91bcd957f293d26d6723c071", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2020-02-25T23:34:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T03:15:54.000Z", "max_issues_repo_path": "test/test_operators.jl", "max_issues_repo_name": "HaeffnerLab/IonSim.jl", "max_issues_repo_head_hexsha": "884b0196ace4623e91bcd957f293d26d6723c071", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2020-03-25T22:51:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T22:24:45.000Z", "max_forks_repo_path": "test/test_operators.jl", "max_forks_repo_name": "HaeffnerLab/IonSim.jl", "max_forks_repo_head_hexsha": "884b0196ace4623e91bcd957f293d26d6723c071", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-05-25T22:21:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T18:19:47.000Z", "avg_line_length": 39.1176470588, "max_line_length": 122, "alphanum_fraction": 0.6221267454, "num_tokens": 1783}
|
#! /usr/bin/env python
DESCRIPTION = 'pyPanair: A pre / post processor for PANAIR'
DISTNAME = 'pyPanair'
MAINTAINER = 'STakanashi'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/SaTa999/pyPanair'
VERSION = '0.8.0'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
maintainer=MAINTAINER,
description=DESCRIPTION,
license=LICENSE,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['pyPanair', 'pyPanair.preprocess', 'pyPanair.postprocess'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.5',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows'],
)
|
{"hexsha": "bc723b91ae8985647718dd439fc38030c3c6bedf", "size": 1592, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "SaTa999/pyPanair", "max_stars_repo_head_hexsha": "49c987d92a447903ac28cc19da8ad91aa5843466", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-02-28T11:36:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T08:31:44.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "DriesVerstraete/pyPanair", "max_issues_repo_head_hexsha": "49c987d92a447903ac28cc19da8ad91aa5843466", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-06-03T10:10:30.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-03T10:10:30.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "DriesVerstraete/pyPanair", "max_forks_repo_head_hexsha": "49c987d92a447903ac28cc19da8ad91aa5843466", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-05-23T09:33:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T20:08:16.000Z", "avg_line_length": 28.4285714286, "max_line_length": 80, "alphanum_fraction": 0.6092964824, "include": true, "reason": "import numpy,import scipy", "num_tokens": 329}
|
```python
import numpy as np
import sympy as sym
import numba
import pydae.build as db
```
```python
```
### Electromechanical differential equations
\begin{eqnarray}
f_1 &=& \dot \delta = \Omega_b \left( \omega - \omega_s \right) \\
f_2 &=& \dot \omega = \frac{1}{2H} \left( p_m - p_e - D \left( \omega - \omega_s \right) \right)
\end{eqnarray}
### Electric rotor differential equations
\begin{eqnarray}
f_3 &=& \dot e_q' = \frac{1}{T'_{d0}} \left( -e'_q - \left(X_d - X'_d \right) i_d + v_f^\star \right) \\
f_4 &=& \dot e'_d = \frac{1}{T'_{q0}} \left( -e'_d + \left(X_q - X'_q \right) i_q \right)
\end{eqnarray}
### Park transform
\begin{eqnarray}
v_d &=& v_t \sin\left(\delta - \theta_t\right) \\
v_q &=& v_t \cos\left(\delta - \theta_t\right) \\
p_e &=& \left( v_q + R_a i_q \right) i_q + \left( v_d + R_a i_d \right) i_d
\end{eqnarray}
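A quick numerical sanity check of the Park transform above (illustrative values only; the actual operating point comes from the initialization problem below):
```python
import numpy as np
delta_n, theta_n, vt_n = 0.5, 0.1, 1.0   # arbitrary test values
vd_n = vt_n*np.sin(delta_n - theta_n)
vq_n = vt_n*np.cos(delta_n - theta_n)
assert abs(vd_n**2 + vq_n**2 - vt_n**2) < 1e-12   # the rotation preserves |v|^2
```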
### Stator equations
\begin{eqnarray}
g_1 &=& v_q + R_a i_q + X'_d i_d - e'_q\\
g_2 &=& v_d + R_a i_d - X'_q i_q - e'_d\\
\end{eqnarray}
### Powers
\begin{eqnarray}
g_3 &=& i_d v_d + i_q v_q - p_t \\
g_4 &=& i_d v_q - i_q v_d - q_t
\end{eqnarray}
### Network equations
\begin{eqnarray}
g_5 &=& p_t - \left(v_t v_0 \sin\left(\theta_t - \theta_0\right)\right)/X_l\\
g_6 &=& q_t + \left(v_t v_0 \cos\left(\theta_t - \theta_0\right)\right)/X_l - v_t^2/X_l \\
g_7 &=& -p_l - \left(v_0 v_t \sin\left(\theta_0 - \theta_t\right)\right)/X_l\\
g_8 &=& -q_l + \left(v_0 v_t \cos\left(\theta_0 - \theta_t\right)\right)/X_l - v_0^2/X_l
\end{eqnarray}
## System definition
```python
params_dict = {'X_d':1.81,'X1d':0.3, 'T1d0':8.0,    # synchronous machine d-axis parameters
               'X_q':1.76,'X1q':0.65,'T1q0':1.0,    # synchronous machine q-axis parameters
'R_a':0.003,
'X_l': 0.02,
'H':3.5,'D':0.01,
'Omega_b':2*np.pi*50,'omega_s':1.0,
'K_delta':0.1,'K_v':1e-3
}
u_ini_dict = {'v_t':0.8,'theta_t':1.0,'p_l':0.0,'q_l':0.0} # for the initialization problem
u_run_dict = {'p_m':0.8,'v_f':1.0,'p_l':0.0,'q_l':0.0}  # for the running problem (its inputs differ from those of the initialization problem)
x_list = ['delta','omega','e1q','e1d'] # dynamic states
y_ini_list = ['i_d','i_q','p_t','q_t','p_m','v_f','v_0','theta_0']
y_run_list = ['i_d','i_q','p_t','q_t','v_t','theta_t','v_0','theta_0']
sys_vars = {'params':params_dict,
'u_list':u_run_dict,
'x_list':x_list,
'y_list':y_run_list}
exec(db.sym_gen_str())  # exec to generate the required symbolic variables and constants
```
```python
# auxiliar equations
v_d = v_t*sin(delta - theta_t) # park
v_q = v_t*cos(delta - theta_t) # park
p_e = i_d*(v_d + R_a*i_d) + i_q*(v_q + R_a*i_q) # electromagnetic power
# dynamic equations
ddelta = Omega_b*(omega - omega_s) - K_delta*delta   # load angle (the K_delta term anchors the angle reference)
domega = 1/(2*H)*(p_m - p_e - D*(omega - omega_s)) # speed
de1q = 1/T1d0*(-e1q - (X_d - X1d)*i_d + v_f + K_v*(1-v_t))   # e'_q dynamics (K_v adds a simple voltage-feedback term)
de1d = 1/T1q0*(-e1d + (X_q - X1q)*i_q)                       # e'_d dynamics
# algebraic equations
g_1 = v_q + R_a*i_q + X1d*i_d - e1q # stator
g_2 = v_d + R_a*i_d - X1q*i_q - e1d # stator
g_3 = i_d*v_d + i_q*v_q - p_t # active power
g_4 = i_d*v_q - i_q*v_d - q_t # reactive power
g_5 = p_t - (v_t*v_0*sin(theta_t - theta_0))/X_l # network equation (p)
g_6 = q_t + (v_t*v_0*cos(theta_t - theta_0))/X_l - v_t**2/X_l # network equation (q)
g_7 = -p_l - (v_t*v_0*sin(theta_0 - theta_t))/X_l # network equation (p)
g_8 = -q_l + (v_t*v_0*cos(theta_0 - theta_t))/X_l - v_0**2/X_l # network equation (q)
```
```python
sys = {'name':'iso_milano_ex8p1_4ord_uctrl',
       'params_dict':params_dict,
'f_list':[ddelta,domega,de1q,de1d],
'g_list':[g_1,g_2,g_3,g_4,g_5,g_6,g_7,g_8],
'x_list':x_list,
'y_ini_list':y_ini_list,
'y_run_list':y_run_list,
'u_ini_dict':u_ini_dict,
'u_run_dict':u_run_dict,
'h_dict':{'p_m':p_m,'p_e':p_e, 'v_f':v_f}}
sys = db.system(sys)
db.sys2num(sys)
```
```python
u_ini_dict
```
{'v_t': 0.8, 'theta_t': 1.0, 'p_l': 0.0, 'q_l': 0.0}
```python
```
|
{"hexsha": "f0ade8db3777cd0ee89114bfd0eb1830c560ff44", "size": 6883, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "examples/grids/smib_milano_ex8p1/iso_milano_ex8p1_4ord_uctrl/iso_milano_ex8p1_4ord_uctrl_builder.ipynb", "max_stars_repo_name": "pydae/pydae", "max_stars_repo_head_hexsha": "8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-20T03:45:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-20T03:45:26.000Z", "max_issues_repo_path": "examples/grids/smib_milano_ex8p1/iso_milano_ex8p1_4ord_uctrl/iso_milano_ex8p1_4ord_uctrl_builder.ipynb", "max_issues_repo_name": "pydae/pydae", "max_issues_repo_head_hexsha": "8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/grids/smib_milano_ex8p1/iso_milano_ex8p1_4ord_uctrl/iso_milano_ex8p1_4ord_uctrl_builder.ipynb", "max_forks_repo_name": "pydae/pydae", "max_forks_repo_head_hexsha": "8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4557522124, "max_line_length": 145, "alphanum_fraction": 0.4897573732, "converted": true, "num_tokens": 1709}
|
"""
Reimplements distillator.py with a callback-based interface.
Author: Dominic Jack
Date: May 2020
"""
import itertools
import time
from typing import Dict, Iterable, List, Optional, Sequence, Union
import numpy as np
import tensorflow as tf
import tqdm
from rlo import analytics
from rlo import distillator
from rlo import utils
from rlo.dataset import PolicyNetDataset, StateValueDataset
from rlo.tf_model import ModelWrapper, Weights
class StopTraining(Exception):
"""
Exception to signal training should stop.
Like `StopIteration` it should not be interpreted as an error.
"""
# pylint: disable=unused-argument
class TqdmCallback(tf.keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
self._time = time.time()
self._progress = tqdm.tqdm(desc="Epoch {}".format(epoch))
def on_batch_end(self, batch, logs=None):
self._progress.update()
def on_epoch_end(self, epoch, logs=None):
self._progress.close()
print("-------")
if logs is not None:
for key in sorted(logs):
print(" {:15s}: {}".format(key, logs[key]))
print(" {:15s}: {}".format("total time (s)", time.time() - self._time))
class DistillationCallback(tf.keras.callbacks.Callback):
def __init__(self, model: ModelWrapper, min_epochs: int, patience_epochs: int):
self.min_epochs = min_epochs
self.patience_epochs = patience_epochs
self.rlo_model = model
def on_train_begin(self, logs: Optional[Dict] = None):
self.epoch_counter = 0
self.batch_counter = 0
self.best_valid_loss: Optional[float] = np.inf
self.train_loss_at_best: Optional[float] = np.inf
self.last_train_loss = None
self.last_valid_loss = None
self.best_valid_epoch = self.epoch_counter
def on_epoch_begin(self, epoch: int, logs=None):
self.batch_losses = [] # type: List[float]
def on_batch_end(self, batch: int, logs: Optional[Dict] = None):
if logs is not None:
self.batch_losses.append(logs["loss"])
def on_epoch_end(self, epoch: int, logs: Optional[Dict] = None):
if logs is not None:
self.last_train_loss = logs["weighted_loss"]
self.last_valid_loss = logs["val_weighted_loss"]
self.last_train_total_weight = logs["total_weight"]
self.last_valid_total_weight = logs["val_total_weight"]
else:
self.last_train_loss = None
self.last_valid_loss = None
self.last_train_total_weight = None
self.last_valid_total_weight = None
analytics.event(
"distill_epoch",
verbosity=1,
epoch_counter=epoch,
train_loss=self.last_train_loss,
train_losses=self.batch_losses,
valid_loss=self.last_valid_loss,
prev_best_valid_loss=self.best_valid_loss,
prev_best_valid_epoch=self.best_valid_epoch,
)
self.batch_counter += len(self.batch_losses)
if self.last_valid_loss is None or self.last_valid_loss < self.best_valid_loss:
self.best_valid_loss = self.last_valid_loss
self.best_valid_epoch = epoch
self.train_loss_at_best = self.last_train_loss
self.best_weights = self.rlo_model.get_weights()
self.batch_losses = []
self.epoch_counter = epoch # used in distill_end event
if epoch >= max(self.min_epochs, self.best_valid_epoch + self.patience_epochs):
self.model.stop_training = True
def on_train_end(self, logs: Optional[Dict] = None):
# Hardcoding "inf" here means to always keep the model trained
# for up to patience iters BEYOND the point of lowest validation loss; this seems wrong, but the correct
# value of 0/negative (i.e. always restore the "best" params) more than doubled execution time (issue #224).
# This was resolved in distillator.py in PR #1236 (effectively changing the inf to 0).
        # In-between values would allow restoring weights only where it has the most impact, if we want to introduce that additional hyperparameter!
assert self.last_valid_loss is not None
if self.last_valid_loss / self.best_valid_loss > np.inf:
self.rlo_model.set_weights(self.best_weights)
else:
self.best_weights = self.rlo_model.get_weights()
analytics.event(
"distill_end",
epoch_counter=self.epoch_counter + 1, # finished epochs
batch_counter=self.batch_counter,
train_total_weight=self.last_train_total_weight,
loss=self.last_train_loss,
valid_loss=self.last_valid_loss,
valid_total_weight=self.last_valid_total_weight,
best_valid_epoch=self.best_valid_epoch,
valid_loss_at_best=self.best_valid_loss,
loss_at_best=self.train_loss_at_best,
)
def _fit(
model_wrapper: ModelWrapper,
train_data: Iterable,
validation_data: Iterable,
callbacks: Sequence[tf.keras.callbacks.Callback],
max_epochs: Optional[int],
) -> None:
"""
Minimal rlo equivalent of tf.keras.Model.fit.
Runs until StopTraining is raised by a callback.
See Distillator.__call__ for example usage.
Args:
model_wrapper: model to be trained.
train_data: iterable of batched data which can be iterated over multiple times.
        validation_data: iterable of batched validation data that can be iterated over
            multiple times.
callbacks: sequence of `tf.keras.callbacks.Callback` for custom functionality.
"""
training_model = model_wrapper.keras_model
training_model.stop_training = False
for callback in callbacks:
callback.set_model(training_model)
try:
logs = {} # type: Dict[str, float]
for cb in callbacks:
cb.on_train_begin(logs)
for epoch in itertools.count() if max_epochs is None else range(max_epochs):
model_wrapper.reset_metrics()
for cb in callbacks:
logs = {}
cb.on_epoch_begin(epoch, logs)
# training
for index, train_batch in enumerate(train_data):
if training_model.stop_training:
raise StopTraining()
logs = {}
for cb in callbacks:
cb.on_train_batch_begin(index, logs)
logs["loss"] = model_wrapper.train_on_batch(train_batch)
for cb in callbacks:
cb.on_train_batch_end(index, logs)
logs = model_wrapper.metric_results()
# validation
model_wrapper.reset_metrics()
for val_batch in validation_data:
model_wrapper.evaluate_loss(val_batch)
logs.update(
{f"val_{k}": v for k, v in model_wrapper.metric_results().items()}
)
for cb in callbacks:
cb.on_epoch_end(epoch, logs)
except StopTraining:
pass
logs = {}
for cb in callbacks:
cb.on_train_end(logs)
class DistillatorV2(distillator.Distillator):
def __init__(
self,
min_epochs: int,
max_epochs: Optional[int] = None,
batch_size: Optional[int] = None,
max_nodes_per_batch: Optional[int] = None,
split: float = 0.9,
patience_epochs: int = 10,
verbose: bool = False,
tb_dir: Optional[str] = None,
):
super().__init__(
max_epochs=max_epochs,
min_epochs=min_epochs,
batch_size=batch_size,
max_nodes_per_batch=max_nodes_per_batch,
split=split,
patience_epochs=patience_epochs,
)
self._verbose = verbose
self._tb_dir = tb_dir
def __call__(
self,
model_wrapper: ModelWrapper,
seed: int,
dataset: Union[PolicyNetDataset, StateValueDataset],
) -> Weights:
"""
        Distills the information content of `dataset` into the given model.
"""
analytics.event(
"distill_start",
num_points=dataset.num_points(),
num_exprs=dataset.num_expressions(),
seed=seed,
)
dataset_list = dataset.get_examples()
rng = utils.rng(seed)
dataset_list = utils.permutation(rng, dataset_list)
train_examples, val_examples = distillator.split_train_valid(
self.split, dataset_list
)
del dataset_list # ensure gc can clean up
train_data = model_wrapper.create_dataset(
train_examples, rng, **self._regen_kwargs
)
val_data = model_wrapper.create_dataset(val_examples, **self._regen_kwargs)
callback = DistillationCallback(
model_wrapper, self.min_epochs, self.patience_epochs
)
callbacks = [callback]
if self._verbose:
callbacks.append(TqdmCallback())
if self._tb_dir is not None:
callbacks.append(
tf.keras.callbacks.TensorBoard(
log_dir=self._tb_dir, profile_batch="2, 12"
)
)
_fit(model_wrapper, train_data, val_data, callbacks, max_epochs=self.max_epochs)
distillator.log_fitted_vals(model_wrapper, dataset, **self._regen_kwargs)
return callback.best_weights
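# Illustrative usage sketch (hypothetical objects; constructing a real
# ModelWrapper and dataset is outside the scope of this module):
#
#     distill = DistillatorV2(min_epochs=5, max_epochs=50, batch_size=32,
#                             patience_epochs=10, verbose=True)
#     best_weights = distill(model_wrapper, seed=0, dataset=dataset)
#     model_wrapper.set_weights(best_weights)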
|
{"hexsha": "6f53395648e9f1443e95177c10b7b63adb9d4cdc", "size": 9512, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlo/src/rlo/distillator_v2.py", "max_stars_repo_name": "tomjaguarpaw/knossos-ksc", "max_stars_repo_head_hexsha": "8fa75e67c0db8f632b135379740051cd10ff31f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2021-09-09T16:09:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T02:15:19.000Z", "max_issues_repo_path": "rlo/src/rlo/distillator_v2.py", "max_issues_repo_name": "tomjaguarpaw/knossos-ksc", "max_issues_repo_head_hexsha": "8fa75e67c0db8f632b135379740051cd10ff31f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2021-08-06T14:30:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-19T08:49:52.000Z", "max_forks_repo_path": "rlo/src/rlo/distillator_v2.py", "max_forks_repo_name": "tomjaguarpaw/knossos-ksc", "max_forks_repo_head_hexsha": "8fa75e67c0db8f632b135379740051cd10ff31f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-08-06T11:20:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T19:39:40.000Z", "avg_line_length": 37.0116731518, "max_line_length": 138, "alphanum_fraction": 0.6358284272, "include": true, "reason": "import numpy", "num_tokens": 1994}
|
push!(LOAD_PATH, "Modules")
using Colors
using JuliRay
φ=MathConstants.φ
Base.@irrational ° 0.0174532925199432957692369076848861271344 (big(pi)/big(180))
function POLYGON2(vertices,O,R;r1=1.0,r2=0.05,r3=0.025)
n=length(vertices)
C=O+R*normalize(+(vertices...)/n-O)
V=rgbColor(csgUnion((p->Sphere(p,r2)).(vertices)),RGB(0.1,0.1,0.1))
E=rgbColor(csgUnion([Arc(vertices[i],O+R*normalize((vertices[i]+vertices[mod(i,n)+1])/2-O),vertices[mod(i,n)+1],r3) for i in 1:n]),RGB(0.1,0.1,0.1))
if(R<10^2)
F=csgClip(
rgbftColor(
Sphere(O,R),RGB(1,0.5,1),FT(0.1,0.1)
),
csgIntersection(
[
(v=JuliRay.NormalVector(O,vertices[i],vertices[mod(i,n)+1]);s=sign(dot(v,C-O));Cylinder(O,O+s*R*v,R))
for i ∈ 1:n]
)
)
else
F=rgbftColor(Polygon(vertices),RGB(1,0.5,1),FT(0.1,0.1))
end
return csgUnion(V,E,F)
end
function Mirror(q,p1,p2,p3)
e₃=NormalVector(p1,p2,p3)
e₁=OrthogonalVector(e₃)
e₂=cross(e₃,e₁)
A=hcat(e₁,e₂,e₃)*transpose(hcat(e₁,e₂,-e₃))
fixedpoint=(p1+p2+p3)/3
b=fixedpoint-A*fixedpoint
return A*q+b
end
function Put(F,i,O)
p1=O
n=length(F)
p2,p3=F[mod(i-1,n)+1],F[mod(i,n)+1]
return (p->Mirror(p,p1,p2,p3)).(F)
end
function f(x)
ifelse(x>0,exp(-1/x),0)
end
function g(x)
f(2x/√3)/(f(2x/√3)+f(2/√3-2x/√3))
end
function Smooth(a,b,x)
g((x-a)/(b-a))
end
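# Smooth(a,b,x) is a C^∞ smooth step built from the bump function f above:
# it equals 0 for x ≤ a, 1 for x ≥ b, and passes through 1/2 at the midpoint.
# Quick check (illustrative):
#     Smooth(0, 1, 0.0) ≈ 0.0;  Smooth(0, 1, 0.5) ≈ 0.5;  Smooth(0, 1, 1.0) ≈ 1.0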
## Tetrahedron
h=1/3 # inradius when inscribed in the unit sphere
n=3 # n-gon (sides per face)
m=4 # m-hedron (number of faces)
M=60 # number of animation frames
r1=√(1-h^2) # circumradius of the n-gon
for i ∈ 1:M
    θ=(1-Smooth(0,1,2(i-1)/M)+Smooth(1,2,2(i-1)/M))*π/2 # blend parameter varies smoothly 0→1→0 (θ sweeps π/2→0→π/2)
R=1/cos(θ)
O=[0,0,tan(θ)]
N=O+[0,0,R]
H=O-[0,0,√(R^2-r1^2)]
    F=[[[r1*cos(2π*i/n),r1*sin(2π*i/n),0.0]+H for i in 1:n] for i ∈ 1:m] # vertices of the first n-gon
    F[2]=Put(F[1],1,O) # below: attach the remaining n-gons by reflection
F[3]=Put(F[2],2,O)
F[4]=Put(F[2],3,O)
    object=csgUnion([POLYGON2(F[i],O,R,r2=0.03,r3=0.01) for i ∈ 1:m]) # build the object from the vertex coordinates
render(csgUnion(object),camera=LngLatCamera(lng=30°,lat=30°,pers=0.2,zoom=0.2,width=640,height=360),name="Ns"*string(m),index=i)
end
## Cube
h=1/√3 # inradius when inscribed in the unit sphere
n=4 # n-gon (sides per face)
m=6 # m-hedron (number of faces)
M=60 # number of animation frames
r1=√(1-h^2) # circumradius of the n-gon
for i ∈ 1:M
    θ=(1-Smooth(0,1,2(i-1)/M)+Smooth(1,2,2(i-1)/M))*π/2 # blend parameter varies smoothly 0→1→0 (θ sweeps π/2→0→π/2)
R=1/cos(θ)
O=[0,0,tan(θ)]
N=O+[0,0,R]
H=O-[0,0,√(R^2-r1^2)]
    F=[[[r1*cos(2π*i/n),r1*sin(2π*i/n),0.0]+H for i in 1:n] for i ∈ 1:m] # vertices of the first n-gon
    F[2]=Put(F[1],1,O) # below: attach the remaining n-gons by reflection
F[3]=Put(F[2],3,O)
F[4]=Put(F[1],2,O)
F[5]=Put(F[4],3,O)
F[6]=Put(F[5],1,O)
    object=csgUnion([POLYGON2(F[i],O,R,r2=0.03,r3=0.01) for i ∈ 1:m]) # build the object from the vertex coordinates
render(object,camera=LngLatCamera(lng=30°,lat=30°,pers=0.2,zoom=0.2,width=640,height=360),name="Ns"*string(m),index=i)
end
## Octahedron
h=1/√3 # inradius when inscribed in the unit sphere
n=3 # n-gon (sides per face)
m=8 # m-hedron (number of faces)
M=60 # number of animation frames
r1=√(1-h^2) # circumradius of the n-gon
for i ∈ 1:M
    θ=(1-Smooth(0,1,2(i-1)/M)+Smooth(1,2,2(i-1)/M))*π/2 # blend parameter varies smoothly 0→1→0 (θ sweeps π/2→0→π/2)
R=1/cos(θ)
O=[0,0,tan(θ)]
N=O+[0,0,R]
H=O-[0,0,√(R^2-r1^2)]
    F=[[[r1*cos(2π*i/n),r1*sin(2π*i/n),0.0]+H for i in 1:n] for i ∈ 1:m] # vertices of the first n-gon
    F[2]=Put(F[1],1,O) # below: attach the remaining n-gons by reflection
F[3]=Put(F[2],3,O)
F[4]=Put(F[3],2,O)
F[5]=Put(F[4],3,O)
F[6]=Put(F[1],2,O)
F[7]=Put(F[6],3,O)
F[8]=Put(F[7],2,O)
    object=csgUnion([POLYGON2(F[i],O,R,r2=0.03,r3=0.01) for i ∈ 1:m]) # build the object from the vertex coordinates
render(object,camera=LngLatCamera(lng=30°,lat=30°,pers=0.2,zoom=0.2,width=640,height=360),name="Ns"*string(m),index=i)
end
## Dodecahedron
φ=2cos(π/5)
ξ=2sin(π/5)
h=φ/(√3*ξ) # inradius when inscribed in the unit sphere
n=5 # n-gon (sides per face)
m=12 # m-hedron (number of faces)
M=60 # number of animation frames
r1=√(1-h^2) # circumradius of the n-gon
for i ∈ 1:M
    θ=(1-Smooth(0,1,2(i-1)/M)+Smooth(1,2,2(i-1)/M))*π/2 # blend parameter varies smoothly 0→1→0 (θ sweeps π/2→0→π/2)
R=1/cos(θ)
O=[0,0,tan(θ)]
N=O+[0,0,R]
H=O-[0,0,√(R^2-r1^2)]
    F=[[[r1*cos(2π*i/n),r1*sin(2π*i/n),0.0]+H for i in 1:n] for i ∈ 1:m] # vertices of the first n-gon
    F[2]=Put(F[1],4,O) # below: attach the remaining n-gons by reflection
F[3]=Put(F[2],1,O)
F[4]=Put(F[2],2,O)
F[5]=Put(F[2],3,O)
F[6]=Put(F[2],5,O)
F[7]=Put(F[1],2,O)
F[8]=Put(F[7],5,O)
F[9]=Put(F[8],1,O)
F[10]=Put(F[8],2,O)
F[11]=Put(F[8],3,O)
F[12]=Put(F[8],4,O)
    object=csgUnion([POLYGON2(F[i],O,R,r2=0.03,r3=0.01) for i ∈ 1:m]) # build the object from the vertex coordinates
render(object,camera=LngLatCamera(lng=30°,lat=30°,pers=0.2,zoom=0.2,width=640,height=360),name="Ns"*string(m),index=i)
end
## Icosahedron
φ=2cos(π/5)
ξ=2sin(π/5)
h=φ/(√3*ξ) # inradius when inscribed in the unit sphere
n=3 # n-gon (sides per face)
m=20 # m-hedron (number of faces)
M=60 # number of animation frames
r1=√(1-h^2) # circumradius of the n-gon
for i ∈ 1:M
    θ=(1-Smooth(0,1,2(i-1)/M)+Smooth(1,2,2(i-1)/M))*π/2 # blend parameter varies smoothly 0→1→0 (θ sweeps π/2→0→π/2)
R=1/cos(θ)
O=[0,0,tan(θ)]
N=O+[0,0,R]
H=O-[0,0,√(R^2-r1^2)]
    F=[[[r1*cos(2π*i/n),r1*sin(2π*i/n),0.0]+H for i in 1:n] for i ∈ 1:m] # vertices of the first n-gon
    F[2]=Put(F[1],1,O) # below: attach the remaining n-gons by reflection
F[3]=Put(F[2],3,O)
F[4]=Put(F[3],2,O)
F[5]=Put(F[4],1,O)
F[6]=Put(F[5],3,O)
F[7]=Put(F[1],2,O)
F[8]=Put(F[7],3,O)
F[9]=Put(F[8],1,O)
F[10]=Put(F[9],2,O)
F[11]=Put(F[1],3,O)
F[12]=Put(F[2],2,O)
F[13]=Put(F[3],1,O)
F[14]=Put(F[4],3,O)
F[15]=Put(F[5],2,O)
F[16]=Put(F[6],1,O)
F[17]=Put(F[7],1,O)
F[18]=Put(F[8],2,O)
F[19]=Put(F[9],3,O)
F[20]=Put(F[10],1,O)
    object=csgUnion([POLYGON2(F[i],O,R,r2=0.03,r3=0.01) for i ∈ 1:m]) # build the object from the vertex coordinates
render(object,camera=LngLatCamera(lng=30°,lat=30°,pers=0.2,zoom=0.2,width=640,height=360),name="Ns"*string(m),index=i)
end
|
{"hexsha": "b43d7a9d3a24ec1ed2a16acdc9c4f9e6efe93cba", "size": 5566, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Examples/NetOfPolyhedra/NetOfPolyhedra.jl", "max_stars_repo_name": "hyrodium/JuliRay", "max_stars_repo_head_hexsha": "178ea03571ea0852294d24a2277ac26d630880b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-11T04:48:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-11T04:48:26.000Z", "max_issues_repo_path": "Examples/NetOfPolyhedra/NetOfPolyhedra.jl", "max_issues_repo_name": "hyrodium/JuliRay", "max_issues_repo_head_hexsha": "178ea03571ea0852294d24a2277ac26d630880b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-08-03T12:58:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-19T15:45:17.000Z", "max_forks_repo_path": "Examples/NetOfPolyhedra/NetOfPolyhedra.jl", "max_forks_repo_name": "hyrodium/JuliRay", "max_forks_repo_head_hexsha": "178ea03571ea0852294d24a2277ac26d630880b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7596153846, "max_line_length": 152, "alphanum_fraction": 0.5547969817, "num_tokens": 3146}
|
"""
_tmap(f, xs...)
An internal helper for `tmap` that handles the `basesize == 1` case.
"""
function _tmap(f, xss...)
# Set the number of threads that BLAS uses to 1 here in case it's using up too many
nthreads() > 1 && LinearAlgebra.BLAS.set_num_threads(1)
futures = map(xss...) do x...
Threads.@spawn f(x...)
end
results = map(fetch, futures)
# Bump up the BLAS threads once we're done
nthreads() > 1 && LinearAlgebra.BLAS.set_num_threads(typemax(Int32))
return results
end
"""
_tmap_with_partition(f, xss...; basesize)
An internal helper for `tmap` that handles the `basesize > 1` case.
Works for `basesize == 1`, but is less efficient, since it breaks things up into
single-item slices and then stitches them back together again.
"""
function _tmap_with_partition(f, xss...; basesize)
partitioned_xss = Iterators.partition.(xss, basesize)
partioned_ys = _tmap(partitioned_xss...) do xss...
map(f, xss...)
end
return reduce(vcat, partioned_ys)
end
"""
tmap(f, xs...; basesize=1)
Multithreaded version of `map`.
`basesize` controls the minimum number of items from `xs` to process per `@spawn`ed task.
!!! tip
    `basesize` should be set high enough that processing that
many items takes about ~1ms. This is to counter the ~50μs overhead
it takes to dispatch work to a thread. If the function takes >1ms per
call, then `basesize=1` is recommended.
"""
function tmap(f, xss...; basesize=1)
if basesize == 1
_tmap(f, xss...)
else
_tmap_with_partition(f, xss...; basesize=basesize)
end
end
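# Illustrative usage (a sketch; `slow_square` is a made-up stand-in for any
# per-item function costing ≳1ms, where the ~50μs spawn overhead is negligible):
#
#     slow_square(x) = (sleep(0.002); x^2)
#     tmap(slow_square, 1:32)                          # one task per item
#     tmap(+, 1:10_000, 10_001:20_000; basesize=500)   # cheap f: batch items per task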
"""
tmap_with_warmup(f, xs...; basesize=1)
Similar to [`tmap`](@ref), but runs the first call single-threaded, before multithreading
the remainder.
This is useful for workloads that benefit from something happening on the first run,
such as populating caches or triggering compilation.
`basesize` controls the minimum number of items from `xs` to process per `@spawn`ed task.
See [`tmap`](@ref) for more details
"""
function tmap_with_warmup(f, xss...; basesize=1)
xs = first.(xss)
xs_tails = Iterators.rest.(xss, 2)
y = f(xs...)
ys_tail = tmap(f, xs_tails...; basesize=basesize)
ys = pushfirst!(ys_tail, y)
return ys
end
|
{"hexsha": "35409da04808fe172ec4f84177f8cf6d71c483ea", "size": 2290, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/tmap.jl", "max_stars_repo_name": "invenia/Parallelism.jl", "max_stars_repo_head_hexsha": "d5b8a82828b68bf7823ca208987c0a03b23e81f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-05T21:55:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T09:14:20.000Z", "max_issues_repo_path": "src/tmap.jl", "max_issues_repo_name": "invenia/Parallelism.jl", "max_issues_repo_head_hexsha": "d5b8a82828b68bf7823ca208987c0a03b23e81f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-07-03T13:22:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-16T19:52:26.000Z", "max_forks_repo_path": "src/tmap.jl", "max_forks_repo_name": "invenia/Parallelism.jl", "max_forks_repo_head_hexsha": "d5b8a82828b68bf7823ca208987c0a03b23e81f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7402597403, "max_line_length": 90, "alphanum_fraction": 0.6786026201, "num_tokens": 665}
|
#!/usr/bin/env python3
"""
Unit and regression test for data_proc.py
"""
import errno
import os
import sys
import unittest
from contextlib import contextmanager
from io import StringIO
import pandas as pd
import numpy as np
import logging
from otu_proj.data_proc import main, data_process_analysis
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
DISABLE_REMOVE = logger.isEnabledFor(logging.DEBUG)
CURRENT_DIR = os.path.dirname(__file__)
MAIN_DIR = os.path.join(CURRENT_DIR, '..')
TEST_DATA_DIR = os.path.join(CURRENT_DIR, 'data_proc')
PROJ_DIR = os.path.join(MAIN_DIR, 'otu_proj')
DATA_DIR = os.path.join(PROJ_DIR, 'data')
SAMPLE_DATA_FILE_LOC = os.path.join(DATA_DIR, 'OTU_data.xlsx')
SAMPLE_DATA2_FILE_LOC = os.path.join(DATA_DIR, 'OTU_data2.xlsx')
DATA_FNAME = os.path.basename(SAMPLE_DATA_FILE_LOC)
FNAME = os.path.splitext(DATA_FNAME)[0]
# Assumes running tests from the main directory
DEF_XLSX_OUT = os.path.join(MAIN_DIR, 'OTU_data_processed.xlsx')
DEF_PNG_OUT = os.path.join(MAIN_DIR, 'OTU_data.png')
def silent_remove(filename, disable=False):
"""
    Removes the target file, catching and ignoring errors that indicate that the
    file does not exist.
    @param filename: The file to remove.
    @param disable: boolean flag to disable removal (e.g. to keep outputs while debugging)
"""
if not disable:
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
class TestMain(unittest.TestCase):
# These tests make sure that the program can run properly from main
def testSampleData(self):
# Checks that runs with defaults and that files are created
test_input = ["-d", SAMPLE_DATA_FILE_LOC]
try:
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
# checks that the expected message is sent to standard out
with capture_stdout(main, test_input) as output:
self.assertTrue("OTU_data_processed.xlsx" in output)
self.assertTrue(os.path.isfile("OTU_data_processed.xlsx"))
self.assertTrue(os.path.isfile("OTU_data.png"))
finally:
silent_remove(DEF_XLSX_OUT, disable=DISABLE_REMOVE)
silent_remove(DEF_PNG_OUT, disable=DISABLE_REMOVE)
class TestMainFailWell(unittest.TestCase):
def testMissingFile(self):
test_input = ["-d", "ghost.xlsx"]
if logger.isEnabledFor(logging.DEBUG):
main(test_input)
with capture_stderr(main, test_input) as output:
self.assertTrue("ghost.xlsx" in output)
class TestDataProcessAnalysis(unittest.TestCase):
def testSampleData(self):
# Tests that the mean and standard deviation dataframes generated by the data_process_analysis function
# match saved expected results
xlsx_data = pd.read_excel(SAMPLE_DATA_FILE_LOC, index_col=0)
analysis_mean_vals, analysis_sd_vals = data_process_analysis(xlsx_data, FNAME)
expected_mean_vals = pd.read_excel(os.path.join(TEST_DATA_DIR, "OTU_data_results.xlsx"),
sheet_name='mean', index_col=0)
expected_sd_vals = pd.read_excel(os.path.join(TEST_DATA_DIR, "OTU_data_results.xlsx"),
sheet_name='sd', index_col=0)
self.assertTrue(np.allclose(analysis_mean_vals, expected_mean_vals) and
np.allclose(analysis_sd_vals, expected_sd_vals))
def testSampleData2(self):
# A second check, with slightly different values, of the data_analysis function
xlsx_data = pd.read_excel(SAMPLE_DATA2_FILE_LOC, index_col=0)
analysis_mean_vals, analysis_sd_vals = data_process_analysis(xlsx_data, FNAME)
expected_mean_vals = pd.read_excel(os.path.join(TEST_DATA_DIR, "OTU_data2_results.xlsx"),
sheet_name='mean', index_col=0)
expected_sd_vals = pd.read_excel(os.path.join(TEST_DATA_DIR, "OTU_data2_results.xlsx"),
sheet_name='sd', index_col=0)
self.assertTrue(np.allclose(analysis_mean_vals, expected_mean_vals) and
np.allclose(analysis_sd_vals, expected_sd_vals))
# Utility functions
# From http://schinckel.net/2013/04/15/capture-and-test-sys.stdout-sys.stderr-in-unittest.testcase/
@contextmanager
def capture_stdout(command, *args, **kwargs):
# pycharm doesn't know six very well, so ignore the false warning
# noinspection PyCallingNonCallable
out, sys.stdout = sys.stdout, StringIO()
command(*args, **kwargs)
sys.stdout.seek(0)
yield sys.stdout.read()
sys.stdout = out
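# Illustrative use of capture_stdout (a sketch):
#
#     with capture_stdout(print, "hello") as output:
#         assert "hello" in output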
@contextmanager
def capture_stderr(command, *args, **kwargs):
# pycharm doesn't know six very well, so ignore the false warning
# noinspection PyCallingNonCallable
err, sys.stderr = sys.stderr, StringIO()
command(*args, **kwargs)
sys.stderr.seek(0)
yield sys.stderr.read()
sys.stderr = err
|
{"hexsha": "248cc213023d92886cef02702fc2ca9dad47ab89", "size": 5020, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_otu_proj.py", "max_stars_repo_name": "chechung/otu_proj", "max_stars_repo_head_hexsha": "dc68b9708c36901c696c61c84f8283f69f9c08ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_otu_proj.py", "max_issues_repo_name": "chechung/otu_proj", "max_issues_repo_head_hexsha": "dc68b9708c36901c696c61c84f8283f69f9c08ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_otu_proj.py", "max_forks_repo_name": "chechung/otu_proj", "max_forks_repo_head_hexsha": "dc68b9708c36901c696c61c84f8283f69f9c08ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.16, "max_line_length": 111, "alphanum_fraction": 0.6912350598, "include": true, "reason": "import numpy", "num_tokens": 1110}
|
import glob
from time import time
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.callbacks import TensorBoard
from keras.optimizers import RMSprop
from numpy.random import seed
from sklearn.model_selection import StratifiedKFold
from keras.models import load_model
from keras.models import Model
from pyemd import emd_samples
from models.models import *
from models.SelfAttentionModule import *
from utils.configs import *
from utils.metric import *
from utils.ultilities import *
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
from keras.callbacks import LambdaCallback
import sys
def generator_test(b_s, imgs_test_path):
images = [imgs_test_path + f for f in os.listdir(imgs_test_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
images.sort()
gaussian = np.zeros((b_s, nb_gaussian, shape_r_gt, shape_c_gt))
counter = 0
while True:
if net.startswith("ms"):
yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c),
preprocess_images(images[counter:counter + b_s], int(shape_r/2), int(shape_c/2))]
elif net.startswith("ts"):
yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c),
preprocess_images(images[counter:counter + b_s], int(shape_r / 2), int(shape_c / 2)),
preprocess_images(images[counter:counter + b_s], int(shape_r / 4), int(shape_c / 4))]
else:
yield [preprocess_images(images[counter:counter + b_s], shape_r, shape_c)]
counter = (counter + b_s) % len(images)
def load_data():
images = [imgs_train_path + f for f in os.listdir(imgs_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
maps = [maps_train_path + f for f in os.listdir(maps_train_path) if f.endswith(('.jpg', '.jpeg', '.png'))]
fixs = [fixs_train_path + f for f in os.listdir(fixs_train_path) if f.endswith('.mat')]
images.sort()
maps.sort()
fixs.sort()
counter = 0
X_train = []
Y_train = []
while True:
Y = preprocess_maps(maps[counter:counter+b_s], shape_r_out, shape_c_out)
Y_fix = preprocess_fixmaps_salicon(fixs[counter:counter + b_s], shape_r_out, shape_c_out)
if net.startswith("ms"):
X = [preprocess_images(images[counter:counter + b_s], shape_r, shape_c), preprocess_images(images[counter:counter + b_s], int(shape_r / 2), int(shape_c / 2))]
elif net.startswith("ts"):
X = [preprocess_images(images[counter:counter + b_s], shape_r, shape_c),
preprocess_images(images[counter:counter + b_s], int(shape_r / 2), int(shape_c / 2)),
preprocess_images(images[counter:counter + b_s], int(shape_r / 4), int(shape_c / 4))]
else:
X = [preprocess_images(images[counter:counter + b_s], shape_r, shape_c)]
X_train.append(X)
Y_train.append([Y, Y, Y_fix])
counter = (counter + b_s) % len(images)
if counter == 0:
break
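    # StratifiedKFold requires class labels, so a dummy all-zero label vector
    # is used below; the split then behaves as a plain shuffled 10-fold split.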
y_dummy = np.zeros(shape=(len(X_train), 1))
folds = list(StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(X_train, y_dummy))
return folds, X_train, Y_train
def batch_generator(X, Y, batch_size = 1):
while True:
for x, y in zip(X, Y):
yield x, y
def batch_generator_test(X):
while True:
for x in X:
yield x
def create_model():
if net == 'msdensenet':
print('Compiling multiscale densenet')
m = Model(inputs=[x, x1], outputs=msdensenet([x, x1]))
elif net == 'tsdensenet':
print('Compiling multiscale(3) densenet')
m = Model(inputs=[x, x1, x2], outputs=tsdensenet([x, x1, x2]))
    elif net == 'msdensenetnon':
        print('Compiling multiscale densenet without dilated block')
        m = Model(inputs=[x, x1], outputs=msdensenet_non([x, x1]))
    elif net == 'sdensenet':
        print('Compiling singlescale densenet')
        m = Model(inputs=[x], outputs=sdensenet([x]))
    elif net == 'msdensenet_att':
        print('Compiling multiscale densenet dilated with attention')
        m = Model(inputs=[x, x1], outputs=msdensenet_att([x, x1]))
    elif net == 'dense':
        print('Compiling dense')
        m = Model(inputs=[x], outputs=dense([x]))
else:
raise NotImplementedError
return m
def training_process(path, model, batch_gen_train, nb_train, batch_gen_val, nb_val, fold, weight=None):
    print(weight)
    if weight is not None:
        import tensorflow as tf
        # recover the starting epoch from the checkpoint filename
        # (format: weights.<net>f<fold>.<epoch>-<val_loss>.h5)
        initepochstr = weight[weight.find(".", 9) + 1:weight.find("-")]
        initepoch = int(initepochstr)
        # reload the full model (architecture, weights and optimizer state)
        del model
        model = load_model(path + weight, custom_objects={"tf": tf, "kl_divergence": kl_divergence,
                                                          "correlation_coefficient": correlation_coefficient,
                                                          "nss": nss, "SelfAttention": SelfAttention})
        print(initepoch)
        print(path + weight)
    else:
        initepoch = 0
model.fit_generator(batch_gen_train, nb_train,
initial_epoch=initepoch,
epochs=nb_epoch,
validation_data=batch_gen_val,
validation_steps=nb_val,
callbacks=[tensorboard,
ModelCheckpoint(path + '/weights.'+net+'f' + str(fold) + '.{epoch:02d}-{val_loss:.4f}.h5',
save_best_only=True)])
if __name__ == '__main__':
if len(sys.argv) == 1:
raise NotImplementedError
else:
print("Version 1.2")
K.set_image_data_format("channels_first")
phase = sys.argv[1]
x = Input((3, shape_r, shape_c))
    x1 = Input((3, int(shape_r / 2), int(shape_c / 2)))
    x2 = Input((3, int(shape_r / 4), int(shape_c / 4)))
x_maps = Input((nb_gaussian, shape_r_gt, shape_c_gt))
m = 0
if phase == 'train':
path = "weight/cv/" + net
        try:
            weight = sys.argv[2]
        except IndexError:
            weight = None
if not os.path.exists(path):
os.makedirs(path)
folds, X_train, Y_train = load_data()
sum_nss = 0
sum_cc = 0
sum_kl = 0
h = 0
for j, (train_idx, val_idx) in enumerate(folds):
m = create_model()
print("Fold: ", j)
m.output_names = ['output_1', 'output_2', 'output_3']
tensorboard = TensorBoard(log_dir="logs/{}_{}_{}".format(net, j, time()))
m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss], metrics={'output_1': kl_divergence, 'output_2': correlation_coefficient,'output_3': nss})
X_train_cv = [X_train[i] for i in train_idx]
y_train_cv = [Y_train[i] for i in train_idx]
X_valid_cv = [X_train[i] for i in val_idx]
y_valid_cv = [Y_train[i] for i in val_idx]
print("Number of train image ", len(X_train_cv))
print("Number of validation image ", len(X_valid_cv))
            training_process(path, m, batch_generator(X_train_cv, y_train_cv), len(X_train_cv),
                             batch_generator(X_valid_cv, y_valid_cv), len(X_valid_cv), j, weight=weight)
weight=None
elif phase == "test":
# Output Folder Path
output_folder = "pred/" + net + '/'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
path_test = sys.argv[3]
file_names = [f for f in os.listdir(path_test) if f.endswith(('.jpg', '.jpeg', '.png'))]
file_names.sort()
nb_imgs_test = len(file_names)
m = create_model()
        if nb_imgs_test % b_s != 0:
            print("The number of test images should be a multiple of the batch size. "
                  "Please change your batch size in config.py accordingly.")
            sys.exit(1)
print("Loading weights")
weight_path = sys.argv[2]
m.load_weights(weight_path)
print("Predicting saliency maps for " + path_test)
predictions = m.predict_generator(generator_test(b_s=b_s, imgs_test_path=path_test), nb_imgs_test)[0]
for pred, name in zip(predictions, file_names):
original_image = cv2.imread(path_test + name, 0)
res = postprocess_predictions(pred[0], original_image.shape[0], original_image.shape[1])
cv2.imwrite(output_folder + '%s' % name, res.astype(int))
elif phase == 'foldcal':
folds, X_train, Y_train = load_data()
path = "weight/cv/" + net + "/result"
f = open('doc/'+net+'_salicon10f.csv', 'a')
sum_aucjud = 0
sum_sim = 0
sum_emd = 0
sum_aucbor = 0
sum_sauc = 0
sum_nss = 0
sum_cc = 0
sum_kl = 0
m = create_model()
m.compile(RMSprop(lr=1e-4), loss=[kl_divergence, correlation_coefficient, nss])
smap = cv2.imread("data/shuffle_map.png", 0)
smap = cv2.resize(smap, (640, 480))
for j, (train_idx, val_idx) in enumerate(folds):
print("Fold: ", j)
X_train_cv = [X_train[i] for i in train_idx]
y_train_cv = [Y_train[i] for i in train_idx]
X_valid_cv = [X_train[i] for i in val_idx]
y_valid_cv = [Y_train[i] for i in val_idx]
nb_val = len(X_valid_cv)
            latest_file = glob.glob(path + '/weights.' + net + 'f' + str(j) + '*.*')
            if not latest_file:
                print("not found")
                continue
            latest_file = max(latest_file, key=os.path.getctime)
            print(latest_file)
            m.load_weights(latest_file)
predictions = m.predict_generator(batch_generator_test(X_valid_cv), nb_val)[0]
nss_tmp = 0
cc_tmp = 0
kl_tmp = 0
emd_tmp = 0
aucjud_tmp = 0
sim_tmp = 0
aucbor_tmp = 0
sauc_tmp = 0
for pred, gt in zip(predictions, y_valid_cv):
res = postprocess_predictions(pred[0], shape_r_out, shape_c_out)
res = res/255
aucjud_tmp += auc_judd(res, gt[2][0, 0])
sim_tmp += similarity(res, gt[0][0, 0])
aucbor_tmp += auc_borji(res, gt[2][0, 0])
nss_tmp += nss_metric(gt[2][0, 0], res)
cc_tmp += cc(gt[0][0, 0], res)
kl_tmp += kldiv(gt[0][0, 0], res)
emdgt = gt[0][0, 0]*255
emdres = res*255
emd_tmp += emd_samples(emdgt.flatten(), emdres.flatten(), bins=255)
sauc_tmp += auc_shuff(res, gt[2][0, 0], smap)
print(emd_tmp/nb_val)
sum_nss += nss_tmp / nb_val
sum_cc += cc_tmp / nb_val
sum_kl += kl_tmp / nb_val
sum_emd += emd_tmp / nb_val
sum_aucjud += aucjud_tmp / nb_val
sum_sim += sim_tmp / nb_val
sum_aucbor += aucbor_tmp / nb_val
sum_sauc += sauc_tmp / nb_val
f.write("{},{},{},{},{},{},{},{}\n".format(aucjud_tmp / nb_val, sim_tmp / nb_val, emd_tmp / nb_val,
aucbor_tmp / nb_val, sauc_tmp / nb_val, cc_tmp / nb_val, nss_tmp / nb_val,
kl_tmp / nb_val))
f.write("{},{},{},{},{},{},{},{}\n".format(sum_aucjud/10, sum_sim/10, sum_emd/10,
sum_aucbor/10, sum_sauc/10, sum_cc/10, sum_nss/10, sum_kl/10))
f.close()
|
{"hexsha": "982e4524cc5011a46f035a311e10998e867f3df6", "size": 12159, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "jaist-kotani-lab/DeepSaliencyInCrowd", "max_stars_repo_head_hexsha": "d9c47fc6e2acfc37ffbc50e473e8e48f854ff268", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "jaist-kotani-lab/DeepSaliencyInCrowd", "max_issues_repo_head_hexsha": "d9c47fc6e2acfc37ffbc50e473e8e48f854ff268", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "jaist-kotani-lab/DeepSaliencyInCrowd", "max_forks_repo_head_hexsha": "d9c47fc6e2acfc37ffbc50e473e8e48f854ff268", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3658536585, "max_line_length": 185, "alphanum_fraction": 0.5553088247, "include": true, "reason": "from numpy", "num_tokens": 3032}
|
"""
module Datasets
Commonly used datasets and utilities for creating data containers.
In the future, contents will be integrated into packages:
- FastAI datasets and data containers will be moved into MLDatasets.jl
- data container transformations will be moved to MLDataPattern.jl
This submodule will then reexport the same definitions.
"""
module Datasets
using ..FastAI
using ..FastAI: typify, TableRow
using DataDeps
using Glob
using FilePathsBase
import DataAugmentation
using FilePathsBase: filename
import FileIO
using IndirectArrays: IndirectArray
using MLDataPattern
using MLDataPattern: splitobs
import LearnBase
using Colors
using FixedPointNumbers
using DataFrames
using Tables
using CSV
using ShowCases
include("fastaidatasets.jl")
function __init__()
initdatadeps()
end
include("containers.jl")
include("transformations.jl")
include("load.jl")
include("recipes/recipe.jl")
include("recipes/vision.jl")
include("recipes/tabular.jl")
include("registry.jl")
include("fastairegistry.jl")
export
# reexports from MLDataPattern
splitobs,
# container transformations
mapobs,
filterobs,
groupobs,
joinobs,
eachobs,
# primitive containers
FileDataset,
TableDataset,
# utilities
isimagefile,
matches,
loadfile,
loadmask,
pathname,
pathparent,
parentname,
grandparentname,
# datasets
DATASETS,
loadfolderdata,
datasetpath,
# recipes
loadrecipe,
finddatasets,
listdatasources,
loaddataset
end # module
|
{"hexsha": "749261b0d822c7b9bed04c6e1a7d4e653bd4def1", "size": 1549, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/datasets/Datasets.jl", "max_stars_repo_name": "inferential/FastAI.jl", "max_stars_repo_head_hexsha": "3a017af061a1125231fe7d7a4ec98da0a255d781", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/datasets/Datasets.jl", "max_issues_repo_name": "inferential/FastAI.jl", "max_issues_repo_head_hexsha": "3a017af061a1125231fe7d7a4ec98da0a255d781", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/datasets/Datasets.jl", "max_forks_repo_name": "inferential/FastAI.jl", "max_forks_repo_head_hexsha": "3a017af061a1125231fe7d7a4ec98da0a255d781", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.2111111111, "max_line_length": 70, "alphanum_fraction": 0.7437056165, "num_tokens": 378}
|
"""Generate the API for symbolic multicomponent enrichment using sympy.
Note that there is a bug in sympy v0.7.2 that prevents the cse() function
from being used with infinities. For a work around see [1].
1. https://groups.google.com/forum/#!msg/sympy/YL1R_hR6OKQ/axKrCsCSMQsJ
"""
from __future__ import print_function, division
import os
import logging
import multiprocessing
import time
from warnings import warn
from pyne.utils import QAWarning
from sympy import Symbol, pprint, latex, diff, count_ops, simplify, cse, Eq, Q, \
    log, logcombine, Abs, exp, sqrt, series, separate, powsimp, collect, expand
from sympy.solvers import solve
from sympy.utilities.iterables import numbered_symbols
from utils import cse_to_c
warn(__name__ + " is not yet QA compliant.", QAWarning)
NPROCS = 10
def _aggstatus(stat, msg, aggstat):
if not aggstat:
print(msg)
stat += msg + '\n'
return stat
def cgen_ncomp(ncomp=3, nporder=2, aggstat=False, debug=False):
"""Generates a C function for ncomp (int) number of components.
The jth key component is always in the first position and the kth
key component is always in the second. The number of enrichment
stages (NP) is calculated via a taylor series approximation. The
order of this approximation may be set with nporder. Only values
of 1 or 2 are allowed. The aggstat argument determines whether the
    status messages should be aggregated and printed at the end or output
as the function executes.
"""
start_time = time.time()
stat = _aggstatus('', "generating {0} component enrichment".format(ncomp), aggstat)
r = range(0, ncomp)
j = 0
k = 1
# setup-symbols
alpha = Symbol('alpha', positive=True, real=True)
LpF = Symbol('LpF', positive=True, real=True)
PpF = Symbol('PpF', positive=True, real=True)
TpF = Symbol('TpF', positive=True, real=True)
SWUpF = Symbol('SWUpF', positive=True, real=True)
SWUpP = Symbol('SWUpP', positive=True, real=True)
NP = Symbol('NP', positive=True, real=True) # Enrichment Stages
NT = Symbol('NT', positive=True, real=True) # De-enrichment Stages
NP0 = Symbol('NP0', positive=True, real=True) # Enrichment Stages Initial Guess
NT0 = Symbol('NT0', positive=True, real=True) # De-enrichment Stages Initial Guess
NP1 = Symbol('NP1', positive=True, real=True) # Enrichment Stages Computed Value
NT1 = Symbol('NT1', positive=True, real=True) # De-enrichment Stages Computed Value
Mstar = Symbol('Mstar', positive=True, real=True)
MW = [Symbol('MW[{0}]'.format(i), positive=True, real=True) for i in r]
beta = [alpha**(Mstar - MWi) for MWi in MW]
# np_closed helper terms
NP_b = Symbol('NP_b', real=True)
NP_2a = Symbol('NP_2a', real=True)
NP_sqrt_base = Symbol('NP_sqrt_base', real=True)
xF = [Symbol('xF[{0}]'.format(i), positive=True, real=True) for i in r]
xPi = [Symbol('xP[{0}]'.format(i), positive=True, real=True) for i in r]
xTi = [Symbol('xT[{0}]'.format(i), positive=True, real=True) for i in r]
xPj = Symbol('xPj', positive=True, real=True)
xFj = xF[j]
xTj = Symbol('xTj', positive=True, real=True)
ppf = (xFj - xTj)/(xPj - xTj)
tpf = (xFj - xPj)/(xTj - xPj)
xP = [(((xF[i]/ppf)*(beta[i]**(NT+1) - 1))/(beta[i]**(NT+1) - beta[i]**(-NP))) \
for i in r]
xT = [(((xF[i]/tpf)*(1 - beta[i]**(-NP)))/(beta[i]**(NT+1) - beta[i]**(-NP))) \
for i in r]
rfeed = xFj / xF[k]
rprod = xPj / xP[k]
rtail = xTj / xT[k]
# setup constraint equations
numer = [ppf*xP[i]*log(rprod) + tpf*xT[i]*log(rtail) - xF[i]*log(rfeed) for i in r]
denom = [log(beta[j]) * ((beta[i] - 1.0)/(beta[i] + 1.0)) for i in r]
LoverF = sum([n/d for n, d in zip(numer, denom)])
SWUoverF = -1.0 * sum(numer)
SWUoverP = SWUoverF / ppf
prod_constraint = (xPj/xFj)*ppf - (beta[j]**(NT+1) - 1)/\
(beta[j]**(NT+1) - beta[j]**(-NP))
tail_constraint = (xTj/xFj)*(sum(xT)) - (1 - beta[j]**(-NP))/\
(beta[j]**(NT+1) - beta[j]**(-NP))
#xp_constraint = 1.0 - sum(xP)
#xf_constraint = 1.0 - sum(xF)
#xt_constraint = 1.0 - sum(xT)
# This is NT(NP,...) and is correct!
#nt_closed = solve(prod_constraint, NT)[0]
# However, this is NT(NP,...) rewritten (by hand) to minimize the number of NP
# and M* instances in the expression. Luckily this is only depends on the key
# component and remains general no matter the number of components.
nt_closed = (-MW[0]*log(alpha) + Mstar*log(alpha) + log(xTj) + log((-1.0 + xPj/\
xF[0])/(xPj - xTj)) - log(alpha**(NP*(MW[0] - Mstar))*(xF[0]*xPj - xPj*xTj)/\
(-xF[0]*xPj + xF[0]*xTj) + 1))/((MW[0] - Mstar)*log(alpha))
# new expression for normalized flow rate
# NOTE: not needed, solved below
#loverf = LoverF.xreplace({NT: nt_closed})
# Define the constraint equation with which to solve NP. This is chosen such to
# minimize the number of ops in the derivatives (and thus np_closed). Other,
# more verbose possibilities are commented out.
#np_constraint = (xP[j]/sum(xP) - xPj).xreplace({NT: nt_closed})
#np_constraint = (xP[j]- sum(xP)*xPj).xreplace({NT: nt_closed})
#np_constraint = (xT[j]/sum(xT) - xTj).xreplace({NT: nt_closed})
np_constraint = (xT[j] - sum(xT)*xTj).xreplace({NT: nt_closed})
# get closed form approximation of NP via symbolic derivatives
stat = _aggstatus(stat, " order-{0} NP approximation".format(nporder), aggstat)
d0NP = np_constraint.xreplace({NP: NP0})
d1NP = diff(np_constraint, NP, 1).xreplace({NP: NP0})
    if 1 == nporder:
        # first-order Taylor expansion is a Newton step: NP1 = NP0 - f(NP0)/f'(NP0)
        np_closed = NP0 - d0NP / d1NP
elif 2 == nporder:
d2NP = diff(np_constraint, NP, 2).xreplace({NP: NP0})/2.0
# taylor series polynomial coefficients, grouped by order
# f(x) = ax**2 + bx + c
a = d2NP
b = d1NP - 2*NP0*d2NP
c = d0NP - NP0*d1NP + NP0*NP0*d2NP
# quadratic eq. (minus only)
#np_closed = (-b - sqrt(b**2 - 4*a*c)) / (2*a)
# However, we need to break up this expr as follows to prevent
# a floating point arithmetic bug if b**2 - 4*a*c is very close
# to zero but happens to be negative. LAME!!!
np_2a = 2*a
np_sqrt_base = b**2 - 4*a*c
np_closed = (-NP_b - sqrt(NP_sqrt_base)) / (NP_2a)
else:
raise ValueError("nporder must be 1 or 2")
# generate cse for writing out
msg = " minimizing ops by eliminating common sub-expressions"
stat = _aggstatus(stat, msg, aggstat)
exprstages = [Eq(NP_b, b), Eq(NP_2a, np_2a),
# fix for floating point sqrt() error
Eq(NP_sqrt_base, np_sqrt_base), Eq(NP_sqrt_base, Abs(NP_sqrt_base)),
Eq(NP1, np_closed), Eq(NT1, nt_closed).xreplace({NP: NP1})]
cse_stages = cse(exprstages, numbered_symbols('n'))
exprothers = [Eq(LpF, LoverF), Eq(PpF, ppf), Eq(TpF, tpf),
Eq(SWUpF, SWUoverF), Eq(SWUpP, SWUoverP)] + \
[Eq(*z) for z in zip(xPi, xP)] + [Eq(*z) for z in zip(xTi, xT)]
exprothers = [e.xreplace({NP: NP1, NT: NT1}) for e in exprothers]
cse_others = cse(exprothers, numbered_symbols('g'))
exprops = count_ops(exprstages + exprothers)
cse_ops = count_ops(cse_stages + cse_others)
msg = " reduced {0} ops to {1}".format(exprops, cse_ops)
stat = _aggstatus(stat, msg, aggstat)
# create function body
ccode, repnames = cse_to_c(*cse_stages, indent=6, debug=debug)
ccode_others, repnames_others = cse_to_c(*cse_others, indent=6, debug=debug)
ccode += ccode_others
repnames |= repnames_others
msg = " completed in {0:.3G} s".format(time.time() - start_time)
stat = _aggstatus(stat, msg, aggstat)
if aggstat:
print(stat)
return ccode, repnames, stat
_func_header1 = \
"""pyne::enrichment::Cascade pyne::enrichment::solve_symbolic(pyne::enrichment::Cascade & orig_casc)
{
pyne::enrichment::Cascade casc = orig_casc;
int j = casc.j;
int k = casc.k;
double alpha = casc.alpha;
double NP0 = casc.N;
//double NT0 = casc.M;
double Mstar = casc.Mstar;
double xPj = casc.x_prod_j;
//double xFj = casc.x_feed_j;
double xTj = casc.x_tail_j;
int ncomp = casc.mat_feed.comp.size();
double LpF = -1.0, PpF = -1.0, TpF = -1.0,
SWUpF = -1.0, SWUpP = -1.0,
NP_b = -1.0, NP_sqrt_base = -1.0, NP_2a = -1.0,
NP1 = -1.0, NT1 = -1.0;
double * MW = new double [ncomp];
double * xP = new double [ncomp];
double * xF = new double [ncomp];
double * xT = new double [ncomp];
"""
_func_header2 = """
int nuc;
int i = 2;
MW[0] = pyne::atomic_mass(j);
MW[1] = pyne::atomic_mass(k);
xF[0] = casc.mat_feed.comp[j];
xF[1] = casc.mat_feed.comp[k];
for(pyne::comp_iter ci = casc.mat_feed.comp.begin(); ci != casc.mat_feed.comp.end(); ci++)
{
nuc = (*ci).first;
if (nuc == j || nuc == k)
continue;
MW[i] = pyne::atomic_mass(nuc);
xF[i] = (*ci).second;
i++;
};
switch (ncomp)
{
"""
_func_footer = """
};
i = 2;
casc.mat_prod.comp[j] = xP[0];
casc.mat_prod.comp[k] = xP[1];
casc.mat_tail.comp[j] = xT[0];
casc.mat_tail.comp[k] = xT[1];
for(pyne::comp_iter ci = casc.mat_feed.comp.begin(); ci != casc.mat_feed.comp.end(); ci++)
{
nuc = (*ci).first;
if (nuc == j || nuc == k)
continue;
casc.mat_prod.comp[nuc] = xP[i];
casc.mat_tail.comp[nuc] = xT[i];
i++;
};
// must renormalize to eliminate numerical error
casc.mat_prod.norm_comp();
casc.mat_tail.norm_comp();
casc.mat_prod.mass = PpF;
casc.mat_tail.mass = TpF;
casc.N = NP1;
casc.M = NT1;
casc.l_t_per_feed = LpF;
casc.swu_per_feed = SWUpF;
casc.swu_per_prod = SWUpP;
delete [] MW;
delete [] xP;
delete [] xF;
delete [] xT;
return casc;
};
"""
def _mapable_cgen_ncomp(kwargs):
return cgen_ncomp(**kwargs)
def cgen_func(max_ncomp=40, debug=False):
"""Generate C function to compute multicoponent enrichment cascades for
a number of components between 3 and max_ncomp.
"""
ncomps = range(3, max_ncomp+1)
if 1 == NPROCS:
ncomp_kwargs = [{'ncomp': n, 'debug': debug, 'aggstat': False} for n in ncomps]
cgened = map(_mapable_cgen_ncomp, ncomp_kwargs)
elif 1 < NPROCS:
ncomp_kwargs = [{'ncomp': n, 'debug': debug, 'aggstat': True} for n in ncomps]
pool = multiprocessing.Pool(NPROCS)
cgened = pool.map(_mapable_cgen_ncomp, ncomp_kwargs)
else:
raise ValueError("NPROCS must be greater than or equal to 1")
cases = ''
repnames = set()
for ncomp, (ccode_ncomp, repnames_ncomp, statmsg) in zip(ncomps, cgened):
cases += " case {0}:\n".format(ncomp)
cases += ccode_ncomp
cases += " break;\n"
repnames |= repnames_ncomp
logging.info(statmsg)
repdeclare = " double " + repnames.pop() + " = 0.0,\n"
repdectemp = " {0} = 0.0"
repdeclare += ",\n".join([repdectemp.format(r) for r in repnames])
repdeclare += ";\n"
ccode = _func_header1
ccode += repdeclare
ccode += _func_header2
ccode += cases
ccode += _func_footer
return ccode
_header_file_template = r"""
/// \file {hfname}
/// \author Anthony Scopatz (scopatz\@gmail.com)
///
/// \brief A multicomponent enrichment cascade solver using
/// a symbolic solution to the mass flow rate equations.
/*********************************************************/
/*** Symbolic Enrichment Functions ***/
/*** WARNING: This file is auto-generated. ***/
/*** DO NOT MODIFY!!! ***/
/*********************************************************/
#ifndef PYNE_OU4PO4TJDBDM5PY4VKAVL7JCSM
#define PYNE_OU4PO4TJDBDM5PY4VKAVL7JCSM
#include <math.h>
#ifndef PYNE_IS_AMALGAMATED
#include "enrichment_cascade.h"
#endif
namespace pyne {{
namespace enrichment {{
/// A multicomponent enrichment cascade solver using
/// a symbolic solution to the mass flow rate equations.
/// \param orig_casc The original state of the cascade.
/// \return A cascade solved for new N, M, and total flow
/// rates.
Cascade solve_symbolic(Cascade & orig_casc);
// end enrichment
}};
// end pyne
}};
#endif
"""
_source_file_header_template = """
/*********************************************************/
/*** Symbolic Enrichment Functions ***/
/*** WARNING: This file is auto-generated. ***/
/*** DO NOT MODIFY!!! ***/
/*********************************************************/
#ifndef PYNE_IS_AMALGAMATED
#include "{hfname}"
#endif
"""
def cgen_header_file(hfname="temp"):
""" Generates a valid C/C++ header file for multicomponent enrichment cascades.
"""
hcode = _header_file_template.format(hfname=os.path.split(hfname)[-1])
return hcode
def cgen_source_file(hfname="temp", max_ncomp=40, debug=False):
""" Generates a valid C/C++ source file for multicomponent enrichment cascades.
"""
ccode = _source_file_header_template.format(hfname=os.path.split(hfname)[-1])
ccode += cgen_func(max_ncomp, debug=debug)
return ccode
def cgen_file(filename="temp", header_filename=None, lang='C++', max_ncomp=40,
debug=False):
"""Generate C/C++ header and source file to compute multicoponent enrichment
cascades for a number of components between 3 and max_ncomp. The filename
argument should not end in extension ('.h', '.c', or '.cpp') as it will be
appended automatically.
"""
logfile = 'sme{0}.log'.format(max_ncomp)
if os.path.exists(logfile):
os.remove(logfile)
logging.basicConfig(filename=logfile, level=logging.DEBUG)
hfname = filename + '.h' if header_filename is None else header_filename
sfname = filename + '.' + {'C': 'c', 'C++': 'cpp', 'CPP': 'cpp'}[lang.upper()]
logging.info("header filename: " + hfname)
logging.info("source filename: " + sfname)
logging.info("language: " + lang)
logging.info("maximum number of components: {0}".format(max_ncomp))
logging.info("debug enabled: {0}".format(debug))
hcode = cgen_header_file(hfname)
ccode = cgen_source_file(hfname, max_ncomp, debug=debug)
with open(hfname, 'w') as f:
f.write(hcode)
with open(sfname, 'w') as f:
f.write(ccode)
if __name__ == '__main__':
cgen_file(max_ncomp=3)
|
{"hexsha": "bc7ecb72c3fc1f63353e3a51e74c97fe3cd27cd1", "size": 14636, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyne/apigen/enrich_multi_sym.py", "max_stars_repo_name": "ypark234/pyne", "max_stars_repo_head_hexsha": "b7c4932c0399e6a0881aea943b392fb97cd0b6bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-12T17:05:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-12T17:05:29.000Z", "max_issues_repo_path": "pyne/apigen/enrich_multi_sym.py", "max_issues_repo_name": "ypark234/pyne", "max_issues_repo_head_hexsha": "b7c4932c0399e6a0881aea943b392fb97cd0b6bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2019-01-07T16:13:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-09T15:56:26.000Z", "max_forks_repo_path": "pyne/apigen/enrich_multi_sym.py", "max_forks_repo_name": "ypark234/pyne", "max_forks_repo_head_hexsha": "b7c4932c0399e6a0881aea943b392fb97cd0b6bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.053164557, "max_line_length": 100, "alphanum_fraction": 0.6091828368, "include": true, "reason": "from sympy", "num_tokens": 4470}
|
# -*- coding: utf-8 -*-
"""~
@author: yanral
"""
import sabs_pkpd
import pints
import numpy as np
import os
import csv
import multiprocessing
# Select the folder in which this repo is downloaded in the line below
os.chdir('The/location/of/the/root/folder/of/this/repo')
# Set the parallelisation of the script
parallel = True
# Load the MMT model for the O'Hara CiPA model
filename = './Models/Ohara CiPA - algebraic voltage.mmt'
s = sabs_pkpd.load_model.load_simulation_from_mmt(filename)
s.set_tolerance(1e-08, 1e-08)
default_state = s.state()
# Save the initial conditions published in OHara CiPA model
Ohara_init_conds = default_state.copy()
# Define the functions to make sure there is consistency between the initial conditions
def G0_calc(Ki=144.65559, Kss=144.65556, Nai=7.268, Nass=7.26809,
            Cai=8.6e-5, Cansr=1.61957, Cajsr=1.571234014, Cass=8.49e-5,
            V=-88, extraK=5.4, extraNa=140, extraCa=1.8):
    tot_cai = Cai * (1 + 0.05 / (Cai + 0.00238) + 0.07 / (Cai + 0.0005))
    tot_cass = Cass * (1 + 0.047 / (Cass + 0.00087) + 1.124 / (Cass + 0.0087))
    tot_cajsr = Cajsr * (1 + 10 / (Cajsr + 0.8))
    return (-V / (96485 * 2.583592e-05) * 0.0001533576
            + Ki + Kss * 0.029411764705882353
            + Nai + Nass * 0.029411764705882353
            + 2 * (tot_cai + tot_cass * 0.029411764705882353
                   + Cansr * 0.08117647059 + tot_cajsr * 0.007059)
            - extraK - extraNa - 2 * extraCa)
def Ki_calc(G0, Nai=7.268, Nass=7.26809, Cai=8.6e-5, Cansr=1.61957,
            Cajsr=1.571234014, Cass=8.49e-5, V=-88, extraK=5.4,
            extraNa=140, extraCa=1.8):
    tot_cai = Cai * (1 + 0.05 / (Cai + 0.00238) + 0.07 / (Cai + 0.0005))
    tot_cass = Cass * (1 + 0.047 / (Cass + 0.00087) + 1.124 / (Cass + 0.0087))
    tot_cajsr = Cajsr * (1 + 10 / (Cajsr + 0.8))
    return ((V / (96485 * 2.583592e-05) * 0.0001533576
             + extraK + extraNa + 2 * extraCa + G0
             - Nai - Nass * 0.029411764705882353
             - 2 * (tot_cai + tot_cass * 0.029411764705882353
                    + Cansr * 0.08117647059 + tot_cajsr * 0.007059))
            / 1.029411764705882353)
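# Consistency sketch (illustrative only, using the default parameter values
# above): G0_calc and Ki_calc express the same charge-conservation relation, so
#     g0 = G0_calc()
#     Ki_calc(g0)   # should recover Ki = 144.65559 up to rounding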
def compute(Gamma_0):
# Reinitialise the myokit.Simulation
s.reset()
# Set the initial conditions for Ki and Kss so that the initial conditions match with the value of Gamma_0
initial_state = default_state.copy()
initial_K = Ki_calc(Gamma_0,
Nai = default_state[1],
Nass = default_state[2],
Cai = default_state[5],
Cansr = default_state[7],
Cajsr = default_state[8],
Cass = default_state[6])
initial_state[3] = initial_K
initial_state[4] = initial_K
s.set_state(initial_state)
# Record the action potential at the limit cycle
s.pre(2000000)
out = s.run(1000, log_interval = 1)
return out['membrane.V']
# Run the model with the published original initial conditions and the Gamma_0 value associated with it
Original_Gamma_0 = 7.801116
data_to_fit = compute(Original_Gamma_0)
# Define the time points on which to read the voltage
time_points = np.linspace(0, 999, 1000)
# Define the fitted parameters and initial point
parameters_to_fit = ['ical.rescale', 'ikr.rescale', 'IKs.rescale', 'INa.rescale', 'INaL.rescale']
true_values = np.array([1, 1, 1, 1, 1])
x0 = np.random.uniform(low = 0.2, high = 5, size = 5)
print('Initial point for fitting : ' + str(x0))
# Set the boundaries for fitting
mini = np.ones(5) * 0.1
print('Lower boundary : ' + str(mini))
maxi = np.ones(5) * 10
print('Upper boundary : ' + str(maxi))
# Prepare Pints optimisation routine
# In[Run the optimisation]
# Run optimisation of the 5 rescaling parameters
class MyModel(pints.ForwardModel):
def n_parameters(self):
# Define the amount of fitted parameters
return sabs_pkpd.constants.n
def simulate(self, parameters, times):
sabs_pkpd.constants.n = len(parameters)
# Set the rescaling parameters
for p, label in enumerate(parameters_to_fit):
s.set_constant(label, parameters[p])
        # In case there is a numerical error
        try:
            out = compute(Gamma_0=Original_Gamma_0)
            return out
        except Exception:
            print('Simulation error. Continuing anyway...')
            return np.zeros(len(times))
# Define the parameters and method for fitting
sigma0 = [0.1, 0.1, 0.1, 0.1, 0.1]
sabs_pkpd.constants.n = len(parameters_to_fit)
problem = pints.SingleOutputProblem(
model=MyModel(),
times=time_points,
values=data_to_fit)
boundaries = pints.RectangularBoundaries(mini, maxi)
function = pints.SumOfSquaresError(problem)
method = pints.CMAES
# Detailed code from PINTS controller to log the CSV files during fitting
# Convert x0 to vector
# This converts e.g. (1, 7) shapes to (7, ), giving users a bit more
# freedom with the exact shape passed in. For example, to allow the
# output of LogPrior.sample(1) to be passed in.
x0 = pints.vector(x0)
# Check if minimising or maximising
minimising = not isinstance(function, pints.LogPDF)
# Store transform for later detransformation: if using a transform, any
# parameters logged to the filesystem or printed to screen should be
# detransformed first!
transform = None
# Create optimiser
optimiser = method(x0, sigma0, boundaries)
# Check if sensitivities are required
needs_sensitivities = False
# Logging
log_to_screen = True
log_filename = './Scripts/Fitting ORd-CiPA/OHara initial conditions.csv'
log_csv = True
message_interval = 20
message_warm_up = 3
#
# Stopping criteria
#
# Maximum iterations
max_iterations = 100000
# Maximum unchanged iterations
max_unchanged_iterations = 100
min_significant_change = 1
# Threshold value
threshold = None
# Post-run statistics
evaluations = None
iterations = None
time = None
# Iterations and function evaluations
iteration = 0
evaluations = 0
# Unchanged iterations count
unchanged_iterations = 0
# Choose method to evaluate
f = function
if needs_sensitivities:
f = f.evaluateS1
# Create evaluator object
if parallel:
    # default to one worker per CPU core, capped by the optimiser's population
    n_workers = min(multiprocessing.cpu_count(), optimiser.population_size())
    evaluator = pints.ParallelEvaluator(f, n_workers=n_workers)
else:
    evaluator = pints.SequentialEvaluator(f)
# Keep track of best position and score
fbest = float('inf')
xbest = x0
# Internally we always minimise! Keep a 2nd value to show the user
fbest_user = fbest if minimising else -fbest
# Set up progress reporting
next_message = 0
# Start logging
if log_to_screen:
    # Show method
    print('Using ' + str(optimiser.name()))
    # Show parallelisation
    if parallel:
        print('Running in parallel with ' + str(n_workers) +
              ' worker processes.')
    else:
        print('Running in sequential mode.')
# Show population size
pop_size = optimiser.population_size()
if log_to_screen:
print('Population size: ' + str(pop_size))
# Set up logger
logger = pints.Logger()
logger.set_filename(log_filename, csv=log_csv)
# Add fields to log
max_iter_guess = max(max_iterations or 0, 10000)
max_eval_guess = max_iter_guess * pop_size
logger.add_counter('Iter.', max_value=max_iter_guess)
logger.add_counter('Eval.', max_value=max_eval_guess)
logger.add_float('Best')
#Log the values of parameters returning the best score
for param in range(sabs_pkpd.constants.n):
logger.add_float(parameters_to_fit[param])
# Initialise logger
optimiser._log_init(logger)
logger.add_time('Time m:s')
# Start searching
timer = pints.Timer()
# Log first point
logger.log(0, 0, fbest_user, *xbest)
optimiser._log_write(logger)
logger.log(timer.time())
running = True
try:
while running:
# Get points
xs = optimiser.ask()
# Calculate scores
fs = evaluator.evaluate(xs)
# Perform iteration
optimiser.tell(fs)
# Check if new best found
fnew = optimiser.fbest()
if fnew < fbest:
# Check if this counts as a significant change
if np.abs(fnew - fbest) < min_significant_change:
unchanged_iterations += 1
else:
unchanged_iterations = 0
# Update best
fbest = fnew
xbest = optimiser.xbest()
# Update user value of fbest
fbest_user = fbest if minimising else -fbest
else:
unchanged_iterations += 1
# Update evaluation count
evaluations += len(fs)
# Log state
logger.log(iteration, evaluations, fbest_user, *xbest)
optimiser._log_write(logger)
logger.log(timer.time())
# Update iteration count
iteration += 1
#
# Check stopping criteria
#
# Maximum number of iterations
if (max_iterations is not None and
iteration >= max_iterations):
running = False
halt_message = ('Halting: Maximum number of iterations ('
+ str(iteration) + ') reached.')
# Maximum number of iterations without significant change
halt = (max_unchanged_iterations is not None and
unchanged_iterations >= max_unchanged_iterations)
if halt:
running = False
halt_message = ('Halting: No significant change for ' +
str(unchanged_iterations) + ' iterations.')
# Error in optimiser
error = optimiser.stop()
if error: # pragma: no cover
running = False
halt_message = ('Halting: ' + str(error))
except (Exception, SystemExit, KeyboardInterrupt): # pragma: no cover
# Unexpected end!
# Show last result and exit
print('\n' + '-' * 40)
print('Unexpected termination.')
print('Current best score: ' + str(fbest))
print('Current best position:')
xbest = optimiser.xbest()
for p in xbest:
print(pints.strfloat(p))
print('-' * 40)
raise
# Stop timer
time = timer.time()
# Log final values and show halt message
logger.log(iteration, evaluations, fbest_user)
optimiser._log_write(logger)
logger.log(time)
print(halt_message)
# Save post-run statistics
evaluations = evaluations
iterations = iteration
# Inverse transform search parameters
xbest = optimiser.xbest()
print('\n\nBest point:\n')
print(xbest)
# Write the starting point to the last line in the format: 0, 0, 0, x0[0], ..., x0[-1], 0
with open(log_filename, 'a+', newline='') as csvfile:
    mywriter = csv.writer(csvfile, delimiter=',')
    mywriter.writerow([0, 0, 0] + list(x0[:]) + [0])
|
{"hexsha": "9378b8b32de2486e23f9941f35dc01ba39bea67d", "size": 10493, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/Fitting ORd-CiPA/Fitting with OHara initial conditions.py", "max_stars_repo_name": "CardiacModelling/Gamma_0", "max_stars_repo_head_hexsha": "05c7e16088656b343d3aa347df88fd3dd58cea87", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scripts/Fitting ORd-CiPA/Fitting with OHara initial conditions.py", "max_issues_repo_name": "CardiacModelling/Gamma_0", "max_issues_repo_head_hexsha": "05c7e16088656b343d3aa347df88fd3dd58cea87", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/Fitting ORd-CiPA/Fitting with OHara initial conditions.py", "max_forks_repo_name": "CardiacModelling/Gamma_0", "max_forks_repo_head_hexsha": "05c7e16088656b343d3aa347df88fd3dd58cea87", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.98, "max_line_length": 251, "alphanum_fraction": 0.6615839131, "include": true, "reason": "import numpy", "num_tokens": 2898}
|
using CUDA
using ExaPF
using KernelAbstractions
using Test
using TimerOutputs
import ExaPF: PowerSystem
import ExaPF.PowerSystem: ParsePSSE
const PS = PowerSystem
@testset "Powerflow residuals and Jacobian" begin
local_case = "case14.raw"
# read data
to = TimerOutputs.TimerOutput()
datafile = joinpath(dirname(@__FILE__), "..", "data", local_case)
data_raw = ParsePSSE.parse_raw(datafile)
data = ParsePSSE.raw_to_exapf(data_raw)
# Parsed data indexes
BUS_I, BUS_TYPE, PD, QD, GS, BS, BUS_AREA, VM, VA, BASE_KV, ZONE, VMAX, VMIN,
LAM_P, LAM_Q, MU_VMAX, MU_VMIN = PS.IndexSet.idx_bus()
    # retrieve required data
bus = data["bus"]
gen = data["gen"]
SBASE = data["baseMVA"][1]
bus_to_indexes = PS.get_bus_id_to_indexes(bus)
nbus = size(bus, 1)
# obtain V0 from raw data
V = Array{Complex{Float64}}(undef, nbus)
T = Vector
for i in 1:nbus
V[i] = bus[i, VM]*exp(1im * pi/180 * bus[i, VA])
end
@test V ≈ Complex{Float64}[
1.06 + 0.0im,
1.0410510706561686 - 0.0907616013832108im,
0.985192522040012 - 0.22247627854771523im,
1.0012292929704543 - 0.18218707911892243im,
1.0075796620614788 - 0.15551162239548505im,
1.0372102511734809 - 0.2628590779498494im,
1.0327942548732372 - 0.24527685887754397im,
1.0605035588701377 - 0.2518575026156106im,
1.0202428186152266 - 0.27219984563562466im,
1.0147053262903118 - 0.27373721193522754im,
1.0218895875940064 - 0.26981552747562876im,
1.0188740342304141 - 0.27444787933420284im,
1.0138437793219441 - 0.2746250817572887im,
0.995247767507711 - 0.286014443990015im
]
# form Y matrix
Ybus = PS.makeYbus(data, bus_to_indexes).ybus;
Vm = abs.(V)
Va = angle.(V)
bus = data["bus"]
gen = data["gen"]
nbus = size(bus, 1)
ngen = size(gen, 1)
ybus_re, ybus_im = ExaPF.Spmat{T{Int}, T{Float64}}(Ybus)
SBASE = data["baseMVA"][1]
Sbus, Sload = PS.assembleSbus(gen, bus, SBASE, bus_to_indexes)
pbus = real(Sbus)
qbus = imag(Sbus)
# Test that Sbus is correctly specified
@test Sbus ≈ Complex{Float64}[
2.32393 - 0.16549im,
0.23189298795657234 + 0.3255213997211121im,
-0.8066188947413118 + 0.028255632303189476im,
-0.5123531308565288 + 0.035668879376258705im,
-0.07125129097998142 - 0.02060016150325537im,
-0.1098278333939612 + 0.07281082918372937im,
0.0 + 0.0im,
0.0 + 0.17623im,
-0.24683805744186976 - 0.15824985834313557im,
-0.08821656550979241 - 0.05553726513776928im,
-0.038463023291667925 - 0.014532033764664084im,
-0.05643415630748495 - 0.017413308476656675im,
-0.11665673277410679 - 0.05281302692126483im,
-0.13051302125309594 - 0.04527619677595794im
]
end
@testset "PowerNetwork object" begin
psse_datafile = "case14.raw"
matpower_datafile = "case9.m"
# Test constructor
@testset "Parsers $name" for name in [psse_datafile, matpower_datafile]
datafile = joinpath(dirname(@__FILE__), "..", "data", name)
pf = PS.PowerNetwork(datafile)
@test isa(pf, PS.PowerNetwork)
end
# From now on, test with "case9.m"
datafile = joinpath(dirname(@__FILE__), "..", "data", matpower_datafile)
data = PS.import_dataset(datafile)
pf = PS.PowerNetwork(data)
@testset "Computing cost coefficients" begin
coefs = PS.get_costs_coefficients(pf)
@test size(coefs) == (3, 4)
@test isequal(coefs[:, 1], [3.0, 2.0, 2.0])
end
@testset "Getters" for Attr in [
PS.NumberOfBuses,
PS.NumberOfPVBuses,
PS.NumberOfPQBuses,
PS.NumberOfSlackBuses,
PS.NumberOfLines,
PS.NumberOfGenerators,
]
res = PS.get(pf, Attr())
@test isa(res, Int)
end
@testset "Indexing" begin
idx = PS.get(pf, PS.GeneratorIndexes())
@test isequal(idx, [1, 2, 3])
end
@testset "Bounds" begin
n_bus = PS.get(pf, PS.NumberOfBuses())
v_min, v_max = PS.bounds(pf, PS.Buses(), PS.VoltageMagnitude())
@test length(v_min) == n_bus
@test length(v_max) == n_bus
n_gen = PS.get(pf, PS.NumberOfGenerators())
p_min, p_max = PS.bounds(pf, PS.Generators(), PS.ActivePower())
@test length(p_min) == n_gen
@test length(p_max) == n_gen
q_min, q_max = PS.bounds(pf, PS.Generators(), PS.ReactivePower())
@test length(q_min) == n_gen
@test length(q_max) == n_gen
n_lines = PS.get(pf, PS.NumberOfLines())
f_min, f_max = PS.bounds(pf, PS.Lines(), PS.ActivePower())
@test length(f_min) == n_lines
@test length(f_max) == n_lines
end
@testset "Load from data" begin
pf_original = PS.PowerNetwork(data)
@test isa(pf_original, PS.PowerNetwork)
n_lines = PS.get(pf_original, PS.NumberOfLines())
pf_removed = PS.PowerNetwork(data; remove_lines=Int[1])
@test isa(pf_removed, PS.PowerNetwork)
n_after_removal = PS.get(pf_removed, PS.NumberOfLines())
@test n_lines - 1 == n_after_removal
@test pf_original.Ybus != pf_removed.Ybus
end
end
|
{"hexsha": "139953173222ec3b6583fcac408fea9c241d6b4e", "size": 5265, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/powersystem.jl", "max_stars_repo_name": "exanauts/ExaPF.jl", "max_stars_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2020-07-15T16:01:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T21:28:25.000Z", "max_issues_repo_path": "test/powersystem.jl", "max_issues_repo_name": "exanauts/ExaPF.jl", "max_issues_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 129, "max_issues_repo_issues_event_min_datetime": "2020-07-02T11:59:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T20:10:54.000Z", "max_forks_repo_path": "test/powersystem.jl", "max_forks_repo_name": "exanauts/ExaPF.jl", "max_forks_repo_head_hexsha": "cd1bcb8a0782fe448d46a10816f82c5d28c3854e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-07-15T18:49:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T20:12:19.000Z", "avg_line_length": 32.5, "max_line_length": 81, "alphanum_fraction": 0.6357075024, "num_tokens": 1758}
|
#
# This file is part of the profilerTools suite (see
# https://github.com/mssm-labmmol/profiler).
#
# Copyright (c) 2020 mssm-labmmol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from random import uniform, gauss, lognormvariate, choice
from abc import ABC
import numpy as np
class RandomizerInterface(ABC):
def random(self):
pass
class LogNormalRandomizer(RandomizerInterface):
def __init__(self, mean, stdev):
self.mean = mean
self.stdev = stdev
def random(self):
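        # Moment matching: for X ~ LogNormal(mu, sigma),
        #   E[X]   = exp(mu + sigma^2 / 2)
        #   Var[X] = (exp(sigma^2) - 1) * exp(2*mu + sigma^2)
        # Solving these for (mu, sigma) given the desired mean and stdev of X
        # yields the expressions below.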
phi = (self.stdev**2 + self.mean**2)**0.5
mu = np.log(self.mean**2 / phi)
sigma = (np.log(phi**2 / self.mean**2))**0.5
samples = lognormvariate(mu, sigma)
return samples
class GaussianRandomizer(RandomizerInterface):
def __init__(self, mean, stdev):
self.mean = mean
self.stdev = stdev
def random(self):
samples = gauss(self.mean, self.stdev)
return samples
class UniformRandomizer(RandomizerInterface):
def __init__(self, low, up):
self.low = low
self.up = up
def random(self):
samples = uniform(self.low, self.up)
return samples
class UniformDimDist(RandomizerInterface):
def __init__(self, low, up):
self.low = low
self.up = up
def random(self):
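        # Log-uniform sampling: the base-10 exponent is drawn uniformly between
        # log10(low) and log10(up), so every decade is equally likely.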
low_exp = np.log10(self.low)
up_exp = np.log10(self.up)
exp = uniform(low_exp, up_exp)
return 10**exp
class SignReverserDecorator(RandomizerInterface):
def __init__(self, randomizer, pinv):
self._randomizer = randomizer
self.pinv = pinv
def random(self):
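        # As written, pinv is the percentage chance (1-100) of keeping the sign
        # of the wrapped draw; otherwise the sign is flipped.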
x = choice(range(1, 101))
r = self._randomizer.random()
if x <= self.pinv:
return r
else:
return -1.0 * r
class LimiterDecorator(RandomizerInterface):
def __init__(self, randomizer, min_, max_):
self._randomizer = randomizer
self.min_ = min_
self.max_ = max_
    def random(self):
        rand = self._randomizer.random()
        # resample until the draw lies within [min_, max_]
        while (rand < self.min_) or (rand > self.max_):
            rand = self._randomizer.random()
        return rand
def RandomizerFactory(typestr, **kwargs):
    if (typestr == 'uniform'):
        return UniformRandomizer(**kwargs)
    if (typestr == 'uniform_dim'):
        return UniformDimDist(**kwargs)
    if (typestr == 'gaussian'):
        return GaussianRandomizer(**kwargs)
    if (typestr == 'lognormal'):
        return LogNormalRandomizer(**kwargs)
    raise ValueError("unknown randomizer type: " + typestr)
|
{"hexsha": "582c9d2c15d8117cacb1330014852274d2bb068d", "size": 3515, "ext": "py", "lang": "Python", "max_stars_repo_path": "profilerTools/randomizer.py", "max_stars_repo_name": "mssm-labmmol/profiler", "max_stars_repo_head_hexsha": "6c4a508cd656a8cdf05ab3db19680e6239296a88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-21T13:06:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-10T13:40:46.000Z", "max_issues_repo_path": "profilerTools/randomizer.py", "max_issues_repo_name": "mssm-labmmol/profiler", "max_issues_repo_head_hexsha": "6c4a508cd656a8cdf05ab3db19680e6239296a88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "profilerTools/randomizer.py", "max_forks_repo_name": "mssm-labmmol/profiler", "max_forks_repo_head_hexsha": "6c4a508cd656a8cdf05ab3db19680e6239296a88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3017241379, "max_line_length": 80, "alphanum_fraction": 0.66685633, "include": true, "reason": "import numpy", "num_tokens": 848}
|
# -*- coding: utf-8 -*-
"""@package Methods.Machine.MagnetType14.build_geometry
MagnetType14 build_geometry method
@date Created on Wed Dec 17 16:09:15 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from numpy import angle, exp
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.SurfLine import SurfLine
from pyleecan.Functions.Geometry.comp_flower_arc import comp_flower_arc
from pyleecan.Methods import ParentMissingError
def build_geometry(self, alpha=0, delta=0, is_simplified=False):
"""Compute the curve (Segment, Arc1) needed to plot the Magnet.
The list represents a closed surface.
The ending point of a curve is always the starting point of the next
curve in the list
Parameters
----------
self : MagnetType14
A MagnetType14 object
alpha : float
Angle for rotation [rad]
delta : complex
Complex value for translation
is_simplified: bool
True to avoid line superposition
Returns
-------
surf_list : list
        list of surfaces needed to draw the magnet
"""
# defining label for type_magnetization
if self.type_magnetization == 0:
t_p = "Radial"
else:
t_p = "Parallel"
if self.parent is not None:
(Z1, Z2) = self.parent.get_point_bottom()
H0 = self.parent.H0
W0 = self.parent.W0
else:
raise ParentMissingError(
"Error: The magnet object is not inside a " + "slot object"
)
# comp point coordinate (in complex)
if W0 > self.Wmag: # The magnet is smaller than the slot => center the mag
Z1 = Z1 * exp(1j * (W0 - self.Wmag) / 2)
Z2 = Z2 * exp(-1j * (W0 - self.Wmag) / 2)
if self.is_outwards():
(alpha_lim, Z4, Z3) = comp_flower_arc(
abs(angle(Z1) - angle(Z2)), self.Rtop, abs(Z1) - self.Hmag
)
Zs3 = (abs(Z1) - H0) * exp(1j * angle(Z1))
Zs4 = (abs(Z2) - H0) * exp(1j * angle(Z2))
Zref = abs(Z1) - self.Hmag / 2
else:
(alpha_lim, Z4, Z3) = comp_flower_arc(
abs(angle(Z1) - angle(Z2)), self.Rtop, abs(Z1) + self.Hmag
)
Zs3 = (abs(Z1) + H0) * exp(1j * angle(Z1))
Zs4 = (abs(Z2) + H0) * exp(1j * angle(Z2))
Zref = abs(Z1) + self.Hmag / 2
# Creation of curve
curve_list = list()
if is_simplified and W0 > self.Wmag:
curve_list.append(Segment(Z1, Z3))
elif is_simplified and H0 < self.Hmag:
curve_list.append(Segment(Zs3, Z3))
elif not is_simplified:
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, self.Rtop))
if is_simplified and W0 > self.Wmag:
curve_list.append(Segment(Z4, Z2))
elif is_simplified and H0 < self.Hmag:
curve_list.append(Segment(Z4, Zs4))
elif not is_simplified:
curve_list.append(Segment(Z4, Z2))
if not is_simplified:
curve_list.append(Arc1(Z2, Z1, -abs(Z2), is_trigo_direction=False))
surf_list = list()
surf_list.append(
SurfLine(
line_list=curve_list,
label="MagnetRotor" + t_p + "_N_R0_T0_S0",
point_ref=Zref,
)
)
# Apply transformation
for surf in surf_list:
surf.rotate(alpha)
surf.translate(delta)
return surf_list
|
{"hexsha": "4f267f205b0da9bcd03d483ca4b23cebaff285a2", "size": 3363, "ext": "py", "lang": "Python", "max_stars_repo_path": "Methods/Machine/MagnetType14/build_geometry.py", "max_stars_repo_name": "Superomeg4/pyleecan", "max_stars_repo_head_hexsha": "2b695b5f39e77475a07aa0ea89489fb0a9659337", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Methods/Machine/MagnetType14/build_geometry.py", "max_issues_repo_name": "Superomeg4/pyleecan", "max_issues_repo_head_hexsha": "2b695b5f39e77475a07aa0ea89489fb0a9659337", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Methods/Machine/MagnetType14/build_geometry.py", "max_forks_repo_name": "Superomeg4/pyleecan", "max_forks_repo_head_hexsha": "2b695b5f39e77475a07aa0ea89489fb0a9659337", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7610619469, "max_line_length": 79, "alphanum_fraction": 0.621171573, "include": true, "reason": "from numpy", "num_tokens": 982}
|
import numpy as np
from scipy import optimize
class SteelSection:
"""a"""
def __init__(self, code, props):
"""a"""
self.code = code
self.area = props[0]
self.mass = props[1]
self.ixx = props[2]
self.zxx = props[3]
self.sxx = props[4]
self.rx = props[5]
self.iyy = props[6]
self.zyy = props[7]
self.syy = props[8]
self.ry = props[9]
self.j = props[10]
self.iw = props[11]
self.kf = props[12]
def calc_section_properties(self):
"""a"""
pass
def load_section_properties(self):
"""a"""
pass
def calc_ze(self, axis):
"""a
Cl. 5.2.3, Cl. 5.2.4, Cl. 5.2.5 AS4100-1998
"""
if axis == 'x':
z = self.zxx
s = self.sxx
(compact_x, lambda_s, lambda_lims, plate_type) = self.bending_compact_x()
elif axis == 'y':
z = self.zyy
s = self.syy
(compact_x, lambda_s, lambda_lims, plate_type) = self.bending_compact_y()
if compact_x == 'C':
return min(1.5 * z, s)
elif compact_x == 'NC':
zc = min(1.5 * z, s)
lambda_sy = lambda_lims[1]
lambda_sp = lambda_lims[0]
return z + (zc - z) * (lambda_sy - lambda_s) / (lambda_sy - lambda_sp)
elif compact_x == 'S':
if plate_type in ['Uniform1', 'Uniform2']:
return z * lambda_lims[1] / lambda_s
elif plate_type == 'CHS':
z1 = z * np.sqrt(lambda_lims[1] / lambda_s)
z2 = z * (2 * lambda_lims[1] / lambda_s) ** 2
return min(z1, z2)
else:
return z * (lambda_lims[1] / lambda_s) ** 2
def calc_phi_msx(self):
"""a"""
return self.code.phi_member * self.get_yield_stress() * self.calc_ze(
axis='x') / 1e6
def calc_phi_msy(self):
"""a"""
return self.code.phi_member * self.get_yield_stress() * self.calc_ze(
axis='y') / 1e6
def calc_phi_mbx(self, le, alpha_m=1):
"""a
"""
# get section capacity
msx = self.calc_phi_msx() / self.code.phi_member
# get reference buckling moment
m0 = self.calc_m0(le)
# calculate alpha_s
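        # slenderness reduction factor alpha_s per Cl. 5.6.1.1 AS4100-1998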
alpha_s = 0.6 * (np.sqrt((msx / m0) ** 2 + 3) - msx / m0)
return self.code.phi_member * alpha_m * alpha_s * msx
def full_restraint_length(self, alpha_m):
"""a"""
def f(x):
return self.calc_phi_mbx(x, alpha_m) - self.calc_phi_msx()
return optimize.root_scalar(f, x0=500, x1=2000).root
class UBSection(SteelSection):
"""Class for Universal Beam sections.
:param string name: Name of the UB section
:param float d: Total depth of the UB section [mm]
:param float bf: Flange width of the UB section [mm]
:param float tf: Flange thickness of the UB section [mm]
:param float tw: Web thickness of the UB section [mm]
:param float r: Root radius of the UB section [mm]
:param grade: Steel grade
:type grade: :class:`~steeldesign.codes.SteelGrade`
:param props: List of section properties to import
:type props: list[float]
"""
def __init__(self, code, name, d, bf, tf, tw, r, grade, props=[None] * 13):
"""Inits the UBSection class."""
super().__init__(code, props)
self.name = name
self.d = d
self.bf = bf
self.tf = tf
self.tw = tw
self.r = r
self.grade = grade
# calculate yield stresses
self.fyf = self.grade.get_yield_stress(self.tf)
self.fyw = self.grade.get_yield_stress(self.tw)
def get_tf(self):
"""Returns the thickness of the flange.
:returns: Flange thickness
:rtype: float
"""
return self.tf
def get_tw(self):
"""Returns the thickness of the web.
:returns: Web thickness
:rtype: float
"""
return self.tw
def get_nw(self):
"""Returns the number of webs.
:returns: Number of webs
:rtype: int
"""
return 1
def calc_dw(self):
"""Returns the depth of the web (d - 2 * tf).
:returns: Depth of the web [mm]
:rtype: float
"""
return self.d - 2 * self.tf
def calc_web_slenderness(self):
"""Returns the slenderness of the web (dw / tw).
:returns: Web slenderness
:rtype: float
"""
return self.calc_dw() / self.tw
def calc_flange_slenderness(self):
"""Returns the slenderness of the flange ((bf - tw) / (2 * tf)).
:returns: Flange slenderness
:rtype: float
"""
return (self.bf - self.tw) / (2 * self.tf)
def get_yield_stress(self):
"""Returns the yield stress of the section
fy = min(fyf, fyw)
:returns: Yield stress of the section
:rtype: float
"""
return min(self.fyf, self.fyw)
def bending_compact_x(self):
"""Returns the compactness of the section for bending about the x-axis.
:returns: Compactness of the section for bending about the x-axis *('C', 'NC', 'S'),
section slenderness, slenderness limits and the plate type
:rtype: tuple(string, float, tuple(float, float, float), string)
"""
# flange slenderness
lambda_f = self.calc_flange_slenderness() * np.sqrt(self.fyf / 250)
lambda_f_lims = self.code.plate_slenderness_bending('Uniform1', 'HR')
ratio_f = lambda_f / lambda_f_lims[1]
# web slenderness
lambda_w = self.calc_web_slenderness() * np.sqrt(self.fyw / 250)
lambda_w_lims = self.code.plate_slenderness_bending('Bending2', 'HR')
ratio_w = lambda_w / lambda_w_lims[1]
# section slenderness
if ratio_f > ratio_w:
lambda_s = lambda_f
lambda_lims = lambda_f_lims
plate_type = 'Uniform1'
else:
lambda_s = lambda_w
lambda_lims = lambda_w_lims
plate_type = 'Bending2'
# compactness
if lambda_s < lambda_lims[0]:
compact = 'C'
elif lambda_s < lambda_lims[1]:
compact = 'NC'
else:
compact = 'S'
        return (compact, lambda_s, lambda_lims, plate_type)
def bending_compact_y(self):
"""Returns the compactness of the section for bending about the y-axis.
        :returns: Compactness of the section for bending about the y-axis ('C', 'NC' or 'S'),
section slenderness, slenderness limits and the plate type
:rtype: tuple(string, float, tuple(float, float, float), string)
"""
# flange slenderness
lambda_s = self.calc_flange_slenderness() * np.sqrt(self.fyf / 250)
lambda_lims = self.code.plate_slenderness_bending('Bending1', 'HR')
plate_type = 'Bending1'
# compactness
if lambda_s < lambda_lims[0]:
compact = 'C'
elif lambda_s < lambda_lims[1]:
compact = 'NC'
else:
compact = 'S'
        return (compact, lambda_s, lambda_lims, plate_type)
def full_restraint_length_simple(self, beta_m=-1):
"""Returns the maximum segment length for which the section is considered fully laterally
restrained as defined by Cl. 5.3.2.4 AS4100-1998.
:param float beta_m: Factor dependent on the bending moments within the segment
:returns: Maximum segment length for which the section is considered fully laterally
restrained
:rtype: float
The ratio beta_m shall be taken as one of the following as appropriate:
* -1;
* -0.8 for segments with transverse loads; or
* the ratio of the smaller to the larger end moments in the length (l), (positive when the
segment is bent in reverse curvature and negative when bent in single curvature) for
segments without transverse loads.
"""
return self.ry * (80 + 50 * beta_m) * np.sqrt(250 / self.get_yield_stress())
def calc_m0(self, le):
"""Returns the reference buckling moment, M0, for a UB section.
:param float le: Effective length of the segment under consideration
:returns: Reference buckling moment [kN.m]
:rtype: float
"""
e = self.code.elastic_modulus
g = self.code.shear_modulus
return 1e-6 * np.sqrt(((np.pi ** 2 * e * self.iyy) / (le ** 2)) * (
g * self.j + (np.pi ** 2 * e * self.iw / (le ** 2)))
)
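# Minimal usage sketch (hedged): `code` and `grade` are placeholders for an
# AS4100 design-code object and a SteelGrade instance defined elsewhere in
# this package; the 460UB74.6 dimensions below are illustrative only.
# ub = UBSection(code, '460UB74.6', d=457, bf=190, tf=14.5, tw=9.1, r=11.4,
#                grade=grade)
# phi_mbx = ub.calc_phi_mbx(le=4000, alpha_m=1.35)  # design capacity [kN.m]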
|
{"hexsha": "2058e6dfa06a6d37ba1151e86ebe8b4288cd7899", "size": 8703, "ext": "py", "lang": "Python", "max_stars_repo_path": "steeldesign/sections.py", "max_stars_repo_name": "robbievanleeuwen/steeldesign", "max_stars_repo_head_hexsha": "4a2351c7d922a4f0f379b413ecca17b672be1c6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-11T21:28:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-17T12:24:26.000Z", "max_issues_repo_path": "steeldesign/sections.py", "max_issues_repo_name": "robbievanleeuwen/steeldesign", "max_issues_repo_head_hexsha": "4a2351c7d922a4f0f379b413ecca17b672be1c6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "steeldesign/sections.py", "max_forks_repo_name": "robbievanleeuwen/steeldesign", "max_forks_repo_head_hexsha": "4a2351c7d922a4f0f379b413ecca17b672be1c6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-17T12:24:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-17T12:24:28.000Z", "avg_line_length": 28.9136212625, "max_line_length": 98, "alphanum_fraction": 0.5657819143, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2297}
|
# -*- coding: utf-8 -*-
"""
trim image to create sub images
Author :
Yuki Kumon
Last Update :
2019-11-15
"""
import sys
sys.path.append('.')
from misc.dependences.attr_dict import AttributeDict
from misc.dependences.File_util import File_util
import yaml
import os
import csv
import numpy as np
import cv2
with open('./configs/configs.yml', 'r') as f:
config = AttributeDict(yaml.load(f, Loader=yaml.SafeLoader))
# set path
original_root = config.fields()['path']['original']
edit_root = config.fields()['path']['edit']
# make folders
util = File_util()
util.create_folder(os.path.join(edit_root, 'band3s'))
util.create_folder(os.path.join(edit_root, 'band3bs'))
util.create_folder(os.path.join(edit_root, 'depth'))
# load image size
original_size = config.fields()['size']['original']
trimed_size = config.fields()['size']['trim']
stride = config.fields()['size']['stride']
stride = [int(x) for x in stride]
length = [int(np.floor((original_size[i] - trimed_size[i]) / stride[i])) for i in range(2)]
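# Number of whole tiles per axis: tiles are laid out on a regular stride
# grid, so trailing pixels that do not fit a full tile are dropped.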
# trim and save
def trimer(img, target_root, img_name, ext='.tif'):
    """Trim `img` into sub-images on a regular stride grid, save them under
    `target_root` and return the list of saved file paths."""
    img_sub = []
    name_list = []
for j in range(length[1]):
for i in range(length[0]):
img_sub.append(img[
stride[0] * i:stride[0] * i + trimed_size[0],
stride[1] * j:stride[1] * j + trimed_size[1]
])
name_list.append(os.path.splitext(os.path.split(img_name)[1])[0] + '_' + str(i) + '_' + str(j))
# save
save_name_list = []
for i in range(len(img_sub)):
cv2.imwrite(os.path.join(target_root, name_list[i] + ext), img_sub[i])
save_name_list.append(os.path.join(target_root, name_list[i] + ext))
return save_name_list
# execute
band3s_img = cv2.imread(os.path.join(original_root, 'band3s.tif'), cv2.IMREAD_GRAYSCALE)[9:-9, 9:-9]
band3bs_img = cv2.imread(os.path.join(original_root, 'band3bs.tif'), cv2.IMREAD_GRAYSCALE)[9:-9, 9:-9]
depth_img = np.load(os.path.join(original_root, 'res.npy'))[9:-9, 9:-9]
# depth_img = cv2.imread(os.path.join(original_root, 'res.png'), cv2.IMREAD_GRAYSCALE)[9:-9, 9:-9]
band3s_list = trimer(band3s_img, os.path.join(edit_root, 'band3s'), 'band3s.tif')
band3bs_list = trimer(band3bs_img, os.path.join(edit_root, 'band3bs'), 'band3bs.tif')
depth_list = trimer(depth_img, os.path.join(edit_root, 'depth'), 'depth.png', ext='.png')
# write csv
with open(os.path.join(edit_root, 'result.csv'), 'w') as f:
writer = csv.writer(f)
for i in range(len(band3s_list)):
writer.writerow([band3s_list[i], band3bs_list[i], depth_list[i]])
|
{"hexsha": "880872fa96a054fc0bed76b9a4570287c9fcde81", "size": 2628, "ext": "py", "lang": "Python", "max_stars_repo_path": "misc/create_subimage.py", "max_stars_repo_name": "Yuki-Kumon/PSMNet", "max_stars_repo_head_hexsha": "c47a9e6edb885e76fc6a6d399f5ec42c65133d85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "misc/create_subimage.py", "max_issues_repo_name": "Yuki-Kumon/PSMNet", "max_issues_repo_head_hexsha": "c47a9e6edb885e76fc6a6d399f5ec42c65133d85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "misc/create_subimage.py", "max_forks_repo_name": "Yuki-Kumon/PSMNet", "max_forks_repo_head_hexsha": "c47a9e6edb885e76fc6a6d399f5ec42c65133d85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2857142857, "max_line_length": 107, "alphanum_fraction": 0.6681887367, "include": true, "reason": "import numpy", "num_tokens": 739}
|
import numpy as np
import kmeans
import common
import naive_em
import em
X = np.loadtxt("toy_data.txt")
# TODO: Your code here
# for K in [1,2,3,4]:
# for seed in [0,1,2,3,4]:
# mixture,post=common.init(X, K, seed)
# mixture, post, cost=kmeans.run(X,mixture,post)
# common.plot(X,mixture,post,title='K=%s seed=%s cost=%s'%(K,seed,cost))
# print('K=%s seed=%s cost=%s'%(K,seed,cost))
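# Model selection: run naive EM for each K on a fixed seed and print the
# Bayesian Information Criterion. Assuming common.bic follows the usual
# log-likelihood-minus-penalty convention, the K with the highest BIC wins.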
for K in [1,2,3,4]:
    maxcost = -np.inf  # best log-likelihood so far (only used by the commented-out search below)
for seed in [0]:
mixture,post=common.init(X, K, seed)
mixture, post, cost=naive_em.run(X,mixture,post)
print(common.bic(X,mixture,cost))
# common.plot(X,mixture,post,title='EM K=%s seed=%s cost=%s'%(K,seed,cost))
# mixture,post=common.init(X, K, seed)
# mixture, post, cost=kmeans.run(X,mixture,post)
# common.plot(X,mixture,post,title='kmeansK=%s seed=%s cost=%s'%(K,seed,cost))
# print('K=%s seed=%s cost=%s'%(K,seed,cost))
# if cost>maxcost: maxcost=cost
# print("maxll %s"%maxcost)
# K=3
# seed=0
# mixture,post=common.init(X, K, seed)
# # print(mixture)
# # posts,ll = naive_em.estep(X,mixture)
# # print('ll = %s'%ll)
# # print(posts)
# # for K in [1,2,3,4]:
# # for seed in [0,1,2,3,4]:
# # mixture,post=common.init(X, K, seed)
# # mixture, post, cost=kmeans.run(X,mixture,post)
# # #common.plot(X,mixture,post,title='K=%s seed=%s cost=%s'%(K,seed,cost))
# # print('K=%s seed=%s cost=%s'%(K,seed,cost))
# K=5
# X = np.loadtxt("lasttestx.txt")
# mu=np.array( [[-0.60787456, 0.09534884],
# [ 0.53830805, -0.24498689],
# [ 0.4983494, -0.94992061],
# [-0.66868763, -0.9861811 ],
# [-0.15367443, -0.44492439]])
# var=np.array([0.66695384, 0.30533997, 1.00062913, 1.639639,0.61075705])
# p=np.array([0.12075413,0.26092829, 0.19481629, 0.23742157, 0.18607972])
# mixture = common.GaussianMixture(mu, var, p)
# posts,ll = naive_em.estep(X,mixture)
# print('ll = %s'%ll)
# print(posts)
# newmixt=naive_em.mstep(X,posts)
# print(newmixt)
# n=X.shape[0]
# llt=0.0
# # print("startar")
# # for i in range(n):
# # for j in range(K):
# # llt+=np.log(mixture.p[j]*Gaussian(mixture[i,j], var[j],X[i])
# # # print(pn)
# # # print(pn.sum())
# # llt+=np.log(pn)
# # print("test %.20f"%llt)
# # print(llt)
# # print('fin')
# # # Output:
# # # post:[[0.03939317 0.66938479 0.1207385 0.05606209 0.11442145]
# # # [0.1284887 0.46274858 0.09891089 0.08438805 0.22546379]
# # # [0.12250705 0.49162696 0.09739513 0.0799134 0.20855745]
# # # [0.0496701 0.65425613 0.1051122 0.05541291 0.13554867]
# # # [0.09493723 0.56463229 0.10373629 0.07240648 0.16428772]
# # # [0.17238229 0.41053463 0.10757978 0.10210253 0.20740077]
# # # [0.20502453 0.35053858 0.10083158 0.10866167 0.23494364]
# # # [0.04599863 0.66358567 0.10870553 0.05489602 0.12681415]
# # # [0.11717788 0.40929401 0.18426962 0.13553269 0.15372579]
# # # [0.19227515 0.37045863 0.11410566 0.11445474 0.20870582]
# # # [0.13920751 0.47399095 0.10541633 0.08908133 0.19230388]]
# # # LL:-25.086211
# # tst=np.array([[0.03939317, 0.66938479, 0.1207385, 0.05606209, 0.11442145],
# # [0.1284887, 0.46274858, 0.09891089, 0.08438805, 0.22546379],
# # [0.12250705, 0.49162696, 0.09739513, 0.0799134 , 0.20855745],
# # [0.0496701, 0.65425613, 0.1051122 , 0.05541291, 0.13554867],
# # [0.09493723, 0.56463229, 0.10373629, 0.07240648, 0.16428772],
# # [0.17238229, 0.41053463, 0.10757978, 0.10210253, 0.20740077],
# # [0.20502453, 0.35053858, 0.10083158, 0.10866167, 0.23494364],
# # [0.04599863, 0.66358567, 0.10870553, 0.05489602, 0.12681415],
# # [0.11717788, 0.40929401, 0.18426962, 0.13553269, 0.15372579],
# # [0.19227515, 0.37045863, 0.11410566, 0.11445474, 0.20870582],
# # [0.13920751, 0.47399095, 0.10541633, 0.08908133, 0.19230388]])
# # n=tst.shape[0]
# # print(n)
# # llt=0.0
# # print("bla")
# # for i in range(n):
# # print(tst[i,:])
# # print(np.log(tst[i,:]).sum())
# # print("end")
# # pn=np.log(tst[i,:]).sum()
# # # print(pn)
# # # print(pn.sum())
# # llt+=pn
# # print("test %.20f"%llt)
# # print(llt)
|
{"hexsha": "2a96b605630a5e4d56fbbd6a6840963b201cc254", "size": 4145, "ext": "py", "lang": "Python", "max_stars_repo_path": "mit-ml/netflix/main.py", "max_stars_repo_name": "stepinski/machinelearning", "max_stars_repo_head_hexsha": "1f84883a25616da4cd76bb4655267efd3421e561", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mit-ml/netflix/main.py", "max_issues_repo_name": "stepinski/machinelearning", "max_issues_repo_head_hexsha": "1f84883a25616da4cd76bb4655267efd3421e561", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mit-ml/netflix/main.py", "max_forks_repo_name": "stepinski/machinelearning", "max_forks_repo_head_hexsha": "1f84883a25616da4cd76bb4655267efd3421e561", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8319327731, "max_line_length": 86, "alphanum_fraction": 0.6057901086, "include": true, "reason": "import numpy", "num_tokens": 1823}
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: vis
# Purpose:
#
# Authors: Maik Heistermann
#
# Created: 2015-11-6
# Copyright: (c) Maik Heistermann
# Licence: The MIT License
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import wradlib
import numpy as np
import pylab as plt
import os
import datetime as dt
from matplotlib.collections import PolyCollection
from matplotlib.colors import from_levels_and_colors
from scipy.stats.stats import pearsonr
from scipy.signal import medfilt
import trmmlib as tl
def plot_grid(X, Y, R, dx=0.25, dy=0.25, ax=None, overlays=[], bbox=None, **kwargs):
"""Plots grid using pcolormesh.
This function assumes that the grid coordinates X and Y represent center
points of the grid cells. Since pcolormesh requires corner points, you also
need to specify the pixel size using dx and dy.
X: array of X coordinates having the same shape as R
Y: array of Y coordinates having the same shape as R
R: 2-D grid to be plotted
dx: pixel size in x direction
dy: pixel size in y direction
ax: axes object (if None, a new axes will be created)
    overlays: list of strings
        paths to shapefiles for overlay
    bbox: dict, optional
        plot limits with keys 'left', 'right', 'bottom' and 'top'
    kwargs: keyword arguments for matplotlib.pcolormesh
"""
# Pre-process X and Y coords to represent corner points instead of center points
X = X.copy() - dx/2.
X = np.hstack((X,X[:,-1].reshape((-1,1)) + dx) )
X = np.vstack((X,X[-1,:].reshape((1,-1))) )
Y = Y.copy() - dy/2.
Y = np.vstack((Y,Y[-1,:].reshape((1,-1)) + dy) )
Y = np.hstack((Y,Y[:,-1].reshape((-1,1))) )
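    # X and Y are now (n+1, m+1) corner arrays for the original (n, m) grid
    # of cell centers, which is the layout pcolormesh expects.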
# Create axes object of not passed
    if ax is None:
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(111, aspect="equal")
pm = ax.pcolormesh(X, Y, R, **kwargs)
plt.colorbar(pm, shrink=0.5)
plt.grid(color="white")
if bbox:
plt.xlim(bbox["left"],bbox["right"])
plt.ylim(bbox["bottom"],bbox["top"])
plt.xlabel("Longitude")
plt.ylabel("Latitude")
# Shapefile overlay
# for overlay in overlays:
# dataset, inLayer = wradlib.io.open_shape(shp)
# borders, keys = wradlib.georef.get_shape_coordinates(inLayer)
# wradlib.vis.add_lines(ax, overlay, color='white', lw=0.5)
def plot_cats(cats, R, ax=None, bbox=None, **kwargs):
"""Plots grid using pcolormesh.
This function assumes that the grid coordinates X and Y represent center
points of the grid cells. Since pcolormesh requires corner points, you also
need to specify the pixel size using dx and dy.
"""
if ax==None:
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(111, aspect="equal")
wradlib.vis.add_lines(ax, cats, color='black', lw=0.5)
coll = PolyCollection(cats, array=R, **kwargs)
ax.add_collection(coll)
ax.autoscale()
if bbox:
plt.xlim(bbox["left"],bbox["right"])
plt.ylim(bbox["bottom"],bbox["top"])
plt.draw()
return ax, coll
def plot_trmm_grid_lines(ax):
"""
"""
y = np.arange(60, 60-480*0.25, -0.25)
x = np.arange(0, 360, 0.25) - 180.
ax.hlines(y, xmin=x.min()-0, xmax=x.max()+0, color="grey")
ax.vlines(x, ymin=y.min()-0, ymax=y.max()+0, color="grey")
def maps_from_echse(conf):
"""Produces time series of rainfall maps from ECHSE input data and catchment shapefiles.
"""
# Read sub-catchment rainfall from file
fromfile = np.loadtxt(conf["f_data"], dtype="string", delimiter="\t")
if len(fromfile)==2:
rowix = 1
elif len(fromfile)>2:
rowix = slice(1,len(fromfile))
else:
raise Exception("Data file is empty: %s" % conf["f_data"])
var = fromfile[rowix,1:].astype("f4")
dtimes = fromfile[rowix,0]
dtimes = np.array([wradlib.util.iso2datetime(dtime) for dtime in dtimes])
dtimesfromconf = wradlib.util.from_to(conf["tstart"], conf["tend"], conf["interval"])
dtimes = np.intersect1d(dtimes, dtimesfromconf)
if len(dtimes)==0:
print "No datetimes for mapping based on intersection of data file and config info."
return(0)
# objects = fromfile[0,1:]
cats = plt.genfromtxt(conf["f_coords"], delimiter="\t", names=True,
dtype=[('id', '|S20'), ('lat', 'f4'), ('lon', 'f4'),
('x', 'f4'), ('y', 'f4')])
mapx, mapy = wradlib.georef.reproject(cats["x"],cats["y"],
projection_source=conf["trg_proj"],
projection_target=conf["map_proj"])
# Read shapefile
dataset, inLayer = wradlib.io.open_shape(conf["f_cats_shp"])
polys, keys = wradlib.georef.get_shape_coordinates(inLayer, key='DN')
keys = np.array(keys)
# Preprocess polygons (remove minors, sort in same order as in coords file)
polys2 = []
for i, id in enumerate(cats["id"]):
keyix = np.where( keys==eval(id.strip("cats_")) )[0]
if len(keyix) > 1:
# More than one key matching? Find largest matching polygon
keyix = keyix[np.argmax([len(polys[key]) for key in keyix])]
else:
keyix = keyix[0]
poly = polys[keyix].copy()
if poly.ndim==1:
# Multi-Polygons - keep only the largest polygon
# (just for plotting - no harm done)
poly2 = poly[np.argmax([len(subpoly) for subpoly in poly])].copy()
else:
poly2 = poly.copy()
polys2.append ( wradlib.georef.reproject(poly2,
projection_source=conf["trg_proj"],
projection_target=conf["map_proj"]) )
colors = plt.cm.spectral(np.linspace(0,1,len(conf["levels"])))
mycmap, mynorm = from_levels_and_colors(conf["levels"], colors, extend="max")
plt.interactive(False)
for i, dtime in enumerate(dtimes):
datestr = (dtime-dt.timedelta(seconds=conf["interval"])).strftime("%Y%m%d.png")
        print(datestr)
figpath = os.path.join(conf["savefigs"], datestr)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111, aspect="equal")
ax, coll = plot_cats(polys2, var[i], ax=ax, bbox=conf["bbox"], cmap=mycmap,
norm=mynorm, edgecolors='none')
cb = plt.colorbar(coll, ax=ax, ticks=conf["levels"], shrink=0.6)
cb.ax.tick_params(labelsize="small")
cb.set_label("(mm)")
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plot_trmm_grid_lines(ax)
plt.text(conf["bbox"]["left"]+0.25, conf["bbox"]["top"]-0.25,
"%s\n%s to\n%s" % (conf["figtxtbody"],
(dtime-dt.timedelta(seconds=conf["interval"])).isoformat(" "),
dtime.isoformat(" ") ),
color="red", fontsize="small", verticalalignment="top")
plt.tight_layout()
plt.savefig(figpath)
plt.close()
plt.interactive(True)
def simple_scatter(x, y, xlab, ylab, lim, txt="", **kwargs):
"""Simple scatter plot
"""
plt.scatter(x, y, **kwargs)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.xlim(lim[0],lim[1])
plt.ylim(lim[0],lim[1])
plt.plot([lim[0]-100,lim[1]+100], [lim[0]-100,lim[1]+100], color="black", linestyle="--")
plt.text(lim[0]+10, lim[1]-10, txt, verticalalignment="top", color="red")
def time_series_intercomparison(conf_imd4, conf_gages, conf_trmm, conf_trmm_rt):
"""
"""
tstart = dt.datetime.strptime("2001-04-01 00:00:00", "%Y-%m-%d %H:%M:%S")
tend = dt.datetime.strptime("2010-12-30 00:00:00", "%Y-%m-%d %H:%M:%S")
dtimes_imd4, _, imd4 = tl.echse.read_echse_data_file(conf_imd4["f_data"])
dtimes_trmm, _, trmm = tl.echse.read_echse_data_file(conf_trmm["f_data"])
dtimes_trmmrt, _, trmmrt = tl.echse.read_echse_data_file(conf_trmm_rt["f_data"])
dtimes_gage, _, gage = tl.echse.read_echse_data_file(conf_gages["f_data"])
ix_imd4 = (dtimes_imd4 >= tstart) & (dtimes_imd4 <tend)
ix_trmm = (dtimes_trmm >= tstart) & (dtimes_trmm <tend)
ix_trmmrt = (dtimes_trmmrt >= tstart) & (dtimes_trmmrt <tend)
ix_gage = (dtimes_gage >= tstart) & (dtimes_gage <tend)
lim = (0,300)
txt = "Daily rainfall\nSubcatchment average\n%s to %s" % (tstart.strftime("%Y-%m-%d"),tend.strftime("%Y-%m-%d"))
kwargs = {"edgecolor":"None", "alpha":0.05}
plt.interactive(False)
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(221, aspect="equal")
corr = "\nR=%.2f" % pearsonr(gage[ix_gage,:].ravel(), trmm[ix_trmm,:].ravel())[0]
tl.vis.simple_scatter(gage[ix_gage,:], trmm[ix_trmm,:], "GAGE (mm)", "TRMM (mm)", lim, txt="GAGE vs. TRMM\n"+txt+corr, **kwargs )
ax = fig.add_subplot(222, aspect="equal")
corr = "\nR=%.2f" % pearsonr(imd4[ix_imd4,:].ravel(), trmm[ix_trmm,:].ravel())[0]
tl.vis.simple_scatter(imd4[ix_imd4,:], trmm[ix_trmm,:], "IMD4 (mm)", "TRMM (mm)", lim, txt="IMD4 vs. TRMM\n"+txt+corr, **kwargs)
ax = fig.add_subplot(223, aspect="equal")
corr = "\nR=%.2f" % pearsonr(gage[ix_gage,:].ravel(), imd4[ix_imd4,:].ravel())[0]
tl.vis.simple_scatter(gage[ix_gage,:], imd4[ix_imd4,:], "GAGE (mm)", "IMD4 (mm)", lim, txt="GAGE vs. IMD4\n"+txt+corr, **kwargs)
ax = fig.add_subplot(224, aspect="equal")
corr = "\nR=%.2f" % pearsonr(trmm[ix_trmm,:].ravel(), trmmrt[ix_trmmrt,:].ravel())[0]
tl.vis.simple_scatter(trmm[ix_trmm,:], trmmrt[ix_trmmrt,:], "TRMM (mm)", "TRMM RT (mm)", lim, txt="TRMM vs. TRMM RT\n"+txt+corr, **kwargs)
plt.tight_layout()
plt.savefig("P:/progress/mahanadi/_qpe/inter_product_scatter.png")
plt.interactive(True)
plt.figure(figsize=(12,12))
plt.subplot(311)
plt.plot(dtimes_imd4[ix_imd4], medfilt( np.mean(imd4[ix_imd4,:],axis=1), 1 ), color="black", label="IMD4" )
plt.plot(dtimes_gage[ix_gage], medfilt( np.mean(gage[ix_gage,:],axis=1), 1 ), color="green", label="GAGE", alpha=0.7 )
plt.plot(dtimes_trmm[ix_trmm], medfilt( np.mean(trmm[ix_trmm,:],axis=1), 1 ), color="red", label="TRMM", alpha=0.5 )
plt.plot(dtimes_trmmrt[ix_trmmrt], medfilt( np.mean(trmmrt[ix_trmmrt,:],axis=1), 1 ), color="blue", label="TRMM RT", alpha=0.5 )
plt.xlabel("Year")
plt.ylabel("Daily rainfall (mm)")
plt.title("Unsmoothed")
plt.legend()
plt.subplot(312)
plt.plot(dtimes_imd4[ix_imd4], medfilt( np.mean(imd4[ix_imd4,:],axis=1), 31 ), color="black", label="IMD4" )
plt.plot(dtimes_gage[ix_gage], medfilt( np.mean(gage[ix_gage,:],axis=1), 31 ), color="green", label="GAGE", alpha=0.7 )
plt.plot(dtimes_trmm[ix_trmm], medfilt( np.mean(trmm[ix_trmm,:],axis=1), 31 ), color="red", label="TRMM", alpha=0.5 )
plt.plot(dtimes_trmmrt[ix_trmmrt], medfilt( np.mean(trmmrt[ix_trmmrt,:],axis=1), 31 ), color="blue", label="TRMM RT", alpha=0.5 )
plt.xlabel("Year")
plt.ylabel("Daily rainfall (mm)")
plt.title("Smoothed with 31 day median filter")
plt.subplot(313)
plt.plot(dtimes_imd4[ix_imd4], medfilt( np.mean(imd4[ix_imd4,:],axis=1), 91 ), color="black", label="IMD4" )
plt.plot(dtimes_gage[ix_gage], medfilt( np.mean(gage[ix_gage,:],axis=1), 91 ), color="green", label="GAGE", alpha=0.7 )
plt.plot(dtimes_trmm[ix_trmm], medfilt( np.mean(trmm[ix_trmm,:],axis=1), 91 ), color="red", label="TRMM", alpha=0.5 )
plt.plot(dtimes_trmmrt[ix_trmmrt], medfilt( np.mean(trmmrt[ix_trmmrt,:],axis=1), 91 ), color="blue", label="TRMM RT", alpha=0.5 )
plt.xlabel("Year")
plt.title("Smoothed with 91 day median filter")
plt.tight_layout()
plt.savefig("P:/progress/mahanadi/_qpe/inter_product_timeseries.png")
if __name__ == '__main__':
pass
|
{"hexsha": "eefae7f85bb930e309056ca8415bb325fc25d608", "size": 11938, "ext": "py", "lang": "Python", "max_stars_repo_path": "trmmlib/vis.py", "max_stars_repo_name": "heistermann/trmmlib", "max_stars_repo_head_hexsha": "b32cf623737285073e4c61bd0e01a0fe8b26c329", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trmmlib/vis.py", "max_issues_repo_name": "heistermann/trmmlib", "max_issues_repo_head_hexsha": "b32cf623737285073e4c61bd0e01a0fe8b26c329", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trmmlib/vis.py", "max_forks_repo_name": "heistermann/trmmlib", "max_forks_repo_head_hexsha": "b32cf623737285073e4c61bd0e01a0fe8b26c329", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9424460432, "max_line_length": 142, "alphanum_fraction": 0.5999329871, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3589}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 11 09:55:19 2017
@author: Elizabeth Ferriss
Show all FTIR spectra and baselines used for H diffusion in olivine GCA paper
"""
from olivine.SanCarlos import SanCarlos_spectra as SC
from olivine.KilaueaIki import Kiki_spectra as kiki
from matplotlib.backends.backend_pdf import PdfPages
import os
import pandas as pd
import numpy as np
import olivine
import matplotlib.pyplot as plt
from pynams import styles
filetosave = os.path.join(olivine.__path__[0], 'Ferriss_Supplement_test.pdf')
SC_whole_block_list = [SC.wb_800C_hyd] + SC.whole_block_list.copy()
kiki_whole_block_list = [kiki.wb_Kiki_init,
kiki.wb_Kiki_1hr,
kiki.wb_Kiki_8hr,
kiki.wb_Kiki_1000C_3hr,
kiki.wb_Kiki_1000C_6hr,
kiki.wb_Kiki_1000C_7hr,
kiki.wb_Kiki_1000C_8hr,
]
wblist = SC_whole_block_list + kiki_whole_block_list
#wblist = [SC.wb_800C_1hr, SC.wb_800C_3hr, SC.wb_800C_13hr]
SCpeaks = [3600, 3525]
Kpeaks = kiki.peaks
init_peaks_list = [SCpeaks, SCpeaks, Kpeaks]
SCpeaks_list = [SCpeaks] * len(SC_whole_block_list)
Kpeaks_list = [Kpeaks] * len(kiki_whole_block_list)
peaks_list = SCpeaks_list + Kpeaks_list
SCcolors = ['#2ca02c'] * len(SC_whole_block_list)
Kcolors = ['darkmagenta'] * len(kiki_whole_block_list)
colors = SCcolors + Kcolors
# get baselines and areas
for wb, color, peaks in zip(wblist, colors, peaks_list):
wb.get_baselines()
wb.make_areas()
wb.make_peakheights(peaks=peaks)
wb.areas = [prof.areas for prof in wb.profiles]
wb.peak_heights = [prof.peak_heights for prof in wb.profiles]
wb.style = styles.style_points.copy()
wb.style['color'] = color
wb.style['markeredgecolor'] = color
for wb in SC_whole_block_list:
wb.sample.name = 'SC1-2'
for wb in kiki_whole_block_list:
wb.sample.name = 'kiki'
# %% get diffusivity data from spreadsheet in pynams
# store log10 diffusivities in wholeblock attribute D3
datafile = os.path.join(olivine.__path__[0], 'mydata.csv')
diffs = pd.read_csv(datafile)
diffs = diffs.dropna(how='all') # ignore empty rows
diffs.fillna(0, inplace=True) # replace missing values with zero
diffs['hours'] = diffs['hours'].astype(float)
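# Look-up tables keyed by FTIR peak wavenumber (cm-1): point-defect
# mechanism label, peak index within each profile, and (as read from the
# diffusion calls below) the final normalized value dehydration approaches.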
peak2mech = {None: 'total', 3600:'[Si]', 3525:'[Ti]', 3356:'[tri]', 3236:'[Mg]'}
peak2idx = {None: None, 3600: 0, 3525: 1, 3356: 2, 3236:3}
peak2fin = {None: 0.15, 3600: 0.4, 3525: 0, 3356: 0, 3236: 0.}
def show_error_envelope(wb, ax3, fin, D3, error_log_units=[0.4]*3):
"""
Input:
The whole-block
A list of 3 axes on which to plot
The final value diffusion is going to
The list of diffusivities, D
The list of assumed errors on each diffusivity
Shows lines for D +/- each error with fill-between on axes
"""
Dhigh = [D+e for D, e in zip(D3, error_log_units)]
Dlow = [D-e for D, e in zip(D3, error_log_units)]
lmfit, xs, yhighs = wb.diffusion_profiles(wholeblock_diffusion=True,
fin = fin, log10D_m2s=Dhigh)
lmfit, xs, ylows = wb.diffusion_profiles(wholeblock_diffusion=True,
fin = fin, log10D_m2s=Dlow)
xs = [x - (length/2) for x, length in zip(xs, wb.sample.lengths_microns)]
for ax, x, ylow, yhigh in zip(ax3, xs, ylows, yhighs):
ax.fill_between(x, ylow, yhigh, color='grey', alpha=0.3)
pdf = PdfPages(filetosave)
######## San Carlos ########
peaks = [None] + SCpeaks
for peak in peaks:
idx = peak2idx[peak]
mech = peak2mech[peak]
if peak is None:
height = False
else:
height = True
for wb in SC_whole_block_list:
# plot data
fig, ax3 = wb.plot_areas_3panels(styles3=[wb.style]*3,
centered=True,
peak_idx = idx,
heights_instead=height,
wholeblock=True,
scale=1,
ytop = 1.4,
show_line_at_1=True)
# get diffusivity data from spreadsheet and plot
wb.hours = wb.time_seconds / 3600.
df = diffs[diffs['name'] == wb.sample.name]
df = df[df['Celsius'] == wb.celsius]
df = df[df['hours'] == wb.hours]
df = df[df['mechanism'] == mech]
if np.array(df.maximum_val)[0] == False:
D3 = list(df.log10D)
else:
D3 = list(df.log10D_estimated)
D3e = list(df.log10Derror)
hyd = list(df.Experiment)
# plot diffusion curve
if (0 not in D3) and ('dehydration' in hyd):
fin = peak2fin[peak]
wb.plot_diffusion(axes3=ax3, show_data=False,
wholeblock_diffusion=True,
fin = fin,
log10D_m2s=D3, labelD=False)
show_error_envelope(wb, ax3, fin, D3, error_log_units=D3e)
for ax, D, e in zip(ax3, D3, D3e):
label = ''.join(('log$_{10}$D in m$^2$/s\n',
str(D), '+/-', str(e)))
ax.text(0, 1.19, label, ha='center', fontsize=8)
# adjust and save figure
ax3[1].set_title(' '.join((wb.name, ax3[1].get_title())))
fig.subplots_adjust(bottom=0.3)
pdf.savefig()
plt.close()
####### Kilauea Iki ##########
#pvlist = [0, 0, 97, 97, 97, 97, 95]
#ytops = [1.5, 2.5, 1.5, 1.5, 1.5]
#peaks = [None, 3600, 3525, 3356]
#temps = [800, 800] + [1000]*5
#
#for peak, ytop in zip(peaks, ytops):
# idx = peak2idx[peak]
# mech = peak2mech[peak]
#
# if peak is None:
# height = False
# else:
# height = True
#
# for wb, pv, temp in zip(kiki_whole_block_list, pvlist, temps):
# # plot data
# if wb.Celsius == 1000:
# ytop = 1.2
# fig, ax3 = wb.plot_areas_3panels(styles3=[wb.style]*3,
# centered=True,
# peak_idx = idx,
# heights_instead=height,
# wholeblock=True,
# scale=1,
# ytop = ytop,
# show_line_at_1=True)
#
# # get diffusivity data from spreadsheet and plot
# wb.hours = wb.time_seconds / 3600.
# df = diffs[diffs['name'] == wb.sample.name]
# df = df[df['Celsius'] == wb.Celsius]
# df = df[df['hours'] == wb.hours]
# df = df[df['mechanism'] == mech]
# D3 = list(df.log10D)
# D3e = list(df.log10Derror)
# hyd = list(df.Experiment)
# if (0 not in D3) and ('dehydration' in hyd):
# wb.plot_diffusion(axes3=ax3, show_data=False,
# wholeblock_diffusion=True,
# log10D_m2s=D3, labelD=False)
#
# labelDy=ytop-ytop*0.15
# show_error_envelope(wb, ax3, fin, D3, error_log_units=D3e)
# for ax, D, e in zip(ax3, D3, D3e):
# label = ''.join(('log$_{10}$D in m$^2$/s\n',
# str(D), '+/-', str(e)))
# ax.text(0, labelDy, label, ha='center', fontsize=8)
#
# ax3[1].set_title(' '.join((wb.name, ax3[1].get_title())))
# fig.subplots_adjust(bottom=0.3)
# pdf.savefig()
# plt.close()
######## individual spectra and baselines #############
#wblist = [SC.wb_1000C_SC1_7] + SC_whole_block_list + kiki_whole_block_list
#
#ytops = np.ones_like(wblist) * 0.5
#ytops[0] = 1.4
#ytops[9:] = 1.
#for ytop, wb in zip(ytops, wblist):
# for R, orient, prof in zip(wb.raypaths, wb.directions, wb.profiles):
# for loc, spec in zip(prof.positions_microns, prof.spectra):
# spec.get_baseline()
# fig, ax = spec.plot_showbaseline()
# ax.set_ylim(0, ytop)
# ax.set_title(''.join((wb.name, '\n',
# '{:.1f}'.format(loc), ' $\mu$m along ',
# orient, ', R || ', str(R)
# )))
# pdf.savefig()
# plt.close()
pdf.close()
|
{"hexsha": "be19fc93fc80994bbdc309114662a311e0112e11", "size": 8528, "ext": "py", "lang": "Python", "max_stars_repo_path": "olivine/old_and_exploratory/Ferriss_Supplement_estimates.py", "max_stars_repo_name": "EFerriss/olivine", "max_stars_repo_head_hexsha": "78cb3c7664d4dce284cf5096775cdf1e40463f91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-06T23:32:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-06T23:32:25.000Z", "max_issues_repo_path": "olivine/old_and_exploratory/Ferriss_Supplement_estimates.py", "max_issues_repo_name": "EFerriss/olivine", "max_issues_repo_head_hexsha": "78cb3c7664d4dce284cf5096775cdf1e40463f91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "olivine/old_and_exploratory/Ferriss_Supplement_estimates.py", "max_forks_repo_name": "EFerriss/olivine", "max_forks_repo_head_hexsha": "78cb3c7664d4dce284cf5096775cdf1e40463f91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9177489177, "max_line_length": 80, "alphanum_fraction": 0.537054409, "include": true, "reason": "import numpy", "num_tokens": 2484}
|
import os, sys
import cv2
import numpy as np
import torch
from PIL import Image
from sklearn.cluster import DBSCAN
from dface import MTCNN, FaceNet
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def get_frames(video):
frames = []
vid = cv2.VideoCapture(video)
total = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
nframe = total//30 # one frame in every 30 frames
idx = np.linspace(0, total, nframe, endpoint=False, dtype=int)
for i in range(total):
ok = vid.grab()
if i not in idx:
continue
ok, frm = vid.retrieve()
if not ok:
continue
frm = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)
frames.append(frm)
vid.release()
return frames
def get_boundingbox(box, w, h, scale=1.2):
x1, y1, x2, y2 = box
size = int(max(x2-x1, y2-y1) * scale)
center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
if size > w or size > h:
size = int(max(x2-x1, y2-y1))
x1 = max(int(center_x - size // 2), 0)
y1 = max(int(center_y - size // 2), 0)
size = min(w - x1, size)
size = min(h - y1, size)
return x1, y1, size
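# get_boundingbox returns a square crop of side `size` (the larger box
# dimension times `scale`), re-centred on the detection and clamped to the
# frame so the array slice in main() never runs past the image edges.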
def main():
if len(sys.argv) != 2:
print('usage: example.py <video>')
return
video = sys.argv[1]
print("loading models.")
mtcnn = MTCNN(device)
facenet = FaceNet(device)
print("reading video frames.")
frames = get_frames(video)
print("detecting & extracting faces.")
result = mtcnn.detect(frames)
faces = []
for i, res in enumerate(result):
if res is None:
continue
# extract faces
boxes, probs, lands = res
for j, box in enumerate(boxes):
# confidence of detected face
if probs[j] > 0.98:
h, w = frames[i].shape[:2]
x1, y1, size = get_boundingbox(box, w, h)
face = frames[i][y1:y1+size, x1:x1+size]
faces.append(face)
print("creating face embeddings.")
embeds = facenet.embedding(faces)
print("clustering faces.")
dbscan = DBSCAN(eps=0.35, metric='cosine', min_samples=5)
labels = dbscan.fit_predict(embeds)
name, _ = os.path.splitext(video)
os.mkdir(name)
print("saving clustered faces.")
for i in range(len(labels)):
label = labels[i]
if label < 0:
continue
id_dir = '%s/id_%d'%(name, label)
if not os.path.exists(id_dir):
os.mkdir(id_dir)
face = Image.fromarray(faces[i])
face.save('%s/%d.bmp'%(id_dir, i))
if __name__ == '__main__':
    main()
|
{"hexsha": "7d75f8bc4e056436bf037066b43c9ad0360a4e2f", "size": 2234, "ext": "py", "lang": "Python", "max_stars_repo_path": "example.py", "max_stars_repo_name": "deepware/dFace", "max_stars_repo_head_hexsha": "3471ce8f40fe41d17669f67dba3f88768cde0815", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-05-11T10:18:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-25T13:21:39.000Z", "max_issues_repo_path": "example.py", "max_issues_repo_name": "deepware/facelib", "max_issues_repo_head_hexsha": "3471ce8f40fe41d17669f67dba3f88768cde0815", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example.py", "max_forks_repo_name": "deepware/facelib", "max_forks_repo_head_hexsha": "3471ce8f40fe41d17669f67dba3f88768cde0815", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-05-18T10:28:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-25T13:21:42.000Z", "avg_line_length": 22.7959183673, "max_line_length": 63, "alphanum_fraction": 0.6584601611, "include": true, "reason": "import numpy", "num_tokens": 710}
|
/*!
* @file
* This file contains unit tests for `boost::mpl::sequence_facade`.
*/
#include <boost/mpl/sequence_facade.hpp>
#include <boost/mpl/begin.hpp>
#include <boost/mpl/empty.hpp>
#include <boost/mpl/end.hpp>
#include <boost/mpl/front.hpp>
#include <boost/mpl/size.hpp>
#include <boost/mpl/vector.hpp>
#include <boost/type_traits/is_same.hpp>
using namespace boost;
template <typename ...T>
struct forward_sequence : mpl::sequence_facade {
struct begin : mpl::begin<typename mpl::vector<T...>::type> { };
struct end : mpl::end<typename mpl::vector<T...>::type> { };
};
static_assert(mpl::size<forward_sequence<int, float>>::value == 2, "");
static_assert(is_same<
mpl::front<forward_sequence<int, float>>::type, int
>::value, "");
static_assert(!mpl::empty<forward_sequence<int, float>>::value, "");
static_assert(mpl::empty<forward_sequence<>>::value, "");
int main() { }
|
{"hexsha": "bfb9bb757e6e7fce8187a828124636274c13956a", "size": 903, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/sequence_facade.cpp", "max_stars_repo_name": "ldionne/mpl_extensions", "max_stars_repo_head_hexsha": "ca728992567b96dad884be1658b0822a955174cc", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-03-25T19:19:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-25T19:19:06.000Z", "max_issues_repo_path": "test/sequence_facade.cpp", "max_issues_repo_name": "ldionne/mpl_extensions", "max_issues_repo_head_hexsha": "ca728992567b96dad884be1658b0822a955174cc", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/sequence_facade.cpp", "max_forks_repo_name": "ldionne/mpl_extensions", "max_forks_repo_head_hexsha": "ca728992567b96dad884be1658b0822a955174cc", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-03-25T19:19:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-25T19:19:07.000Z", "avg_line_length": 25.0833333333, "max_line_length": 71, "alphanum_fraction": 0.6910299003, "num_tokens": 229}
|
import numpy as np
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
from PyBASC.basc import (
group_dim_reduce,
nifti_individual_stability,
map_group_stability_random_bootstrap,
join_group_stability,
individual_group_clustered_maps,
post_analysis,
ndarray_to_vol
)
from PyBASC.utils import Function, CustomCacheNode, CustomCacheMapNode
def _generate_list(n):
    """Build the bootstrap iteration list: [False] when bootstrapping is
    disabled (n falsy or <= 1), otherwise the bootstrap indices 0..n-1."""
    if not n or n <= 1:
        return [False]
    return list(range(n))
def create_basc(proc_mem, name='basc'):
"""
Bootstrap Analysis of Stable Clusters (BASC)
This workflow performs group-level BASC.
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
basc : nipype.pipeline.engine.Workflow
BASC workflow.
Notes
-----
Workflow Inputs::
inputspec.roi : string (nifti file)
Mask of region(s) of interest
        inputspec.subjects : list (nifti files)
4-D timeseries of a group of subjects normalized to MNI space
inputspec.dataset_bootstraps : integer
Number of bootstrap samples of the dataset
inputspec.timeseries_bootstraps : integer
Number of bootstraps of each subject's timeseries
inputspec.n_clusters : integer
            Number of clusters at both the individual and group level
inputspec.affinity_threshold : list (floats)
Minimum threshold for similarity matrix based on correlation to
create an edge
Workflow Outputs::
outputspec.group_stability_matrix : ndarray
Group stability matrix
outputspec.clusters_G: ndarray
Matrix partitioning each cluster of the group stability matrix
outputspec.cluster_voxel_scores: ndarray
Group stability map using gsm and gscluster to calculate average
within-cluster stability
outputspec.gsclusters_img : string (nifti file)
3-D volume of brain regions partitioned with gsclusters
outputspec.cluster_voxel_scores_img : string (nifti file)
3-D volume of brain regions associated with gs_map
BASC Procedure:
1. Generate individual stability matrices based on multiple clusterings of
each bootstrap sample for a single subject
2. Use stratified bootstrap to sample new datasets of subjects
3. Calculate average stability matrix of each new dataset using individual
stability matrices generated at step 1
    4. Cluster each average stability matrix
5. Average to create a group stability matrix
6. Cluster the group stability matrix
7. Calculate average within-cluster stability based on clustering of step 6
Workflow Graph:
.. image:: ../images/basc.dot.png
:width: 500
Detailed Workflow Graph:
.. image:: ../images/basc_detailed.dot.png
:width: 500
References
----------
.. [1] P. Bellec, P. Rosa-Neto, O. C. Lyttelton, H. Benali, and A. C. Evans,
"Multi-level bootstrap analysis of stable clusters in resting-state fMRI"
NeuroImage, vol. 51, no. 3, pp. 1126-39, Jul. 2010.
"""
mem_per_proc = float(proc_mem[1]) / float(proc_mem[0])
basc = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=[
'subjects_files',
'roi_mask_file',
'dataset_bootstraps',
'timeseries_bootstraps',
'compression_dim',
'n_clusters',
'bootstrap_list',
'similarity_metric',
'blocklength',
'affinity_threshold',
'cluster_method',
'group_dim_reduce',
'cross_cluster',
'cxc_roi_mask_file',
]), name='inputspec')
outputspec = pe.Node(util.IdentityInterface(fields=[
'group_stability_matrix',
'clusters_G',
'ism_gsm_corr',
'gsclusters_img',
'cluster_voxel_scores_img',
'cluster_voxel_scores',
'ind_group_cluster_stability',
'individualized_group_clusters',
'ind_group_cluster_labels',
'ind_group_cluster_stability_set',
]), name='outputspec')
gdr = CustomCacheNode(
Function(
input_names=['subjects_files',
'roi_mask_file',
'compression_dim',
'group_dim_reduce',
'cross_cluster',
'cxc_roi_mask_file'],
output_names=['compressor',
'cxc_compressor',
'compression_labels_file'],
function=group_dim_reduce,
as_module=True
),
name='group_dim_reduce',
mem_gb=mem_per_proc
)
nis = CustomCacheMapNode(
Function(
input_names=['subject_file',
'roi_mask_file',
'n_bootstraps',
'n_clusters',
'compression_dim',
'similarity_metric',
'compressor',
'cxc_compressor',
'cross_cluster',
'cxc_roi_mask_file',
'cbb_block_size',
'blocklength',
'affinity_threshold',
'cluster_method'],
output_names=['ism_file', 'compression_labels_file'],
function=nifti_individual_stability,
as_module=True
),
name='individual_stability_matrices',
mem_gb=mem_per_proc,
iterfield=['subject_file',
'affinity_threshold']
)
nis.inputs.cbb_block_size = None
mgsm = CustomCacheMapNode(
Function(
input_names=['subject_stability_list',
'n_clusters',
'is_bootstrapping',
'roi_mask_file',
'group_dim_reduce',
'cluster_method'],
output_names=['G_file'],
function=map_group_stability_random_bootstrap,
as_module=True
),
name='map_group_stability',
mem_gb=mem_per_proc,
iterfield='is_bootstrapping'
)
jgsm = CustomCacheNode(
Function(
input_names=['subject_stability_list',
'group_stability_list',
'n_bootstraps',
'n_clusters',
'roi_mask_file',
'group_dim_reduce',
'compression_labels_list',
'cluster_method'],
output_names=['G',
'clusters_G',
'ism_gsm_corr',
'gsm_file',
'clusters_G_file',
'ism_gsm_corr_file'],
function=join_group_stability,
as_module=True
),
name='join_group_stability',
mem_gb=mem_per_proc
)
igcm = CustomCacheMapNode(
Function(
input_names=['subject_stability_list',
'clusters_G',
'roi_mask_file',
'group_dim_reduce',
'compression_labels_file'],
output_names=['ind_group_cluster_stability_file',
'individualized_group_clusters_file',
'ind_group_cluster_labels_file'],
function=individual_group_clustered_maps,
as_module=True
),
name='individual_group_clustered_maps',
mem_gb=mem_per_proc,
iterfield=['subject_stability_list', 'compression_labels_file']
)
post = CustomCacheNode(
Function(
input_names=['ind_group_cluster_stability_file_list'],
output_names=['ind_group_cluster_stability_set_file'],
function=post_analysis,
as_module=True
),
name='post_analysis',
mem_gb=mem_per_proc
)
gs_cluster_vol = CustomCacheNode(
Function(
input_names=['data_array',
'roi_mask_file',
'sample_file',
'filename'],
output_names=['img_file', 'img'],
function=ndarray_to_vol,
as_module=True
),
name='group_stability_clusters',
mem_gb=mem_per_proc
)
gs_cluster_vol.inputs.filename = 'group_stability_clusters.nii.gz'
basc.connect([
(
inputspec, gdr, [
('subjects_files', 'subjects_files'),
('roi_mask_file', 'roi_mask_file'),
('compression_dim', 'compression_dim'),
('cxc_roi_mask_file', 'cxc_roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
('cross_cluster', 'cross_cluster'),
]
),
(
inputspec, nis, [
('subjects_files', 'subject_file'),
('roi_mask_file', 'roi_mask_file'),
('timeseries_bootstraps', 'n_bootstraps'),
('n_clusters', 'n_clusters'),
('compression_dim', 'compression_dim'),
('similarity_metric', 'similarity_metric'),
('cross_cluster', 'cross_cluster'),
('cxc_roi_mask_file', 'cxc_roi_mask_file'),
('blocklength', 'blocklength'),
('affinity_threshold', 'affinity_threshold'),
('cluster_method', 'cluster_method'),
]
),
(
inputspec, mgsm, [
('bootstrap_list', 'is_bootstrapping'),
('n_clusters', 'n_clusters'),
('roi_mask_file', 'roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
('cluster_method', 'cluster_method'),
]
),
(
inputspec, jgsm, [
('dataset_bootstraps', 'n_bootstraps'),
('n_clusters', 'n_clusters'),
('roi_mask_file', 'roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
('cluster_method', 'cluster_method'),
]
),
(
inputspec, gs_cluster_vol, [
('subjects_files', 'sample_file'),
('roi_mask_file', 'roi_mask_file'),
]
),
(
inputspec, igcm, [
('roi_mask_file', 'roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
]
),
(
gdr, nis, [
('compressor', 'compressor'),
('cxc_compressor', 'cxc_compressor'),
]
),
(
nis, mgsm, [
('ism_file', 'subject_stability_list'),
]
),
(
nis, jgsm, [
('ism_file', 'subject_stability_list'),
('compression_labels_file', 'compression_labels_list'),
]
),
(
nis, igcm, [
('ism_file', 'subject_stability_list'),
('compression_labels_file', 'compression_labels_file'),
]
),
(
mgsm, jgsm, [
('G_file', 'group_stability_list'),
]
),
(
jgsm, igcm, [
('clusters_G', 'clusters_G'),
]
),
(
jgsm, gs_cluster_vol, [
('clusters_G', 'data_array'),
]
),
(
igcm, post, [
('ind_group_cluster_stability_file',
'ind_group_cluster_stability_file_list'),
]
),
# Workflow output
(
jgsm, outputspec, [
('gsm_file', 'group_stability_matrix'),
('clusters_G_file', 'clusters_G'),
('ism_gsm_corr_file', 'ism_gsm_corr'),
]
),
(
gs_cluster_vol, outputspec, [
('img_file', 'gsclusters_img'),
]
),
(
igcm, outputspec, [
('ind_group_cluster_stability_file',
'ind_group_cluster_stability'),
('individualized_group_clusters_file',
'individualized_group_clusters'),
('ind_group_cluster_labels_file',
'ind_group_cluster_labels'),
]
),
(
post, outputspec, [
('ind_group_cluster_stability_set_file',
'ind_group_cluster_stability_set'),
]
),
])
return basc
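# Minimal usage sketch (hedged; `subject_paths` and 'roi.nii.gz' are
# placeholders, and the remaining inputspec fields must be set likewise):
# wf = create_basc(proc_mem=(4, 8))
# wf.inputs.inputspec.subjects_files = subject_paths
# wf.inputs.inputspec.roi_mask_file = 'roi.nii.gz'
# wf.inputs.inputspec.dataset_bootstraps = 30
# wf.inputs.inputspec.timeseries_bootstraps = 100
# wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})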
def create_basc_parallelized(proc_mem, name='basc', random_state=None):
"""
Bootstrap Analysis of Stable Clusters (BASC)
This workflow performs group-level BASC.
Parameters
----------
name : string, optional
Name of the workflow.
Returns
-------
basc : nipype.pipeline.engine.Workflow
BASC workflow.
Notes
-----
Workflow Inputs::
inputspec.roi : string (nifti file)
Mask of region(s) of interest
        inputspec.subjects : list (nifti files)
4-D timeseries of a group of subjects normalized to MNI space
inputspec.dataset_bootstraps : integer
Number of bootstrap samples of the dataset
inputspec.timeseries_bootstraps : integer
Number of bootstraps of each subject's timeseries
inputspec.n_clusters : integer
            Number of clusters at both the individual and group level
inputspec.affinity_threshold : list (floats)
Minimum threshold for similarity matrix based on correlation to
create an edge
Workflow Outputs::
outputspec.group_stability_matrix : ndarray
Group stability matrix
outputspec.clusters_G: ndarray
Matrix partitioning each cluster of the group stability matrix
outputspec.cluster_voxel_scores: ndarray
Group stability map using gsm and gscluster to calculate average
within-cluster stability
outputspec.gsclusters_img : string (nifti file)
3-D volume of brain regions partitioned with gsclusters
outputspec.cluster_voxel_scores_img : string (nifti file)
3-D volume of brain regions associated with gs_map
BASC Procedure:
1. Generate individual stability matrices based on multiple clusterings of
each bootstrap sample for a single subject
2. Use stratified bootstrap to sample new datasets of subjects
3. Calculate average stability matrix of each new dataset using individual
stability matrices generated at step 1
    4. Cluster each average stability matrix
5. Average to create a group stability matrix
6. Cluster the group stability matrix
7. Calculate average within-cluster stability based on clustering of step 6
Workflow Graph:
.. image:: ../images/basc.dot.png
:width: 500
Detailed Workflow Graph:
.. image:: ../images/basc_detailed.dot.png
:width: 500
References
----------
.. [1] P. Bellec, P. Rosa-Neto, O. C. Lyttelton, H. Benali, and A. C. Evans,
"Multi-level bootstrap analysis of stable clusters in resting-state fMRI"
NeuroImage, vol. 51, no. 3, pp. 1126-39, Jul. 2010.
"""
mem_per_proc = float(proc_mem[1]) / float(proc_mem[0])
basc_wf = pe.Workflow(name=name)
ignore_cache = ()
if not random_state:
ignore_cache = ('random_state_tuple',)
inputspec = CustomCacheNode(
util.IdentityInterface(fields=[
'subjects_files',
'roi_mask_file',
'cross_cluster',
'cxc_roi_mask_file',
'group_dim_reduce',
'random_state_tuple',
]),
name='inputspec',
ignore_cache=ignore_cache
)
inputspec_compression_dim = CustomCacheNode(
util.IdentityInterface(fields=[
'compression_dim',
]),
name='inputspec_compression_dim',
ignore_cache=ignore_cache
)
inputspec_boostraps = CustomCacheNode(
util.IdentityInterface(fields=[
'dataset_bootstraps',
'timeseries_bootstraps',
]),
name='inputspec_boostraps',
ignore_cache=ignore_cache
)
inputspec_boostraps.synchronize = True
inputspec_similarity_metric = CustomCacheNode(
util.IdentityInterface(fields=[
'similarity_metric',
]),
name='inputspec_similarity_metric',
ignore_cache=ignore_cache
)
inputspec_cluster_method = CustomCacheNode(
util.IdentityInterface(fields=[
'cluster_method',
]),
name='inputspec_cluster_method',
ignore_cache=ignore_cache
)
inputspec_blocklength = CustomCacheNode(
util.IdentityInterface(fields=[
'blocklength',
]),
name='inputspec_blocklength',
ignore_cache=ignore_cache
)
inputspec_n_clusters = CustomCacheNode(
util.IdentityInterface(fields=[
'n_clusters',
]),
name='inputspec_n_clusters',
ignore_cache=ignore_cache
)
inputspec_affinity_threshold = CustomCacheNode(
util.IdentityInterface(fields=[
'affinity_threshold',
]),
name='inputspec_affinity_threshold',
ignore_cache=ignore_cache
)
outputspec = CustomCacheNode(
util.IdentityInterface(fields=[
'group_stability_matrix',
'clusters_G',
'ism_gsm_corr',
'gsclusters_img',
'cluster_voxel_scores_img',
'cluster_voxel_scores',
'ind_group_cluster_stability',
'individualized_group_clusters',
'ind_group_cluster_labels',
'ind_group_cluster_stability_set',
]),
name='outputspec',
ignore_cache=ignore_cache
)
gdr = CustomCacheNode(
Function(
input_names=['subjects_files',
'roi_mask_file',
'compression_dim',
'group_dim_reduce',
'cross_cluster',
'cxc_roi_mask_file'],
output_names=['compressor',
'cxc_compressor',
'compression_labels_file'],
function=group_dim_reduce,
as_module=True
),
name='group_dim_reduce',
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
nis = CustomCacheMapNode(
Function(
input_names=['subject_file',
'roi_mask_file',
'n_bootstraps',
'n_clusters',
'compression_dim',
'similarity_metric',
'compressor',
'cxc_compressor',
'cross_cluster',
'cxc_roi_mask_file',
'cbb_block_size',
'blocklength',
'affinity_threshold',
'cluster_method',
'random_state_tuple'],
output_names=['ism_file', 'compression_labels_file'],
function=nifti_individual_stability,
as_module=True
),
name='individual_stability_matrices',
iterfield=['subject_file'],
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
nis.inputs.cbb_block_size = None
mgsm = CustomCacheMapNode(
Function(
input_names=['subject_stability_list',
'n_clusters',
'is_bootstrapping',
'roi_mask_file',
'group_dim_reduce',
'cluster_method',
'random_state_tuple'],
output_names=['G_file'],
function=map_group_stability_random_bootstrap,
as_module=True
),
name='map_group_stability',
iterfield='is_bootstrapping',
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
jgsm = CustomCacheNode(
Function(
input_names=['subject_stability_list',
'group_stability_list',
'n_bootstraps',
'n_clusters',
'roi_mask_file',
'group_dim_reduce',
'compression_labels_list',
'cluster_method',
'random_state_tuple'],
output_names=['G',
'clusters_G',
'ism_gsm_corr',
'gsm_file',
'clusters_G_file',
'ism_gsm_corr_file'],
function=join_group_stability,
as_module=True
),
name='join_group_stability',
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
igcm = CustomCacheMapNode(
Function(
input_names=['subject_stability_list',
'clusters_G',
'roi_mask_file',
'group_dim_reduce',
'compression_labels_file',
'random_state_tuple'],
output_names=['ind_group_cluster_stability_file',
'individualized_group_clusters_file',
'ind_group_cluster_labels_file'],
function=individual_group_clustered_maps,
as_module=True
),
name='individual_group_clustered_maps',
iterfield=[
'subject_stability_list',
'compression_labels_file'
],
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
post = CustomCacheNode(
Function(
input_names=['ind_group_cluster_stability_file_list'],
output_names=['ind_group_cluster_stability_set_file'],
function=post_analysis,
as_module=True
),
name='post_analysis',
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
gs_cluster_vol = CustomCacheNode(
Function(
input_names=['data_array',
'roi_mask_file',
'sample_file',
'filename'],
output_names=['img_file', 'img'],
function=ndarray_to_vol,
as_module=True
),
name='group_stability_clusters',
mem_gb=mem_per_proc,
ignore_cache=ignore_cache
)
gs_cluster_vol.inputs.filename = 'group_stability_clusters.nii.gz'
basc_wf.connect([
(
inputspec, gdr, [
('subjects_files', 'subjects_files'),
('roi_mask_file', 'roi_mask_file'),
('cxc_roi_mask_file', 'cxc_roi_mask_file'),
('cross_cluster', 'cross_cluster'),
('group_dim_reduce', 'group_dim_reduce'),
]
),
(
inputspec_compression_dim, gdr, [
('compression_dim', 'compression_dim'),
],
),
(
inputspec, nis, [
('subjects_files', 'subject_file'),
('roi_mask_file', 'roi_mask_file'),
('cross_cluster', 'cross_cluster'),
('cxc_roi_mask_file', 'cxc_roi_mask_file'),
('random_state_tuple', 'random_state_tuple'),
]
),
(
inputspec_boostraps, nis, [
('timeseries_bootstraps', 'n_bootstraps'),
]
),
(
inputspec_blocklength, nis, [
('blocklength', 'blocklength'),
]
),
(
inputspec_cluster_method, nis, [
('cluster_method', 'cluster_method'),
]
),
(
inputspec_compression_dim, nis, [
('compression_dim', 'compression_dim'),
]
),
(
inputspec_similarity_metric, nis, [
('similarity_metric', 'similarity_metric'),
]
),
(
inputspec_n_clusters, nis, [
('n_clusters', 'n_clusters'),
]
),
(
inputspec_affinity_threshold, nis, [
('affinity_threshold', 'affinity_threshold'),
]
),
(
gdr, nis, [
('compressor', 'compressor'),
('cxc_compressor', 'cxc_compressor'),
]
),
(
inputspec, mgsm, [
('roi_mask_file', 'roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
('random_state_tuple', 'random_state_tuple'),
]
),
(
inputspec_boostraps, mgsm, [
(('dataset_bootstraps', _generate_list), 'is_bootstrapping'),
]
),
(
inputspec_n_clusters, mgsm, [
('n_clusters', 'n_clusters'),
]
),
(
inputspec_cluster_method, mgsm, [
('cluster_method', 'cluster_method'),
]
),
(
nis, mgsm, [
('ism_file', 'subject_stability_list'),
]
),
(
inputspec, jgsm, [
('roi_mask_file', 'roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
('random_state_tuple', 'random_state_tuple'),
]
),
(
inputspec_boostraps, jgsm, [
('dataset_bootstraps', 'n_bootstraps'),
]
),
(
inputspec_n_clusters, jgsm, [
('n_clusters', 'n_clusters'),
]
),
(
inputspec_cluster_method, jgsm, [
('cluster_method', 'cluster_method'),
]
),
(
nis, jgsm, [
('ism_file', 'subject_stability_list'),
('compression_labels_file', 'compression_labels_list'),
]
),
(
mgsm, jgsm, [
('G_file', 'group_stability_list'),
]
),
(
inputspec, igcm, [
('roi_mask_file', 'roi_mask_file'),
('group_dim_reduce', 'group_dim_reduce'),
]
),
(
nis, igcm, [
('ism_file', 'subject_stability_list'),
('compression_labels_file', 'compression_labels_file'),
]
),
(
jgsm, igcm, [
('clusters_G', 'clusters_G'),
]
),
(
inputspec, gs_cluster_vol, [
('subjects_files', 'sample_file'),
('roi_mask_file', 'roi_mask_file'),
]
),
(
jgsm, gs_cluster_vol, [
('clusters_G', 'data_array'),
]
),
(
igcm, post, [
('ind_group_cluster_stability_file',
'ind_group_cluster_stability_file_list'),
]
),
# Workflow output
(
jgsm, outputspec, [
('gsm_file', 'group_stability_matrix'),
('clusters_G_file', 'clusters_G'),
('ism_gsm_corr_file', 'ism_gsm_corr'),
]
),
(
gs_cluster_vol, outputspec, [
('img_file', 'gsclusters_img'),
]
),
(
igcm, outputspec, [
('ind_group_cluster_stability_file',
'ind_group_cluster_stability'),
('individualized_group_clusters_file',
'individualized_group_clusters'),
('ind_group_cluster_labels_file',
'ind_group_cluster_labels'),
]
),
(
post, outputspec, [
('ind_group_cluster_stability_set_file',
'ind_group_cluster_stability_set'),
]
),
])
return basc_wf
|
{"hexsha": "84b2e76d84ae55abf3ea46d895bd76be77edbf30", "size": 28722, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyBASC/pipeline.py", "max_stars_repo_name": "AkiNikolaidis/BASC", "max_stars_repo_head_hexsha": "07ac80c1a22df84db8bdd30b09b881cecc8caf1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2017-09-22T07:47:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T07:04:59.000Z", "max_issues_repo_path": "PyBASC/pipeline.py", "max_issues_repo_name": "AkiNikolaidis/BASC", "max_issues_repo_head_hexsha": "07ac80c1a22df84db8bdd30b09b881cecc8caf1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2017-10-24T17:52:32.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-02T17:51:04.000Z", "max_forks_repo_path": "PyBASC/pipeline.py", "max_forks_repo_name": "AkiNikolaidis/BASC", "max_forks_repo_head_hexsha": "07ac80c1a22df84db8bdd30b09b881cecc8caf1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-11-17T00:47:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-02T17:56:14.000Z", "avg_line_length": 30.1701680672, "max_line_length": 80, "alphanum_fraction": 0.516503029, "include": true, "reason": "import numpy", "num_tokens": 5690}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 30 17:11:02 2021
@author: qcao
sketch of rod plate model and skeleton code for PyChrono
Useful links:
https://github.com/projectchrono/chrono/blob/03931da6b00cf276fc1c4882f43b2fd8b3c6fa7e/src/chrono/fea/ChElementShellBST.cpp
"""
# the usual suspects
import numpy as np
import nrrd
# meshing
from skimage import measure
import tetgen
import trimesh
import pyvista as pv
# finite element library
import pychrono as chrono
import pychrono.fea as fea
import pychrono.pardisomkl as mkl
def computeTriangularMeshNeighbors(faces):
"""
Find neighboring vertex indices for each face of a triangular mesh
For a boundary element without neighboring elements, the vertex index is NULL_INDEX
"""
NULL_INDEX = np.uint64(-1)
N = faces.shape[0]
neighbors = np.empty((N,3), dtype=np.uint64)
neighbors[:] = NULL_INDEX
# Compute neighbors for each face
facesSets = [set(x) for x in faces]
for ind in range(N):
diffList = [facesSets[x] - facesSets[ind] for x in range(faces.shape[0])]
neighboringFaces = np.nonzero(np.array([len(x) for x in diffList]) == 1)[0] # one element difference
for nind in neighboringFaces:
opposingVertex = faces[ind,:] == (facesSets[ind] - facesSets[nind]).pop()
neighbors[ind,opposingVertex] = diffList[nind].pop()
return neighbors, NULL_INDEX
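# A minimal sketch (hypothetical data) of the neighbor convention: for two
# triangles sharing edge (1, 2), each face stores the other triangle's
# unshared vertex in the slot of its own unshared vertex; boundary slots
# keep NULL_INDEX.
#
# faces = np.array([[0, 1, 2], [2, 1, 3]])
# neighbors, NULL_INDEX = computeTriangularMeshNeighbors(faces)
# neighbors[0] -> [3, NULL_INDEX, NULL_INDEX]
# neighbors[1] -> [NULL_INDEX, NULL_INDEX, 0]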
def computeFEARodPlateLinear(vertices, edges, faces,
verticesForce, verticesFixed, forceVector,
elasticModulus, poissonRatio, density,
barAreas, shellThicknesses):
"""
Finite element analysis for mixed rod-plate model
elasticModulus, poissonRatio, density : material properties
vertices : (numVerts,3) --> ChNodeFEAxyz
edges : (numEdges,2) --> ChElementBar
faces : (numFaces,3), or (numFaces,6) --> ChElementShellBST 0-1-2 (3-4-5 neighboring triangles opposed to 0-1-2)
forceVector : (3,) force direction and magnitude
verticesForce : applied force (array of bools or node indices)
verticesFixed : fixed nodes (array of bools or node indices)
For an example on ChElementShellBST, see:
https://github.com/projectchrono/chrono/blob/develop/src/demos/python/fea/demo_FEA_shellsBST.py
"""
# Indices should always be np.uint64
asUint64 = lambda x: x.astype(np.uint64)
asDouble = lambda x: x.astype(np.double)
# Convert boolean arrays to node indices
vertices, edges, faces = asDouble(vertices), asUint64(edges), asUint64(faces)
if verticesForce.dtype == bool:
verticesForce = np.nonzero(verticesForce)[0]
if verticesFixed.dtype == bool:
verticesFixed = np.nonzero(verticesFixed)[0]
# Find neighboring vertices for elements in faces
facesNeighbors, NULL_INDEX = computeTriangularMeshNeighbors(faces)
# System and mesh
system = chrono.ChSystemNSC()
mesh = fea.ChMesh()
mesh.SetAutomaticGravity(False)
# Material (Shell and Bars are different)
materialKirchoff = fea.ChElasticityKirchhoffIsothropic(elasticModulus, poissonRatio)
materialKirchoffShell = fea.ChMaterialShellKirchhoff(materialKirchoff)
materialKirchoffShell.SetDensity(density)
# Create list of nodes and set to mesh
nodesList = []
for ind in range(vertices.shape[0]):
node = fea.ChNodeFEAxyz(chrono.ChVectorD(vertices[ind,0], \
vertices[ind,1], \
vertices[ind,2]))
nodesList.append(node)
mesh.AddNode(node) # use 0-based indexing here
# Create list of shell elements and set to mesh
elementsShellList = []
for ind in range(faces.shape[0]):
# Get neighboring nodes, None if on face boundary
neighboringNodeInds = list(facesNeighbors[ind,:])
neighboringNodes = [None if x==NULL_INDEX else nodesList[x] for x in neighboringNodeInds]
# Add shell element
elementShell = fea.ChElementShellBST()
elementShell.SetNodes(nodesList[faces[ind,0]],
nodesList[faces[ind,1]],
nodesList[faces[ind,2]],
neighboringNodes[0],
neighboringNodes[1],
neighboringNodes[2])
elementShell.AddLayer(shellThicknesses[ind],
0 * chrono.CH_C_DEG_TO_RAD, # fiber angle (not used)
materialKirchoffShell)
elementsShellList.append(elementShell)
mesh.AddElement(elementShell)
# Create list of bar elements and set to mesh
elementsBarList = []
for ind in range(edges.shape[0]):
elementBar = fea.ChElementBar()
elementBar.SetNodes(nodesList[edges[ind,0]], nodesList[edges[ind,1]])
elementBar.SetBarDensity(density)
elementBar.SetBarYoungModulus(elasticModulus)
elementBar.SetBarArea(barAreas[ind])
elementsBarList.append(elementBar)
mesh.AddElement(elementBar)
# Boundary Condition: Truss with nodes of verticesForce
trussForce = chrono.ChBody()
# Boundary Condition: Truss with nodes of verticesFixed
trussFixed = chrono.ChBody()
trussFixed.SetBodyFixed(True)
# Boundary Condition: External force (****This took a long night to debug)
for vertInd in verticesForce:
nodesList[vertInd].SetForce(chrono.ChVectorD(*forceVector))
# Boundary Condition: Link to moving truss (should move in unison)
constraintsForceList = []
for vertInd in verticesForce:
constraint = fea.ChLinkPointFrame()
constraint.Initialize(nodesList[vertInd], trussForce)
constraintsForceList.append(constraint)
system.Add(constraint)
# Boundary Condition: Link trussForce and trussFixed to Prismatic Joint (displacement Z only)
# TODO: Reimplement this direct ChCoordsysD in the direction of loading
# constraint = chrono.ChLinkLockPrismatic()
# constraint.Initialize(trussFixed, trussForce, chrono.ChCoordsysD())
# system.AddLink(constraint)
# Boundary Condition: Link to fixed truss
constraintsFixedList = []
for vertInd in verticesFixed:
constraint = fea.ChLinkPointFrame()
constraint.Initialize(nodesList[vertInd], trussFixed)
constraintsFixedList.append(constraint)
system.Add(constraint)
# Prepare system and solve
system.Add(mesh)
system.Add(trussForce)
system.Add(trussFixed)
# Solver
msolver = mkl.ChSolverPardisoMKL()
msolver.LockSparsityPattern(True)
# Solve
system.SetSolver(msolver)
system.DoStaticLinear()
# Node positions of solution
vertices1 = vertices.copy()
for ind in range(len(nodesList)):
pos = nodesList[ind].GetPos()
vertices1[ind,0] = pos.x
vertices1[ind,1] = pos.y
vertices1[ind,2] = pos.z
# Save Shell Element Strains
strainsShell = np.zeros((faces.shape[0],6), dtype=np.double) # [m (bending), n(stretching)]
for ind, element in enumerate(elementsShellList):
Fi = chrono.ChVectorDynamicD(element.GetNdofs())
element.ComputeInternalForces(Fi)
strainsShell[ind,:] = [element.m.x, element.m.y, element.m.z,
element.n.x, element.n.y, element.n.z]
# Save Bar Element Strains
strainsBar = np.zeros(edges.shape[0],dtype=np.double)
for ind, element in enumerate(elementsBarList):
strainsBar[ind] = element.GetStrain()
return vertices1, strainsBar, strainsShell
if __name__ == "__main__":
# Nodes and Elements
vertices = np.array([[0,0,0],[1,0,0],[0,0,1],[1,0,1]]).astype(np.double)
# edges = np.array([[0,2],[1,3]])
edges = np.array([[0,2]])
faces = np.array([[0,1,2],[2,1,3]])
# Boundary Conditions
forceVector = np.array([0,0,-1.]).astype(np.double) # must be a (3,)
verticesForce = np.array([2,3],dtype=np.uint64)
verticesFixed = np.array([0,1],dtype=np.uint64)
# Material Properties
elasticModulus = 17e9 # Pa
poissonRatio = 0.3
density = 0.1
barAreas = np.pi*100e-6**2*np.ones(edges.shape[0],dtype=float) # m**2 = PI*r**2
shellThicknesses = 100e-6*np.ones(faces.shape[0],dtype=float) # m
vertices1, strainsBar, strainsShell = computeFEARodPlateLinear(vertices, edges, faces,
verticesForce, verticesFixed, forceVector,
elasticModulus, poissonRatio, density,
barAreas, shellThicknesses)
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.axes(projection='3d')
|
{"hexsha": "d9d5b80dd180c43f90d3d6c41e0986812dcd51d4", "size": 8942, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/test_20211130_rod_plate_models.py", "max_stars_repo_name": "qiancao/BoneBox", "max_stars_repo_head_hexsha": "0d10dac7c93f16f0643bebc62c63be2f4bd099f6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-11T20:49:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T20:49:19.000Z", "max_issues_repo_path": "examples/test_20211130_rod_plate_models.py", "max_issues_repo_name": "qiancao/BoneBox", "max_issues_repo_head_hexsha": "0d10dac7c93f16f0643bebc62c63be2f4bd099f6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/test_20211130_rod_plate_models.py", "max_forks_repo_name": "qiancao/BoneBox", "max_forks_repo_head_hexsha": "0d10dac7c93f16f0643bebc62c63be2f4bd099f6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7983539095, "max_line_length": 126, "alphanum_fraction": 0.6443748602, "include": true, "reason": "import numpy", "num_tokens": 2182}
|
%function ind = MS_nearest(x,tau,v);
%
% returns the row vector containing the indices of the nearest
% neighbours to each of the columns of x. Each point and its tau
% temporal neighbours are excluded from the search.
% v is an array (not necessarily logical) indicating which columns of x to
% use or, the relative importance of these columns, in the computation
% (i.e. use v(i)*x(i,:), not x(i,:)).
%
% default : tau=0
% v=ones(1,length(x(:,1)))
%
% this is a mex version of nearneigh, and provides a speed-up of
% at least 1000%.
%
% Michael Small
% michael.small@uwa.edu.au, http://school.maths.uwa.edu.au/~small/
% 15/7/04
% For further details, please see M. Small. Applied Nonlinear Time Series
% Analysis: Applications in Physics, Physiology and Finance. Nonlinear Science
% Series A, vol. 52. World Scientific, 2005. (ISBN 981-256-117-X) and the
% references therein.
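%
% Example usage (a sketch; points are stored as the columns of x):
%   x = randn(3,100);
%   ind = MS_nearest(x,2);
% ind(i) is then the column of x closest to x(:,i), excluding columns
% within 2 time steps of i.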
|
{"author": "benfulcher", "repo": "hctsa", "sha": "919f2aed7cc8e1a3a03304c1ade573fa664c73f8", "save_path": "github-repos/MATLAB/benfulcher-hctsa", "path": "github-repos/MATLAB/benfulcher-hctsa/hctsa-919f2aed7cc8e1a3a03304c1ade573fa664c73f8/Toolboxes/Michael_Small/MS_nearest.m"}
|
[STATEMENT]
lemma termination_0_1:
assumes p: "\<And>s. guard s \<Longrightarrow> p \<le> weight_spmf (while s)"
and p_pos: "0 < p"
and lossless: "\<And>s. guard s \<Longrightarrow> lossless_spmf (body s)"
shows "lossless_spmf (while s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lossless_spmf (local.while s)
[PROOF STEP]
unfolding lossless_spmf_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. weight_spmf (local.while s) = 1
[PROOF STEP]
proof(rule antisym)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. weight_spmf (local.while s) \<le> 1
2. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
let ?X = "{s. \<not> guard s}"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. weight_spmf (local.while s) \<le> 1
2. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
show "weight_spmf (while s) \<le> 1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. weight_spmf (local.while s) \<le> 1
[PROOF STEP]
by(rule weight_spmf_le_1)
[PROOF STATE]
proof (state)
this:
weight_spmf (local.while s) \<le> 1
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
define p' where "p' \<equiv> p / 2"
[PROOF STATE]
proof (state)
this:
p' \<equiv> p / 2
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
have p'_pos: "p' > 0" and "p' < p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < p' &&& p' < p
[PROOF STEP]
using p_pos
[PROOF STATE]
proof (prove)
using this:
0 < p
goal (1 subgoal):
1. 0 < p' &&& p' < p
[PROOF STEP]
by(simp_all add: p'_def)
[PROOF STATE]
proof (state)
this:
0 < p'
p' < p
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
have "\<exists>n. p' < measure (measure_spmf (iter n s)) ?X" if "guard s" for s
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s}
[PROOF STEP]
using p[OF that] \<open>p' < p\<close>
[PROOF STATE]
proof (prove)
using this:
p \<le> weight_spmf (local.while s)
p' < p
goal (1 subgoal):
1. \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s}
[PROOF STEP]
unfolding weight_while_conv_iter
[PROOF STATE]
proof (prove)
using this:
p \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
p' < p
goal (1 subgoal):
1. \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s}
[PROOF STEP]
by(subst (asm) le_cSUP_iff)(auto intro!: measure_spmf.subprob_measure_le_1)
[PROOF STATE]
proof (state)
this:
guard ?s \<Longrightarrow> \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n ?s)) {s. \<not> guard s}
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
guard ?s \<Longrightarrow> \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n ?s)) {s. \<not> guard s}
[PROOF STEP]
obtain N where p': "p' \<le> measure (measure_spmf (iter (N s) s)) ?X" if "guard s" for s
[PROOF STATE]
proof (prove)
using this:
guard ?s \<Longrightarrow> \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n ?s)) {s. \<not> guard s}
goal (1 subgoal):
1. (\<And>N. (\<And>s. guard s \<Longrightarrow> p' \<le> Sigma_Algebra.measure (measure_spmf (iter (N s) s)) {s. \<not> guard s}) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using p
[PROOF STATE]
proof (prove)
using this:
guard ?s \<Longrightarrow> \<exists>n. p' < Sigma_Algebra.measure (measure_spmf (iter n ?s)) {s. \<not> guard s}
guard ?s \<Longrightarrow> p \<le> weight_spmf (local.while ?s)
goal (1 subgoal):
1. (\<And>N. (\<And>s. guard s \<Longrightarrow> p' \<le> Sigma_Algebra.measure (measure_spmf (iter (N s) s)) {s. \<not> guard s}) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by atomize_elim(rule choice, force dest: order.strict_implies_order)
[PROOF STATE]
proof (state)
this:
guard ?s \<Longrightarrow> p' \<le> Sigma_Algebra.measure (measure_spmf (iter (N ?s) ?s)) {s. \<not> guard s}
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
interpret fuse: loop_spmf guard "\<lambda>s. iter (N s) s"
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
have "1 = weight_spmf (fuse.while s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 1 = weight_spmf (fuse.while s)
[PROOF STEP]
by(rule lossless_weight_spmfD[symmetric])
(rule fuse.termination_0_1_immediate; auto simp add: spmf_map vimage_def intro: p' p'_pos lossless_iter lossless)
[PROOF STATE]
proof (state)
this:
1 = weight_spmf (fuse.while s)
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
1 = weight_spmf (fuse.while s)
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
have "\<dots> \<le> (\<Squnion>n. measure (measure_spmf (iter n s)) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. weight_spmf (fuse.while s) \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
unfolding fuse.weight_while_conv_iter
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (fuse.iter n s)) {s. \<not> guard s}) \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
proof(rule cSUP_least)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
fix n
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "emeasure (measure_spmf (fuse.iter n s)) ?X \<le> (SUP n. emeasure (measure_spmf (iter n s)) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
proof(induction n arbitrary: s)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>s. emeasure (measure_spmf (fuse.iter 0 s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
2. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. \<And>s. emeasure (measure_spmf (fuse.iter 0 s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
2. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. emeasure (measure_spmf (fuse.iter 0 s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
by(auto intro!: SUP_upper2[where i=0])
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter 0 s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
case (Suc n)
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter n ?s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n ?s)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have inc: "incseq (\<lambda>n s'. emeasure (measure_spmf (iter n s')) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. incseq (\<lambda>n s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s})
[PROOF STEP]
by(rule incseq_SucI le_funI iter_mono_emeasure1)+
[PROOF STATE]
proof (state)
this:
incseq (\<lambda>n s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "emeasure (measure_spmf (fuse.iter (Suc n) s)) ?X = emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) ?X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} = emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) {s. \<not> guard s}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} = emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) {s. \<not> guard s}
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} = emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) {s. \<not> guard s}
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> = \<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) ?X \<partial>measure_spmf (iter (N s) s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) {s. \<not> guard s} = \<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s)
[PROOF STEP]
by(simp add: measure_spmf_bind o_def emeasure_bind[where N="measure_spmf _"] space_measure_spmf Pi_def space_subprob_algebra)
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) {s. \<not> guard s} = \<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s)
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (iter (N s) s \<bind> fuse.iter n)) {s. \<not> guard s} = \<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s)
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> \<le> \<integral>\<^sup>+ s'. (SUP n. emeasure (measure_spmf (iter n s')) ?X) \<partial>measure_spmf (iter (N s) s)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s) \<le> \<integral>\<^sup>+ s'. (\<Squnion>n. emeasure (measure_spmf (iter n s')) {s. \<not> guard s}) \<partial>measure_spmf (iter (N s) s)
[PROOF STEP]
by(rule nn_integral_mono Suc.IH)+
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s) \<le> \<integral>\<^sup>+ s'. (\<Squnion>n. emeasure (measure_spmf (iter n s')) {s. \<not> guard s}) \<partial>measure_spmf (iter (N s) s)
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>+ s'. emeasure (measure_spmf (fuse.iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s) \<le> \<integral>\<^sup>+ s'. (\<Squnion>n. emeasure (measure_spmf (iter n s')) {s. \<not> guard s}) \<partial>measure_spmf (iter (N s) s)
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> = (SUP n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) ?X \<partial>measure_spmf (iter (N s) s))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<integral>\<^sup>+ s'. (\<Squnion>n. emeasure (measure_spmf (iter n s')) {s. \<not> guard s}) \<partial>measure_spmf (iter (N s) s) = (\<Squnion>n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s))
[PROOF STEP]
by(rule nn_integral_monotone_convergence_SUP[OF inc]) simp
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>+ s'. (\<Squnion>n. emeasure (measure_spmf (iter n s')) {s. \<not> guard s}) \<partial>measure_spmf (iter (N s) s) = (\<Squnion>n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s))
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
\<integral>\<^sup>+ s'. (\<Squnion>n. emeasure (measure_spmf (iter n s')) {s. \<not> guard s}) \<partial>measure_spmf (iter (N s) s) = (\<Squnion>n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s))
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> = (SUP n. emeasure (measure_spmf (bind_spmf (iter (N s) s) (iter n))) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Squnion>n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s)) = (\<Squnion>n. emeasure (measure_spmf (iter (N s) s \<bind> iter n)) {s. \<not> guard s})
[PROOF STEP]
by(simp add: measure_spmf_bind o_def emeasure_bind[where N="measure_spmf _"] space_measure_spmf Pi_def space_subprob_algebra)
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s)) = (\<Squnion>n. emeasure (measure_spmf (iter (N s) s \<bind> iter n)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. \<integral>\<^sup>+ s'. emeasure (measure_spmf (iter n s')) {s. \<not> guard s} \<partial>measure_spmf (iter (N s) s)) = (\<Squnion>n. emeasure (measure_spmf (iter (N s) s \<bind> iter n)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> = (SUP n. emeasure (measure_spmf (iter (N s + n) s)) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Squnion>n. emeasure (measure_spmf (iter (N s) s \<bind> iter n)) {s. \<not> guard s}) = (\<Squnion>n. emeasure (measure_spmf (iter (N s + n) s)) {s. \<not> guard s})
[PROOF STEP]
by(simp add: iter_bind_iter)
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. emeasure (measure_spmf (iter (N s) s \<bind> iter n)) {s. \<not> guard s}) = (\<Squnion>n. emeasure (measure_spmf (iter (N s + n) s)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. emeasure (measure_spmf (iter (N s) s \<bind> iter n)) {s. \<not> guard s}) = (\<Squnion>n. emeasure (measure_spmf (iter (N s + n) s)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> \<le> (SUP n. emeasure (measure_spmf (iter n s)) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Squnion>n. emeasure (measure_spmf (iter (N s + n) s)) {s. \<not> guard s}) \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
by(rule SUP_mono) auto
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. emeasure (measure_spmf (iter (N s + n) s)) {s. \<not> guard s}) \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. \<And>n s. (\<And>s. emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})) \<Longrightarrow> emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter (Suc n) s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "\<dots> = ennreal (SUP n. measure (measure_spmf (iter n s)) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s}) = ennreal (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
by(subst ennreal_SUP)(fold measure_spmf.emeasure_eq_measure, auto simp add: not_less measure_spmf.subprob_emeasure_le_1 intro!: exI[where x="1"])
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s}) = ennreal (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Squnion>n. emeasure (measure_spmf (iter n s)) {s. \<not> guard s}) = ennreal (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
have "0 \<le> (SUP n. measure (measure_spmf (iter n s)) ?X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
by(rule cSUP_upper2)(auto intro!: bdd_aboveI[where M=1] simp add: measure_spmf.subprob_measure_le_1)
[PROOF STATE]
proof (state)
this:
0 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (2 subgoals):
1. UNIV \<noteq> {}
2. \<And>x. x \<in> UNIV \<Longrightarrow> Sigma_Algebra.measure (measure_spmf (fuse.iter x s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> ennreal (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
0 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
show "measure (measure_spmf (fuse.iter n s)) ?X \<le> \<dots>"
[PROOF STATE]
proof (prove)
using this:
emeasure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> ennreal (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
0 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. Sigma_Algebra.measure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
by(simp add: measure_spmf.emeasure_eq_measure)
[PROOF STATE]
proof (state)
this:
Sigma_Algebra.measure (measure_spmf (fuse.iter n s)) {s. \<not> guard s} \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. UNIV \<noteq> {}
[PROOF STEP]
qed simp
[PROOF STATE]
proof (state)
this:
weight_spmf (fuse.while s) \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
1 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
show "1 \<le> weight_spmf (while s)"
[PROOF STATE]
proof (prove)
using this:
1 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. 1 \<le> weight_spmf (local.while s)
[PROOF STEP]
unfolding weight_while_conv_iter
[PROOF STATE]
proof (prove)
using this:
1 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
goal (1 subgoal):
1. 1 \<le> (\<Squnion>n. Sigma_Algebra.measure (measure_spmf (iter n s)) {s. \<not> guard s})
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
1 \<le> weight_spmf (local.while s)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 11727, "file": "Probabilistic_While_While_SPMF", "length": 74}
|
import configparser
import os
import sys
import netCDF4 as nc
import numpy as np
from helperFunctions import temperature_array_from_result
from helperFunctions import surface_temperature_array_from_result
DEPTH = 10
def print_results(section, temp_mean, temp_max, temp_min, temp_std_dev):
print('Mean temp of {}: {:02.3f}.'.format(section, temp_mean))
print('Max temp of {}: {:02.3f}.'.format(section, temp_max))
print('Min temp of {}: {:02.3f}.'.format(section, temp_min))
print('Std dev temp of {}: {:02.4f}.'.format(section, temp_std_dev))
def write_results_to_file(section, temp_mean, temp_max, temp_min, temp_std_dev,
filepath, file_mode):
config = configparser.ConfigParser()
config[section] = {}
config[section]['Mean'] = str(temp_mean)
config[section]['Max'] = str(temp_max)
config[section]['Min'] = str(temp_min)
config[section]['Std_Dev'] = str(temp_std_dev)
print('Write results to {0}.'.format(filepath))
with open(filepath, file_mode) as configfile:
config.write(configfile)
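# Read the final time step of netCDF variable `name` from `filepath` and
# return it as a NumPy array ordered (nNodes_2, nNodes_1, nNodes_0).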
def array_from_file(filepath, name):
filepath += '.nc'
if not os.path.isfile(filepath):
print(filepath, 'does not exist.')
print('Aborting.')
exit()
nc_file = nc.Dataset(filepath)
dim0 = nc_file.dimensions['nNodes_0'].size
dim1 = nc_file.dimensions['nNodes_1'].size
dim2 = nc_file.dimensions['nNodes_2'].size
nc_var = nc_file.variables[name]
np_array = np.zeros((dim2, dim1, dim0))
np_array[:,:,:] = nc_var[-1,:,:,:]
nc_file.close()
return np_array
def region_array_from_file(filepath):
region = array_from_file(filepath, 'region')
return region
def surface_array_from_file(filepath):
surface = array_from_file(filepath, 'surface')
return surface
def surface_vessels_array_from_file(filepath):
vessels = array_from_file(filepath, 'vessels')
vessels = vessels[-1,:,:]
return vessels
def calc_open_surface_temperatures(temp, surface):
skull = surface[-1,:,:]
if np.count_nonzero(skull == 1) != 0:
temp_mean = np.mean(temp[np.where(skull == 1)])
temp_max = np.max(temp[np.where(skull == 1)])
temp_min = np.min(temp[np.where(skull == 1)])
temp_std_dev = np.std(temp[np.where(skull == 1)])
else:
print('No open surface specified.')
temp_mean = -1.0
temp_max = -1.0
temp_min = -1.0
temp_std_dev = -1.0
return temp_mean, temp_max, temp_min, temp_std_dev
def open_surface_temperatures(filepath, filepath_init, do_print=True, do_write=True):
print()
print('Calc open surface temperatures of {0}.'.format(filepath))
temp = surface_temperature_array_from_result(filepath)
surface = surface_array_from_file(filepath_init)
temp_mean, temp_max, temp_min, temp_std_dev = calc_open_surface_temperatures(temp, surface)
if do_print == True:
print_results('open surface', temp_mean, temp_max, temp_min,
temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath)[0] + '_results.dat'
write_results_to_file('Open_Surface', temp_mean, temp_max, temp_min,
temp_std_dev, filepath, 'w')
print('Done.')
return temp_mean
def calc_tumor_temperatures(temp, tumor):
if np.count_nonzero(tumor == 1) != 0:
temp_mean = np.mean(temp[np.where(tumor == 1)])
temp_max = np.max(temp[np.where(tumor == 1)])
temp_min = np.min(temp[np.where(tumor == 1)])
temp_std_dev = np.std(temp[np.where(tumor == 1)])
else:
print('No tumor specified.')
temp_mean = -1.0
temp_max = -1.0
temp_min = -1.0
temp_std_dev = -1.0
return temp_mean, temp_max, temp_min, temp_std_dev
def tumor_temperatures(filepath, region_filepath, do_print=True, do_write=True):
print()
print('Calc tumor temperatures of {0}.'.format(filepath))
temp = temperature_array_from_result(filepath)
tumor = region_array_from_file(region_filepath)
temp_mean, temp_max, temp_min, temp_std_dev = calc_tumor_temperatures(temp, tumor)
if do_print == True:
print_results('tumor', temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath)[0] + '_results.dat'
write_results_to_file('Tumor', temp_mean, temp_max, temp_min,
temp_std_dev, filepath, 'a')
print('Done.')
return temp_mean
def calc_tumor_near_surface_temperatures(temp, tumor):
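# Find the last slice along axis 0 that still contains tumor (the side
# nearest the open surface) and restrict the statistics to the DEPTH
# node layers ending at that slice.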
dim2 = np.any(tumor, axis=(1, 2))
try:
min2, max2 = np.where(dim2)[0][[0, -1]]
except IndexError:
return -1.0, -1.0, -1.0, -1.0
depth = DEPTH
tumor = tumor[max2-depth+1:max2+1,:,:]
temp = temp[max2-depth+1:max2+1,:,:]
if np.count_nonzero(tumor == 1) != 0:
temp_mean = np.mean(temp[np.where(tumor == 1)])
temp_max = np.max(temp[np.where(tumor == 1)])
temp_min = np.min(temp[np.where(tumor == 1)])
temp_std_dev = np.std(temp[np.where(tumor == 1)])
else:
print('No tumor specified.')
temp_mean = -1.0
temp_max = -1.0
temp_min = -1.0
temp_std_dev = -1.0
return temp_mean, temp_max, temp_min, temp_std_dev
def tumor_near_surface_temperatures(filepath, region_filepath, do_print=True, do_write=True):
print()
print('Calc tumor temperatures near surface of {0}.'.format(filepath))
temp = temperature_array_from_result(filepath)
tumor = region_array_from_file(region_filepath)
temp_mean, temp_max, temp_min, temp_std_dev = calc_tumor_near_surface_temperatures(temp, tumor)
depth = DEPTH
if do_print == True:
section = 'tumor near surface (first ' + str(depth) + ' nodes)'
print_results(section, temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath)[0] + '_results.dat'
section = 'Tumor_Near_Surface_' + str(depth) + '_Depth'
write_results_to_file(section, temp_mean, temp_max, temp_min,
temp_std_dev, filepath, 'a')
print('Done.')
return temp_mean
def calc_brain_temperatures(temp, tumor):
if np.count_nonzero(tumor == 1) != 0:
temp_mean = np.mean(temp[np.where(tumor == 0)])
temp_max = np.max(temp[np.where(tumor == 0)])
temp_min = np.min(temp[np.where(tumor == 0)])
temp_std_dev = np.std(temp[np.where(tumor == 0)])
else:
temp_mean = np.mean(temp)
temp_max = np.max(temp)
temp_min = np.min(temp)
temp_std_dev = np.std(temp)
return temp_mean, temp_max, temp_min, temp_std_dev
def brain_temperatures(filepath, region_filepath, do_print=True, do_write=True):
print()
print('Calc brain temperatures of {0}.'.format(filepath))
temp = temperature_array_from_result(filepath)
tumor = region_array_from_file(region_filepath)
temp_mean, temp_max, temp_min, temp_std_dev = calc_brain_temperatures(temp, tumor)
if do_print == True:
print_results('brain', temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath)[0] + '_results.dat'
write_results_to_file('Brain', temp_mean, temp_max, temp_min, temp_std_dev,
filepath, 'a')
print('Done.')
return temp_mean
def calc_domain_temperatures(temp):
temp_mean = np.mean(temp)
temp_max = np.max(temp)
temp_min = np.min(temp)
temp_std_dev = np.std(temp)
return temp_mean, temp_max, temp_min, temp_std_dev
def domain_temperatures(filepath, do_print=True, do_write=True):
print()
print('Calc domain temperatures of {0}.'.format(filepath))
temp = temperature_array_from_result(filepath)
temp_mean, temp_max, temp_min, temp_std_dev = calc_domain_temperatures(temp)
if do_print == True:
print_results('domain', temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath)[0] + '_results.dat'
write_results_to_file('Domain', temp_mean, temp_max, temp_min, temp_std_dev,
filepath, 'a')
print('Done.')
return temp_mean
def calc_csv_result_temperatures(temp):
temp_mean = np.mean(temp[np.where(temp != 0)])
temp_max = np.max(temp[np.where(temp != 0)])
temp_min = np.min(temp[np.where(temp != 0)])
temp_std_dev = np.std(temp[np.where(temp != 0)])
return temp_mean, temp_max, temp_min, temp_std_dev
def csv_result_temperatures(filepath, csv, do_print=True, do_write=True):
csv = os.path.join(csv, 'thermo.csv')
print()
print('Calc temperatures of {0}.'.format(csv))
# Open results file (thermography).
temp = np.genfromtxt(csv, delimiter=',')
temp = np.nan_to_num(temp)
temp_mean, temp_max, temp_min, temp_std_dev = calc_csv_result_temperatures(temp)
if do_print == True:
print_results('thermo.csv', temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
section = str(os.path.basename(csv))
filepath = os.path.splitext(filepath)[0] + '_results.dat'
write_results_to_file(section, temp_mean, temp_max, temp_min, temp_std_dev,
filepath, 'a')
print('Done.')
return temp_mean
def calc_vessels_temperatures(temp, vessels):
temp_mean = np.mean(temp[np.where(vessels == 1)])
temp_max = np.max(temp[np.where(vessels == 1)])
temp_min = np.min(temp[np.where(vessels == 1)])
temp_std_dev = np.std(temp[np.where(vessels == 1)])
return temp_mean, temp_max, temp_min, temp_std_dev
def vessels_temperatures(filepath_nc, filepath_vessels, do_print=True, do_write=True):
print()
print('Calc vessel temperatures of {0}.'.format(filepath_nc))
temp = surface_temperature_array_from_result(filepath_nc)
vessels = surface_vessels_array_from_file(filepath_vessels)
if np.count_nonzero(vessels == 1) != 0:
temp_mean, temp_max, temp_min, temp_std_dev = calc_vessels_temperatures(temp, vessels)
else:
print('* WARNING: No vessels in vessels file found.')
temp_mean, temp_max, temp_min, temp_std_dev = -1.0, -1.0, -1.0, -1.0
if do_print == True:
print_results('vessels', temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath_nc)[0] + '_results.dat'
write_results_to_file('Vessel', temp_mean, temp_max, temp_min, temp_std_dev,
filepath, 'a')
print('Done.')
return temp_mean
def calc_non_vessels_temperatures(temp, vessels):
temp_mean = np.mean(temp[np.where(vessels == 0)])
temp_max = np.max(temp[np.where(vessels == 0)])
temp_min = np.min(temp[np.where(vessels == 0)])
temp_std_dev = np.std(temp[np.where(vessels == 0)])
return temp_mean, temp_max, temp_min, temp_std_dev
def non_vessels_temperatures(filepath_nc, filepath_vessels, do_print=True, do_write=True):
print()
print('Calc non-vessel temperatures of {0}.'.format(filepath_nc))
temp = surface_temperature_array_from_result(filepath_nc)
vessels = surface_vessels_array_from_file(filepath_vessels)
if np.count_nonzero(vessels == 0) != 0:
temp_mean, temp_max, temp_min, temp_std_dev = calc_non_vessels_temperatures(temp, vessels)
else:
print('* WARNING: No non-vessels in vessels file found.')
temp_mean, temp_max, temp_min, temp_std_dev = -1.0, -1.0, -1.0, -1.0
if do_print == True:
print_results('non-vessels', temp_mean, temp_max, temp_min, temp_std_dev)
if do_write == True:
filepath = os.path.splitext(filepath_nc)[0]
filepath += '_results.dat'
write_results_to_file('Non_Vessel', temp_mean, temp_max, temp_min,
temp_std_dev, filepath, 'a')
print('Done.')
return temp_mean
def calc_l2_norm(filepath_nc, T_normal, T_tumor, T_vessel,
T_normal_thermo, T_tumor_thermo, T_vessel_thermo):
print()
print('Calc L2-norm of {0}.'.format(filepath_nc))
if T_normal_thermo == -1.0 or \
T_tumor_thermo == -1.0 or \
T_vessel_thermo == -1.0:
print('No target values specified.')
elif T_vessel == -1.0:
print('No vessels specified.')
elif T_normal == -1.0:
print('No normal region specified.')
elif T_tumor == -1.0:
print('No tumor specified.')
else:
scafes_values = np.asarray([T_normal, T_tumor, T_vessel])
target_values = np.asarray([T_normal_thermo, T_tumor_thermo, T_vessel_thermo])
l2_norm = np.linalg.norm(np.subtract(scafes_values, target_values), 2)
print('Target values:', target_values)
print('T_normal: {:02.3f}.'.format(T_normal))
print('T_tumor: {:02.3f}.'.format(T_tumor))
print('T_vessel: {:02.3f}.'.format(T_vessel))
print('L2-norm: {:02.3f}.'.format(l2_norm))
filepath = os.path.splitext(filepath_nc)[0] + '_results.dat'
config = configparser.ConfigParser()
config['L2-norm'] = {}
config['L2-norm']['Target values'] = str(target_values)
config['L2-norm']['T_normal'] = str(T_normal)
config['L2-norm']['T_tumor'] = str(T_tumor)
config['L2-norm']['T_vessel'] = str(T_vessel)
config['L2-norm']['L2-norm'] = str(l2_norm)
print('Write results to {0}.'.format(filepath))
with open(filepath, 'a') as configfile:
config.write(configfile)
print('Done.')
def main():
print('Script will be called by startSimulation.py.')
print('Aborting.')
exit()
if __name__ == '__main__':
main()
|
{"hexsha": "3c41b50cf636908653e4cc66f362e7103c1fa25c", "size": 13733, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/MRIDataPBHEqnFDM/postProcessing.py", "max_stars_repo_name": "nih23/MRIDrivenHeatSimulation", "max_stars_repo_head_hexsha": "de6d16853df1faf44c700d1fc06584351bf6c816", "max_stars_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/MRIDataPBHEqnFDM/postProcessing.py", "max_issues_repo_name": "nih23/MRIDrivenHeatSimulation", "max_issues_repo_head_hexsha": "de6d16853df1faf44c700d1fc06584351bf6c816", "max_issues_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/MRIDataPBHEqnFDM/postProcessing.py", "max_forks_repo_name": "nih23/MRIDrivenHeatSimulation", "max_forks_repo_head_hexsha": "de6d16853df1faf44c700d1fc06584351bf6c816", "max_forks_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5777202073, "max_line_length": 99, "alphanum_fraction": 0.6546275395, "include": true, "reason": "import numpy", "num_tokens": 3570}
|
import numpy as np
import pandas as pd
class Population():
"""docstring for Population"""
def __init__(self, population=None, #a specific population can be passed in
population_size=10, #population size
random_state=42, #fix the seed when a random population is initialized
number_of_vectors=10, #number of vectors, n
vectors_dimension=10, #dimension of the vectors, m
number_of_groups=2, #number of groups the vectors are split into, k
crossover_percent=0.85, #fraction of offspring in the new generation
mutation_probability=0.1 #probability of mutating a gene
):
'''Initializes the population'''
if population is not None: #a random population is initialized by default
self.population = population
self.population_size = population.shape[0]
else:
np.random.seed(random_state)
self.population = np.random.randint(0, high=number_of_groups, size=(population_size, number_of_vectors))
self.population_size = population_size
self.random_state = random_state
self.fitness_values = np.zeros(shape=population_size)
self.number_of_groups = number_of_groups
self.vectors_dimension = vectors_dimension
self.number_of_vectors = number_of_vectors
self.crossover_percent = crossover_percent
self.mutation_probability = mutation_probability
def evaluate_fitness_values(self, vectors):
'''Computes the fitness values of the chromosomes'''
self.fitness_values = []
for chromosome in self.population:
sums = np.array([vectors[chromosome == l].sum(axis=0) for l in range(self.number_of_groups)])
self.fitness_values.append(np.abs(sums.max(axis=0) - sums.min(axis=0)).max())
self.fitness_values = np.array(self.fitness_values)
return self.fitness_values
def sort(self, vectors):
data = pd.DataFrame(self.population, columns=np.arange(self.number_of_vectors))
data['fitness_values'] = self.evaluate_fitness_values(vectors)
data = data.sort_values(by=['fitness_values'])
self.population = data[np.arange(self.number_of_vectors)].values
self.fitness_values = data['fitness_values'].values
return self
def mutate(self):
'''Applies the mutation operation to the whole population'''
return np.where(np.random.random(size=(self.population_size, self.number_of_vectors)) <= self.mutation_probability,
np.random.randint(0, high=self.number_of_groups, size=(self.population_size, self.number_of_vectors)),
self.population)
def crossover(self):
'''Applies the crossover operation to the whole population'''
generation = []
for i in range(self.population_size - 1):
crossover_point = np.random.randint(1, high=self.number_of_vectors-1, size=1)
generation.append(np.hstack((self.population[i, :crossover_point[0]], self.population[i + 1, crossover_point[0]:])))
generation.append(np.hstack((self.population[i + 1, :crossover_point[0]], self.population[i, crossover_point[0]:])))
return np.array(generation)
def next_generation(self, vectors):
'''Advances the population to the next generation'''
new_generation = Population(population=self.crossover(), number_of_vectors=vectors.shape[0]).sort(vectors)
self.sort(vectors)
number = int(self.crossover_percent*self.population_size)
self.population = np.vstack((new_generation.population[:number], self.population[:self.population_size - number]))
self.population = self.mutate()
self.local_search_2_change(vectors).sort(vectors)
return self
def local_search_2_change(self, vectors):
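# 2-change local search: swap the group labels at two random positions of
# each chromosome and keep the swap only if it lowers the fitness value.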
for i in range(self.population_size):
chromosome = self.population[i]
i1, i2 = np.random.choice(np.arange(self.number_of_vectors), size=2, replace=False)
new_chromosome = chromosome.copy()
new_chromosome[i1] = chromosome[i2]
new_chromosome[i2] = chromosome[i1]
sums = np.array([vectors[new_chromosome == l].sum(axis=0) for l in range(self.number_of_groups)])
t = np.abs(sums.max(axis=0) - sums.min(axis=0)).max()
if t < self.fitness_values[i]:
self.population[i] = new_chromosome
self.fitness_values[i] = t
return self
class Memetic():
count = 0
solutions = {}
@staticmethod
def solve(vectors, popul, number_of_steps=10):
population = Population(population=popul.population,
population_size=popul.population_size,
random_state=popul.random_state,
number_of_vectors=popul.number_of_vectors,
vectors_dimension=popul.vectors_dimension,
number_of_groups=popul.number_of_groups,
crossover_percent=popul.crossover_percent,
mutation_probability=popul.mutation_probability
).sort(vectors)
best = population.population[0]
value = population.fitness_values[0]
story = []
for i in range(number_of_steps):
population = population.next_generation(vectors)
if population.fitness_values[0] < value:
best = population.population[0]
value = population.fitness_values[0]
story.append(value)
Memetic.count += 1
Memetic.solutions[Memetic.count] = {'value' : value,
'objective function story' : np.array(story),
'solution' : best,
'start population' : popul,
'end population' : population}
print('Solution stored in Memetic.solutions, identifier: {}'.format(Memetic.count))
return value, best
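if __name__ == '__main__':
# A minimal usage sketch on hypothetical data: partition 10 random
# 5-dimensional vectors into 2 groups with the default settings.
vectors = np.random.random((10, 5))
popul = Population(number_of_vectors=10, vectors_dimension=5)
value, best = Memetic.solve(vectors, popul, number_of_steps=20)
print(value, best)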
|
{"hexsha": "b37d796668e70639a263dd6347a0ff50f9176a09", "size": 6359, "ext": "py", "lang": "Python", "max_stars_repo_path": "memetic_algorithm/memetic_algorithm.py", "max_stars_repo_name": "tsyploff/rossiya-airlines", "max_stars_repo_head_hexsha": "3d0dc97f8712f197da2179e5c3836349785ebb45", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "memetic_algorithm/memetic_algorithm.py", "max_issues_repo_name": "tsyploff/rossiya-airlines", "max_issues_repo_head_hexsha": "3d0dc97f8712f197da2179e5c3836349785ebb45", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "memetic_algorithm/memetic_algorithm.py", "max_forks_repo_name": "tsyploff/rossiya-airlines", "max_forks_repo_head_hexsha": "3d0dc97f8712f197da2179e5c3836349785ebb45", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.1229508197, "max_line_length": 129, "alphanum_fraction": 0.6051265922, "include": true, "reason": "import numpy", "num_tokens": 1370}
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# A Basic Example
'''
Keras is a powerful and easy-to-use deep learning library for Theano and TensorFlow
that provides a high-level neural networks API to develop and evaluate deep learning models.
'''
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
data = np.random.random((1000,100))
labels = np.random.randint(2,size=(1000,1))
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(data,labels,validation_split=0.1)
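# A minimal follow-up sketch (not part of the original snippet): evaluate the
# fitted model and predict on fresh random inputs of the same shape.
loss, acc = model.evaluate(data, labels, verbose=0)
print('loss: {:.3f}, accuracy: {:.3f}'.format(loss, acc))
preds = model.predict(np.random.random((5, 100)))
print(preds.shape)  # -> (5, 1): one sigmoid probability per sample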
|
{"hexsha": "ecfb1815a85034f3cccc249a196dad68aa4c89fa", "size": 740, "ext": "py", "lang": "Python", "max_stars_repo_path": "kerasTest/kerasBasicExample.py", "max_stars_repo_name": "Dongzhixiao/newRecognize", "max_stars_repo_head_hexsha": "d87fa7752e2a805ddb2f8c0435383de47b687f6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 72, "max_stars_repo_stars_event_min_datetime": "2018-05-02T08:07:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T23:38:15.000Z", "max_issues_repo_path": "kerasTest/kerasBasicExample.py", "max_issues_repo_name": "Dongzhixiao/newRecognize", "max_issues_repo_head_hexsha": "d87fa7752e2a805ddb2f8c0435383de47b687f6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kerasTest/kerasBasicExample.py", "max_forks_repo_name": "Dongzhixiao/newRecognize", "max_forks_repo_head_hexsha": "d87fa7752e2a805ddb2f8c0435383de47b687f6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 71, "max_forks_repo_forks_event_min_datetime": "2018-07-14T08:33:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T05:41:02.000Z", "avg_line_length": 26.4285714286, "max_line_length": 92, "alphanum_fraction": 0.7297297297, "include": true, "reason": "import numpy", "num_tokens": 172}
|
import os
import sys
sys.path.append('../')
import gridemic
import numpy as np
from scipy.optimize import root_scalar
R0 = float(sys.argv[1])
print(R0)
# Common settings
Seed = 5772 # A seed for the RNG. None lets numpy.random select a seed randomly
N = 3162 # Grid size. N x N = P
NIi = 100 # Number of initial weak-symptom infectious
kEI = 3 # E---> I shape parameter for the Gamma distribution
thetaEI = 1 # E---> I scale parameter for the Gamma distribution
kIR = 4 # I---> R shape parameter for the Gamma distribution
thetaIR = 1 # I---> R scale parameter for the Gamma distribution
prob_symptom = 0.50 # Probability of developing strong symptoms
test_lag = 1 # Time to wait before knowing the test result (if testing is on)
verbose = 0 # Verbose output if 1
# Parameters
prob_trace = 1.00 # Probability of succeeding in tracing a neighbor
prob_detect = 1.00 # Probability of succeeding in recognizing a strong-symptom case
NTime = 10000 # Maximum duration of the simulation
num_tests = 0 # Number of tests available per time step
test_begin = 10000 # Day when Testing and Quarantine starts
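# fR0 encodes the assumed relation between the per-link transmission
# probability tau and the reproduction number R0 on the grid (form taken as
# given from the accompanying model); root_scalar inverts it for tau below.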
def fR0(tau, R0):
return 4*tau+4*(1-(1-tau)**4)-R0*1.33
if R0 == 0:
    tau = 0
else:
    tau = root_scalar(fR0, args=(R0,), bracket=[1e-6, 1]).root
SSEM = gridemic.Model(
seed_random = Seed,
N = N,
kEI = kEI,
thetaEI = thetaEI,
kIR = kIR,
thetaIR = thetaIR,
prob_symptom = prob_symptom,
tauW = tau,
etaW = tau,
tauS = tau,
etaS = tau,
num_tests = num_tests,
prob_trace = prob_trace,
prob_detect = prob_detect,
test_lag = test_lag,
test_begin = test_begin
)
Population, _ = SSEM.simulate(NTime = NTime, num_initial_infectious = NIi)
np.savetxt('data/Pop_R0={:04.2f}_NTests=0.dat'.format(R0),Population)
|
{"hexsha": "8b3f64c3523c8239c8f057cc847536bc14c4e89f", "size": 1973, "ext": "py", "lang": "Python", "max_stars_repo_path": "scarselli_etal_2021/run2b.py", "max_stars_repo_name": "burakbudanur/gridemic", "max_stars_repo_head_hexsha": "7429fd53555a1abd21614e1cce104f043576e05d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scarselli_etal_2021/run2b.py", "max_issues_repo_name": "burakbudanur/gridemic", "max_issues_repo_head_hexsha": "7429fd53555a1abd21614e1cce104f043576e05d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scarselli_etal_2021/run2b.py", "max_forks_repo_name": "burakbudanur/gridemic", "max_forks_repo_head_hexsha": "7429fd53555a1abd21614e1cce104f043576e05d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3538461538, "max_line_length": 85, "alphanum_fraction": 0.6289913837, "include": true, "reason": "import numpy,from scipy", "num_tokens": 583}
|
function fx = p57_fun ( n, x )
%*****************************************************************************80
%
%% P57_FUN evaluates the integrand for problem 57.
%
% Interval:
%
% 0 <= x <= 1
%
% Integrand:
%
% x^(3/2)
%
% Antiderivative:
%
% (2/5) * x^(5/2)
%
% Exact Integral:
%
% 0.4
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 04 November 2009
%
% Author:
%
% John Burkardt
%
% Reference:
%
% David Kahaner,
% Comparison of Numerical Quadrature Formulas,
% in Mathematical Software, edited by John R Rice,
% Academic Press, 1971.
%
% Parameters:
%
% Input, integer N, the number of evaluation points.
%
% Input, real X(N), the evaluation points.
%
% Output, real FX(N), the integrand values.
%
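%  Example:
%
%    p57_fun ( 3, [ 0.0, 0.25, 1.0 ] ) returns [ 0.0, 0.125, 1.0 ]
%    (values follow directly from the integrand x^(3/2) above).
%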
fx(1:n) = sqrt ( x(1:n).^3 );
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/test_int/p57_fun.m"}
|
function D = hmm_kl (hmm_p,hmm_q)
% Computes Kullback-Leibler divergence between two Hidden Markov Model
% distributions, through an approximation (an upper bound) as proposed in
% M. Do (2003). IEEE Signal Processing Letters 10
%
% Author: Diego Vidaurre, OHBA, University of Oxford (2018)
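%
% Example (hypothetical inputs): for two trained models with matching state
% configuration, D = hmm_kl(hmm1, hmm2) returns a scalar upper bound on the
% divergence; note it is not symmetric in its arguments.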
K = length(hmm_p.Pi);
if K~=length(hmm_q.Pi)
error(['The two HMMs must have the same number of states, ' ...
'and their order must correspond'])
end
if (hmm_p.train.order ~= hmm_q.train.order) || ...
(~strcmpi(hmm_p.train.covtype,hmm_q.train.covtype)) || ...
(length(hmm_p.train.embeddedlags) ~= length(hmm_q.train.embeddedlags)) || ...
(any(hmm_p.train.embeddedlags ~= hmm_q.train.embeddedlags)) || ...
(hmm_p.train.zeromean ~= hmm_q.train.zeromean)
error('The state configuration of the two HMMs must be identical')
end
hmm = hmm_p; setstateoptions;
if isfield(hmm_p.state(1),'W')
ndim = size(hmm_p.state(1).W.Mu_W,2);
else
ndim = size(hmm_p.state(1).Omega.Gam_rate,2);
end
S = hmm.train.S==1;
regressed = sum(S,1)>0;
D = 0;
if hmm_p.train.id_mixture
hmm_p.P = 1/K*ones(K); hmm_q.P = 1/K*ones(K);
end
nu = compute_nu (hmm_p.Pi,hmm_p.P); % weight vector
% Non-state specific stuff
switch train.covtype
case {'uniquediag','shareddiag'}
for n = 1:ndim
if ~regressed(n), continue; end
D = D + gamma_kl(hmm_p.Omega.Gam_shape,hmm_q.Omega.Gam_shape, ...
hmm_p.Omega.Gam_rate(n),hmm_q.Omega.Gam_rate(n));
end
case {'uniquefull','sharedfull'}
D = D + wishart_kl(hmm_p.Omega.Gam_rate(regressed,regressed),...
hmm_q.Omega.Gam_rate(regressed,regressed), ...
hmm_p.Omega.Gam_shape,hmm_q.Omega.Gam_shape);
case 'pca'
D = D + gamma_kl(hmm_p.Omega.Gam_shape,hmm_q.Omega.Gam_shape, ...
hmm_p.Omega.Gam_rate,hmm_q.Omega.Gam_rate);
end
% State specific stuff
for k = 1:K
% Trans probabilities
kk = hmm.train.Pstructure(k,:);
D = D + nu(k) * dirichlet_kl(hmm_p.Dir2d_alpha(k,kk),hmm_q.prior.Dir2d_alpha(k,kk));
% State distribution
hs = hmm_p.state(k);
hs0 = hmm_q.state(k);
if ~isempty(hs.W.Mu_W)
        if train.uniqueAR || ndim==1
            % the original repeated this same condition here, leaving the
            % else-branch unreachable; we instead branch on whether S_W is
            % stored as a plain matrix (assumed intent)
            if ismatrix(hs.W.S_W)
D = D + nu(k) * gauss_kl(hs.W.Mu_W, hs0.W.Mu_W, hs.W.S_W, hs0.W.S_W);
else
D = D + nu(k) * gauss_kl(hs.W.Mu_W, hs0.W.Mu_W, ...
permute(hs.W.S_W,[2 3 1]), permute(hs0.W.S_W,[2 3 1]));
end
elseif strcmp(train.covtype,'diag') || ...
strcmp(train.covtype,'uniquediag') || strcmp(train.covtype,'shareddiag') || ...
strcmp(train.covtype,'pca')
for n = 1:ndim
D = D + nu(k) * gauss_kl(hs.W.Mu_W(Sind(:,n),n),hs0.W.Mu_W(Sind(:,n),n), ...
permute(hs.W.S_W(n,Sind(:,n),Sind(:,n)),[2 3 1]),...
permute(hs0.W.S_W(n,Sind(:,n),Sind(:,n)),[2 3 1]));
end
else % full or sharedfull
mu_w = hs.W.Mu_W';
mu_w = mu_w(:);
mu_w0 = hs0.W.Mu_W';
mu_w0 = mu_w0(:);
D = D + nu(k) * gauss_kl(mu_w,mu_w0, hs.W.S_W, hs0.W.S_W);
end
end
switch train.covtype
case 'diag'
for n=1:ndim
if ~regressed(n), continue; end
D = D + nu(k) * gamma_kl(hs.Omega.Gam_shape,hs0.Omega.Gam_shape, ...
hs.Omega.Gam_rate(n),hs0.Omega.Gam_rate(n));
end
case 'full'
try
D = D + nu(k) * wishart_kl(hs.Omega.Gam_rate(regressed,regressed),...
hs0.Omega.Gam_rate(regressed,regressed), ...
hs.Omega.Gam_shape,hs0.Omega.Gam_shape);
catch
error(['Error computing kullback-leibler divergence of the cov matrix - ' ...
'Something strange with the data?'])
end
end
if ~isempty(orders) && ~train.uniqueAR && ndim>1
for n1=1:ndim
for n2=1:ndim
if (train.symmetricprior && n2<n1) || S(n1,n2)==0, continue; end
D = D + nu(k) * gamma_kl(hs.sigma.Gam_shape(n1,n2),hs0.sigma.Gam_shape(n1,n2), ...
hs.sigma.Gam_rate(n1,n2),hs0.sigma.Gam_rate(n1,n2));
end
end
end
if ~isempty(orders)
for i=1:length(orders)
D = D + nu(k) * gamma_kl(hs.alpha.Gam_shape,hs0.alpha.Gam_shape, ...
hs.alpha.Gam_rate(i),hs0.alpha.Gam_rate(i));
end
end
end
end
function nu = compute_nu (Pi,P)
tol = 1e-6; % renamed from eps to avoid shadowing MATLAB's builtin
nu = Pi * P;
while true
    nu0 = nu;
    nu = nu * P;
    if mean(abs(nu(:)-nu0(:))) < tol, break; end % abs() guards against sign cancellation
end
end
|
{"author": "OHBA-analysis", "repo": "HMM-MAR", "sha": "bb0433b75482e473980791a2b30afe2012cf6578", "save_path": "github-repos/MATLAB/OHBA-analysis-HMM-MAR", "path": "github-repos/MATLAB/OHBA-analysis-HMM-MAR/HMM-MAR-bb0433b75482e473980791a2b30afe2012cf6578/utils/math/hmm_kl.m"}
|
# Referenced by S. Jahnavi Prasad's detect_mask_video.py
#(https://github.com/jahnavi-prasad)
# import the necessary packages
import numpy as np
import torchvision
from torchvision import transforms
import torch
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from imutils.video import VideoStream
import argparse
import imutils
import time
import cv2
def get_model_instance_segmentation(num_classes):
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes+1)
return model
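# torchvision's detection heads reserve label 0 for background, hence the
# num_classes + 1 outputs above.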
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", type=str,
default= "/your/trained/model/path/model.pth",
help="path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
tensor_transform = transforms.Compose([
transforms.ToTensor()
])
# load trained model
print("[INFO] loading face detector model...")
model = get_model_instance_segmentation(3)
resume = args["model"]
checkpoint = torch.load(resume)
model.load_state_dict(checkpoint['state_dict'])
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = model.to(device)
# initialize the video stream and allow webcam on
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames to detect face mask
while True:
frame = vs.read()
frame = imutils.resize(frame, width=600)
model.eval()
img = tensor_transform(frame)
img = img.to(device)
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        preds = model([img])
if len(preds[0]["boxes"])==0:
pass
else:
        mask = preds[0]["scores"] > args["confidence"]  # filter weak detections with the CLI threshold
for (box, labels) in zip(preds[0]["boxes"][mask], preds[0]["labels"][mask]):
xmin, ymin, xmax, ymax = box
xmin = int(xmin)
ymin = int(ymin)
xmax = int(xmax)
ymax = int(ymax)
if labels == 1:
label = 'with_mask'
color = (0, 255, 0)
elif labels == 2:
label = 'without_mask'
color = (0, 0, 255)
elif labels == 3:
label = "mask_weared_incorrect"
color = (0, 255, 255)
cv2.putText(frame, label, (xmin, ymin - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
cv2.imshow("Frame", frame)
# torch.cuda.empty()
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
vs.stop()
|
{"hexsha": "967a54009c50d48723c4bb58601e62534b3f2fde", "size": 3029, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/inference.py", "max_stars_repo_name": "hwanseung2/Image_processing", "max_stars_repo_head_hexsha": "6b4415ed6ccde274900cfd376d50dc3093d1f849", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/inference.py", "max_issues_repo_name": "hwanseung2/Image_processing", "max_issues_repo_head_hexsha": "6b4415ed6ccde274900cfd376d50dc3093d1f849", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/inference.py", "max_forks_repo_name": "hwanseung2/Image_processing", "max_forks_repo_head_hexsha": "6b4415ed6ccde274900cfd376d50dc3093d1f849", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-21T03:57:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-21T03:57:22.000Z", "avg_line_length": 29.9900990099, "max_line_length": 98, "alphanum_fraction": 0.6599537801, "include": true, "reason": "import numpy", "num_tokens": 746}
|
from collections import Counter
from numpy import log
from sklearn.base import BaseEstimator, ClassifierMixin
from data.data_examination import make_sig_words
from data.pipelines import (tokenize_pipe,
lower_pipe,
stem_pipe,
lemmatize_pipe)
class ProbabilisticClassifier(BaseEstimator, ClassifierMixin):
def __init__(
self,
log_table=None,
counter_table=None,
beta_method=False,
stem=False,
lemma=False):
"""
Called when initializing the classifier
"""
if counter_table is None:
counter_table = {}
if log_table is None:
log_table = {}
self.counterTable = counter_table
self.logTable = log_table
self.beta_method = beta_method
self.stem = stem
self.lemma = lemma
def fit(self, sentences, labels):
distinct_labels = set(labels)
if self.beta_method:
for l in distinct_labels:
self.logTable[l] = {}
s_by_a = {a:
[s for s, a1 in zip(sentences, labels) if a1 == a]
for a in distinct_labels}
tok_s_by_a = {
k:
list(tokenize_pipe(lower_pipe(v))) for k, v in s_by_a.items()}
beta_table = make_sig_words(
stem=self.stem,
lemma=self.lemma,
other_data=tok_s_by_a)
self.beta_table = beta_table
for l in beta_table:
for w in beta_table[l]:
self.logTable[l][w] = log(beta_table[l][w])
self.miss_p = {}
for l in distinct_labels:
self.miss_p[l] = min(self.logTable[l].values())
self.trained_ = True
            return self
for l in distinct_labels:
self.counterTable[l] = Counter()
piped = lower_pipe(sentences)
piped = tokenize_pipe(piped)
if self.stem:
piped = stem_pipe(piped)
if self.lemma:
piped = lemmatize_pipe(piped)
for s, l in zip(piped, labels):
for w in s:
self.counterTable[l][w] += 1
for l in distinct_labels:
ctr = self.counterTable[l]
tw = sum(ctr.values())
self.logTable[l] = {k: log(v / tw) for k, v in ctr.items()}
        self.trained_ = True
        return self
def score_(self, w, l):
if self.hit_(w, l):
return self.logTable[l][w]
else:
if self.beta_method:
return self.miss_p[l]
else:
return 0
def hit_(self, w, l):
return w in self.logTable[l]
def predict(self, x):
try:
getattr(self, 'trained_')
except AttributeError:
raise RuntimeError('You must train the classifier before using it')
x = lower_pipe(x)
x = tokenize_pipe(x)
if self.stem:
x = stem_pipe(x)
if self.lemma:
x = lemmatize_pipe(x)
x = list(x)
return [self.predict_sen_(s) for s in x]
def predict_sen_(self, s):
words = s
scores = [
sum([self.score_(w, l) for w in words])
for l in self.logTable.keys()]
hits = [
sum([self.hit_(w, l) for w in words])
for l in self.logTable.keys()]
if self.beta_method:
maxin = scores.index(max(scores))
return list(self.logTable.keys())[maxin]
maxhits = max(hits)
merged = zip(scores, hits, self.logTable.keys())
        maxes = [(s, l) for s, h, l in merged if h == maxhits]
return max(maxes, key=lambda x: x[0])[1]
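# Illustrative usage (hypothetical data; the pipeline helpers imported above
# must be importable for fit/predict to run):
# clf = ProbabilisticClassifier(stem=True)
# clf.fit(['the sky is blue', 'stocks fell sharply'], ['weather', 'finance'])
# print(clf.predict(['blue sky today']))  # expected: ['weather']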
|
{"hexsha": "fcd09872979249de9f4967d4d9e2bf53550bb116", "size": 3820, "ext": "py", "lang": "Python", "max_stars_repo_path": "Predictors/probabilistic.py", "max_stars_repo_name": "ALT-G-AI/skynet-learns-to-write", "max_stars_repo_head_hexsha": "47009c1b375b9893cfd9c6b47a5ab41e28b25144", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Predictors/probabilistic.py", "max_issues_repo_name": "ALT-G-AI/skynet-learns-to-write", "max_issues_repo_head_hexsha": "47009c1b375b9893cfd9c6b47a5ab41e28b25144", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-03-12T17:42:07.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-01T21:32:49.000Z", "max_forks_repo_path": "Predictors/probabilistic.py", "max_forks_repo_name": "ALT-G-AI/skynet-learns-to-write", "max_forks_repo_head_hexsha": "47009c1b375b9893cfd9c6b47a5ab41e28b25144", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5074626866, "max_line_length": 82, "alphanum_fraction": 0.5170157068, "include": true, "reason": "from numpy", "num_tokens": 859}
|
(* File reduced by coq-bug-finder from original input, then from 7421 lines to 6082 lines, then from 5860 lines to 5369 lines, then from 5300 lines to 165 lines, then from 111 lines to 38 lines *)
Set Implicit Arguments.
Record PreCategory :=
{ object :> Type;
morphism : object -> object -> Type;
identity : forall x, morphism x x }.
Bind Scope category_scope with PreCategory.
Local Notation "1" := (identity _ _) : morphism_scope.
Local Open Scope morphism_scope.
Definition prod (C D : PreCategory) : PreCategory
:= @Build_PreCategory
(C * D)%type
(fun s d => (morphism C (fst s) (fst d) * morphism D (snd s) (snd d))%type)
(fun x => (identity _ (fst x), identity _ (snd x))).
Local Infix "*" := prod : category_scope.
Module NonPrim.
Record Functor (C D : PreCategory) :=
{ object_of :> C -> D;
morphism_of : forall s d, morphism C s d -> morphism D (object_of s) (object_of d);
identity_of : forall x, morphism_of _ _ (identity _ x) = identity _ (object_of x) }.
Notation "F '_1' m" := (morphism_of F _ _ m) (at level 10, no associativity) : morphism_scope.
Goal forall C1 C2 D (F : Functor (C1 * C2) D) x, F _1 (1, 1) = identity _ (F x).
Proof.
intros.
rewrite identity_of.
reflexivity.
Qed.
End NonPrim.
Module Prim.
Set Primitive Projections.
Record Functor (C D : PreCategory) :=
{ object_of :> C -> D;
morphism_of : forall s d, morphism C s d -> morphism D (object_of s) (object_of d);
identity_of : forall x, morphism_of _ _ (identity _ x) = identity _ (object_of x) }.
Notation "F '_1' m" := (morphism_of F _ _ m) (at level 10, no associativity) : morphism_scope.
Goal forall C1 C2 D (F : Functor (C1 * C2) D) x, F _1 (1, 1) = identity _ (F x).
Proof.
intros.
rewrite identity_of. (* Toplevel input, characters 0-20:
Error:
Found no subterm matching "morphism_of ?192 ?193 ?193 (identity ?190 ?193)" in the current goal. *)
reflexivity.
Qed.
End Prim.
|
{"author": "princeton-vl", "repo": "CoqGym", "sha": "0c03a6fba3a3ea7e2aecedc1c624ff3885f7267e", "save_path": "github-repos/coq/princeton-vl-CoqGym", "path": "github-repos/coq/princeton-vl-CoqGym/CoqGym-0c03a6fba3a3ea7e2aecedc1c624ff3885f7267e/coq/test-suite/bugs/closed/3505.v"}
|
# Copyright (c) 2009,2016,2019 MetPy Developers.
# Copyright (c) 2021 Nathan Wendt.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for reading GEMPAK files."""
from collections import namedtuple
import struct
import numpy as np
class NamedStruct(struct.Struct):
"""Parse bytes using `Struct` but provide named fields.
Class from MetPy.
"""
def __init__(self, info, prefmt='', tuple_name=None):
"""Initialize the NamedStruct."""
if tuple_name is None:
tuple_name = 'NamedStruct'
names, fmts = zip(*info)
self.converters = {}
conv_off = 0
for ind, i in enumerate(info):
if len(i) > 2:
self.converters[ind - conv_off] = i[-1]
elif not i[0]: # Skip items with no name
conv_off += 1
self._tuple = namedtuple(tuple_name, ' '.join(n for n in names if n))
super().__init__(prefmt + ''.join(f for f in fmts if f))
def _create(self, items):
if self.converters:
items = list(items)
for ind, conv in self.converters.items():
items[ind] = conv(items[ind])
if len(items) < len(self._tuple._fields):
items.extend([None] * (len(self._tuple._fields) - len(items)))
return self.make_tuple(*items)
def make_tuple(self, *args, **kwargs):
"""Construct the underlying tuple from values."""
return self._tuple(*args, **kwargs)
def unpack(self, s):
"""Parse bytes and return a namedtuple."""
return self._create(super().unpack(s))
def unpack_from(self, buff, offset=0):
"""Read bytes from a buffer and return as a namedtuple."""
return self._create(super().unpack_from(buff, offset))
def unpack_file(self, fobj):
"""Unpack the next bytes from a file object."""
return self.unpack(fobj.read(self.size))
def pack(self, **kwargs):
"""Pack the arguments into bytes using the structure."""
t = self.make_tuple(**kwargs)
return super().pack(*t)
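# Illustrative use (not from the original module): parse two named
# little-endian int32 fields.
# info = [('year', 'i'), ('month', 'i')]
# s = NamedStruct(info, prefmt='<', tuple_name='Date')
# s.unpack(b'\xe5\x07\x00\x00\x0c\x00\x00\x00')  # -> Date(year=2021, month=12)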
class IOBuffer:
"""Holds bytes from a buffer to simplify parsing and random access.
Class from MetPy.
"""
def __init__(self, source):
"""Initialize the IOBuffer with the source data."""
self._data = bytearray(source)
self.reset()
@classmethod
def fromfile(cls, fobj):
"""Initialize the IOBuffer with the contents of the file object."""
return cls(fobj.read())
def reset(self):
"""Reset buffer back to initial state."""
self._offset = 0
self.clear_marks()
def set_mark(self):
"""Mark the current location and return its id so that the buffer can return later."""
self._bookmarks.append(self._offset)
return len(self._bookmarks) - 1
def jump_to(self, mark, offset=0):
"""Jump to a previously set mark."""
self._offset = self._bookmarks[mark] + offset
def offset_from(self, mark):
"""Calculate the current offset relative to a marked location."""
return self._offset - self._bookmarks[mark]
def clear_marks(self):
"""Clear all marked locations."""
self._bookmarks = []
def splice(self, mark, newdata):
"""Replace the data after the marked location with the specified data."""
self.jump_to(mark)
self._data = self._data[:self._offset] + bytearray(newdata)
def read_struct(self, struct_class):
"""Parse and return a structure from the current buffer offset."""
struct = struct_class.unpack_from(memoryview(self._data), self._offset)
self.skip(struct_class.size)
return struct
def read_func(self, func, num_bytes=None):
"""Parse data from the current buffer offset using a function."""
# only advance if func succeeds
res = func(self.get_next(num_bytes))
self.skip(num_bytes)
return res
def read_ascii(self, num_bytes=None):
"""Return the specified bytes as ascii-formatted text."""
return self.read(num_bytes).decode('ascii')
def read_binary(self, num, item_type='B'):
"""Parse the current buffer offset as the specified code."""
if 'B' in item_type:
return self.read(num)
if item_type[0] in ('@', '=', '<', '>', '!'):
order = item_type[0]
item_type = item_type[1:]
else:
order = '@'
return list(
self.read_struct(struct.Struct(order + '{:d}'.format(int(num)) + item_type))
)
def read_int(self, size, endian, signed):
"""Parse the current buffer offset as the specified integer code."""
return int.from_bytes(self.read(size), endian, signed=signed)
def read_array(self, count, dtype):
"""Read an array of values from the buffer."""
ret = np.frombuffer(self._data, offset=self._offset, dtype=dtype, count=count)
self.skip(ret.nbytes)
return ret
def read(self, num_bytes=None):
"""Read and return the specified bytes from the buffer."""
res = self.get_next(num_bytes)
self.skip(len(res))
return res
def get_next(self, num_bytes=None):
"""Get the next bytes in the buffer without modifying the offset."""
if num_bytes is None:
return self._data[self._offset:]
else:
return self._data[self._offset:self._offset + num_bytes]
def skip(self, num_bytes):
"""Jump the ahead the specified bytes in the buffer."""
if num_bytes is None:
self._offset = len(self._data)
else:
self._offset += num_bytes
def check_remains(self, num_bytes):
"""Check that the number of bytes specified remains in the buffer."""
return len(self._data[self._offset:]) == num_bytes
def truncate(self, num_bytes):
"""Remove the specified number of bytes from the end of the buffer."""
self._data = self._data[:-num_bytes]
def at_end(self):
"""Return whether the buffer has reached the end of data."""
return self._offset >= len(self._data)
def __getitem__(self, item):
"""Return the data at the specified location."""
return self._data[item]
def __str__(self):
"""Return a string representation of the IOBuffer."""
return 'Size: {} Offset: {}'.format(len(self._data), self._offset)
def __len__(self):
"""Return the amount of data in the buffer."""
return len(self._data)
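# Illustrative use (not from the original module):
# buf = IOBuffer(b'\x01\x02\x03\x04')
# buf.read_binary(2)                # -> bytearray(b'\x01\x02')
# buf.read_int(2, 'little', False)  # -> 1027 (0x0403)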
|
{"hexsha": "eb4ee35d43dc8c27294410ca46f8fe199f0c0583", "size": 6625, "ext": "py", "lang": "Python", "max_stars_repo_path": "gempakio/tools.py", "max_stars_repo_name": "nawendt/gem2py", "max_stars_repo_head_hexsha": "d0bf086dec84d7cf28549177b8b9e402e29860db", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gempakio/tools.py", "max_issues_repo_name": "nawendt/gem2py", "max_issues_repo_head_hexsha": "d0bf086dec84d7cf28549177b8b9e402e29860db", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gempakio/tools.py", "max_forks_repo_name": "nawendt/gem2py", "max_forks_repo_head_hexsha": "d0bf086dec84d7cf28549177b8b9e402e29860db", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3264248705, "max_line_length": 94, "alphanum_fraction": 0.6111698113, "include": true, "reason": "import numpy", "num_tokens": 1479}
|
# Import the required libraries needed
# for the application to run via Flask
# and the Machine learning Model
import os
import pickle
import numpy as np
from flask import Flask
from flask import render_template
from flask import url_for
from flask import request
# Initialize the Application
app = Flask(__name__)
# The function below renders the prediction
# template, which gives end-users different
# options to select the required data
@app.route('/')
def prediction():
return render_template('predict.html')
# The function below reshapes the submitted values
# into a 1x12 numpy array and loads the trained
# model from the pickle file to make a prediction
def ValuePredictor(to_predict_list):
to_predict = np.array(to_predict_list).reshape(1,12)
loaded_model = pickle.load(open("model.pkl","rb"))
result = loaded_model.predict(to_predict)
return result[0]
# This route actually checks if the form has been filled
# and then returns the values of the form and pass it to
# the Prediction template to retrieve results.
@app.route('/result',methods = ['POST'])
def result():
if request.method == 'POST':
to_predict_list = request.form.to_dict()
to_predict_list=list(to_predict_list.values())
to_predict_list = list(map(int, to_predict_list))
result = ValuePredictor(to_predict_list)
# After the values has been checked by mapping
# the values of the form from a dictionary to a
# List return 1 if condition is true or 0 if false
if int(result)==1:
prediction='Income more than 50K'
else:
        prediction='Income less than 50K'
return render_template("result.html",prediction=prediction)
# Finally run the main event Loop of the Application
if __name__ == '__main__':
app.run(debug=True)
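# Example request (hypothetical field names; the posted form must contain
# exactly 12 integer fields):
# curl -X POST --data 'a=1&b=2&c=3&d=4&e=5&f=6&g=7&h=8&i=9&j=0&k=1&l=2' \
#      http://localhost:5000/result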
|
{"hexsha": "b39a92dfc0b51ca844c969a694237be867b5fd33", "size": 1812, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/app.py", "max_stars_repo_name": "jim-nnamdi/Loanapplicant-salaryPredictor", "max_stars_repo_head_hexsha": "8f01bd70804a21050ea38fc24c6a0b18efacb4b7", "max_stars_repo_licenses": ["bzip2-1.0.6"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-04T17:52:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-10T21:43:10.000Z", "max_issues_repo_path": "app/app.py", "max_issues_repo_name": "MetroSpinnin/Loanapplicant-salaryPredictor", "max_issues_repo_head_hexsha": "8f01bd70804a21050ea38fc24c6a0b18efacb4b7", "max_issues_repo_licenses": ["bzip2-1.0.6"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/app.py", "max_forks_repo_name": "MetroSpinnin/Loanapplicant-salaryPredictor", "max_forks_repo_head_hexsha": "8f01bd70804a21050ea38fc24c6a0b18efacb4b7", "max_forks_repo_licenses": ["bzip2-1.0.6"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5555555556, "max_line_length": 67, "alphanum_fraction": 0.7246136865, "include": true, "reason": "import numpy", "num_tokens": 393}
|
"""Very fancy downsampling."""
import numpy
from gewittergefahr.gg_utils import target_val_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import deep_learning_utils as dl_utils
LARGE_INTEGER = int(1e12)
FRACTION_UNINTERESTING_TIMES_TO_OMIT = 0.8
def _report_class_fractions(target_values):
"""Reports fraction of examples in each class.
:param target_values: 1-D numpy array of target values (integer class
labels).
"""
unique_target_values, unique_counts = numpy.unique(
target_values, return_counts=True)
print('\n')
for k in range(len(unique_target_values)):
print('{0:d} examples in class = {1:d}'.format(
unique_counts[k], unique_target_values[k]
))
print('\n')
def _find_storm_cells(object_id_strings, desired_cell_id_strings):
"""Finds storm IDs from set 2 in set 1.
N = number of storm objects
n = number of desired storm cells
:param object_id_strings: length-N list with all storm IDs.
:param desired_cell_id_strings: length-n list with desired storm IDs.
:return: relevant_indices: 1-D numpy array of indices, such that
`object_id_strings[relevant_indices]` yields all IDs in
`object_id_strings` that are in `desired_cell_id_strings`, including
duplicates.
:raises: ValueError: if not all desired ID were found in
`object_id_strings`.
"""
desired_cell_id_strings = numpy.unique(numpy.array(desired_cell_id_strings))
relevant_flags = numpy.in1d(
numpy.array(object_id_strings), desired_cell_id_strings,
assume_unique=False)
relevant_indices = numpy.where(relevant_flags)[0]
cell_id_strings = numpy.unique(
numpy.array(object_id_strings)[relevant_indices]
)
if numpy.array_equal(cell_id_strings, desired_cell_id_strings):
return relevant_indices
error_string = (
'\nDesired storm IDs:\n{0:s}\nFound storm IDs:\n{1:s}\nNot all desired '
'storm IDs were found, as shown above.'
).format(
str(desired_cell_id_strings), str(cell_id_strings)
)
raise ValueError(error_string)
def _find_uncovered_times(all_times_unix_sec, covered_times_unix_sec):
"""Finds times in set 1 that are not in set 2.
:param all_times_unix_sec: 1-D numpy array with all times.
:param covered_times_unix_sec: 1-D numpy array of covered times.
:return: uncovered_indices: 1-D numpy array of indices, such that
`all_times_unix_sec[uncovered_indices]` yields all times in
`all_times_unix_sec` that are not in `covered_times_unix_sec`, including
duplicates.
:raises: ValueError: if not all covered times were found in
`all_times_unix_sec`.
"""
covered_times_unix_sec = numpy.unique(covered_times_unix_sec)
covered_flags = numpy.in1d(
all_times_unix_sec, covered_times_unix_sec, assume_unique=False)
covered_indices = numpy.where(covered_flags)[0]
found_times_unix_sec = numpy.unique(all_times_unix_sec[covered_indices])
if numpy.array_equal(found_times_unix_sec, covered_times_unix_sec):
return numpy.where(numpy.invert(covered_flags))[0]
error_string = (
'\nCovered times:\n{0:s}\nCovered times found in all_times_unix_sec:\n'
'{1:s}\nNot all covered times were found, as shown above.'
).format(str(covered_times_unix_sec), str(found_times_unix_sec))
raise ValueError(error_string)
def _downsampling_base(
primary_id_strings, storm_times_unix_sec, target_values, target_name,
class_fraction_dict, test_mode=False):
"""Base for `downsample_for_training` and `downsample_for_non_training`.
The procedure is described below.
[1] Find all storm objects in the highest class (e.g., tornadic). Call this
set {s_highest}.
[2] Find all storm cells with at least one object in {s_highest}. Call this
set {S_highest}.
[3] Find all time steps with at least one storm cell in {S_highest}. Call
this set {t_highest}.
[4] Randomly remove a large fraction of time steps NOT in {t_highest}.
[5] Downsample remaining storm objects, leaving a prescribed fraction in
each class (according to `class_fraction_dict`).
N = number of storm objects before downsampling
:param primary_id_strings: length-N list of primary storm IDs.
:param storm_times_unix_sec: length-N numpy array of corresponding times.
:param target_values: length-N numpy array of corresponding target values
(integer class labels).
:param target_name: Name of target variable (must be accepted by
`target_val_utils.target_name_to_params`).
:param class_fraction_dict: Dictionary, where each key is an integer class
label (-2 for "dead storm") and the corresponding value is the
sampling fraction.
:param test_mode: Never mind. Just leave this alone.
:return: indices_to_keep: 1-D numpy array of indices to keep.
"""
_report_class_fractions(target_values)
error_checking.assert_is_boolean(test_mode)
num_storm_objects = len(primary_id_strings)
num_classes = target_val_utils.target_name_to_num_classes(
target_name=target_name, include_dead_storms=False)
# Step 1.
print((
'Finding storm objects in class {0:d} (the highest class), yielding set'
' {{s_highest}}...'
).format(num_classes - 1))
highest_class_indices = numpy.where(target_values == num_classes - 1)[0]
print('{{s_highest}} contains {0:d} of {1:d} storm objects.'.format(
len(highest_class_indices), num_storm_objects
))
# Step 2.
    print('Finding storm cells with at least one object in {s_highest}, '
          'yielding set {S_highest}...')
highest_class_indices = _find_storm_cells(
object_id_strings=primary_id_strings,
desired_cell_id_strings=
[primary_id_strings[k] for k in highest_class_indices]
)
print('{{S_highest}} contains {0:d} of {1:d} storm objects.'.format(
len(highest_class_indices), num_storm_objects)
)
# Step 3.
    print('Finding all time steps with at least one storm cell in '
          '{S_highest}, yielding set {t_highest}...')
lower_class_times_unix_sec = (
set(storm_times_unix_sec.tolist()) -
set(storm_times_unix_sec[highest_class_indices].tolist())
)
lower_class_times_unix_sec = numpy.array(
list(lower_class_times_unix_sec), dtype=int)
# Step 4.
print('Randomly removing {0:.1f}% of times not in {{t_highest}}...'.format(
FRACTION_UNINTERESTING_TIMES_TO_OMIT * 100))
this_num_times = int(numpy.round(
FRACTION_UNINTERESTING_TIMES_TO_OMIT * len(lower_class_times_unix_sec)
))
if test_mode:
times_to_remove_unix_sec = lower_class_times_unix_sec[:this_num_times]
else:
times_to_remove_unix_sec = numpy.random.choice(
lower_class_times_unix_sec, size=this_num_times, replace=False)
indices_to_keep = _find_uncovered_times(
all_times_unix_sec=storm_times_unix_sec,
covered_times_unix_sec=times_to_remove_unix_sec)
_report_class_fractions(target_values[indices_to_keep])
# Step 5.
print('Downsampling storm objects from remaining times...')
subindices_to_keep = dl_utils.sample_by_class(
sampling_fraction_by_class_dict=class_fraction_dict,
target_name=target_name, target_values=target_values[indices_to_keep],
num_examples_total=LARGE_INTEGER, test_mode=test_mode)
return indices_to_keep[subindices_to_keep]
def downsample_for_non_training(
primary_id_strings, storm_times_unix_sec, target_values, target_name,
class_fraction_dict, test_mode=False):
"""Fancy downsampling to create validation or testing data.
The procedure is described in `_downsampling_base`.
N = number of storm objects before downsampling
n = number of storm objects after final downsampling
:param primary_id_strings: See doc for `_downsampling_base`.
:param storm_times_unix_sec: Same.
:param target_values: Same.
:param target_name: Same.
:param class_fraction_dict: Same.
:param test_mode: Same.
:return: indices_to_keep: Same.
"""
indices_to_keep = _downsampling_base(
primary_id_strings=primary_id_strings,
storm_times_unix_sec=storm_times_unix_sec,
target_values=target_values, target_name=target_name,
class_fraction_dict=class_fraction_dict, test_mode=test_mode)
_report_class_fractions(target_values[indices_to_keep])
return indices_to_keep
def downsample_for_training(
primary_id_strings, storm_times_unix_sec, target_values, target_name,
class_fraction_dict, test_mode=False):
"""Fancy downsampling to create training data.
The procedure is described below.
[1-5] Run `downsample_for_non_training`.
[6] Find remaining storm objects in the highest class (e.g., tornadic).
Call this set {s_highest}.
[7] Find remaining storm cells with at least one object in {s_highest}.
Call this set {S_highest}. Add storm objects from {S_highest} to the
selected set.
:param primary_id_strings: See doc for `_downsampling_base`.
:param storm_times_unix_sec: Same.
:param target_values: Same.
:param target_name: Same.
:param class_fraction_dict: Same.
:param test_mode: Same.
:return: indices_to_keep: Same.
"""
indices_to_keep = _downsampling_base(
primary_id_strings=primary_id_strings,
storm_times_unix_sec=storm_times_unix_sec,
target_values=target_values, target_name=target_name,
class_fraction_dict=class_fraction_dict, test_mode=test_mode)
num_classes = target_val_utils.target_name_to_num_classes(
target_name=target_name, include_dead_storms=False)
# Step 6.
print((
'Finding storm objects in class {0:d} (the highest class), yielding set'
' {{s_highest}}...'
).format(num_classes - 1))
these_subindices = numpy.where(
target_values[indices_to_keep] == num_classes - 1
)[0]
highest_class_indices = indices_to_keep[these_subindices]
print('{{s_highest}} contains {0:d} of {1:d} storm objects.'.format(
len(highest_class_indices), len(indices_to_keep)
))
# Step 7.
    print('Finding storm cells with at least one object in {s_highest}, '
          'yielding set {S_highest}...')
highest_class_indices = _find_storm_cells(
object_id_strings=primary_id_strings,
desired_cell_id_strings=
[primary_id_strings[k] for k in highest_class_indices]
)
print('{{S_highest}} contains {0:d} of {1:d} storm objects.'.format(
len(highest_class_indices), len(primary_id_strings)
))
indices_to_keep = (
set(indices_to_keep.tolist()) | set(highest_class_indices.tolist())
)
indices_to_keep = numpy.array(list(indices_to_keep), dtype=int)
_report_class_fractions(target_values[indices_to_keep])
return indices_to_keep
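# Illustrative call (hypothetical argument values; the target name must be one
# accepted by target_val_utils.target_name_to_params):
# indices = downsample_for_training(
#     primary_id_strings=id_strings, storm_times_unix_sec=times_unix_sec,
#     target_values=target_values, target_name=target_name,
#     class_fraction_dict={0: 0.5, 1: 0.5})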
|
{"hexsha": "a61e6775624d50be2281d903a041f3417ed657b0", "size": 11107, "ext": "py", "lang": "Python", "max_stars_repo_path": "gewittergefahr/deep_learning/fancy_downsampling.py", "max_stars_repo_name": "dopplerchase/GewitterGefahr", "max_stars_repo_head_hexsha": "4415b08dd64f37eba5b1b9e8cc5aa9af24f96593", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-10-04T01:07:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T08:49:32.000Z", "max_issues_repo_path": "gewittergefahr/deep_learning/fancy_downsampling.py", "max_issues_repo_name": "liuximarcus/GewitterGefahr", "max_issues_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-12-25T02:01:08.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-19T01:54:21.000Z", "max_forks_repo_path": "gewittergefahr/deep_learning/fancy_downsampling.py", "max_forks_repo_name": "liuximarcus/GewitterGefahr", "max_forks_repo_head_hexsha": "d819874d616f98a25187bfd3091073a2e6d5279e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-12-10T23:05:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T08:49:33.000Z", "avg_line_length": 36.1791530945, "max_line_length": 80, "alphanum_fraction": 0.7159448996, "include": true, "reason": "import numpy", "num_tokens": 2592}
|
[STATEMENT]
lemma vec1_index: assumes j: "j < n"
shows "vec1I ze on n i ! j = (if i = j then on else ze)" (is "_ = ?r")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vec1I ze on n i ! j = (if i = j then on else ze)
[PROOF STEP]
unfolding vec1I_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
let ?l = "replicate i ze @ on # replicate (n - 1 - i) ze"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
have len: "length ?l > i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i < length (replicate i ze @ on # replicate (n - 1 - i) ze)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
i < length (replicate i ze @ on # replicate (n - 1 - i) ze)
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
have len2: "length (replicate i ze @ on # []) > i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. i < length (replicate i ze @ [on])
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
i < length (replicate i ze @ [on])
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
show "?l ! j = ?r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
proof (cases "j = i")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. j = i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
2. j \<noteq> i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
j = i
goal (2 subgoals):
1. j = i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
2. j \<noteq> i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
j = i
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
by (simp add: nth_append)
[PROOF STATE]
proof (state)
this:
(replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
goal (1 subgoal):
1. j \<noteq> i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. j \<noteq> i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
j \<noteq> i
goal (1 subgoal):
1. j \<noteq> i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
proof (cases "j < i")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
2. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
j < i
goal (2 subgoals):
1. j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
2. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
j < i
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
by (simp add: nth_append)
[PROOF STATE]
proof (state)
this:
(replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
goal (1 subgoal):
1. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> j < i
goal (1 subgoal):
1. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
with \<open>j \<noteq> i\<close>
[PROOF STATE]
proof (chain)
picking this:
j \<noteq> i
\<not> j < i
[PROOF STEP]
have gt: "j > i"
[PROOF STATE]
proof (prove)
using this:
j \<noteq> i
\<not> j < i
goal (1 subgoal):
1. i < j
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
i < j
goal (1 subgoal):
1. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
i < j
[PROOF STEP]
have "\<exists> k. j = i + Suc k"
[PROOF STATE]
proof (prove)
using this:
i < j
goal (1 subgoal):
1. \<exists>k. j = i + Suc k
[PROOF STEP]
by arith
[PROOF STATE]
proof (state)
this:
\<exists>k. j = i + Suc k
goal (1 subgoal):
1. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
\<exists>k. j = i + Suc k
[PROOF STEP]
obtain k where k: "j = i + Suc k"
[PROOF STATE]
proof (prove)
using this:
\<exists>k. j = i + Suc k
goal (1 subgoal):
1. (\<And>k. j = i + Suc k \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
j = i + Suc k
goal (1 subgoal):
1. \<not> j < i \<Longrightarrow> (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
with j
[PROOF STATE]
proof (chain)
picking this:
j < n
j = i + Suc k
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
j < n
j = i + Suc k
goal (1 subgoal):
1. (replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
[PROOF STEP]
by (simp add: nth_append)
[PROOF STATE]
proof (state)
this:
(replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(replicate i ze @ on # replicate (n - 1 - i) ze) ! j = (if i = j then on else ze)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2980, "file": "Matrix_Matrix_Legacy", "length": 36}
|
names = readdir(".")
for name in names
if startswith(name, "test_") && endswith(name, ".jl")
println("Run $name")
run(`julia $name`)
end
end
|
{"hexsha": "d81fe06d4023e9006664ace2db455e531c59f0b6", "size": 163, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "tkelman/QuantumOptics.jl", "max_stars_repo_head_hexsha": "1ccd79e1bb8775796703b8d2c407c1c74429e291", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "tkelman/QuantumOptics.jl", "max_issues_repo_head_hexsha": "1ccd79e1bb8775796703b8d2c407c1c74429e291", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "tkelman/QuantumOptics.jl", "max_forks_repo_head_hexsha": "1ccd79e1bb8775796703b8d2c407c1c74429e291", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.1111111111, "max_line_length": 57, "alphanum_fraction": 0.5582822086, "num_tokens": 48}
|
import hashlib
from itertools import cycle
from random import Random
import numpy as np
from numpy import linalg
class SubsCipherAlgo:
def __init__(self, key):
self.key = key
self.table = list()
self.clean_rand = Random()
def gen_table(self, bytes_number, cols):
r = Random()
r.seed(self.key)
table = list(range(1, (2 ** (bytes_number * 8)) * cols))
r.shuffle(table)
self.table = [table[index: index + cols] for index in range(0, len(table), cols)]
def encrypt(self, block, round_key):
self.clean_rand.seed(round_key)
return chr(self.clean_rand.choice(self.table[block]))
def decrypt(self, enc_block, round_key):
dec_block = 0
while dec_block < len(self.table):
if enc_block in self.table[dec_block]:
break
dec_block += 1
return dec_block
class MatrixCipher:
def __init__(self, key, side):
key_list = [ord(ch) for ch in key]
self.key_matrix = np.array(key_list).reshape(side, side)
    def encrypt(self, input_matrix):
        # reshape to a column vector matching the key matrix (the original
        # hard-coded 4, which only worked for side == 4)
        return np.matmul(self.key_matrix, input_matrix.reshape(-1, 1))
def decrypt(self, input_matrix):
return np.matmul(linalg.inv(self.key_matrix), input_matrix)
class VigenerCipher:
def __init__(self):
self.alph_size = 2 ** 16
def encrypt(self, block_item, sub_key):
return (block_item + sub_key) % self.alph_size
def decrypt(self, block_item, sub_key):
return (block_item - sub_key) % self.alph_size
class StandartEncryptionModes:
def __init__(self, key, block_len):
self.key = self.key256(str(key).encode('utf-8'))
self.block_len = block_len
def __get_iter_key(self):
key_cycle = cycle(iter(self.key))
for sub_key in key_cycle:
yield ord(sub_key)
def __get_block_stream(self, text_stream):
iter_flag = True
while iter_flag:
block = []
try:
for i in range(self.block_len):
block.append(next(text_stream))
except StopIteration:
                while len(block) < self.block_len and len(block) != 0:
                    block.append(32)
                iter_flag = False
            if len(block) != 0:
                yield block
def ecb_crypt(self, text_stream, subc_f):
kstream = self.__get_iter_key()
bstream = self.__get_block_stream(text_stream)
yield from [subc_f(item, next(kstream)) for block in bstream for item in block]
def cbc_encrypt(self, text_stream, init_block, subc_f):
kstream = self.__get_iter_key()
bstream = self.__get_block_stream(text_stream)
temp = None
for block in bstream:
temp = [subc_f(item, next(kstream))
for item in [(item1 ^ item2) for item1, item2 in zip(init_block, block)]]
init_block = iter(temp)
yield from temp
def cbc_decrypt(self, text_stream, init_block, subc_f):
kstream = self.__get_iter_key()
bstream = self.__get_block_stream(text_stream)
for block in bstream:
yield from [item1 ^ item2
for item1, item2 in zip(init_block, [subc_f(item, next(kstream)) for item in block])]
init_block = block
def cfb_encrypt(self, text_stream, init_block, subc_f):
kstream = self.__get_iter_key()
bstream = self.__get_block_stream(text_stream)
for block in bstream:
init_block = [item1 ^ item2
for item1, item2 in zip(block, [subc_f(item, next(kstream)) for item in init_block])]
yield from init_block
def cfb_decrypt(self, text_stream, init_block, subc_f):
kstream = self.__get_iter_key()
bstream = self.__get_block_stream(text_stream)
for block in bstream:
yield from [item1 ^ item2
for item1, item2 in zip(block, [subc_f(item, next(kstream)) for item in init_block])]
init_block = block
def ofb_crypt(self, text_stream, init_block, subc_f):
kstream = self.__get_iter_key()
bstream = self.__get_block_stream(text_stream)
for block in bstream:
init_block = [subc_f(item, next(kstream)) for item in init_block]
yield from [item1 ^ item2 for item1, item2 in zip(block, init_block)]
@staticmethod
def key256(key):
return hashlib.sha256(key).hexdigest()
class FeistelNet:
def __init__(self, key, block_len, rounds_number, sub_cipher):
self.key = self.key256(str(key).encode('utf-8'))
self.block_len = block_len
self.rounds_number = rounds_number
# self.sub_cipher = MatrixCipher(self.key[0:(block_len // 2) ** 2], self.block_len // 2)
self.sub_cipher = sub_cipher
def f_round(self, prev_lblock, prev_rblock, sub_key, func):
next_rblock = prev_lblock
next_lblock = [item1 ^ item2
for item1, item2 in zip(prev_rblock, [func(item, sub_key) for item in prev_lblock])]
return next_lblock, next_rblock
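    # Added helper (not in the original file): the exact inverse of f_round,
    # assuming the intent was a standard Feistel inversion; decrypt below
    # uses it so that decryption undoes encryption round by round.
    def f_round_inv(self, next_lblock, next_rblock, sub_key, func):
        prev_lblock = next_rblock
        prev_rblock = [item1 ^ item2
                       for item1, item2 in zip(next_lblock, [func(item, sub_key) for item in prev_lblock])]
        return prev_lblock, prev_rblock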
def __get_block_stream(self, text_stream):
iter_flag = True
while iter_flag:
block = []
try:
for i in range(self.block_len):
block.append(next(text_stream))
except StopIteration:
                while len(block) < self.block_len and len(block) != 0:
                    block.append(32)
                iter_flag = False
            if len(block) != 0:
                yield block
def __get_iter_key(self):
key_cycle = cycle(iter(self.key))
for sub_key in key_cycle:
yield ord(sub_key)
def __get_reversed_iter_key(self):
key_stream = self.__get_iter_key()
reversed_key_stream = [next(key_stream) for _ in range(self.rounds_number)]
yield from reversed(reversed_key_stream)
def encrypt(self, text_stream):
bstream = self.__get_block_stream(text_stream)
kstream = self.__get_iter_key()
for block in bstream:
prev_lblock, prev_rblock = block[0: self.block_len // 2], block[self.block_len // 2: self.block_len]
for index in range(1, self.rounds_number + 1):
prev_lblock, prev_rblock = self.f_round(prev_lblock, prev_rblock, next(kstream), self.sub_cipher.encrypt)
yield from (prev_lblock + prev_rblock)
    def decrypt(self, enc_text_stream):
        enc_block_stream = self.__get_block_stream(enc_text_stream)
        kstream = self.__get_iter_key()
        for block in enc_block_stream:
            # gather this block's round keys, then undo the rounds in
            # reverse key order using the inverse round function
            round_keys = [next(kstream) for _ in range(self.rounds_number)]
            prev_lblock, prev_rblock = block[0: self.block_len // 2], block[self.block_len // 2: self.block_len]
            for sub_key in reversed(round_keys):
                prev_lblock, prev_rblock = self.f_round_inv(prev_lblock, prev_rblock, sub_key, self.sub_cipher.encrypt)
            yield from (prev_lblock + prev_rblock)
@staticmethod
def key256(key):
return hashlib.sha256(key).hexdigest()
if __name__ == '__main__':
key = 'a'
bytes_number = 2
rounds = 2
cols = 6
text = 'AB'
t = iter(map(ord, text))
vc = VigenerCipher()
f = FeistelNet(key, bytes_number, rounds, vc)
print(f.key, len(f.key))
# enc_t = f.encrypt(t)
# et = [_ for _ in enc_t]
# dec_t = f.decrypt(iter(et))
#
# dt = [_ for _ in dec_t]
# orig_text = ''.join([chr(item) for item in dt])
pass
|
{"hexsha": "6a7d3b4fd9c311f0e676a23d7e81631b0f983cb4", "size": 7573, "ext": "py", "lang": "Python", "max_stars_repo_path": "ciplib.py", "max_stars_repo_name": "Kamkas/Block-cipher", "max_stars_repo_head_hexsha": "69eb5b71a63e013515485bf0bc9cb4610fcf17cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ciplib.py", "max_issues_repo_name": "Kamkas/Block-cipher", "max_issues_repo_head_hexsha": "69eb5b71a63e013515485bf0bc9cb4610fcf17cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ciplib.py", "max_forks_repo_name": "Kamkas/Block-cipher", "max_forks_repo_head_hexsha": "69eb5b71a63e013515485bf0bc9cb4610fcf17cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7216981132, "max_line_length": 121, "alphanum_fraction": 0.6144196488, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1849}
|
from manimlib import *
import numpy as np
# To watch one of these scenes, run the following:
# python -m manim example_scenes.py SquareToCircle
# Use -s to skip to the end and just save the final frame
# Use -w to write the animation to a file
# Use -o to write it to a file and open it once done
# Use -n <number> to skip ahead to the n'th animation of a scene.
class OpeningManimExample(Scene):
def construct(self):
intro_words = Text("""
The original motivation for manim was to
better illustrate mathematical functions
as transformations.
""")
intro_words.to_edge(UP)
self.play(Write(intro_words))
self.wait(2)
# Linear transform
grid = NumberPlane((-10, 10), (-5, 5))
matrix = [[1, 1], [0, 1]]
linear_transform_words = VGroup(
Text("This is what the matrix"),
IntegerMatrix(matrix, include_background_rectangle=True),
Text("looks like")
)
linear_transform_words.arrange(RIGHT)
linear_transform_words.to_edge(UP)
linear_transform_words.set_stroke(BLACK, 10, background=True)
self.play(
ShowCreation(grid),
FadeTransform(intro_words, linear_transform_words)
)
self.wait()
self.play(grid.animate.apply_matrix(matrix), run_time=3)
self.wait()
# Complex map
c_grid = ComplexPlane()
moving_c_grid = c_grid.copy()
moving_c_grid.prepare_for_nonlinear_transform()
c_grid.set_stroke(BLUE_E, 1)
c_grid.add_coordinate_labels(font_size=24)
complex_map_words = TexText("""
Or thinking of the plane as $\\mathds{C}$,\\\\
this is the map $z \\rightarrow z^2$
""")
complex_map_words.to_corner(UR)
complex_map_words.set_stroke(BLACK, 5, background=True)
self.play(
FadeOut(grid),
Write(c_grid, run_time=3),
FadeIn(moving_c_grid),
FadeTransform(linear_transform_words, complex_map_words),
)
self.wait()
self.play(
moving_c_grid.animate.apply_complex_function(lambda z: z**2),
run_time=6,
)
self.wait(2)
class AnimatingMethods(Scene):
def construct(self):
grid = Tex(r"\pi").get_grid(10, 10, height=4)
self.add(grid)
# You can animate the application of mobject methods with the
# ".animate" syntax:
self.play(grid.animate.shift(LEFT))
# Alternatively, you can use the older syntax by passing the
# method and then the arguments to the scene's "play" function:
self.play(grid.shift, LEFT)
# Both of those will interpolate between the mobject's initial
# state and whatever happens when you apply that method.
# For this example, calling grid.shift(LEFT) would shift the
# grid one unit to the left, but both of the previous calls to
# "self.play" animate that motion.
# The same applies for any method, including those setting colors.
self.play(grid.animate.set_color(YELLOW))
self.wait()
self.play(grid.animate.set_submobject_colors_by_gradient(BLUE, GREEN))
self.wait()
self.play(grid.animate.set_height(TAU - MED_SMALL_BUFF))
self.wait()
# The method Mobject.apply_complex_function lets you apply arbitrary
# complex functions, treating the points defining the mobject as
# complex numbers.
self.play(grid.animate.apply_complex_function(np.exp), run_time=5)
self.wait()
# Even more generally, you could apply Mobject.apply_function,
# which takes in functions from R^3 to R^3
self.play(
grid.animate.apply_function(
lambda p: [
p[0] + 0.5 * math.sin(p[1]),
p[1] + 0.5 * math.sin(p[0]),
p[2]
]
),
run_time=5,
)
self.wait()
class TextExample(Scene):
def construct(self):
# To run this scene properly, you should have "Consolas" font in your computer
# for full usage, you can see https://github.com/3b1b/manim/pull/680
text = Text("Here is a text", font="Consolas", font_size=90)
difference = Text(
"""
The most important difference between Text and TexText is that\n
you can change the font more easily, but can't use the LaTeX grammar
""",
font="Arial", font_size=24,
# t2c is a dict that you can choose color for different text
t2c={"Text": BLUE, "TexText": BLUE, "LaTeX": ORANGE}
)
VGroup(text, difference).arrange(DOWN, buff=1)
self.play(Write(text))
self.play(FadeIn(difference, UP))
self.wait(3)
fonts = Text(
"And you can also set the font according to different words",
font="Arial",
t2f={"font": "Consolas", "words": "Consolas"},
t2c={"font": BLUE, "words": GREEN}
)
fonts.set_width(FRAME_WIDTH - 1)
slant = Text(
"And the same as slant and weight",
font="Consolas",
t2s={"slant": ITALIC},
t2w={"weight": BOLD},
t2c={"slant": ORANGE, "weight": RED}
)
VGroup(fonts, slant).arrange(DOWN, buff=0.8)
self.play(FadeOut(text), FadeOut(difference, shift=DOWN))
self.play(Write(fonts))
self.wait()
self.play(Write(slant))
self.wait()
class TexTransformExample(Scene):
def construct(self):
to_isolate = ["B", "C", "=", "(", ")"]
lines = VGroup(
# Passing in multiple arguments to Tex will result
# in the same expression as if those arguments had
# been joined together, except that the submobject
# hierarchy of the resulting mobject ensures that the
# Tex mobject has a submobject corresponding to
# each of these strings. For example, the Tex mobject
# below will have 5 submobjects, corresponding to the
# expressions [A^2, +, B^2, =, C^2]
Tex("A^2", "+", "B^2", "=", "C^2"),
# Likewise here
Tex("A^2", "=", "C^2", "-", "B^2"),
# Alternatively, you can pass in the keyword argument
# "isolate" with a list of strings that should be out as
# their own submobject. So the line below is equivalent
# to the commented out line below it.
Tex("A^2 = (C + B)(C - B)", isolate=["A^2", *to_isolate]),
# Tex("A^2", "=", "(", "C", "+", "B", ")", "(", "C", "-", "B", ")"),
Tex("A = \\sqrt{(C + B)(C - B)}", isolate=["A", *to_isolate])
)
lines.arrange(DOWN, buff=LARGE_BUFF)
for line in lines:
line.set_color_by_tex_to_color_map({
"A": BLUE,
"B": TEAL,
"C": GREEN,
})
play_kw = {"run_time": 2}
self.add(lines[0])
# The animation TransformMatchingTex will line up parts
# of the source and target which have matching tex strings.
# Here, giving it a little path_arc makes each part sort of
# rotate into their final positions, which feels appropriate
# for the idea of rearranging an equation
self.play(
TransformMatchingTex(
lines[0].copy(), lines[1],
path_arc=90 * DEGREES,
),
**play_kw
)
self.wait()
# Now, we could try this again on the next line...
self.play(
TransformMatchingTex(lines[1].copy(), lines[2]),
**play_kw
)
self.wait()
# ...and this looks nice enough, but since there's no tex
# in lines[2] which matches "C^2" or "B^2", those terms fade
# out to nothing while the C and B terms fade in from nothing.
# If, however, we want the C^2 to go to C, and B^2 to go to B,
# we can specify that with a key map.
self.play(FadeOut(lines[2]))
self.play(
TransformMatchingTex(
lines[1].copy(), lines[2],
key_map={
"C^2": "C",
"B^2": "B",
}
),
**play_kw
)
self.wait()
# And to finish off, a simple TransformMatchingShapes would work
# just fine. But perhaps we want that exponent on A^2 to transform into
# the square root symbol. At the moment, lines[2] treats the expression
# A^2 as a unit, so we might create a new version of the same line which
# separates out just the A. This way, when TransformMatchingTex lines up
# all matching parts, the only mismatch will be between the "^2" from
# new_line2 and the "\sqrt" from the final line. By passing in
# transform_mismatches=True, it will transform this "^2" part into
# the "\sqrt" part.
new_line2 = Tex("A^2 = (C + B)(C - B)", isolate=["A", *to_isolate])
new_line2.replace(lines[2])
new_line2.match_style(lines[2])
self.play(
TransformMatchingTex(
new_line2, lines[3],
transform_mismatches=True,
),
**play_kw
)
self.wait(3)
self.play(FadeOut(lines, RIGHT))
# Alternatively, if you don't want to think about breaking up
# the tex strings deliberately, you can TransformMatchingShapes,
# which will try to line up all pieces of a source mobject with
# those of a target, regardless of the submobject hierarchy in
# each one, according to whether those pieces have the same
# shape (as best it can).
source = Text("the morse code", height=1)
target = Text("here come dots", height=1)
self.play(Write(source))
self.wait()
kw = {"run_time": 3, "path_arc": PI / 2}
self.play(TransformMatchingShapes(source, target, **kw))
self.wait()
self.play(TransformMatchingShapes(target, source, **kw))
self.wait()
class UpdatersExample(Scene):
def construct(self):
square = Square()
square.set_fill(BLUE_E, 1)
# On every frame, the constructor Brace(square, UP) will
# be called, and the mobject brace will set its data to match
# that of the newly constructed object
brace = always_redraw(Brace, square, UP)
text, number = label = VGroup(
Text("Width = "),
DecimalNumber(
0,
show_ellipsis=True,
num_decimal_places=2,
include_sign=True,
)
)
label.arrange(RIGHT)
# This ensures that the method label.next_to(brace, UP)
# is called on every frame
always(label.next_to, brace, UP)
# You could also write the following equivalent line
# label.add_updater(lambda m: m.next_to(brace, UP))
# If the argument itself might change, you can use f_always,
# for which the arguments following the initial Mobject method
# should be functions returning arguments to that method.
# The following line ensures that number.set_value(square.get_width())
# is called every frame
f_always(number.set_value, square.get_width)
# You could also write the following equivalent line
# number.add_updater(lambda m: m.set_value(square.get_width()))
self.add(square, brace, label)
# Notice that the brace and label track with the square
self.play(
square.animate.scale(2),
rate_func=there_and_back,
run_time=2,
)
self.wait()
self.play(
square.animate.set_width(5, stretch=True),
run_time=3,
)
self.wait()
self.play(
square.animate.set_width(2),
run_time=3
)
self.wait()
# In general, you can always call Mobject.add_updater, and pass in
# a function that you want to be called on every frame. The function
# should take in either one argument, the mobject, or two arguments,
# the mobject and the amount of time since the last frame.
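# For example (a sketch), the two-argument form receives the frame's
# time delta directly:
#   square.add_updater(lambda m, dt: m.rotate(dt))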
now = self.time
w0 = square.get_width()
square.add_updater(
lambda m: m.set_width(w0 * math.cos(self.time - now))
)
self.wait(4 * PI)
class CoordinateSystemExample(Scene):
def construct(self):
axes = Axes(
# x-axis ranges from -1 to 10, with a default step size of 1
x_range=(-1, 10),
# y-axis ranges from -2 to 10 with a step size of 0.5
y_range=(-2, 2, 0.5),
# The axes will be stretched so as to match the specified
# height and width
height=6,
width=10,
# Axes is made of two NumberLine mobjects. You can specify
# their configuration with axis_config
axis_config={
"stroke_color": GREY_A,
"stroke_width": 2,
},
# Alternatively, you can specify configuration for just one
# of them, like this.
y_axis_config={
"include_tip": False,
}
)
# Keyword arguments of add_coordinate_labels can be used to
# configure the DecimalNumber mobjects which it creates and
# adds to the axes
axes.add_coordinate_labels(
font_size=20,
num_decimal_places=1,
)
self.add(axes)
# Axes descends from the CoordinateSystem class, meaning
# you can call axes.coords_to_point, abbreviated to
# axes.c2p, to associate a set of coordinates with a point,
# like so:
dot = Dot(color=RED)
dot.move_to(axes.c2p(0, 0))
self.play(FadeIn(dot, scale=0.5))
self.play(dot.animate.move_to(axes.c2p(3, 2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(5, 0.5)))
self.wait()
# Similarly, you can call axes.point_to_coords, or axes.p2c
# print(axes.p2c(dot.get_center()))
# We can draw lines from the axes to better mark the coordinates
# of a given point.
# Here, the always_redraw command means that on each new frame
# the lines will be redrawn
h_line = always_redraw(lambda: axes.get_h_line(dot.get_left()))
v_line = always_redraw(lambda: axes.get_v_line(dot.get_bottom()))
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
)
self.play(dot.animate.move_to(axes.c2p(3, -2)))
self.wait()
self.play(dot.animate.move_to(axes.c2p(1, 1)))
self.wait()
# If we tie the dot to a particular set of coordinates, notice
# that as we move the axes around it respects the coordinate
# system defined by them.
f_always(dot.move_to, lambda: axes.c2p(1, 1))
self.play(
axes.animate.scale(0.75).to_corner(UL),
run_time=2,
)
self.wait()
self.play(FadeOut(VGroup(axes, dot, h_line, v_line)))
# Other coordinate systems you can play around with include
# ThreeDAxes, NumberPlane, and ComplexPlane.
class GraphExample(Scene):
def construct(self):
axes = Axes((-3, 10), (-1, 8))
axes.add_coordinate_labels()
self.play(Write(axes, lag_ratio=0.01, run_time=1))
# Axes.get_graph will return the graph of a function
sin_graph = axes.get_graph(
lambda x: 2 * math.sin(x),
color=BLUE,
)
# By default, it draws it so as to somewhat smoothly interpolate
# between sampled points (x, f(x)). If the graph is meant to have
# a corner, though, you can set use_smoothing to False
relu_graph = axes.get_graph(
lambda x: max(x, 0),
use_smoothing=False,
color=YELLOW,
)
# For discontinuous functions, you can specify the point of
# discontinuity so that it does not try to draw over the gap.
step_graph = axes.get_graph(
lambda x: 2.0 if x > 3 else 1.0,
discontinuities=[3],
color=GREEN,
)
# Axes.get_graph_label takes in either a string or a mobject.
# If it's a string, it treats it as a LaTeX expression. By default
# it places the label next to the graph near the right side, and
# has it match the color of the graph
sin_label = axes.get_graph_label(sin_graph, "\\sin(x)")
relu_label = axes.get_graph_label(relu_graph, Text("ReLU"))
step_label = axes.get_graph_label(step_graph, Text("Step"), x=4)
self.play(
ShowCreation(sin_graph),
FadeIn(sin_label, RIGHT),
)
self.wait(2)
self.play(
ReplacementTransform(sin_graph, relu_graph),
FadeTransform(sin_label, relu_label),
)
self.wait()
self.play(
ReplacementTransform(relu_graph, step_graph),
FadeTransform(relu_label, step_label),
)
self.wait()
parabola = axes.get_graph(lambda x: 0.25 * x**2)
parabola.set_stroke(BLUE)
self.play(
FadeOut(step_graph),
FadeOut(step_label),
ShowCreation(parabola)
)
self.wait()
# You can use axes.input_to_graph_point, abbreviated
# to axes.i2gp, to find a particular point on a graph
dot = Dot(color=RED)
dot.move_to(axes.i2gp(2, parabola))
self.play(FadeIn(dot, scale=0.5))
# A value tracker lets us animate a parameter, usually
# with the intent of having other mobjects update based
# on the parameter
x_tracker = ValueTracker(2)
f_always(
dot.move_to,
lambda: axes.i2gp(x_tracker.get_value(), parabola)
)
self.play(x_tracker.animate.set_value(4), run_time=3)
self.play(x_tracker.animate.set_value(-2), run_time=3)
self.wait()
class SurfaceExample(Scene):
CONFIG = {
"camera_class": ThreeDCamera,
}
def construct(self):
surface_text = Text("For 3d scenes, try using surfaces")
surface_text.fix_in_frame()
surface_text.to_edge(UP)
self.add(surface_text)
self.wait(0.1)
torus1 = Torus(r1=1, r2=1)
torus2 = Torus(r1=3, r2=1)
sphere = Sphere(radius=3, resolution=torus1.resolution)
# You can texture a surface with up to two images, which will
# be interpreted as the side towards the light, and away from
# the light. These can be either urls, or paths to a local file
# in whatever you've set as the image directory in
# the custom_config.yml file
# day_texture = "EarthTextureMap"
# night_texture = "NightEarthTextureMap"
day_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Whole_world_-_land_and_oceans.jpg/1280px-Whole_world_-_land_and_oceans.jpg"
night_texture = "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/The_earth_at_night.jpg/1280px-The_earth_at_night.jpg"
surfaces = [
TexturedSurface(surface, day_texture, night_texture)
for surface in [sphere, torus1, torus2]
]
for mob in surfaces:
mob.shift(IN)
mob.mesh = SurfaceMesh(mob)
mob.mesh.set_stroke(BLUE, 1, opacity=0.5)
# Set perspective
frame = self.camera.frame
frame.set_euler_angles(
theta=-30 * DEGREES,
phi=70 * DEGREES,
)
surface = surfaces[0]
self.play(
FadeIn(surface),
ShowCreation(surface.mesh, lag_ratio=0.01, run_time=3),
)
for mob in surfaces:
mob.add(mob.mesh)
surface.save_state()
self.play(Rotate(surface, PI / 2), run_time=2)
for mob in surfaces[1:]:
mob.rotate(PI / 2)
self.play(
Transform(surface, surfaces[1]),
run_time=3
)
self.play(
Transform(surface, surfaces[2]),
# Move camera frame during the transition
frame.animate.increment_phi(-10 * DEGREES),
frame.animate.increment_theta(-20 * DEGREES),
run_time=3
)
# Add ambient rotation
frame.add_updater(lambda m, dt: m.increment_theta(-0.1 * dt))
# Play around with where the light is
light_text = Text("You can move around the light source")
light_text.move_to(surface_text)
light_text.fix_in_frame()
self.play(FadeTransform(surface_text, light_text))
light = self.camera.light_source
self.add(light)
light.save_state()
self.play(light.animate.move_to(3 * IN), run_time=5)
self.play(light.animate.shift(10 * OUT), run_time=5)
drag_text = Text("Try moving the mouse while pressing d or s")
drag_text.move_to(light_text)
drag_text.fix_in_frame()
self.play(FadeTransform(light_text, drag_text))
self.wait()
class InteractiveDevelopment(Scene):
def construct(self):
circle = Circle()
circle.set_fill(BLUE, opacity=0.5)
circle.set_stroke(BLUE_E, width=4)
square = Square()
self.play(ShowCreation(square))
self.wait()
# This opens an IPython terminal where you can keep writing
# lines as if they were part of this construct method.
# In particular, 'square', 'circle' and 'self' will all be
# part of the local namespace in that terminal.
self.embed()
# Try copying and pasting some of the lines below into
# the interactive shell
self.play(ReplacementTransform(square, circle))
self.wait()
self.play(circle.animate.stretch(4, 0))
self.play(Rotate(circle, 90 * DEGREES))
self.play(circle.animate.shift(2 * RIGHT).scale(0.25))
text = Text("""
In general, using the interactive shell
is very helpful when developing new scenes
""")
self.play(Write(text))
# In the interactive shell, you can just type
# play, add, remove, clear, wait, save_state and restore,
# instead of self.play, self.add, self.remove, etc.
# To interact with the window, type touch(). You can then
# scroll in the window, or zoom by holding down 'z' while scrolling,
# and change camera perspective by holding down 'd' while moving
# the mouse. Press 'r' to reset to the standard camera position.
# Press 'q' to stop interacting with the window and go back to
# typing new commands into the shell.
# In principle you can customize a scene to be responsive to
# mouse and keyboard interactions
always(circle.move_to, self.mouse_point)
class ControlsExample(Scene):
def setup(self):
self.textbox = Textbox()
self.checkbox = Checkbox()
self.color_picker = ColorSliders()
self.panel = ControlPanel(
Text("Text", size=0.5), self.textbox, Line(),
Text("Show/Hide Text", size=0.5), self.checkbox, Line(),
Text("Color of Text", size=0.5), self.color_picker
)
self.add(self.panel)
def construct(self):
text = Text("", size=2)
def text_updater(old_text):
assert(isinstance(old_text, Text))
new_text = Text(self.textbox.get_value(), size=old_text.size)
# new_text.align_data_and_family(old_text)
new_text.move_to(old_text)
if self.checkbox.get_value():
new_text.set_fill(
color=self.color_picker.get_picked_color(),
opacity=self.color_picker.get_picked_opacity()
)
else:
new_text.set_opacity(0)
old_text.become(new_text)
text.add_updater(text_updater)
self.add(MotionMobject(text))
self.textbox.set_value("Manim")
# self.wait(60)
# self.embed()
# See https://github.com/3b1b/videos for many, many more
|
{"hexsha": "4af663609a6576171a26b2c27acd2df920b9a152", "size": 24605, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_scenes.py", "max_stars_repo_name": "net-oil-man/Oilman-Manim-Tools", "max_stars_repo_head_hexsha": "f93d54d586fcae69932c4f0820de96d83cdb9c5d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-10T08:41:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-10T08:41:10.000Z", "max_issues_repo_path": "example_scenes.py", "max_issues_repo_name": "net-oil-man/Oilman-Manim-Tools", "max_issues_repo_head_hexsha": "f93d54d586fcae69932c4f0820de96d83cdb9c5d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example_scenes.py", "max_forks_repo_name": "net-oil-man/Oilman-Manim-Tools", "max_forks_repo_head_hexsha": "f93d54d586fcae69932c4f0820de96d83cdb9c5d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5059347181, "max_line_length": 156, "alphanum_fraction": 0.5829302987, "include": true, "reason": "import numpy", "num_tokens": 5753}
|
[STATEMENT]
lemma Array_Object_widen1_trancl:
assumes wf: "wf_prog wmdc P"
and itA: "is_type P (A\<lfloor>\<rceil>)"
shows "P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
using itA
[PROOF STATE]
proof (prove)
using this:
is_type P (A\<lfloor>\<rceil>)
goal (1 subgoal):
1. P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
proof(induction A)
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>x. is_type P (Class x\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Class x\<lfloor>\<rceil> <\<^sup>+ Class Object
6. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
case (Class C)
[PROOF STATE]
proof (state)
this:
is_type P (Class C\<lfloor>\<rceil>)
goal (6 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>x. is_type P (Class x\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Class x\<lfloor>\<rceil> <\<^sup>+ Class Object
6. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
hence "is_class P C"
[PROOF STATE]
proof (prove)
using this:
is_type P (Class C\<lfloor>\<rceil>)
goal (1 subgoal):
1. is_class P C
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
is_class P C
goal (6 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>x. is_type P (Class x\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Class x\<lfloor>\<rceil> <\<^sup>+ Class Object
6. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
hence "P \<turnstile> C \<preceq>\<^sup>* Object"
[PROOF STATE]
proof (prove)
using this:
is_class P C
goal (1 subgoal):
1. P \<turnstile> C \<preceq>\<^sup>* Object
[PROOF STEP]
by(rule subcls_C_Object[OF _ wf])
[PROOF STATE]
proof (state)
this:
P \<turnstile> C \<preceq>\<^sup>* Object
goal (6 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>x. is_type P (Class x\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Class x\<lfloor>\<rceil> <\<^sup>+ Class Object
6. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
hence "P \<turnstile> Class C <\<^sup>* Class Object"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> C \<preceq>\<^sup>* Object
goal (1 subgoal):
1. P \<turnstile> Class C <\<^sup>* Class Object
[PROOF STEP]
by(rule subcls_into_widen1_rtrancl)
[PROOF STATE]
proof (state)
this:
P \<turnstile> Class C <\<^sup>* Class Object
goal (6 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>x. is_type P (Class x\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Class x\<lfloor>\<rceil> <\<^sup>+ Class Object
6. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
hence "P \<turnstile> Class C\<lfloor>\<rceil> <\<^sup>* Class Object\<lfloor>\<rceil>"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> Class C <\<^sup>* Class Object
goal (1 subgoal):
1. P \<turnstile> Class C\<lfloor>\<rceil> <\<^sup>* Class Object\<lfloor>\<rceil>
[PROOF STEP]
by(rule widen1_rtrancl_into_Array_widen1_rtrancl) simp
[PROOF STATE]
proof (state)
this:
P \<turnstile> Class C\<lfloor>\<rceil> <\<^sup>* Class Object\<lfloor>\<rceil>
goal (6 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>x. is_type P (Class x\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Class x\<lfloor>\<rceil> <\<^sup>+ Class Object
6. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> Class C\<lfloor>\<rceil> <\<^sup>* Class Object\<lfloor>\<rceil>
goal (1 subgoal):
1. P \<turnstile> Class C\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
by(rule rtrancl_into_trancl1) simp
[PROOF STATE]
proof (state)
this:
P \<turnstile> Class C\<lfloor>\<rceil> <\<^sup>+ Class Object
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
case (Array A)
[PROOF STATE]
proof (state)
this:
is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
from \<open>is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<close>
[PROOF STATE]
proof (chain)
picking this:
is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)
[PROOF STEP]
have "is_type P (A\<lfloor>\<rceil>)"
[PROOF STATE]
proof (prove)
using this:
is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)
goal (1 subgoal):
1. is_type P (A\<lfloor>\<rceil>)
[PROOF STEP]
by(rule is_type_ArrayD)
[PROOF STATE]
proof (state)
this:
is_type P (A\<lfloor>\<rceil>)
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
hence "P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object"
[PROOF STATE]
proof (prove)
using this:
is_type P (A\<lfloor>\<rceil>)
goal (1 subgoal):
1. P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
by(rule Array.IH)
[PROOF STATE]
proof (state)
this:
P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
from \<open>is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<close>
[PROOF STATE]
proof (chain)
picking this:
is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)
[PROOF STEP]
have "\<not> is_NT_Array (A\<lfloor>\<rceil>)"
[PROOF STATE]
proof (prove)
using this:
is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)
goal (1 subgoal):
1. ground_type (A\<lfloor>\<rceil>) \<noteq> NT
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
ground_type (A\<lfloor>\<rceil>) \<noteq> NT
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
ground_type (A\<lfloor>\<rceil>) \<noteq> NT
[PROOF STEP]
have "P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object\<lfloor>\<rceil>"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object
ground_type (A\<lfloor>\<rceil>) \<noteq> NT
goal (1 subgoal):
1. P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object\<lfloor>\<rceil>
[PROOF STEP]
by(rule widen1_trancl_into_Array_widen1_trancl)
[PROOF STATE]
proof (state)
this:
P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object\<lfloor>\<rceil>
goal (5 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
5. \<And>A. \<lbrakk>is_type P (A\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil> <\<^sup>+ Class Object; is_type P (A\<lfloor>\<rceil>\<lfloor>\<rceil>)\<rbrakk> \<Longrightarrow> P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object\<lfloor>\<rceil>
goal (1 subgoal):
1. P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
by(rule trancl_into_trancl) simp
[PROOF STATE]
proof (state)
this:
P \<turnstile> A\<lfloor>\<rceil>\<lfloor>\<rceil> <\<^sup>+ Class Object
goal (4 subgoals):
1. is_type P (Void\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Void\<lfloor>\<rceil> <\<^sup>+ Class Object
2. is_type P (Boolean\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Boolean\<lfloor>\<rceil> <\<^sup>+ Class Object
3. is_type P (Integer\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> Integer\<lfloor>\<rceil> <\<^sup>+ Class Object
4. is_type P (NT\<lfloor>\<rceil>) \<Longrightarrow> P \<turnstile> NT\<lfloor>\<rceil> <\<^sup>+ Class Object
[PROOF STEP]
qed auto
|
{"llama_tokens": 6626, "file": "JinjaThreads_Common_SemiType", "length": 30}
|
""" Train network on the Fluorescence Microscopy Dataset (FMD) """
import argparse
import glob
import os
from os import listdir
from os.path import join
import keras
import numpy as np
import skimage
import tensorflow as tf
from imageio import imread
from keras import backend as K
from keras.callbacks import LambdaCallback, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import SGD, Adam
from tqdm import trange
from callback import LogProgress
from nets import *
np.random.seed(1234)
tf.set_random_seed(1234)
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
K.set_session(sess)
parser = argparse.ArgumentParser()
parser.add_argument('--path',required=True,help='path to dataset root')
parser.add_argument('--dataset',required=True,help='dataset name e.g. Confocal_MICE')
parser.add_argument('--mode',default='uncalib',help='noise model: uncalib, gaussian, poisson, or poissongaussian')
parser.add_argument('--reg',type=float,default=10,help='regularization weight on prior std. dev.')
parser.add_argument('--crop',type=int,default=128,help='crop size')
parser.add_argument('--batch',type=int,default=4,help='batch size')
parser.add_argument('--epoch',type=int,default=300,help='num epochs')
parser.add_argument('--steps',type=int,default=50,help='steps per epoch')
parser.add_argument('--lr',type=float,default=0.0003,help='learning rate')
parser.add_argument('--components',type=int,default=1,help='number of mixture components')
parser.add_argument('--patience',type=int,default=10,help='ReduceLROnPlateau patience')
parser.add_argument('--tag',type=str,default="",help='id tag to add to weights path')
args = parser.parse_args()
if args.components != 1 and args.mode != "uncalib":
raise ValueError("Components != 1 must be used with mode uncalib")
""" Load dataset """
def load_images(noise):
basepath = args.path + '/' + args.dataset + '/' + noise
images = []
for i in range(1,21):
if i == 19: continue  # image 19 is held out from training here
for path in sorted(glob.glob(basepath + '/%d/*.png'%i)):
images.append(imread(path))
return np.stack(images,axis=0)[:,:,:,None]/255.
train_images = load_images('raw')
np.random.shuffle(train_images)
X = train_images[:-5]
X_val = train_images[-5:]
print('%d training images'%len(X))
print('%d validation images'%len(X_val))
""" Augment by rotating and flipping """
""" Adapted from https://github.com/juglab/n2v/blob/master/n2v/internals/N2V_DataGenerator.py """
def augment_images(images):
augmented = np.concatenate((images,
np.rot90(images, k=1, axes=(1, 2)),
np.rot90(images, k=2, axes=(1, 2)),
np.rot90(images, k=3, axes=(1, 2))))
augmented = np.concatenate((augmented, np.flip(augmented, axis=-2)))
return augmented
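# Note: augmentation yields 8 variants per image (4 rotations x 2 flips),
# so the augmented arrays below are 8x the original length.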
X = augment_images(X)
X_val = augment_images(X_val)
print('%d training images after augmenting'%len(X))
""" Training """
""" Train on random crops of the training image."""
def random_crop_generator(data, crop_size, batch_size):
while True:
inds = np.random.randint(data.shape[0],size=batch_size)
y = np.random.randint(data.shape[1]-crop_size,size=batch_size)
x = np.random.randint(data.shape[2]-crop_size,size=batch_size)
batch = np.zeros((batch_size,crop_size,crop_size,1),dtype=data.dtype)
for i,ind in enumerate(inds):
batch[i] = data[ind,y[i]:y[i]+crop_size,x[i]:x[i]+crop_size]
yield batch, None
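# Usage sketch: each yielded batch has shape (batch_size, crop_size, crop_size, 1)
# paired with a None target, since the model is compiled without an external loss:
#   batch, _ = next(random_crop_generator(X, 128, 4))  # batch.shape == (4, 128, 128, 1)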
model = gaussian_blindspot_network((args.crop, args.crop, 1),args.mode,args.reg,args.components)
model.compile(optimizer=Adam(args.lr))
os.makedirs('weights',exist_ok=True)
experiment_name = '%s.%s'%(args.dataset,args.mode)
if args.tag != "":
experiment_name += '.%s'%(args.tag)
if args.mode == 'uncalib' or args.mode == 'mse':
if args.components != 1:
experiment_name += '.%dcomponents'%(args.components)
else:
experiment_name += '.%0.3f'%(args.reg)
weights_path = "weights/weights." + experiment_name + ".latest.hdf5"
callbacks = []
callbacks.append(ModelCheckpoint(filepath=weights_path, monitor='val_loss',save_best_only=1,verbose=1))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=args.patience, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0))
callbacks.append(LogProgress(experiment_name))
gen = random_crop_generator(X,args.crop,args.batch)
val_crops = []
for y in range(0,X_val.shape[1],args.crop):
if y+args.crop > X_val.shape[1]: continue
for x in range(0,X_val.shape[2],args.crop):
if x+args.crop > X_val.shape[2]: continue
val_crops.append(X_val[:,y:y+args.crop,x:x+args.crop])
val_data = np.concatenate(val_crops,axis=0)
history = model.fit_generator(gen,
steps_per_epoch=args.steps,
validation_data=(val_data,None),
epochs=args.epoch,
verbose=1,
callbacks=callbacks)
|
{"hexsha": "76556de9e7613bb235cb8b35a468629b554720d6", "size": 5050, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_fmd.py", "max_stars_repo_name": "JRice15/self-supervised-poisson-gaussian", "max_stars_repo_head_hexsha": "4c9bf851194454eae17b5673a28a6131a8bd0ce5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-21T19:29:52.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-21T19:29:52.000Z", "max_issues_repo_path": "train_fmd.py", "max_issues_repo_name": "JRice15/self-supervised-poisson-gaussian", "max_issues_repo_head_hexsha": "4c9bf851194454eae17b5673a28a6131a8bd0ce5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_fmd.py", "max_forks_repo_name": "JRice15/self-supervised-poisson-gaussian", "max_forks_repo_head_hexsha": "4c9bf851194454eae17b5673a28a6131a8bd0ce5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4074074074, "max_line_length": 155, "alphanum_fraction": 0.6873267327, "include": true, "reason": "import numpy", "num_tokens": 1234}
|
import xarray as xr
import salem
import geopandas
import geojson
from shapely.geometry import shape
import numpy as np
from glob import glob
import pandas as pd
import os
from subprocess import check_call
def subset_to_west(da, lon_range, lat_range, roi):
da = da.sel(latitude=slice(lat_range[0], lat_range[1]),
longitude=slice(lon_range[0], lon_range[1]))
return da.salem.roi(geometry=roi, crs='wgs84')
# projection: EPSG:3857
gjson_fname = '/glade/u/home/mckinnon/compound_extremes/compound_extremes/shapefiles/interior_west.json'
interior_west = geopandas.read_file(gjson_fname)
lon_range = (np.min(interior_west['geometry'][0].exterior.coords.xy[0]),
np.max(interior_west['geometry'][0].exterior.coords.xy[0]))
lat_range = (np.min(interior_west['geometry'][0].exterior.coords.xy[1]),
np.max(interior_west['geometry'][0].exterior.coords.xy[1]))
with open(gjson_fname) as f:
geo = geojson.load(f)
interior_west_shapely = shape(geo[0]['geometry'])
start_month = 7
end_month = 9
jra55_dir = '/glade/scratch/mckinnon/JRA55'
datadir = '/glade/work/mckinnon/JRA55/csv'
cmd = 'mkdir -p %s' % datadir
check_call(cmd.split())
varnames_file = ['anl_surf125.011_tmp', 'anl_surf125.051_spfh']
varnames = ['TMP_GDS0_HTGL', 'SPFH_GDS0_HTGL']
for ct, this_varname in enumerate(varnames):
savename = '/glade/work/mckinnon/JRA55/processed_%s.nc' % varnames_file[ct]
if not os.path.isfile(savename):
print(this_varname)
files = sorted(glob('%s/%s*' % (jra55_dir, varnames_file[ct])))
ds = xr.open_mfdataset(files)
if 'fcst' in files[0]: # use 3 hour forecast
ds = ds.sel(forecast_time1=ds['forecast_time1'][0])
da = ds[this_varname]
if 'g0_lat_1' in da.coords:
rename_dict = {'g0_lat_1': 'latitude', 'g0_lon_2': 'longitude',
'initial_time0_hours': 'time'}
elif 'g0_lat_2' in da.coords:
rename_dict = {'g0_lat_2': 'latitude', 'g0_lon_3': 'longitude',
'initial_time0_hours': 'time'}
da = da.rename(rename_dict)
# Sort by latitude
da = da.sortby('latitude')
# Subset to US (approx)
da = da.sel(latitude=slice(20, 55), longitude=slice(230, 300))
# change to -180 to 180 longitude
da = da.assign_coords(longitude=(((da.longitude + 180) % 360) - 180))
# subset to interior west
da = subset_to_west(da, lon_range, lat_range, interior_west_shapely)
# calculate daily average
da = da.resample(time='D').mean()
# save
savename = '/glade/work/mckinnon/JRA55/processed_%s.nc' % varnames_file[ct]
da.to_netcdf(savename)
if 'TMP' in this_varname:
da_T = xr.open_dataarray(savename)
elif 'SPFH' in this_varname:
da_q = xr.open_dataarray(savename)
da_T = da_T.assign_coords({'longitude': np.round(da_T.longitude, 2)})
da_T = da_T.assign_coords({'latitude': np.round(da_T.latitude, 2)})
da_q = da_q.assign_coords({'longitude': np.round(da_q.longitude, 2)})
da_q = da_q.assign_coords({'latitude': np.round(da_q.latitude, 2)})
# For gridboxes that have data, save in same manner as ISD
# "station_id" will be lat-lon
lons, lats = np.meshgrid(da_T.longitude, da_T.latitude)
has_data = ~np.isnan(da_T[0, :, :])
lons = lons[has_data.values]
lats = lats[has_data.values]
station_id = ['%03.2f%03.2f' % (this_lat, this_lon) for (this_lat, this_lon) in zip(lats, lons)]
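# e.g. (39.50, -105.25) -> '39.50-105.25'; '%03.2f' keeps two decimal places
# (the zero-padded width of 3 is narrower than any formatted value here)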
metadata = pd.DataFrame({'station_id': station_id,
'lat': np.round(lats, 2),
'lon': np.round(lons, 2)})
metadata.to_csv('%s/new_metadata.csv' % datadir)
# Iterate through lats, lons and make dataframes
for counter in range(len(lats)):
this_lat = lats[counter]
this_lon = lons[counter]
station_id = '%03.2f%03.2f' % (this_lat, this_lon)
print(station_id)
this_q_ts = da_q.sel(latitude=this_lat, longitude=this_lon)
this_T_ts = da_T.sel(latitude=this_lat, longitude=this_lon)
this_df = this_q_ts.to_dataframe(name='Q')
this_df = this_df.assign(TMP=this_T_ts.values)
this_df = this_df.reset_index()
# rename date column
this_df = this_df.rename(columns={'time': 'date'})
# drop columns we don't need
this_df = this_df.drop(columns=['latitude', 'longitude'])
# save
this_df.to_csv('%s/%s.csv' % (datadir, station_id), index=False)
|
{"hexsha": "6631ae00195333f19ae626554784f86403b22e3c", "size": 4446, "ext": "py", "lang": "Python", "max_stars_repo_path": "compound_extremes/scripts/preprocess_JRA55.py", "max_stars_repo_name": "karenamckinnon/compound_extremes", "max_stars_repo_head_hexsha": "26542ed55af86fdfd3486e949f475b18f03165c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-06T23:56:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-04T02:21:03.000Z", "max_issues_repo_path": "compound_extremes/scripts/preprocess_JRA55.py", "max_issues_repo_name": "karenamckinnon/compound_extremes", "max_issues_repo_head_hexsha": "26542ed55af86fdfd3486e949f475b18f03165c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compound_extremes/scripts/preprocess_JRA55.py", "max_forks_repo_name": "karenamckinnon/compound_extremes", "max_forks_repo_head_hexsha": "26542ed55af86fdfd3486e949f475b18f03165c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.734375, "max_line_length": 104, "alphanum_fraction": 0.6617183986, "include": true, "reason": "import numpy", "num_tokens": 1292}
|
//==================================================================================================
/*!
@file
Forward declaration of common components
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
**/
//==================================================================================================
#ifndef BOOST_SIMD_FORWARD_HPP_INCLUDED
#define BOOST_SIMD_FORWARD_HPP_INCLUDED
#include <boost/simd/config.hpp>
#include <boost/simd/meta/native_cardinal.hpp>
#include <boost/simd/meta/abi_of.hpp>
namespace boost { namespace simd
{
// Forward declaration of pack
template< typename T
, std::size_t N = native_cardinal<T>::value
, typename ABI = abi_of_t<T,N>
>
class pack;
// Forward declaration of logical
template<typename T> struct logical;
} }
#endif
|
{"hexsha": "a558ddf65d0de827c22a777a30fe68d135019c8a", "size": 939, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/forward.hpp", "max_stars_repo_name": "SylvainCorlay/pythran", "max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "third_party/boost/simd/forward.hpp", "max_issues_repo_name": "SylvainCorlay/pythran", "max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/forward.hpp", "max_forks_repo_name": "SylvainCorlay/pythran", "max_forks_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 26.8285714286, "max_line_length": 100, "alphanum_fraction": 0.5654952077, "num_tokens": 182}
|
### ----------------- IMPORTS ----------------- ###
import os
from beartype import beartype
import numpy as np
import pandas as pd
from backend.adi_parse import AdiParse
from backend import search_function
from backend.get_all_comments import GetComments
### ------------------------------------------- ###
@beartype
def get_file_data(folder_path:str, channel_structures:dict):
"""
Get file data in dataframe
Parameters
----------
folder_path : str
channel_structures : dict, keys = total channels, values = channel list
Returns
-------
file_data : pd.DataFrame
"""
# make lower string and path type
folder_path = os.path.normpath(folder_path.lower())
file_data = pd.DataFrame()
cntr = 0
# walk through all folders
for root, dirs, files in os.walk(folder_path):
# get labchart file list
filelist = list(filter(lambda k: '.adicht' in k, files))
for file in filelist: # iterate over list
# initiate adi parse object
adi_parse = AdiParse(os.path.join(root, file), channel_structures)
# get all file data in dataframe
temp_file_data = adi_parse.get_all_file_properties()
# add folder path
temp_file_data['folder_path'] = os.path.normcase(root)
# apppend to dataframe
file_data = file_data.append(temp_file_data, ignore_index = True)
cntr+=1
# convert data frame to lower case
file_data = file_data.apply(lambda x: x.astype(str).str.lower())
# convert file length to int
file_data['file_length'] = file_data['file_length'].astype(np.int64)
# make paths relative
file_data.folder_path = file_data.folder_path.str.replace(folder_path, '', regex=False)
file_data.folder_path = file_data.folder_path.map(lambda x: x.lstrip('\\'))
return file_data
def get_channel_structures(user_data):
"""
Get channel structure from labchart files based on user data
Parameters
----------
user_data : Dataframe with user data for SAKE input
Returns
-------
order : List with channels in order
"""
# define separator
separator = '-'
# get data containing channel order
channel_structures = user_data[user_data['Source'] == 'total_channels'].reset_index().drop(['index'], axis = 1)
regions = {}
for i in range(len(channel_structures)):
# retrieve channel names
channel_names = channel_structures['Assigned Group Name'][i]
# get list of channels for each total channels entry
region_list = channel_names.split(separator)
regions.update({int(channel_structures['Search Value'][i]): region_list})
return regions
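# Usage sketch (hypothetical row): an entry with Source='total_channels',
# Search Value='2', and Assigned Group Name='hpc-pfc' yields {2: ['hpc', 'pfc']}.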
def add_animal_id(file_data, user_data):
"""
Add animal id from channel name to labchart data
Parameters
----------
file_data : pd.DataFrame
user_data : Dataframe with user data for SAKE input
Returns
-------
file_data : List with channels in order
user_data: Dataframe with user data for SAKE input
"""
# get data containing channel order
drop_idx = user_data['Search Function'] == 'within'
animal_id = user_data[drop_idx].reset_index().drop(['index'], axis = 1)
# check if present
if len(animal_id) > 1:
raise(Exception('Only one Search Function with -within- is allowed!\n'))
if len(animal_id) == 0:
raise(Exception('Search Function -within- is required!\n'))
# convert to dictionary
ids = animal_id.loc[0].to_dict()
# define separator
sep = ids['Search Value']
# get file name
file_data['animal_id'] = ''
for i,name in enumerate(file_data[ids['Source']]):
if sep in name:
file_data.at[i, ids['Category']] = sep + name.split(sep)[1] + sep
return file_data, user_data.drop(np.where(drop_idx)[0], axis = 0)
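# Example of the extraction above (hypothetical values): with Search Value '-'
# and a source string 'mouse-101-ctx', the stored id is '-101-'
# (the text between the first pair of separators, re-wrapped in the separator).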
def get_categories(user_data):
"""
Get unique categories and groups in dictionary.
Parameters
----------
user_data : pd.DataFrame, with user group inputs.
Returns
-------
groups : dict, keys are unique categories, values are the group names in each.
"""
# get unique categories
unique_categories = user_data['Category'].unique()
groups = {} # create group dictionary
for category in unique_categories: # iterate over categories
# which groups exist in categories
groups.update({category: list(user_data['Assigned Group Name'][user_data['Category'] == category]) })
return groups
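# Usage sketch: rows with Category 'sex' and Assigned Group Names 'male'/'female'
# produce {'sex': ['male', 'female']}.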
def reverse_hot_encoding(sort_df):
"""
Reverse hot coding in dataframe and replace with column names or nan
Parameters
----------
sort_df : pd.DataFrame, with columns in one hot encoding format
Returns
-------
col_labels: 1D np.array with columns retrieved from one hot encoded format
"""
# get columns
labels = np.array(sort_df.columns)
# find index where column is True (alternative: np.argmax(np.array(sort_df), axis=1))
idx_array = np.array(sort_df)
col_labels = np.zeros(len(sort_df), dtype=object)
for i in range(idx_array.shape[0]): # iterate over idx_array
# find which column
idx = np.where(idx_array[i] == True)[0]
if len(idx) == 0: # if no True value present
col_labels[i] = np.NaN
elif len(idx) > 1: # if more than one True value present
col_labels[i] = np.NaN
elif len(idx) == 1: # if one True value present
col_labels[i] = labels[idx[0]]
return col_labels
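# Usage sketch with toy one-hot data:
#   reverse_hot_encoding(pd.DataFrame({'a': [True, False, True],
#                                      'b': [False, False, True]}))
#   -> array(['a', nan, nan], dtype=object)
# (rows with zero or multiple True flags map to NaN)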
def convert_logicdf_to_groups(index_df, logic_index_df, groups_ids:dict):
"""
Convert logic from logic_index_df to groups and and append to index_df
Parameters
----------
index_df : pd.DataFrame, to append categories
logic_index_df : pd.DataFrame, containing logic
groups_ids : dict, containing categories as keys and groups as values
Returns
-------
index_df : pd.DataFrame
"""
# convert logic to groups
for category, groups in groups_ids.items():
# check if all groups present in dataframe
groups_present = all(elem in logic_index_df.columns for elem in groups)
if (groups_present == True): # are all groups present in dataframe?
if (logic_index_df[groups].any().any() == True): # was any group detected?
# convert logic to groups
index_df[category] = reverse_hot_encoding(logic_index_df[groups])
return index_df
def get_source_logic(file_data, user_data, source:str):
"""
    Find which assigned groups match each file and return boolean logic as a dataframe
    Parameters
    ----------
    file_data : pd.DataFrame, aggregated data from labchart files
    user_data : pd.DataFrame, user search and grouping parameters
    source : str, column of file_data to search in
    Returns
    -------
    index : pd.DataFrame, one boolean column per assigned group
"""
    # get only user data from the given source
user_data = user_data[user_data['Source'] == source].reset_index()
index = {}
for i in range(len(user_data)): # iterate over user data entries
# find index for specified source and match string
idx = getattr(search_function, user_data.at[i, 'Search Function'])(file_data[source], user_data.at[i, 'Search Value'])
# append to index dictionary
index.update({user_data.at[i, 'Assigned Group Name']: idx})
return pd.DataFrame(index)
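# Example (hedged sketch, values are assumptions): for source == 'file_name' and a
# single user row {'Search Function': 'contains', 'Search Value': 'wt',
# 'Assigned Group Name': 'wildtype'}, the returned frame has one boolean column
# 'wildtype' marking the file names that contain 'wt' (this assumes
# search_function, defined earlier, exposes a 'contains' method).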
def get_drop_logic(file_data, user_data, source:str):
"""
    Build boolean drop logic for rows the user marked with group name 'drop'
    Parameters
    ----------
    file_data : pd.DataFrame, aggregated data from labchart files
    user_data : pd.DataFrame, user rows with Assigned Group Name == 'drop'
    source : str, column of file_data to search in
    Returns
    -------
    index : pd.DataFrame, one boolean column per drop condition
"""
    # get only user data from the given source
user_data = user_data[user_data['Source'] == source].reset_index()
index = {}
for i in range(len(user_data)): # iterate over user data entries
# find index for specified source and match string
idx = getattr(search_function, user_data.at[i, 'Search Function'])(file_data[source], user_data.at[i, 'Search Value'])
# append to index dictionary
col_name = source + '_' + user_data.at[i, 'Assigned Group Name'] + str(i)
index.update({col_name: idx})
return pd.DataFrame(index)
def create_index_array(file_data, user_data):
"""
Create index for experiments according to user selection
Parameters
----------
file_data : pd.DataFrame, aggregated data from labchart files
user_data : pd.DataFrame, user search and grouping parameters
Returns
-------
index_df: pd.DataFrame, with index
group_columns: list, column names that denote groups
warning_str: str, string used for warning
"""
# create empty dataframes for storage
logic_index_df = pd.DataFrame()
index_df = pd.DataFrame()
drop_df = pd.DataFrame()
warning_str = ''
# create sources list
sources = ['channel_name', 'file_name']
# separate user data based on drop
drop_idx = user_data['Assigned Group Name'] == 'drop'
user_data_drop = user_data[drop_idx]
user_data_use = user_data[~drop_idx]
for source in sources: # iterate over user data entries
# get index logic for each assigned group
df = get_source_logic(file_data, user_data_use, source)
logic_index_df = pd.concat([logic_index_df, df], axis=1)
# get drop_logic
df = get_drop_logic(file_data, user_data_drop, source)
drop_df = pd.concat([drop_df, df], axis=1)
# add columns from file to data
    add_columns = ['animal_id', 'folder_path', 'file_name', 'file_length',
                   'channel_id', 'block', 'sampling_rate', 'brain_region']
index_df = pd.concat([index_df, file_data[add_columns]], axis=1)
# get time
index_df['start_time'] = 1
index_df['stop_time'] = file_data['file_length']
# get category with group names
groups_ids = get_categories(user_data_use)
# convert logic to groups
index_df = convert_logicdf_to_groups(index_df, logic_index_df, groups_ids)
# get time and comments
obj = GetComments(file_data, user_data_use, 'comment_text', 'comment_time')
index_df, com_warning = obj.add_comments_to_index(index_df)
# reset index and rename previous index to file_id
index_df = index_df.rename_axis('file_id').reset_index()
# check if user selected time exceeds bounds
if (index_df['start_time']<0).any() or (index_df['start_time']>index_df['file_length']).any():
raise Exception('Start time exceeds bounds.')
elif (index_df['stop_time']<0).any() or (index_df['stop_time']>index_df['file_length']).any():
raise Exception('Stop time exceeds bounds.')
# update group columns
group_columns = list(index_df.columns[index_df.columns.get_loc('stop_time')+1:]) + ['brain_region']
# remove rows containing drop
region_drop = pd.DataFrame((index_df['brain_region'] == 'drop').rename('drop'))
if len(drop_df) != 0:
drop_df = pd.concat([drop_df]*int(len(region_drop)/len(drop_df))
, axis=0).reset_index(drop=True)
drop_df = pd.concat((drop_df, region_drop), axis=1)
index_df = index_df[~drop_df.any(axis=1).values]
# check if groups were not detected
if index_df.isnull().values.any():
        warning_str = 'Warning: Some conditions were not found!!'
# put categories at end
index_df = index_df[ [x for x in list(index_df.columns) if x not in group_columns] + group_columns]
return index_df, group_columns, warning_str + com_warning
def get_index_array(folder_path, user_data):
"""
Get file data, channel array and create index
for experiments according to user selection
Parameters
----------
    folder_path : str, path to the folder containing labchart files
user_data : 2D list, user search and grouping parameters from datatable
Returns
-------
index_df: pd.DataFrame, with index
group_columns: list, column names that denote groups
warning_str: str, string used for warning
"""
# get dataframe and convert to lower case
user_data = pd.DataFrame(user_data)
user_data = user_data.apply(lambda x: x.astype(str).str.lower())
# remove rows with missing inputs
user_data = user_data.dropna(axis = 0)
warning_str = ''
# ensure group names are unique
if len(user_data['Assigned Group Name']) != len(user_data['Assigned Group Name'].unique()):
warning_str += 'Duplicate -Assigned Group Names- were found. Please check that -Assigned Group Names- are unique'
# get channel order
channel_structures = get_channel_structures(user_data)
# get all file data in dataframe
file_data = get_file_data(folder_path, channel_structures)
# add animal id
file_data, user_data = add_animal_id(file_data, user_data)
# get index dataframe
index_df, group_columns, warning_add = create_index_array(file_data, user_data)
warning_str += warning_add
    # check if no group conditions beyond brain region were found
    if len(list(index_df.columns[index_df.columns.get_loc('stop_time')+1:])) < 2:
        warning_str += 'Warning: Only the brain region column was found!!'
    # check if any file contains more than one block
    if np.sum(index_df.block.astype(int)) > 0:
        warning_str += 'Warning: Some files contain more than one block!!'
return index_df, group_columns, warning_str
if __name__ == '__main__':
# define path
folder_path = r'C:\Users\panton01\Desktop\example_files'
# get user table data example
user_data = pd.read_csv(r'C:\Users\panton01\Desktop\pydsp_analysis\user_data.csv')
# convert data frame to lower case
user_data = user_data.apply(lambda x: x.astype(str).str.lower())
# remove rows with no source
user_data = user_data[user_data.Source != '']
# get channel order
channel_structures = get_channel_structures(user_data)
# get all file data in dataframe
file_data = get_file_data(folder_path, channel_structures)
# get experiment index
index_df, group_columns, warning_str = create_index_array(file_data, user_data)
|
{"hexsha": "defa5636b177097e8af817a6ca015cfbf3b3e448", "size": 14647, "ext": "py", "lang": "Python", "max_stars_repo_path": "backend/filter_table.py", "max_stars_repo_name": "SAKEverse/sake-plan", "max_stars_repo_head_hexsha": "48162b9a4f4cd6fefd557dc09f723912155f6581", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "backend/filter_table.py", "max_issues_repo_name": "SAKEverse/sake-plan", "max_issues_repo_head_hexsha": "48162b9a4f4cd6fefd557dc09f723912155f6581", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backend/filter_table.py", "max_forks_repo_name": "SAKEverse/sake-plan", "max_forks_repo_head_hexsha": "48162b9a4f4cd6fefd557dc09f723912155f6581", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6422594142, "max_line_length": 144, "alphanum_fraction": 0.6345326688, "include": true, "reason": "import numpy", "num_tokens": 3327}
|
import cv2
import numpy as np
import os
import glob
#img_dir = "/home/mcaadmin/ALL_IDB1/im" # Enter Directory of all images
#data_path = os.path.join(img_dir,'*g')
#files = glob.glob(data_path)
#for f1 in files:
image = cv2.imread("/home/mcaadmin/infected/Im001_1.png")
# convert BGR to HSV
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV; OpenCV 8-bit HSV uses H in [0, 179]
# and S, V in [0, 255], so the upper bound must stay within those limits
lower_blue = np.array([80, 120, 70])
upper_blue = np.array([179, 255, 220])
# threshold the HSV image to keep only blue colors
mask = cv2.inRange(hsv, lower_blue, upper_blue)
# bitwise-AND mask and original image
res = cv2.bitwise_and(image, image, mask=mask)
cv2.imwrite("/home/mcaadmin/blueinfected/Im001_1.png", res)
# reload the masked image and binarize it
image = cv2.imread("/home/mcaadmin/blueinfected/Im001_1.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = 20
# maxval must be at most 255 for an 8-bit image
binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imwrite("/home/mcaadmin/binaryinfected/Im001_1.png", binary)
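# Hedged sketch (added example, not in the original): a batch version built on
# the glob scaffolding commented out above; directory paths are assumptions.
def process_folder(img_dir="/home/mcaadmin/ALL_IDB1/im", out_dir="/home/mcaadmin/binaryinfected"):
    """Apply the same HSV mask and binary threshold to every image in img_dir."""
    for path in glob.glob(os.path.join(img_dir, '*g')):
        img = cv2.imread(path)
        if img is None:  # skip files OpenCV cannot read
            continue
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower_blue, upper_blue)
        res = cv2.bitwise_and(img, img, mask=mask)
        gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)[1]
        cv2.imwrite(os.path.join(out_dir, os.path.basename(path)), binary)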
|
{"hexsha": "4cff97a4ae0d512839dbefdd84232365e27a4afc", "size": 1029, "ext": "py", "lang": "Python", "max_stars_repo_path": "conversion.py", "max_stars_repo_name": "JessyRajan/wbcdiseases", "max_stars_repo_head_hexsha": "f1ad6630252d65e296265b58e19a99c85025fd7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "conversion.py", "max_issues_repo_name": "JessyRajan/wbcdiseases", "max_issues_repo_head_hexsha": "f1ad6630252d65e296265b58e19a99c85025fd7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "conversion.py", "max_forks_repo_name": "JessyRajan/wbcdiseases", "max_forks_repo_head_hexsha": "f1ad6630252d65e296265b58e19a99c85025fd7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2647058824, "max_line_length": 71, "alphanum_fraction": 0.7278911565, "include": true, "reason": "import numpy", "num_tokens": 321}
|
#check (.=.)
-- #print (.=.)
-- #print (.=.)
#check (.==.)
#check Eq.refl
#check @Eq.refl
#check Eq.refl 5
#check @Eq.symm
#check @Eq.trans
#check @Eq.subst
#print Eq
def mod7Rel (x y : Nat) : Prop :=
x % 7 = y % 7
#check mod7Rel
theorem thm_1: mod7Rel 3 10 := rfl
#print thm_1
theorem thm_2: mod7Rel 3 10 := by
simp [mod7Rel]
/- Example of an equality proof -/
-- example: 5=3+2 := Eq.refl -- error: missing argument a for "a = a"
example: 5=3+2 := Eq.refl 5 -- concrete value for a
example: 5=3+2 := Eq.refl _ -- auto unification for a; in order to prove 5=5, a should be obviously 5.
example: 5=3+2 := rfl -- rfl notation
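-- Hedged sketch (added examples, not in the original): the same fact
-- manipulated with Eq.symm and Eq.trans
example : 3 + 2 = 5 := (rfl : 5 = 3 + 2).symm
example : 5 = 2 + 3 := Eq.trans (rfl : 5 = 3 + 2) (Nat.add_comm 3 2)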
#check rfl
#print rfl
section
variable (α : Type)
variable (a b : α)
variable (f g : α → Nat)
variable (h₁ : a = b)
variable (h₂ : f = g)
example : f a = f b := congrArg f h₁
example : f a = g a := congrFun h₂ a
example : f a = g b := congr h₂ h₁
end
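-- Hedged sketch (added example): congrArg applied to a concrete function
example : Nat.succ 5 = Nat.succ (3 + 2) := congrArg Nat.succ (rfl : 5 = 3 + 2)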
#print id
#check Nat
#check @congr
#check @congrArg
#check congrArg id (Eq.refl 5)
#check @congrFun
#check @Equivalence
#print Equivalence
theorem thm_3 : Equivalence mod7Rel :=
by
constructor
intros; rfl
intro x y p1
simp [ mod7Rel ] at *
rw [ p1 ]
intro x y z hxy hyz
simp [mod7Rel] at *
apply Eq.trans hxy hyz
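-- Hedged sketch (added example): the same equivalence as a term-mode proof,
-- using structure-instance syntax instead of tactics
theorem thm_3' : Equivalence mod7Rel :=
  { refl := fun _ => rfl,
    symm := fun h => Eq.symm h,
    trans := fun h₁ h₂ => Eq.trans h₁ h₂ }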
|
{"author": "shinsa82", "repo": "lean-devcontainer", "sha": "7edc7f0ad9aee38f686f161b6b65be805f2f538d", "save_path": "github-repos/lean/shinsa82-lean-devcontainer", "path": "github-repos/lean/shinsa82-lean-devcontainer/lean-devcontainer-7edc7f0ad9aee38f686f161b6b65be805f2f538d/quot.lean"}
|