text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
def min_max(numbers):
    """Return a (minimum, maximum) tuple for the given iterable of numbers."""
    smallest = min(numbers)
    largest = max(numbers)
    return smallest, largest
class Person:
    """A minimal person record holding just a name."""

    def __init__(self, name):
        # Record the display name on the instance.
        self.name = name
class Student(Person):
    """A Person who is enrolled as a student, identified by an id."""

    def __init__(self, name, id):
        # Let Person handle the name, then attach the student id.
        super().__init__(name)
        self.id = id
import numpy as np
import pandas as pd
def nonpositives(x):
    """Predicate: True when x is less than or equal to zero."""
    is_nonpositive = x <= 0
    return is_nonpositive
message = "Ping!"  # Module-level text yielded by pong() on each iteration.
def pong(count):
    """Generator: print "Pong!" and yield the module-level message, count times."""
    for _ in range(count):
        print("Pong!", end=" ")
        yield message
def my_decorator(function):
    """Decorator: bracket a call to `function` between "<-" and "->" markers."""
    def bracketed_call():
        print("<-", end="")
        function()
        print("->", end="")
    return bracketed_call
@my_decorator
def some_function():
    """Print a single pipe character (output is bracketed by the decorator)."""
    print("|", end="")
if __name__ == '__main__':
    # Build a 5x4 frame of the values 0..19 with a custom (unsorted) index.
    df = pd.DataFrame(np.arange(20).reshape(5, 4), index=[1, 3, 4, 2, 5])
    # Split into features (all but the last column) and target (last column).
    X = df.iloc[:, :-1]
    y = df.iloc[:, -1]
    print(X)
    print(y)
# data = np.arange(20).reshape(4,5)
# df = pd.DataFrame(data)
#
# print(df)
# print(df.sum())
# for message in pong(2):
# print(message, end=" ")
# 'raining'.find('z')
# data = np.arange(1,17).reshape(4,4)
# for i in range(4):
# data[i][3-i] *= -1
# print(data)
# x = [4, -1, 0, 3, 5]
# sorted(x)
# for i in range(3):
# print("Hello", end=" ")
# if i < max(x[:i+1]):
# break
# else:
# print("world!")
# list_of_numbers = [1,2,3]
# str_to_numbers = str(n) for n in list_of_numbers
# print(type(str_to_numbers))
# df = pd.DataFrame(np.arange(20).reshape(5,4), index=[1,3,4,2,5])
# print(df.loc[1:2])
# B = {1, 2, 3}
# A = {1, 2, 3, 4, 5, 6}
# print(len(A), len(B))
# print(A<B)
# value1, value2, value3, value4 = True, False, False, True
# print(value1 or value2 and value3 or not value4)
# init_tuple = ()
# print(init_tuple.__len__())
# a = [-10, 27, 1000, -1, 0, -30]
# result = [x for x in filter(nonpositives, a)]
# print(result)
# data = np.arange(20).reshape(4,5)
# print(data.shape)
# test = Student("Tom", 123)
# print(test.name)
# numbers = [1,3,5.0]
# min_number, max_number = min_max(numbers)
# min_max_numbers = min_max(numbers)
#
# print(type(min_number))
# print(type(max_number))
# print(type(min_max_numbers))
# names = ["Snowball", "Chewy", "Bubbles", "Gruff"]
# animals = ["cat", 'dog', "fish", "goat"]
# ages=[1,2,2,6]
# t = zip(names, animals, ages)
# for name, animal, age in t:
# print(f"The {animal} {name} is {age}")
# languages = ["Python", "C++", "Javascript"]
# typing = ["dynamic", "static", "dynamic"]
#
# lang_dict = {
# lang: type_system
# for lang, type_system in zip(languages, typing)
# if type_system == "dynamic"
# }
#
# print(lang_dict)
# text = "paranaue"
# print(text[-5:-2])
# round1 = ["Chuck Norris", "Bruce Lee", "Jackie Chan"]
# round2 = round1.copy()
# round2.remove("Jackie Chan")
# print(round1)
# print(round2)
|
{"hexsha": "941bdddcb07fb6d3ea88ad678f3b28577f6cfd29", "size": 3029, "ext": "py", "lang": "Python", "max_stars_repo_path": "module1/src/test.py", "max_stars_repo_name": "gbrsouza/NLP", "max_stars_repo_head_hexsha": "a94f9ab21351b4592725fb4f11c0ed594622fc81", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "module1/src/test.py", "max_issues_repo_name": "gbrsouza/NLP", "max_issues_repo_head_hexsha": "a94f9ab21351b4592725fb4f11c0ed594622fc81", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "module1/src/test.py", "max_forks_repo_name": "gbrsouza/NLP", "max_forks_repo_head_hexsha": "a94f9ab21351b4592725fb4f11c0ed594622fc81", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.437037037, "max_line_length": 70, "alphanum_fraction": 0.5328491251, "include": true, "reason": "import numpy", "num_tokens": 956}
|
"""
"""
import numpy as np
# Public API: the three light-curve model functions plus their
# list-argument variants (used where fitters pass a parameter vector).
__all__ = ["bazin09", "karpenka12", "firth17",
           "bazin09_listarg", "karpenka12_listarg", "firth17_listarg",
           "_defined_models"]
# Names of the light-curve models implemented in this module.
_defined_models = ["bazin09", "karpenka12", "firth17"]
def bazin09(x, a, t_0, t_rise, t_fall):
    """Bazin et al. (2009) light-curve model.

    Exponential decay (timescale t_fall) modulated by a sigmoid rise
    (timescale t_rise), both centred on t_0 and scaled by amplitude a.
    """
    decay = np.exp(-(x - t_0) / t_fall)
    rise = 1.0 + np.exp(-(x - t_0) / t_rise)
    return a * decay / rise
def bazin09_listarg(x, params):
    """bazin09 with parameters packed as [a, t_0, t_rise, t_fall]."""
    a = params[0]
    t_0 = params[1]
    t_rise = params[2]
    t_fall = params[3]
    return a * np.exp(-(x - t_0) / t_fall) / (1.0 + np.exp(-(x - t_0) / t_rise))
def karpenka12(x, a, b, t_0, t_1, t_rise, t_fall):
    """Karpenka et al. (2012) light-curve model.

    Bazin-style decay/rise profile (t_0, t_rise, t_fall) with an extra
    quadratic term of weight b centred on t_1, scaled by amplitude a.
    """
    quadratic = 1. + b * (x - t_1) * (x - t_1)
    decay = np.exp(-(x - t_0) / t_fall)
    rise = 1. + np.exp(-(x - t_0) / t_rise)
    return a * quadratic * decay / rise
def karpenka12_listarg(x, params):
    """karpenka12 with parameters packed as [a, b, t_0, t_1, t_rise, t_fall]."""
    a, b = params[0], params[1]
    t_0, t_1 = params[2], params[3]
    t_rise, t_fall = params[4], params[5]
    return a * (1. + b * (x - t_1) * (x - t_1)) * np.exp(-(x - t_0) / t_fall) / (1. + np.exp(-(x - t_0) / t_rise))
def firth17(x, a, b, t_0, t_1, t_2, t_x, t_rise, t_fall):
    """Firth et al. (2017) light-curve model.

    Karpenka-style profile with a second sigmoid factor centred on t_2
    (timescale t_x) folded into the denominator.
    """
    quadratic = 1. + b * (x - t_1) * (x - t_1)
    num = a * quadratic * np.exp(-(x - t_0) / t_fall)
    den = (1. + np.exp(-(x - t_0) / t_rise)) / (1. + np.exp(-(x - t_2) / t_x))
    return num / den
def firth17_listarg(x, params):
    """firth17 with parameters packed as
    [a, b, t_0, t_1, t_2, t_x, t_rise, t_fall]."""
    a, b = params[0], params[1]
    t_0, t_1 = params[2], params[3]
    t_2, t_x = params[4], params[5]
    t_rise, t_fall = params[6], params[7]
    numerator = a * (1. + b * (x - t_1) * (x - t_1)) * np.exp(-(x - t_0) / t_fall)
    denominator = (1. + np.exp(-(x - t_0) / t_rise)) / (1. + np.exp(-(x - t_2) / t_x))
    return numerator / denominator
|
{"hexsha": "ec076698b4e161ecafbeef71caf5a40ce65c0977", "size": 1428, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycocosn/models.py", "max_stars_repo_name": "RobFirth/pycoco", "max_stars_repo_head_hexsha": "1c9be662b6dd67f8be5423568bb3ecbff2979492", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-06-06T06:58:36.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-06T06:58:36.000Z", "max_issues_repo_path": "pycocosn/models.py", "max_issues_repo_name": "UoS-SNe/pycoco", "max_issues_repo_head_hexsha": "bbcb09b6c8fde7e0c4464bfbd574a42e09dbfed2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-12-11T20:07:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-21T14:40:29.000Z", "max_forks_repo_path": "pycocosn/models.py", "max_forks_repo_name": "RobFirth/pycoco", "max_forks_repo_head_hexsha": "1c9be662b6dd67f8be5423568bb3ecbff2979492", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0, "max_line_length": 158, "alphanum_fraction": 0.5490196078, "include": true, "reason": "import numpy", "num_tokens": 580}
|
/* Copyright (c) 2010-2014, Delft University of Technology
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* - Neither the name of the Delft University of Technology nor the names of its contributors
* may be used to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Changelog
* YYMMDD Author Comment
* 150411 D. Dirkx Migrated and updated from personal code.
*
* References
*
* Notes
*
*/
#define BOOST_TEST_MAIN
#include <fstream>
#include <limits>
#include <map>
#include <boost/test/floating_point_comparison.hpp>
#include <boost/test/unit_test.hpp>
#include <Eigen/Core>
#include <Tudat/Astrodynamics/BasicAstrodynamics/orbitalElementConversions.h>
#include <Tudat/Basics/testMacros.h>
#include <Tudat/Mathematics/BasicMathematics/basicMathematicsFunctions.h>
#include <Tudat/Mathematics/BasicMathematics/mathematicalConstants.h>
#include "Tudat/Astrodynamics/BasicAstrodynamics/UnitTests/keplerPropagatorTestData.h"
#include "Tudat/Astrodynamics/BasicAstrodynamics/keplerPropagator.h"
#include "Tudat/Astrodynamics/Ephemerides/keplerEphemeris.h"
#include "Tudat/InputOutput/basicInputOutput.h"
namespace tudat
{
namespace unit_tests
{
BOOST_AUTO_TEST_SUITE( test_keplerEphemeris )
//! Test 1: Comparison of KeplerEphemeris output with benchmark data from (Melman, 2010).
//! (see testPropagateKeplerOrbit_Eccentric_Melman).
BOOST_AUTO_TEST_CASE( testKeplerEphemerisElliptical )
{
    // Load the expected propagation history.
    // Create expected propagation history.
    PropagationHistory expectedPropagationHistory = getODTBXBenchmarkData( );

    // Set Earth gravitational parameter [m^3 s^-2].
    const double earthGravitationalParameter = 398600.4415e9;

    // Compute propagation history.
    PropagationHistory computedPropagationHistory;
    computedPropagationHistory[ 0.0 ] = expectedPropagationHistory[ 0.0 ];

    // Ephemeris seeded with the benchmark's state at epoch 0.0.
    ephemerides::KeplerEphemeris keplerEphemeris(
                expectedPropagationHistory[ 0.0 ],
                0.0, earthGravitationalParameter );

    for( PropagationHistory::iterator stateIterator = expectedPropagationHistory.begin( );
         stateIterator != expectedPropagationHistory.end( ); stateIterator++ )
    {
        // Compute next entry: evaluate the ephemeris at the benchmark epoch,
        // then convert the Cartesian state back to Keplerian elements so the
        // two histories can be compared element-wise.
        computedPropagationHistory[ stateIterator->first ] =
                orbital_element_conversions::convertCartesianToKeplerianElements(
                    keplerEphemeris.getCartesianStateFromEphemeris( stateIterator->first ),
                    earthGravitationalParameter );

        // Check that computed results match expected results. Only element 5 of
        // the Keplerian state is compared here (presumably the fast-varying
        // anomaly — confirm against Tudat's Keplerian element ordering).
        BOOST_CHECK_CLOSE_FRACTION(
                    computedPropagationHistory[ stateIterator->first ]( 5 ),
                    expectedPropagationHistory[ stateIterator->first ]( 5 ),
                    2.0e-14 );
    }
}
//! Test 2: Comparison of KeplerEphemeris with that of GTOP (hyperbolic).
//! (see testPropagateKeplerOrbit_hyperbolic_GTOP).
BOOST_AUTO_TEST_CASE( testKeplerEphemerisHyperbolic )
{
    // Load the expected propagation history.
    PropagationHistory expectedPropagationHistory = getGTOPBenchmarkData( );

    // Compute propagation history.
    PropagationHistory computedPropagationHistory;
    computedPropagationHistory[ 0.0 ] = expectedPropagationHistory[ 0.0 ];

    // Ephemeris seeded with the GTOP benchmark's state at epoch 0.0, using
    // the gravitational parameter that the GTOP data was generated with.
    ephemerides::KeplerEphemeris keplerEphemeris(
                expectedPropagationHistory[ 0.0 ],
                0.0, getGTOPGravitationalParameter( ) );

    for( PropagationHistory::iterator stateIterator = expectedPropagationHistory.begin( );
         stateIterator != expectedPropagationHistory.end( ); stateIterator++ )
    {
        // Compute next entry: evaluate the ephemeris, convert back to
        // Keplerian elements, and store under the same epoch key.
        computedPropagationHistory[ stateIterator->first ] =
                orbital_element_conversions::convertCartesianToKeplerianElements(
                    keplerEphemeris.getCartesianStateFromEphemeris( stateIterator->first ),
                    getGTOPGravitationalParameter( ) );

        // Check that computed results match expected results (element 5 only;
        // tighter tolerance than Test 1).
        BOOST_CHECK_CLOSE_FRACTION(
                    computedPropagationHistory[ stateIterator->first ]( 5 ),
                    expectedPropagationHistory[ stateIterator->first ]( 5 ),
                    1.0e-15 );
    }
}
BOOST_AUTO_TEST_SUITE_END( )
} // namespace unit_tests
} // namespace tudat
|
{"hexsha": "5942811209d10c7d48666c735b694204f45d2192", "size": 5745, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Tudat/Astrodynamics/Ephemerides/UnitTests/unitTestKeplerEphemeris.cpp", "max_stars_repo_name": "JPelamatti/ThesisTUDAT", "max_stars_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tudat/Astrodynamics/Ephemerides/UnitTests/unitTestKeplerEphemeris.cpp", "max_issues_repo_name": "JPelamatti/ThesisTUDAT", "max_issues_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tudat/Astrodynamics/Ephemerides/UnitTests/unitTestKeplerEphemeris.cpp", "max_forks_repo_name": "JPelamatti/ThesisTUDAT", "max_forks_repo_head_hexsha": "b94ce35fb7c8fa44ae83238e296a979dfa3adfe8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-05-30T03:42:22.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-30T03:42:22.000Z", "avg_line_length": 41.9343065693, "max_line_length": 99, "alphanum_fraction": 0.7209747607, "num_tokens": 1274}
|
'''
2048 GAME PROJECT: AI Bot.
Date created:
03/2022
Date edited:
04/2022
Author:
Filip J. Cierkosz
'''
import random
import numpy as np
import pygame
from pygame.locals import *
from time import sleep, time
from graphics import GRID_COLOR, CELL_COLORS, GRID_FONT_COLOR, FONT_BOARD, FONT_SIZES, WINDOW_FONT_COLOR
class GameBot:
    '''
    -----------
    Class to create a game board for 2048 to be solved by an AI bot.
    -----------
    '''
    def __init__(self):
        '''
        Constructor to initialize an appropriately-sized grid for the game and set all attributes.

        Parameters:
            self
        '''
        # Permitted moves on the grid.
        self.moves = ['right', 'left', 'up', 'down']
        # Default grid size.
        self.GRID_SIZE = 5
        # Board state: GRID_SIZE x GRID_SIZE integer array; 0 marks an empty cell.
        self.grid = np.zeros((self.GRID_SIZE, self.GRID_SIZE), dtype=int)
        self.score = 0
        self.timer = 0
        # GUI adjustments.
        self.HEIGHT = 540
        self.WIDTH = 500
        # Space at the top of GUI that is left to display time and score.
        self.TOP_SPACE = self.HEIGHT-self.WIDTH
        # Space between squares in the grid.
        self.SPACE = 5
        # Square side length chosen so the grid plus gaps exactly fills the width.
        self.SQUARE_SIZE = (self.WIDTH-(self.GRID_SIZE+1)*self.SPACE)/self.GRID_SIZE
        pygame.init()
        pygame.display.set_caption("2048: GAME")
        self.window = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        # Initialize all the fonts for the game.
        pygame.font.init()
        # Board font is looked up by grid size; FONT_SIZES is keyed by strings.
        self.fontGame = pygame.font.SysFont(FONT_BOARD[0],
                                            FONT_SIZES[f'{self.GRID_SIZE}'],
                                            FONT_BOARD[1])
        self.fontScore = pygame.font.SysFont(FONT_BOARD[0],
                                             FONT_SIZES['score'],
                                             FONT_BOARD[1])
        self.fontMsg = pygame.font.SysFont(FONT_BOARD[0],
                                           FONT_SIZES['finalMsg'],
                                           FONT_BOARD[1])
@staticmethod
def updateArr(curr, self):
'''
Returns an updated array for row/column of the grid.
Parameters:
self
cur (array) : Array of numbers in the current state of column/row.
Returns:
new (np.array) : Updated column/row to the grid.
'''
# Append all the non-zero elements of the curr array.
temp = [n for n in curr if (n!=0)]
new = []
skip = False
# Iterate through the elements of the temp array.
for i in range(len(temp)):
# Skip an element that was just added (so that it is not repeated).
if (skip):
skip = False
continue
# If two consecutive elements are equal, add them and append in new.
if (i!=len(temp)-1 and temp[i]==temp[i+1]):
skip = True
new.append(2*temp[i])
# Otherwise, append a single number.
else:
new.append(temp[i])
# Fill the rest of the array with zeros, so that it matches the size.
while (len(new)!=len(curr)):
new.append(0)
return np.array(new)
def draw(self):
    '''
    Draws the game window.

    Parameters:
        self
    '''
    # Set the background color.
    self.window.fill((GRID_COLOR))
    # Iterate in order to display squares in the grid.
    for r in range(self.GRID_SIZE):
        for c in range(self.GRID_SIZE):
            # Define the coordinates for the current square to draw.
            x = (c+1)*self.SPACE+c*self.SQUARE_SIZE
            y = self.TOP_SPACE+(r+1)*self.SPACE+r*self.SQUARE_SIZE
            # Get the number from the cell to define its corresponding color.
            num = self.grid[r][c]
            # If a number on the grid is greater or equal to 2048, it will not
            # change anymore, since dictionary has colors up to the value of 2048.
            if (num>=2048):
                color = CELL_COLORS[2048]
            else:
                color = CELL_COLORS[num]
            # Draw the square in the grid.
            pygame.draw.rect(self.window,
                             color,
                             pygame.Rect(x,y,self.SQUARE_SIZE,self.SQUARE_SIZE),
                             border_radius=8)
            # Display numbers for each square. Do NOT draw zeros.
            if (num!=0):
                textArea = self.fontGame.render(f'{num}', True, GRID_FONT_COLOR)
                self.window.blit(textArea, textArea.get_rect(center=(x+self.SQUARE_SIZE/2, y+self.SQUARE_SIZE/2)))
def insertNewNum(self, n=1):
'''
Updates a grid with a new number.
Probability rates for values: 2 (100%).
Parameters:
self
n (int) : Quantity of new numbers to be inserted.
'''
availableCoords = []
for row, col in np.ndindex(self.grid.shape):
# If the value is equal to 0, it means it is available.
if (self.grid[row][col]==0):
availableCoords.append((row, col))
# Append the new value in the grid in random available position.
for c in random.sample(availableCoords, k=n):
self.grid[c] = 2
def makeMove(self, move):
'''
Makes a move on the board based on the AI choice.
If you wish to move to the left/right, look at the rows of the grid.
If you wish to move up/down, look at the columns.
Parameters:
self
move (str) : String describing the user's move; either 'right',
'left', 'up', or 'down'.
'''
for i in range(self.GRID_SIZE):
# Move to the LEFT. Define the row.
if (move=='left'):
curr = self.grid[i]
# Move to the RIGHT. Define the reversed row.
elif (move=='right'):
curr = self.grid[i][::-1]
# Move UP. Define the column.
elif (move=='up'):
curr = self.grid[:, i]
# Move DOWN. Define the reversed column.
elif (move=='down'):
curr = self.grid[:, i][::-1]
# Update the row/column. Add any elements (if possible).
new = self.updateArr(curr, self)
# Update the grid for the move to the left or up.
if (move=='left'):
self.grid[i] = new
elif (move=='up'):
self.grid[:, i] = new
# Update in the grid for the move to the right or down.
# The updated array has to be reversed again, so that non-zero
# elements are at the end of the new array.
elif (move=='right'):
self.grid[i] = new[::-1]
elif (move=='down'):
self.grid[:, i] = new[::-1]
def updateScore(self):
'''
Updates the score.
The score is denoted by the maximum value in the grid.
Parameters:
self
'''
self.score = np.max(self.grid)
def checkIfOver(self):
'''
Checks if the game is over.
Parameters:
self
Returns:
True/False (boolean) : True if game is over; False otherwise.
'''
original = self.grid.copy()
for mv in self.moves:
self.makeMove(mv)
# Check if the grids are equal after invoking a move.
equal = (self.grid==original).all()
# If the grids are not equal, it means it is possible to
# continue the game. Since, there are still available moves to make.
if (not equal):
self.grid = original
return False
# If none of the moves changes the state of the grid, it denotes the bot loses.
return True
def setTimer(self):
'''
Sets the timer.
Parameters:
self
Returns:
start (time) : Started timer.
'''
start = time()
return start
def stopTimer(self, start):
'''
Stops the timer and returns the time of execution.
Parameters:
self
start (time) : Started timer.
Returns:
executionTime (time) : Time of execution.
'''
stop = time()
executionTime = stop-start
return executionTime
def play(self):
    '''
    Main method to play the game.

    Runs the draw/search/move loop until the bot either reaches a 2048
    tile (win) or has no legal move left (loss).

    Parameters:
        self

    Returns:
        True/False (boolean) : True if game is won, False otherwise.
    '''
    # Initialize the board with 2 starting numbers in the grid.
    self.insertNewNum(n=2)
    start = self.setTimer()
    # Play as long as the game is neither over, nor won by the AI bot.
    while (True):
        # Draw the board, update and display the current score.
        self.draw()
        self.updateScore()
        textArea = self.fontScore.render(f'SCORE: {self.score:06d}', True, WINDOW_FONT_COLOR)
        self.window.blit(textArea, textArea.get_rect(center=(115,20)))
        # Update the screen.
        pygame.display.flip()
        # If the AI bot reaches the goal state, i.e. score: 2048, it denotes win.
        if (self.score==2048):
            self.window.fill((GRID_COLOR))
            self.timer = self.stopTimer(start)
            # Display the final message.
            textArea = self.fontMsg.render(f'BOT WON THE GAME!', True, WINDOW_FONT_COLOR)
            self.window.blit(textArea, textArea.get_rect(center=(self.WIDTH/2,self.HEIGHT/2-50)))
            textArea = self.fontMsg.render(f'TIME PLAYED: {self.timer:.1f} SEC', True, WINDOW_FONT_COLOR)
            self.window.blit(textArea, textArea.get_rect(center=(self.WIDTH/2,self.HEIGHT/2+20)))
            # Update the window.
            pygame.display.flip()
            # Wait 1 second to display the final screen.
            sleep(1)
            return True
        # Make a copy of the old grid.
        oldGrid = self.grid.copy()
        # Determine the best AI-searched move.
        mv = self.searchMove()
        # Execute the AI selected move.
        self.makeMove(mv)
        # Check if it is possible to continue the game after the move.
        if (self.checkIfOver()):
            self.window.fill((GRID_COLOR))
            self.timer = self.stopTimer(start)
            # Display the final message.
            textArea = self.fontMsg.render(f'BOT LOST.', True, WINDOW_FONT_COLOR)
            self.window.blit(textArea, textArea.get_rect(center=(self.WIDTH/2,self.HEIGHT/2-50)))
            textArea = self.fontMsg.render(f'TIME PLAYED: {self.timer:.1f} SEC', True, WINDOW_FONT_COLOR)
            self.window.blit(textArea, textArea.get_rect(center=(self.WIDTH/2,self.HEIGHT/2+20)))
            # Update the window.
            pygame.display.flip()
            # Delay 1 second to display the final screen.
            sleep(1)
            return False
        # Check if the grids are equal. If the grids differ insert a new number.
        if (not (self.grid==oldGrid).all()):
            self.insertNewNum()
# STILL IN DEVELOPMENT STAGE!
def searchMove(self):
    '''
    AI bot searches the best move performing each of the available moves, then
    simulating future states of the game board. Finally, the best move is selected
    analyzing the costs (the higher the cost, the better the choice). The costs
    consist of such aspects as scores on the grid and empty spots.

    Parameters:
        self

    Returns:
        bestMv (st) : Selected best move for the current state of the
                      grid; either 'right', 'left', 'up', or 'down'.
    '''
    # Make a copy of the initial state of the grid.
    origGrid = self.grid.copy()
    # Dictionary to calculate costs per each move, based on scores and empty spots.
    costs = {mv:0 for mv in self.moves}
    # Determine the number of searches per one move and the depth of the search.
    searchesMv = 100
    depth = 10
    # Test each available move.
    for mv in self.moves:
        # Execute the move, check if possible to continue and get the score.
        self.makeMove(mv)
        gameOverMv = self.checkIfOver()
        scoreMv = np.max(self.grid)
        # If the game is not over and the move changes the state of the board,
        # insert new number, update scores, and perform the further search.
        if (not gameOverMv and not (self.grid==origGrid).all()):
            self.insertNewNum()
            costs[mv] += scoreMv
            counterMv = 1
        # Otherwise, restart the grid to the initial state and move to the next move.
        else:
            self.grid = origGrid.copy()
            continue
        # Perform simulation of later moves. NOTE(review): counterMv is not
        # reset per simulation, so the depth budget is shared across all
        # searchesMv iterations for this move — confirm this is intended.
        for i in range(searchesMv):
            searchBoard = self.grid.copy()
            gameOver = False
            while (not gameOver and counterMv<depth):
                searchBoard = self.grid.copy()
                # Execute a random move.
                self.makeMove(self.shuffleMove())
                gameOver = self.checkIfOver()
                simulatedScore = np.max(self.grid)
                if (not gameOver and not (self.grid==searchBoard).all()):
                    self.insertNewNum()
                    costs[mv] += simulatedScore
                    counterMv += 1
        # Increment the costs by the number of empty spots in the current state of grid.
        costs[mv] += 10*np.count_nonzero(self.grid==0)
        # Restart the grid to the initial state for the next move to be tested.
        self.grid = origGrid.copy()
    # Finally, restart the grid to the initial state one last time.
    self.grid = origGrid.copy()
    # Find and return the best searched move.
    bestMv = max(costs, key=costs.get)
    return bestMv
def shuffleMove(self):
'''
Shuffles a random move.
Parameters:
self
Returns:
randMv : One randomly selected move; either 'right', 'left',
'up', or 'down'.
'''
randMv = np.random.choice(self.moves, 1)
return randMv
|
{"hexsha": "1005e468dd6ead31f860a1a3571830f7a05689d6", "size": 15092, "ext": "py", "lang": "Python", "max_stars_repo_path": "AI-Implementation/bot.py", "max_stars_repo_name": "chizo4/2048-Project", "max_stars_repo_head_hexsha": "34d062b4b4a5eb9d9ec55d3d8b6e876dcb2967da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-16T21:36:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T21:36:54.000Z", "max_issues_repo_path": "AI-Implementation/bot.py", "max_issues_repo_name": "chizo4/2048-Project", "max_issues_repo_head_hexsha": "34d062b4b4a5eb9d9ec55d3d8b6e876dcb2967da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AI-Implementation/bot.py", "max_forks_repo_name": "chizo4/2048-Project", "max_forks_repo_head_hexsha": "34d062b4b4a5eb9d9ec55d3d8b6e876dcb2967da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1794871795, "max_line_length": 118, "alphanum_fraction": 0.5243837795, "include": true, "reason": "import numpy", "num_tokens": 3170}
|
#
# This file is part of KwarqsDashboard.
#
# KwarqsDashboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# KwarqsDashboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KwarqsDashboard. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
import time
import threading
import cv2
import numpy as np
import logutil
from image_logger import ImageLogger
import settings
class _FakeDetector(object):
def process_image(self, img):
return (img, None)
class ImageCapture(object):
    '''
    This class manages the image processing stuff. It determines what
    actions to take, and then performs them on another thread.

    The actual image processing takes place in a different class. That
    class processes the image, and then returns a dictionary that has
    targeting information in it.
    '''

    def __init__(self, detector=None, name=''):
        '''
        :param detector: object with a process_image(img) method, or
                         None to fall back to a do-nothing _FakeDetector
        :param name: optional label used to prefix options and logs
        '''
        # Fall back to the no-op detector when none was provided.
        self.detector = _FakeDetector() if detector is None else detector
        # An empty name means no prefix; otherwise use "<name>_".
        self.prefix = name if name == '' else '%s_' % name
        self.lock = threading.Lock()
        self.condition = threading.Condition(self.lock)
        self.image_log_enabled = False
        self.using_live_feed = False
        self.camera_widget = None
        self.started = False
def configure_options(self, parser):
'''
:param parser: an OptionParser instance
'''
name = self.prefix.replace('_', '-')
parser.add_option('--%scamera-ip' % name, dest='%scamera_ip' % self.prefix,
help='IP address of %s camera' % self.prefix)
parser.add_option('--%swebcam' % name, dest='%swebcam' % self.prefix,
help="Use webcam for %s camera" % self.prefix)
parser.add_option('--%sask' % name, dest='%sask' % self.prefix,
help="Ask for static images for %s camera" % self.prefix)
parser.add_option('--%sstatic' % name, dest='%sstatic_images' % self.prefix,
help="Load static images for %s camera" % self.prefix)
parser.add_option('--%slog' % name, dest='%slog' % self.prefix, action='store_true',
help="Log images for %s camera" % self.prefix)
def initialize(self, options):
    '''
    Configure this capture from parsed options and build (but do not
    start) the worker thread for static, live, or no processing.

    :param options: parsed options object produced by the parser that
                    configure_options() populated
    '''
    # initialize this here instead of globally because we can't
    # get the logger until it has been configured, and we need
    # to interact with the options parser
    import logging
    self.logger = logging.getLogger(__name__)

    from ui.util import get_directory

    def _get_option(name):
        # Read the prefixed attribute "<prefix><name>" from options.
        return getattr(options, '%s%s' % (self.prefix, name))

    def _set_option(name, val):
        # Write the prefixed attribute "<prefix><name>" on options.
        setattr(options, '%s%s' % (self.prefix, name), val)

    self.do_stop = False
    self.do_refresh = False
    self.use_webcam = _get_option('webcam')
    self.camera_ip = _get_option('camera_ip')
    self.img_logger = None

    if _get_option('ask'):
        # Interactively pick a directory of previously logged images.
        static_images = get_directory(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'logs')))
        if static_images is None:
            raise RuntimeError()
        _set_option('static_images', static_images)
        self.logger.info("%s ask: user selected %s", self.prefix, static_images)

    if _get_option('log'):
        self.img_logger = ImageLogger(self.prefix, options.log_dir)

    # detect live or static processing
    if _get_option('static_images') is not None:
        self._initialize_static(_get_option('static_images'))
        thread_fn = self._static_processing
    elif self.use_webcam is not None or self.camera_ip is not None:
        self.using_live_feed = True
        thread_fn = self._live_processing
    else:
        thread_fn = self._no_processing

    # Daemon thread so a running capture never keeps the process alive.
    self.thread = threading.Thread(target=thread_fn)
    self.thread.setDaemon(True)
def is_live_feed(self):
return self.using_live_feed
def set_camera_widget(self, camera_widget):
self.camera_widget = camera_widget
def set_detector(self, detector):
self.detector = detector
def start(self):
if self.started:
return
if self.img_logger is not None:
self.img_logger.start()
if not self.thread.is_alive():
self.thread.start()
self.started = True
def stop(self):
self.do_stop = True
with self.condition:
self.condition.notify()
if self.thread.is_alive():
self.thread.join()
if self.img_logger is not None:
self.img_logger.stop()
self.started = False
def enable_image_logging(self):
with self.lock:
self.image_log_enabled = True
def disable_image_logging(self):
with self.lock:
self.image_log_enabled = False
def refresh(self):
with self.condition:
self.do_refresh = True
self.condition.notify()
def _no_processing(self):
# called when no processing is being done
if self.camera_widget is not None:
self.camera_widget.set_error()
def _initialize_static(self, static_images):
    '''
    Prepare static-image mode: build the sorted list of image files and
    hook left/right/escape keyboard navigation onto the camera widget.

    :param static_images: path to a single image file or a directory
                          tree of images
    '''
    # TODO: should this stuff be here? or move it to the UI?
    import gtk

    path = static_images
    # Index of the image currently shown, and the direction of travel
    # (used to skip past unreadable files).
    self.idx = 0
    self.idx_increment = 1

    if not os.path.exists(path):
        self.logger.error("'%s' does not exist!" % path)
        raise RuntimeError()

    self.logger.info("Loading static images from %s", path)

    if not os.path.isdir(path):
        # A single file: process just that image.
        self.images = [path]
    else:
        # A directory: collect every file in the tree, sorted by name.
        self.images = []
        for path, dirs, files in os.walk(path):
            self.images += [os.path.join(path, f) for f in files]
        self.images.sort()

    # setup the key handler
    def _on_key_press(widget, event):
        # Left/right arrows step backwards/forwards through the images;
        # the worker thread is notified via the shared condition.
        if event.keyval == gtk.keysyms.Left:
            if self.idx > 0:
                with self.condition:
                    self.idx -= 1
                    self.idx_increment = -1
                    self.condition.notify()
        elif event.keyval == gtk.keysyms.Right:
            if self.idx < len(self.images):
                with self.condition:
                    self.idx += 1
                    self.idx_increment = 1
                    self.condition.notify()
        elif event.keyval == gtk.keysyms.Escape:
            gtk.main_quit()
        # return True otherwise we might lose focus
        return True

    def _on_button_pressed(widget, event):
        # Clicking the widget gives it keyboard focus for navigation.
        widget.grab_focus()

    # must be able to get focus to receive keyboard events
    self.camera_widget.set_can_focus(True)
    self.camera_widget.add_events(gtk.gdk.BUTTON_PRESS_MASK)
    self.camera_widget.grab_focus()
    self.camera_widget.connect('key-press-event', _on_key_press)
    self.camera_widget.connect('button-press-event', _on_button_pressed)
@logutil.exception_decorator(logger=None)
def _static_processing(self):
    '''
    Thread body for static-image mode: block on the condition variable
    until the user navigates (or refresh/stop is requested), then run
    the detector on the currently selected image.
    '''
    self.logger.info("Static processing thread starting")

    # Local copy of the last index processed; a mismatch with self.idx
    # means the user navigated to a different image.
    idx = -1

    # resume processing with the last image the user looked at
    last_img = settings.get('processing/last_img', None)
    for i, image_name in enumerate(self.images):
        if image_name == last_img:
            self.idx = i
            break

    while True:
        with self.condition:
            # wait until the user hits a key
            while idx == self.idx and not self.do_stop and not self.do_refresh:
                self.condition.wait()
            if self.do_stop:
                break
            idx = self.idx
            self.do_refresh = False

        # if the index is valid, then process an image
        if idx < len(self.images) and idx >= 0:
            image_name = self.images[idx]
            self.logger.info("Opening %s" % image_name)
            img = cv2.imread(image_name)
            if img is None:
                self.logger.error("Error opening %s: could not read file" % (image_name))
                self.camera_widget.set_error()
                # Skip past the unreadable file in the direction of travel.
                self.idx += self.idx_increment
                continue
            try:
                target_data = self.detector.process_image(img)
            except:
                logutil.log_exception(self.logger, 'error processing image')
                self.camera_widget.set_error(img)
            else:
                # Remember this image so the next session resumes here.
                settings.set('processing/last_img', image_name)
                settings.save()
                self.logger.info('Finished processing')
                # note that you cannot typically interact with the UI
                # from another thread -- but this function is special
                self.camera_widget.set_target_data(target_data)

    self.logger.info("Static processing thread exiting")
def _initialize_live(self):
    """Create and open the live video source.

    Returns the opened cv2.VideoCapture, or None when the connection
    attempt fails (the failure is logged).
    """
    vc = cv2.VideoCapture()
    vc.set(cv2.cv.CV_CAP_PROP_FPS, 1)
    # decide which source to open: IP camera MJPEG stream or local webcam
    if self.use_webcam is None:
        self.logger.info('Connecting to %s' % self.camera_ip)
        source = 'http://%s/mjpg/video.mjpg' % self.camera_ip
    else:
        self.logger.info('Connecting to webcam %s' % self.use_webcam)
        source = self.use_webcam
    if not vc.open(source):
        self.logger.error("Could not connect")
        return
    self.logger.info('Connected!')
    return vc
@logutil.exception_decorator(logger=None)
def _live_processing(self):
    """Background thread that reads and processes live camera frames.

    Repeatedly (re)connects to the camera, reads frames into a
    preallocated buffer, optionally logs one frame per second to disk,
    and runs the detector on each frame. Exits when self.do_stop is set
    (checked under self.lock).
    """
    self.logger.info("Live processing thread starting")
    while True:
        # check for exit condition
        with self.lock:
            if self.do_stop:
                break
        # open the video capture device; on failure, loop and retry
        vc = self._initialize_live()
        if vc is None:
            continue
        last_log = 0
        exception_occurred = False
        # Allocate a reusable buffer for reading frames.
        # BUGFIX: width/height were swapped -- image arrays are laid out
        # (rows, cols, channels) = (height, width, 3). Also cast to int,
        # since VideoCapture.get() returns floats.
        w = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
        h = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
        capture_buffer = np.empty(shape=(h, w, 3), dtype=np.uint8)
        while True:
            # check for exit condition
            with self.lock:
                if self.do_stop:
                    break
                image_log_enabled = self.image_log_enabled
            #
            # Read the video frame
            #
            retval, img = vc.read(capture_buffer)
            if retval:
                # log images to directory, at most once per second
                if self.img_logger is not None:
                    tm = time.time()
                    diff = tm - last_log
                    if diff >= 1:
                        if image_log_enabled:
                            self.img_logger.log_image(img)
                        # adjust for possible drift
                        if diff > 1.5:
                            last_log = tm
                        else:
                            last_log += 1
                #
                # Process the image
                #
                try:
                    target_data = self.detector.process_image(img)
                except:
                    # if it happened once, it'll probably happen again. Don't flood
                    # the logfiles...
                    if not exception_occurred:
                        logutil.log_exception(self.logger, 'error processing image')
                        exception_occurred = True
                    self.camera_widget.set_error(img)
                else:
                    if exception_occurred:
                        self.logger.info("Processing resumed, no more errors.")
                        exception_occurred = False
                    # note that you cannot typically interact with the UI
                    # from another thread -- but this function is special
                    self.camera_widget.set_target_data(target_data)
            else:
                if last_log == 0:
                    self.logger.error("Not able to connect to camera, retrying")
                else:
                    self.logger.error("Camera disconnected, retrying")
                self.camera_widget.set_error()
                break
    # BUGFIX: message previously said "Static"; this is the live thread
    self.logger.info("Live processing thread exiting")
|
{"hexsha": "c9cf9bcb0b35a18cc1a5624e7ae8aed236fcdbab", "size": 14404, "ext": "py", "lang": "Python", "max_stars_repo_path": "driver_station/src/common/image_capture.py", "max_stars_repo_name": "frc1418/2014", "max_stars_repo_head_hexsha": "2fc75b13efd5a986522a16e20ff0ccee66938724", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-04-16T18:08:36.000Z", "max_stars_repo_stars_event_max_datetime": "2015-04-16T18:08:36.000Z", "max_issues_repo_path": "driver_station/src/common/image_capture.py", "max_issues_repo_name": "frc1418/2014", "max_issues_repo_head_hexsha": "2fc75b13efd5a986522a16e20ff0ccee66938724", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "driver_station/src/common/image_capture.py", "max_forks_repo_name": "frc1418/2014", "max_forks_repo_head_hexsha": "2fc75b13efd5a986522a16e20ff0ccee66938724", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0462287105, "max_line_length": 113, "alphanum_fraction": 0.5097195224, "include": true, "reason": "import numpy", "num_tokens": 2638}
|
#!/usr/bin/python
# Copyright (C) 2010, 2011 by Eric Brochu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# from numpy import array, sqrt, nan, exp, arange
# from matplotlib.pylab import figure, subplot, plot, clf, poly_between, axvline
# import numpy.random as numpyrandom
# from random import sample, random
import ctypes
import ctypes.util
from ctypes import cdll, POINTER, c_int, c_double, c_char_p
from time import time
# from copy import copy
import pdb
from numpy import *
from numpy.linalg import inv, LinAlgError
from scipy.optimize import fmin_bfgs
from .kernel import GaussianKernel_ard, GaussianKernel_iso, MaternKernel3, MaternKernel5
#############################################################################
# this implementation of erf, cdf and pdf is substantially faster than
# the scipy implementation (a C implementation would probably be faster yet)
#############################################################################
#
# from: http://www.cs.princeton.edu/introcs/21function/ErrorFunction.java.html
# Implements the Gauss error function.
# erf(z) = 2 / sqrt(pi) * integral(exp(-t*t), t = 0..z)
#
# fractional error in math formula less than 1.2 * 10 ^ -7.
# although subject to catastrophic cancellation when z in very close to 0
# from Chebyshev fitting formula for erf(z) from Numerical Recipes, 6.2
def erf(z):
    """Approximate the Gauss error function,
    erf(z) = 2/sqrt(pi) * integral(exp(-t*t), t = 0..z).

    Chebyshev fit from Numerical Recipes 6.2; absolute error below
    1.2e-7 (subject to cancellation for z very near 0).
    """
    t = 1.0 / (1.0 + 0.5 * abs(z))
    # Horner evaluation of the fitted polynomial in t, innermost
    # coefficient first -- identical arithmetic to the nested form.
    poly = 0.0
    for coeff in (0.17087277, -0.82215223, 1.48851587, -1.13520398,
                  0.27886807, -0.18628806, 0.09678418, 0.37409196,
                  1.00002368):
        poly = coeff + t * poly
    magnitude = 1 - t * exp(-z * z - 1.26551223 + t * poly)
    # erf is odd: erf(-z) = -erf(z)
    return magnitude if z >= 0.0 else -magnitude
def CDF(x):
    """Standard-normal cumulative distribution function.

    Uses the fast erf() above; 0.707106 ~= 1/sqrt(2).
    """
    return 0.5 * (1 + erf(x * 0.707106))
def PDF(x):
    """Standard-normal probability density; 0.398942 ~= 1/sqrt(2*pi)."""
    return 0.398942 * exp(-(x**2/2))
class GaussianProcess(object):
    """Gaussian-process regression model (Python 2 era: xrange, numpy *).

    The correlation matrix self.R and its Cholesky factor self.L are
    maintained incrementally as observations are added, so posterior
    queries solve triangular systems rather than inverting R.
    """

    def __init__(self, kernel, X=None, Y=None, prior=None, noise=.1, gnoise=1e-4, G=None):
        """
        Initialize a Gaussian Process.

        @param kernel: kernel object to use
        @param prior: object defining the GP prior on the mean. must
            be a descendant of GPMeanPrior
        @param noise: noise hyperparameter sigma^2_n
        @param X: initial training data
        @param Y: initial observations
        @param gnoise: gradient-observation noise (scalar or per-dimension)
        @param G: initial gradient observations, or None
        """
        self.kernel = kernel
        self.prior = prior
        self.noise = noise
        self.gnoise = array(gnoise, ndmin=1)
        self.R = None
        # X and Y must be supplied together or not at all
        if (X is None and Y is not None) or (X is not None and Y is None):
            raise ValueError
        self.X = zeros((0,0))
        self.Y = zeros((0))
        self.G = None
        self.name = 'GP'        # for legend
        self.starttime = time() # for later analysis
        if X is not None:
            self.addData(X, Y)
        # "augmented" correlation state used by addObservationPoint()
        # in the preference subclass
        self.augR = None
        self.augL = None
        self.augX = None
        # mostly for testing/logging
        self.selected = None
        self.endtime = None

    def _computeCorrelations(self, X):
        """ compute correlations between data """
        # r: correlations among the N new points. eye()+noise puts
        #    1+noise on the diagonal; the off-diagonal noise entries are
        #    all overwritten by the kernel loop below.
        # m: cross-correlations between the M stored points and the new ones.
        M, (N,D) = len(self.X), X.shape
        r = eye(N, dtype=float) + self.noise
        m = empty((M,N))
        for i in xrange(N):
            for j in xrange(i):
                r[i,j] = r[j,i] = self.kernel.cov(X[i], X[j])
        for i in xrange(M):
            for j in xrange(N):
                m[i,j] = self.kernel.cov(self.X[i], X[j])
        return r, m

    def _computeAugCorrelations(self, X):
        """ compute correlations between data """
        # same as _computeCorrelations, but against the augmented set
        # self.augX (pending observation points)
        M, (N,D) = len(self.augX), X.shape
        r = eye(N, dtype=float) + self.noise
        m = empty((M,N))
        for i in xrange(N):
            for j in xrange(i):
                r[i,j] = r[j,i] = self.kernel.cov(X[i], X[j])
        for i in xrange(M):
            for j in xrange(N):
                m[i,j] = self.kernel.cov(self.augX[i], X[j])
        return r, m

    def posterior(self, X, getvar=True):
        """ Get posterior mean and variance for a point X. """
        # with no data, fall back to the prior mean (or 0) and unit variance
        if len(self.X)==0:
            if self.prior is None:
                if getvar:
                    return 0.0, 1.0
                else:
                    return 0.0
            else:
                if getvar:
                    return self.prior.mu(X), 1.0
                else:
                    return self.prior.mu(X)
        X = array(X, copy=False, dtype=float, ndmin=2)
        M, (N,D) = len(self.X), X.shape
        m = 0.0
        if self.prior is not None:
            m = self.prior.mu(X)
            assert isscalar(m)
        if self.G is None:
            # NO GRADIENT DATA.
            d = self.Y-m
            r = empty((M, N))
            for i in xrange(M):
                for j in xrange(N):
                    r[i,j] = self.kernel.cov(self.X[i], X[j])
        else:
            # WITH GRADIENT DATA.
            # interleave value and gradient residuals; each training point
            # contributes D+1 rows to the cross-correlation matrix
            d = hstack(map(hstack, zip(self.Y-m, self.G)))
            r = empty((M*(D+1), N))
            for i in xrange(M):
                for j in xrange(N):
                    A = i*(D+1)
                    cov = self.kernel.covWithGradients(self.X[i], X[j])
                    r[A:A+D+1,j] = cov[:,0]
        # calculate the mean.
        Lr = linalg.solve(self.L, r)
        mu = m + dot(Lr.T, linalg.solve(self.L,d))
        if getvar:
            # calculate the variance.
            if self.augL is None:
                sigma2 = (1 + self.noise) - sum(Lr**2, axis=0)
            else:
                # pending observation points shrink the variance: redo the
                # solve against the augmented factor
                M, (N,D) = len(self.augX), X.shape
                r = empty((M, N))
                for i in xrange(M):
                    for j in xrange(N):
                        r[i,j] = self.kernel.cov(self.augX[i], X[j])
                Lr = linalg.solve(self.augL, r)
                sigma2 = (1 + self.noise) - sum(Lr**2, axis=0)
            # guard against tiny negative values from round-off
            sigma2 = clip(sigma2, 10e-8, 10)
            return mu[0], sigma2[0]
        else:
            return mu[0]

    def posteriors(self, X):
        """
        get arrays of posterior values for the array in X
        """
        M = []
        V = []
        for x in X:
            if isscalar(x):
                m, v = self.posterior(array([x]))
            else:
                m, v = self.posterior(x)
            M.append(m)
            V.append(v)
        return array(M), array(V)

    def mu(self, x):
        """
        get posterior mean for a point x

        NOTE: if you are getting the variance as well, this is less efficient
        than using self.posterior()
        """
        return self.posterior(x, getvar=False)

    def negmu(self, x):
        """
        needed occasionally for optimization (minimizers maximize the mean)
        """
        nm = -self.mu(x)
        return nm

    def addData(self, X, Y, G=None):
        """
        Add new data to model and update.

        We assume that X is an (N,D)-array, Y is an N-vector, and G is either
        an (N,D)-array or None. Further, if X or G are a single D-dimensional
        vector these will be interpreted as (1,D)-arrays, i.e. one observation.
        """
        X = array(X, copy=False, dtype=float, ndmin=2)
        Y = array(Y, copy=False, dtype=float, ndmin=1).flatten()
        G = array(G, copy=False, dtype=float, ndmin=2) if (G is not None) else None
        assert len(Y) == len(X), 'wrong number of Y-observations given'
        assert G is None or G.shape == X.shape, 'wrong number (or dimensionality) of gradient-observations given'
        # this just makes sure that if we used the default gradient noise for
        # each dimension it gets lengthened to the proper size.
        if len(self.X) == 0 and len(self.gnoise) == 1:
            self.gnoise = tile(self.gnoise, X.shape[1])
        # compute the correlations between our data points.
        # NOTE(review): _computeCorrelationsWithGradients is not defined in
        # this class -- presumably provided elsewhere; verify before passing G.
        r, m = \
            self._computeCorrelations(X) if (G is None) else \
            self._computeCorrelationsWithGradients(X)
        if len(self.X) == 0:
            # first batch: factorize from scratch
            self.X = copy(X)
            self.Y = copy(Y)
            self.G = copy(G) if (G is not None) else None
            self.R = r
            self.L = linalg.cholesky(self.R)
        else:
            # incremental update: extend R by [R m; m' r] and grow the
            # Cholesky factor with a block update instead of refactorizing
            self.X = r_[self.X, X]
            self.Y = r_[self.Y, Y]
            self.G = r_[self.G, G] if (G is not None) else None
            self.R = r_[c_[self.R, m], c_[m.T, r]]
            z = linalg.solve(self.L, m)
            d = linalg.cholesky(r - dot(z.T, z))
            self.L = r_[c_[self.L, zeros(z.shape)], c_[z.T, d]]

    def getYfromX(self, qx):
        """
        get the (first) Y value for a given training datum X. return None if x
        is not found.
        """
        for x, y in zip(self.X, self.Y):
            if all(qx==x):
                return y
        return None

    def done(self, x):
        """
        indication that the GP has been terminated and that a final point has
        been selected (mostly relevant for logging)
        """
        self.selected = x
        self.endtime = time()
class PrefGaussianProcess(GaussianProcess):
    """
    Like a regular Gaussian Process, but trained on preference data. Note
    that you cannot (currently) add non-preference data. This is because I
    haven't gotten around to it, not because it's impossible.
    """

    def __init__(self, kernel, prefs=None, **kwargs):
        super(PrefGaussianProcess, self).__init__(kernel, **kwargs)
        # list of (x_preferred, x_other, degree) triples seen so far
        self.preferences = []
        # preference-likelihood Hessian approximation (built in addPreferences)
        self.C = None
        if prefs is not None:
            self.addPreferences(prefs)

    def addPreferences(self, prefs, useC=True, showPrefLikelihood=False):
        """
        Add a set of preferences to the GP and update.

        @param prefs: sequence of preference triples (xv, xu, d) where xv
                      is a datum preferred to xu and d is the degree of
                      preference (0 = 'standard', 1 = 'greatly preferred')
        """
        def S(x, prefinds, L, useC=False):
            """
            the MAP functional to be minimized
            """
            # sum of log preference likelihoods, either via the native
            # 'ego' library (fast path) or a pure-Python loop
            if useC:
                lpath = ctypes.util.find_library('ego')
                lib = cdll[lpath]
                lib.logCDFs.restype = c_double
                lib.logCDFs.argtypes = [c_int, POINTER(c_int), POINTER(c_double)]
                pf = array(prefinds[:], dtype=c_int).reshape(-1)
                cx = array(x.copy(), dtype=c_double)
                result = lib.logCDFs(c_int(len(pf)), pf.ctypes.data_as(POINTER(c_int)), cx.ctypes.data_as(POINTER(c_double)))
                logCDFs = result
            else:
                logCDFs = 0.
                sigma = 1
                epsilon = 1e-10  # keeps log() away from log(0)
                Z = sqrt(2) * sigma
                for v, u, d in prefinds:
                    logCDFs += (d+1) * log(CDF((x[v]-x[u])/Z)+epsilon)
            # negative log likelihood plus the GP prior term x' R^-1 x / 2
            Lx = linalg.solve(L, x)
            val = -logCDFs + dot(Lx, Lx)/2
            if not isfinite(val):
                print 'non-finite val!'
                pdb.set_trace()
            return val

        # add new preferences
        self.preferences.extend(prefs)
        # assign a dense index to every distinct datum appearing in any
        # preference; vs collects the "preferred" side
        x2ind = {}
        ind = 0
        prefinds = []
        vs = set()
        for v, u, d in self.preferences:
            v = tuple(v)
            vs.add(v)
            u = tuple(u)
            if v not in x2ind:
                x2ind[v] = ind
                ind += 1
            if u not in x2ind:
                x2ind[u] = ind
                ind += 1
            prefinds.append((x2ind[v], x2ind[u], d))
        newX = array([x for x, _ in sorted(x2ind.items(), key=lambda x:x[1])])
        # use existing Ys as starting point for optimizer
        lastY = {}
        for x, y in zip(self.X, self.Y):
            lastY[tuple(x)] = y
        if len(self.Y) > 0:
            ymax = max(self.Y)
            ymin = min(self.Y)
        else:
            ymax = .5
            ymin = -.5
        start = []
        for x in newX:
            if tuple(x) in lastY:
                start.append(lastY[tuple(x)])
            else:
                # unseen points start high if they were ever preferred,
                # low otherwise
                if tuple(x) in vs:
                    start.append(ymax)
                else:
                    start.append(ymin)
        # update X, R (reset X first so _computeCorrelations treats newX
        # as a fresh batch)
        self.X = zeros((0,0))
        r, m = \
            self._computeCorrelations(newX)
        self.X = newX
        self.R = r
        self.L = linalg.cholesky(self.R)
        # optimize S to find a good Y (latent utilities at the data points)
        self.Y = fmin_bfgs(S, start, args=(prefinds, self.L), disp=0)
        # sanity pass: every preferred item should have ended up with a
        # higher latent value than the item it beat
        for r, c, _ in self.preferences:
            r = tuple(r)
            c = tuple(c)
            if self.Y[x2ind[r]] <= self.Y[x2ind[c]]:
                # if there is nothing preferred to this item, bump it up
                for r1, c1, _ in self.preferences:
                    if all(c1==r):
                        break
                else:
                    self.Y[x2ind[r]] = self.Y[x2ind[c]] + .1
        # now we can learn the C matrix
        self.C = eye(len(self.X), dtype=float) * 5
        for i in xrange(len(self.X)):
            for j in xrange(len(self.X)):
                for r, c, _ in self.preferences:
                    # alpha encodes how preference (r,c) touches entry (i,j):
                    # -1 off-diagonal between the pair, +1 on the diagonal
                    alpha = 0
                    if all(r==self.X[i]) and all(c==self.X[j]):
                        alpha = -1
                    elif all(r==self.X[j]) and all(c==self.X[i]):
                        alpha = -1
                    elif all(r==self.X[i]) and i==j:
                        alpha = 1
                    elif all(c==self.X[i]) and i==j:
                        alpha = 1
                    if alpha != 0:
                        d = (self.mu(r)-self.mu(c)) / (sqrt(2)*sqrt(self.noise))
                        cdf = CDF(d)
                        pdf = PDF(d)
                        # clamp to avoid division blow-ups in the ratio terms
                        if cdf < 1e-10:
                            cdf = 1e-10
                        if pdf < 1e-10:
                            pdf = 1e-10
                        self.C[i,j] += alpha / (2*self.noise) * (pdf**2/cdf**2 + d * pdf/cdf)
        # refactorize with the preference information folded in; if C is
        # ill-conditioned, add identity regularizers until Cholesky succeeds
        try:
            self.L = linalg.cholesky(self.R+linalg.inv(self.C))
        except LinAlgError:
            print '[addPreferences] GP.C matrix is ill-conditioned, adding regularizer delta = 1'
            for i in xrange(10):
                self.C += eye(len(self.X))
                try:
                    self.L = linalg.cholesky(self.R+linalg.inv(self.C))
                except LinAlgError:
                    print '[addPreferences] GP.C matrix is ill-conditioned, adding regularizer delta = %d' % (i+2)
                else:
                    break

    def addObservationPoint(self, X):
        """
        Add a point at which we will observe, but for which we don't have the
        observation yet. (Used by the gallery selection algorithms.)
        """
        X = array(X, copy=False, dtype=float, ndmin=2)
        if self.augR is None:
            self.augR = self.R.copy()
            self.augX = self.X.copy()
        r, m = self._computeAugCorrelations(X)
        self.augR = r_[c_[self.augR, m], c_[m.T, r]]
        # pad inv(C) with zeros for the new, rating-free rows/columns
        invC = zeros_like(self.augR)
        invC[:self.C.shape[0], :self.C.shape[0]] = linalg.inv(self.C)
        self.augL = linalg.cholesky(self.augR+invC)
        self.augX = r_[self.augX, X]

    def addData(self, X, Y, G=None):
        """
        I have an idea about how to do this... (see notebook).
        """
        # explicit (non-preference) observations are not supported yet
        raise NotImplementedError("can't (yet) add explicit ratings to preference GP")
|
{"hexsha": "c3e3e527d81ec6b4d050c740a815804ef64a3e81", "size": 18612, "ext": "py", "lang": "Python", "max_stars_repo_path": "ego/gaussianprocess/__init__.py", "max_stars_repo_name": "misterwindupbird/IBO", "max_stars_repo_head_hexsha": "17e502e4fab4b1c20abbe26c6ce8a258c32bf49a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2015-06-27T15:18:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T17:23:04.000Z", "max_issues_repo_path": "ego/gaussianprocess/__init__.py", "max_issues_repo_name": "misterwindupbird/IBO", "max_issues_repo_head_hexsha": "17e502e4fab4b1c20abbe26c6ce8a258c32bf49a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-07-04T08:41:41.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-23T18:06:55.000Z", "max_forks_repo_path": "ego/gaussianprocess/__init__.py", "max_forks_repo_name": "misterwindupbird/IBO", "max_forks_repo_head_hexsha": "17e502e4fab4b1c20abbe26c6ce8a258c32bf49a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-11-14T21:45:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-12T11:18:32.000Z", "avg_line_length": 35.1169811321, "max_line_length": 125, "alphanum_fraction": 0.484579841, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 4650}
|
# -*- coding: utf-8 -*-
import time
import numpy
import quantarhei as qr
from quantarhei import LabSetup
from quantarhei.utils.vectors import X #, Y, Z

###############################################################################
#
#
#   PARAMETERS
#
#
###############################################################################
omega = 500.0            # mode frequency in 1/cm
hr_factor = 0.01         # Huang-Rhys factor
Ng = 2                   # number of vibrational states in the ground state
Ne = 2                   # number of vibrational states in the excited state
E1 = 12500.0             # electronic transition energy
width = 100              # spectral width of the electronic transition in 1/cm
Nt2 = 100                # number of steps in t2
dt2 = 10.0               # time step in t2
# plot window [w1_min, w1_max, w3_min, w3_max] in 1/cm -- TODO confirm axis order
plot_window = [11500, 13500, 11500, 13500]
dOmega = 50.0            # half-width of the omega2 selection band in 1/cm
fft_of = qr.signal_REPH  # signal to FFT; alternatives: qr.signal_NONR, qr.signal_TOTL
###############################################################################
#
#
# MODEL DEFINITION
#
#
###############################################################################
#
# Create a two-level molecule with one intra-molecular harmonic mode
#
# two-level molecule with one intra-molecular harmonic mode
with qr.energy_units("1/cm"):
    mol = qr.Molecule(elenergies=[0.0, E1])
    mol.set_dipole(0, 1, [1.0, 0.0, 1.0])
    mod = qr.Mode(frequency=omega)
    mol.add_Mode(mod)
    # vibrational basis sizes in the ground/excited electronic states
    mod.set_nmax(0, Ng)
    mod.set_nmax(1, Ne)
    mod.set_HR(1, hr_factor)
    mol.set_transition_width((0,1), width)
#
# Create an aggregate (a single molecule here)
#
agg = qr.Aggregate(molecules=[mol])
agg.build()
#
# Time axes and the calculator
#
t1axis = qr.TimeAxis(0.0, 1000, 10.0)
t3axis = qr.TimeAxis(0.0, 1000, 10.0)
t2axis = qr.TimeAxis(0.0, Nt2, dt2)
# FIXME: TwoDResponseCalculator
msc = qr.MockTwoDResponseCalculator(t1axis, t2axis, t3axis)
# rotating-wave approximation frequency set to the electronic gap
msc.bootstrap(rwa=qr.convert(E1,"1/cm","int"), shape="Gaussian")
#
# Laboratory setup: all pulses and detection polarized along X
#
lab = LabSetup()
lab.set_polarizations(pulse_polarizations=[X,X,X], detection_polarization=X)
#
# Hamiltonian is required
#
H = agg.get_Hamiltonian()
#
# Evolution superoperator over the t2 axis
#
# FIXME: relaxation in the oscillator
eUt = qr.EvolutionSuperOperator(t2axis, H)
eUt.set_dense_dt(10)
eUt.calculate(show_progress=False)
#
# We calculate all 2D spectra here, selecting the +/- omega2 bands
#
print("Calculating 2D spectra ...")
t1 = time.time()
om1 = qr.convert(omega-dOmega,"1/cm","int")
om2 = qr.convert(omega+dOmega,"1/cm","int")
print(" - positive frequency")
cont1 = msc.calculate_all_system(agg, eUt, lab,
                                 selection=[["omega2",[om1, om2]],
                                            ["order"]])
print(" - negative frequency")
cont2 = msc.calculate_all_system(agg, eUt, lab,
                                 selection=[["omega2",[-om2, -om1]],
                                            ["order"]])
# NOTE: t2 here is a wall-clock timestamp, unrelated to t2axis
t2 = time.time()
print("... done in", t2-t1, "sec")
#cont.save("container.qrp")
###############################################################################
#
#
# OUTPUT
#
#
###############################################################################
#
# Example spectrum to plot and a movie of the time evolution
#
#res = cont1.get_response(tag=0.0)
#specttwd = res.get_TwoDSpectrum(dtype=qr.signal_TOTL)
#with qr.energy_units("1/cm"):
# res.plot(stype=qr.signal_TOTL)
# specttwd.plot()
# print("Creating a movie ...")
# cont.make_movie("movie.mp4")
# print("... done")
#
# The containers will now keep only the 2D spectrum we want
#
# keep only the requested signal type (fft_of) in each container
cont1 = cont1.get_TwoDSpectrumContainer(stype=fft_of)
cont2 = cont2.get_TwoDSpectrumContainer(stype=fft_of)
#
# Trim the spectra to a smaller region
#
with qr.energy_units("1/cm"):
    cont1.trimall_to(window=plot_window)
    cont2.trimall_to(window=plot_window)
#
# Window function for the subsequent FFT along t2
#
import quantarhei.functions as func
window = func.Tukey(t2axis, r=0.3, sym=False)
#
# FFT of the spectra over the waiting time t2
#
print("\nCalculating FFT of the 2D maps")
fcont1 = cont1.fft(window=window, offset=0.0)
fcont2 = cont2.fft(window=window, offset=0.0)
#
# Have a look which frequencies we actually have
#
#Ndat = len(fcont1.axis.data)
#print("\nNumber of frequency points:", Ndat)
#print("In 1/cm they are:")
#with qr.energy_units("1/cm"):
# for k_i in range(Ndat):
# print(k_i, fcont1.axis.data[k_i])
#
# Which spectrum we want to see
#
with qr.frequency_units("1/cm"):
    # spectrum nearest +500 1/cm from the positive-frequency container
    sp21, show_Npoint2 = fcont1.get_nearest(500.0)
    # spectrum nearest -500 1/cm from the negative-frequency container
    sp11, show_Npoint1 = fcont2.get_nearest(-500.0)
#
# Plotting the corresponding maps
#
units = "1/cm"
with qr.energy_units(units):
    # NOTE(review): show_Npoint1 was obtained from fcont2 but is used to
    # index fcont1.axis; this is only correct if both FFT axes are
    # identical -- confirm.
    print("\nPlotting spectrum at frequency:",
          fcont1.axis.data[show_Npoint1], units)
    sp11.plot(window=plot_window, Npos_contours=20, spart=qr.part_ABS)
    fftfile = "twod_fft_map_1.png"
    sp11.savefig(fftfile)
    print("... saved into: ", fftfile)
    # evolution of a single (w1, w3) point across the FFT frequency axis
    evol1 = fcont2.get_point_evolution(13000, 12500,
                                       fcont1.axis)
    evol1.data = numpy.abs(evol1.data)
    evol1.plot()
    print("\nPlotting spectrum at frequency:",
          fcont1.axis.data[show_Npoint2], units)
    sp21.plot(window=plot_window, Npos_contours=20, spart=qr.part_ABS)
    fftfile = "twod_fft_map_2.png"
    sp21.savefig(fftfile)
    print("... saved into: ", fftfile)
    evol2 = fcont1.get_point_evolution(12500,13000,
                                       fcont2.axis)
    evol2.data = numpy.abs(evol2.data)
    evol2.plot()
###############################################################################
|
{"hexsha": "ba5d902553cb3248878f7dc1fa3f626d9823c703", "size": 5698, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantarhei/wizard/examples/ex_035_2D_Vibronic.py", "max_stars_repo_name": "slamavl/quantarhei", "max_stars_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-10-16T13:26:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T11:40:52.000Z", "max_issues_repo_path": "quantarhei/wizard/examples/ex_035_2D_Vibronic.py", "max_issues_repo_name": "slamavl/quantarhei", "max_issues_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 61, "max_issues_repo_issues_event_min_datetime": "2016-09-19T10:45:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-10T13:53:06.000Z", "max_forks_repo_path": "quantarhei/wizard/examples/ex_035_2D_Vibronic.py", "max_forks_repo_name": "slamavl/quantarhei", "max_forks_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2016-08-30T09:09:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T03:16:35.000Z", "avg_line_length": 24.7739130435, "max_line_length": 79, "alphanum_fraction": 0.58002808, "include": true, "reason": "import numpy", "num_tokens": 1523}
|
import torch
import torch.nn as nn
import numpy as np
from transforms import *
class PlanarFlow(nn.Module):
    """A normalizing flow built from K planar transforms applied in sequence."""

    def __init__(self, dim=20, K=16):
        super().__init__()
        # one learnable planar transform per flow step
        self.transforms = nn.ModuleList(PlanarTransform(dim) for _ in range(K))

    def forward(self, z, logdet=False):
        """Push z through all K transforms.

        Returns the transformed sample, plus the summed log-det-Jacobian
        when logdet is True.
        """
        out = z
        sum_ldj = 0.
        for flow_step in self.transforms:
            if logdet:
                out, ldj = flow_step(out, logdet=True)
                sum_ldj = sum_ldj + ldj
            else:
                out = flow_step(out, logdet=False)
        if logdet:
            return out, sum_ldj
        return out
if __name__ == '__main__':
    # smoke test: push a random batch through a small flow on the GPU
    planar = PlanarFlow(dim=5, K=4)
    print([p.size() for p in planar.parameters()])
    planar.cuda()  # NOTE: requires a CUDA device
    z0 = torch.randn(3, 5).cuda()
    # scale the sample up so the transforms act away from the origin
    z0 = z0*4
    print(z0)
    print(planar(z0, True))  # returns (zK, summed log-det-Jacobian)
|
{"hexsha": "245d67b4f2e79a8f5b738dadc8cea71c373b2cc4", "size": 848, "ext": "py", "lang": "Python", "max_stars_repo_path": "flows.py", "max_stars_repo_name": "kaiqi-ken/planar-flow-pytorch", "max_stars_repo_head_hexsha": "3761883bbb53a40f903b5840c18e772bfd3e3d99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-12-02T11:56:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T05:51:33.000Z", "max_issues_repo_path": "flows.py", "max_issues_repo_name": "kaiqi-ken/planar-flow-pytorch", "max_issues_repo_head_hexsha": "3761883bbb53a40f903b5840c18e772bfd3e3d99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-14T08:58:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-17T16:07:53.000Z", "max_forks_repo_path": "flows.py", "max_forks_repo_name": "abdulfatir/planar-flow-pytorch", "max_forks_repo_head_hexsha": "3761883bbb53a40f903b5840c18e772bfd3e3d99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-02-29T05:25:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T16:25:44.000Z", "avg_line_length": 25.696969697, "max_line_length": 81, "alphanum_fraction": 0.5353773585, "include": true, "reason": "import numpy", "num_tokens": 236}
|
function voxels = carveall( voxels, cameras )
%CARVEALL carve away voxels using all cameras
%
%   VOXELS = CARVEALL(VOXELS, CAMERAS) applies CARVE once per camera,
%   threading the voxel array through each call.

%   Copyright 2005-2009 The MathWorks, Inc.
%   $Revision: 1.0 $    $Date: 2006/06/30 00:00:00 $

for camIdx = 1:numel(cameras)
    voxels = carve(voxels, cameras(camIdx));
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/26160-carving-a-dinosaur/SpaceCarving/+spacecarving/carveall.m"}
|
import jieba
import pandas as pd
# import gensim
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt

# Build word frequencies over all training comments and render a word cloud.
train = pd.read_csv('../data/train_first.csv')
train_line = train['Discuss'].values

# BUGFIX/idiom: renamed from `dict`, which shadowed the builtin, and
# replaced the `if w not in dict.keys()` double-lookup with dict.get().
word_freq = {}
for idx, line in enumerate(train_line):
    # strip newlines before segmenting with jieba
    words = jieba.cut(line.strip().replace('\n', ''))
    for w in words:
        word_freq[w] = word_freq.get(w, 0) + 1

from wordcloud import WordCloud
wordcloud = WordCloud(font_path='../data/simhei.ttf',
                      scale=32,
                      mode='RGBA',
                      background_color='white')
wc = wordcloud.fit_words(word_freq)
plt.imshow(wc)
plt.axis('off')
plt.savefig('../fig/wc.png')
|
{"hexsha": "bde0bf2d94eaa93fdeafed109d58774be8fa56ee", "size": 758, "ext": "py", "lang": "Python", "max_stars_repo_path": "Analyse/process3.py", "max_stars_repo_name": "TwT520Ly/YUN-2020", "max_stars_repo_head_hexsha": "1abdfbe399dc841e64ae546b22eed6c17d58a6e0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Analyse/process3.py", "max_issues_repo_name": "TwT520Ly/YUN-2020", "max_issues_repo_head_hexsha": "1abdfbe399dc841e64ae546b22eed6c17d58a6e0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Analyse/process3.py", "max_forks_repo_name": "TwT520Ly/YUN-2020", "max_forks_repo_head_hexsha": "1abdfbe399dc841e64ae546b22eed6c17d58a6e0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-21T17:48:50.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T17:48:50.000Z", "avg_line_length": 19.4358974359, "max_line_length": 58, "alphanum_fraction": 0.5949868074, "include": true, "reason": "import numpy", "num_tokens": 183}
|
\section{Algorithms}
In this section we will describe the randomized algorithms in detail, provide the
corresponding computational complexity analysis, and state the main theoretical results
that guarantee the accuracy of the approximation.
We split this section in three main parts: Stage 1 \ref{sec:stage1} and Stage 2
\ref{sec:stage2} that will study in depth the algorithmic details of both stages,
and Full Algorithms \ref{sec:full-algos} in which full algorithms will be presented
for a diversity of matrices with certain properties.
\subsection{Stage 1}
\label{sec:stage1}
In the introductory section, we provided some intuition on the randomized procedure
and we developed a general Proto-Algorithm \ref{alg:proto-algorithm} to find
the range-approximating matrix $\mtx{Q}$. However, Proto-Algorithm \ref{alg:proto-algorithm} is very general and can be
tuned depending on the problem requirements.
The number $T_{\rm basic}$ of flops required by Proto-Algorithm \ref{alg:proto-algorithm} satisfies
\begin{equation}
\label{eq:cost_basic}
T_{\rm basic} \sim \ell n \, T_{\rm rand} + \ell\,T_{\rm mult} + \ell^{2}m %\,T_{\rm flop},
\end{equation}
where $T_{\rm rand}$ is the cost of generating a Gaussian random number
and $T_{\rm mult}$ is the cost of multiplying $\mtx{A}$ by a vector.
The last term comes from the orthonormalization procedure of $\mtx{Y}$.
We will now describe some specific realizations of Proto-Algorithm \ref{alg:proto-algorithm}
that will be intended for problems with different requirements.
%%%%%%%%%%%%%%%%%% RANDOMIZED RANGE FINDER %%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Randomized Range Finder} \label{alg:randomized-range-finder}
This is the most naive and simplest implementation of Proto-Algorithm \ref{alg:proto-algorithm}.
Given an oversampling parameter $p$, the \textit{Randomized Range Finder}
performs Proto-Algorithm \ref{alg:proto-algorithm} with a gaussian test matrix
$\mtx{\Omega}\in\Rspace{n\times\ell}$ with $\ell=k+p$ and $k$ being a pre-specified
target rank. Then, it orthogonalizes the columns of the resulting matrix $\mtx{Y}$ by computing
a QR decomposition. A numerical issue arises when computing the orthogonalization
procedure due to the fact that the columns of $\mtx{Y}$ are almost linearly
dependent. The authors in ~\cite{halko2011finding} found that
using the \textit{double orthogonalization}
~\cite{bjorck1994numerics} was enough to guarantee stability
of the procedure.
\begin{figure}[ht]
\begin{center}
\fbox{
\begin{minipage}{.9\textwidth}
\begin{center}
\textsc{Randomized Range Finder}
\end{center}
\begin{tabbing}
\hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \kill
\anum{1} \>Draw an $n\times \ell$ Gaussian random matrix $\mtx{\Omega}$.\\
\anum{2} \>Form the $m\times \ell$ matrix $\mtx{Y} = \mtx{A}\mtx{\Omega}$.\\
\anum{3} \>Construct an $m \times \ell$ matrix $\mtx{Q}$ whose columns form an orthonormal\\
\> basis for the range of $\mtx{Y}$, e.g., using the QR factorization $\mtx{Y} = \mtx{Q}\mtx{R}$.
\end{tabbing}
\end{minipage}}
\end{center}
\end{figure}
The complexity analysis of the Algorithm \ref{alg:randomized-range-finder}
gives
\begin{equation}\label{eq:analysis-rand-finder}
T_{\text{Randomized Range Finder}}\sim \mathcal{O}(mn\ell)
\end{equation}
since generating a Gaussian random number is $\mathcal{O}(1)$
and computing a matrix vector multiplication is $\mathcal{O}(mn)$
\footnote{Note that parallel computation can be extremely helpful here
to drastically reduce the effective complexity of the product $\mtx{A}\mtx{\Omega}$.}.
One important observation is that the complexity $\mathcal{O}(mn\ell)$
can be prohibitive for massive datasets \footnote{Obviously, we can't hope
to do better than $\mathcal{O}(mn)$ which is the size of the matrix
we want to approximate. The term that we want to address is the linear term
in $\ell$. Classical algorithms to compute the $\ell$-SVD cost
$\mathcal{O}(mn\ell)$, by first computing a rank-revealing QR factorization
~\cite{gu1996efficient} and then manipulating the terms to get to the desired
factorization.}. A variant of this procedure
will be studied in \ref{alg:fast-randomized-range-finder}
to get around this problem.
%%%%%%%%%%%%%%%%%% ADAPTIVE RANDOMIZED RANGE FINDER %%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Adaptive Randomized Range Finder}
\label{alg:adaptive-randomized-range-finder}
One important pitfall of the \textit{Randomized Range Finder}
\ref{alg:randomized-range-finder} is that it
requires to know in advance the target rank $k$. However, if we intend
to solve the \textit{fixed-precision approximation problem}, we need a scheme to estimate
the error $\|\mtx{A}-\mtx{Q}\mtx{Q}^\adj\mtx{A}\|$ during the algorithm
in order to match the required tolerance $\epsilon$.
This scheme is possible and it is a direct consequence of the following lemma.
\begin{lemma}
\label{thm:aposteriori}
Let $\mtx{B}$ be a real $m\times n$ matrix.
Fix a positive integer $r$ and a real number $\alpha > 1$.
Draw an independent family $\{ \vct{\omega}^{(i)} : i = 1, 2, \dots, r \}$
of standard Gaussian vectors. Then
\begin{equation*}
\norm{\mtx{B}}
\leq \alpha \sqrt{\frac{2}{\pi}} \max_{i = 1, \dots, r}
\smnorm{}{\mtx{B}\vct{\omega}^{(i)} }
\end{equation*}
except with probability $\alpha^{-r}$.
\end{lemma}
Lemma \ref{thm:aposteriori} says that we can bound the error with high
probability using inexpensive computations in an online manner.
The Lemma \ref{thm:aposteriori} applied to our problem reads
\begin{equation}
\label{eq:errorest}
\norm{ (\Id - \mtx{Q}\mtx{Q}^{\adj})\mtx{A}}
\leq 10 \sqrt{\frac{2}{\pi}} \max_{i = 1, \dots, r}
\smnorm{}{ (\Id - \mtx{Q}\mtx{Q}^{\adj}) \mtx{A}\vct{\omega}^{(i)} }
\end{equation}
with probability at least $1 - 10^{-r}$.
The high probability bound \ref{eq:errorest} gives a simple online scheme
to decide when we have a good enough $\mtx{Q}$ that matches the pre-specified
tolerance. The goal here is to find an integer $l$ and a $m\times l$
orthonormal matrix $\mtx{Q}^{(l)}$ such that
\begin{equation} \label{eqn:err_est_err_bd}
\smnorm{}{ \big(\Id - \mtx{Q}^{(\ell)} (\mtx{Q}^{(\ell)})^{\adj} \big)\mtx{A} } \leq \eps.
\end{equation}
We call \textit{Adaptive Randomized Range Finder}
\ref{alg:adaptive-randomized-range-finder} to the
algorithm derived from Lemma \ref{thm:aposteriori} that solves this
problem.
\begin{figure}[ht]
\begin{center}
\fbox{
\begin{minipage}{.9\textwidth}
\begin{center}
\textsc{Adaptive Randomized Range Finder}
\end{center}
\begin{tabbing}
\hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \kill
\anum {1} \> Draw standard Gaussian vectors $\vct{\omega}^{(1)}, \dots, \vct{\omega}^{(r)}$ of length $n$.\\
\anum{2} \> For $i = 1,2,\dots,r$, compute $\vct{y}^{(i)} = \mtx{A}\vct{\omega}^{(i)}$.\\
\anum{3} \> $j=0$.\\
\anum{4} \> $\mtx{Q}^{(0)} = [\ ]$, the $m\times 0$ empty matrix. \\
\anum{5} \> \textbf{while} $\displaystyle
\max\left\{\smnorm{}{\vct{y}^{(j+1)}},\smnorm{}{\vct{y}^{(j+2)}},\dots,\smnorm{}{\vct{y}^{(j+r)}} \right\} >
\varepsilon/(10\sqrt{2/\pi})$,\\
\anum{6} \> \> $j = j + 1$.\\
\anum{7} \> \> Overwrite $\vct{y}^{(j)}$ by $\bigl(\Id - \mtx{Q}^{(j-1)}(\mtx{Q}^{(j-1)})^{\adj}\bigr)\vct{y}^{(j)}$.\\
\anum{8} \> \> $\vct{q}^{(j)} = \vct{y}^{(j)}/\norm{\vct{y}^{(j)}}$.\\
\anum{9} \> \> $\mtx{Q}^{(j)} = [\mtx{Q}^{(j-1)}\ \vct{q}^{(j)}]$.\\
\anum{10} \> \> Draw a standard Gaussian vector $\vct{\omega}^{(j+r)}$ of length $n$.\\
\anum{11} \> \> $\vct{y}^{(j+r)} = \left(\Id - \mtx{Q}^{(j)}(\mtx{Q}^{(j)})^{\adj}\right)\mtx{A}\vct{\omega}^{(j+r)}$.\\
\anum{12} \> \> \textbf{for} $i = (j+1),(j+2),\dots,(j+r-1)$,\\
\anum{13} \> \> \> Overwrite $\vct{y}^{(i)}$ by $\vct{y}^{(i)} - \vct{q}^{(j)}\ip{\vct{q}^{(j)}}{\vct{y}^{(i)}}$.\\
\anum{14} \> \> \textbf{end for}\\
\anum{15} \> \textbf{end while}\\
\anum{16} \> $\mtx{Q} = \mtx{Q}^{(j)}$.
\end{tabbing}
\end{minipage}}
\end{center}
\end{figure}
One important question regarding Algorithm \ref{alg:adaptive-randomized-range-finder}
is how good the bound given by Lemma \ref{thm:aposteriori}
is in practice. If there is a significant gap between theory and practice
the optimal $l$ will be overestimated. This question will be addressed in the experimental section
\ref{sec:gaussian-matrices}.
%%%%%%%%%%%%%%%%%% RANDOMIZED POWER ITERATION %%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Randomized Power Iteration}
\label{alg:randomized-power-iteration}
The \textit{Randomized Range Finder} \ref{alg:randomized-range-finder}
algorithm assumes that the singular values
of the matrix decay fast. This can be intuitively seen from equation \ref{eq:iter-range},
where the small singular
values interfere with the calculation of the range. This intuition is made precise
in Theorem \ref{thm:avg-frob-error-gauss}, where the error of the approximation depends on
the $\sigma_{k+1}$ and $\sum_{j>k}\sigma_j^2$.
The goal here is to reduce the weight of the
small singular values by taking powers of the matrix whose range we want
to approximate. Instead of applying the sampling scheme to $\mtx{A}$, we will
apply it to $\mtx{B}=(\mtx{A}\mtx{A}^\adj)^q\mtx{A}$ where $q>0$ is a small
integer.
The matrix $\mtx{B}$ has the same singular vectors as $\mtx{A}$ (hence,
the same range), but its singular values decay much more quickly.
\begin{equation}\label{eq:sing-values-power}
\sigma_j(\mtx{B}) = \sigma_j(\mtx{A})^{2q+1},
\hspace{0.5cm} j=1,2,3,\ldots
\end{equation}
The \textit{Randomized Power Iteration}
\ref{alg:randomized-power-iteration} algorithm is the same
as the \textit{Randomized Range Finder} \ref{alg:randomized-range-finder}
but replacing the formula
$\mtx{Y}=\mtx{A}\mtx{\Omega}$ by $\mtx{Y}=\mtx{B}\mtx{\Omega}$.
\begin{figure}[ht]
\begin{center}
\fbox{
\begin{minipage}{.9\textwidth}
\begin{center}
\textsc{Randomized Power Iteration}
\end{center}
\begin{tabbing}
\hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \kill
\anum{1} \>Draw an $n\times \ell$ Gaussian random matrix $\mtx{\Omega}$.\\
\anum{2} \>Form the $m\times \ell$ matrix $\mtx{Y} = (\mtx{A}\mtx{A}^{\adj})^{q}\mtx{A}\mtx{\Omega}$ via alternating application\\
\>of $\mtx{A}$ and $\mtx{A}^{\adj}$.\\
\anum{3} \>Construct an $m \times \ell$ matrix $\mtx{Q}$ whose columns form an orthonormal\\
\> basis for the range of $\mtx{Y}$, e.g., via the QR factorization $\mtx{Y} = \mtx{Q}\mtx{R}$.
\end{tabbing}
\end{minipage}}
\end{center}
\end{figure}
The computational complexity of the Algorithm \ref{alg:randomized-power-iteration}
is essentially the same because
it only requires $2q+1$ as many matrix-multiplications as Algorithm
\ref{alg:randomized-range-finder}
but the number $q$ is in practice 2,3 or 4. This can be seen from
Corollary \ref{cor:power-method-spec-gauss},
which shows that the power iteration drives the approximation
gap to 1 exponentially fast as $q$ increases.
%%%%%%%%%%%%%%%%%% FAST RANDOMIZED RANGE FINDER %%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Fast Randomized Range Finder}
\label{alg:fast-randomized-range-finder}
A simple inspection to equation \ref{eq:cost_basic} reveals the computational
bottleneck of the sampling procedure. This is the matrix multiplication
$\mtx{Y}=\mtx{A}\mtx{\Omega}$ that takes $\mathcal{O}(mn\ell)$ operations
for dense matrices, which is the same as the $\ell$-SVD.
The key idea is to use a \textit{structured} random matrix that allows us
to compute the product in $\mathcal{O}(mn\log(\ell))$ operations.
The simplest structured random matrix that meets our goals is the so-called
\textit{subsampled random Fourier transform} (SRFT).
An SRFT is an $n \times \ell$ matrix of the form
\begin{equation}
\label{eq:def_srft}
\mtx{\Omega} = \sqrt{\frac{n}{\ell}} \, \mtx{DFR},
\end{equation}
where
\lsp
\begin{itemize}
\item $\mtx{D}$ is an $n \times n$ diagonal matrix whose entries are
independent random variables uniformly distributed on the complex unit circle.
\item $\mtx{F}$ is the $n \times n$ unitary discrete Fourier transform (DFT),
whose entries take the values $f_{pq} = n^{-1/2} \, \econst^{-2\pi\iunit (p-1)(q-1)/n}$ for $p, q = 1, 2, \dots, n$
\item $\mtx{R}$ is an $n \times \ell$ matrix that samples $\ell$ coordinates
from $n$ uniformly at random, i.e., its $\ell$ columns are drawn randomly
without replacement from the columns of the $n \times n$ identity matrix.
\end{itemize}
\lsp
Now, via a subsampled FFT ~\cite{woolfe2008fast}, we can compute the
sample matrix $\mtx{Y}=\mtx{A}\mtx{\Omega}$ with
$\mathcal{O}(mn\log(\ell))$ operations.
The total number of operations required by this procedure is reduced to
\begin{equation}
\label{eq:cost_SRFT}
T_{\rm struct} \sim mn \log(\ell) + \ell^2 n
\end{equation}
Hence, the computational complexity of the approach is essentially
$\mathcal{O}(mn \log(\ell))$.
% If $\ell$ is substantially larger than the numerical rank $r(\mtx{A})$
% \ref{eq:num-rank}, then the orthogonalization can be done in
% $\mathcal{O}(k\ell n)$ instead of $\mathcal{O}(\ell^2n)$.
\begin{figure}[ht]
\begin{center}
\fbox{
\begin{minipage}{.9\textwidth}
\begin{center}
\textsc{Fast Randomized Range Finder}
\end{center}
\begin{tabbing}
\hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \hspace{5mm} \= \kill
\anum{1} \>Draw an $n\times \ell$ SRFT test matrix $\mtx{\Omega}$, as defined by \eqref{eq:def_srft}. \\
%\pgnotate{Added reference.}\\
\anum{2} \>Form the $m\times \ell$ matrix $\mtx{Y} = \mtx{A}\mtx{\Omega}$ using a (subsampled) FFT.\\
\anum{3} \>Construct an $m \times \ell$ matrix $\mtx{Q}$ whose columns form an orthonormal\\
\> basis for the range of $\mtx{Y}$, e.g., using the QR factorization $\mtx{Y} = \mtx{Q}\mtx{R}$.
\end{tabbing}
\end{minipage}}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% STAGE 2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Stage 2} \label{sec:stage2}
The output of the Stage 1 \ref{sec:stage1} produces an orthonormal matrix $\mtx{Q}$ whose
range captures the action of the matrix $\mtx{A}$. The goal of Stage 2
\ref{sec:stage2} is
to produce standard approximate matrix factorizations of $\mtx{A}$
using this $\mtx{Q}$.
This subsection is divided into three parts; first, we will show how to compute
standard approximate matrix factorizations (SVD and QR) from a general
approximate low-rank factorization. Recall that taking $\mtx{B}=\mtx{Q}$
and $\mtx{C}=\mtx{Q}^\adj\mtx{A}$ we readily have a factorization that
satisfies $\|\mtx{A}-\mtx{B}\mtx{C}\|\leq\varepsilon$.
Then, we will describe in detail the \textit{Direct SVD} algorithm, which will
consist in constructing an SVD from $\mtx{B}$ and $\mtx{C}$. Finally, we will
comment on other more involved methods that avoid computing the expensive
product $\mtx{C}=\mtx{Q}^\adj\mtx{A}$.
\subsubsection{Compute standard QR and SVD from a general factorization}
Now we will specify how we can compute the standards SVD and QR decompositions
from a general low rank decomposition $\|\mtx{A} - \mtx{B}\mtx{C}\|\leq\varepsilon$
maintaining the tolerance $\epsilon$ from Stage 1 \ref{sec:stage1}.
\begin{itemize}
\item \label{itm:SVD-from-C} \textit{SVD decomposition:} $\|\mtx{A}-\mtx{U}\mtx{\Sigma}\mtx{V}^\adj\|\leq\varepsilon$
\lsp
\begin{enumerate}
\item Compute a QR factorization of $\mtx{B}$ so that $\mtx{B} = \mtx{Q}_{1}\mtx{R}_{1}$.
\item Form the product $\mtx{D} = \mtx{R}_{1}\mtx{C}$, and compute an SVD:
$\mtx{D} = \mtx{U}_{2}\mtx{\Sigma}\mtx{V}^{\adj}$.
\item Form the product $\mtx{U} = \mtx{Q}_{1}\mtx{U}_{2}$.
\end{enumerate}
\lsp
% The result is a diagonal matrix $\mtx{\Sigma}$ and orthonormal matrices $\mtx{U}$ and $\mtx{V}$
% such that $\norm{\mtx{A} - \mtx{U}\mtx{\Sigma}\mtx{V}^{\adj}} \leq \varepsilon$.
\item \label{itm:QR-from-C} \textit{QR decomposition:} $\|\mtx{A}-\mtx{Q}\mtx{R}\|\leq\varepsilon$
\lsp
\begin{enumerate}
\item Compute a QR factorization of $\mtx{B}$ so that $\mtx{B} = \mtx{Q}_{1}\mtx{R}_{1}$.
\item Form the product $\mtx{D} = \mtx{R}_{1}\mtx{C}$, and compute a QR factorization: $\mtx{D} = \mtx{Q}_{2}\mtx{R}$.
\item Form the product $\mtx{Q} = \mtx{Q}_{1}\mtx{Q}_{2}$.
\end{enumerate}
% The result is an orthonormal matrix $\mtx{Q}$ and a weakly upper-triangular matrix $\mtx{R}$ such
% that $\norm{\mtx{A} - \mtx{Q}\mtx{R}} \leq \varepsilon$.
\end{itemize}
\subsubsection{Direct SVD} \label{alg:direct-svd}
The procedure described in \ref{itm:SVD-from-C} to compute the approximate
SVD decomposition without sacrificing error, defines what we call
the \textit{Direct SVD} \ref{alg:direct-svd}.
\begin{figure}[ht]
\begin{center}
\fbox{\begin{minipage}{.9\textwidth}
\begin{center}
\textsc{Direct SVD}
\end{center}
\anum{1} Form the matrix
$
\mtx{B} = \mtx{Q}^{\adj}\mtx{A}.
%\mtx{B} = \bigl(\mtx{A}^{\adj}\mtx{Q}\bigr)^{\adj}.
$
\anum{2} Compute an SVD of the small matrix:
$
\mtx{B} = \widetilde{\mtx{U}}\mtx{\Sigma}\mtx{V}^{\adj}.
$
\noindent
\anum{3} Form the orthonormal matrix
$
\mtx{U} = \mtx{Q}\widetilde{\mtx{U}}.
$
\end{minipage}}
\end{center}
%\caption{trash}
%\label{alg:Atranspose}
\end{figure}
Although using \textit{Direct SVD} \ref{alg:direct-svd} algorithm for
Stage 2 \ref{sec:stage2} does not incur additional errors, the computation
of $\mtx{C}=\mtx{Q}^\adj\mtx{A}$ is in general too expensive for dense matrices.
More concretely, the product costs $\mathcal{O}(mn\ell)$, even more
expensive than the cost of Stage 1 \ref{sec:stage1} when using
the accelerated Algorithm
\ref{alg:fast-randomized-range-finder}.
\subsubsection{Different Procedures} \label{sec:dif-app}
\begin{itemize}
\item \textit{Match the cost} $\mathcal{O}(mn\log(\ell))$ \textit{of accelerated
Stage 1} \ref{alg:fast-randomized-range-finder}.
In order to match the complexity $\mathcal{O}(mn\log(\ell))$ from
Stage 1 \ref{sec:stage1} we must avoid the product $\mtx{Q}^\adj\mtx{A}$.
In \cite{halko2011finding}, the authors propose algorithms based
on row extraction of $\mtx{Q}$ via its \textit{Interpolative Decomposition}
$\mtx{Q}=\mtx{X}\mtx{Q}_{(J,:)}$ \cite{cheng2005compression}.
Now, $\mtx{Q}_{(J,:)}$ is a $k\times k$ matrix. The proposed
algorithm takes $\mtx{Q}$ as input and constructs a rank-$k$ matrix
factorization
\begin{equation}\label{eq:via-row-extraction}
\mtx{A}\approx\mtx{X}\mtx{B}
\end{equation}
where $\mtx{B}$ is a $k \times n$ matrix consisting of $k$ rows
extracted from $\mtx{A}$.
The key here is that \ref{eq:via-row-extraction} can be produced without any large
matrix-matrix multiplication resulting in a total of $\mathcal{O}(k^2(m+n))$
operations. The drawback is that the initial error is larger than the one
incurred by $\mtx{Q}\mtx{Q}^\adj\mtx{A}$
\footnote{In \cite{halko2011finding}, they prove that the error is increased at
most by $\left(\sqrt{1+4k(n-k)}\right)\varepsilon$, which can be large
when $\varepsilon$ is not that small or $kn$ is large.}.
\item \textit{Single-Pass algorithms}
The previously described algorithms require revisiting the matrix $\mtx{A}$
multiple times. In \cite{halko2011finding} they propose single-pass
algorithms relying on the observation that all the information
you need to compute the decomposition is in the matrices $\mtx{\Omega},\mtx{Y}$
and $\mtx{Q}$. They call the Algorithm \textit{Eigenvalue Decomposition in
One-pass}. However, this algorithm too adds additional error to the Stage 1
\footnote{Authors in \cite{halko2011finding} argue that this issue can be
addressed with extra oversampling.}.
\end{itemize}
\subsection{Full Algorithms}\label{sec:full-algos}
Let's now propose a full algorithm depending on the input matrix properties.
\begin{itemize}
\item \textit{General Matrices That Fit in Core Memory}
In this case, the appropriate method for Stage 1 is the accelerated
Algorithm \ref{alg:fast-randomized-range-finder} that uses structured random
matrices. For Stage 2, use the \textit{row-extraction technique}
described in \ref{sec:dif-app}. The overall cost $T_{\rm random}$ reduces to
\begin{equation} \label{eq:cost-1}
\boxed{T_{\rm random} \sim mn \log(k) + k^2 (m+n)}
\end{equation}
The overall approximation error satisfies
\footnote{This can be derived by combining the error analysis
of range approximation with SRFT (which can be found in \cite{halko2011finding})
and the additional error incurred by the \textit{row-extraction technique}.}
\begin{equation} \label{eq:totalerror-1}
\norm{ \mtx{A} - \mtx{U\Sigma V}^\adj }
\lesssim n \cdot \sigma_{k+1},
\end{equation}
\item \textit{Matrices for which Matrix-Vector Products are Cheap.}
In this set of matrices we include for example sparse and structured matrices for
which the matrix-vector multiplication is substantially cheaper than the general case.
The appropriate method for Stage 1 is Algorithm \ref{alg:randomized-range-finder}
with $p$ constant \footnote{If we are dealing
with the \textit{fixed-precision problem}, then we use
Algorithm \ref{alg:adaptive-randomized-range-finder}.}, or more generally
Algorithm \ref{alg:randomized-power-iteration} with $q>0$. For Stage 2, we apply
the \textit{Direct SVD} \ref{alg:direct-svd}. The total cost $T_{\rm sparse}$ satisfies
\begin{equation}
\label{eq:sparsealg-cost}
\boxed{T_{\rm sparse} = (2q+2)\,(k + p) \, T_{\rm mult} + \bigO(k^2 (m + n))}
\end{equation}
The overall approximation error satisfies (see Corollary \ref{cor:power-method-spec-gauss}).
\begin{equation}
\label{eq:sparsealg-error2}
\norm{ \mtx{A} - \mtx{U\Sigma V}^\adj } \lesssim (kn)^{1/2(2q+1)} \cdot \sigma_{k+1}.
\end{equation}
\end{itemize}
\newpage
|
{"hexsha": "57f30c917031213f9c88e42a01fde935b14fe6ec", "size": 21485, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "report/sections/algorithms.tex", "max_stars_repo_name": "alexnowakvila/ProbAlgosProj", "max_stars_repo_head_hexsha": "819251313cb2b9ca4748892ee62f40c602453ddc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-10T11:13:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T07:55:46.000Z", "max_issues_repo_path": "report/sections/algorithms.tex", "max_issues_repo_name": "alexnowakvila/ProbAlgosProj", "max_issues_repo_head_hexsha": "819251313cb2b9ca4748892ee62f40c602453ddc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "report/sections/algorithms.tex", "max_forks_repo_name": "alexnowakvila/ProbAlgosProj", "max_forks_repo_head_hexsha": "819251313cb2b9ca4748892ee62f40c602453ddc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-05T07:55:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-05T07:55:49.000Z", "avg_line_length": 43.668699187, "max_line_length": 130, "alphanum_fraction": 0.7006748895, "num_tokens": 6988}
|
import gsum as gm
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from stats_utils import *
from matter import *
import seaborn as sns
import time
from os import path
# --- Global plotting configuration -------------------------------------------
# LaTeX text rendering, publication DPI, serif font, and tight PDF output for
# every saved figure.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['figure.dpi'] = 150
mpl.rcParams['font.family'] = 'serif'
mpl.rc('savefig', transparent=False, bbox='tight', pad_inches=0.05, format='pdf')
# Load the full table of matter calculations.
df = pd.read_csv('../data/all_matter_data.csv')
# Convert differences to total prediction at each MBPT order
mbpt_orders = ['Kin', 'MBPT_HF', 'MBPT_2', 'MBPT_3', 'MBPT_4']
df[mbpt_orders] = df[mbpt_orders].apply(np.cumsum, axis=1)
# 'total' is now unnecessary. Remove it.
df.pop('total')
# EFT orders used in the convergence analyses.  Presumably these index entries
# of the 'OrderEFT' column -- TODO confirm against the data file.
orders = np.array([0, 2, 3, 4])
# Default interaction/cutoff; both are overwritten by the product() loop below.
# body = 'NN-only'
body = 'NN+3N'
Lambda = 450
# Accumulates one metadata dict per generated figure; written to fig_db.csv at
# the end of the script.
info_list = []
from itertools import product
# Run the complete analysis for every combination of interaction type and
# momentum cutoff Lambda.
for body, Lambda in product(['NN+3N', 'NN-only'], [450, 500]):
# Map cutoff -> [N2LO fit label, N3LO fit label]; presumably values of the
# 'fit' column -- verify against the data file.
fits = {450: [1, 7], 500: [4, 10]}
# Training points: every 5th point; validation: every 5th point, offset by 2.
train1 = slice(None, None, 5)
valid1 = slice(2, None, 5)
# valid1 = np.array([i % 5 != 0 for i in range(len())])
[fit_n2lo, fit_n3lo] = fits[Lambda]
savefigs = True
# Nominal breakdown scale (MeV) used for the fixed-breakdown diagnostic plots.
Lb = 600
# Grid over which the breakdown-scale posterior is evaluated.
breakdown_min = 300
breakdown_max = 1000
breakdown_num = 100
Lb_vals = np.linspace(breakdown_min, breakdown_max, breakdown_num)
Lb_logprior_vals = Lb_logprior(Lb_vals)
# Grid for the GP correlation length-scale posterior (fm^-1, per plot label below).
ls_min = 0.1
ls_max = 1.5
ls_num = 50
ls_vals = np.linspace(ls_min, ls_max, ls_num)
# ls_min = ls_max = ls_num = ls_vals = None
# Small fixed white-noise term added to the RBF kernel for numerical stability.
nugget = 1e-4
kernel1 = RBF(length_scale=1, length_scale_bounds=(5e-2, 4)) + \
WhiteKernel(noise_level=nugget, noise_level_bounds='fixed')
# kernel1 = RBF(length_scale=1, length_scale_bounds=(1e-2, 100)) + \
# WhiteKernel(noise_level=nugget, noise_level_bounds='fixed')
kernel1_theta = kernel1.theta
# Reference scale passed to the analyses; meaning defined in
# MatterConvergenceAnalysis (not visible here) -- presumably MeV.
ref1 = 16
# Prior hyperparameters forwarded to the analyses via **hyperparams; their
# semantics are defined in MatterConvergenceAnalysis / stats_utils.
hyperparams = dict(
center=0,
disp=0,
df=1,
scale=1
)
# Keep rows whose fit label matches this cutoff's fits, or that carry no fit label.
mask_fit = np.isin(df['fit'], fits[Lambda]) | np.isnan(df['fit'])
mask1 = \
(df['Body'] == body) & \
mask_fit & \
(df['Lambda'] == Lambda)
# df_fit = df[mask_fit]
# x == 0 rows feed the 'neutron' analysis and x == 0.5 the 'symmetric' one
# (see the constructors below); presumably x is the proton fraction.
df_n = df[mask1 & (df['x'] == 0)]
df_s = df[mask1 & (df['x'] == 0.5)]
# Fermi momenta and densities taken from the LO rows (assumed identical
# across EFT orders -- TODO confirm).
kf_n = df_n[df_n['OrderEFT'] == 'LO']['kf'].values
kf_s = df_s[df_s['OrderEFT'] == 'LO']['kf'].values
density = df_n[df_n['OrderEFT'] == 'LO']['n'].values
kf_d = kf_n.copy()
# valid1 = np.arange(len(kf_n)) % 5 != 0
# GP inputs must be 2D: shape (n_points, 1).
Kf_n = kf_n[:, None]
Kf_s = kf_s[:, None]
Kf_d = kf_d[:, None]
# Observables at the highest MBPT order ('MBPT_4'), one column per EFT order.
y1_n = np.array([df_n[df_n['OrderEFT'] == order]['MBPT_4'].values for order in df_n['OrderEFT'].unique()]).T
y1_s = np.array([df_s[df_s['OrderEFT'] == order]['MBPT_4'].values for order in df_s['OrderEFT'].unique()]).T
# Difference of neutron-matter and symmetric-matter observables.
y1_d = y1_n - y1_s
fig_path = 'new_figures'
# One convergence analysis per system, all sharing the same kernel, grids,
# and train/validation split:
#   analysis_n -- pure neutron matter,  analysis_s -- symmetric matter,
#   analysis_d -- their difference.
analysis_n = MatterConvergenceAnalysis(
X=Kf_n, y=y1_n, orders=orders, train=train1, valid=valid1, ref=ref1, ratio='kf', density=density,
kernel=kernel1, system='neutron', fit_n2lo=fit_n2lo, fit_n3lo=fit_n3lo, Lambda=Lambda,
body=body, savefigs=savefigs, fig_path=fig_path, **hyperparams
)
analysis_s = MatterConvergenceAnalysis(
X=Kf_s, y=y1_s, orders=orders, train=train1, valid=valid1, ref=ref1, ratio='kf', density=density,
kernel=kernel1, system='symmetric', fit_n2lo=fit_n2lo, fit_n3lo=fit_n3lo, Lambda=Lambda,
body=body, savefigs=savefigs, fig_path=fig_path, **hyperparams
)
analysis_d = MatterConvergenceAnalysis(
X=Kf_d, y=y1_d, orders=orders, train=train1, valid=valid1, ref=ref1, ratio='kf', density=density,
kernel=kernel1, system='difference', fit_n2lo=fit_n2lo, fit_n3lo=fit_n3lo, Lambda=Lambda,
body=body, savefigs=savefigs, fig_path=fig_path, **hyperparams
)
# Evaluate the breakdown-scale and length-scale posteriors on the grids above.
# max_idx presumably selects the highest EFT orders to include -- confirm in
# MatterConvergenceAnalysis.  Timed because this is the expensive step.
t_start = time.time()
analysis_n.setup_posteriors(
breakdown_min=breakdown_min, breakdown_max=breakdown_max, breakdown_num=breakdown_num,
ls_min=ls_min, ls_max=ls_max, ls_num=ls_num,
max_idx=[2, 3], logprior=None
)
analysis_s.setup_posteriors(
breakdown_min=breakdown_min, breakdown_max=breakdown_max, breakdown_num=breakdown_num,
ls_min=ls_min, ls_max=ls_max, ls_num=ls_num,
max_idx=[2, 3], logprior=None
)
analysis_d.setup_posteriors(
breakdown_min=breakdown_min, breakdown_max=breakdown_max, breakdown_num=breakdown_num,
ls_min=ls_min, ls_max=ls_max, ls_num=ls_num,
max_idx=[2, 3], logprior=None
)
print(time.time() - t_start)
# Combined breakdown posterior: pointwise product of the three systems' pdfs,
# i.e. the three pieces of evidence are treated as independent.
# NOTE(review): the product is not renormalized here -- presumably the plotting
# routine normalizes each pdf; confirm in stats_utils.pdfplot.
df_Lb_pdf_all = analysis_n.df_breakdown.copy()
df_Lb_pdf_all['pdf'] = analysis_n.df_breakdown['pdf'] * analysis_s.df_breakdown['pdf'] * analysis_d.df_breakdown['pdf']
df_Lb_pdf_all['system'] = 'All'
def dict_to_str(d):
    """Render a mapping as a filename-safe string.

    Items become ``key-value`` pairs joined by underscores, and every ``.``
    is replaced by ``p`` (e.g. ``{'a': 1.5}`` -> ``'a-1p5'``).  An empty
    mapping yields an empty string.
    """
    joined = '_'.join(f'{key}-{value}' for key, value in d.items())
    return joined.replace('.', 'p')
# --- Breakdown-scale (Lambda_b) posterior figure -----------------------------
fig, ax = plt.subplots(figsize=(3.4, 4.4))
# Stack the three per-system posteriors plus the combined 'All' posterior so
# pdfplot can draw them as one categorical plot, split by EFT order.
df_Lb_pdf = pd.concat([analysis_n.df_breakdown, analysis_s.df_breakdown, analysis_d.df_breakdown, df_Lb_pdf_all])
ax = pdfplot(
x=r'$\Lambda_b$ (MeV)', y='system', pdf='pdf', data=df_Lb_pdf, hue='Order',
order=[r'$E/N$', r'$E/A$', r'$S_2$', 'All'], hue_order=[r'N$^2$LO', r'N$^3$LO'], cut=1e-2, linewidth=1,
palette="coolwarm", saturation=1., ax=ax, margin=0.3,
)
ax.set_xlim(0, 1200)
ax.set_xticks([0, 300, 600, 900, 1200])
ax.grid(axis='x')
# Keep gridlines beneath the pdf curves.
ax.set_axisbelow(True)
if savefigs:
# figure_name encodes the model settings into the output file name.
name = analysis_n.figure_name(
'Lb_pdfs_', breakdown=(breakdown_min, breakdown_max, breakdown_num), include_system=False,
ls=(ls_min, ls_max, ls_num),
)
fig.savefig(
name
)
# Record figure metadata (with path relative to the figure directory) for
# the fig_db.csv written at the end of the script.
info = analysis_n.model_info()
name = path.relpath(name, analysis_n.fig_path)
info['name'] = name
info_list.append(info)
df_ls_pdf = pd.concat([analysis_n.df_ls, analysis_s.df_ls, analysis_d.df_ls])
ax = pdfplot(
x=r'$\ell$ (fm$^{-1}$)', y='system', pdf='pdf', data=df_ls_pdf, hue='Order',
order=[r'$E/N$', r'$E/A$', r'$S_2$'], hue_order=[r'N$^2$LO', r'N$^3$LO'], cut=1e-2, linewidth=1.,
palette="coolwarm", saturation=1., ax=None, margin=0.3,
)
ax.set_xticks([0, 0.5, 1.])
ax.grid(axis='x')
ax.set_axisbelow(True)
if savefigs:
name = analysis_n.figure_name(
'ls_pdfs_', breakdown=(breakdown_min, breakdown_max, breakdown_num), include_system=False,
ls=(ls_min, ls_max, ls_num),
)
fig.savefig(
name
)
info = analysis_n.model_info()
name = path.relpath(name, analysis_n.fig_path)
info['name'] = name
info_list.append(info)
_, info = analysis_n.plot_joint_breakdown_ls(max_idx=3, return_info=True)
info_list.append(info)
_, info = analysis_s.plot_joint_breakdown_ls(max_idx=3, return_info=True)
info_list.append(info)
_, info = analysis_d.plot_joint_breakdown_ls(max_idx=2, return_info=True)
info_list.append(info)
# --- Per-system diagnostics --------------------------------------------------
# For each system: coefficient-diagnostic and observable plots, once at the
# fixed breakdown scale Lb and once with breakdown=None (presumably letting the
# analysis pick its inferred value -- confirm in MatterConvergenceAnalysis).
# _, info = analysis_n.plot_coeff_diagnostics(breakdown=Lb, return_info=True)
# info_list.append(info)
#
# fig = analysis_n.plot_observables(breakdown=Lb, show_process=True)
_, info = analysis_n.plot_coeff_diagnostics(breakdown=Lb, return_info=True)
info_list.append(info)
_, info = analysis_n.plot_observables(breakdown=Lb, show_process=True, return_info=True)
info_list.append(info)
_, info = analysis_n.plot_coeff_diagnostics(breakdown=None, return_info=True)
info_list.append(info)
_, info = analysis_n.plot_observables(breakdown=None, show_process=True, return_info=True)
info_list.append(info)
_, info = analysis_s.plot_coeff_diagnostics(breakdown=Lb, return_info=True)
info_list.append(info)
_, info = analysis_s.plot_observables(breakdown=Lb, show_process=True, return_info=True)
info_list.append(info)
_, info = analysis_s.plot_coeff_diagnostics(breakdown=None, return_info=True)
info_list.append(info)
_, info = analysis_s.plot_observables(breakdown=None, show_process=True, return_info=True)
info_list.append(info)
# Observable plots for the difference system are disabled (commented out below).
_, info = analysis_d.plot_coeff_diagnostics(breakdown=Lb, return_info=True)
info_list.append(info)
# _, info = analysis_d.plot_observables(breakdown=Lb, show_process=True, return_info=True)
# info_list.append(info)
_, info = analysis_d.plot_coeff_diagnostics(breakdown=None, return_info=True)
info_list.append(info)
# _, info = analysis_d.plot_observables(breakdown=None, show_process=True, return_info=True)
# info_list.append(info)
# Flush the accumulated per-figure metadata to disk as a flat CSV database.
columns = list(info_list[0].keys())
fig_db = pd.DataFrame(info_list, columns=columns)
fig_db.to_csv('fig_db.csv', index=False)
|
{"hexsha": "7c5ad9dd05212ef993942cf8561ad267a4e0e674", "size": 8861, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/populate_fig_database.py", "max_stars_repo_name": "buqeye/nuclear-matter-convergence", "max_stars_repo_head_hexsha": "6500e686c3b0579a1ac7c7570d84ffe8e09ad085", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-15T19:08:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T15:37:27.000Z", "max_issues_repo_path": "analysis/populate_fig_database.py", "max_issues_repo_name": "buqeye/nuclear-matter-convergence", "max_issues_repo_head_hexsha": "6500e686c3b0579a1ac7c7570d84ffe8e09ad085", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/populate_fig_database.py", "max_forks_repo_name": "buqeye/nuclear-matter-convergence", "max_forks_repo_head_hexsha": "6500e686c3b0579a1ac7c7570d84ffe8e09ad085", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-08-31T18:54:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T14:35:18.000Z", "avg_line_length": 34.0807692308, "max_line_length": 123, "alphanum_fraction": 0.6568107437, "include": true, "reason": "import numpy", "num_tokens": 2651}
|
[STATEMENT]
lemma subd_0 [simp]: "subd p 0 = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. subd p 0 = 0
[PROOF STEP]
by (induction p) auto
|
{"llama_tokens": 71, "file": null, "length": 1}
|
\section{Notes}
\todo{\begin{enumerate}
\item Merge this delta section - use for snippets
\item Consider moving Code optimization out of intro
\item Optimizable range is only a small part of this part
\item Write a clear limitations section
\end{enumerate}
}
\todo{Say how the baseline does not include halts, but halted states are time dependent only }
\begin{figure}
\label{fig:tlpilp}
\includegraphics[width=0.9\linewidth]{./Plots/tlp_vs_ilp/tlpilp-figure0.pdf}
\caption{Power Overhead from TLP and ILP}
\end{figure}
\todo{Make comment about how reducing the activity factor is in some sense counter to time optimization - that doing more per unit time is better. }
\todo{Energy Measurement and Prediction for Multi-threaded Programs -- this gives a simple model based on power modelling and claims sub 10 \%}
\todo{citation S. Kaxiras and M. Martonosi, Computer Architecture Techniques for Power-Efficiency, 1st ed. Morgan and
Claypool Publishers, 2008.
The end of Dennard scaling is expected to shrink the range of DVFS in future nodes, limiting the energy savings of this technique. This paper evaluates how much we can increase the effectiveness of DVFS by using a software decoupled access-execute approach. Decoupling the data access from execution allows us to apply optimal voltage-frequency selection for each phase and therefore improve energy efficiency over standard coupled execution.
}
\todo{Cite this paper for dennard: A 30 Year Retrospective on Dennard's MOSFET Scaling Paper}
\reword{Dennard scaling, which states, roughly, that as transistors get smaller their power density stays constant, so that the power use stays in proportion with area: both voltage and current scale (downward) with length \todo{cite Dennard's paper}}
\fragment{When viewed in this context, all event-driven power models are simply linking performance events back to their associated activity costs/factors}
\reword{The uncore is a collection of components of a processor
not in the core but essential for core performance. The
CPU core contains components involved in executing in-
structions, including execution units, L1 and L2 cache,
branch prediction logic, etc. Uncore functions include
the last level cache (LLC), integrated memory controllers
(IMC), on-chip interconnect (OCI), power control logic
(PWR), etc. as shown in Figure 1. With growing cache
sizes and the integration of various SoC components on
CPU die, the uncore is becoming an increasingly impor-
tant contributor to total SoC power.}
\todo{PMB2014 checkpointing paper - exascale expects 70\% of energy to go to memory dimms.}
\todo{PMBS2014 checkpointing paper 2 - extreme low or high frequency higher failure rate per job.}
\todo{Destroy this: Models and Metrics to Enable Energy-Efficiency Optimizations}
%\begin{table}
%\centering
%\small
%\begin{tabular}{@{}lll@{}} \toprule
%&\multicolumn{2}{c}{Granularity} \\ \cmidrule(r){2-3}
%Approach & Instruction Level & Architecture Level \\ %\midrule
%Measurement & Hanhel et. al.~\cite{hahnel:2012aa} & %\todo{find eg}\\
%Perf. Counters & Shao \& Brooks~\cite{shao:2013aa} & %Isci \& Martonosi~\cite{isci:2003aa} \\
%Simulation & Tiwari et. al.~\cite{tiwari:1994aa} & Li et. al.~\cite{li:2009aa} \\
%Analytical Modelling & Hong et. al.~\cite{hong:2010aa} %& Karkhanis \& Smith~\cite{karkhanis:2007aa} \\
%\bottomrule
%\end{tabular}
% \caption{Energy Estimation Taxonomy}
%\label{tab:taxonomy}
%d\end{table}
|
{"hexsha": "605e079d4a32d865f9b28725abf788ce6dfcfb1a", "size": 3471, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Paper/shadow.tex", "max_stars_repo_name": "csrhau/Iconoclast", "max_stars_repo_head_hexsha": "a77cf2604ea7687415e12ccae6ee79f57f7c4a57", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-18T05:55:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T05:55:43.000Z", "max_issues_repo_path": "Paper/shadow.tex", "max_issues_repo_name": "csrhau/Iconoclast", "max_issues_repo_head_hexsha": "a77cf2604ea7687415e12ccae6ee79f57f7c4a57", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Paper/shadow.tex", "max_forks_repo_name": "csrhau/Iconoclast", "max_forks_repo_head_hexsha": "a77cf2604ea7687415e12ccae6ee79f57f7c4a57", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3663366337, "max_line_length": 442, "alphanum_fraction": 0.7689426678, "num_tokens": 913}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code demonstrating the Python Hanabi interface."""
from __future__ import print_function
import os
from datetime import datetime
import numpy as np
from hanabi_learning_environment import pyhanabi
import intention_update
def run_game(game_parameters):
    """Play one Hanabi game, selecting uniformly random legal moves.

    After every non-chance move, the joint intention distribution over
    (PLAY, DISCARD, KEEP) is updated via
    ``intention_update.infer_joint_intention`` and appended to a history
    that is saved to ``examples/history/`` as a ``.npy`` file at game end.

    Args:
        game_parameters: dict of pyhanabi.HanabiGame parameters,
            e.g. ``{"players": 3, "hand_size": 2, "colors": 4}``.
    """
    game = pyhanabi.HanabiGame(game_parameters)
    print(game.parameter_string(), end="")
    state = game.new_initial_state()
    counter = 0
    # Initial intention is agnostic between PLAY, DISCARD, KEEP for every
    # card slot of every player.
    intention = np.array(
        [
            [[0.3, 0.2, 0.5] for _ in range(game.hand_size())]
            for _ in range(game.num_players())
        ]
    )
    intention_history = []
    while not state.is_terminal():
        # The chance "player" deals cards; no intention update applies.
        if state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
            state.deal_random_card()
            continue
        print()
        print()
        print()
        print("counter: ", counter)
        print("STATE")
        print(state)
        print("KNOWLEDGE")
        knowledge = intention_update.generate_knowledge(game, state)
        print(knowledge)
        legal_moves = state.legal_moves()
        print("")
        print("Number of legal moves: {}".format(len(legal_moves)))
        print(legal_moves)
        move = np.random.choice(legal_moves)
        print("Chose random legal move: {}".format(move))
        # Snapshot the state before applying the move: the intention update
        # needs the pre-move state to interpret the action.
        old_state = state.copy()
        state.apply_move(move)
        print()
        print("INTENTION")
        intention = intention_update.infer_joint_intention(
            game=game,
            action=move,
            state=old_state,
            knowledge=knowledge,
            prior=intention,
        )
        intention_history.append(intention)
        np.set_printoptions(precision=2, suppress=True)
        print(intention)
        counter += 1
    print("")
    print("Game done. Terminal state:")
    print("")
    print(state)
    print("")
    print("score: {}".format(state.score()))
    # Colons and spaces (as produced by str(datetime.now())) are invalid in
    # filenames on some platforms, so use a filesystem-safe timestamp.
    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    history_dir = os.path.join(os.getcwd(), "examples/history")
    # Create the target directory so np.save cannot fail on a fresh checkout.
    os.makedirs(history_dir, exist_ok=True)
    path = os.path.join(
        history_dir, "score_{}_{}.npy".format(state.score(), timestamp)
    )
    np.save(
        path, intention_history
    )  # save histories of intention change for later analysis
if __name__ == "__main__":
    # Check that the cdef and library were loaded from the standard paths.
    assert pyhanabi.cdef_loaded(), "cdef failed to load"
    assert pyhanabi.lib_loaded(), "lib failed to load"
    # currently, only using colors=4 ensures correct return of firework after copying the state due to bug
    # Play a single random game: 3 players, 2 cards per hand, 4 colors.
    run_game({"players": 3, "hand_size": 2, "colors": 4, "random_start_player": False})
|
{"hexsha": "ee5e4c058e693ab28ff269e9b5b71abd09b6cfaa", "size": 3309, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/game_example.py", "max_stars_repo_name": "moorugi98/hanabi-learning-environment", "max_stars_repo_head_hexsha": "3c294fe98b6764a5a700b15f1c407dc8c171b593", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-13T21:04:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-13T21:04:59.000Z", "max_issues_repo_path": "examples/game_example.py", "max_issues_repo_name": "moorugi98/hanabi-learning-environment", "max_issues_repo_head_hexsha": "3c294fe98b6764a5a700b15f1c407dc8c171b593", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/game_example.py", "max_forks_repo_name": "moorugi98/hanabi-learning-environment", "max_forks_repo_head_hexsha": "3c294fe98b6764a5a700b15f1c407dc8c171b593", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5142857143, "max_line_length": 106, "alphanum_fraction": 0.6476276821, "include": true, "reason": "import numpy", "num_tokens": 734}
|
from glob import glob
import os
import cv2
import numpy as np
from tqdm import tqdm
from collections import defaultdict
# Class palette as RGB triplets; list index == class index in the output mask.
# NOTE(review): this looks like the Cityscapes palette — confirm against the
# dataset documentation.
colors = [[128, 64, 128],
          [244, 35, 232],
          [70, 70, 70],
          [102, 102, 156],
          [190, 153, 153],
          [153, 153, 153],
          [250, 170, 30],
          [220, 220, 0],
          [107, 142, 35],
          [152, 251, 152],
          [70, 130, 180],
          [220, 20, 60],
          [255, 0, 0],
          [0, 0, 142],
          [0, 0, 70],
          [0, 60, 100],
          [0, 80, 100],
          [0, 0, 230],
          [119, 11, 32],
          ]
# --- Modified to work with the MiniFrance dataset --- #
# https://ieee-dataport.org/open-access/minifrance
# --- Define folder structure --- #
base_folder = '/data/datasets/DFC2022/Raw_data'
lbl_tr_dir = f'{base_folder}/labeled_train'
unlbl_tr_dir = f'{base_folder}/unlabeled_train'
lbl_val_dir = f'{base_folder}/val'
# --- Get the city names --- #
lbl_tr_cities = os.listdir(lbl_tr_dir)
print("Labeled Train Cities:")
print(lbl_tr_cities)
# Ground-truth subfolder name inside every city directory.
TRUTH_attr = 'UrbanAtlas'
# --- Collect ground-truth image paths per city --- #
lbl_tr_paths = defaultdict(list)
for city in lbl_tr_cities:
    truth_path = f"{lbl_tr_dir}/{city}/{TRUTH_attr}"
    lbl_tr_paths[city].extend(glob(f"{truth_path}/*"))
# --- Translate RGB color masks into single-channel class-index masks --- #
for city, paths in lbl_tr_paths.items():
    for img_path in tqdm(paths):
        image = cv2.imread(img_path)
        if image is None:
            # Skip unreadable or non-image files instead of crashing on
            # the subsequent indexing.
            continue
        # 250 marks "unlabelled" pixels that match no palette color.
        results = np.full(image.shape[:2], 250, dtype=image.dtype)
        for class_idx, color in enumerate(colors):
            # cv2 loads images in BGR order while the palette is RGB,
            # hence the reversed channel comparison.
            mask = (
                (image[:, :, 0] == color[2])
                & (image[:, :, 1] == color[1])
                & (image[:, :, 2] == color[0])
            )
            results[mask] = class_idx
        # Mirror the input tree into "<TRUTH_attr>_translated" next to it.
        out_path = img_path.replace(TRUTH_attr, f'{TRUTH_attr}_translated')
        os.makedirs(os.path.dirname(out_path), exist_ok=True)
        cv2.imwrite(out_path, results)
|
{"hexsha": "8385e64081764db09d886fe8e2fde540879bec7b", "size": 2224, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/translate_labels.py", "max_stars_repo_name": "drkostas/SemiSeg-Contrastive", "max_stars_repo_head_hexsha": "af6b133400368911ef77f401b7673894fe6aa05c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/translate_labels.py", "max_issues_repo_name": "drkostas/SemiSeg-Contrastive", "max_issues_repo_head_hexsha": "af6b133400368911ef77f401b7673894fe6aa05c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/translate_labels.py", "max_forks_repo_name": "drkostas/SemiSeg-Contrastive", "max_forks_repo_head_hexsha": "af6b133400368911ef77f401b7673894fe6aa05c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0540540541, "max_line_length": 80, "alphanum_fraction": 0.5854316547, "include": true, "reason": "import numpy", "num_tokens": 643}
|
/*
* ******************************************************************************
* Copyright 2014-2016 Spectra Logic Corporation. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use
* this file except in compliance with the License. A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file.
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* ****************************************************************************
*/
#include <stdio.h>
#include <string.h>
#include "ds3.h"
#include "ds3_net.h"
#include "test.h"
#include <boost/test/unit_test.hpp>
#include "checksum.h"
#include <glib.h>
/* Maximum length (including NUL) of the file paths used by these tests. */
static const unsigned char MAX_UNIT_TEST_FILEPATH_LENGTH = 64;
/* All fixture files live under this folder. */
static const char FOLDER_PREFIX[] = "resources/";
/* sizeof counts the trailing NUL, so this is strlen(FOLDER_PREFIX) + 1. */
static const unsigned char LENGTH_OF_FOLDER_PREFIX = sizeof(FOLDER_PREFIX);
/* Prefix for the temporary copies downloaded during a test. */
static const char TEMP_PREFIX[] = "temp-";
/* sizeof counts the trailing NUL, so this is strlen(TEMP_PREFIX) + 1. */
static const unsigned char LENGTH_OF_TEMP_PREFIX = sizeof(TEMP_PREFIX);
/* Per-file record of a bulk-get round trip: the source object name, the
 * local temp file it was downloaded to, how many chunks it arrived in, and
 * whether the downloaded content hash matched the original. */
struct checksum_result {
    char original_name[MAX_UNIT_TEST_FILEPATH_LENGTH];
    char tmp_name[MAX_UNIT_TEST_FILEPATH_LENGTH];
    int num_chunks;
    bool passed;
};
bool check_all_passed(uint32_t num_files, checksum_result* results) {
bool result = true;
for (uint32_t i = 0; i < num_files; i++) {
result = result && results[i].passed;
}
return result;
}
uint32_t get_number_of_chunks(uint32_t num_files, checksum_result* results, const char* filename) {
for (uint32_t i = 0; i < num_files; i++) {
if (g_strcmp0(results[i].original_name, filename) == 0) {
return results[i].num_chunks;
}
}
return 0;
}
uint32_t get_sum_of_chunks(uint32_t num_files, checksum_result* results) {
uint32_t result = 0;
for (uint32_t i = 0; i < num_files; i++) {
result += results[i].num_chunks;
}
return result;
}
/* Returns the index into |results| for the object named |current_obj_name|,
 * creating a new entry (and bumping |*max_file_index|) the first time the
 * name is seen.  Every call also increments that file's chunk counter. */
uint64_t getFileIndexForChunk(uint64_t* max_file_index, ds3_str* current_obj_name, checksum_result* results) {
    int64_t file_index = -1;
    /* Linear scan for an existing entry with the same object name. */
    for (uint64_t current_file_index = 0; current_file_index < *max_file_index; current_file_index++) {
        if (g_strcmp0(current_obj_name->value, results[current_file_index].original_name) == 0) {
            file_index = current_file_index;
            break;
        }
    }
    if (file_index == -1) {
        /* First chunk for this object: claim the next free slot and build
         * its temp-file name "resources/temp-<basename>".  The -1/-2
         * offsets drop the NUL terminators counted in the LENGTH_ constants
         * so the pieces concatenate without embedded NULs. */
        file_index = *max_file_index;
        (*max_file_index)++;
        memcpy(results[file_index].original_name, current_obj_name->value, current_obj_name->size);
        memcpy(results[file_index].tmp_name, FOLDER_PREFIX, LENGTH_OF_FOLDER_PREFIX); // "resources/"
        memcpy(results[file_index].tmp_name+LENGTH_OF_FOLDER_PREFIX-1, TEMP_PREFIX, LENGTH_OF_TEMP_PREFIX); // "resources/temp-"
        memcpy(results[file_index].tmp_name+LENGTH_OF_FOLDER_PREFIX+LENGTH_OF_TEMP_PREFIX-2, current_obj_name->value+LENGTH_OF_FOLDER_PREFIX-1, current_obj_name->size-LENGTH_OF_FOLDER_PREFIX+1);
    }
    results[file_index].num_chunks++;
    return (uint64_t)file_index;
}
/* Downloads every chunk listed in |chunk_response| into per-file temp files,
 * then hash-compares each temp file against its original, recording the
 * outcome (and per-file chunk counts) in |results|.
 * NOTE(review): |num_files| is unused here; the caller sizes |results| with
 * it — confirm the response never references more files than that. */
void checkChunkResponse(ds3_client* client, uint32_t num_files, ds3_master_object_list_response* chunk_response, checksum_result* results) {
    uint64_t max_file_index = 0;
    int64_t file_index = 0;
    ds3_request* request = NULL;
    ds3_error* error = NULL;
    for (uint64_t chunk_index = 0; chunk_index < chunk_response->num_objects; chunk_index++) {
        ds3_objects_response* chunk_object_list = chunk_response->objects[chunk_index];
        for (uint64_t chunk_object_index = 0; chunk_object_index < chunk_object_list->num_objects; chunk_object_index++) {
            FILE* w_file;
            ds3_bulk_object_response* current_obj = chunk_object_list->objects[chunk_object_index];
            /* Map this chunk's object name to a slot in |results|
             * (created on first sight) and count the chunk. */
            file_index = getFileIndexForChunk(&max_file_index, current_obj->name, results);
            const uint64_t length = current_obj->length;
            const uint64_t offset = current_obj->offset;
            request = ds3_init_get_object_request(chunk_response->bucket_name->value, current_obj->name->value, length);
            ds3_request_set_job(request, chunk_response->job_id->value);
            ds3_request_set_offset(request, offset);
            /* Append mode plus fseek lets chunks arrive in any order. */
            w_file = fopen(results[file_index].tmp_name, "a+");
            fseek(w_file, (long)current_obj->offset, SEEK_SET);
            error = ds3_get_object_request(client, request, w_file, ds3_write_to_file);
            ds3_request_free(request);
            fclose(w_file);
            handle_error(error);
        }
    }
    /* Verify each reassembled file against its source, then clean up. */
    for (uint64_t current_file_index = 0; current_file_index < max_file_index; current_file_index++) {
        printf("------Performing Data Integrity Test-------\n");
        results[current_file_index].passed = compare_hash(results[current_file_index].original_name, results[current_file_index].tmp_name);
        unlink(results[current_file_index].tmp_name);
    }
}
/* Like checkChunkResponse, but requests only two byte ranges per object —
 * [segment_size, 2*segment_size) and [3*segment_size, 4*segment_size) —
 * and verifies each downloaded range against the corresponding region of
 * the original file.
 * NOTE(review): |num_files| is unused here; the caller sizes |results|. */
void checkChunkResponsePartials(ds3_client* client, uint32_t num_files, ds3_master_object_list_response* chunk_response, checksum_result* results, uint32_t segment_size) {
    uint64_t max_file_index = 0;
    int64_t file_index = 0;
    ds3_request* request = NULL;
    ds3_error* error = NULL;
    for (uint64_t chunk_index = 0; chunk_index < chunk_response->num_objects; chunk_index++) {
        ds3_objects_response* chunk_object_list = chunk_response->objects[chunk_index];
        for (uint64_t chunk_object_index = 0; chunk_object_index < chunk_object_list->num_objects; chunk_object_index++) {
            FILE* w_file;
            ds3_bulk_object_response* current_obj = chunk_object_list->objects[chunk_object_index];
            file_index = getFileIndexForChunk(&max_file_index, current_obj->name, results);
            const uint64_t length = current_obj->length;
            const uint64_t offset = current_obj->offset;
            request = ds3_init_get_object_request(chunk_response->bucket_name->value, current_obj->name->value, length);
            ds3_request_set_job(request, chunk_response->job_id->value);
            ds3_request_set_offset(request, offset);
            /* Two disjoint ranges on one request: the ranges are written
             * back-to-back into the temp file. */
            ds3_request_set_byte_range(request, segment_size, segment_size*2-1);
            ds3_request_set_byte_range(request, segment_size*3, segment_size*4-1);
            w_file = fopen(results[file_index].tmp_name, "a+");
            fseek(w_file, (long)current_obj->offset, SEEK_SET);
            error = ds3_get_object_request(client, request, w_file, ds3_write_to_file);
            ds3_request_free(request);
            fclose(w_file);
            handle_error(error);
        }
    }
    for (uint64_t current_file_index = 0; current_file_index < max_file_index; current_file_index++) {
        printf("------Performing Data Integrity Test-------\n");
        /* First downloaded range lives at temp offset 0, second at
         * temp offset segment_size; compare each against its source span. */
        bool check1 = compare_hash_extended(results[current_file_index].original_name, results[current_file_index].tmp_name, segment_size, segment_size, 0);
        bool check2 = compare_hash_extended(results[current_file_index].original_name, results[current_file_index].tmp_name, segment_size, segment_size*3, segment_size);
        results[current_file_index].passed = check1 && check2;
        unlink(results[current_file_index].tmp_name);
    }
}
/* End-to-end bulk GET: uploads the default object set, starts a bulk get
 * job, downloads all chunks and verifies every file's hash matches. */
BOOST_AUTO_TEST_CASE( bulk_get ) {
    ds3_request* request = NULL;
    ds3_error* error = NULL;
    ds3_master_object_list_response* bulk_response = NULL;
    const uint32_t num_files = 5;
    ds3_bulk_object_list_response* object_list = default_object_list();
    ds3_master_object_list_response* chunk_response = NULL;
    ds3_client* client = get_client();
    const char* bucket_name = "test_bulk_get_bucket";
    printf("-----Testing Bulk GET-------\n");
    populate_with_objects(client, bucket_name);
    request = ds3_init_get_bulk_job_spectra_s3_request(bucket_name, object_list);
    /* No processing-order guarantee: chunks may arrive in any order. */
    ds3_job_chunk_client_processing_order_guarantee chunk_order = DS3_JOB_CHUNK_CLIENT_PROCESSING_ORDER_GUARANTEE_NONE;
    ds3_request_set_chunk_client_processing_order_guarantee_ds3_job_chunk_client_processing_order_guarantee(request, chunk_order);
    error = ds3_get_bulk_job_spectra_s3_request(client, request, &bulk_response);
    ds3_request_free(request);
    ds3_bulk_object_list_response_free(object_list);
    handle_error(error);
    chunk_response = ensure_available_chunks(client, bulk_response->job_id);
    handle_error(error);
    checksum_result* checksum_results = (checksum_result*) calloc(num_files, sizeof(checksum_result));
    checkChunkResponse(client, num_files, chunk_response, checksum_results);
    BOOST_CHECK(check_all_passed(num_files, checksum_results) == true);
    /* Each default object is small enough to arrive as exactly one chunk. */
    BOOST_CHECK(get_sum_of_chunks(num_files, checksum_results) == num_files);
    free(checksum_results);
    // check to make sure that the 'job' has completed
    /* works sporadically in 1.2, shjould be fixed in 3.0
    ds3_bulk_response* completed_job = NULL;
    request = ds3_init_get_job(bulk_response->job_id->value);
    error = ds3_get_job(client, request, &completed_job);
    handle_error(error);
    BOOST_CHECK(completed_job != NULL);
    BOOST_CHECK(completed_job->status == COMPLETED);
    ds3_free_request(request);
    ds3_free_bulk_response(completed_job);*/
    ds3_master_object_list_response_free(chunk_response);
    ds3_master_object_list_response_free(bulk_response);
    clear_bucket(client, bucket_name);
    free_client(client);
}
/* Bulk GET with max_upload_size set to 10 MiB (10485760 bytes): larger
 * files must be split into multiple chunks while small ones stay whole. */
BOOST_AUTO_TEST_CASE( max_upload_size ) {
    ds3_request* request = NULL;
    ds3_error* error = NULL;
    ds3_master_object_list_response* bulk_response = NULL;
    ds3_bulk_object_list_response* object_list = NULL;
    ds3_master_object_list_response* chunk_response = NULL;
    ds3_client* client = get_client();
    const char* bucket_name = "test_max_upload_size_bucket";
    printf("-----Testing Bulk GET with max_upload_size-------\n");
    const uint32_t num_files = 6;
    const char* books[num_files] = {"resources/beowulf.txt", "resources/sherlock_holmes.txt", "resources/tale_of_two_cities.txt", "resources/ulysses.txt", "resources/ulysses_large.txt", "resources/ulysses_46mb.txt"};
    object_list = ds3_convert_file_list(books, num_files);
    request = populate_bulk_return_request(client, bucket_name, object_list);
    /* 10 MiB upload-chunk ceiling. */
    ds3_request_set_max_upload_size(request, 10485760);
    bulk_response = populate_bulk_return_response(client, request);
    populate_with_objects_from_bulk(client, bucket_name, bulk_response);
    ds3_master_object_list_response_free(bulk_response);
    request = ds3_init_get_bulk_job_spectra_s3_request(bucket_name, object_list);
    ds3_job_chunk_client_processing_order_guarantee chunk_order = DS3_JOB_CHUNK_CLIENT_PROCESSING_ORDER_GUARANTEE_NONE;
    ds3_request_set_chunk_client_processing_order_guarantee_ds3_job_chunk_client_processing_order_guarantee(request, chunk_order);
    error = ds3_get_bulk_job_spectra_s3_request(client, request, &bulk_response);
    ds3_request_free(request);
    ds3_bulk_object_list_response_free(object_list);
    handle_error(error);
    chunk_response = ensure_available_chunks(client, bulk_response->job_id);
    checksum_result* checksum_results = (checksum_result*) calloc(num_files, sizeof(checksum_result));
    checkChunkResponse(client, num_files, chunk_response, checksum_results);
    BOOST_CHECK(check_all_passed(num_files, checksum_results) == true);
    /* The two oversized files split into 2 and 5 chunks respectively;
     * the remaining four files contribute one chunk each (total 11). */
    BOOST_CHECK(get_number_of_chunks(num_files, checksum_results, "resources/ulysses_large.txt") == 2);
    BOOST_CHECK(get_number_of_chunks(num_files, checksum_results, "resources/ulysses_46mb.txt") == 5);
    BOOST_CHECK(get_sum_of_chunks(num_files, checksum_results) == 11);
    free(checksum_results);
    // check to make sure that the 'job' has completed
    /* works sporadically in 1.2, shjould be fixed in 3.0
    ds3_bulk_response* completed_job = NULL;
    request = ds3_init_get_job(bulk_response->job_id->value);
    error = ds3_get_job(client, request, &completed_job);
    handle_error(error);
    BOOST_CHECK(completed_job != NULL);
    BOOST_CHECK(completed_job->status == COMPLETED);
    ds3_free_request(request);
    ds3_free_bulk_response(completed_job);*/
    ds3_master_object_list_response_free(chunk_response);
    ds3_master_object_list_response_free(bulk_response);
    clear_bucket(client, bucket_name);
    free_client(client);
}
/* Bulk GET polling for available chunks by hand (instead of the
 * ensure_available_chunks helper): retries until the server reports at
 * least one chunk ready for client processing. */
BOOST_AUTO_TEST_CASE( chunk_preference ) {
    ds3_request* request = NULL;
    ds3_error* error = NULL;
    ds3_master_object_list_response* bulk_response = NULL;
    const uint32_t num_files = 5;
    ds3_bulk_object_list_response* object_list = default_object_list();
    ds3_master_object_list_response* chunk_response = NULL;
    bool retry_get;
    ds3_client* client = get_client();
    const char* bucket_name = "test_chunk_preference_bucket";
    printf("-----Testing Bulk GET with chunk_preference-------\n");
    populate_with_objects(client, bucket_name);
    request = ds3_init_get_bulk_job_spectra_s3_request(bucket_name, object_list);
    ds3_job_chunk_client_processing_order_guarantee chunk_order = DS3_JOB_CHUNK_CLIENT_PROCESSING_ORDER_GUARANTEE_NONE;
    ds3_request_set_chunk_client_processing_order_guarantee_ds3_job_chunk_client_processing_order_guarantee(request, chunk_order);
    error = ds3_get_bulk_job_spectra_s3_request(client, request, &bulk_response);
    ds3_request_free(request);
    ds3_bulk_object_list_response_free(object_list);
    handle_error(error);
    do {
        retry_get = false;
        request = ds3_init_get_job_chunks_ready_for_client_processing_spectra_s3_request(bulk_response->job_id->value);
        error = ds3_get_job_chunks_ready_for_client_processing_spectra_s3_request(client, request, &chunk_response);
        ds3_request_free(request);
        handle_error(error);
        BOOST_CHECK(chunk_response != NULL);
        if (chunk_response->num_objects == 0) {
            // if this happens we need to try the request
            BOOST_TEST_MESSAGE( "Hit retry, sleeping for 30 seconds..."); //<< chunk_response->retry_after);
            retry_get = true;
            //TODO parse metadata retry_after
            //sleep(chunk_response->retry_after);
            /* BUG FIX: g_usleep takes microseconds, so g_usleep(30) slept
             * only 30us despite the "30 seconds" log message above. */
            g_usleep(30 * G_USEC_PER_SEC);
            ds3_master_object_list_response_free(chunk_response);
        }
    } while(retry_get);
    handle_error(error);
    checksum_result* checksum_results = (checksum_result*) calloc(num_files, sizeof(checksum_result));
    checkChunkResponse(client, num_files, chunk_response, checksum_results);
    BOOST_CHECK(check_all_passed(num_files, checksum_results) == true);
    /* Each default object arrives as exactly one chunk. */
    BOOST_CHECK(get_sum_of_chunks(num_files, checksum_results) == num_files);
    free(checksum_results);
    // check to make sure that the 'job' has completed
    /* works sporadically in 1.2, shjould be fixed in 3.0
    ds3_bulk_response* completed_job = NULL;
    request = ds3_init_get_job(bulk_response->job_id->value);
    error = ds3_get_job(client, request, &completed_job);
    handle_error(error);
    BOOST_CHECK(completed_job != NULL);
    BOOST_CHECK(completed_job->status == COMPLETED);
    ds3_free_request(request);
    ds3_free_bulk_response(completed_job);*/
    ds3_master_object_list_response_free(chunk_response);
    ds3_master_object_list_response_free(bulk_response);
    clear_bucket(client, bucket_name);
    free_client(client);
}
/* Bulk GET where only two byte ranges (segment size 3200) of each object
 * are fetched and verified against the source files. */
BOOST_AUTO_TEST_CASE( partial_get ) {
    ds3_request* request = NULL;
    ds3_error* error = NULL;
    ds3_master_object_list_response* bulk_response = NULL;
    const uint32_t num_files = 5;
    ds3_bulk_object_list_response* object_list = default_object_list();
    ds3_master_object_list_response* chunk_response = NULL;
    ds3_client* client = get_client();
    const char* bucket_name = "unit_test_bucket";
    printf("-----Testing Bulk GET with partial range-------\n");
    populate_with_objects(client, bucket_name);
    request = ds3_init_get_bulk_job_spectra_s3_request(bucket_name, object_list);
    ds3_job_chunk_client_processing_order_guarantee chunk_order = DS3_JOB_CHUNK_CLIENT_PROCESSING_ORDER_GUARANTEE_NONE;
    ds3_request_set_chunk_client_processing_order_guarantee_ds3_job_chunk_client_processing_order_guarantee(request, chunk_order);
    error = ds3_get_bulk_job_spectra_s3_request(client, request, &bulk_response);
    ds3_request_free(request);
    ds3_bulk_object_list_response_free(object_list);
    handle_error(error);
    chunk_response = ensure_available_chunks(client, bulk_response->job_id);
    handle_error(error);
    checksum_result* checksum_results = (checksum_result*) calloc(num_files, sizeof(checksum_result));
    /* 3200-byte segments: ranges [3200,6400) and [9600,12800) per object. */
    checkChunkResponsePartials(client, num_files, chunk_response, checksum_results, 3200);
    BOOST_CHECK(check_all_passed(num_files, checksum_results) == true);
    BOOST_CHECK(get_sum_of_chunks(num_files, checksum_results) == num_files);
    free(checksum_results);
    // check to make sure that the 'job' has completed
    /* works sporadically in 1.2, shjould be fixed in 3.0
    ds3_bulk_response* completed_job = NULL;
    request = ds3_init_get_job(bulk_response->job_id->value);
    error = ds3_get_job(client, request, &completed_job);
    handle_error(error);
    BOOST_CHECK(completed_job != NULL);
    BOOST_CHECK(completed_job->status == COMPLETED);
    ds3_free_request(request);
    ds3_free_bulk_response(completed_job);*/
    ds3_master_object_list_response_free(chunk_response);
    ds3_master_object_list_response_free(bulk_response);
    clear_bucket(client, bucket_name);
    free_client(client);
}
BOOST_AUTO_TEST_CASE( escape_urls ) {
const char *delimiters[4] = {"or", "/", "@", "="};
const char *strings_to_test[5] = {"some normal text", "/an/object/name", "bytes=0-255,300-400,550-800", "orqwerty/qwerty@qwerty=", "`1234567890-=~!@#$%^&*()_+[]\\{}|;:,./<>?"};
const char *object_name_results[5] = {"some%20normal%20text", "/an/object/name", "bytes%3D0-255%2C300-400%2C550-800", "orqwerty/qwerty%40qwerty%3D",
"%601234567890-%3D~%21%40%23%24%25%5E%26%2A%28%29_%2B%5B%5D%5C%7B%7D%7C%3B%3A%2C./%3C%3E%3F"};
const char *range_header_results[5] = {"some%20normal%20text", "%2Fan%2Fobject%2Fname", "bytes=0-255,300-400,550-800", "orqwerty%2Fqwerty%40qwerty=",
"%601234567890-=~%21%40%23%24%25%5E%26%2A%28%29_%2B%5B%5D%5C%7B%7D%7C%3B%3A,.%2F%3C%3E%3F"};
const char *general_delimiter_results[5] = {"some%20normal%20text", "/an/object/name", "bytes=0-255%2C300-400%2C550-800", "orqwerty/qwerty@qwerty=",
"%601234567890-=~%21@%23%24%25%5E%26%2A%28%29_%2B%5B%5D%5C%7B%7D%7C%3B%3A%2C./%3C%3E%3F"};
printf("-----Testing escape url helpers-------\n");
for (int i = 0; i < 5; i++) {
char* escaped_url = escape_url_object_name(strings_to_test[i]);
if(strcmp(escaped_url, object_name_results[i]) != 0){
printf("%s != %s\n", escaped_url, object_name_results[i]);
}
BOOST_CHECK(strcmp(escaped_url, object_name_results[i]) == 0);
free(escaped_url);
}
for (int i = 0; i < 5; i++) {
char* escaped_url = escape_url_range_header(strings_to_test[i]);
if(strcmp(escaped_url, range_header_results[i]) != 0){
printf("%s != %s\n", escaped_url, range_header_results[i]);
}
BOOST_CHECK(strcmp(escaped_url, range_header_results[i]) == 0);
free(escaped_url);
}
for (int i = 0; i < 5; i++) {
char* escaped_url = escape_url_extended(strings_to_test[i], delimiters, 4);
if(strcmp(escaped_url, general_delimiter_results[i]) != 0){
printf("%s != %s\n", escaped_url, general_delimiter_results[i]);
}
BOOST_CHECK(strcmp(escaped_url, general_delimiter_results[i]) == 0);
free(escaped_url);
}
}
/* ds3_convert_file_list_with_basepath must strip the basepath from object
 * names while reading each file's size from disk. */
BOOST_AUTO_TEST_CASE( convert_list_helper ) {
    const char* books[4] = {"beowulf.txt", "sherlock_holmes.txt", "tale_of_two_cities.txt", "ulysses.txt"};
    ds3_bulk_object_list_response* obj_list;
    printf("-----Testing convert_list helper-------\n");
    obj_list = ds3_convert_file_list_with_basepath(books, 4, "resources/");
    /* Object name excludes the basepath; 294059 is the on-disk size of the
     * resources/beowulf.txt fixture. */
    BOOST_CHECK(strcmp(obj_list->objects[0]->name->value, "beowulf.txt") == 0);
    BOOST_CHECK(obj_list->objects[0]->length == 294059);
    ds3_bulk_object_list_response_free(obj_list);
}
/* A directory entry passed to the convert-list helper must keep its name
 * and report a length of 0 rather than a file size. */
BOOST_AUTO_TEST_CASE( directory_size ) {
    const char* books[1] = {"resources"};
    ds3_bulk_object_list_response* obj_list;
    printf("-----Testing directory size on convert list helper-------\n");
    obj_list = ds3_convert_file_list_with_basepath(books, 1, NULL);
    BOOST_CHECK(strcmp(obj_list->objects[0]->name->value, "resources") == 0);
    BOOST_CHECK(obj_list->objects[0]->length == 0);
    ds3_bulk_object_list_response_free(obj_list);
}
|
{"hexsha": "1e99ff5c13cc5230f9482f8d24f5296605cd9536", "size": 20888, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/bulk_get.cpp", "max_stars_repo_name": "RachelTucker/ds3_c_sdk", "max_stars_repo_head_hexsha": "b0a32aef4ccc4eb87ed6ce4f08b3b7c01e047234", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2015-03-26T09:56:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-21T00:36:20.000Z", "max_issues_repo_path": "test/bulk_get.cpp", "max_issues_repo_name": "RachelTucker/ds3_c_sdk", "max_issues_repo_head_hexsha": "b0a32aef4ccc4eb87ed6ce4f08b3b7c01e047234", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 42.0, "max_issues_repo_issues_event_min_datetime": "2015-04-16T14:28:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-24T19:24:23.000Z", "max_forks_repo_path": "test/bulk_get.cpp", "max_forks_repo_name": "RachelTucker/ds3_c_sdk", "max_forks_repo_head_hexsha": "b0a32aef4ccc4eb87ed6ce4f08b3b7c01e047234", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17.0, "max_forks_repo_forks_event_min_datetime": "2015-03-24T22:46:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-16T08:13:09.000Z", "avg_line_length": 43.5166666667, "max_line_length": 216, "alphanum_fraction": 0.7203657602, "num_tokens": 5115}
|
function arcsinh_values_test ( )
%*****************************************************************************80
%
%% ARCSINH_VALUES_TEST tabulates sample values supplied by ARCSINH_VALUES.
%
%  Discussion:
%
%    ARCSINH_VALUES is queried repeatedly; it signals exhaustion of its
%    stored table by returning N_DATA = 0, at which point the test stops.
%
%  Licensing:
%
%    This code is distributed under the GNU LGPL license.
%
%  Modified:
%
%    23 June 2007
%
%  Author:
%
%    John Burkardt
%
  fprintf ( 1, '\n' );
  fprintf ( 1, 'ARCSINH_VALUES_TEST:\n' );
  fprintf ( 1, ' ARCSINH_VALUES stores values of \n' );
  fprintf ( 1, ' the hyperbolic arc sine function.\n' );
  fprintf ( 1, '\n' );
  fprintf ( 1, ' X FX\n' );
  fprintf ( 1, '\n' );

  n_data = 0;
  [ n_data, x, fx ] = arcsinh_values ( n_data );
  while ( n_data ~= 0 )
    fprintf ( 1, ' %12f %24.16f\n', x, fx );
    [ n_data, x, fx ] = arcsinh_values ( n_data );
  end

  return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/test_values/arcsinh_values_test.m"}
|
#include "redist.h"
#include "../Log.h"
#include "../Config.h"
#include <map>
#include <boost/foreach.hpp>
#define foreach BOOST_FOREACH
// Redistribution phase: funnels surplus ships from safe interior planets
// toward friendly planets closer to the enemy.
//
// Steps:
//   1. For each planet we (will) own, pick the closest friendly planet
//      strictly closer to the enemy (1-step redistribution).
//   2. Collapse chains so ships fly directly to the end of each chain.
//   3. Reroute through a staging planet on the original chain when the
//      detour costs at most "redist.slack" extra turns.
//   4. Issue the fleet orders.
void Redistribution(GameState& state) {
    static bool redist = Config::Value<bool>("redist");
    static bool use_future = Config::Value<bool>("redist.future");
    if ( !redist ) return;
    LOG("Redistribution phase");
    // Frontier planets must keep their ships.
    std::map<int,bool> locked_planets = use_future ? state.FutureFrontierPlanets(ME) : state.FrontierPlanets(ME);
    // determine distances of all planets to closest ENEMY
    const std::vector<PlanetPtr> planets = state.Planets();
    std::map<int,int> distances;
    foreach ( PlanetPtr p, planets ) {
        PlanetPtr enemy = state.ClosestPlanetByOwner(p, ENEMY);
        distances[p->id] = enemy ? Map::Distance(p->id, enemy->id) : 0;
    }
    std::map<int,int> redist_map;
    // determine 1 step redistribution
    foreach ( PlanetPtr p, planets ) {
        int p_id = p->id;
        // We only want to redistribute from planets where we are the future owner :D
        if ( p->FutureOwner() != ME ) {
            continue;
        }
        // Filter out planets we don't want to redistribute from
        if ( locked_planets[p_id] || p->Ships(true) <= 0 ) {
            continue;
        }
        // Find the closest planet with a lower distance to the enemy
        int distance = distances[p_id];
        const std::vector<int>& sorted = Map::PlanetsByDistance( p_id );
        int closest = -1;
        foreach ( int s_id, sorted ) {
            PlanetPtr s = state.Planet(s_id);
            int s_owner = use_future ? s->FutureOwner() : s->Owner();
            if ( s_owner == ME && distances[s_id] < distance ) {
                closest = s_id;
                // If we comment this out then we just throw all units at the planet
                // closest to the enemy. This leaves our units quite vunerable
                // But for some reason it is sometimes effective. Find out when and WHY!
                break; // Redist has this commented out
            }
        }
        if (closest >= 0) {
            redist_map[p_id] = closest;
        }
    }
    std::pair<int,int> item;
    std::map<int,int>::iterator found;
    std::map<int,int> original_map(redist_map);
    // send ships directly to the end of the redistribution chain
    // this means that more ships get to the front lines quicker
    // it also means that planets in the middle have permanently locked ships
    // (because growth and arrivals are processed AFTER we make our
    //  move order but BEFORE the orders are carried out)
    //
    // Possibly experiment with more conserative skips
    //  (maybe for defence mode - higher growth rate, lower ships)
    foreach (item, redist_map ) {
        int source = item.first;
        int dest = item.second;
        while ( (found = redist_map.find(dest)) != redist_map.end() ) {
            dest = found->second;
        }
        redist_map[source] = dest;
    }
    static int redist_slack = Config::Value<int>("redist.slack");
    // route through a staging planet if there is enough slack
    foreach ( item, redist_map ) {
        int source = item.first;
        int dest = item.second;
        int current = source;
        int distance = Map::Distance(source, dest);
        // Walk the ORIGINAL 1-step chain looking for an acceptable hop.
        // BUG FIX: the iterator returned by original_map.find() was
        // previously compared against redist_map.end() (undefined
        // behaviour: iterators from different containers), and the chosen
        // hop was immediately clobbered by an unconditional
        // redist_map[source] = dest after the loop.
        while ( (found = original_map.find(current)) != original_map.end() && found->second != dest ) {
            current = found->second;
            if ( Map::Distance(source, current) + Map::Distance(current,dest) <= redist_slack + distance ) {
                LOG(" hopping through " << current);
                dest = current; // reroute through the staging planet
                break;
            }
        }
        redist_map[source] = dest;
    }
    // Output the redistributions
    foreach (item, redist_map ) {
        int source_id = item.first;
        int dest_id = item.second;
        PlanetPtr p = state.Planet(source_id);
        // Can't redisribute from unowned planets!
        if ( p->Owner() != ME ) continue;
        // Don't mess up neutral stealing!
        // This prevents us prematurely sending ships to a planet which we might be neutral stealing from the enemy
        if ( state.Planet( dest_id )->FutureState( Map::Distance( source_id, dest_id ) ).owner == NEUTRAL ) continue;
        state.IssueOrder(Fleet(source_id, dest_id, p->Ships(true)));
        LOG( " Redistributing from " << source_id << " to " << dest_id );
    }
}
|
{"hexsha": "c36bf802f0fc5259e3db73998cbfd9eb4a209e49", "size": 4517, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/strategy/redist.cc", "max_stars_repo_name": "sigh/Planet-Wars", "max_stars_repo_head_hexsha": "7855ea11143ac97c32cd13bcac42d212c8ed3c30", "max_stars_repo_licenses": ["Apache-1.1"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/strategy/redist.cc", "max_issues_repo_name": "sigh/Planet-Wars", "max_issues_repo_head_hexsha": "7855ea11143ac97c32cd13bcac42d212c8ed3c30", "max_issues_repo_licenses": ["Apache-1.1"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/strategy/redist.cc", "max_forks_repo_name": "sigh/Planet-Wars", "max_forks_repo_head_hexsha": "7855ea11143ac97c32cd13bcac42d212c8ed3c30", "max_forks_repo_licenses": ["Apache-1.1"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.136, "max_line_length": 117, "alphanum_fraction": 0.5975204782, "num_tokens": 1066}
|
MODULE m_kkintgr
   !------------------------------------------------------------------------------
   !
   ! MODULE: m_kkintgr
   !
   !> @author
   !> Henning Janßen
   !
   ! DESCRIPTION:
   !> Performs the Kramer-Kronig-Transformation to obtain the Green's function
   !> in the complex plane from the imaginary part calculated on the real axis
   !
   ! TODO: Look at FFT for Transformation
   ! How to do changing imaginary parts
   !------------------------------------------------------------------------------
   USE m_constants
   USE m_juDFT

   IMPLICIT NONE

   !Identifiers for the integration schemes accepted by kkintgr's "method" argument
   INTEGER, PARAMETER :: method_maclaurin = 1
   INTEGER, PARAMETER :: method_deriv = 2
   INTEGER, PARAMETER :: method_direct = 3
   INTEGER, PARAMETER :: method_fft = 4

   CHARACTER(len=10), PARAMETER :: smooth_method = 'lorentzian' !(lorentzian or gaussian)

   CONTAINS

   SUBROUTINE kkintgr(im,eMesh,ez,l_conjg,g,method)
      !calculates the Kramer Kronig Transformation on the same contour where the imaginary part was calculated
      !Re(G(E+i * delta)) = -1/pi * int_bot^top dE' P(1/(E-E')) * Im(G(E'+i*delta))
      !The dominant source of error for this routine is a insufficiently dense energy mesh on the real axis
      !TODO: Some way to estimate the error (maybe search for the sharpest peak and estimate from width)
      USE m_smooth
      USE m_lorentzian_smooth

      REAL, INTENT(IN) :: im(:) !Imaginary part of the green's function on the real axis
      REAL, INTENT(IN) :: eMesh(:) !Energy grid on the real axis
      COMPLEX, INTENT(IN) :: ez(:) !Complex energy contour
      LOGICAL, INTENT(IN) :: l_conjg !Switch determines wether we calculate g on the complex conjugate of the contour ez
      COMPLEX, INTENT(INOUT) :: g(:) !Green's function on the complex plane
      INTEGER, INTENT(IN) :: method !Integer associated with the method to be used (definitions above)

      INTEGER :: iz,izp,n1,n2,ne,nz
      INTEGER :: ismooth,nsmooth
      REAL :: eb,del
      REAL :: re_n1,re_n2,im_n1,im_n2
      INTEGER, ALLOCATABLE :: smoothInd(:)  !maps each contour point to its smoothing set
      REAL, ALLOCATABLE :: sigma(:)         !distinct imaginary parts (broadenings) found on the contour
      REAL, ALLOCATABLE :: smoothed(:,:)    !im broadened once per distinct sigma

      nz = SIZE(ez)
      ne = SIZE(eMesh)
      !The grid-based methods assume an equidistant real-axis mesh:
      !eb is the bottom energy, del the spacing
      eb = eMesh(1)
      del = eMesh(2) - eMesh(1)

      ALLOCATE(smoothInd(nz),source=0)
      ALLOCATE(sigma(nz),source=0.0)

      IF(method.NE.method_direct) THEN
         CALL timestart("kkintgr: smoothing")
         !Smooth the imaginary part beforehand
         !Determine how many unique values there are for the imaginary part
         nsmooth = 0
         outer: DO iz = 1, nz
            DO izp = 1, iz-1
               !Reuse an earlier smoothing set when the broadening matches within 1e-12
               IF(ABS(AIMAG(ez(izp))-AIMAG(ez(iz))).LT.1e-12) THEN
                  smoothInd(iz) = smoothInd(izp)
                  CYCLE outer
               ENDIF
            ENDDO
            nsmooth = nsmooth + 1
            smoothInd(iz) = nsmooth
            sigma(nsmooth) = AIMAG(ez(iz))
         ENDDO outer
         ALLOCATE(smoothed(ne,nsmooth), source=0.0)
         !$OMP parallel do default(none) &
         !$OMP shared(nsmooth,smoothed,sigma,ne,eMesh,im) &
         !$OMP private(ismooth)
         DO ismooth = 1, nsmooth
            smoothed(:,ismooth) = im(:ne)
            !Zero broadening needs no smoothing
            IF(ABS(sigma(ismooth)).LT.1e-12) CYCLE
            SELECT CASE (TRIM(ADJUSTL(smooth_method)))
            CASE('lorentzian')
               CALL lorentzian_smooth(eMesh,smoothed(:,ismooth),sigma(ismooth),ne)
            CASE('gaussian')
               CALL smooth(eMesh,smoothed(:,ismooth),sigma(ismooth),ne)
            CASE DEFAULT
               CALL juDFT_error("No valid smooth_method set",&
                                hint="This is a bug in FLEUR, please report",&
                                calledby="kkintgr")
            END SELECT
         ENDDO
         !$OMP end parallel do
         CALL timestop("kkintgr: smoothing")
      ENDIF

      CALL timestart("kkintgr: integration")
      !$OMP parallel do default(none) &
      !$OMP shared(nz,ne,method,del,eb,l_conjg) &
      !$OMP shared(g,ez,eMesh,im,smoothed,smoothInd) &
      !$OMP private(iz,n1,n2,re_n1,re_n2,im_n1,im_n2)
      DO iz = 1, nz
         SELECT CASE(method)
         CASE(method_direct)
            g(iz) = kk_direct(im,eMesh,MERGE(conjg(ez(iz)),ez(iz),l_conjg))
         CASE(method_maclaurin, method_deriv)
            !Use the previously smoothed version and interpolate after
            !Next point to the left
            n1 = INT((REAL(ez(iz))-eb)/del) +1
            !next point to the right
            n2 = n1 + 1
            !Here we perform the Kramers-kronig-Integration
            re_n2 = kk_num(smoothed(:,smoothInd(iz)),ne,n2,method)
            re_n1 = kk_num(smoothed(:,smoothInd(iz)),ne,n1,method)
            !Interpolate to the energy ez(iz)
            !Real Part
            g(iz) = (re_n2-re_n1)/del * (REAL(ez(iz))-(n1-1)*del-eb) + re_n1
            !Imaginary Part (0 outside of the energy range)
            IF(n1.LE.ne.AND.n1.GE.1) THEN
               im_n1 = smoothed(n1,smoothInd(iz))
            ELSE
               im_n1 = 0.0
            ENDIF
            IF(n2.LE.ne.AND.n2.GE.1) THEN
               im_n2 = smoothed(n2,smoothInd(iz))
            ELSE
               im_n2 = 0.0
            ENDIF
            g(iz) = g(iz) + ImagUnit *( (im_n2-im_n1)/del * (REAL(ez(iz))-(n1-1)*del-eb) + im_n1 )
            IF(l_conjg) g(iz) = conjg(g(iz))
         CASE(method_fft)
            CALL juDFT_error("Not implemented yet", calledby="kkintgr")
         CASE DEFAULT
            CALL juDFT_error("Not a valid integration method", calledby="kkintgr")
         END SELECT
      ENDDO
      !$OMP end parallel do
      CALL timestop("kkintgr: integration")

   END SUBROUTINE kkintgr

   !Direct evaluation of G(z) = -1/pi * int dE' Im(G(E')) / (z - E')
   !via the trapezoidal rule; no principal-value treatment, so z must
   !lie off the real mesh
   PURE COMPLEX FUNCTION kk_direct(im,eMesh,z)
      USE m_trapz

      REAL, INTENT(IN) :: im(:)
      REAL, INTENT(IN) :: eMesh(:)
      COMPLEX, INTENT(IN) :: z

      COMPLEX :: integrand(SIZE(eMesh))

      integrand = 1.0/(z-eMesh) * im
      kk_direct = -1/pi_const *trapz(integrand,eMesh(2)-eMesh(1),SIZE(eMesh))
   END FUNCTION kk_direct

   !Numerical principal-value sum for the real part at grid point ire,
   !using either the MacLaurin (skip every other point) or the
   !derivative (analytic removal of the singularity) scheme
   PURE REAL FUNCTION kk_num(im,ne,ire,method)

      REAL, INTENT(IN) :: im(:) !Imaginary part
      INTEGER, INTENT(IN) :: ne !Dimension of the energy grid
      INTEGER, INTENT(IN) :: ire !Position where to calculate the real part
      INTEGER, INTENT(IN) :: method !Method to be used

      INTEGER i,j
      REAL y,im_ire

      kk_num = 0.0
      !Value at the singular point (0 outside the grid)
      IF(ire.LE.ne.AND.ire.GE.1) THEN
         im_ire = im(ire)
      ELSE
         im_ire = 0.0
      ENDIF

      SELECT CASE(method)
      CASE (method_maclaurin)
         !Calculate the real part on the same energy points as the imaginary part
         !regardless of the contour
         !If i is odd skip the odd points and the other way around and use the trapezian method
         DO i = MERGE(1,2,MOD(ire,2)==0), ne, 2
            y = - 1/pi_const * 2.0 * im(i)/REAL(ire-i)
            IF(i.EQ.1 .OR. i.EQ.2 .OR.&
               i.EQ.ne .OR. i.EQ.ne-1) y = y/2.0
            kk_num = kk_num + y
         ENDDO
      CASE (method_deriv)
         !Remove the singularity and treat it analytically
         DO j = 1, ne
            IF(j-ire.NE.0) THEN
               y = -1/pi_const * (im(j)-im_ire)/REAL(ire-j)
            ELSE
               !At the singularity use the (one-sided or central) finite difference
               IF(ire.EQ.1) THEN
                  y = -1/pi_const * (im(2)-im(1))
               ELSE IF(ire.EQ.ne) THEN
                  y = -1/pi_const * (im(ne)-im(ne-1))
               ELSE IF((ire.LT.ne).AND.(ire.GT.1)) THEN
                  y = -1/pi_const * (im(ire+1)-im(ire-1))/2.0
               ENDIF
            ENDIF
            IF(j.EQ.1 .OR. j.EQ.ne) y = y/2.0
            kk_num = kk_num + y
         ENDDO
      CASE default
      END SELECT

   END FUNCTION kk_num

END MODULE m_kkintgr
|
{"hexsha": "b3877bdfcb8e0fe738d1adb39bbba91068a99e71", "size": 7883, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "greensf/kkintgr.f90", "max_stars_repo_name": "MRedies/FLEUR", "max_stars_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "greensf/kkintgr.f90", "max_issues_repo_name": "MRedies/FLEUR", "max_issues_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "greensf/kkintgr.f90", "max_forks_repo_name": "MRedies/FLEUR", "max_forks_repo_head_hexsha": "84234831c55459a7539e78600e764ff4ca2ec4b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.99543379, "max_line_length": 134, "alphanum_fraction": 0.54788786, "num_tokens": 2305}
|
from nvidia.dali.pipeline import Pipeline
from nvidia.dali import fn
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import scipy.io.wavfile
import numpy as np
import math
import json
import librosa
import tempfile
import os
from test_audio_decoder_utils import generate_waveforms, rosa_resample
# Scratch directory for the generated WAV fixtures; removed when the
# TemporaryDirectory object is finalised.
tmp_dir = tempfile.TemporaryDirectory()

# One test file per channel layout: mono, stereo and 4-channel.
names = [os.path.join(tmp_dir.name, f"dali_test_{c}C.wav") for c in (1, 2, 4)]

# Per-channel sine frequencies (cycles/sample) for each file.
freqs = [
    np.array([0.02]),
    np.array([0.01, 0.012]),
    np.array([0.01, 0.012, 0.013, 0.014]),
]

# Sampling rate (Hz) and length (samples) of each file.
rates = [22050, 22050, 12347]
lengths = [10000, 54321, 12345]
def create_ref():
    """Synthesise the int16 reference waveform for every test file."""
    references = []
    for length, freq in zip(lengths, freqs):
        wave = generate_waveforms(length, freq)
        references.append((wave * 32767).round().astype(np.int16))
    return references
# Reference int16 waveforms, one per test file.
ref_i = create_ref()

# Expected "text" output of the reader for each manifest entry: the ASCII
# bytes of "dali test <n>C" followed by a NUL terminator.
ref_text = [
    np.array([100, 97, 108, 105, 32, 116, 101, 115, 116, 32, 49, 67, 0], dtype=np.uint8),
    np.array([100, 97, 108, 105, 32, 116, 101, 115, 116, 32, 50, 67, 0], dtype=np.uint8),
    np.array([100, 97, 108, 105, 32, 116, 101, 115, 116, 32, 52, 67, 0], dtype=np.uint8)
]
def create_wav_files():
    # Write each reference waveform to disk at its nominal sampling rate.
    for i in range(len(names)):
        scipy.io.wavfile.write(names[i], rates[i], ref_i[i])

create_wav_files()
nemo_asr_manifest = os.path.join(tmp_dir.name, "nemo_asr_manifest.json")

def create_manifest_file():
    """Write a NeMo ASR manifest (one JSON object per line) for the test files.

    Each entry records the audio path, its duration in seconds and a
    transcript. The per-file dicts were previously built with triplicated
    entry0/entry1/entry2 boilerplate; a single loop over the parallel fixture
    lists produces the identical file.
    """
    transcripts = ["dali test 1C", "dali test 2C", "dali test 4C"]
    with open(nemo_asr_manifest, 'w') as f:
        for name, length, rate, text in zip(names, lengths, rates, transcripts):
            entry = {
                "audio_filepath": name,
                "duration": length * (1.0 / rate),
                "text": text,
            }
            json.dump(entry, f)
            f.write('\n')

create_manifest_file()

rate1 = 16000
rate2 = 44100
class NemoAsrReaderPipeline(Pipeline):
    """Pipeline exposing nemo_asr readers in several output configurations."""

    def __init__(self, batch_size=8):
        super(NemoAsrReaderPipeline, self).__init__(batch_size=batch_size, num_threads=1, device_id=0,
                                                    exec_async=True, exec_pipelined=True)

    def define_graph(self):
        fixed_seed = 12345

        def reader(dtype, downmix, read_sample_rate=False, read_text=False, sample_rate=None):
            # All reader instances share the same manifest and seed; only the
            # output configuration differs per call.
            kwargs = dict(manifest_filepaths=[nemo_asr_manifest], dtype=dtype,
                          downmix=downmix, read_sample_rate=read_sample_rate,
                          read_text=read_text, seed=fixed_seed)
            if sample_rate is not None:
                kwargs['sample_rate'] = sample_rate
            return fn.nemo_asr_reader(**kwargs)

        plain_i = reader(types.INT16, False)
        plain_f = reader(types.FLOAT, False)
        downmix_i = reader(types.INT16, True)
        downmix_f = reader(types.FLOAT, True)
        resampled1_i, _ = reader(types.INT16, True, read_sample_rate=True, sample_rate=rate1)
        resampled1_f, _ = reader(types.FLOAT, True, read_sample_rate=True, sample_rate=rate1)
        resampled2_i, _ = reader(types.INT16, True, read_sample_rate=True, sample_rate=rate2)
        resampled2_f, _ = reader(types.FLOAT, True, read_sample_rate=True, sample_rate=rate2)
        _, _, text = reader(types.INT16, True, read_sample_rate=True, read_text=True)

        return (plain_i, plain_f, downmix_i, downmix_f,
                resampled1_i, resampled1_f, resampled2_i, resampled2_f, text)
def test_decoded_vs_generated(batch_size=3):
    """Check every pipeline output against waveforms synthesised in numpy."""
    pipeline = NemoAsrReaderPipeline(batch_size=batch_size)
    pipeline.build()

    for iter in range(1):  # single run; note `iter` shadows the builtin
        out = pipeline.run()
        for idx in range(batch_size):
            # The nine outputs produced by NemoAsrReaderPipeline.define_graph.
            audio_plain_i = out[0].at(idx)
            audio_plain_f = out[1].at(idx)
            audio_downmix_i = out[2].at(idx)
            audio_downmix_f = out[3].at(idx)
            audio_resampled1_i = out[4].at(idx)
            audio_resampled1_f = out[5].at(idx)
            audio_resampled2_i = out[6].at(idx)
            audio_resampled2_f = out[7].at(idx)
            text = out[8].at(idx)

            # Plain decoding: int16 exact, float within scaling tolerance.
            ref_plain_i = ref_i[idx]
            np.testing.assert_allclose(audio_plain_i, ref_plain_i, rtol = 1e-7)
            ref_plain_f = ref_i[idx].astype(np.float32) / 32767
            np.testing.assert_allclose(audio_plain_f, ref_plain_f, rtol = 1e-4)

            # Downmix: channel mean, computed in float to match the reader.
            ref_downmix_i_float = ref_i[idx].astype(np.float32).mean(axis = 1, keepdims = 1)
            ref_downmix_i = ref_downmix_i_float.astype(np.int16).flatten()
            np.testing.assert_allclose(audio_downmix_i, ref_downmix_i, atol = 1)
            ref_downmix_f = (ref_downmix_i_float / 32767).flatten()
            np.testing.assert_allclose(audio_downmix_f, ref_downmix_f, rtol = 1e-4)

            # Resampling to rate1: regenerate the waveform at the target rate.
            ref_resampled1_float = generate_waveforms(lengths[idx] * rate1 / rates[idx], freqs[idx] * (rates[idx] / rate1))
            ref_resampled1_downmix = ref_resampled1_float.astype(np.float32).mean(axis = 1, keepdims = 1)
            ref_resampled1_i = (ref_resampled1_downmix * 32767).astype(np.int16).flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(audio_resampled1_i, ref_resampled1_i, atol=round(32767 * 1e-3))
            ref_resampled1_f = ref_resampled1_downmix.flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(audio_resampled1_f, ref_resampled1_f, atol=1e-3)

            # Resampling to rate2, same procedure.
            ref_resampled2_float = generate_waveforms(lengths[idx] * rate2 / rates[idx], freqs[idx] * (rates[idx] / rate2))
            ref_resampled2_downmix = ref_resampled2_float.astype(np.float32).mean(axis = 1, keepdims = 1)
            ref_resampled2_i = (ref_resampled2_downmix * 32767).astype(np.int16).flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(audio_resampled2_i, ref_resampled2_i, atol=round(32767 * 1e-3))
            ref_resampled2_f = ref_resampled2_downmix.flatten()
            # resampling - allow for 1e-3 dynamic range error
            np.testing.assert_allclose(audio_resampled2_f, ref_resampled2_f, atol=1e-3)

            # Transcript bytes must match exactly (including the NUL).
            np.testing.assert_equal(text, ref_text[idx])
|
{"hexsha": "b52f86cd6353a191a0df54197744c1883a06a6fd", "size": 7168, "ext": "py", "lang": "Python", "max_stars_repo_path": "dali/test/python/test_operator_nemo_asr_reader.py", "max_stars_repo_name": "a-sansanwal/DALI", "max_stars_repo_head_hexsha": "83aeb96792d053f60dd4252b8efa0fc8fdd9012a", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-07T23:07:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-07T23:07:23.000Z", "max_issues_repo_path": "dali/test/python/test_operator_nemo_asr_reader.py", "max_issues_repo_name": "MAKali4737/DALI", "max_issues_repo_head_hexsha": "3b114c6ebee38ff3815a9b4a234402e4d1affaa0", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dali/test/python/test_operator_nemo_asr_reader.py", "max_forks_repo_name": "MAKali4737/DALI", "max_forks_repo_head_hexsha": "3b114c6ebee38ff3815a9b4a234402e4d1affaa0", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.8, "max_line_length": 129, "alphanum_fraction": 0.6771763393, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2078}
|
from pathlib import Path
import pandas as pd
import numpy as np
def next_monday(date):
    """Return the first Monday on or after *date* as a midnight-anchored Timestamp."""
    return pd.date_range(start=date, periods=1, freq='W-MON')[0]
def get_relevant_dates(dates):
    """Select at most one forecast date per forecast week.

    Dates are considered in preference order Monday > Sunday > Saturday >
    Friday. A date is kept only if no already-kept date maps to the same
    forecast Monday (checked via next_monday and the two preceding days).
    NOTE: the membership tests read `relevant_dates` as it grows across the
    day-of-week passes, so the pass order is significant.
    """
    wds = pd.Series(d.day_name() for d in dates)
    next_mondays = pd.Series(next_monday(d) for d in dates)
    relevant_dates = []
    for day in ['Monday', 'Sunday', 'Saturday', 'Friday']:
        # Keep dates of this weekday whose forecast Monday (or the Sunday or
        # Saturday before it) was not already claimed by an earlier pass.
        relevant_dates.extend(dates[(wds == day) &
                                    ~pd.Series(n in relevant_dates for n in next_mondays) &
                                    ~pd.Series(n in relevant_dates for n in (next_mondays - pd.offsets.Day(1))) &
                                    ~pd.Series(n in relevant_dates for n in (next_mondays - pd.offsets.Day(2)))
                                    ])
    return [str(r.date()) for r in relevant_dates]  # return as strings
# Collect the relevant forecast CSVs from every model directory, filter them
# to the targets/quantiles the app plots, and write a single combined CSV.
path = Path('../../data-processed')
models = [f.name for f in path.iterdir() if f.name !='ABC-exampleModel1']  # skip the docs example

# -1..4 week-ahead incident and cumulative death targets.
VALID_TARGETS = [f"{_} wk ahead inc death" for _ in range(-1, 5)] + \
                [f"{_} wk ahead cum death" for _ in range(-1, 5)]
VALID_QUANTILES = [0.025, 0.975]  # the 95% interval bounds

dfs = []
for m in models:
    p = path/m
    forecasts = [f.name for f in p.iterdir() if '.csv' in f.name]
    # Forecast filenames start with the YYYY-MM-DD forecast date.
    available_dates = pd.Series(pd.to_datetime(filename[:10]) for filename in forecasts)
    relevant_dates = get_relevant_dates(available_dates)
    relevant_forecasts = [f for f in forecasts if f[:10] in relevant_dates]
    for f in relevant_forecasts:
        df_temp = pd.read_csv(path/m/f)
        df_temp['model'] = m
        dfs.append(df_temp)

df = pd.concat(dfs)
df.forecast_date = pd.to_datetime(df.forecast_date)
df.target_end_date = pd.to_datetime(df.target_end_date)

# Keep only the plotted targets; keep point/observed rows plus the two quantiles.
df = df[df.target.isin(VALID_TARGETS) &
        (df['quantile'].isin(VALID_QUANTILES) | (df.type=='point') | (df.type=='observed'))].reset_index(drop=True)

# Forecasts are grouped by the Monday of their forecast week.
df['timezero'] = df.forecast_date.apply(next_monday)

df = df[['forecast_date', 'target', 'target_end_date', 'location', 'type', 'quantile', 'value', 'timezero', 'model']]
df.to_csv('../data/forecasts_to_plot.csv', index=False)
|
{"hexsha": "7f08f62e7b776457185193eda0555588a3260b32", "size": 2136, "ext": "py", "lang": "Python", "max_stars_repo_path": "app_forecasts_de/code/data_preparation.py", "max_stars_repo_name": "QEDHamburg/covid19-forecast-hub-de", "max_stars_repo_head_hexsha": "0fa2daa6ebe4fcb3b91a9c5a852d86d1a36e6d9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app_forecasts_de/code/data_preparation.py", "max_issues_repo_name": "QEDHamburg/covid19-forecast-hub-de", "max_issues_repo_head_hexsha": "0fa2daa6ebe4fcb3b91a9c5a852d86d1a36e6d9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app_forecasts_de/code/data_preparation.py", "max_forks_repo_name": "QEDHamburg/covid19-forecast-hub-de", "max_forks_repo_head_hexsha": "0fa2daa6ebe4fcb3b91a9c5a852d86d1a36e6d9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5555555556, "max_line_length": 117, "alphanum_fraction": 0.6310861423, "include": true, "reason": "import numpy", "num_tokens": 568}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipype.testing import (assert_equal, example_data, skipif, assert_true)
from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS
import numpy as np
# Flag consumed by @skipif below: True when the optional nitime
# dependency cannot be imported.
try:
    import nitime  # noqa: F401
except ImportError:
    nonitime = True
else:
    nonitime = False
def test_fd():
    """FramewiseDisplacement should reproduce FSL's reference FD trace."""
    tempdir = mkdtemp()
    expected = np.loadtxt(example_data('fsl_motion_outliers_fd.txt'))
    fd_iface = FramewiseDisplacement(in_plots=example_data('fsl_mcflirt_movpar.txt'),
                                     out_file=tempdir + '/fd.txt')
    result = fd_iface.run()
    computed = np.loadtxt(result.outputs.out_file)
    # Trace matches within FSL's tolerance, and the reported average agrees.
    yield assert_true, np.allclose(expected, computed, atol=.16)
    yield assert_true, np.abs(expected.mean() - result.outputs.fd_average) < 1e-2
    rmtree(tempdir)
@skipif(nonitime)
def test_dvars():
    """ComputeDVARS output should track the reference DVARS series.

    Runs inside a scratch directory. Unlike the original, the working
    directory is restored and the scratch directory removed afterwards
    (the original leaked both the cwd change and the mkdtemp directory).
    """
    tempdir = mkdtemp()
    origdir = os.getcwd()
    ground_truth = np.loadtxt(example_data('ds003_sub-01_mc.DVARS'))
    dvars = ComputeDVARS(in_file=example_data('ds003_sub-01_mc.nii.gz'),
                         in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'),
                         save_all=True)
    try:
        os.chdir(tempdir)
        res = dvars.run()
        dv1 = np.loadtxt(res.outputs.out_std)
        # Mean absolute deviation from the reference must stay below 0.05.
        yield assert_equal, (np.abs(dv1 - ground_truth).sum() / len(dv1)) < 0.05, True
    finally:
        os.chdir(origdir)
        rmtree(tempdir)
|
{"hexsha": "c41ed485a19cef6a4c8c42610d69d438a016dcb2", "size": 1375, "ext": "py", "lang": "Python", "max_stars_repo_path": "nipype/algorithms/tests/test_confounds.py", "max_stars_repo_name": "Conxz/nipype", "max_stars_repo_head_hexsha": "1281723ae56eacd103597ff4081a205583706e62", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nipype/algorithms/tests/test_confounds.py", "max_issues_repo_name": "Conxz/nipype", "max_issues_repo_head_hexsha": "1281723ae56eacd103597ff4081a205583706e62", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nipype/algorithms/tests/test_confounds.py", "max_forks_repo_name": "Conxz/nipype", "max_forks_repo_head_hexsha": "1281723ae56eacd103597ff4081a205583706e62", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.976744186, "max_line_length": 92, "alphanum_fraction": 0.6858181818, "include": true, "reason": "import numpy", "num_tokens": 357}
|
import os
import shutil
import unittest
import numpy as np
import bilby_pipe
from bilby_pipe.input import Input
from bilby_pipe.utils import BilbyPipeError, parse_args
class TestParser(unittest.TestCase):
    """Unit tests for the create_injections command-line parser."""

    @staticmethod
    def _parse(arg_list):
        # Build a fresh parser and run it over the given argument fragment.
        parser = bilby_pipe.create_injections.create_parser()
        return parse_args(arg_list, parser)

    def test_parser_defaults(self):
        prior = "tests/example_prior.prior"
        args, _ = self._parse([prior, "-n", "1"])
        self.assertEqual(args.prior_file, prior)
        self.assertEqual(args.n_injection, 1)
        self.assertEqual(args.extension, "dat")

    def test_parser_with_prior_file(self):
        prior = "tests/example_prior.prior"
        args, _ = self._parse([prior, "--n-injection", "3", "-s", "1234"])
        self.assertEqual(args.prior_file, prior)
        self.assertEqual(args.n_injection, 3)
        self.assertEqual(args.generation_seed, 1234)

    def test_parser_with_default_prior_file(self):
        args, _ = self._parse(["4s", "--n-injection", "3"])
        self.assertEqual(args.prior_file, "4s")
        self.assertEqual(args.n_injection, 3)

    def test_parser_with_json(self):
        prior = "tests/example_prior.prior"
        args, _ = self._parse([prior, "--n-injection", "3", "-e", "json"])
        self.assertEqual(args.extension, "json")
class TestCreateInjections(unittest.TestCase):
    """Behavioural tests for create_injections.create_injection_file.

    The repeated call-site boilerplate of the original is collapsed into the
    private ``_create`` helper; each test forwards only what differs.
    """

    def setUp(self):
        self.outdir = "outdir"
        self.example_prior_file = "tests/example_prior.prior"
        self.filename = f"{self.outdir}/injection.dat"

    def tearDown(self):
        # Error-path tests may never create the directory, hence the guard.
        try:
            shutil.rmtree(self.outdir)
        except FileNotFoundError:
            pass

    def _create(self, filename, n_injection, **kwargs):
        """Call create_injection_file with the example prior unless overridden.

        Any keyword accepted by create_injection_file may be forwarded.
        """
        kwargs.setdefault("prior_file", self.example_prior_file)
        bilby_pipe.create_injections.create_injection_file(
            filename, n_injection, **kwargs
        )

    def test_create_injection_file(self):
        filename = f"{self.outdir}/injections.dat"
        n_injection = 3
        self._create(filename, n_injection, generation_seed=None, extension="dat")
        self.assertTrue(os.path.isfile(filename))
        injections = np.genfromtxt(filename, names=True)
        self.assertEqual(len(injections), n_injection)

    def test_create_injection_file_dat_ext(self):
        # The "dat" extension is appended when the filename has none.
        filename = f"{self.outdir}/injections"
        n_injection = 3
        self._create(filename, n_injection, generation_seed=None, extension="dat")
        actual_filename = filename + ".dat"
        self.assertTrue(os.path.isfile(actual_filename))
        df = Input.read_dat_injection_file(actual_filename)
        self.assertEqual(len(df), n_injection)

    def test_create_injection_file_json_ext(self):
        # The "json" extension is appended when the filename has none.
        filename = f"{self.outdir}/injections"
        n_injection = 3
        self._create(filename, n_injection, generation_seed=None, extension="json")
        actual_filename = filename + ".json"
        self.assertTrue(os.path.isfile(actual_filename))
        df = Input.read_json_injection_file(actual_filename)
        self.assertEqual(len(df), n_injection)

    def test_create_injection_file_with_gps_file(self):
        filename = f"{self.outdir}/injections"
        n_injection = 2
        self._create(
            filename,
            n_injection,
            generation_seed=None,
            extension="json",
            gps_file="tests/gps_file.txt",
        )
        filename += ".json"
        gps_vals = np.loadtxt("tests/gps_file.txt")
        df = Input.read_json_injection_file(filename)
        self.assertEqual(len(df), n_injection)
        self.assertTrue(
            len(df.columns.values) > 1, f"Column names: {df.columns.values}"
        )
        # Injection times should track the GPS file (coarse, 100 s scale).
        self.assertAlmostEqual(
            df["geocenter_times"].iloc[0] / 100, gps_vals[0] / 100, places=1
        )

    def test_create_injection_file_json(self):
        # A .json suffix in the filename takes precedence over extension="dat".
        filename = f"{self.outdir}/injections.json"
        n_injection = 3
        self._create(filename, n_injection, generation_seed=None, extension="dat")
        self.assertTrue(os.path.isfile(filename))
        df = Input.read_json_injection_file(filename)
        self.assertEqual(len(df), n_injection)

    def test_create_injection_file_generation_seed(self):
        # Equal seeds must reproduce identical injection sets; a different
        # seed must produce a different set.
        filename = f"{self.outdir}/injections"
        n_injection = 3
        self._create(filename + "_A", n_injection, generation_seed=123)
        injectionsA = np.genfromtxt(filename + "_A.dat", names=True)
        self._create(filename + "_B", n_injection, generation_seed=123)
        injectionsB = np.genfromtxt(filename + "_B.dat", names=True)
        self._create(filename + "_C", n_injection, generation_seed=321)
        injectionsC = np.genfromtxt(filename + "_C.dat", names=True)
        self.assertTrue(np.all(injectionsA == injectionsB))
        self.assertFalse(np.all(injectionsA == injectionsC))

    def test_n_injection_error(self):
        # None, negative and non-integer injection counts are all rejected.
        for n_injection in [None, -1, np.inf]:
            with self.assertRaises(BilbyPipeError):
                self._create(self.filename, n_injection)

    def test_unknown_prior_file(self):
        with self.assertRaises(FileNotFoundError):
            self._create(self.filename, 1, prior_file="not_a_file")

    def test_none_prior_file(self):
        with self.assertRaises(BilbyPipeError):
            self._create(self.filename, 1, prior_file=None)

    def test_unknown_ext(self):
        with self.assertRaises(BilbyPipeError):
            self._create("test", 1, extension="other")

    def test_unknown_ext_from_filename(self):
        with self.assertRaises(BilbyPipeError):
            self._create("test.other", 1)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{"hexsha": "98b4b32cf19a7000ec44b68a40f24c0a93ff009c", "size": 8035, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/create_injection_test.py", "max_stars_repo_name": "Samanwaya1301/tidal-heating-bilby-pipe", "max_stars_repo_head_hexsha": "b495d4f3ffe3ef61a46ce5b87c826e10b087e2e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/create_injection_test.py", "max_issues_repo_name": "Samanwaya1301/tidal-heating-bilby-pipe", "max_issues_repo_head_hexsha": "b495d4f3ffe3ef61a46ce5b87c826e10b087e2e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/create_injection_test.py", "max_forks_repo_name": "Samanwaya1301/tidal-heating-bilby-pipe", "max_forks_repo_head_hexsha": "b495d4f3ffe3ef61a46ce5b87c826e10b087e2e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1936936937, "max_line_length": 84, "alphanum_fraction": 0.6548848787, "include": true, "reason": "import numpy", "num_tokens": 1706}
|
import sys
import os
import numpy as np
from PIL import Image
from external_model import load_external_model, pred_by_external_model
from PIL import ImageFile
# Tolerate partially written tiles instead of raising on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True

APS = 100;  # analysis patch size in pixels (square)
TileFolder = sys.argv[1] + '/';  # directory containing the WSI tiles
CNNModel = sys.argv[2];  # path to the external model config file
#CNNModel = '/home/shahira/quip_classification/NNFramework_TF/config/test_wsi_ext/config_vgg-mix_test_ext.ini'
#CNNModel = '/home/shahira/quip_classification/NNFramework_TF/config/test_wsi_ext/config_incep-mix_test_ext.ini'
#CNNModel = '/home/shahira/quip_classification/NNFramework_TF/config/test_wsi_ext/config_vgg-mix_test_ext_binary.ini'
#CNNModel = '/home/shahira/quip_classification/NNFramework_TF/config/test_wsi_ext/config_incep-mix_test_ext_binary.ini'
heat_map_out = sys.argv[3];  # output heatmap filename (written inside TileFolder)
BatchSize = int(sys.argv[4]); # shahira: Batch size argument
#BatchSize = 96;
#BatchSize = 48;
print('BatchSize = ', BatchSize);
def whiteness(png):
    """Average per-channel standard deviation of an RGB patch.

    Low values indicate a near-uniform (background) patch.
    """
    channel_stds = [np.std(png[:, :, channel]) for channel in range(3)]
    return sum(channel_stds) / 3.0
def load_data(todo_list, rind):
    """Extract up to BatchSize non-background APS x APS patches from tiles.

    Consumes filenames from the front of todo_list until one batch is filled.
    Returns (remaining_todo_list, X, inds, coor, rind) where X holds the
    accepted patches (channels-first), inds the global patch index of each
    accepted patch, coor the slide coordinates of EVERY candidate patch, and
    rind the running global patch counter.
    NOTE(review): indentation was reconstructed; the placement of the
    coor/cind/rind updates outside the whiteness branch matches how
    split_validation scatters predictions — confirm against upstream.
    """
    X = np.zeros(shape=(BatchSize*40, 3, APS, APS), dtype=np.float32);
    inds = np.zeros(shape=(BatchSize*40,), dtype=np.int32);
    coor = np.zeros(shape=(20000000, 2), dtype=np.int32);

    xind = 0;  # accepted patches in this batch
    lind = 0;  # tiles consumed from todo_list
    cind = 0;  # candidate patches seen in this call
    for fn in todo_list:
        lind += 1;
        full_fn = TileFolder + '/' + fn;
        if not os.path.isfile(full_fn):
            continue;
        if len(fn.split('_')) < 4:
            continue;

        # Tile filename encodes: x-offset, y-offset, svs patch width, png patch width.
        x_off = float(fn.split('_')[0]);
        y_off = float(fn.split('_')[1]);
        svs_pw = float(fn.split('_')[2]);
        png_pw = float(fn.split('_')[3].split('.png')[0]);

        png = np.array(Image.open(full_fn).convert('RGB'));
        for x in range(0, png.shape[1], APS):
            if x + APS > png.shape[1]:
                continue;
            for y in range(0, png.shape[0], APS):
                if y + APS > png.shape[0]:
                    continue;

                # Keep only patches with enough colour variation (not background).
                if (whiteness(png[y:y+APS, x:x+APS, :]) >= 12):
                    X[xind, :, :, :] = png[y:y+APS, x:x+APS, :].transpose();
                    inds[xind] = rind;
                    xind += 1;

                # Record slide coordinates of the patch centre for every candidate.
                coor[cind, 0] = np.int32(x_off + (x + APS/2) * svs_pw / png_pw);
                coor[cind, 1] = np.int32(y_off + (y + APS/2) * svs_pw / png_pw);
                cind += 1;
                rind += 1;

        if xind >= BatchSize:
            break;

    X = X[0:xind];
    inds = inds[0:xind];
    coor = coor[0:cind];
    return todo_list[lind:], X, inds, coor, rind;
def val_fn_epoch_on_disk(classn, model):
    """Run the model over every tile in TileFolder, batch by batch.

    :param classn: number of model output classes (prediction width).
    :param model: handle accepted by pred_by_external_model(); may expose an
                  optional restart_model() used to work around TensorFlow
                  memory exhaustion on large slides.
    :return: (all_or, all_inds, all_coor) -- per-patch predictions, global
             patch-position ids, and slide-space patch centres, trimmed to
             the number of patches actually processed.
    """
    # Pre-allocated upper-bound buffers; trimmed before returning.
    all_or = np.zeros(shape=(20000000, classn), dtype=np.float32);
    all_inds = np.zeros(shape=(20000000,), dtype=np.int32);
    all_coor = np.zeros(shape=(20000000, 2), dtype=np.int32);
    rind = 0;
    n1 = 0;
    n2 = 0;
    n3 = 0;
    todo_list = os.listdir(TileFolder);
    # shahira: Handling tensorflow memory exhaust issue on large slides
    reset_limit = 100;
    cur_indx = 0;
    while len(todo_list) > 0:
        todo_list, inputs, inds, coor, rind = load_data(todo_list, rind);
        if len(inputs) == 0:
            break;
        output = pred_by_external_model(model, inputs);
        all_or[n1:n1+len(output)] = output;
        all_inds[n2:n2+len(inds)] = inds;
        all_coor[n3:n3+len(coor)] = coor;
        n1 += len(output);
        n2 += len(inds);
        n3 += len(coor);
        # shahira: Handling tensorflow memory exhaust issue on large slides
        cur_indx += 1;
        if(cur_indx > reset_limit):
            cur_indx = 0;
            print('Restarting model!');
            try:
                model.restart_model();
                print('Restarted!');
            except Exception:
                # BUG FIX: was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit. The restart remains
                # best-effort: models without restart_model() keep running.
                print('Restart not supported!');
    all_or = all_or[:n1];
    all_inds = all_inds[:n2];
    all_coor = all_coor[:n3];
    return all_or, all_inds, all_coor;
def split_validation(classn):
    """Predict on all tiles and write the heat-map file into TileFolder.

    Each output line is 'x y score' for one kept patch (score is the first
    class column of the model output).

    :param classn: number of model output classes.
    """
    model = load_external_model(CNNModel)
    # Testing
    Or, inds, coor = val_fn_epoch_on_disk(classn, model);
    Or_all = np.zeros(shape=(coor.shape[0],), dtype=np.float32);
    Or_all[inds] = Or[:, 0];
    # FIX: use a context manager so the file is flushed and closed even if
    # a write raises part-way through (the old open()/close() pair leaked
    # the handle on error).
    with open(TileFolder + '/' + heat_map_out, 'w') as fid:
        for idx in range(0, Or_all.shape[0]):
            fid.write('{} {} {}\n'.format(coor[idx][0], coor[idx][1], Or_all[idx]));
    return;
def main():
    """Script entry point: bail out quietly if the tile folder is missing,
    otherwise run prediction and write the heat map."""
    if not os.path.exists(TileFolder):
        exit(0);  # nothing to do without input tiles
    classes = ['Lymphocytes'];  # single-class heat map
    classn = len(classes);
    sys.setrecursionlimit(10000);
    split_validation(classn);
    print('DONE!');
if __name__ == "__main__":
    main();
|
{"hexsha": "027b1e467945350f04d7ed170c3a2e2af73f7b39", "size": 4642, "ext": "py", "lang": "Python", "max_stars_repo_path": "u24_lymphocyte/prediction/lymphocyte/pred_by_external_model.py", "max_stars_repo_name": "ShahiraAbousamra/til_classification", "max_stars_repo_head_hexsha": "cede5453cb46b9c168a1f50f76ded43f8ca3fcbe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-25T15:58:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T11:28:44.000Z", "max_issues_repo_path": "u24_lymphocyte/prediction/lymphocyte/pred_by_external_model.py", "max_issues_repo_name": "ShahiraAbousamra/til_classification", "max_issues_repo_head_hexsha": "cede5453cb46b9c168a1f50f76ded43f8ca3fcbe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "u24_lymphocyte/prediction/lymphocyte/pred_by_external_model.py", "max_forks_repo_name": "ShahiraAbousamra/til_classification", "max_forks_repo_head_hexsha": "cede5453cb46b9c168a1f50f76ded43f8ca3fcbe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-16T00:45:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T17:28:39.000Z", "avg_line_length": 29.7564102564, "max_line_length": 119, "alphanum_fraction": 0.5794915984, "include": true, "reason": "import numpy", "num_tokens": 1318}
|
# Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cugraph
from cugraph.tests import utils
# Temporarily suppress warnings till networkX fixes deprecation warnings
# (Using or importing the ABCs from 'collections' instead of from
# 'collections.abc' is deprecated, and in 3.8 it will stop working) for
# python 3.7. Also, this import networkx needs to be relocated in the
# third-party group once this gets fixed.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import networkx as nx
print("Networkx version : {} ".format(nx.__version__))
# Parameter grids shared by the ego-graph tests below.
SEEDS = [0, 5, 13]  # ego-graph centre vertices
RADIUS = [1, 2, 3]  # neighborhood radii
@pytest.mark.parametrize("graph_file", utils.DATASETS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("radius", RADIUS)
def test_ego_graph_nx(graph_file, seed, radius):
    """cugraph.ego_graph must be isomorphic to networkx.ego_graph."""
    gc.collect()
    edges = utils.read_csv_for_nx(graph_file, read_weights_in_sp=True)
    nx_graph = nx.from_pandas_edgelist(
        edges,
        create_using=nx.Graph(),
        source="0",
        target="1",
        edge_attr="weight",
    )
    # Reference result from networkx, candidate result from cugraph.
    expected = nx.ego_graph(nx_graph, seed, radius=radius)
    actual = cugraph.ego_graph(nx_graph, seed, radius=radius)
    assert nx.is_isomorphic(expected, actual)
@pytest.mark.parametrize("graph_file", utils.DATASETS)
@pytest.mark.parametrize("seeds", [[0, 5, 13]])
@pytest.mark.parametrize("radius", [1, 2, 3])
def test_batched_ego_graphs(graph_file, seeds, radius):
    """Each slice of cugraph.batched_ego_graphs must match nx.ego_graph."""
    gc.collect()
    edges = utils.read_csv_for_nx(graph_file, read_weights_in_sp=True)
    nx_graph = nx.from_pandas_edgelist(
        edges,
        create_using=nx.Graph(),
        source="0",
        target="1",
        edge_attr="weight",
    )
    # One cugraph call computes all ego graphs; offsets delimit the edge
    # range belonging to each seed.
    batched_df, offsets = cugraph.batched_ego_graphs(nx_graph, seeds, radius=radius)
    for idx, seed in enumerate(seeds):
        reference = nx.ego_graph(nx_graph, seed, radius=radius)
        seed_edges = batched_df[offsets[idx]:offsets[idx + 1]]
        candidate = nx.from_pandas_edgelist(
            seed_edges, source="src", target="dst", edge_attr="weight"
        )
        assert nx.is_isomorphic(reference, candidate)
|
{"hexsha": "b259c2567dcfbc7013f1d5eec7ca924adf753c7e", "size": 2628, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cugraph/tests/test_egonet.py", "max_stars_repo_name": "mike-wendt/cugraph", "max_stars_repo_head_hexsha": "1f0f14eba2e6253423b1a58ca38989261308df6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/cugraph/tests/test_egonet.py", "max_issues_repo_name": "mike-wendt/cugraph", "max_issues_repo_head_hexsha": "1f0f14eba2e6253423b1a58ca38989261308df6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/cugraph/tests/test_egonet.py", "max_forks_repo_name": "mike-wendt/cugraph", "max_forks_repo_head_hexsha": "1f0f14eba2e6253423b1a58ca38989261308df6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6923076923, "max_line_length": 79, "alphanum_fraction": 0.7176560122, "include": true, "reason": "import networkx", "num_tokens": 711}
|
import matplotlib.pyplot as plt
import numpy as np
from ROOT import TFile,TAxis,TH1,gROOT
import os
import numpy as np
import pickle
# from dataclasses import dataclass
# @dataclass
class PyHist:
    """Basic wrapper for a ROOT histogram.

    Contains no ROOT functionality -- it is a plain container for the bin
    data plus a few plotting defaults.
    """
    def __init__(self, Name, Bin_Values, Bin_Errors, Bin_Centres, Bin_Edges, **kwargs):
        # Data
        self.Name = Name
        self.Bin_Values = Bin_Values
        self.Bin_Errors = Bin_Errors
        self.Bin_Centres = Bin_Centres
        self.Bin_Edges = Bin_Edges
        # Plot meta-data
        self.colour = kwargs.get("colour", "blue")
        # BUG FIX: the line width was previously only read under the
        # misspelled key "linewdith", so callers passing linewidth=...
        # (e.g. Histogram_Wrapper.Create_Wrapper) were silently ignored.
        # Accept the correct spelling first; fall back to the legacy typo
        # for backward compatibility, then to the old default of 1.5.
        self.linewidth = kwargs.get("linewidth", kwargs.get("linewdith", 1.5))
        self.legend_entry = kwargs.get("legend_entry", "")
class Histogram_Wrapper:
    """
    Larger wrapper which contains PyHists, including methods for the
    ROOT -> numpy conversion.
    """
    @staticmethod
    def get_bin_values(hist):
        """Bin contents as a numpy array (ROOT bins are 1-indexed)."""
        return np.asarray([hist.GetBinContent(binn + 1) for binn in range(0, hist.GetNbinsX())])
    @staticmethod
    def get_bin_errors(hist):
        """Bin errors as a numpy array."""
        return np.asarray([hist.GetBinError(binn + 1) for binn in range(0, hist.GetNbinsX())])
    @staticmethod
    def get_bin_edges(hist):
        """All NbinsX+1 bin edges (upper edge of 'bin 0' is the lower bound)."""
        return np.asarray([hist.GetXaxis().GetBinUpEdge(binn) for binn in range(0, hist.GetXaxis().GetNbins() + 1)])
    @staticmethod
    def get_bin_centres(hist):
        """Bin centres as a numpy array."""
        return np.asarray([hist.GetXaxis().GetBinCenter(binn + 1) for binn in range(0, hist.GetNbinsX())])
    @staticmethod
    def Compute_Normalised(ROOT_hist):
        """Return a clone of ROOT_hist scaled to unit integral."""
        h1dN = ROOT_hist.Clone(ROOT_hist.GetName() + "_norm")
        h1dN.Scale(1 / ROOT_hist.Integral())
        return h1dN
    @classmethod
    def Create_Wrapper(cls, hist, name, **kwargs):
        """Convert a ROOT histogram into a plain PyHist container.

        (First parameter renamed from the misleading 'self': this is a
        classmethod, so it receives the class, not an instance.)
        """
        Bin_Values = cls.get_bin_values(hist)
        Bin_Errors = cls.get_bin_errors(hist)
        Bin_Centres = cls.get_bin_centres(hist)
        Bin_Edges = cls.get_bin_edges(hist)
        return PyHist(name, Bin_Values, Bin_Errors, Bin_Centres, Bin_Edges, **kwargs)
    def __init__(self, ROOT_hist, name, **kwargs):
        '''
        The Histogram_Wrapper object is for converting ROOT histograms into
        numpy objects; apart from the *_ROOT_hist members, the data members
        of the class contain no ROOT objects.
        '''
        # Meta-data
        self.name = name
        self.observable_type = kwargs.get("obs")
        self.file_name = kwargs.get("filename")
        # Plotting information
        self.legend_entry = kwargs.get("legend_entry", "")
        self.colour = kwargs.get("colour", "black")
        self.linewidth = kwargs.get("linewidth", 2)
        # ROOT histograms
        self.UnNorm_ROOT_hist = ROOT_hist
        # Unnormalised Hist wrapper
        self.UnNorm_PyWrap_Hist = self.Create_Wrapper(ROOT_hist, self.name + "_Unnormalised",
                        colour=self.colour, legend_entry=self.legend_entry, linewidth=self.linewidth)
        # Normalising an empty histogram would divide by zero. FIX: when the
        # integral is zero the normalised members are now explicitly None
        # instead of being left undefined (which raised AttributeError on
        # any later access).
        if ROOT_hist.Integral() != 0.0:
            norm_hist = self.Compute_Normalised(ROOT_hist)
            self.Norm_ROOT_hist = norm_hist
            # Normalised Hist wrapper
            self.Norm_PyWrap_Hist = self.Create_Wrapper(norm_hist, self.name + "_Normalised",
                        colour=self.colour, legend_entry=self.legend_entry, linewidth=self.linewidth)
        else:
            self.Norm_ROOT_hist = None
            self.Norm_PyWrap_Hist = None
|
{"hexsha": "c3ebdf3364e7f00596ee2e40a38828e565bbb24f", "size": 3813, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/heptools/histplot/PyHist_Class.py", "max_stars_repo_name": "ethansimpson285/HEPTools", "max_stars_repo_head_hexsha": "7ccb54d6264e693667d97966ab0cd6a4f815d120", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/heptools/histplot/PyHist_Class.py", "max_issues_repo_name": "ethansimpson285/HEPTools", "max_issues_repo_head_hexsha": "7ccb54d6264e693667d97966ab0cd6a4f815d120", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/heptools/histplot/PyHist_Class.py", "max_forks_repo_name": "ethansimpson285/HEPTools", "max_forks_repo_head_hexsha": "7ccb54d6264e693667d97966ab0cd6a4f815d120", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3055555556, "max_line_length": 113, "alphanum_fraction": 0.6422764228, "include": true, "reason": "import numpy", "num_tokens": 924}
|
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
import numpy as np
def run(self, Input):
  """Compute seismic-modification NPV figures and store them on *self*.

  Reads Input['sm_p_failure'] (probability of failure) and writes three
  attributes: sm_npv_a (undiscounted), and sm_npv_b / sm_npv_c, the same
  figure discounted 2 and 3 years at the risk-free rate. All other inputs
  are fixed constants below.
  """
  SHUTDOWN_DAYS = 10        # days
  REPLACEMENT_COST = 4.48   # M$
  RISK_FREE_RATE = 0.03
  HARD_SAVINGS = 0.
  discount_factor = 1. + RISK_FREE_RATE
  base_npv = Input['sm_p_failure'] * SHUTDOWN_DAYS + REPLACEMENT_COST + HARD_SAVINGS
  self.sm_npv_a = base_npv
  self.sm_npv_b = base_npv / discount_factor ** 2
  self.sm_npv_c = base_npv / discount_factor ** 3
|
{"hexsha": "3ddb29dd25251f4f91fbceee35d7b8e971e888e3", "size": 399, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/MilestoneTestsSeptFY19/use_case_II/MC/seismic_modification.py", "max_stars_repo_name": "dgarrett622/LOGOS", "max_stars_repo_head_hexsha": "7234b8b5e80bc79526b4cbced7efd5ae482f7c44", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-05-04T08:42:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T13:14:12.000Z", "max_issues_repo_path": "tests/MilestoneTestsSeptFY19/use_case_II/MC/seismic_modification.py", "max_issues_repo_name": "albernsrya/LOGOS", "max_issues_repo_head_hexsha": "535a25ccd3a83259b615acd569257d751fe00439", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2021-01-12T17:41:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-03T18:20:16.000Z", "max_forks_repo_path": "tests/MilestoneTestsSeptFY19/use_case_II/MC/seismic_modification.py", "max_forks_repo_name": "albernsrya/LOGOS", "max_forks_repo_head_hexsha": "535a25ccd3a83259b615acd569257d751fe00439", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-02-05T17:18:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T14:36:42.000Z", "avg_line_length": 28.5, "max_line_length": 79, "alphanum_fraction": 0.7117794486, "include": true, "reason": "import numpy", "num_tokens": 135}
|
[STATEMENT]
lemma map_values_cong:
assumes "\<And>x y. Mapping.lookup t x = Some y \<Longrightarrow> f x y = f' x y"
shows "Mapping.map_values f t = Mapping.map_values f' t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Mapping.map_values f t = Mapping.map_values f' t
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Mapping.map_values f t = Mapping.map_values f' t
[PROOF STEP]
have "map_option (f x) (Mapping.lookup t x) = map_option (f' x) (Mapping.lookup t x)" for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_option (f x) (Mapping.lookup t x) = map_option (f' x) (Mapping.lookup t x)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
Mapping.lookup t ?x = Some ?y \<Longrightarrow> f ?x ?y = f' ?x ?y
goal (1 subgoal):
1. map_option (f x) (Mapping.lookup t x) = map_option (f' x) (Mapping.lookup t x)
[PROOF STEP]
by (cases "Mapping.lookup t x") auto
[PROOF STATE]
proof (state)
this:
map_option (f ?x) (Mapping.lookup t ?x) = map_option (f' ?x) (Mapping.lookup t ?x)
goal (1 subgoal):
1. Mapping.map_values f t = Mapping.map_values f' t
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
map_option (f ?x) (Mapping.lookup t ?x) = map_option (f' ?x) (Mapping.lookup t ?x)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
map_option (f ?x) (Mapping.lookup t ?x) = map_option (f' ?x) (Mapping.lookup t ?x)
goal (1 subgoal):
1. Mapping.map_values f t = Mapping.map_values f' t
[PROOF STEP]
by (auto simp: lookup_map_values intro!: mapping_eqI)
[PROOF STATE]
proof (state)
this:
Mapping.map_values f t = Mapping.map_values f' t
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 689, "file": "Eval_FO_Ailamazyan", "length": 8}
|
import pandas as pd
import numpy as np
import yfinance as yf
def book_to_market():
    """
    Calculates the book to market ratio (shareholders equity/ market cap) for every company based on the latest
    stock price and annual financial statement.
    :return: DataFrame indexed by ticker ('Stock') with one column 'book_to_market'.
    """
    # load data
    df_prices = pd.read_parquet('./data/stock_returns.parquet.gzip')
    df_financials = pd.read_parquet("./data/financial_statements_annual.parquet.gzip")
    # price data
    # only keep latest date
    df_prices = df_prices[-1:]
    # transpose and rename column and index (one row per ticker)
    df_prices = df_prices.T
    df_prices.index.name = 'Stock'
    df_prices.columns = ['stock_value']
    # financial data
    # only keep needed measures for the ratio
    df_financials = df_financials[['year', 'cik', 'ticker', 'StockholdersEquity',
                                   'WeightedAverageNumberOfSharesOutstandingBasic']]
    # only keep companies with at least 2 annual statements
    # NOTE(review): len(x) > 2 actually keeps companies with 3+ statements,
    # which disagrees with the comment above -- confirm the intended threshold.
    df_financials = df_financials.groupby('cik').filter(lambda x: len(x) > 2)
    # for every company keep the latest values
    df_financials = df_financials.sort_values('year', ascending=False).drop_duplicates('cik').sort_index()
    # only keep companies that filled all the needed tags
    df_financials = df_financials.dropna()
    # only keep companies that handed in their annual report in the last 2 years
    df_financials = df_financials[df_financials.loc[:, 'year'] >= df_financials['year'].max() - 1]
    # merge financial and stock return data; rows without a matching price
    # are dropped by the dropna() below
    df_financials = df_financials.merge(df_prices, left_on='ticker', right_index=True, how='left')
    df_financials = df_financials.dropna()
    # create book to market ratio: equity / (shares outstanding * price)
    df_financials['book_to_market'] = df_financials['StockholdersEquity'] / \
                                      (df_financials['WeightedAverageNumberOfSharesOutstandingBasic'] * df_financials[
                                          'stock_value'])
    df_financials.index = df_financials['ticker']
    df_financials.index.name = 'Stock'
    df_financials = df_financials[['book_to_market']]
    return df_financials
def f_score():
    """
    Creates the data for the F-Score strategy.
    Steps:
    1) Load financial data
    2) Get Book to Market ratio
    3) Only keep top 5 quantile of book to market companies
    4) Calculate Scores + final score
    5) Keep latest annual statement for each company
    6) Only keep companies that have at least 5 measures
    7) Create signal
    :return: DataFrame indicating which stocks to long and short
    """
    # load data
    df_financials = pd.read_parquet("./data/financial_statements_annual.parquet.gzip")
    # create book to market ratio
    btm = book_to_market()
    # keep top 5 quantile (value stocks)
    btm['quantile_rank'] = pd.qcut(btm['book_to_market'], 5, labels=False)
    btm = btm[btm.loc[:, 'quantile_rank'] == 4]
    # keep companies in top 5 quantile in financial DataFrame
    df_financials = df_financials.merge(btm, how='inner', left_on='ticker', right_index=True)
    # Get assets beginning of the year (prior-year assets) and avg last 2 years
    df_financials['assets_beginning'] = df_financials.groupby('cik', sort=False)['Assets'].apply(
        lambda x: (x.shift())).to_numpy()
    df_financials['assets_avg'] = df_financials.groupby('cik', sort=False)['Assets'].apply(
        lambda x: ((x+x.shift())/2)).to_numpy()
    # first year for every company --> keep assets of that year
    df_financials['assets_avg'] = df_financials['assets_avg'].fillna(df_financials['Assets'])
    # score 1 - RoA
    df_financials['RoA'] = df_financials['OperatingIncomeLoss']/df_financials['assets_beginning']
    df_financials['score_1'] = np.where(df_financials['RoA'] > 0, 1, 0)
    # score 2 - CFO
    df_financials['CFO'] = df_financials['NetCashProvidedByUsedInOperatingActivities']/df_financials['assets_beginning']
    df_financials['score_2'] = np.where(df_financials['CFO'] > 0, 1, 0)
    # score 3 - delta RoA
    # NOTE(review): this stores the PRIOR-year RoA (x.shift()), not a
    # year-over-year difference like 'delta_leverage' below (x - x.shift()),
    # so score_3 rewards a positive RoA last year rather than an improving
    # RoA -- confirm which was intended.
    df_financials['delta_RoA'] = df_financials.groupby('cik', sort=False)['RoA'].apply(
        lambda x: (x.shift())).to_numpy()
    df_financials['score_3'] = np.where(df_financials['delta_RoA'] > 0, 1, 0)
    # score 4 - Accruals (cash flow should exceed reported income)
    df_financials['accrual'] = df_financials['RoA']-df_financials['CFO']
    df_financials['score_4'] = np.where(df_financials['accrual'] < 0, 1, 0)
    # score 5 - delta Leverage (falling long-term leverage scores 1)
    df_financials['noncurrent_liab'] = df_financials['Liabilities']-df_financials['LiabilitiesCurrent']
    df_financials['noncurrent_liab'] = df_financials['noncurrent_liab'].fillna(df_financials['OtherLiabilitiesNoncurrent'])
    df_financials['leverage'] = df_financials['noncurrent_liab']/df_financials['assets_avg']
    df_financials['delta_leverage'] = df_financials.groupby('cik', sort=False)['leverage'].apply(
        lambda x: (x-x.shift())).to_numpy()
    df_financials['score_5'] = np.where(df_financials['delta_leverage'] < 0, 1, 0)
    # score 6 - delta liquid (rising current ratio scores 1)
    df_financials['current_ratio'] = df_financials['AssetsCurrent']/df_financials['LiabilitiesCurrent']
    df_financials['delta_liquid'] = df_financials.groupby('cik', sort=False)['current_ratio'].apply(
        lambda x: (x-x.shift())).to_numpy()
    df_financials['score_6'] = np.where(df_financials['delta_liquid'] > 0, 1, 0)
    # score 7 - Equity-offer (no increase in shares outstanding scores 1)
    df_financials['delta_equity'] = df_financials.groupby('cik', sort=False)['WeightedAverageNumberOfSharesOutstandingBasic'].apply(
        lambda x: (x - x.shift())).to_numpy()
    df_financials['score_7'] = np.where(df_financials['delta_equity'] > 0, 0, 1)
    # score 8 - delta margin (rising gross margin scores 1)
    df_financials['Revenues'] = df_financials['Revenues'].fillna(df_financials['RevenueFromContractWithCustomerExcludingAssessedTax'])
    df_financials['gross_profit'] = df_financials['Revenues']-df_financials['CostOfGoodsAndServicesSold']
    df_financials['gross_profit'] = df_financials['gross_profit'].fillna(df_financials['Revenues']-df_financials['CostOfRevenue'])
    df_financials['gross_margin'] = df_financials['gross_profit']/df_financials['Revenues']
    df_financials['delta_gross_margin'] = df_financials.groupby('cik', sort=False)['gross_margin'].apply(
        lambda x: (x-x.shift())).to_numpy()
    df_financials['score_8'] = np.where(df_financials['delta_gross_margin'] > 0, 1, 0)
    # score 9 - delta turn (rising asset turnover scores 1)
    df_financials['turnover_ratio'] = df_financials['Revenues'] / df_financials[
        'assets_beginning']
    df_financials['delta_turnover'] = df_financials.groupby('cik', sort=False)['turnover_ratio'].apply(
        lambda x: (x - x.shift())).to_numpy()
    df_financials['score_9'] = np.where(df_financials['delta_turnover'] > 0, 1, 0)
    # for every company keep the latest values
    df_financials = df_financials.sort_values('year', ascending=False).drop_duplicates('cik').sort_index()
    # count number of missing values and remove big numbers
    df_financials['missing_values'] = df_financials[['RoA', 'CFO', 'delta_RoA', 'accrual', 'delta_leverage',
                                                     'delta_liquid', 'delta_gross_margin', 'delta_turnover',
                                                     'delta_equity']].isnull().sum(axis=1)
    df_financials = df_financials[df_financials.loc[:, 'missing_values'] < 5]
    # final score
    df_financials['score'] = df_financials['score_1']+df_financials['score_2']+df_financials['score_3']+\
                             df_financials['score_4']+df_financials['score_5']+df_financials['score_6']+\
                             df_financials['score_7']+df_financials['score_8']+df_financials['score_9']
    # create signal
    # NOTE: np.where with string branches coerces np.nan to the string 'nan';
    # those middle-score rows are dropped by the isin filter below.
    df_financials['Signal'] = np.where(df_financials['score'] >= 7, 'Long',
                                       np.where(df_financials['score'] <= 2, 'Short', np.nan))
    df_financials = df_financials[df_financials.loc[:, 'Signal'].isin(['Long', 'Short'])]
    df_financials.index = df_financials['ticker']
    df_financials.index.name = 'Stock'
    df_financials = df_financials['Signal']
    df_financials.to_excel('./data/f_score.xlsx')
    return df_financials
def pead():
    """
    Creates the data for the Post Earnings Announcement Drift strategy
    Steps:
    1) Load SEC annual financial data and only keep companies which report EPS
    2) Only keep companies that have at least 3 annual statements
    3) Calculate the expected returns by taking the mean of the last 4 years
    4) For every company keep the latest annual statement and merge with expected value
    5) Calculate unexpected earnings
    6) Calculate standardized unexpected earnings
    7) Create ranking and signal
    :return: DataFrame indicating which stocks to long and short
    """
    # load data
    df = pd.read_parquet("./data/financial_statements_annual.parquet.gzip")
    # set stock as index
    df.index = df['ticker']
    df.index.name = 'Stock'
    # only keep needed columns
    df = df[['cik', 'year', 'ticker', 'EarningsPerShareBasic']]
    # get rid of companies without earnings per share
    df = df.dropna(subset=['EarningsPerShareBasic'])
    # only keep companies with at least 3 annual statements
    # NOTE(review): len(x) > 3 keeps companies with 4+ statements, which
    # disagrees with the comment above -- confirm the intended threshold.
    df = df.groupby('cik').filter(lambda x: len(x) > 3)
    # for every company get the previous 4 years mean and std
    # NOTE(review): assumes rows are in ascending-year order per company so
    # that count == 0 marks the newest statement -- confirm upstream ordering.
    df['count'] = df.groupby('cik').cumcount(ascending=False)
    last_4_df = df[(df.loc[:, 'count'] <= 4) & (df.loc[:, 'count'] > 0)]
    last_4_df = last_4_df.groupby('ticker')['EarningsPerShareBasic'].agg(['mean', 'std'])
    # for every company keep the latest values
    df = df.sort_values('year', ascending=False).drop_duplicates('cik').sort_index()
    # join average and DataFrame (indexes are both tickers)
    df = df.merge(last_4_df, left_index=True, right_index=True, how='left')
    # calculate unexpected earnings: actual EPS minus 4-year mean
    df['unexpected_earnings'] = df['EarningsPerShareBasic']-df['mean']
    # calculate standardized unexpected earnings (scaled by 4-year std)
    df['sue'] = df['unexpected_earnings']/df['std']
    # create rank
    df['decile_rank'] = pd.qcut(df['sue'], 10, labels=False)
    # filter for winners and losers and rename
    df = df[df.loc[:, 'decile_rank'].isin([0, 9])]
    df['Signal'] = np.where(df['decile_rank'] == 0, 'Short', 'Long')
    df = df[['Signal']]
    df.index.name = 'Stock'
    df.to_excel('./data/pead.xlsx')
    return df
def momentum(lookback_period=12):
    """
    Builds the momentum-strategy signal table.

    Daily prices are compounded into monthly returns; every stock is ranked
    by its mean monthly return over the lookback window with the most
    recent month excluded, and only the extremes are kept: bottom decile
    -> Short, top decile -> Long. The result is also written to
    ./data/momentum.xlsx.
    :param lookback_period: lookback period for momentum strategy
    :return: DataFrame indicating which stocks to long and short
    """
    # load price history
    prices = pd.read_parquet('./data/stock_returns.parquet.gzip')
    # drop columns with all #NA and last rows #NA --> not tradeable anymore
    prices = prices.dropna(axis=1, how='all')
    prices = prices.dropna(axis=1, subset=[prices.index[-5]], how='all')
    # daily returns, compounded into monthly returns
    daily_returns = prices.pct_change()
    monthly_returns = daily_returns.resample('M').agg(lambda x: (x + 1).prod() - 1)
    # restrict to the lookback window, then skip the most recent month
    window = monthly_returns.tail(n=lookback_period)
    window = window[:-1]
    # average monthly return per stock over the window
    ranking = window.mean(axis=0).to_frame('avg_return')
    ranking['decile_rank'] = pd.qcut(ranking['avg_return'], 10, labels=False)
    # keep only the extremes: decile 0 -> Short, decile 9 -> Long
    ranking = ranking[ranking.loc[:, 'decile_rank'].isin([0, 9])]
    ranking['Signal'] = np.where(ranking['decile_rank'] == 0, 'Short', 'Long')
    ranking = ranking[['Signal']]
    ranking.index.name = 'Stock'
    ranking.to_excel('./data/momentum.xlsx')
    return ranking
def g_score():
    """
    Creates the data for the G-Score strategy
    Steps:
    1) Load annual SEC data
    2) Load book to market ratios and only keep lowest quantile
    3) Calculate scores
    4) Create 2-digit-SIC code
    5) Only keep industries with at least 4 companies in it
    6) Calculate final score
    7) Create signal
    :return: DataFrame indicating which stocks to long and short
    """
    # load data
    df = pd.read_parquet("./data/financial_statements_annual.parquet.gzip")
    # create book to market ratio
    btm = book_to_market()
    # keep last quantile (growth / glamour stocks)
    btm['quantile_rank'] = pd.qcut(btm['book_to_market'], 5, labels=False)
    btm = btm[btm.loc[:, 'quantile_rank'] == 0]
    # keep companies in lowest quantile in financial DataFrame
    df = df.merge(btm, how='inner', left_on='ticker', right_index=True)
    # create sic 2 digit code (zero-pad SIC to 4 digits, take leading two)
    df['sic'] = df['sic'].astype(int).astype(str)
    df['sic'] = df['sic'].apply(lambda x: '{0:0>4}'.format(x))
    df['sic_2_digits'] = df['sic'].str[:2]
    # calculate avg assets last two years
    df['assets_avg'] = df.groupby('cik', sort=False)['Assets'].apply(
        lambda x: ((x+x.shift())/2)).to_numpy()
    # first year for every company --> keep assets of that year
    df['assets_avg'] = df['assets_avg'].fillna(df['Assets'])
    # assets at the beginning of the year (prior-year total assets)
    df['assets_begin'] = df.groupby('cik', sort=False)['Assets'].apply(
        lambda x: (x.shift())).to_numpy()
    # calculate RoA
    df['RoA'] = df['OperatingIncomeLoss']/df['assets_avg']
    # calculate RoA std per company (over that company's full history)
    df['RoA_var'] = df.groupby('cik')['RoA'].transform('std')
    # calculate CFO
    df['CFO'] = df['NetCashProvidedByUsedInOperatingActivities']/df['assets_avg']
    # calculate sales (revenue) growth per company
    df['Revenues'] = df['Revenues'].fillna(df['RevenueFromContractWithCustomerExcludingAssessedTax'])
    df['Sales_growth'] = df.groupby('cik')['Revenues'].pct_change()
    df['Sales_growth_var'] = df.groupby('cik')['Sales_growth'].transform('std')
    # for every company keep the latest values
    df = df.sort_values('year', ascending=False).drop_duplicates('cik').sort_index()
    # only keep industries with at least 4 companies
    # NOTE(review): len(x) > 4 keeps industries with 5+ companies, which
    # disagrees with the comment above -- confirm the intended threshold.
    df = df.groupby('sic_2_digits').filter(lambda x: len(x) > 4)
    # score 1 - RoA above industry median
    df['RoA_median_industry'] = df.groupby('sic_2_digits')['RoA'].transform('median')
    df['score_1'] = np.where(df['RoA'] > df['RoA_median_industry'], 1, 0)
    # score 2 - CFO above industry median
    df['CFO_median_industry'] = df.groupby('sic_2_digits')['CFO'].transform('median')
    df['score_2'] = np.where(df['CFO'] > df['CFO_median_industry'], 1, 0)
    # score 3 - Accruals (cash flow exceeds reported income)
    df['accrual'] = df['RoA']-df['CFO']
    df['score_3'] = np.where(df['CFO'] > df['RoA'], 1, 0)
    # score 4 - Variance RoA below industry median
    df['RoA_std_median_industry'] = df.groupby('sic_2_digits')['RoA_var'].transform('median')
    df['score_4'] = np.where(df['RoA_var'] < df['RoA_std_median_industry'], 1, 0)
    # score 5 - Variance Sales Growth below industry median
    df['Sales_growth_var_industry'] = df.groupby('sic_2_digits')['Sales_growth_var'].transform('median')
    df['score_5'] = np.where(df['Sales_growth_var'] < df['Sales_growth_var_industry'], 1, 0)
    # score 6 - R&D intensity above industry median
    df['RaD_intensity'] = df['ResearchAndDevelopmentExpense']/df['assets_begin']
    df['RaD_median_industry'] = df.groupby('sic_2_digits')['RaD_intensity'].transform('median')
    df['score_6'] = np.where(df['RaD_intensity'] > df['RaD_median_industry'], 1, 0)
    # score 7 - capital expenditure intensity above industry median
    df['capex'] = df['PaymentsToAcquirePropertyPlantAndEquipment']/df['assets_begin']
    df['capex_median_industry'] = df.groupby('sic_2_digits')['capex'].transform('median')
    df['score_7'] = np.where(df['capex'] > df['capex_median_industry'], 1, 0)
    # score 8 - advertising expense intensity above industry median
    df['ads'] = df['SellingGeneralAndAdministrativeExpense']/df['assets_begin']
    df['ads_median_industry'] = df.groupby('sic_2_digits')['ads'].transform('median')
    df['score_8'] = np.where(df['ads'] > df['ads_median_industry'], 1, 0)
    # count number of missing values and remove big numbers
    df['missing_values'] = df[['RoA', 'CFO', 'accrual', 'RoA_var', 'Sales_growth_var',
                               'RaD_intensity', 'capex', 'ads']].isnull().sum(axis=1)
    df = df[df.loc[:, 'missing_values'] < 4]
    # final score
    df['score'] = df['score_1']+df['score_2']+df['score_3']+df['score_4']+df['score_5']+df['score_6']+\
                  df['score_7']+df['score_8']
    # create signal
    # NOTE: np.where coerces np.nan to the string 'nan' here; those rows are
    # dropped by the isin filter below.
    df['Signal'] = np.where(df['score'] >= 6, 'Long', np.where(df['score'] <= 2, 'Short', np.nan))
    df = df[df.loc[:, 'Signal'].isin(['Long', 'Short'])]
    df.index = df['ticker']
    df.index.name = 'Stock'
    df = df['Signal']
    df.to_excel('./data/g_score.xlsx')
    return df
def accrual_anatomy():
    """
    Creates the data for the accrual anatomy strategy
    Steps:
    1) load annual financial statement data
    2) Create delta columns
    3) Create average assets column
    4) For every company keep the latest statement
    5) Only keep companies with latest statements in the last 2 years
    6) Only keep companies that filled in all the needed tags
    7) Calculate accruals
    8) Calculate income rate, cash rate, accrual rate
    9) Create Signal based on cash component
    :return: DataFrame indicating which stocks to long and short
    """
    # load data
    df = pd.read_parquet("./data/financial_statements_annual.parquet.gzip")
    # create delta columns (year-over-year changes within each company;
    # sorting by year first so shift() compares consecutive years)
    df = df.sort_values(['year', 'cik']).reset_index(drop=True)
    df['Delta_Assets'] = df.groupby('cik', sort=False)['AssetsCurrent'].apply(
        lambda x: x - x.shift()).to_numpy()
    df['Delta_Cash'] = df.groupby('cik', sort=False)['CashAndCashEquivalentsAtCarryingValue'].apply(
        lambda x: x - x.shift()).to_numpy()
    df['Delta_Liab'] = df.groupby('cik', sort=False)['LiabilitiesCurrent'].apply(
        lambda x: x - x.shift()).to_numpy()
    df['Delta_Taxes'] = df.groupby('cik', sort=False)['IncomeTaxesPaid'].apply(
        lambda x: x - x.shift()).to_numpy()
    # Create assets AVG column (mean of current and prior year)
    df['AVG_Assets'] = df.groupby('cik', sort=False)['Assets'].apply(
        lambda x: (x + x.shift()) / 2).to_numpy()
    # keep needed columns
    df = df[['year', 'cik', 'name', 'ticker', 'Delta_Assets', 'Delta_Cash', 'Delta_Liab', 'Delta_Taxes',
             'DepreciationDepletionAndAmortization', 'AVG_Assets', 'OperatingIncomeLoss']]
    # for every company keep the latest values
    df = df.sort_values('year', ascending=False).drop_duplicates('cik').sort_index()
    # only keep companies that filled all the needed tags
    df = df.dropna()
    # only keep companies that handed in their annual report in the last 2 years
    df = df[df.loc[:, 'year'] >= df['year'].max() - 1]
    # calculate accrual: change in non-cash working capital (net of the
    # tax-related liability change) minus depreciation
    df['Accrual'] = df['Delta_Assets'] - df['Delta_Cash'] - (df['Delta_Liab'] - df['Delta_Taxes']) - \
                    df['DepreciationDepletionAndAmortization']
    # split income (scaled by average assets) into accrual and cash parts
    df['Income_Rate'] = df['OperatingIncomeLoss'] / df['AVG_Assets']
    df['Accrual_Component'] = df['Accrual'] / df['AVG_Assets']
    df['Cash_Component'] = df['Income_Rate'] - df['Accrual_Component']
    # create rank
    df['decile_rank'] = pd.qcut(df['Cash_Component'], 10, labels=False)
    # filter for winners and losers and rename
    df = df[df.loc[:, 'decile_rank'].isin([0, 9])]
    df['Signal'] = np.where(df['decile_rank'] == 0, 'Short', 'Long')
    df.index = df['ticker']
    df = df[['Signal']]
    df.index.name = 'Stock'
    df.to_excel('./data/accruals.xlsx')
    return df
def betting_against_beta(start_date):
    """
    Creates the data for the betting-against-beta strategy.
    Steps:
    1) Download the Wilshire 5000 index from Yahoo Finance and compute daily returns
    2) Load the stored stock prices and compute daily returns
    3) Beta per stock = cov(stock return, market return) / var(market return)
    4) Create long and short signals: stocks with beta at or above the median
       are shorted, the rest are bought
    :param start_date: first date of the Wilshire 5000 history to pull
    :return: DataFrame indicating which stocks to long and short
    """
    # market index: daily percentage returns and their variance
    index_history = yf.Ticker('^W5000').history(start=start_date)
    index_returns = index_history.pct_change()
    market_variance = index_returns['Close'].var()
    # stock universe
    prices = pd.read_parquet('./data/stock_returns.parquet.gzip')
    # drop stocks with no data at all, and stocks with no recent data
    # (NA near the end --> not tradeable anymore)
    prices = prices.dropna(axis=1, how='all')
    prices = prices.dropna(axis=1, subset=[prices.index[-5]], how='all')
    # daily returns per stock
    stock_returns = prices.pct_change()
    # beta of every stock against the market
    betas = pd.Series(
        {symbol: index_returns['Close'].cov(stock_returns[symbol]) for symbol in stock_returns}
    ) / market_variance
    result = betas.to_frame('beta')
    # short the high-beta half, long the low-beta half
    cutoff = result['beta'].median()
    result['Signal'] = np.where(result['beta'] >= cutoff, 'Short', 'Long')
    result = result[['Signal']]
    result.index.name = 'Stock'
    result.to_excel('./data/beta.xlsx')
    return result
def equity_pairs():
    """
    Creates the data for the equity pairs strategy.
    Steps:
    1) load stock price data
    2) create daily returns, then compound them into monthly returns
    3) compute the pairwise correlation between all stocks and keep the top 50
       peers for every stock
    4) expected return of a stock = average last-month return of its 50 peers
    5) difference = actual last-month return - expected return
    6) build deciles on the difference: short the biggest positive differences,
       long the biggest negative differences
    :return: DataFrame indicating which stocks to long and short
    """
    # load prices; drop stocks with no data at all or no recent data
    # (NA near the end --> not tradeable anymore)
    monthly = pd.read_parquet('./data/stock_returns.parquet.gzip')
    monthly = monthly.dropna(axis=1, how='all')
    monthly = monthly.dropna(axis=1, subset=[monthly.index[-5]], how='all')
    # daily returns, compounded to monthly returns
    monthly = monthly.pct_change()
    monthly = monthly.resample('M').agg(lambda x: (x + 1).prod() - 1)
    # pairwise correlations as a long table
    pairs = monthly.corr().unstack().reset_index()
    pairs.columns = ['stock1', 'stock2', 'correlation']
    # a stock is not its own peer
    pairs = pairs[pairs['stock1'] != pairs['stock2']]
    # keep only the 50 most correlated peers per stock
    pairs['rank'] = pairs.groupby(['stock1'])['correlation'].rank(ascending=False)
    pairs = pairs[pairs['rank'] <= 50]
    # drop the current (partial) month, then take the last full month
    monthly = monthly[:-1]
    last_month = monthly.tail(n=1).T
    last_month.columns = ['exp_return']
    last_month.index.name = 'stock'
    # expected return: mean last-month return of the peer group
    pairs = pairs.merge(last_month, left_on='stock2', right_index=True, how='left')
    pairs = pairs.groupby(['stock1'])['exp_return'].mean().to_frame()
    # actual return of the stock itself last month
    last_month.columns = ['actual_return']
    pairs = pairs.merge(last_month, left_on='stock1', right_index=True, how='left')
    pairs['difference'] = pairs['actual_return'] - pairs['exp_return']
    pairs['decile_rank'] = pd.qcut(pairs['difference'], 10, labels=False)
    # extreme deciles only: underperformers are bought, outperformers shorted
    pairs = pairs[pairs['decile_rank'].isin([0, 9])]
    pairs['Signal'] = np.where(pairs['decile_rank'] == 0, 'Long', 'Short')
    pairs = pairs[['Signal']]
    pairs.index.name = 'Stock'
    pairs.to_excel('./data/equity_pairs.xlsx')
    return pairs
if __name__ == '__main__':
    # Run every strategy end-to-end; the visible strategies each write their
    # signal table to ./data/*.xlsx. The __main__ guard keeps these side
    # effects from firing when this module is imported.
    f_score()
    pead()
    momentum()
    g_score()
    accrual_anatomy()
    betting_against_beta('2015-01-01')
    equity_pairs()
|
{"hexsha": "d53ef58aa587d920dcd7a992b24a728612323f42", "size": 24014, "ext": "py", "lang": "Python", "max_stars_repo_path": "strategies.py", "max_stars_repo_name": "TimBstn/Fundamental-Trading-Strategies", "max_stars_repo_head_hexsha": "b620c129a8826e3cf99ecf19eb2a32d63058e352", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "strategies.py", "max_issues_repo_name": "TimBstn/Fundamental-Trading-Strategies", "max_issues_repo_head_hexsha": "b620c129a8826e3cf99ecf19eb2a32d63058e352", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "strategies.py", "max_forks_repo_name": "TimBstn/Fundamental-Trading-Strategies", "max_forks_repo_head_hexsha": "b620c129a8826e3cf99ecf19eb2a32d63058e352", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7634782609, "max_line_length": 135, "alphanum_fraction": 0.647913717, "include": true, "reason": "import numpy", "num_tokens": 6588}
|
"""
Use Gaussian distributions to randomly generate two sets. Then use bhatta_dist() on the sets. Compare the results to the
theoretical Bhattacharyya distance for the distributions.
The Bhattacharyya distance between two Gaussian distributions is given on this page:
https://en.wikipedia.org/wiki/Bhattacharyya_distance
Created on 4/17/2018
Author: Eric Williamson (ericpaulwill@gmail.com)
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from bhatta_dist import bhatta_dist
###We will generate our distributions from these parameters:
sigma1 = 0.5 #standard deviation
mu1 = 2.0 #mean
n1 = 5000 #population size
sigma2 = 1.5 #standard deviation of the second distribution
mu2 = 3.1 #mean of the second distribution
n2 = 10000 #population size of the second distribution
###Calculate theoretical Bhattacharyya distance:
#Closed form for two univariate Gaussians (see the Wikipedia page cited in the module docstring).
var1 = sigma1**2
var2 = sigma2**2
bdist_theory = np.log( (var1/var2 + var2/var1 + 2)/4 ) / 4 + (mu1-mu2)**2 / (var1+var2) / 4
###Generate random data that follows our normal distributions:
X1 = np.random.normal(mu1,sigma1,n1)
X2 = np.random.normal(mu2,sigma2,n2)
###Plot distributions of X1 and X2 to verify that we generated the data we want:
def get_density(x, cov_factor=0.1):
    """Return a continuous density function (gaussian_kde) fit to the data in `x`.

    `cov_factor` only sets the KDE bandwidth; some benefit may be gained from
    adjusting it. It introduces no scaling or bias.
    """
    kde = gaussian_kde(x)
    kde.covariance_factor = lambda: cov_factor
    kde._compute_covariance()
    return kde
N_STEPS = 200
d1 = get_density(X1)
d2 = get_density(X2)
xs = np.linspace(0,10,N_STEPS)
fig, ax = plt.subplots()
#Exact Gaussian pdfs, for visual comparison against the KDE estimates
p1 = np.exp(-((xs-mu1)**2) / (2*var1)) / np.sqrt(2*np.pi*var1)
p2 = np.exp(-((xs-mu2)**2) / (2*var2)) / np.sqrt(2*np.pi*var2)
ax.plot(xs,d1(xs))
ax.plot(xs,d2(xs))
ax.plot(xs,p1)
ax.plot(xs,p2)
ax.legend(['X1 density','X2 density','theoretical 1','theoretical 2'])
plt.show()
###Use bhatta_dist() function to calculate Bhattacharyya distance:
#Note that the 'noiseless' method is not tested here, since the feature we've generated is quantitative.
bdist_cont = bhatta_dist(X1,X2,method='continuous')
bdist_hist = bhatta_dist(X1,X2,method='hist')
bdist_ahist = bhatta_dist(X1,X2,method='autohist')
#Show results: each line reports the estimate and its deviation from the closed form
print("Theoretical: {:.3f}".format(bdist_theory))
print("Test 'continuous': {:.3f} Error: {:.6f}".format(bdist_cont, bdist_cont-bdist_theory))
print("Test 'hist': {:.3f} Error: {:.6f}".format(bdist_hist, bdist_hist-bdist_theory))
print("Test 'autohist': {:.3f} Error: {:.6f}".format(bdist_ahist, bdist_ahist-bdist_theory))
###Test 'noiseless' method by binning the values first (makes no sense for a real dataset, just for testing):
#Shared bin range over the pooled samples keeps the two histograms comparable
cX = np.concatenate((X1,X2))
N_BINS = 20
h1 = np.histogram(X1,bins=N_BINS,range=(min(cX),max(cX)), density=False)[0]
h2 = np.histogram(X2,bins=N_BINS,range=(min(cX),max(cX)), density=False)[0]
#Expand the histogram counts back into lists of discrete bin-index "samples"
fakeX1 = []
fakeX2 = []
for i in range(N_BINS):
    fakeX1 += [i] * h1[i]
    fakeX2 += [i] * h2[i]
bdist_nl = bhatta_dist(fakeX1,fakeX2,method='noiseless')
print("Test 'noiseless': {:.3f} Error: {:.6f}".format(bdist_nl, bdist_nl-bdist_theory))
|
{"hexsha": "2fdfebe18856dd94f431455f647129433f854060", "size": 3153, "ext": "py", "lang": "Python", "max_stars_repo_path": "verification/bhatta_test.py", "max_stars_repo_name": "EricPWilliamson/bhattacharyya-distance", "max_stars_repo_head_hexsha": "d67498d58bed342151c9d820a520254a503abdc8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-04-14T14:27:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T02:27:07.000Z", "max_issues_repo_path": "verification/bhatta_test.py", "max_issues_repo_name": "EricPWilliamson/bhattacharyya-distance", "max_issues_repo_head_hexsha": "d67498d58bed342151c9d820a520254a503abdc8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-02T05:25:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-02T05:25:06.000Z", "max_forks_repo_path": "verification/bhatta_test.py", "max_forks_repo_name": "EricPWilliamson/bhattacharyya-distance", "max_forks_repo_head_hexsha": "d67498d58bed342151c9d820a520254a503abdc8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-02-02T07:51:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-20T07:55:39.000Z", "avg_line_length": 39.4125, "max_line_length": 123, "alphanum_fraction": 0.7002854424, "include": true, "reason": "import numpy,from scipy", "num_tokens": 963}
|
import torch.utils.data as data
from torchvision import datasets, models, transforms
IN_SIZE = 224
import pickle
from PIL import Image
import matplotlib.pyplot as plt
import os
import os.path
import sys
import numpy as np
import torch
# Project paths: the dataset is expected under <cwd>/data.
project_root = os.getcwd()
data_root = f"{project_root}/data"
def get_image_attributes(imfile):
    """Parse a dataset file name into its attribute strings.

    File names follow the pattern
    ``<category>-<id>-<background>-<elev>-<azimuth>-<light>-<focus>.<ext>``.
    Returns (category, obj_instance, background, elev, azimuth, light, focus),
    where obj_instance is ``<category>_<id>``.
    """
    fields = imfile.split('.')[0].split('-')
    category = fields[0]
    obj_instance = "%s_%s" % (category, fields[1])
    background, elev = fields[2], fields[3]
    azimuth, light, focus = fields[4], fields[5], fields[6]
    return category, obj_instance, background, elev, azimuth, light, focus
def make_dataset(list_file):
    """Read a newline-separated file list and return (image paths, labels).

    Each image path is the listed relative path joined under `data_root`;
    each label is the relative path itself.
    """
    images, labels = [], []
    with open(list_file, 'r') as handle:
        for raw_line in handle:
            relative = raw_line.rstrip()
            images.append("%s/%s" % (data_root, relative))
            labels.append(relative)
    return images, labels
class FileListFolder(data.Dataset):
    """Dataset built from an explicit list of image files.

    Category and azimuth labels are parsed from each file name via
    get_image_attributes().
    """

    def __init__(self, file_list, attributes_dict, transform):
        """
        Args:
            file_list (str): path to a newline-separated list of image files
            attributes_dict (str): path to a pickled attributes dictionary
            transform (callable or None): optional transform applied to each image
        """
        samples, targets = make_dataset(file_list)
        if len(samples) == 0:
            raise RuntimeError("Found 0 samples")
        self.root = file_list
        self.samples = samples
        self.targets = targets
        self.transform = transform
        with open(attributes_dict, 'rb') as F:
            self.attributes = pickle.load(F)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (sample, labels, impath) where labels is a LongTensor of
            [0, azimuth_index, 0, category_index].
        """
        cats_list = ['bus', 'car', 'plane', 'heli', 'tank', 'monster']
        azimuths = ['r01', 'r02', 'r03', 'r04', 'r06', 'r07']
        impath = self.samples[index]
        sample = Image.open(impath)
        imname = impath.split('/')[-1]
        category, obj_instance, background, elev, azimuth, light, focus = get_image_attributes(imname)
        cat_num = cats_list.index(category)
        azimuth_num = azimuths.index(azimuth)
        sample_label = [float(s) for s in [0, azimuth_num, 0, cat_num]]
        # BUG FIX: previously the transformed sample was only assigned inside
        # `if self.transform is not None:`, so a None transform raised
        # NameError at the return statement. Now the raw image is returned
        # when no transform is given.
        if self.transform is not None:
            sample = self.transform(sample)
        transformed_labels = torch.LongTensor(sample_label)
        return sample, transformed_labels, impath

    def __len__(self):
        return len(self.samples)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
|
{"hexsha": "ed0a48e2686c7720cd6b7206fc3a55a7eeb6762a", "size": 3470, "ext": "py", "lang": "Python", "max_stars_repo_path": "res/loader/multi_attribute_loader_file_list_ilab.py", "max_stars_repo_name": "GregoryEHunter/generalization_to_OOD_category_viewpoint_combinations", "max_stars_repo_head_hexsha": "52aacbb3420639cae64ce65085c17b245e5ef865", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2022-02-21T17:15:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T16:06:40.000Z", "max_issues_repo_path": "res/loader/multi_attribute_loader_file_list_ilab.py", "max_issues_repo_name": "GregoryEHunter/generalization_to_OOD_category_viewpoint_combinations", "max_issues_repo_head_hexsha": "52aacbb3420639cae64ce65085c17b245e5ef865", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "res/loader/multi_attribute_loader_file_list_ilab.py", "max_forks_repo_name": "GregoryEHunter/generalization_to_OOD_category_viewpoint_combinations", "max_forks_repo_head_hexsha": "52aacbb3420639cae64ce65085c17b245e5ef865", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-29T16:23:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T16:23:46.000Z", "avg_line_length": 28.9166666667, "max_line_length": 112, "alphanum_fraction": 0.590778098, "include": true, "reason": "import numpy", "num_tokens": 836}
|
##
# @file Placer.py
# @author Yibo Lin (DREAMPlace), Rachel Selina Rajarathnam (DREAMPlaceFPGA)
# @date Sep 2020
# @brief Main file to run the entire placement flow.
#
import matplotlib
matplotlib.use('Agg')
import os
import sys
import time
import numpy as np
import logging
# for consistency between python2 and python3
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if root_dir not in sys.path:
sys.path.append(root_dir)
import dreamplacefpga.configure as configure
from Params import *
from PlaceDB import *
from NonLinearPlace import *
import pdb
def placeFPGA(params):
    """
    @brief Top API to run the entire placement flow.
    @param params parameters loaded from the input json
    """
    assert (not params.gpu) or configure.compile_configurations["CUDA_FOUND"] == 'TRUE', \
            "CANNOT enable GPU without CUDA compiled"
    np.random.seed(params.random_seed)
    # Read Database
    tt = time.time()
    placedb = PlaceDBFPGA()
    placedb(params)  # load the design into the placement database
    # Global placement
    tt = time.time()
    placer = NonLinearPlaceFPGA(params, placedb)
    metrics = placer(params, placedb)
    logging.info("Global Placement completed in %.2f seconds" % (time.time()-tt))
    # write placement solution
    path = "%s/%s" % (params.result_dir, params.design_name())
    os.makedirs(path, exist_ok=True)  # portable replacement for os.system("mkdir -p ...")
    gp_out_file = os.path.join(path, "%s.gp.pl" % (params.design_name()))
    placedb.write(params, gp_out_file)
    # legalization and detailed placement using the external elfPlace binary
    if params.legalize_and_detailed_place_flag and os.path.exists("thirdparty/elfPlace_LG_DP"):
        import shutil
        # elfPlace binary picks up a file named gp.pl in the current directory
        shutil.copy(gp_out_file, "gp.pl")
        out_file = os.path.join(path, "%s_final.%s" % (params.design_name(), params.solution_file_suffix()))
        cmd = "./thirdparty/elfPlace_LG_DP --aux %s --numThreads %s --pl %s" % (params.aux_input, params.num_threads, out_file)
        logging.info("Legalization and Detailed Placement run using elfPlace (CPU): %s" % (cmd))
        tt = time.time()
        os.system(cmd)
        logging.info("Legalization and detailed placement completed in %.3f seconds" % (time.time()-tt))
    elif params.legalize_and_detailed_place_flag == 0:
        logging.info("Legalization & Detailed Placement not run")
    else:
        logging.warning("External legalization & detailed placement engine NOT found at thirdparty/elfPlace_LG_DP")
def place(params):
    """
    @brief Top API to run the entire placement flow.
    @param params parameters loaded from the input json
    @return metrics collected during non-linear placement
    """
    assert (not params.gpu) or configure.compile_configurations["CUDA_FOUND"] == 'TRUE', \
            "CANNOT enable GPU without CUDA compiled"
    np.random.seed(params.random_seed)
    # read database
    tt = time.time()
    placedb = PlaceDB.PlaceDB()
    placedb(params)
    # solve placement
    tt = time.time()
    placer = NonLinearPlace.NonLinearPlace(params, placedb)
    metrics = placer(params, placedb)
    logging.info("non-linear placement takes %.2f seconds" % (time.time()-tt))
    # write placement solution
    path = "%s/%s" % (params.result_dir, params.design_name())
    os.makedirs(path, exist_ok=True)  # portable replacement for os.system("mkdir -p ...")
    gp_out_file = os.path.join(path, "%s.gp.%s" % (params.design_name(), params.solution_file_suffix()))
    placedb.write(params, gp_out_file)
    # TODO: hook up an external detailed placement engine here if needed.
    # Earlier revisions shelled out to NTUplace3/NTUplace4h (Bookshelf format)
    # or NTUplace_4dr (LEF/DEF format) via params.detailed_place_engine; see
    # the project history for the exact command lines.
    return metrics
if __name__ == "__main__":
    """
    @brief main function to invoke the entire placement flow.
    """
    logging.root.name = 'DREAMPlaceFPGA'
    logging.basicConfig(level=logging.INFO, format='[%(levelname)-7s] %(name)s - %(message)s', stream=sys.stdout)
    if len(sys.argv) < 2:
        logging.error("Input parameters required in json format")
        # BUG FIX: previously execution fell through after the error and
        # crashed with a NameError on `params` below; exit explicitly instead.
        sys.exit(1)
    paramsArray = []
    for i in range(1, len(sys.argv)):
        params = ParamsFPGA()
        params.load(sys.argv[i])
        paramsArray.append(params)
    logging.info("Parameters[%d] = %s" % (len(paramsArray), paramsArray))
    # Settings to minimize non-determinism.
    # NOTE(review): the seeds below come from the *last* params file only —
    # presumably intentional for single-design runs; confirm for multi-design runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.enabled = False
    torch.manual_seed(params.random_seed)
    np.random.seed(params.random_seed)
    if params.gpu:
        torch.cuda.manual_seed_all(params.random_seed)
        torch.cuda.manual_seed(params.random_seed)
    tt = time.time()
    for params in paramsArray:
        placeFPGA(params)
    logging.info("Completed Placement in %.3f seconds" % (time.time()-tt))
|
{"hexsha": "cc17d54c09603aa8a280b2c8ce1048f9611a9171", "size": 8787, "ext": "py", "lang": "Python", "max_stars_repo_path": "dreamplacefpga/Placer.py", "max_stars_repo_name": "rachelselinar/DREAMPlaceFPGA", "max_stars_repo_head_hexsha": "b8dd961718144a7c2471dd670379c3d1923171f9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2021-11-05T13:20:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T17:16:08.000Z", "max_issues_repo_path": "dreamplacefpga/Placer.py", "max_issues_repo_name": "rachelselinar/DREAMPlaceFPGA", "max_issues_repo_head_hexsha": "b8dd961718144a7c2471dd670379c3d1923171f9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-25T07:35:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T03:08:45.000Z", "max_forks_repo_path": "dreamplacefpga/Placer.py", "max_forks_repo_name": "rachelselinar/DREAMPlaceFPGA", "max_forks_repo_head_hexsha": "b8dd961718144a7c2471dd670379c3d1923171f9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-11-16T14:33:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T02:21:51.000Z", "avg_line_length": 45.765625, "max_line_length": 194, "alphanum_fraction": 0.6442471833, "include": true, "reason": "import numpy", "num_tokens": 2219}
|
import numpy as np
import pandas as pd
import importlib
from qlib.data.ops import ElemOperator, PairOperator
from qlib.config import C
from qlib.data.cache import H
from qlib.data.data import Cal
from qlib.contrib.ops.high_freq import get_calendar_day
class DayLast(ElemOperator):
    """DayLast Operator

    Broadcast the last value of each calendar day over every observation of
    that day.

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a series of that each value equals the last value of its day
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        day_index = get_calendar_day(freq=freq)
        values = self.feature.load(instrument, start_index, end_index, freq)
        # group intra-day samples by calendar day and spread each day's final
        # value across the whole group
        return values.groupby(day_index[values.index]).transform("last")
class FFillNan(ElemOperator):
    """FFillNan Operator

    Forward-fills NaN values in the loaded feature series.

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a forward fill nan feature
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        series = self.feature.load(instrument, start_index, end_index, freq)
        # Series.ffill() is equivalent to fillna(method="ffill"), which is
        # deprecated in recent pandas versions.
        return series.ffill()
class BFillNan(ElemOperator):
    """BFillNan Operator

    Backward-fills NaN values in the loaded feature series.

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a backward fill nan feature
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        series = self.feature.load(instrument, start_index, end_index, freq)
        # Series.bfill() is equivalent to fillna(method="bfill"), which is
        # deprecated in recent pandas versions.
        return series.bfill()
class Date(ElemOperator):
    """Date Operator

    Map every timestamp in the feature's index to its calendar day.

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        a series of that each value is the date corresponding to feature.index
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        day_index = get_calendar_day(freq=freq)
        values = self.feature.load(instrument, start_index, end_index, freq)
        # same index as the feature, values replaced by the calendar day
        return pd.Series(day_index[values.index], index=values.index)
class Select(PairOperator):
    """Select Operator

    Filter the right feature by the boolean condition given by the left feature.

    Parameters
    ----------
    feature_left : Expression
        feature instance, select condition
    feature_right : Expression
        feature instance, select value

    Returns
    ----------
    feature:
        value(feature_right) that meets the condition(feature_left)
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        condition = self.feature_left.load(instrument, start_index, end_index, freq)
        values = self.feature_right.load(instrument, start_index, end_index, freq)
        # keep only the rows where the condition holds
        return values.loc[condition]
class IsNull(ElemOperator):
    """IsNull Operator

    Element-wise test for missing values in the loaded feature.

    Parameters
    ----------
    feature : Expression
        feature instance

    Returns
    ----------
    feature:
        A series indicating whether the feature is nan
    """

    def _load_internal(self, instrument, start_index, end_index, freq):
        values = self.feature.load(instrument, start_index, end_index, freq)
        return values.isnull()
class Cut(ElemOperator):
    """Cut Operator

    Parameters
    ----------
    feature : Expression
        feature instance
    l : int
        l > 0, delete the first l elements of feature (default is None, which means 0)
    r : int
        r < 0, delete the last -r elements of feature (default is None, which means 0)

    Returns
    ----------
    feature:
        A series with the first l and last -r elements deleted from the feature.
        Note: It is deleted from the raw data, not the sliced data
    """

    def __init__(self, feature, l=None, r=None):
        self.l = l
        self.r = r
        # validate the trim bounds up front: l trims from the front, r from the back
        if (self.l is not None and self.l <= 0) or (self.r is not None and self.r >= 0):
            # typo fix: "shoud" -> "should"
            raise ValueError("Cut operator l should > 0 and r should < 0")
        super(Cut, self).__init__(feature)

    def _load_internal(self, instrument, start_index, end_index, freq):
        series = self.feature.load(instrument, start_index, end_index, freq)
        # slicing with l=None / r=None keeps the corresponding end intact
        return series.iloc[self.l : self.r]

    def get_extended_window_size(self):
        # the trimmed elements must still be loaded, so widen the parent window
        ll = 0 if self.l is None else self.l
        rr = 0 if self.r is None else abs(self.r)
        lft_etd, rght_etd = self.feature.get_extended_window_size()
        return lft_etd + ll, rght_etd + rr
|
{"hexsha": "175f4f66be772e35d95ab0e9d4eaec76b66c732c", "size": 4524, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/highfreq/highfreq_ops.py", "max_stars_repo_name": "wan9c9/qlib", "max_stars_repo_head_hexsha": "cc95099d7696ca850205b8ca220a99fba35a637a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8637, "max_stars_repo_stars_event_min_datetime": "2020-09-21T05:07:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:02:54.000Z", "max_issues_repo_path": "examples/highfreq/highfreq_ops.py", "max_issues_repo_name": "Sainpse/qlib", "max_issues_repo_head_hexsha": "84103c7d43eaa0ff74118a4d05884f659f0548eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 711, "max_issues_repo_issues_event_min_datetime": "2020-09-21T03:32:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:18:42.000Z", "max_forks_repo_path": "examples/highfreq/highfreq_ops.py", "max_forks_repo_name": "Sainpse/qlib", "max_forks_repo_head_hexsha": "84103c7d43eaa0ff74118a4d05884f659f0548eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1569, "max_forks_repo_forks_event_min_datetime": "2020-09-21T10:21:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T01:14:12.000Z", "avg_line_length": 26.9285714286, "max_line_length": 91, "alphanum_fraction": 0.6443412909, "include": true, "reason": "import numpy", "num_tokens": 1030}
|
""" Tonti Diagrams
This file includes a framework for constructing and evaluating Tonti diagrams.
Tonti diagrams are stored as ACSets, and have an imperative interface for
describing physical variables and the relationships between them. This tooling
also lets a Tonti diagram be converted to a vectorfield, allowing for
simulation of physical systems through any traditional vectorfield solver.
"""
module TontiDiagrams
using Catlab.Present
using Catlab.Theories
using Catlab.CategoricalAlgebra
using Catlab.CategoricalAlgebra.FinSets
using Catlab.Graphs
import Catlab.Graphs: Graph
using Catlab.Graphics
using Catlab.Programs
using Catlab.WiringDiagrams
using CombinatorialSpaces
using CombinatorialSpaces: ⋆
export TheoryTontiDiagram, TontiDiagram, Space,
add_variable!, add_variables!,
add_derivative!, add_derivatives!,
add_time_dep!,
add_laplacian!, add_transition!, add_bc!,
vectorfield, Open, gen_form
""" ACSet definition for a Tonti diagram.
This diagram is visualized in the q.uiver framework
[here](https://q.uiver.app/?q=WzAsMTAsWzEsMiwiTyJdLFsyLDIsIkkiXSxbMSwxLCJWIl0sWzEsMywiVCJdLFswLDIsIkJDIl0sWzAsMCwiVEQiXSxbMCwzLCJGdW5jIl0sWzMsMCwiQ29tcCJdLFszLDEsIkRpbWVuc2lvbiJdLFszLDIsIkxhYmVsIl0sWzQsMiwiYmN2Il0sWzUsMiwiaW50ZWciLDAseyJvZmZzZXQiOi0xfV0sWzAsMiwib3YiLDFdLFswLDMsIm90IiwxXSxbMSwyLCJpdiIsMV0sWzEsMywiaXQiLDFdLFs1LDIsImRlcml2IiwyLHsib2Zmc2V0IjoxfV0sWzQsNiwiYmNmdW5jIiwyLHsic3R5bGUiOnsiYm9keSI6eyJuYW1lIjoiZGFzaGVkIn19fV0sWzMsNiwidGZ1bmMiLDAseyJzdHlsZSI6eyJib2R5Ijp7Im5hbWUiOiJkYXNoZWQifX19XSxbMiw5LCJzeW1ib2wiLDEseyJzdHlsZSI6eyJib2R5Ijp7Im5hbWUiOiJkYXNoZWQifX19XSxbMiw4LCJkaW1lbnNpb24iLDEseyJzdHlsZSI6eyJib2R5Ijp7Im5hbWUiOiJkYXNoZWQifX19XSxbMiw3LCJjb21wbGV4IiwxLHsic3R5bGUiOnsiYm9keSI6eyJuYW1lIjoiZGFzaGVkIn19fV1d).
This Tonti diagram definition is currently very close to the simulation level
and generalized away from the DEC tooling. Future work will be done to provide
a closer integration between Tonti diagrams and DEC.
See Catlab.jl documentation for a description of the @present syntax.
"""
@present TheoryTontiDiagram(FreeSchema) begin
  # Attribute data types: functions, complex ids, form dimensions, and names
  Func::Data
  Comp::Data
  Dimension::Data
  Label::Data
  # Objects: variables (V), transition inputs (I) and outputs (O),
  # transitions (T), boundary conditions (BC), and time dependencies (TD)
  V::Ob
  I::Ob
  O::Ob
  T::Ob
  BC::Ob
  TD::Ob
  # Each input/output links a variable to a transition
  iv::Hom(I,V)
  it::Hom(I,T)
  ov::Hom(O,V)
  ot::Hom(O,T)
  # A boundary condition applies to one variable
  bcv::Hom(BC,V)
  # A time dependency links a derivative variable to its integrated variable
  deriv::Hom(TD,V)
  integ::Hom(TD,V)
  # The function evaluated by each transition / boundary condition, and each
  # variable's complex, dimension, and symbol; labels name transitions and BCs
  tfunc::Attr(T,Func)
  bcfunc::Attr(BC,Func)
  complex::Attr(V,Comp)
  dimension::Attr(V,Dimension)
  symbol::Attr(V, Label)
  tlabel::Attr(T, Label)
  bclabel::Attr(BC, Label)
end
""" Space
Structure for storing the computed values for the primal/dual complex, the
boundary operators, the hodge stars, the laplacian operators, and the lie
operators. This is the structure which provides the DEC connection for the Tonti
diagram tooling.
Accessing the boundary, hodge, and laplacian operators is slightly complicated
by using arrays in a 1-indexed system. The hodge and boundary operators are
accessed with two indices, where the first determines the complex (1 for primal
and 2 for dual) and the second determines the dimension (1 for 0-forms, 2 for
1-forms, 3 for 2-forms, etc.). Thus, the boundary operator from dual 1-forms to
dual 2-forms is given as:
```julia
sp = Space(s)
sp.boundary[2,2]
```
The other goal of this structure is to cache the computed operators, yet
optimally this caching should be performed during the `vectorfield` operation.
"""
struct Space
  s::EmbeddedDeltaSet2D            # primal simplicial complex
  sd::EmbeddedDeltaDualComplex2D   # dual complex (barycentric subdivision)
  boundary    # boundary operators, indexed [complex (1=primal, 2=dual), form dimension + 1]
  hodge       # hodge-star operators, same indexing convention as `boundary`
  laplacian   # laplacian operators, indexed by form dimension + 1
  lie         # lie-derivative operators (left as `nothing` by the current constructor)
end
""" Space(s::EmbeddedDeltaSet3D)
Calculates all of the values stored in the `Space` object for a given complex
`s`.
"""
function Space(s::EmbeddedDeltaSet2D{O, P}) where {O, P}
sd = EmbeddedDeltaDualComplex2D{O, eltype(P), P}(s)
subdivide_duals!(sd, Barycenter())
boundary = Array{typeof(d(0,s)), 2}(undef, 2,2)
boundary[[1,2,3,4]] .= [d(0,s), dual_derivative(0,sd), d(1,s), dual_derivative(1,sd)]
hodge = Array{typeof(⋆(0,sd)), 2}(undef, 2,3)
hodge[[1,2,3,4,5,6]] .= [⋆(0,sd), inv(⋆(2,sd)), ⋆(1,sd), -1 .* inv(⋆(1,sd)), ⋆(2,sd), inv(⋆(0,sd))]
# TODO:
# Add Laplacian and Lie operators here
laplacian = Array{typeof(hodge[1,2]*boundary[1,1]),1}(undef,3)
laplacian[1] = hodge[2,3]*boundary[2,2]*hodge[1,2]*boundary[1,1]
laplacian[2] = hodge[2,2]*boundary[2,1]*hodge[1,3]*boundary[1,2] .+
boundary[1,1]*hodge[2,3]*boundary[2,2]*hodge[1,2]
laplacian[3] = boundary[1,2]*hodge[2,2]*boundary[2,1]*hodge[1,3]
lie_op = [hodge[2,3]*boundary[2,2]*hodge[1,2], hodge[2,2]*boundary[2,1]*hodge[1,3]]
lie = [(∂s,s,v)->(∂s .= boundary[1,2]*∧(Tuple{0,1},sd,s,bound[2,1]*v)),
(∂u,u,v)->(∂u .= (star_1*∧(Tuple{1,0},sd,v,inv_star_0*cobound_1_2*u) .+ cobound_0_1*star_2*∧(Tuple{1,1},sd,v,inv_star_1*u))),
]
Space(s, sd, boundary, hodge, laplacian, nothing)
end
# TODO:
# Make more petri-style simulation ACSet
#
# Make more Tonti-aware ACSet w/ support of multiple complexes

# These functions are necessary for defining a TontiDiagram data structure from
# the ACSet.
# The indexed morphisms speed up `incident` lookups on the arc/BC incidence
# maps; `symbol` is a unique index so variables can be found by name.
const AbstractTontiDiagram = AbstractACSetType(TheoryTontiDiagram)
const TontiDiagram = ACSetType(TheoryTontiDiagram,index=[:iv, :it, :ov, :ot, :bcv],
                               unique_index=[:symbol])

# Define an interface for an OpenTontiDiagram which allows Tonti diagrams to
# be composed over their corners (legs land in the V objects).
const OpenTontiDiagramOb, OpenTontiDiagram = OpenACSetTypes(TontiDiagram,
                                                            :V)
""" Open(td::TontiDiagram, states::Symbol)
Generates an OpenTontiDiagram with cospan legs on variables defined by the
symbols included in `states`. This OpenTontiDiagram can then be composed with
other OpenTontiDiagrams over a pattern given by an undirected wiring diagram.
```julia
OpenTontiDiagram(td, :x, :v)
```
"""
Open(td, states...) = OpenTontiDiagram{Function, Bool, Int64, Symbol}(td,
map(v->FinFunction([incident(td, v, :symbol)], nparts(td,:V)), states)...)
""" TontiDiagram()
Initialize an empty TontiDiagram object.
"""
TontiDiagram() = TontiDiagram{Function, Bool, Int64, Symbol}()
""" vectorfield(td::AbstractTontiDiagram, sp::Space)
Generates a Julia function which calculates the vectorfield of the Tonti
diagram. The state of the system is defined by a single vector which is a
flattening of all state variables of the system. Thus, this function returns
both the indices of each variable in the state-vector along with the
vectorfield function itself.
The resulting function has a signature of the form `f!(du, u, p, t)` and can be
passed to the DifferentialEquations.jl solver package.
"""
function vectorfield(td, sp::Space)
fv_mem, ft_mem = init_mem(td, sp)
dg = Graph(td)
order = topological_sort(dg)
state_vars = filter(x->length(incident(td,x,:ov))==0, 1:nparts(td, :V))
input_vars = Dict{Symbol, Tuple{Int64,Int64}}()
cur_head = 1
for i in state_vars
v_size = length(fv_mem[i])
input_vars[td[i,:symbol]] = (cur_head,cur_head+v_size-1)
cur_head += v_size
end
data_buffer = Dict{Type, Tuple{Array, Array}}(Float64=>(fv_mem, ft_mem))
function system(du, u, t, p)
if !(eltype(u) ∈ keys(data_buffer))
data_buffer[eltype(u)] = init_mem(td, sp, type=eltype(u))
end
v_mem, t_mem = data_buffer[eltype(u)]
for cur in order
# Check if current is a variable or transition
if cur > nparts(td, :V)
cur -= nparts(td, :V)
inputs = td[incident(td, cur, :it), :iv]
outputs = incident(td, cur, :ot)
td[cur,:tfunc](t_mem[outputs]..., v_mem[inputs]...)
else
inputs = incident(td, cur, :ov)
if(length(inputs) == 0)
# This means this is a state variable
data_source = input_vars[td[cur,:symbol]]
v_mem[cur] .= u[data_source[1]:data_source[2]]
else
v_mem[cur] .= 0
for i in inputs
v_mem[cur] .+= t_mem[i]
end
end
bcs = incident(td, cur, :bcv)
for bc in bcs
td[bc, :bcfunc](v_mem[cur])
end
end
end
# If a state variable does not have a derivative defined, we keep it out
# (we'll want to move these to the parameter argument instead)
du .= 0
for i in state_vars
state_range = input_vars[td[i,:symbol]]
out_var = td[incident(td, i, :integ), :deriv]
if length(out_var) != 0
du[state_range[1]:state_range[2]] .= v_mem[out_var[1]]
end
end
end
input_vars, system
end
""" add_variable!(td:TontiDiagram, symbol::Symbol, dimension::Int64, complex::Bool)
Adds a variable to the TontiDiagram system which can later be referenced by its
`symbol`. This constructor requires the dimensionality of the variable (0 ->
point, 1 -> line, etc.) and the complex it is defined on
(true -> primal/straight, false -> dual/twisted).
Definiting a system with a variable `v` defined on the primal lines would be
constructed as:
```@example
td = TontiDiagram()
add_variable!(td, :v, 1, true)
```
"""
function add_variable!(td, symbol::Symbol, dimension::Int64, complex::Bool)
add_part!(td, :V,symbol=symbol, dimension=dimension, complex=complex)
end
""" add_variables!(td:TontiDiagram, vars::Tuple{Symbol, Int64, Bool}...)
Adds multiple variables to the TontiDiagram system which can later be
referenced by their symbols. This constructor follows the same pattern as
`add_variable!` with each variable specified as a tuple of:
```julia
(symbol, dimension, complex)
```
Defining a system with a variable `v` defined on the primal lines, a variable
`p̃` defined on the dual surfaces, and a variable `C` defined on the primal
surfaces would be constructed as:
```@example
td = TontiDiagram()
add_variables!(td, (:v, 1, true), (:p̃, 1, true), (:C, 2, true))
```
"""
function add_variables!(td, vars::Tuple{Symbol, Int64, Bool}...)
for v in vars
add_variable!(td, v...)
end
end
""" add_transition!(td, dom_sym::Vector{Symbol}, func!, codom_sym::Vector{Symbol})
Adds a transition function from variables `dom_sym` to variables `codom_sym`
with its transition defined by `func!`. `func!` is expected to have the signature
`func(codom_sym..., dom_sym...)` and is expected to modify the values of the
`codom_sym` variables.
Defining a transition from variables `x` and `y` to `m` that calculates the
magnitude of the values in `x` and `y` as vector coordinates would be defined as:
```@example
td = TontiDiagram()
add_variables!(td, (:x, 0, 1), (:y, 0, 1). (:m, 0, 1))
add_transition!(td, [:x, :y], (m,x,y)->(m .= sqrt.(x .* y)), [:m])
```
"""
function add_transition!(td, dom_sym::Array{Symbol,1}, func!, codom_sym::Array{Symbol,1}; label=Symbol(""))
dom = [findfirst(v->v == s, td[:symbol]) for s in dom_sym]
codom = [findfirst(v->v == s, td[:symbol]) for s in codom_sym]
t = add_part!(td, :T, tfunc=func!, tlabel=label)
add_parts!(td, :I, length(dom), iv=dom, it=t)
add_parts!(td, :O, length(codom), ov=codom, ot=t)
end
""" add_derivative!(td::TontiDiagram, sp::Space, dom_sym::Symbol,
codom_sym::Symbol)
Adds a derivative transition from variable `dom_sym` to variable `codom_sym`
using the boundary operators from `sp`. This function determines which boundary
operator to use and inserts an appropriate transition between the two
variables.
Defining a spatial derivative relationship between the primal 0-form `x` and
the primal 1-form `Δx` cis given as follows:
```julia
add_derivative(td, sp, :x, :Δx)
```
"""
function add_derivative!(td, sp, dom_sym, codom_sym)
dom = findfirst(v->v==dom_sym, td[:symbol])
codom = findfirst(v->v==codom_sym, td[:symbol])
# TODO:
# Add tests for proper dimensions, complexes, etc.
# This will later be replaced as we pre-initialize all boundary operators
bound = sp.boundary[(td[dom,:complex] ? 1 : 2), td[dom,:dimension]+1]
func(x,y) = (x.=bound*y)
add_transition!(td, [dom_sym],func,[codom_sym]; label=:d)
end
""" add_derivatives!(td::TontiDiagram, sp::Space, vars:Pair{Symbol, Symbol}...)
Adds multiple derivative transition between pairs of variables, using the same
syntax as in `add_derivative!`.
Example usage:
```julia
add_derivatives!(td, sp, (:x,:Δx), (:y, :Δy))
```
"""
function add_derivatives!(td, sp, vars::Pair{Symbol, Symbol}...)
for v in vars
add_derivative!(td, sp, v[1],v[2])
end
end
""" add_time_dep!(td::TontiDiagram, deriv_sym::Symbol, integ_sym::Symbol)
Adds a time derivative relationship between the variables `deriv_sym` and
`integ_sym` (where `deriv_sym` is the time derivative of `integ_sym`). These
relationships are used to determine the state-variables of the system.
"""
function add_time_dep!(td, deriv_sym::Symbol, integ_sym::Symbol)
deriv = findfirst(v->v==deriv_sym, td[:symbol])
integ = findfirst(v->v==integ_sym, td[:symbol])
add_part!(td, :TD, integ=integ, deriv=deriv)
end
""" add_bc!(td::TontiDiagram, var_sys::Symbol, func::Function)
Adds a "boundary condition" to the variable `var_sys` by applying `func` to the
values of this variable during simulation. This function is the last one
evaluated on the data of the variable `var_sys`, and so can be used to enforce
any relevant boundary conditions.
TODO:
Add time dependency of boundary condition function to allow for time-varying BCs.
"""
function add_bc!(td, var_sym, func; label=:BC)
var = findfirst(v->v==var_sym, td[:symbol])
add_part!(td, :BC, bcfunc=func, bcv=var, bclabel=label)
end
# Note: This function can be made more efficient if combined with existing
# transformations.
# e.g. Advection-diffusion can be merged after the initial wedge
# product/coboundary operator
#
# Currently only defined on primal complices (can this be applied to dual
# complices?)
""" add_laplacian!(td::TontiDiagrams, sp::Space, dom_sym::Symbol, codom_sym::Symbol; coef::Float64)
Adds a transition which defines `codom_sym` as the laplacian of `dom_sym` with
a constant scaling factor of `coef`.
"""
function add_laplacian!(td, sp, dom_sym, codom_sym; coef=1.0)
sd = sp.sd
dom = findfirst(v->v==dom_sym, td[:symbol])
codom = findfirst(v->v==codom_sym, td[:symbol])
lap_op = sp.laplacian[td[dom,:dimension]+1] # laplace_beltrami(Val{td[dom,:dimension]},sd)
func(x,y) = (x .= coef * (lap_op*y))
add_transition!(td, [dom_sym], func, [codom_sym]; label=:Δ)
end
# 1D specialization of `init_mem` — intentionally unimplemented stub; the
# empty body returns `nothing`.
function init_mem(td, s::EmbeddedDeltaSet1D)
  # Fill out this function
end
# Allocate zeroed storage for every transition output (`t_mem`) and every
# variable (`v_mem`). A value's length is the number of k-cells it lives on,
# chosen by the variable's complex (primal vs dual) and form degree.
function init_mem(td, sp::Space; type=Float64)
  s = sp.s
  primal_size = [nv(s), ne(s), ntriangles(s)]
  dual_size = [ntriangles(s), ne(s), nv(s)]
  # Number of cells variable `v`'s values live on.
  n_cells(v) = (td[v,:complex] ? primal_size : dual_size)[td[v,:dimension]+1]
  t_mem = Array{Array{type,1},1}()
  v_mem = Array{Array{type,1},1}()
  for o in 1:nparts(td, :O)
    push!(t_mem, zeros(type, n_cells(td[o,:ov])))
  end
  for v in 1:nparts(td,:V)
    push!(v_mem, zeros(type, n_cells(v)))
  end
  v_mem, t_mem
end
# Build a directed dependency graph over the diagram: vertices 1..nparts(V)
# are variables, vertices nparts(V)+1.. are transitions (offset by `nvars`).
# Edges run variable -> transition for input arcs and transition -> variable
# for output arcs, so a topological sort yields a valid evaluation order.
# With `extended=true`, boundary conditions (self-loops on their variable)
# and time dependencies (deriv -> integ) are also added — used for the
# Graphviz visualization rather than for evaluation.
function Graph(td; extended=false)
  g = Graph()
  add_vertices!(g, nparts(td, :V) + nparts(td, :T))
  nvars = nparts(td, :V)
  for i in 1:nparts(td, :I)
    add_edge!(g, td[i,:iv], td[i,:it] + nvars)
  end
  for o in 1:nparts(td, :O)
    add_edge!(g, td[o,:ot] + nvars, td[o,:ov])
  end
  if extended
    for bc in 1:nparts(td, :BC)
      # Boundary conditions appear as self-loops on their variable.
      add_edge!(g, td[bc, :bcv], td[bc, :bcv])
    end
    for t in 1:nparts(td, :TD)
      add_edge!(g, td[t, :deriv], td[t, :integ])
    end
  end
  g
end
""" Construct a 0-form based on a scalar function
This operator accepts a scalar function and evaulates it at each point on the
simplex, returning a 0-form.
"""
function gen_form(s::EmbeddedDeltaSet2D, f::Function)
map(f, point(s))
end
# Convert a Tonti diagram to a Catlab PropertyGraph for Graphviz rendering.
# Variables are drawn as circles (dotted outline for dual-complex variables,
# fill colour by form degree); transitions are drawn as squares. Edge styling
# distinguishes transition arcs, labelled boundary-condition self-loops, and
# dotted time-dependency edges, relying on the edge insertion order produced
# by `Graph(td, extended=true)`.
function propertygraph(td::TontiDiagram;
                       prog::AbstractString="neato", graph_attrs::AbstractDict=Dict(),
                       node_attrs::AbstractDict=Dict(), edge_attrs::AbstractDict=Dict(:len=>"1.5"),
                       node_labels::Bool=true, edge_labels::Bool=false)
  nvars = nparts(td, :V)
  # Vertices 1..nvars are variables; the rest are transitions.
  isVar(v) = (v <= nvars)
  g = Graph(td, extended = true)
  # One fill colour per form degree (0-, 1-, 2-, 3-forms).
  dim_colors = ["green", "yellow", "red", "blue"]
  node_labeler(v) = begin
    if isVar(v)
      Dict(:label=>"$(td[v, :symbol])",
           # Dual-complex variables get a dotted outline.
           :style=>td[v,:complex] ? "filled" : "filled, dotted",
           :width=>"0.75", :height=>"0.75", :fixedsize=>"false",
           :shape=>"circle",
           :fillcolor=>dim_colors[td[v,:dimension]+1],
          )
    else
      Dict(:label=>"$(td[v - nvars, :tlabel])",
           :width=>"0.75", :height=>"0.75", :fixedsize=>"false",
           :shape=>"square"
          )
    end
  end
  # Edges were inserted in the order: transition arcs (I then O), boundary
  # conditions, then time dependencies — classify by running offset.
  edge_labeler(e) = begin
    if e <= nparts(td, :I) + nparts(td, :O)
      Dict(:color=>"black")
    else
      e -= nparts(td, :I) + nparts(td, :O)
      if e <= nparts(td, :BC)
        Dict(:color=>"black",
             :label=>"$(td[e, :bclabel])")
      else
        e -= nparts(td, :BC)
        # Remaining edges are time dependencies, drawn dotted.
        Dict(:color=>"black", :style=>"dotted")
      end
    end
  end
  PropertyGraph{Any}(g, node_labeler, edge_labeler;
                     prog = prog,
                     graph = merge!(Dict(:rankdir => "TB"), graph_attrs),
                     node = merge!(Graphics.GraphvizGraphs.default_node_attrs(node_labels), node_attrs),
                     edge = merge!(Dict(:arrowsize => "0.5"), edge_attrs),
                     )
end
# Render a Tonti diagram with Graphviz via its property-graph representation.
function Graphics.to_graphviz(td::TontiDiagram; kwargs...)
  to_graphviz(propertygraph(td; kwargs...))
end
end
|
{"hexsha": "7c049720c9bf73dd36f55886302992897b0ee06d", "size": 17536, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TontiDiagrams.jl", "max_stars_repo_name": "mgatlin3-gtri/Decapods.jl", "max_stars_repo_head_hexsha": "793c5992f2219e5d066a437ae9a719a8ed809a23", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-25T05:22:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T16:44:03.000Z", "max_issues_repo_path": "src/TontiDiagrams.jl", "max_issues_repo_name": "mgatlin3-gtri/Decapods.jl", "max_issues_repo_head_hexsha": "793c5992f2219e5d066a437ae9a719a8ed809a23", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-11-08T20:41:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T15:04:56.000Z", "max_forks_repo_path": "src/TontiDiagrams.jl", "max_forks_repo_name": "mgatlin3-gtri/Decapods.jl", "max_forks_repo_head_hexsha": "793c5992f2219e5d066a437ae9a719a8ed809a23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-17T18:35:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T01:33:11.000Z", "avg_line_length": 33.8532818533, "max_line_length": 728, "alphanum_fraction": 0.681455292, "num_tokens": 5512}
|
"""
This file generates the html for the Flood Risk Map.
"""
# -------
# IMPORTS
# -------
import geopandas as gpd # for loading/manipulating vector data
from shapely.geometry import Polygon
import rasterio # for loading/manipulating raster data
import folium # for creating the interactive map
import numpy as np
import pandas as pd
import os
from PIL import Image, ImageChops
import warnings
import logging
import time
warnings.filterwarnings('ignore', 'GeoSeries.notna', UserWarning)
# ---------
# FUNCTIONS
# ---------
def set_crs(df, crs):
    """
    Ensure `df` uses the coordinate reference system `crs`.

    If the frame already carries a parsed CRS object (pyproj-style, class
    named 'CRS'), reproject only when it differs from `crs`; otherwise simply
    tag the frame with `crs`.

    :param df: DataFrame (any object exposing `.crs`, and `.to_crs` when needed)
    :param crs: target coordinate reference system, e.g. 'EPSG:4326'
    :return: the frame, reprojected or tagged as needed
    """
    has_parsed_crs = type(df.crs).__name__ == 'CRS'
    if not has_parsed_crs:
        df.crs = crs
    elif str(df.crs).upper() != crs:
        df = df.to_crs(crs)
    return df
def add_choropleth(feature_group_name, paths, colours, map_obj, crs, bounding_box):
    """
    Add one choropleth-style layer to `map_obj` from several polygon files.

    Each file in `paths` is drawn with the fill colour at the same position in
    `colours`, all inside a single toggleable (initially hidden) feature group
    named `feature_group_name`. Files whose CRS does not match `crs` are
    silently skipped.

    :param feature_group_name: layer name shown in the layer control
    :param paths: list of vector files readable by geopandas
    :param colours: list of fill colours, parallel to `paths`
    :param map_obj: folium map to attach the layer to
    :param crs: expected coordinate reference system
    :param bounding_box: NOTE(review): accepted but never used — clipping is
        skipped here (see TODO below); confirm whether clipping should apply.
    :return: `map_obj` with the feature group attached
    """
    feature_group = folium.FeatureGroup(feature_group_name, show=False)
    for i, path in enumerate(paths):
        gp_df = gpd.read_file(path)
        # TODO: Figure out why clipping doesn't work for these created geometries. Or just maybe add an extra data_dict key.
        try:
            assert(str(gp_df.crs).lower() == crs.lower())
        except AssertionError:
            # Skip files whose CRS doesn't match rather than reprojecting.
            continue  # TODO: logging
        gp_df = set_crs(gp_df, crs)
        colour = colours[i]
        for _, gp_row in gp_df.iterrows():
            polygon_obj = gp_row.geometry
            # folium expects (lat, lon) ordering, i.e. (y, x).
            coords = list(zip(polygon_obj.exterior.coords.xy[1], polygon_obj.exterior.coords.xy[0]))
            polygon = folium.vector_layers.Polygon(coords,
                                                   fill_color=colour,
                                                   fill_opacity=0.7,
                                                   stroke=False)
            feature_group.add_child(polygon)
    map_obj.add_child(feature_group)
    return map_obj
def add_polygon(polygon_name, polygon_path, outline_colour, outline_thickness, map_obj, crs, bounding_box):
    """
    Add every MultiPolygon in the file at `polygon_path` to `map_obj` as a
    toggleable (initially hidden) feature group named `polygon_name`.

    Geometries are clipped to `bounding_box` and reprojected to `crs` first;
    rows whose geometry is not a MultiPolygon are skipped.

    :param polygon_name: layer name shown in the layer control
    :param polygon_path: vector file readable by geopandas
    :param outline_colour: outline colour passed to folium
    :param outline_thickness: forwarded to folium as `line_thickness`
    :param map_obj: folium map to attach the layer to
    :param crs: target coordinate reference system
    :param bounding_box: clipping geometry
    :return: `map_obj` with the feature group attached
    """
    frame = gpd.read_file(polygon_path)
    frame = set_crs(gpd.clip(frame, bounding_box), crs)
    group = folium.FeatureGroup(
        name=polygon_name,
        show=False,
    )
    for _, row in frame.iterrows():
        if type(row.geometry).__name__ != 'MultiPolygon':
            continue
        for part in row.geometry:
            # folium expects (lat, lon) ordering, i.e. (y, x).
            latlon = list(zip(part.exterior.coords.xy[1], part.exterior.coords.xy[0]))
            group.add_child(folium.vector_layers.Polygon(latlon, color=outline_colour, line_thickness=outline_thickness))
    map_obj.add_child(group)
    return map_obj
def add_line(line_name, line_path, line_colour, line_thickness, map_obj, crs, bounding_box):
    """
    Add every LineString in the file at `line_path` to `map_obj` as a
    toggleable (initially hidden) feature group named `line_name`.

    Data is reprojected to `crs` and clipped to `bounding_box`; rows whose
    geometry is not a LineString are skipped (see TODO for MultiLineStrings).

    :param line_name: layer name shown in the layer control
    :param line_path: vector file readable by geopandas
    :param line_colour: stroke colour
    :param line_thickness: stroke weight
    :param map_obj: folium map to attach the layer to
    :param crs: target coordinate reference system
    :param bounding_box: clipping geometry
    :return: `map_obj` with the feature group attached
    """
    frame = gpd.read_file(line_path)
    frame = gpd.clip(set_crs(frame, crs), bounding_box)
    group = folium.FeatureGroup(
        name=line_name,
        show=False,
    )
    for _, row in frame.iterrows():
        if type(row.geometry).__name__ != 'LineString':
            # TODO: deal with multiline strings
            continue
        # folium expects (lat, lon) ordering, i.e. (y, x).
        latlon = list(zip(row.geometry.xy[1], row.geometry.xy[0]))
        group.add_child(folium.vector_layers.PolyLine(latlon, color=line_colour, weight=line_thickness, opacity=1))
    map_obj.add_child(group)
    return map_obj
def add_markers(feature_group_name, marker_path, icon_url, map_obj, crs, bounding_box, file_suffix):
    """
    Add a marker for each feature in `marker_path` to `map_obj` as a
    toggleable (initially hidden) feature group.

    Point features are marked at their coordinates; MultiPolygon features at
    their centroid. Any other geometry type is now skipped — previously such
    rows either raised NameError (first row) or silently reused the previous
    row's coordinates.

    :param feature_group_name: layer name shown in the layer control
    :param marker_path: vector file readable by geopandas ('shp' or 'gpkg')
    :param icon_url: icon path relative to the site root (MapVars.website)
    :param map_obj: folium map to attach the layer to
    :param crs: target coordinate reference system
    :param bounding_box: clipping geometry
    :param file_suffix: file extension of `marker_path`; must be 'shp' or 'gpkg'
    :return: `map_obj` with the feature group attached
    :raises AssertionError: if `file_suffix` is not a supported format
    """
    gp_df = gpd.read_file(marker_path)
    gp_df = set_crs(gp_df, crs)
    gp_df = gpd.clip(gp_df, mask=bounding_box)
    try:
        assert (file_suffix in ['shp', 'gpkg'])
    except AssertionError:
        raise
        # TODO: logging
    feature_group = folium.FeatureGroup(
        name=feature_group_name,
        show=False,
    )
    icon_url = os.path.join(MapVars.website, icon_url)
    for _, gp_row in gp_df.iterrows():
        if gp_row.geometry.type == 'Point':
            lat = gp_row.geometry.y
            lon = gp_row.geometry.x
        elif gp_row.geometry.type == 'MultiPolygon':  # TODO: Could draw polygon with marker, ask Laurence
            lat = gp_row.geometry.centroid.y
            lon = gp_row.geometry.centroid.x
        else:
            # Unsupported geometry: skip instead of crashing on undefined lat/lon.
            continue
        icon = folium.features.CustomIcon(icon_url, icon_size=[14, 14])
        marker = folium.Marker(
            [lat, lon],
            icon=icon,
            icon_size=[8, 8],  # TODO: Ask about preferred icon size? 20?
        )
        feature_group.add_child(marker)
    feature_group.add_to(map_obj)
    return map_obj
def icon_colourmap(data_dict, out_dir, colour_col='marker_colour'):
    """
    Recolour marker icons and record the recoloured icon file per row.

    Adds a 'new_icon' column to `data_dict`; for every marker row with a
    colour in `colour_col`, the icon is recoloured via `recolour_icon` and
    the resulting path stored in that column.

    :param data_dict: DataFrame with 'icon', 'info_type' and `colour_col` columns
    :param out_dir: output directory containing the icon files
    :param colour_col: column holding the target hex colour
    :return: `data_dict` with the 'new_icon' column populated
    """
    data_dict['new_icon'] = pd.Series(index=data_dict.index, dtype=object)
    for row_name, row in data_dict.iterrows():
        wanted_colour = row[colour_col]
        if pd.isna(wanted_colour) or row.info_type != 'marker':
            continue
        data_dict.loc[row_name, 'new_icon'] = recolour_icon(row.icon, wanted_colour, out_dir)
    return data_dict
def recolour_icon(old_icon_file, new_colour_hex, out_dir, new_subdir='recoloured', old_color_hex='#418fde'):
    """
    Create (if needed) a copy of an SVG icon with `old_color_hex` replaced by
    `new_colour_hex`, stored under a `new_subdir` directory next to the icon.

    :param old_icon_file: icon path relative to `out_dir`; must be an .svg
    :param new_colour_hex: replacement colour, e.g. '#ff0000'
    :param out_dir: root output directory
    :param new_subdir: subdirectory (under the icon's directory) for recoloured copies
    :param old_color_hex: colour to replace
    :return: path of the recoloured icon, relative to `out_dir`
    :raises AssertionError: if the icon is not an SVG file
    """
    # TODO: If there is no colour, add path fill = the new colour.
    old_icon_file = os.path.join(out_dir, old_icon_file)
    directory_path, file_name = os.path.split(old_icon_file)
    directory_path = os.path.join(directory_path, new_subdir)
    if not os.path.isdir(directory_path):
        os.mkdir(directory_path)
    file_name, file_extension = os.path.splitext(file_name)
    new_colour_name = new_colour_hex[1:]  # hex colour without the leading '#'
    new_icon_file = os.path.join(directory_path, f"{file_name}_{new_colour_name}.svg")
    try:
        assert(file_extension == '.svg')
    except AssertionError:
        # TODO: warnings/logging
        raise
    if not os.path.isfile(new_icon_file):
        with open(old_icon_file, 'r') as f:
            text = f.read()
        # str.replace with no count already replaces every occurrence.
        text = text.replace(old_color_hex, new_colour_hex)
        with open(new_icon_file, 'w') as f:
            f.write(text)
    # BUG FIX: the original returned new_icon_file.strip(out_dir), but
    # str.strip removes *characters* from both ends, not a prefix, which
    # corrupts the returned path. Compute the path relative to out_dir.
    return os.path.relpath(new_icon_file, out_dir)
def png_to_tif(tif, dest_png):
    """
    Render an open rasterio dataset as a downsized blue/alpha PNG.

    The raster is clamped at zero, min-max scaled to 0-255, written into the
    blue and alpha channels of an RGBA image, downsized to 20% and saved to
    `dest_png`. NOTE(review): despite the name, this converts tif -> png.

    :param tif: open rasterio dataset
    :param dest_png: output PNG path
    :return: None
    """
    # tif files are crazy so they can have negative numbers and floats; convert to 8 bit.
    # TODO: add colourmap option
    raster = tif.read()
    # (Removed an unused full-raster copy `alpha` and an unused `zeroes`
    # array from the original — both wasted memory.)
    raster[raster < 0] = 0
    # Min-max scale to 0-255. NOTE(review): a constant raster would divide by
    # zero here — presumably real rasters never are; confirm.
    raster = (255 * (raster - raster.min())) / (raster.max() - raster.min())
    raster = raster.astype(int).squeeze()
    shape = raster.shape
    image = np.zeros([shape[0], shape[1], 4], dtype=np.uint8)
    image[:, :, 2] = raster  # blue channel
    image[:, :, 3] = raster  # alpha: brighter pixels are more opaque
    img = Image.fromarray(image)
    downsize_by = 0.2  # 20%
    new_size = [int(i * downsize_by) for i in shape]
    # Image.ANTIALIAS was an alias of LANCZOS and was removed in Pillow 10.
    img.thumbnail(new_size, Image.LANCZOS)
    # img = trim(img)
    img.save(dest_png)
    return None
def display_tif(src_tif, dest_png, dest_crs):
    """
    Overlay a GeoTIFF on the map by rendering it to a PNG first.

    NOTE(review): this function references the names `website`, `index` and
    `m`, none of which are parameters or module-level definitions (`m` exists
    only when the script runs as __main__, and the site URL lives on
    MapVars.website) — as written, calling it raises NameError. It appears to
    be dead code: the tif branch in `load_map_data` is commented out. Confirm
    before re-enabling.

    :param src_tif: source GeoTIFF path
    :param dest_png: PNG path to render to (created only if missing)
    :param dest_crs: CRS the tif is asserted to match
    :return: None
    """
    # TODO: Put into a function, where you can specify the colourmap/gradient.
    with rasterio.open(src_tif, 'r', driver='GTiff') as tif:
        bounds = tif.bounds
        if not os.path.exists(dest_png):
            png_to_tif(tif, dest_png)
        png_file = os.path.join(website, 'pngs', src_tif.replace('.tif', '.png'))
        image_overlay = folium.raster_layers.ImageOverlay(
            image=png_file,
            name=index,
            interactive=False,
            z_index=100,
            opacity=1,
            bounds=[[bounds.bottom, bounds.left], [bounds.top, bounds.right]],
            cross_origin=False,
        )
        m.add_child(image_overlay)
        assert(tif.crs == dest_crs)
    return None
def trim(img):
    """
    Crop away the uniform border of `img`.

    The border colour is sampled at the top-left pixel; the bounding box of
    everything differing from it (after a 2x gain and -100 offset threshold)
    is cropped out.

    :param img: PIL Image
    :return: the cropped image, or the original if no content box is found
    """
    background = Image.new(img.mode, img.size, img.getpixel((0, 0)))
    delta = ImageChops.difference(img, background)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    content_box = delta.getbbox()
    return img.crop(content_box) if content_box else img
class MapVars:
    """
    Hard-coded variables relating to the map.
    """
    # Root URL of the published site; icon URLs are joined onto this.
    website = 'https://nataliethurlby.github.io/useful_flood_data/'
    # Bounding box. NOTE(review): the compass names and the inline lat/lon
    # comments look swapped relative to each other (north/south hold the
    # values commented as longitudes, east/west those commented as
    # latitudes), but the values are used consistently throughout this
    # script — confirm the intended convention before changing anything here.
    north = 35  # lon_max
    east = -19  # lat_max
    south = 34  # lon_min
    west = -20  # lat_min
    # Map centre, ordered for folium's `location` argument.
    center = [np.mean([west, east]), np.mean([south, north])]
    # Corner ring used to build the shapely clipping polygon.
    bounding_box = [
        (north, west),
        (north, east),
        (south, east),
        (south, west),
    ]
    crs = 'EPSG:4326'  # Coordinate Reference System
    # name -> (tile URL template, attribution HTML)
    base_maps = {
        'OSM Humanitarian':
            ('https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png',
             '<a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, '
             'Tiles style by <a href="https://www.hotosm.org/" target="_blank">Humanitarian OpenStreetMap Team</a> '
             'hosted by <a href="https://openstreetmap.fr/" target="_blank">OpenStreetMap France</a>).'),
        'OSM Standard':
            ('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
             'Data by © <a href="http://openstreetmap.org">OpenStreetMap</a>, '
             'under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.'),
        'ESRI Satellite':
            ('https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}', 'ESRI'),
        # TODO: add attributions for basemaps
        'ESRI World Topo':
            (
                'https://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}', 'ESRI'),
        # TODO: add attributions for basemaps
    }
def map_setup():
    """
    Sets up the map object: blank base tiles, panning restricted to the
    bounding box, initial zoom fitted to it, and all base-map options added.

    NOTE(review): min_lat/max_lat are fed from MapVars.west/east and
    min_lon/max_lon from MapVars.south/north, matching the swapped axis
    naming used in MapVars — confirm the convention rather than "fixing"
    one side in isolation.

    :return: m - map object
    """
    m = folium.Map(
        location=MapVars.center,
        tiles=None,  # base layers are added explicitly below
        max_bounds=True,
        # prevent map from panning out of the bounding box:
        min_lat=MapVars.west,
        max_lat=MapVars.east,
        min_lon=MapVars.south,
        max_lon=MapVars.north,
    )
    # sets the ideal initial zoom level:
    sw = [MapVars.west, MapVars.south]
    ne = [MapVars.east, MapVars.north]
    m.fit_bounds(bounds=[sw, ne])
    # Add base map options:
    for map_name in MapVars.base_maps.keys():
        folium.TileLayer(MapVars.base_maps[map_name][0],
                         name=map_name,
                         attr=MapVars.base_maps[map_name][1],
                         min_zoom=9,  # done by trial and error currently
                         ).add_to(m)
    return m
def load_map_data(data_dict_df, m):
    """
    Add every layer described in the data dictionary to the map.

    Each row's `info_type` selects how its file is rendered: 'choropleth'
    (multiple ';'-separated paths/colours), 'marker', 'line' or 'polygon'.
    'image_overlay' rows are skipped (tif overlay support is disabled — see
    the commented-out block below); unknown types are logged as warnings.

    :param data_dict_df: DataFrame indexed by layer name, with `file_path`,
        `info_type` and the per-type columns (colour/thickness/new_icon)
    :param m: folium map object
    :return: the map with all layers attached
    """
    # Clipping polygon built from the bounding-box corner ring.
    polygon = Polygon([x[0], x[1]] for x in MapVars.bounding_box)
    for index, row in data_dict_df.iterrows():
        file_path = row.file_path
        file_suffix = file_path.split('.')[-1]
        # Data files live relative to the repository root, one level up.
        relative_path_to_data = os.path.join(os.path.dirname(__file__), '../')
        if row.info_type == 'image_overlay':
            continue
        # if file_suffix == 'tif':
        #     file_name = os.path.basename(file_path)
        #     png_file = os.path.join(out_dir, 'pngs', file_name.replace('.tif', '.png'))
        #     # img = display_tif(src_tif=file_path, dest_png = png_file, dest_crs=crs)
        #     continue  # TODO: Hopefully convert to geojson/geopackage because the pixels look bad.
        elif row.info_type == 'choropleth':
            # Several files share one layer; paths and colours are ';'-separated.
            multiple_paths = file_path.split(';')
            multiple_paths = [os.path.join(relative_path_to_data, path) for path in multiple_paths]
            m = add_choropleth(feature_group_name=index,
                               paths=multiple_paths,
                               colours=row.colour.split(';'),
                               map_obj=m,
                               crs=MapVars.crs,
                               bounding_box=polygon)
        elif row.info_type == 'marker':
            m = add_markers(feature_group_name=index,
                            marker_path=os.path.join(relative_path_to_data, file_path),
                            icon_url=row.new_icon,
                            map_obj=m,
                            crs=MapVars.crs,
                            bounding_box=polygon,
                            file_suffix=file_suffix)
        elif row.info_type == 'line':
            m = add_line(line_name=index,
                         line_path=os.path.join(relative_path_to_data, file_path),
                         line_colour=row.colour,
                         line_thickness=row.thickness,
                         map_obj=m,
                         crs=MapVars.crs,
                         bounding_box=polygon)
        elif row.info_type == 'polygon':
            m = add_polygon(polygon_name=index,
                            polygon_path=os.path.join(relative_path_to_data, file_path),
                            outline_colour=row.colour,
                            outline_thickness=row.thickness,
                            map_obj=m,
                            crs=MapVars.crs,
                            bounding_box=polygon)
        else:
            logging.warning(f"Info type detected not known: {row.info_type}")
    return m
if __name__ == '__main__':
    start = time.time()
    # Configure logging so the timing message below is actually emitted
    # (the root logger defaults to WARNING, silencing logging.info).
    logging.basicConfig(level=logging.INFO)
    out_dir = os.path.join(os.path.dirname(__file__), '../docs/')
    data_dict_file = os.path.join(os.path.dirname(__file__), '../data/data_control.csv')
    m = map_setup()
    # load data dict describing each map layer:
    data_dict_df = pd.read_csv(data_dict_file,
                               header=0,  # Row index 0 is the header row.
                               skiprows=[1],  # Row index 1 is the example row.
                               index_col='data_name',
                               skip_blank_lines=True)
    data_dict_df = data_dict_df[data_dict_df.index.notnull()]
    data_dict_df = icon_colourmap(data_dict_df, out_dir, colour_col='colour')  # icon colours:
    m = load_map_data(data_dict_df, m)
    # draw bounding box:
    bounding_box_coords = [(MapVars.west, MapVars.north),
                           (MapVars.east, MapVars.north),
                           (MapVars.east, MapVars.south),
                           (MapVars.west, MapVars.south)]
    bounding_box_poly = folium.vector_layers.Polygon(bounding_box_coords, color='#7a7a7a')
    m.add_child(bounding_box_poly)
    folium.LayerControl(collapsed=False).add_to(m)
    # -------
    # OUTPUT
    # ------
    html_file = os.path.join(out_dir, 'index.html')
    m.save(html_file)
    # Typo fix: the message previously said "sections" instead of "seconds".
    logging.info(f"Map html created in {time.time()-start:.2f} seconds")
|
{"hexsha": "719429cc1f5c154a111f72aab29c09b625e8ceaf", "size": 15957, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/create_map.py", "max_stars_repo_name": "NatalieThurlby/useful_flood_data", "max_stars_repo_head_hexsha": "66b041ef5176239fd370209afd0bbf910806ffdc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/create_map.py", "max_issues_repo_name": "NatalieThurlby/useful_flood_data", "max_issues_repo_head_hexsha": "66b041ef5176239fd370209afd0bbf910806ffdc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-12-08T15:28:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-26T14:59:50.000Z", "max_forks_repo_path": "scripts/create_map.py", "max_forks_repo_name": "NatalieThurlby/useful_flood_data", "max_forks_repo_head_hexsha": "66b041ef5176239fd370209afd0bbf910806ffdc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-02T09:57:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-02T09:57:08.000Z", "avg_line_length": 31.2882352941, "max_line_length": 124, "alphanum_fraction": 0.59428464, "include": true, "reason": "import numpy", "num_tokens": 3842}
|
import ccxt
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils.constants import OHLCV_COLS
class BinanceAPICallException(Exception):
    """Raised when a call to the Binance API fails."""
class IndicatorNotFoundException(Exception):
    """Raised when a requested indicator column is missing from the candles."""
exchange = ccxt.binance()
def get_price_by_coin_pair(pair: str = "BTC/USDT") -> float:
    """
    Fetch the latest traded price for a market pair from Binance.

    :param pair: market symbol such as "BTC/USDT" (case-insensitive)
    :return: last traded price
    :raises BinanceAPICallException: if the exchange call fails, or if the
        exchange does not support fetchTicker (the original implicitly
        returned None in that case, violating the declared return type).
    """
    try:
        if exchange.has['fetchTicker']:
            return float(exchange.fetch_ticker(pair.upper())['info']['lastPrice'])
    except Exception as e:
        raise BinanceAPICallException(e)
    raise BinanceAPICallException(f"fetchTicker not supported; cannot price {pair}")
def fetch_olhcv_candles_dataframe(
        symol="BNB/USDT",
        timeframe="5m",
        limit=50,
        emas=None
) -> pd.DataFrame:
    """
    Fetch OHLCV candles from Binance and optionally append EMA columns.

    NOTE(review): the first parameter name looks like a typo for "symbol" but
    is kept since callers may already pass it by keyword.

    :param symol: market symbol, e.g. "BNB/USDT"
    :param timeframe: candle interval accepted by ccxt, e.g. "5m"
    :param limit: number of candles to request
    :param emas: optional iterable of EMA spans; each span <= limit adds an
        'ema<span>' column
    :return: DataFrame with OHLCV_COLS columns plus any requested EMA columns
    """
    candles = exchange.fetch_ohlcv(symbol=symol, timeframe=timeframe, limit=limit)
    df = pd.DataFrame(candles, columns=OHLCV_COLS)
    # Raw timestamps are epoch milliseconds; convert to datetime64.
    df['timestamp'] = df['timestamp'].values.astype(dtype='datetime64[ms]')
    if emas is not None:
        for ema in emas:
            if ema <= limit:
                # NOTE(review): this indexes a 'closed' column — OHLCV columns
                # are conventionally named 'close'; confirm OHLCV_COLS really
                # contains 'closed', otherwise this raises KeyError.
                df['ema' + str(ema)] = df['closed'].ewm(span=ema, adjust=False).mean()
    return df
def get_current_ema_values(candles: pd.DataFrame, emas: list) -> list:
    """
    Read the most recent value of each requested EMA column.

    :param candles: DataFrame containing 'ema<span>' columns
    :param emas: EMA spans to look up, e.g. [9, 21, 50]
    :return: latest value of each EMA, in the order given
    :raises IndicatorNotFoundException: if a required EMA column is missing
    """
    latest_row = candles.iloc[-1]
    values = []
    for span in emas:
        column = 'ema' + str(span)
        if column not in candles.columns:
            raise IndicatorNotFoundException
        values.append(latest_row[column])
    return values
def get_series_orientation(series: pd.Series, plot=False) -> (float, list):
    """
    Measure the dominant direction of a series.

    Each step of the series contributes its sign (+1 up, -1 down, 0 flat);
    the orientation is the mean of those signs, so +1.0 means strictly
    increasing and -1.0 strictly decreasing.

    :param series: numeric series
    :param plot: when True, also display the series with matplotlib
    :return: (orientation, per-step gradient signs)
    """
    step_signs = np.sign(np.diff(series))
    mean_direction = step_signs.mean()
    if plot:
        series.plot()
        plt.show()
    return mean_direction, step_signs
def is_indicator_on_uptrend(
        candles: pd.DataFrame,
        emas: list,
        steps: int = 10,
        trend_threshold: float = 0.2,
        plot=True
) -> bool:
    """
    Majority vote over the requested EMAs: True when more than half of them
    have been trending upward over the last `steps` candles.

    An EMA counts as trending up when the mean step sign of its last `steps`
    values is at least `trend_threshold`. EMA spans larger than the number of
    candles are skipped entirely (they do not vote); if no EMA fits, the
    result is False.

    NOTE(review): `plot` defaults to True, so calling this with defaults pops
    a matplotlib window per EMA — likely a debugging leftover; confirm with
    callers before flipping the default.

    :param candles: DataFrame with the required 'ema<span>' columns
    :param emas: EMA spans to poll
    :param steps: number of most recent values to inspect per EMA
    :param trend_threshold: minimum mean step sign to count as an uptrend
    :param plot: forwarded to get_series_orientation for visual inspection
    :raises IndicatorNotFoundException: if a required EMA column is missing
    """
    uptrends = []
    for ema in emas:
        if ema <= candles.shape[0]:
            indicator_name = 'ema' + str(ema)
            if indicator_name not in candles.columns:
                raise IndicatorNotFoundException
            indicator_series = candles[indicator_name][-steps:]
            orientation, _ = get_series_orientation(indicator_series, plot=plot)
            uptrend = orientation >= trend_threshold
            uptrends.append(uptrend)
    return sum(uptrends) > len(uptrends) / 2
def is_ema_picking_momentum(candles: pd.DataFrame, emas: list) -> bool:
    """
    True when the shortest EMA sits at or above every longer EMA — i.e. the
    indicator stack suggests momentum is building.

    :param candles: DataFrame with the required 'ema<span>' columns
    :param emas: EMA spans, shortest first
    :raises IndicatorNotFoundException: if a required EMA column is missing
    """
    values = get_current_ema_values(candles, emas=emas)
    shortest = values[0]
    return all(shortest >= other for other in values[1:])
def is_ema_losing_momentum(candles: pd.DataFrame, emas: list) -> bool:
    """
    True when the shortest EMA has dropped below any longer EMA — i.e. the
    indicator stack suggests momentum is fading.

    :param candles: DataFrame with the required 'ema<span>' columns
    :param emas: EMA spans, shortest first
    :raises IndicatorNotFoundException: if a required EMA column is missing
    """
    values = get_current_ema_values(candles, emas=emas)
    shortest = values[0]
    return any(shortest < other for other in values[1:])
|
{"hexsha": "b29ad5c9b25680ff1103dfeca85191e4bead9c3d", "size": 3024, "ext": "py", "lang": "Python", "max_stars_repo_path": "technical_analysis/moving_averages.py", "max_stars_repo_name": "elmesaoudee/signarly", "max_stars_repo_head_hexsha": "b642ea3a6d6064e6e9705115880cc93072c4ccb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-10-24T00:17:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T10:25:50.000Z", "max_issues_repo_path": "technical_analysis/moving_averages.py", "max_issues_repo_name": "elmesaoudee/signarly", "max_issues_repo_head_hexsha": "b642ea3a6d6064e6e9705115880cc93072c4ccb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "technical_analysis/moving_averages.py", "max_forks_repo_name": "elmesaoudee/signarly", "max_forks_repo_head_hexsha": "b642ea3a6d6064e6e9705115880cc93072c4ccb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5853658537, "max_line_length": 86, "alphanum_fraction": 0.6547619048, "include": true, "reason": "import numpy", "num_tokens": 760}
|
# Minimal nimfa example: factorize a random matrix with the Pmfcc method
# (penalized matrix factorization; see the nimfa documentation for details).
import numpy as np
import nimfa

# Random non-negative 40x100 target matrix to factorize.
V = np.random.rand(40, 100)
# Rank-10 factorization, at most 30 iterations; `theta` is the (here random)
# penalty/constraint matrix, sized n x n for the n columns of V.
pmfcc = nimfa.Pmfcc(V, seed="random_vcol", rank=10, max_iter=30,
                    theta=np.random.rand(V.shape[1], V.shape[1]))
# Run the factorization.
pmfcc_fit = pmfcc()
|
{"hexsha": "93703c5327a3dc23e98491211b68f0e72f75163a", "size": 213, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/code/snippet_pmfcc.py", "max_stars_repo_name": "askerdb/nimfa", "max_stars_repo_head_hexsha": "3e3353e60d53fd409b53c46fde23f4f6fef64aaf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 325, "max_stars_repo_stars_event_min_datetime": "2015-04-05T01:37:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-01T08:06:02.000Z", "max_issues_repo_path": "docs/code/snippet_pmfcc.py", "max_issues_repo_name": "askerdb/nimfa", "max_issues_repo_head_hexsha": "3e3353e60d53fd409b53c46fde23f4f6fef64aaf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2015-03-30T12:55:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-17T10:53:54.000Z", "max_forks_repo_path": "docs/code/snippet_pmfcc.py", "max_forks_repo_name": "askerdb/nimfa", "max_forks_repo_head_hexsha": "3e3353e60d53fd409b53c46fde23f4f6fef64aaf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 104, "max_forks_repo_forks_event_min_datetime": "2015-03-25T22:42:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-30T23:06:36.000Z", "avg_line_length": 23.6666666667, "max_line_length": 65, "alphanum_fraction": 0.6384976526, "include": true, "reason": "import numpy", "num_tokens": 67}
|
# 2018-09-10
# Helper functions
import cv2
import numpy as np
def resize(img, scale_factor):
    """Return a copy of img shrunk by scale_factor (INTER_AREA suits downscaling)."""
    inv = 1 / scale_factor
    # Keep the exact original arithmetic (width * inv) so rounding via int()
    # matches the previous behaviour bit-for-bit.
    new_size = (int(img.shape[1] * inv), int(img.shape[0] * inv))
    return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
def pyramid(image, scale=1.5, min_size=(200, 80)):
    """Generate an image pyramid.

    Yields the original image first, then successively downscaled copies
    (each shrunk by `scale`) until either dimension drops below
    min_size = (min_width, min_height).
    """
    yield image
    while True:
        image = resize(image, scale)
        # Optionally blur between levels:
        # image = cv2.GaussianBlur(img, (11, 11), 0)
        too_small = (image.shape[0] < min_size[1]
                     or image.shape[1] < min_size[0])
        if too_small:
            return
        yield image
def slidingWindow(image, step, window_size):
"""
滑动窗口
"""
for y in range(0, image.shape[0], step):
for x in range(0, image.shape[1], step):
yield (x, y, image[y : y + window_size[1], x : x + window_size[0]])
def nonMaxSuppressionFast(boxes, overlapThresh):
    """
    Fast non-maximum suppression.

    An object is often detected several times; this keeps only the
    best-scoring rectangle of each overlapping group.

    Parameters:
        boxes: numpy array of shape (N, 5), rows are (x1, y1, x2, y2, score).
        overlapThresh: a remaining rectangle is suppressed when the ratio of
            its intersection with the currently kept rectangle to its own
            area exceeds this threshold.

    Returns:
        The surviving rows of `boxes` cast to int, or [] when boxes is empty.
    """
    if len(boxes) == 0:
        return []
    # Divisions follow, so promote integer boxes to float
    # (numpy dtype kind "i" == signed integer).
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")
    # Corner coordinates and confidence score per rectangle.
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    scores = boxes[:, 4]
    # Rectangle areas; +1 because the coordinates are inclusive pixels.
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort ASCENDING so idxs[-1] is always the highest remaining score.
    # BUGFIX: the original sorted descending ([::-1]) and then picked the
    # LAST element, which started suppression from the WORST-scoring box
    # and could discard better detections in favour of worse ones.
    idxs = np.argsort(scores)
    pick = []  # indices of the rectangles we keep
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]  # best-scoring rectangle still in play
        pick.append(i)
        # Intersection of box i with every other remaining box.
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # Overlap ratio relative to each other box's own area.
        overlap = (w * h) / area[idxs[:last]]
        # Drop box i itself plus everything it overlaps too heavily.
        idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0])))
    return boxes[pick].astype("int")
|
{"hexsha": "e1cd2602900333131ccf54f8a63393b163d018d0", "size": 2406, "ext": "py", "lang": "Python", "max_stars_repo_path": "Image/OpenCV/car_detect/func.py", "max_stars_repo_name": "YangXiaoo/NoteBook", "max_stars_repo_head_hexsha": "37056acad7a05b876832f72ac34d3d1a41e0dd22", "max_stars_repo_licenses": ["CNRI-Python", "RSA-MD", "CECILL-B"], "max_stars_count": 58, "max_stars_repo_stars_event_min_datetime": "2019-03-03T04:42:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T04:36:31.000Z", "max_issues_repo_path": "Image/OpenCV/car_detect/func.py", "max_issues_repo_name": "YangXiaoo/NoteBook", "max_issues_repo_head_hexsha": "37056acad7a05b876832f72ac34d3d1a41e0dd22", "max_issues_repo_licenses": ["CNRI-Python", "RSA-MD", "CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Image/OpenCV/car_detect/func.py", "max_forks_repo_name": "YangXiaoo/NoteBook", "max_forks_repo_head_hexsha": "37056acad7a05b876832f72ac34d3d1a41e0dd22", "max_forks_repo_licenses": ["CNRI-Python", "RSA-MD", "CECILL-B"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2019-08-11T01:25:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-22T06:46:06.000Z", "avg_line_length": 23.1346153846, "max_line_length": 138, "alphanum_fraction": 0.5498753117, "include": true, "reason": "import numpy", "num_tokens": 936}
|
## ---- load data ---- ##
# Survey export: tab-separated, UTF-8, first row holds the column names.
df<-read.table("rdata.csv", sep="\t", encoding="UTF-8", header=TRUE)
# Short names of the surveyed course modules.
modules<-c("ALP1", "ALP2", "ALP3", "ALP4", "ALP5", "SWP", "MafI1", "MafI2", "MafI3", "GTI", "PS", "DBS", "TI1", "TI2", "TI3", "TI4", "AWS", "SWT")
# Teaching weeks per semester for each module (parallel to `modules`).
wps <-c( 17, 13, 17, 13, 17, 15, 17, 13, 17, 13, 17, 13, 17, 13, 17, 13, 3, 13)
# Credit points (LP) per module (parallel to `modules`).
lps <-c( 8, 8, 8, 5, 5, 10, 8, 8, 8, 7, 3, 7, 5, 5, 5, 5, 4, 6)
# Shared plotting helpers.
source("plotting.r")
# Workload data points above this bound are assumed to be outliers.
dpupperbound<-80
## ------------ ##
## ---- remove unwanted ---- ##
# Drop SoSci-Survey bookkeeping columns (respondent IDs, questionnaire
# metadata, completeness/degradation measures) that carry no analysis value.
df$SERIAL<-NULL
df$REF<-NULL
df$QUESTNNR<-NULL
df$MODE<-NULL
df$STARTED<-NULL
df$MAILSENT<-NULL
df$LASTDATA<-NULL
df$LASTPAGE<-NULL
df$MISSING<-NULL
df$MISSREL<-NULL
df$DEG_MISS<-NULL
df$DEG_TIME<-NULL
df$DEGRADE<-NULL
# Per-page response-time columns TIME001..TIME040 are likewise unused.
df$TIME001<-NULL
df$TIME002<-NULL
df$TIME003<-NULL
df$TIME004<-NULL
df$TIME005<-NULL
df$TIME006<-NULL
df$TIME007<-NULL
df$TIME008<-NULL
df$TIME009<-NULL
df$TIME010<-NULL
df$TIME011<-NULL
df$TIME012<-NULL
df$TIME013<-NULL
df$TIME014<-NULL
df$TIME015<-NULL
df$TIME016<-NULL
df$TIME017<-NULL
df$TIME018<-NULL
df$TIME019<-NULL
df$TIME020<-NULL
df$TIME021<-NULL
df$TIME022<-NULL
df$TIME023<-NULL
df$TIME024<-NULL
df$TIME025<-NULL
df$TIME026<-NULL
df$TIME027<-NULL
df$TIME028<-NULL
df$TIME029<-NULL
df$TIME030<-NULL
df$TIME031<-NULL
df$TIME032<-NULL
df$TIME033<-NULL
df$TIME034<-NULL
df$TIME035<-NULL
df$TIME036<-NULL
df$TIME037<-NULL
df$TIME038<-NULL
df$TIME039<-NULL
df$TIME040<-NULL
## ------------ ##
## ---- choose labels and data types ---- ##
df$InZielgruppe<-factor(df$AD01, levels=c(1, 2), labels=c("Nein", "Ja"))
df$AD01<-NULL
df$Abgeschlossen<-factor(df$AD02, levels=c(1, 2), labels=c("Ja", "Nein"))
df$AD02<-NULL
df$Erststudium<-factor(df$AD03, levels=c(1, 2), labels=c("Ja", "Nein"))
df$AD03<-NULL
df$Fachsemester<-factor(df$AD04, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), labels=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", ">14"))
df$AD04<-NULL
df$TeilVollzeit<-factor(df$AD05, levels=c(1, 2, 3), labels=c("Vollzeit", "Teilzeit", "andere Regelung"))
df$AD05<-NULL
df$Gesundheitszustand<-factor(df$AD08_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$AD08_01<-NULL
df$eigenerComputer<-factor(df$AD10, levels=c(1, 2), labels=c("Ja", "Nein"))
df$AD10<-NULL
df$Vorwissen<-factor(df$AD12, levels=c(1, 2), labels=c("Ja", "Nein"))
df$AD12<-NULL
df$ALP1Absolviert<-df$AM01_01
df$AM01_01<-NULL
df$ALP2Absolviert<-df$AM01_02
df$AM01_02<-NULL
df$ALP3Absolviert<-df$AM01_03
df$AM01_03<-NULL
df$ALP4Absolviert<-df$AM01_04
df$AM01_04<-NULL
df$ALP5Absolviert<-df$AM01_05
df$AM01_05<-NULL
df$SWPAbsolviert<-df$AM01_18
df$AM01_18<-NULL
df$MafI1Absolviert<-df$AM01_06
df$AM01_06<-NULL
df$MafI2Absolviert<-df$AM01_07
df$AM01_07<-NULL
df$MafI3Absolviert<-df$AM01_08
df$AM01_08<-NULL
df$GTIAbsolviert<-df$AM01_09
df$AM01_09<-NULL
df$PSAbsolviert<-df$AM01_17
df$AM01_17<-NULL
df$DBSAbsolviert<-df$AM01_14
df$AM01_14<-NULL
df$TI1Absolviert<-df$AM01_10
df$AM01_10<-NULL
df$TI2Absolviert<-df$AM01_11
df$AM01_11<-NULL
df$TI3Absolviert<-df$AM01_12
df$AM01_12<-NULL
df$TI4Absolviert<-df$AM01_13
df$AM01_13<-NULL
df$AWSAbsolviert<-df$AM01_15
df$AM01_15<-NULL
df$SWTAbsolviert<-df$AM01_16
df$AM01_16<-NULL
df$ALP1KlausurNachklausur<-factor(df$AM02_01, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_01<-NULL
df$ALP2KlausurNachklausur<-factor(df$AM02_02, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_02<-NULL
df$ALP3KlausurNachklausur<-factor(df$AM02_03, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_03<-NULL
df$ALP4KlausurNachklausur<-factor(df$AM02_04, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_04<-NULL
df$ALP5KlausurNachklausur<-factor(df$AM02_05, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_05<-NULL
df$MafI1KlausurNachklausur<-factor(df$AM02_06, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_06<-NULL
df$MafI2KlausurNachklausur<-factor(df$AM02_07, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_07<-NULL
df$MafI3KlausurNachklausur<-factor(df$AM02_08, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_08<-NULL
df$GTIKlausurNachklausur<-factor(df$AM02_09, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_09<-NULL
df$TI1KlausurNachklausur<-factor(df$AM02_10, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_10<-NULL
df$TI2KlausurNachklausur<-factor(df$AM02_11, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_11<-NULL
df$TI3KlausurNachklausur<-factor(df$AM02_12, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_12<-NULL
df$TI4KlausurNachklausur<-factor(df$AM02_13, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_13<-NULL
df$DBSKlausurNachklausur<-factor(df$AM02_14, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_14<-NULL
df$AWSKlausurNachklausur<-factor(df$AM02_15, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_15<-NULL
df$SWTKlausurNachklausur<-factor(df$AM02_16, levels=c(1, 2, 3), labels=c("Klausur", "Nachklausur", "nicht bestanden"))
df$AM02_16<-NULL
df$ALP1Fachsemester<-factor(df$AM04_01, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_01<-NULL
df$ALP2Fachsemester<-factor(df$AM04_02, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_02<-NULL
df$ALP3Fachsemester<-factor(df$AM04_03, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_03<-NULL
df$ALP4Fachsemester<-factor(df$AM04_04, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_04<-NULL
df$ALP5Fachsemester<-factor(df$AM04_05, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_05<-NULL
df$MafI1Fachsemester<-factor(df$AM04_06, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_06<-NULL
df$MafI2Fachsemester<-factor(df$AM04_07, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_07<-NULL
df$MafI3Fachsemester<-factor(df$AM04_08, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_08<-NULL
df$GTIFachsemester<-factor(df$AM04_09, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_09<-NULL
df$TI1Fachsemester<-factor(df$AM04_10, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_10<-NULL
df$TI2Fachsemester<-factor(df$AM04_11, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_11<-NULL
df$TI3Fachsemester<-factor(df$AM04_12, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_12<-NULL
df$TI4Fachsemester<-factor(df$AM04_13, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_13<-NULL
df$DBSFachsemester<-factor(df$AM04_14, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_14<-NULL
df$AWSFachsemester<-factor(df$AM04_15, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_15<-NULL
df$SWTFachsemester<-factor(df$AM04_16, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_16<-NULL
df$PSFachsemester<-factor(df$AM04_17, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_17<-NULL
df$SWPFachsemester<-factor(df$AM04_18, levels=c(1, 2, 3, 4), labels=c("nicht bestanden", "1", "2", "3 oder mehr"))
df$AM04_18<-NULL
df$ALP1Beschäftigung<-as.numeric(df$AM05_01)
df$AM05_01<-NULL
df$ALP2Beschäftigung<-as.numeric(df$AM05_02)
df$AM05_02<-NULL
df$ALP3Beschäftigung<-as.numeric(df$AM05_03)
df$AM05_03<-NULL
df$ALP4Beschäftigung<-as.numeric(df$AM05_04)
df$AM05_04<-NULL
df$ALP5Beschäftigung<-as.numeric(df$AM05_05)
df$AM05_05<-NULL
df$SWPBeschäftigung<-as.numeric(df$AM05_06)
df$AM05_06<-NULL
df$MafI1Beschäftigung<-as.numeric(df$AM05_07)
df$AM05_07<-NULL
df$MafI2Beschäftigung<-as.numeric(df$AM05_08)
df$AM05_08<-NULL
df$MafI3Beschäftigung<-as.numeric(df$AM05_09)
df$AM05_09<-NULL
df$GTIBeschäftigung<-as.numeric(df$AM05_10)
df$AM05_10<-NULL
df$PSBeschäftigung<-as.numeric(df$AM05_11)
df$AM05_11<-NULL
df$DBSBeschäftigung<-as.numeric(df$AM05_12)
df$AM05_12<-NULL
df$TI1Beschäftigung<-as.numeric(df$AM05_13)
df$AM05_13<-NULL
df$TI2Beschäftigung<-as.numeric(df$AM05_14)
df$AM05_14<-NULL
df$TI3Beschäftigung<-as.numeric(df$AM05_15)
df$AM05_15<-NULL
df$TI4Beschäftigung<-as.numeric(df$AM05_16)
df$AM05_16<-NULL
df$AWSBeschäftigung<-as.numeric(df$AM05_17)
df$AM05_17<-NULL
df$SWTBeschäftigung<-as.numeric(df$AM05_18)
df$AM05_18<-NULL
df$Alter<-as.factor(df$PD01_01)
df$PD01_01<-NULL
df$Geschlecht<-factor(df$PD02, levels=c(1, 2, 3), labels=c("maennlich", "weiblich", "anderes"))
df$PD02<-NULL
df$Familienstand<-factor(df$PD03, levels=c(1, 2, 3), labels=c("Single", "verheiratet", "geschieden"))
df$PD03<-NULL
df$Kinder<-factor(df$PD04, levels=c(1, 2, 3, 4), labels=c("0", "1", "2", "3 oder mehr"))
df$PD04<-NULL
df$StaatsangehörigkeitDeutsch<-factor(df$PD05_01, levels=c(1, 2), labels=c("Nein", "Ja"))
df$PD05_01<-NULL
df$StaatsangehörigkeitAndere<-factor(df$PD05_02, levels=c(1, 2), labels=c("Nein", "Ja"))
df$PD05_02<-NULL
df$Wohnsituation<-factor(df$PD07, levels=c(1, 2, 3, 4, 6), labels=c("alleine", "mit Eltern", "mit Partner", "in einer Wohngemeinschaft", "andere Wohnsituation"))
df$PD07<-NULL
df$Einnahmen<-factor(df$PD08, levels=c(1, 2, 3, 4), labels=c("bis zu 400", "bis zu 800", "bis zu 1200", "mehr als 1200"))
df$PD08<-NULL
df$ALP1GesamterWorkload<-as.numeric(df$M113_01, levels=c(), labels=c())
df$M113_01<-NULL
df$ALP1Schwierigkeit<-factor(df$M106_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M106_01<-NULL
df$ALP1Interesse<-factor(df$M105_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M105_01<-NULL
df$ALP1Vorkenntnisse<-factor(df$M107_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M107_01<-NULL
df$ALP1VBesucht<-factor(df$M114_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M114_01<-NULL
df$ALP1UBearbeitet<-factor(df$M115_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M115_01<-NULL
df$ALP1WorkloadDiff<-factor(df$M117_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M117_01<-NULL
df$ALP2GesamterWorkload<-as.numeric(df$M213_01, levels=c(), labels=c())
df$M213_01<-NULL
df$ALP2Schwierigkeit<-factor(df$M206_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M206_01<-NULL
df$ALP2Interesse<-factor(df$M205_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M205_01<-NULL
df$ALP2Vorkenntnisse<-factor(df$M207_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M207_01<-NULL
df$ALP2VBesucht<-factor(df$M214_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M214_01<-NULL
df$ALP2UBearbeitet<-factor(df$M215_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M215_01<-NULL
df$ALP2WorkloadDiff<-factor(df$M217_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M217_01<-NULL
df$ALP3GesamterWorkload<-as.numeric(df$M313_01, levels=c(), labels=c())
df$M313_01<-NULL
df$ALP3Schwierigkeit<-factor(df$M306_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M306_01<-NULL
df$ALP3Interesse<-factor(df$M305_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M305_01<-NULL
df$ALP3Vorkenntnisse<-factor(df$M307_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M307_01<-NULL
df$ALP3VBesucht<-factor(df$M314_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M314_01<-NULL
df$ALP3UBearbeitet<-factor(df$M315_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M315_01<-NULL
df$ALP3WorkloadDiff<-factor(df$M317_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M317_01<-NULL
df$ALP4GesamterWorkload<-as.numeric(df$M413_01, levels=c(), labels=c())
df$M413_01<-NULL
df$ALP4Schwierigkeit<-factor(df$M406_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M406_01<-NULL
df$ALP4Interesse<-factor(df$M405_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M405_01<-NULL
df$ALP4Vorkenntnisse<-factor(df$M407_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M407_01<-NULL
df$ALP4VBesucht<-factor(df$M414_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M414_01<-NULL
df$ALP4UBearbeitet<-factor(df$M415_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M415_01<-NULL
df$ALP4WorkloadDiff<-factor(df$M417_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M417_01<-NULL
df$ALP5GesamterWorkload<-as.numeric(df$M513_01, levels=c(), labels=c())
df$M513_01<-NULL
df$ALP5Schwierigkeit<-factor(df$M506_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M506_01<-NULL
df$ALP5Interesse<-factor(df$M505_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M505_01<-NULL
df$ALP5Vorkenntnisse<-factor(df$M507_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M507_01<-NULL
df$ALP5VBesucht<-factor(df$M514_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M514_01<-NULL
df$ALP5UBearbeitet<-factor(df$M515_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M515_01<-NULL
df$ALP5WorkloadDiff<-factor(df$M517_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M517_01<-NULL
df$SWPGesamterWorkload<-as.numeric(df$M613_01, levels=c(), labels=c())
df$M613_01<-NULL
df$SWPSchwierigkeit<-factor(df$M606_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M606_01<-NULL
df$SWPInteresse<-factor(df$M605_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M605_01<-NULL
df$SWPVorkenntnisse<-factor(df$M607_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M607_01<-NULL
df$SWPPBearbeitet<-factor(df$M615_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M615_01<-NULL
df$SWPWorkloadDiff<-factor(df$M617_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M617_01<-NULL
df$Gebiet<-factor(df$M618, levels=c(1, 2, 3, 4), labels=c("Angewandte Informatik", "Praktische Informatik", "Technische Informatik", "Theoretische Informatik"))
df$M618<-NULL
df$MafI1GesamterWorkload<-as.numeric(df$M713_01, levels=c(), labels=c())
df$M713_01<-NULL
df$MafI1Schwierigkeit<-factor(df$M706_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M706_01<-NULL
df$MafI1Interesse<-factor(df$M705_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M705_01<-NULL
df$MafI1Vorkenntnisse<-factor(df$M707_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M707_01<-NULL
df$MafI1VBesucht<-factor(df$M714_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M714_01<-NULL
df$MafI1UBearbeitet<-factor(df$M715_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M715_01<-NULL
df$MafI1WorkloadDiff<-factor(df$M717_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M717_01<-NULL
df$MafI2GesamterWorkload<-as.numeric(df$M813_01, levels=c(), labels=c())
df$M813_01<-NULL
df$MafI2Schwierigkeit<-factor(df$M806_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M806_01<-NULL
df$MafI2Interesse<-factor(df$M805_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M805_01<-NULL
df$MafI2Vorkenntnisse<-factor(df$M807_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M807_01<-NULL
df$MafI2VBesucht<-factor(df$M814_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M814_01<-NULL
df$MafI2UBearbeitet<-factor(df$M815_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M815_01<-NULL
df$MafI2WorkloadDiff<-factor(df$M817_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M817_01<-NULL
df$MafI3GesamterWorkload<-as.numeric(df$M913_01, levels=c(), labels=c())
df$M913_01<-NULL
df$MafI3Schwierigkeit<-factor(df$M906_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$M906_01<-NULL
df$MafI3Interesse<-factor(df$M905_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M905_01<-NULL
df$MafI3Vorkenntnisse<-factor(df$M907_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$M907_01<-NULL
df$MafI3VBesucht<-factor(df$M914_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M914_01<-NULL
df$MafI3UBearbeitet<-factor(df$M915_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$M915_01<-NULL
df$MafI3WorkloadDiff<-factor(df$M917_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$M917_01<-NULL
df$GTIGesamterWorkload<-as.numeric(df$N113_01, levels=c(), labels=c())
df$N113_01<-NULL
df$GTISchwierigkeit<-factor(df$N106_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N106_01<-NULL
df$GTIInteresse<-factor(df$N105_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N105_01<-NULL
df$GTIVorkenntnisse<-factor(df$N107_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N107_01<-NULL
df$GTIVBesucht<-factor(df$N114_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N114_01<-NULL
df$GTIUBearbeitet<-factor(df$N115_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N115_01<-NULL
df$GTIWorkloadDiff<-factor(df$N117_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N117_01<-NULL
df$PSGesamterWorkload<-as.numeric(df$N213_01, levels=c(), labels=c())
df$N213_01<-NULL
df$PSSchwierigkeit<-factor(df$N206_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N206_01<-NULL
df$PSInteresse<-factor(df$N205_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N205_01<-NULL
df$PSVorkenntnisse<-factor(df$N207_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N207_01<-NULL
df$PSPSBearbeitet<-factor(df$N214_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N214_01<-NULL
df$PSWorkloadDiff<-factor(df$N217_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N217_01<-NULL
df$Gebiet<-factor(df$N218, levels=c(1, 2, 3, 4), labels=c("Angewandte Informatik", "Praktische Informatik", "Technische Informatik", "Theoretische Informatik"))
df$N218<-NULL
df$DBSGesamterWorkload<-as.numeric(df$N313_01, levels=c(), labels=c())
df$N313_01<-NULL
df$DBSSchwierigkeit<-factor(df$N306_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N306_01<-NULL
df$DBSInteresse<-factor(df$N305_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N305_01<-NULL
df$DBSVorkenntnisse<-factor(df$N307_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N307_01<-NULL
df$DBSVBesucht<-factor(df$N314_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N314_01<-NULL
df$DBSUBearbeitet<-factor(df$N315_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N315_01<-NULL
df$DBSPBearbeitet<-factor(df$N318_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -1), labels=c("0", "10", "20", "30", "40", "50", "60", "70", "80", "90", "100", "Es gab kein Projekt"))
df$N318_01<-NULL
df$DBSWorkloadDiff<-factor(df$N317_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N317_01<-NULL
df$TI1GesamterWorkload<-as.numeric(df$N413_01, levels=c(), labels=c())
df$N413_01<-NULL
df$TI1Schwierigkeit<-factor(df$N406_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N406_01<-NULL
df$TI1Interesse<-factor(df$N405_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N405_01<-NULL
df$TI1Vorkenntnisse<-factor(df$N407_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N407_01<-NULL
df$TI1VBesucht<-factor(df$N414_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N414_01<-NULL
df$TI1UBearbeitet<-factor(df$N415_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N415_01<-NULL
df$TI1WorkloadDiff<-factor(df$N417_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N417_01<-NULL
df$TI2GesamterWorkload<-as.numeric(df$N513_01, levels=c(), labels=c())
df$N513_01<-NULL
df$TI2Schwierigkeit<-factor(df$N506_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N506_01<-NULL
df$TI2Interesse<-factor(df$N505_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N505_01<-NULL
df$TI2Vorkenntnisse<-factor(df$N507_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N507_01<-NULL
df$TI2VBesucht<-factor(df$N514_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N514_01<-NULL
df$TI2UBearbeitet<-factor(df$N515_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N515_01<-NULL
df$TI2WorkloadDiff<-factor(df$N517_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N517_01<-NULL
df$TI3GesamterWorkload<-as.numeric(df$N613_01, levels=c(), labels=c())
df$N613_01<-NULL
df$TI3Schwierigkeit<-factor(df$N606_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N606_01<-NULL
df$TI3Interesse<-factor(df$N605_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N605_01<-NULL
df$TI3Vorkenntnisse<-factor(df$N607_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N607_01<-NULL
df$TI3VBesucht<-factor(df$N614_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N614_01<-NULL
df$TI3UBearbeitet<-factor(df$N615_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N615_01<-NULL
## ---- recode raw survey columns into named variables ---- ##
# Each raw SoSci column (N*/K*/L*) is converted into a readably named variable:
# Likert-style answers become factors, free-number answers become numerics, and
# the raw column is dropped afterwards.  The generated `levels=c(), labels=c()`
# arguments on as.numeric() were meaningless (as.numeric has no such
# parameters; they were silently ignored) and have been removed.
df$TI3WorkloadDiff<-factor(df$N617_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N617_01<-NULL
df$TI4GesamterWorkload<-as.numeric(df$N901_01)
df$N901_01<-NULL
df$TI4Schwierigkeit<-factor(df$N902_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N902_01<-NULL
df$TI4Interesse<-factor(df$N903_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N903_01<-NULL
df$TI4Vorkenntnisse<-factor(df$N904_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N904_01<-NULL
df$TI4PBearbeitet<-factor(df$N905_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N905_01<-NULL
df$TI4WorkloadDiff<-factor(df$N906_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N906_01<-NULL
df$AWSGesamterWorkload<-as.numeric(df$N713_01)
df$N713_01<-NULL
df$AWSSchwierigkeit<-factor(df$N706_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N706_01<-NULL
df$AWSInteresse<-factor(df$N705_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N705_01<-NULL
df$AWSVorkenntnisse<-factor(df$N707_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N707_01<-NULL
df$AWSVBesucht<-factor(df$N714_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N714_01<-NULL
df$AWSUBearbeitet<-factor(df$N715_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N715_01<-NULL
df$AWSWorkloadDiff<-factor(df$N717_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N717_01<-NULL
df$SWTGesamterWorkload<-as.numeric(df$N813_01)
df$N813_01<-NULL
df$SWTSchwierigkeit<-factor(df$N806_01, levels=c(1, 2, 3, 4, 5, 6), labels=c("1", "2", "3", "4", "5", "6"))
df$N806_01<-NULL
df$SWTInteresse<-factor(df$N805_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N805_01<-NULL
df$SWTVorkenntnisse<-factor(df$N807_01, levels=c(1, 2, 3, 4, 5, 6, 7), labels=c("1", "2", "3", "4", "5", "6", "7"))
df$N807_01<-NULL
df$SWTVBesucht<-factor(df$N814_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N814_01<-NULL
df$SWTUBearbeitet<-factor(df$N815_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), labels=c("10", "20", "30", "40", "50", "60", "70", "80", "90", "100"))
df$N815_01<-NULL
df$SWTWorkloadDiff<-factor(df$N817_01, levels=c(1, 2, 3, 4, 5, 6, 7, 8, 9, -1), labels=c("-100%", "-75%", "-50%", "-25%", "0", "+25%", "+50%", "+75%", "+100%", "nicht zutreffend"))
df$N817_01<-NULL
# per-activity weekly workload diaries (K*/L* pages); suffixes U/K/V name the
# activity -- presumably Uebung/Korrektur/Vorlesung, verify against the survey
df$ALP1WorkloadU<-as.numeric(df$K101_01)
df$K101_01<-NULL
df$ALP1WorkloadK<-as.numeric(df$K102_01)
df$K102_01<-NULL
df$ALP1WorkloadV<-as.numeric(df$K103_01)
df$K103_01<-NULL
df$ALP1Ereignis<-factor(df$K105, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K105<-NULL
df$ALP2WorkloadU<-as.numeric(df$K201_01)
df$K201_01<-NULL
df$ALP2WorkloadK<-as.numeric(df$K202_01)
df$K202_01<-NULL
df$ALP2WorkloadV<-as.numeric(df$K203_01)
df$K203_01<-NULL
df$ALP2Ereignis<-factor(df$K205, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K205<-NULL
df$ALP3WorkloadU<-as.numeric(df$K301_01)
df$K301_01<-NULL
df$ALP3WorkloadK<-as.numeric(df$K302_01)
df$K302_01<-NULL
df$ALP3WorkloadV<-as.numeric(df$K303_01)
df$K303_01<-NULL
df$ALP3Ereignis<-factor(df$K305, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K305<-NULL
df$ALP4WorkloadU<-as.numeric(df$K401_01)
df$K401_01<-NULL
df$ALP4WorkloadK<-as.numeric(df$K402_01)
df$K402_01<-NULL
df$ALP4WorkloadV<-as.numeric(df$K403_01)
df$K403_01<-NULL
df$ALP4Ereignis<-factor(df$K405, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K405<-NULL
df$ALP5WorkloadU<-as.numeric(df$K501_01)
df$K501_01<-NULL
df$ALP5WorkloadK<-as.numeric(df$K502_01)
df$K502_01<-NULL
df$ALP5WorkloadV<-as.numeric(df$K503_01)
df$K503_01<-NULL
df$ALP5Ereignis<-factor(df$K505, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K505<-NULL
df$SWPWorkloadKomm<-as.numeric(df$K601_01)
df$K601_01<-NULL
df$SWPWorkloadProgrammieren<-as.numeric(df$K603_01)
df$K603_01<-NULL
df$SWPEreignis<-factor(df$K605, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K605<-NULL
df$MafI1WorkloadU<-as.numeric(df$K701_01)
df$K701_01<-NULL
df$MafI1WorkloadK<-as.numeric(df$K702_01)
df$K702_01<-NULL
df$MafI1WorkloadV<-as.numeric(df$K703_01)
df$K703_01<-NULL
df$MafI1Ereignis<-factor(df$K705, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K705<-NULL
df$MafI2WorkloadU<-as.numeric(df$K801_01)
df$K801_01<-NULL
df$MafI2WorkloadK<-as.numeric(df$K802_01)
df$K802_01<-NULL
df$MafI2WorkloadV<-as.numeric(df$K803_01)
df$K803_01<-NULL
df$MafI2Ereignis<-factor(df$K805, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K805<-NULL
df$MafI3WorkloadU<-as.numeric(df$K901_01)
df$K901_01<-NULL
df$MafI3WorkloadK<-as.numeric(df$K902_01)
df$K902_01<-NULL
df$MafI3WorkloadV<-as.numeric(df$K903_01)
df$K903_01<-NULL
df$MafI3Ereignis<-factor(df$K905, levels=c(1, 2), labels=c("Ja", "Nein"))
df$K905<-NULL
df$GTIWorkloadU<-as.numeric(df$L101_01)
df$L101_01<-NULL
df$GTIWorkloadK<-as.numeric(df$L102_01)
df$L102_01<-NULL
df$GTIWorkloadV<-as.numeric(df$L103_01)
df$L103_01<-NULL
df$GTIEreignis<-factor(df$L105, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L105<-NULL
df$PSWorkloadVortrag<-as.numeric(df$L201_01)
df$L201_01<-NULL
df$PSEreignis<-factor(df$L205, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L205<-NULL
df$DBSWorkloadU<-as.numeric(df$L301_01)
df$L301_01<-NULL
df$DBSWorkloadProjekt<-as.numeric(df$L307_01)
df$L307_01<-NULL
df$DBSWorkloadK<-as.numeric(df$L302_01)
df$L302_01<-NULL
df$DBSWorkloadV<-as.numeric(df$L303_01)
df$L303_01<-NULL
df$DBSEreignis<-factor(df$L305, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L305<-NULL
df$TI1WorkloadU<-as.numeric(df$L401_01)
df$L401_01<-NULL
df$TI1WorkloadK<-as.numeric(df$L402_01)
df$L402_01<-NULL
df$TI1WorkloadV<-as.numeric(df$L403_01)
df$L403_01<-NULL
df$TI1Ereignis<-factor(df$L405, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L405<-NULL
df$TI2WorkloadU<-as.numeric(df$L501_01)
df$L501_01<-NULL
df$TI2WorkloadK<-as.numeric(df$L502_01)
df$L502_01<-NULL
df$TI2WorkloadV<-as.numeric(df$L503_01)
df$L503_01<-NULL
df$TI2Ereignis<-factor(df$L505, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L505<-NULL
df$TI3WorkloadU<-as.numeric(df$L601_01)
df$L601_01<-NULL
df$TI3WorkloadK<-as.numeric(df$L602_01)
df$L602_01<-NULL
df$TI3WorkloadV<-as.numeric(df$L603_01)
df$L603_01<-NULL
df$TI3Ereignis<-factor(df$L605, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L605<-NULL
df$TI4WorkloadProgrammieren<-as.numeric(df$L701_01)
df$L701_01<-NULL
df$TI4WorkloadDoku<-as.numeric(df$L703_01)
df$L703_01<-NULL
df$TI4Ereignis<-factor(df$L705, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L705<-NULL
df$AWSWorkloadU<-as.numeric(df$L801_01)
df$L801_01<-NULL
df$AWSWorkloadK<-as.numeric(df$L802_01)
df$L802_01<-NULL
df$AWSWorkloadV<-as.numeric(df$L803_01)
df$L803_01<-NULL
df$AWSEreignis<-factor(df$L805, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L805<-NULL
df$SWTWorkloadU<-as.numeric(df$L901_01)
df$L901_01<-NULL
df$SWTWorkloadK<-as.numeric(df$L902_01)
df$L902_01<-NULL
df$SWTWorkloadV<-as.numeric(df$L903_01)
df$L903_01<-NULL
df$SWTEreignis<-factor(df$L905, levels=c(1, 2), labels=c("Ja", "Nein"))
df$L905<-NULL
## ------------ ##
## ---- compute total workload ---- ##
# Derive a computed weekly workload per module:
#   <m>WorkloadCalc   -- sum of the per-activity workloads (K-type entries are
#                        divided by wps[[...]] -- TODO confirm wps semantics)
#   <m>WorkloadCalcSc -- same sum, with activities weighted by the fraction
#                        attended (VBesucht) / completed ([U/P]Bearbeitet)
# NOTE(review): the *Bearbeitet/*Besucht columns are factors with levels coded
# 1..10 but labelled "10".."100"; as.numeric(factor) yields the CODES 1..10, so
# dividing by 100 scales by k/100 rather than the labelled percentage k*10/100.
# Verify this is intended before trusting the *CalcSc values.
for(m in modules) {
    if(m == "SWP") {
        # SWP: communication + programming; Sc weights programming by project completion
        df$SWPWorkloadCalc<-df$SWPWorkloadKomm + df$SWPWorkloadProgrammieren
        df$SWPWorkloadCalcSc<-df$SWPWorkloadKomm + df$SWPWorkloadProgrammieren * as.numeric(df$SWPPBearbeitet) / 100
    }
    else if(m == "PS") {
        # PS (seminar): only the talk preparation, divided by the module weight
        # NOTE(review): "PSPSBearbeitet" (doubled "PS") -- other modules use
        # "<module>PBearbeitet"; verify this column name actually exists.
        df$PSWorkloadCalc<-df$PSWorkloadVortrag / wps[[which(modules==m)]]
        df$PSWorkloadCalcSc<-df$PSWorkloadVortrag * as.numeric(df$PSPSBearbeitet) / (wps[[which(modules==m)]] * 100)
    }
    else if(m == "DBS") {
        # DBS additionally has a project; K and Projekt are divided by the module weight
        df$DBSWorkloadCalc<-df$DBSWorkloadV + df$DBSWorkloadK / wps[[which(modules==m)]] + df$DBSWorkloadU + df$DBSWorkloadProjekt / wps[[which(modules==m)]]
        df$DBSWorkloadCalcSc<-df$DBSWorkloadV * as.numeric(df$DBSVBesucht) / 100 + df$DBSWorkloadK / wps[[which(modules==m)]] + df$DBSWorkloadU * as.numeric(df$DBSUBearbeitet) / 100 + df$DBSWorkloadProjekt * as.numeric(df$DBSPBearbeitet) / (wps[[which(modules==m)]] * 100)
    }
    else if(m == "TI4") {
        # TI4 (project module): programming + documentation, scaled by project completion
        df$TI4WorkloadCalc<-df$TI4WorkloadProgrammieren + df$TI4WorkloadDoku
        df$TI4WorkloadCalcSc<-(df$TI4WorkloadProgrammieren + df$TI4WorkloadDoku) * as.numeric(df$TI4PBearbeitet) / 100
    }
    else {
        # default scheme: lecture (V) + weekly hand-ins (K, divided by the
        # module weight) + tutorial (U); Sc weights V by attendance and U by
        # completed exercises
        df[paste(m, "WorkloadCalc", sep="")]<-df[paste(m, "WorkloadV", sep="")] + df[paste(m, "WorkloadK", sep="")] / wps[[which(modules==m)]] + df[paste(m, "WorkloadU", sep="")]
        df[paste(m, "WorkloadCalcSc", sep="")]<-df[paste(m, "WorkloadV", sep="")] * as.numeric(df[[paste(m, "VBesucht", sep="")]]) / 100 + df[paste(m, "WorkloadK", sep="")] / wps[[which(modules==m)]] + df[paste(m, "WorkloadU", sep="")] * as.numeric(df[[paste(m, "UBearbeitet", sep="")]]) / 100
    }
}
## ------------ ##
## ---- compute reliabilty of workload in [-1,1]. "0" means reliability is high, ##
## "-1"/"1" means overall workload was very low/high compared to the computed ##
## workload ---- ##
# Let r = reported/computed workload.  r < 1 maps to r-1 in [-1, 0), r >= 1
# maps to 1-1/r in [0, 1); r == 1 (perfect agreement) yields exactly 0.
for(m in modules)
    df[paste(m, "WorkloadReliability", sep="")]<-as.numeric(sapply(df[paste(m, "GesamterWorkload", sep="")] / df[paste(m, "WorkloadCalc", sep="")], function(x) ifelse(is.na(x), NA, ifelse(x<1, x-1, 1-1/x))))
## ------------ ##
## ---- remove outsiders and scale workload per lp ---- ##
# For every module and respondent, rescale the weekly workloads to workload per
# LP (credit point) into WLperLP / WLperLPCalc / WLperLPCalcSc.  A data point
# is discarded (NA) when the answer is unreliable (|reliability| > 0.5),
# missing, negative or >= dpupperbound.
# Fix: the original used ifelse() with assignments as its yes/no arguments;
# ifelse() is a vectorized value selector, not control flow, and only worked
# here by accident of lazy evaluation.  Plain if/else is the correct construct.
for(m in 1:length(modules))
    for(i in 1:length(df[[1]])) {
        rel<-df[[paste(modules[m], "WorkloadReliability", sep="")]][i]
        unreliable<-!is.na(rel) && abs(rel)>0.5
        # reported overall workload per LP
        raw<-df[[paste(modules[m], "GesamterWorkload", sep="")]][i]
        v<-raw * wps[[m]] / lps[m]
        if(unreliable || is.na(raw) || v >= dpupperbound || v < 0)
            df[[paste(modules[m], "WLperLP", sep="")]][i]<-NA
        else
            df[[paste(modules[m], "WLperLP", sep="")]][i]<-v
        # computed workload per LP
        raw<-df[[paste(modules[m], "WorkloadCalc", sep="")]][i]
        v<-raw * wps[[m]] / lps[m]
        if(unreliable || is.na(raw) || v >= dpupperbound || v < 0)
            df[[paste(modules[m], "WLperLPCalc", sep="")]][i]<-NA
        else
            df[[paste(modules[m], "WLperLPCalc", sep="")]][i]<-v
        # computed workload scaled by attendance/completion, per LP
        raw<-df[[paste(modules[m], "WorkloadCalcSc", sep="")]][i]
        v<-raw * wps[[m]] / lps[m]
        if(unreliable || is.na(raw) || v >= dpupperbound || v < 0)
            df[[paste(modules[m], "WLperLPCalcSc", sep="")]][i]<-NA
        else
            df[[paste(modules[m], "WLperLPCalcSc", sep="")]][i]<-v
    }
## ------------ ##
## ---- define functions ---- ##
# Concatenate the column "<module><name>" of every module into a single vector.
getUnionForAllModules<-function(name) {
    columns<-lapply(modules, function(m) df[[paste(m, name, sep="")]])
    # Reduce with c() matches the original incremental c(NULL, col1, col2, ...)
    return(Reduce(c, columns))
}
# get subset of df with all modules for a given suffix
# Returns a dataframe holding the "<module><name>" column of every module,
# renamed to the plain module names.
getSubsetForAllModules<-function(name) {
    columnNames<-paste(modules, name, sep="")
    moduleData<-df[, columnNames]
    names(moduleData)<-modules
    return(moduleData)
}
# add count of non-NAs to columnnames
# Renames every column of `frame` to "<name> (<non-NA count>)".
countToName<-function(frame) {
    names(frame)<-sapply(names(frame), function(columnName)
        paste(columnName, " (", sum(!is.na(frame[[columnName]])), ")", sep=""))
    return(frame)
}
# add count of levels to levelnames
# Relabels every level of `column` to "<level> (<occurrence count>)".
countToLevel<-function(column) {
    countedLabels<-sapply(levels(column), function(lvl)
        paste(lvl, " (", sum(column==lvl, na.rm=TRUE), ")", sep=""))
    return(factor(column, levels=levels(column), labels=countedLabels))
}
# list available plots
# Prints the name and description of every registered plot object.
plots<-function() {
    writeLines("\navailable plots (call ectsplot.plot(name) to draw):\n-----------------------")
    for(plotObject in ectsplots)
        writeLines(paste0(plotObject@name, ": ", plotObject@description, "\n"))
    writeLines("-----------------------\n")
}
# write plots to file
# Renders every registered plot into plots/<name>.png (600x600 px).
writePlots<-function() {
    # dir.exists instead of file.exists: a regular file named "plots" would
    # otherwise suppress directory creation and make every png() call fail
    if(!dir.exists("plots"))
        dir.create("plots")
    for(p in ectsplots) {
        print(paste("writing: ", p@name, sep=""))
        png(paste("plots", .Platform$file.sep, p@name, ".png", sep=""), 600, 600)
        p@f()
        dev.off()
    }
    writeLines("done.")
}
## ------------ ##
## ---- print instructions ---- ##
# Print the interactive usage guide to the console (typo fix: "wich" -> "which").
writeLines("
---- Instructions: ----
df: provides access to a dataframe which contains the whole data
modules: provides a list of all modulenames in case You want to iterate over them (for instance with for(m in modules){...})
getUnionForAllModules(name): returns the unification of all columns where a modulename is followed by \"name\". For instance getUnionForAllModules(\"GesamterWorkload\") returns the data of <X>GesamterWorkload for every module X in a single vector.
getSubsetForAllModules(name): returns a subset of df with all columns where a modulename is followed by \"name\". For instance getSubsetForAllModules(\"GesamterWorkload\") returns a dataframe with every \"GesamterWorkload\"-column of df.
plots(): show a list of implemented plots. To plot a plot p write ectsplot.plot(p)
writePlots(): write every implemented plot to a file
If You want to start playing around with the data, try:
barplot(colMeans(getSubsetForAllModules(\"GesamterWorkload\"), na.rm=TRUE), las=2)
plot(getUnionForAllModules(\"WorkloadReliability\"))
summary(df)
plots()
-----------------------
")
## ------------ ##
|
{"hexsha": "a02b00a666ff10887e84e3f9ffbfe163b5747a59", "size": 40139, "ext": "r", "lang": "R", "max_stars_repo_path": "survey/start.r", "max_stars_repo_name": "xconnect/fub.msc.empirical-evaluation", "max_stars_repo_head_hexsha": "7a995bfc8df23e43ed03b334d6c8bcb186129d78", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "survey/start.r", "max_issues_repo_name": "xconnect/fub.msc.empirical-evaluation", "max_issues_repo_head_hexsha": "7a995bfc8df23e43ed03b334d6c8bcb186129d78", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "survey/start.r", "max_forks_repo_name": "xconnect/fub.msc.empirical-evaluation", "max_forks_repo_head_hexsha": "7a995bfc8df23e43ed03b334d6c8bcb186129d78", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.5263157895, "max_line_length": 287, "alphanum_fraction": 0.6285657341, "num_tokens": 18227}
|
import numpy as np
import os
import pandas as pd
from load_paths import load_box_paths
from datetime import date, timedelta, datetime
# Resolve shared directory locations once at import time -- presumably
# machine-specific Box/project paths configured in load_paths; verify against
# load_box_paths() itself.
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
def load_sim_data(exp_name, region_suffix ='_All', input_wdir=None, fname='trajectoriesDat.csv',
                  input_sim_output_path =None, column_list=None, add_incidence=True):
    """Load a simulation trajectories csv and attach calendar dates.

    Parameters:
        exp_name: experiment folder name under <wdir>/simulation_output.
        region_suffix: column suffix stripped from every column name
            (literally, not as a regex); pass None to keep columns as-is.
        input_wdir: working directory override (defaults to module-level wdir).
        fname: trajectories file name inside the experiment folder.
        input_sim_output_path: full output path override.
        column_list: optional subset of columns to read (pandas usecols).
        add_incidence: if True, append per-timestep 'new_*' columns via
            calculate_incidence.

    Returns the loaded DataFrame with a 'date' column (datetime.date).
    """
    input_wdir = input_wdir or wdir
    sim_output_path_base = os.path.join(input_wdir, 'simulation_output', exp_name)
    sim_output_path = input_sim_output_path or sim_output_path_base

    df = pd.read_csv(os.path.join(sim_output_path, fname), usecols=column_list)  ## engine='python'
    df = df.dropna()
    # 'startdate' comes in either ISO (%Y-%m-%d) or US (%m/%d/%Y) format
    # depending on the input file; catch only the parse failure, not everything
    start_str = df['startdate'].unique()[0]
    try:
        first_day = datetime.strptime(start_str, '%Y-%m-%d')
    except ValueError:
        first_day = datetime.strptime(start_str, '%m/%d/%Y')
    df['date'] = df['time'].apply(lambda x: first_day + timedelta(days=int(x)))
    df['date'] = pd.to_datetime(df['date']).dt.date

    if region_suffix is not None:
        # strip the suffix literally; regex=False avoids regex interpretation
        df.columns = df.columns.str.replace(region_suffix, '', regex=False)
        if add_incidence:
            if 'recovered' in df.columns:
                df['infected_cumul'] = df['infected'] + df['recovered'] + df['deaths']
                df = calculate_incidence(df)
            else:
                df = calculate_incidence(df, trimmed=True)
    return df
def load_sim_data_age(exp_name, channel, age_suffix ='_All', input_wdir=None, fname='trajectoriesDat.csv', input_sim_output_path =None):
    """Load the per-age-group columns of one channel from a trajectories csv.

    Reads 'scen_num', 'time', 'startdate' plus '<channel><grp>' for every
    group in ageGroup_list, attaches a 'date' column and strips age_suffix
    from the column names.

    NOTE(review): ageGroup_list is not defined in this module -- presumably a
    module-level global provided elsewhere; confirm before use.
    """
    input_wdir = input_wdir or wdir
    sim_output_path_base = os.path.join(input_wdir, 'simulation_output', exp_name)
    sim_output_path = input_sim_output_path or sim_output_path_base

    column_list = ['scen_num', 'time', 'startdate']
    for grp in ageGroup_list:
        column_list.append(channel + str(grp))

    df = pd.read_csv(os.path.join(sim_output_path, fname), usecols=column_list)
    df = df.dropna()
    # accept both date formats, consistent with load_sim_data
    start_str = df['startdate'].unique()[0]
    try:
        first_day = datetime.strptime(start_str, '%Y-%m-%d')
    except ValueError:
        first_day = datetime.strptime(start_str, '%m/%d/%Y')
    df['date'] = df['time'].apply(lambda x: first_day + timedelta(days=int(x)))
    df['date'] = pd.to_datetime(df['date']).dt.date
    # strip the suffix literally, not as a regex
    df.columns = df.columns.str.replace(age_suffix, '', regex=False)
    return df
def merge_county_covidregions(df_x, key_x='region', key_y='County'):
    """Left-join covid regions onto a county-level frame.

    Adds the covidregion columns (from covidregion_population_by_county.csv)
    to a frame that only has counties.  County names are lower-cased on both
    sides before merging.  All rows of df_x are kept (left join).
    """
    lookup_path = os.path.join(datapath, 'covid_IDPH', 'EMS Population', 'covidregion_population_by_county.csv')
    df_y = pd.read_csv(lookup_path)

    # normalise the join keys to lowercase so the match is case-insensitive
    df_x[key_x] = df_x[key_x].str.lower()
    df_y[key_y] = df_y[key_y].str.lower()

    return pd.merge(how='left', left=df_x, left_on=key_x, right=df_y, right_on=key_y)
def get_latest_LLfiledate(file_path, split_string ='_jg_' , file_pattern='aggregated_covidregion.csv'):
    """Return the newest integer date prefix (e.g. 20201015) among files in
    *file_path* whose names contain *file_pattern*; the prefix is the part of
    the file name before *split_string*."""
    matching = (name for name in os.listdir(file_path) if file_pattern in name)
    date_prefixes = [int(name.split(split_string)[0]) for name in matching]
    return max(date_prefixes)
def get_vents(crit_det_array, vent_frac=0.660):
    """Estimate ventilators in use as a fixed fraction of detected critical cases.

    Parameters:
        crit_det_array: scalar or array of detected critical (ICU) cases.
        vent_frac: assumed ventilated fraction; defaults to the previously
            hard-coded 0.660, so existing callers are unchanged.
    """
    return crit_det_array * vent_frac
# NOTE(review): exact duplicate of get_latest_LLfiledate defined above -- this
# second definition silently rebinds the name; one copy should be removed.
def get_latest_LLfiledate(file_path, split_string ='_jg_' , file_pattern='aggregated_covidregion.csv'):
    """Return the latest (max) integer date prefix among files in *file_path*
    whose names contain *file_pattern*; the prefix precedes *split_string*."""
    files= os.listdir(file_path)
    filedates = [x.split(split_string)[0] for x in files if file_pattern in x]
    latest_filedate = max([int(x) for x in filedates])
    return latest_filedate
def loadEMSregions(regionname):
    """Return the EMS region ids for a named restore region.

    regionname: 'northcentral', 'northeast', 'central' or 'southern' returns
    the corresponding list of 'EMS_<n>' ids; 'all' returns the whole mapping.
    Any other name raises KeyError (same as the original dict lookup).
    """
    regions = {'northcentral': ['EMS_1', 'EMS_2'],
               'northeast': ['EMS_7', 'EMS_8', 'EMS_9', 'EMS_10', 'EMS_11'],
               'central': ['EMS_3', 'EMS_6'],
               'southern': ['EMS_4', 'EMS_5']}
    if regionname == "all":
        return regions
    return regions[regionname]
def count_new(df, curr_ch):
    """Return first differences of column *curr_ch* as a list, with 0 in the
    first position (no previous value to diff against)."""
    values = list(df[curr_ch].values)
    return [0] + [curr - prev for prev, curr in zip(values, values[1:])]
def CI_5(x):
    """Return the 5th percentile of x."""
    return np.percentile(x, 5)

def CI_95(x):
    """Return the 95th percentile of x."""
    return np.percentile(x, 95)

def CI_25(x):
    """Return the 25th percentile of x."""
    return np.percentile(x, 25)

def CI_75(x):
    """Return the 75th percentile of x."""
    return np.percentile(x, 75)

def CI_2pt5(x):
    """Return the 2.5th percentile of x."""
    return np.percentile(x, 2.5)

def CI_97pt5(x):
    """Return the 97.5th percentile of x."""
    return np.percentile(x, 97.5)

def CI_50(x):
    """Return the median (50th percentile) of x."""
    return np.percentile(x, 50)
def load_ref_df(ems_nr):
    """Load and merge the reference (observed) data sources for a covid region.

    Combines EMResource hospital counts, cleaned line-list aggregates, CLI
    admissions and IDPH public county case counts into one frame keyed by
    (date, covid_region).

    Parameters:
        ems_nr: a region number (> 0 selects that region), 0 (aggregate all
            regions into a single 'region 0' total), or a list of region
            numbers (rows filtered to those regions).
    """
    ref_df_emr = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'emresource_by_region.csv'))
    ref_df_emr['suspected_and_confirmed_covid_icu'] = ref_df_emr['suspected_covid_icu'] + ref_df_emr['confirmed_covid_icu']
    data_channel_names_emr = ['confirmed_covid_deaths_prev_24h', 'confirmed_covid_icu', 'covid_non_icu','suspected_covid_icu','suspected_and_confirmed_covid_icu']
    ref_df_emr = ref_df_emr.groupby(['date_of_extract','covid_region'])[data_channel_names_emr].agg(np.sum).reset_index()
    ref_df_emr['date'] = pd.to_datetime(ref_df_emr['date_of_extract'])

    LL_file_date = get_latest_LLfiledate(file_path=os.path.join(datapath, 'covid_IDPH', 'Cleaned Data'))
    ref_df_ll = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Cleaned Data', f'{LL_file_date}_jg_aggregated_covidregion.csv'))
    ref_df_ll['date'] = pd.to_datetime(ref_df_ll['date'])

    ref_df_cli = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'CLI_admissions.csv'))
    ref_df_cli = merge_county_covidregions(df_x=ref_df_cli, key_x='region', key_y='County')
    ref_df_cli = ref_df_cli.groupby(['date','new_restore_region'])['inpatient'].agg(np.sum).reset_index()
    ref_df_cli = ref_df_cli.rename(columns={'new_restore_region': 'covid_region'})
    ref_df_cli['date'] = pd.to_datetime(ref_df_cli['date'])

    ref_df_public = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'IDPH_public_county.csv'))
    ref_df_public = merge_county_covidregions(df_x=ref_df_public, key_x='county', key_y='County')
    ref_df_public = ref_df_public.groupby(['test_date','new_restore_region'])['confirmed_cases'].agg(np.sum).reset_index()
    ref_df_public = ref_df_public.rename(columns={'new_restore_region': 'covid_region'})
    ref_df_public['test_date'] = pd.to_datetime(ref_df_public['test_date'])
    ref_df_public.rename(columns={"test_date" : "date"}, inplace=True)

    if not isinstance(ems_nr, list):
        if ems_nr > 0:
            # single region: filter every source down to it
            ref_df_emr = ref_df_emr[ref_df_emr['covid_region'] == ems_nr]
            ref_df_ll = ref_df_ll[ref_df_ll['covid_region'] == ems_nr]
            ref_df_cli = ref_df_cli[ref_df_cli['covid_region'] == ems_nr]
            ref_df_public = ref_df_public[ref_df_public['covid_region'] == ems_nr]
        if ems_nr == 0:
            # region 0 = statewide aggregate across all regions
            ref_df_emr['covid_region'] = 0
            ref_df_ll['covid_region'] = 0
            ref_df_cli['covid_region'] = 0
            ref_df_public['covid_region'] = 0
            ref_df_emr = ref_df_emr.groupby('date').agg(np.sum).reset_index()
            ref_df_ll = ref_df_ll.groupby('date').agg(np.sum).reset_index()
            ref_df_cli = ref_df_cli.groupby('date').agg(np.sum).reset_index()
            ref_df_public = ref_df_public.groupby('date').agg(np.sum).reset_index()
        ref_df_public = ref_df_public.sort_values('date')
        ref_df_public['new_confirmed_cases'] = count_new(ref_df_public, 'confirmed_cases')

    if isinstance(ems_nr, list):
        # compute new_confirmed_cases per region, then merge back on
        inc_df = pd.DataFrame()
        for region, df in ref_df_public.groupby('covid_region'):
            df = df.sort_values('date')
            sdf = pd.DataFrame({'date': df['date'], 'new_confirmed_cases': count_new(df, 'confirmed_cases')})
            sdf['covid_region'] = region
            inc_df = pd.concat([inc_df, sdf])
        ref_df_public_ext = pd.merge(left=ref_df_public, right=inc_df, on=['date', 'covid_region'])
        ref_df_public = ref_df_public_ext

    merge_keys = ['date', 'covid_region']
    ref_df = pd.merge(how='outer', left=ref_df_ll, right=ref_df_emr, on=merge_keys)
    ref_df = pd.merge(how='outer', left=ref_df, right=ref_df_cli, on=merge_keys)
    ref_df = pd.merge(how='outer', left=ref_df, right=ref_df_public, on=merge_keys)

    if isinstance(ems_nr, list):
        # Fix: the filtered frame was previously discarded (expression result
        # not assigned), so the region filter silently never applied.
        ref_df = ref_df[ref_df['covid_region'].isin(ems_nr)]

    ref_df = ref_df.sort_values(['covid_region', 'date'])
    ref_df['date'] = pd.to_datetime(ref_df['date']).dt.date
    ref_df = ref_df[(ref_df['date'] > pd.to_datetime(date(2020,1,1))) &
                    (ref_df['date'] <= pd.to_datetime(date.today()))]
    return ref_df
def calculate_prevalence(df, ems=None):
    """Add per-region prevalence, seroprevalence and IFR columns to *df*.

    For each region label in *ems* (defaults to 'EMS-1'..'EMS-11') this
    derives, from the existing simulation channels:
      N_<r>                        living population (N minus deaths)
      IFR_<r> / IFR_t_<r>          cumulative / per-timestep infection fatality ratio
      prevalence_<r>               currently infected fraction
      seroprevalence_current_<r>   recovered fraction
      seroprevalence_<r>           recovered fraction lagged by 14 timesteps
                                   (per scen_num/sample_num trajectory)
    The same quantities with a '_det' infix are added when detected channels
    are present.  Returns the mutated df.
    """
    if ems is None:
        ems = ['EMS-%d' % x for x in range(1, 12)]
    for ems_num in ems:
        # the raw population column uses underscores (N_EMS_1), the derived
        # channels use dashes (deaths_EMS-1)
        df[f'N_{ems_num}'] = df[f'N_{str(ems_num.replace("-","_"))}'] - df[f'deaths_{ems_num}']
        df[f'IFR_{ems_num}'] = df[f'deaths_{ems_num}'] / (df[f'recovered_{ems_num}'] + df[f'deaths_{ems_num}'])
        df[f'IFR_t_{ems_num}'] = df[f'new_deaths_{ems_num}'] / (df[f'new_recovered_{ems_num}'] + df[f'new_deaths_{ems_num}'])
        df[f'prevalence_{ems_num}'] = df[f'infected_{ems_num}'] / df[f'N_{ems_num}']
        df[f'seroprevalence_current_{ems_num}'] = df[f'recovered_{ems_num}'] / df[f'N_{ems_num}']
        # 14-step shift approximates the seroconversion delay -- TODO confirm units
        df[f'seroprevalence_{ems_num}'] = df.groupby(['scen_num', 'sample_num'])[f'seroprevalence_current_{ems_num}'].transform('shift', 14)
        if f'infected_det_{ems_num}' in df.columns:
            # detected-only variants of the same quantities
            df[f'N_det_{ems_num}'] = df[f'N_{str(ems_num.replace("-", "_"))}'] - df[f'deaths_det_{ems_num}']
            df[f'IFR_det_{ems_num}'] = df[f'deaths_det_{ems_num}'] / (df[f'recovered_det_{ems_num}'] + df[f'deaths_det_{ems_num}'])
            df[f'IFR_det_t_{ems_num}'] = df[f'new_deaths_det_{ems_num}'] / (df[f'new_recovered_det_{ems_num}'] + df[f'new_deaths_det_{ems_num}'])
            df[f'prevalence_det_{ems_num}'] = df[f'infected_det_{ems_num}'] / df[f'N_det_{ems_num}']
            df[f'seroprevalence_current_det_{ems_num}'] = df[f'recovered_det_{ems_num}'] / df[f'N_det_{ems_num}']
            df[f'seroprevalence_det_{ems_num}'] = df.groupby(['scen_num', 'sample_num'])[f'seroprevalence_current_det_{ems_num}'].transform('shift', 14)
    return df
def calculate_incidence(adf, output_filename=None, trimmed=False):
    """Append per-timestep incidence ('new_*') columns derived from the
    cumulative channels, computed separately per (run_num, sample_num,
    scen_num) trajectory and merged back onto *adf*.

    Parameters:
        adf: trajectories frame with cumulative channels.
        output_filename: optional csv path; the merged frame is written there.
        trimmed: if True, compute only the hospital/critical/death channels
            (for inputs lacking the full channel set).

    Returns the merged DataFrame.
    """
    frames = []
    for (run, samp, scen), df in adf.groupby(['run_num', 'sample_num', 'scen_num']):
        if trimmed:
            sdf = pd.DataFrame({'time': df['time'],
                                'new_detected_hospitalized': count_new(df, 'hosp_det_cumul'),
                                'new_hospitalized': count_new(df, 'hosp_cumul'),
                                'new_critical': count_new(df, 'crit_cumul'),
                                'new_detected_critical': count_new(df, 'crit_det_cumul'),
                                'new_detected_deaths': count_new(df, 'death_det_cumul'),
                                'new_deaths': count_new(df, 'deaths')
                                })
        else:
            sdf = pd.DataFrame({'time': df['time'],
                                # exposures = (negated) decrease in susceptibles
                                'new_exposures': [-1 * x for x in count_new(df, 'susceptible')],
                                'new_infected': count_new(df, 'infected_cumul'),
                                # 'new_infected_detected': count_new(df, 'infected_det_cumul'),
                                'new_asymptomatic': count_new(df, 'asymp_cumul'),
                                'new_asymptomatic_detected': count_new(df, 'asymp_det_cumul'),
                                'new_symptomatic_mild': count_new(df, 'symp_mild_cumul'),
                                'new_symptomatic_severe': count_new(df, 'symp_severe_cumul'),
                                'new_detected_symptomatic_mild': count_new(df, 'symp_mild_det_cumul'),
                                'new_detected_symptomatic_severe': count_new(df, 'symp_severe_det_cumul'),
                                'new_detected_hospitalized': count_new(df, 'hosp_det_cumul'),
                                'new_hospitalized': count_new(df, 'hosp_cumul'),
                                'new_detected': count_new(df, 'detected_cumul'),
                                'new_critical': count_new(df, 'crit_cumul'),
                                'new_detected_critical': count_new(df, 'crit_det_cumul'),
                                'new_detected_deaths': count_new(df, 'death_det_cumul'),
                                'new_deaths': count_new(df, 'deaths')
                                })
        sdf['run_num'] = run
        sdf['sample_num'] = samp
        sdf['scen_num'] = scen
        frames.append(sdf)
    # single concat instead of concatenating inside the loop (quadratic copying)
    inc_df = pd.concat(frames)

    adf = pd.merge(left=adf, right=inc_df, on=['run_num', 'sample_num', 'scen_num', 'time'])
    if output_filename:
        adf.to_csv(output_filename, index=False)
    return adf
def calculate_incidence_by_age(adf, age_group, output_filename=None):
    """Append per-timestep incidence ('new_*_<age_group>') columns derived
    from the cumulative channels of one age group, computed per (run_num,
    sample_num, scen_num) trajectory and merged back onto *adf*.

    Parameters:
        adf: trajectories frame with per-age cumulative channels.
        age_group: suffix identifying the age group's columns.
        output_filename: optional csv path; the merged frame is written there.

    Returns the merged DataFrame.
    """
    frames = []
    for (run, samp, scen), df in adf.groupby(['run_num', 'sample_num', 'scen_num']):
        sdf = pd.DataFrame({'time': df['time'],
                            # exposures = (negated) decrease in susceptibles
                            'new_exposures_%s' % age_group: [-1 * x for x in count_new(df, 'susceptible_%s' % age_group)],
                            'new_asymptomatic_%s' % age_group: count_new(df, 'asymp_cumul_%s' % age_group),
                            'new_asymptomatic_detected_%s' % age_group: count_new(df, 'asymp_det_cumul_%s' % age_group),
                            'new_symptomatic_mild_%s' % age_group: count_new(df, 'symp_mild_cumul_%s' % age_group),
                            'new_symptomatic_severe_%s' % age_group: count_new(df, 'symp_severe_cumul_%s' % age_group),
                            'new_detected_symptomatic_mild_%s' % age_group: count_new(df, 'symp_mild_det_cumul_%s' % age_group),
                            'new_detected_symptomatic_severe_%s' % age_group: count_new(df, 'symp_severe_det_cumul_%s' % age_group),
                            'new_detected_hospitalized_%s' % age_group: count_new(df, 'hosp_det_cumul_%s' % age_group),
                            'new_hospitalized_%s' % age_group: count_new(df, 'hosp_cumul_%s' % age_group),
                            'new_detected_%s' % age_group: count_new(df, 'detected_cumul_%s' % age_group),
                            'new_critical_%s' % age_group: count_new(df, 'crit_cumul_%s' % age_group),
                            'new_detected_critical_%s' % age_group: count_new(df, 'crit_det_cumul_%s' % age_group),
                            'new_detected_deaths_%s' % age_group: count_new(df, 'death_det_cumul_%s' % age_group),
                            'new_deaths_%s' % age_group: count_new(df, 'deaths_%s' % age_group)
                            })
        sdf['run_num'] = run
        sdf['sample_num'] = samp
        sdf['scen_num'] = scen
        frames.append(sdf)
    # single concat instead of concatenating inside the loop (quadratic copying)
    inc_df = pd.concat(frames)

    adf = pd.merge(left=adf, right=inc_df, on=['run_num', 'sample_num', 'scen_num', 'time'])
    if output_filename:
        adf.to_csv(output_filename, index=False)
    return adf
def load_capacity(ems):
    """Return hospital capacity thresholds for one covid region (or statewide).

    Reads the newest capacity_weekday_average_<date>.csv, keeps only the 100%
    overflow-threshold rows, and pivots resource types into columns.

    Parameters:
        ems: region number, or 'illinois'/0 for the statewide sum.

    Returns a dict with keys 'hosp_det', 'crit_det', 'ventilators'.
    """
    ### note, names need to match, simulations and capacity data already include outputs for all illinois
    file_path = os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'hospital_capacity_thresholds')
    files = os.listdir(file_path)
    # the 'extra_thresholds' variants are a different file format -- skip them
    files = [name for name in files if not 'extra_thresholds' in name]
    filedates = [item.replace('capacity_weekday_average_', '') for item in files]
    filedates = [item.replace('.csv', '') for item in filedates]
    # pick the most recent export by its integer date suffix
    latest_filedate = max([int(x) for x in filedates])
    fname = 'capacity_weekday_average_' + str(latest_filedate) + '.csv'
    ems_fname = os.path.join(datapath, 'covid_IDPH/Corona virus reports/hospital_capacity_thresholds/', fname)
    df = pd.read_csv(ems_fname)
    df = df[df['overflow_threshold_percent'] == 1]
    df['ems'] = df['geography_modeled']
    # strip the 'covidregion_' prefix so 'ems' holds the bare region number
    df['ems'] = df['geography_modeled'].replace("covidregion_", "", regex=True)
    df = df[['ems', 'resource_type', 'avg_resource_available']]
    df = df.drop_duplicates()
    df = df.pivot(index='ems', columns='resource_type', values='avg_resource_available')
    df.index.name = 'ems'
    df.reset_index(inplace=True)
    if ems == 'illinois' or ems == 0:
        df['ems'] = 'illinois'
        df = df.groupby('ems')[['hb_availforcovid', 'icu_availforcovid', 'vent_availforcovid']].agg(np.sum).reset_index()
    else :
        df = df[df['ems'] == str(ems)]
    # NOTE(review): int(Series) relies on the filter leaving exactly one row
    # and is deprecated in newer pandas -- presumably always single-row here;
    # verify (int(df['...'].iloc[0]) would be the explicit form).
    capacity = {'hosp_det': int(df['hb_availforcovid']),
                'crit_det': int(df['icu_availforcovid']),
                'ventilators': int(df['vent_availforcovid'])}
    return capacity
def civis_colnames(reverse=False):
    """Return the internal-to-Civis column-name mapping.

    Parameters:
        reverse: if True, return the inverted mapping (Civis name -> internal
            name).  Fix: this branch previously referenced an undefined
            variable 'col_names' and raised NameError.
    """
    colnames = {"ems": "geography_modeled",
                "infected_median": "cases_median",
                "infected_95CI_lower": "cases_lower",
                "infected_95CI_upper": "cases_upper",
                "new_infected_median": "cases_new_median",
                "new_infected_95CI_lower": "cases_new_lower",
                "new_infected_95CI_upper": "cases_new_upper",
                "new_symptomatic_median": "symptomatic_new_median",
                "new_symptomatic_95CI_lower": "symptomatic_new_lower",
                "new_symptomatic_95CI_upper": "symptomatic_new_upper",
                "new_deaths_median": "deaths_median",
                "new_deaths_95CI_lower": "deaths_lower",
                "new_deaths_95CI_upper": "deaths_upper",
                "new_detected_deaths_median": "deaths_det_median",
                "new_detected_deaths_95CI_lower": "deaths_det_lower",
                "new_detected_deaths_95CI_upper": "deaths_det_upper",
                "hospitalized_median": "hosp_bed_median",
                "hospitalized_95CI_lower": "hosp_bed_lower",
                "hospitalized_95CI_upper": "hosp_bed_upper",
                "hosp_det_median": "hosp_det_bed_median",
                "hosp_det_95CI_lower": "hosp_det_bed_lower",
                "hosp_det_95CI_upper": "hosp_det_bed_upper",
                "critical_median": "icu_median",
                "critical_95CI_lower": "icu_lower",
                "critical_95CI_upper": "icu_upper",
                "crit_det_median": "icu_det_median",
                "crit_det_95CI_lower": "icu_det_lower",
                "crit_det_95CI_upper": "icu_det_upper",
                "ventilators_median": "vent_median",
                "ventilators_95CI_lower": "vent_lower",
                "ventilators_95CI_upper": "vent_upper",
                "recovered_median": "recovered_median",
                "recovered_95CI_lower": "recovered_lower",
                "recovered_95CI_upper": "recovered_upper"}

    if reverse:
        colnames = {value: key for key, value in colnames.items()}
    return colnames
def get_parameter_names(include_new=True):
    """Return the sampled-parameter name lists used in the simulations.

    Parameters:
        include_new: include the newer immunity-related parameters in the
            combined list.  Fix: the original did
            `sample_params = sample_params.append(...)`, which set
            sample_params to None because list.append returns None.

    Returns a 4-tuple:
        (combined sample params, core sample params, IL-specific params,
         per-locale parameter stems).
    """
    sample_params_core = ['time_to_infectious',
                          'time_to_symptoms',
                          'time_to_hospitalization',
                          'time_to_critical',
                          'time_to_death',
                          'time_to_detection',
                          'time_to_detection_As',
                          'time_to_detection_Sym',
                          'time_to_detection_Sys',
                          'recovery_time_asymp',
                          'recovery_time_mild',
                          'recovery_time_hosp',
                          'recovery_time_crit',
                          'fraction_symptomatic',
                          'fraction_severe',
                          'fraction_critical',
                          'cfr',
                          'reduced_inf_of_det_cases',
                          'd_Sys',
                          'd_As',
                          'd_P']

    IL_specific_param = ['d_Sys_incr1',
                         'd_Sys_incr2',
                         'd_Sys_incr3',
                         'd_Sys_incr4',
                         'd_Sys_incr5',
                         'd_Sys_incr6',
                         'd_Sys_incr7',
                         'fraction_critical_incr1',
                         'fraction_critical_incr2',
                         'fraction_critical_incr3',
                         'detection_time_1',
                         'detection_time_2',
                         'detection_time_3',
                         'detection_time_4',
                         'detection_time_5',
                         'detection_time_6',
                         'detection_time_7',
                         'crit_time_1',
                         'crit_time_2',
                         'crit_time_3',
                         'd_Sym_change_time_1',
                         'd_Sym_change_time_2',
                         'd_Sym_change_time_3',
                         'd_Sym_change_time_4',
                         'd_Sym_change_time_5',
                         'cfr_time_1',
                         'cfr_time_2']

    IL_locale_param_stem = ['ki_multiplier_3a','ki_multiplier_3b','ki_multiplier_3c',
                            'ki_multiplier_4','ki_multiplier_5','ki_multiplier_6',
                            'ki_multiplier_7','ki_multiplier_8','ki_multiplier_9',
                            'ki_multiplier_10','ki_multiplier_11','ki_multiplier_12',
                            'd_Sym','d_Sym_change1','d_Sym_change2','d_Sym_change3',
                            'd_Sym_change4','d_Sym_change5','Ki','time_infection_import']

    sample_params = sample_params_core + IL_specific_param

    if include_new:
        sample_params_new = ['reduced_infectious_As', 'time_to_loose_immunity', 'fraction_lost_immunity']
        # concatenate (list.append returns None, which previously clobbered the list)
        sample_params = sample_params + sample_params_new

    return sample_params, sample_params_core, IL_specific_param, IL_locale_param_stem
|
{"hexsha": "c528aadc832c18f25d5d556c376e3bc7fae80ddc", "size": 22051, "ext": "py", "lang": "Python", "max_stars_repo_path": "processing_helpers.py", "max_stars_repo_name": "reeserich/covid-chicago", "max_stars_repo_head_hexsha": "88bdc556aebdd7e443e4756e7421160d230f5a01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "processing_helpers.py", "max_issues_repo_name": "reeserich/covid-chicago", "max_issues_repo_head_hexsha": "88bdc556aebdd7e443e4756e7421160d230f5a01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "processing_helpers.py", "max_forks_repo_name": "reeserich/covid-chicago", "max_forks_repo_head_hexsha": "88bdc556aebdd7e443e4756e7421160d230f5a01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.0022222222, "max_line_length": 162, "alphanum_fraction": 0.6014693211, "include": true, "reason": "import numpy", "num_tokens": 5676}
|
import os
import yaml
import glob
import numpy as np
from hexrd import imageseries
from PySide2.QtGui import QCursor
from PySide2.QtCore import QObject, Qt, QPersistentModelIndex, QThreadPool, Signal
from PySide2.QtWidgets import QTableWidgetItem, QFileDialog, QMenu, QMessageBox
from hexrd.ui.async_worker import AsyncWorker
from hexrd.ui.cal_progress_dialog import CalProgressDialog
from hexrd.ui.hexrd_config import HexrdConfig
from hexrd.ui.image_file_manager import ImageFileManager
from hexrd.ui.ui_loader import UiLoader
"""
This panel is in charge of loading file(s) for the experiment. It is built
up in a few steps, and defines how they should be loaded, transformed, and
attempts to apply intelligent templates to avoid manual entry of everything.
The final act is to click load data and bring the data set in.
"""
class LoadPanel(QObject):
    """Qt panel for selecting, configuring, and loading imageseries data.

    Wraps the ``load_panel.ui`` form: the user picks a root directory and
    image files per detector, chooses dark-subtraction / aggregation /
    transform options, edits per-file omega metadata in a table, and
    finally reads the processed data into HexrdConfig().imageseries_dict.
    """
    # Emitted when new images are loaded
    new_images_loaded = Signal()
    def __init__(self, parent=None):
        """Load the .ui file, seed state from HexrdConfig, and wire signals."""
        super(LoadPanel, self).__init__(parent)
        loader = UiLoader()
        self.ui = loader.load_file('load_panel.ui', parent)
        self.ims = HexrdConfig().imageseries_dict
        self.parent_dir = HexrdConfig().images_dir
        # Per-detector lists of selected file paths.
        self.files = []
        # Parallel per-file omega bookkeeping; '' marks "not set".
        self.omega_min = []
        self.omega_max = []
        self.dark_file = None
        # Index of the detector currently shown in the table.
        self.idx = 0
        # File extension of the current selection (e.g. '.yml', '.tiff').
        self.ext = ''
        self.setup_gui()
        self.setup_connections()
    # Setup GUI
    def setup_gui(self):
        """Initialize widgets from the persisted load-panel state."""
        if not HexrdConfig().load_panel_state:
            # First run: seed the persisted state with defaults.
            HexrdConfig().load_panel_state = {'agg': 0, 'trans': 0, 'dark': 0}
        self.state = HexrdConfig().load_panel_state
        self.ui.aggregation.setCurrentIndex(self.state['agg'])
        self.ui.transform.setCurrentIndex(self.state['trans'])
        self.ui.darkMode.setCurrentIndex(self.state['dark'])
        if 'dark_file' in self.state:
            self.dark_file = self.state['dark_file']
            self.dark_mode_changed()
        if not self.parent_dir:
            self.ui.img_directory.setText('No directory set')
        else:
            self.ui.img_directory.setText(os.path.dirname(self.parent_dir))
        self.detectors_changed()
        self.ui.file_options.resizeColumnsToContents()
    def setup_connections(self):
        """Connect widget signals to their handler slots."""
        self.ui.image_folder.clicked.connect(self.select_folder)
        self.ui.image_files.clicked.connect(self.select_images)
        self.ui.selectDark.clicked.connect(self.select_dark_img)
        self.ui.read.clicked.connect(self.read_data)
        self.ui.darkMode.currentIndexChanged.connect(self.dark_mode_changed)
        self.ui.detector.currentIndexChanged.connect(self.create_table)
        self.ui.aggregation.currentIndexChanged.connect(self.agg_changed)
        self.ui.transform.currentIndexChanged.connect(self.trans_changed)
        self.ui.file_options.customContextMenuRequested.connect(
            self.contextMenuEvent)
        self.ui.file_options.cellChanged.connect(self.omega_data_changed)
        HexrdConfig().detectors_changed.connect(self.detectors_changed)
    # Handle GUI changes
    def dark_mode_changed(self):
        """Sync the dark-subtraction widgets with the selected dark mode.

        Mode index 4 means "use an external dark file"; all other modes
        compute the dark image from the loaded series (see get_dark_op).
        """
        self.state['dark'] = self.ui.darkMode.currentIndex()
        if self.state['dark'] == 4:
            self.ui.selectDark.setEnabled(True)
            self.ui.dark_file.setText(
                self.dark_file if self.dark_file else '(No File Selected)')
            self.enable_read()
        else:
            self.ui.selectDark.setEnabled(False)
            self.ui.dark_file.setText(
                '(Using ' + str(self.ui.darkMode.currentText()) + ')')
            self.enable_read()
            # A previously remembered dark file no longer applies.
            if 'dark_file' in self.state:
                del self.state['dark_file']
    def detectors_changed(self):
        """Repopulate the detector combo box from the current config."""
        self.ui.detector.clear()
        self.ui.detector.addItems(HexrdConfig().get_detector_names())
    def agg_changed(self):
        """Persist the chosen aggregation mode."""
        self.state['agg'] = self.ui.aggregation.currentIndex()
    def trans_changed(self):
        """Persist the chosen image transform."""
        self.state['trans'] = self.ui.transform.currentIndex()
    def dir_changed(self):
        """Refresh the directory label after parent_dir changes."""
        self.ui.img_directory.setText(os.path.dirname(self.parent_dir))
    def select_folder(self, new_dir=None):
        # This expects to define the root image folder.
        if not new_dir:
            # NOTE(review): this chained assignment also sets a (misspelled?)
            # 'images_dirtion' attribute on HexrdConfig as a side effect --
            # confirm whether only the caption string is intended.
            caption = HexrdConfig().images_dirtion = 'Select directory for images'
            new_dir = QFileDialog.getExistingDirectory(
                self.ui, caption, dir=self.parent_dir)
        # Only update if a new directory is selected
        if new_dir and new_dir != self.parent_dir:
            self.ui.image_files.setEnabled(True)
            HexrdConfig().set_images_dir(new_dir)
            self.parent_dir = new_dir
            self.dir_changed()
    def select_dark_img(self):
        # This takes one image to use for dark subtraction.
        caption = HexrdConfig().images_dirtion = 'Select image file'
        selected_file, selected_filter = QFileDialog.getOpenFileNames(
            self.ui, caption, dir=self.parent_dir)
        if selected_file:
            # getOpenFileNames returns a list; only the first pick is used.
            self.dark_file = selected_file[0]
            self.state['dark_file'] = self.dark_file
            self.dark_mode_changed()
            self.enable_read()
    def select_images(self):
        # This takes one or more images for a single detector.
        caption = HexrdConfig().images_dirtion = 'Select image file(s)'
        selected_files, selected_filter = QFileDialog.getOpenFileNames(
            self.ui, caption, dir=self.parent_dir)
        if selected_files:
            if self.parent_dir is None:
                self.select_folder(os.path.dirname(selected_files[0]))
            self.reset_data()
            self.load_image_data(selected_files)
            self.create_table()
            self.enable_read()
    def reset_data(self):
        """Clear all per-selection bookkeeping before a new file pick."""
        self.directories = []
        self.empty_frames = 0
        self.total_frames = []
        self.omega_min = []
        self.omega_max = []
        self.delta = []
        self.files = []
    def load_image_data(self, selected_files):
        """Collect frame counts and omega metadata for the selected files.

        Populates self.files (via find_images) plus the parallel
        total_frames / omega_min / omega_max / delta lists that back the
        file-options table.
        """
        self.ext = os.path.splitext(selected_files[0])[1]
        has_omega = False
        # Select the path if the file(s) are HDF5
        if (ImageFileManager().is_hdf5(self.ext) and not
                ImageFileManager().path_exists(selected_files[0])):
            # NOTE(review): returning when path_prompt() returns a value
            # (i.e. the user apparently *did* choose a path) looks inverted --
            # confirm against ImageFileManager.path_prompt's contract.
            if ImageFileManager().path_prompt(selected_files[0]) is not None:
                return
        fnames = []
        tmp_ims = []
        for img in selected_files:
            f = os.path.split(img)[1]
            name = os.path.splitext(f)[0]
            if not self.ui.subdirectories.isChecked():
                # Strip the trailing '_<detector>' suffix from the stem.
                name = name.rsplit('_', 1)[0]
            if self.ext != '.yml':
                tmp_ims.append(ImageFileManager().open_file(img))
            fnames.append(name)
        self.find_images(fnames)
        if not self.files:
            return
        if self.ext == '.yml':
            # YAML selections: frame counts come from the referenced image
            # files, omega data from the YAML 'meta' section.
            for yf in self.yml_files[0]:
                ims = ImageFileManager().open_file(yf)
                self.total_frames.append(len(ims))
            for f in self.files[0]:
                with open(f, 'r') as raw_file:
                    data = yaml.safe_load(raw_file)
                if 'ostart' in data['meta'] or 'omega' in data['meta']:
                    self.get_yaml_omega_data(data)
                else:
                    self.omega_min = [''] * len(self.yml_files[0])
                    self.omega_max = [''] * len(self.yml_files[0])
                    self.delta = [''] * len(self.yml_files[0])
                self.empty_frames = data['options']['empty-frames']
        else:
            # Direct image selections: read omega from imageseries metadata
            # when present, else leave the table cells blank for manual entry.
            for ims in tmp_ims:
                has_omega = 'omega' in ims.metadata
                self.total_frames.append(len(ims))
                if has_omega:
                    self.get_omega_data(ims)
                else:
                    self.omega_min.append('')
                    self.omega_max.append('')
                    self.delta.append('')
    def get_omega_data(self, ims):
        """Append the omega range read from imageseries metadata."""
        minimum = ims.metadata['omega'][0][0]
        size = len(ims.metadata['omega']) - 1
        maximum = ims.metadata['omega'][size][1]
        self.omega_min.append(minimum)
        self.omega_max.append(maximum)
        self.delta.append((maximum - minimum)/len(ims))
    def get_yaml_omega_data(self, data):
        """Extract omega ranges from YAML metadata.

        Supports either scalar 'ostart'/'ostop' keys or an 'omega' entry
        that is an inline array or a textual reference to a .npy file.
        """
        if 'ostart' in data['meta']:
            self.omega_min.append(data['meta']['ostart'])
            self.omega_max.append(data['meta']['ostop'])
            wedge = (data['meta']['ostop'] - data['meta']['ostart']) / self.total_frames[0]
            self.delta.append(wedge)
        else:
            if isinstance(data['meta']['omega'], str):
                # The file name is the third whitespace-separated token
                # (presumably a '!load-numpy-array <file>' style entry --
                # TODO confirm the exact format).
                words = data['meta']['omega'].split()
                fname = os.path.join(self.parent_dir, words[2])
                nparray = np.load(fname)
            else:
                nparray = data['meta']['omega']
            for idx, vals in enumerate(nparray):
                self.omega_min.append(vals[0])
                self.omega_max.append(vals[1])
                self.delta.append((vals[1] - vals[0]) / self.total_frames[idx])
    def find_images(self, fnames):
        """Locate matching files for every detector, by subdir or by suffix."""
        if (self.ui.subdirectories.isChecked()):
            self.find_directories()
            self.match_dirs_images(fnames)
        else:
            self.match_images(fnames)
        if self.files and self.ext == '.yml':
            self.get_yml_files()
    def find_directories(self):
        # Find all detector directories
        num_det = len(HexrdConfig().get_detector_names())
        dirs = []
        for sub_dir in os.scandir(os.path.dirname(self.parent_dir)):
            if (os.path.isdir(sub_dir)
                    and sub_dir.name in HexrdConfig().get_detector_names()):
                dirs.append(sub_dir.path)
        # Show error if expected detector directories are not found
        if len(dirs) != num_det:
            dir_names = []
            if len(dirs) > 0:
                for path in dirs:
                    dir_names.append(os.path.basename(path))
            diff = list(
                set(HexrdConfig().get_detector_names()) - set(dir_names))
            msg = (
                'ERROR - No directory found for the following detectors: \n'
                + str(diff)[1:-1])
            QMessageBox.warning(None, 'HEXRD', msg)
            return
        self.directories = sorted(dirs)[:num_det]
    def match_images(self, fnames):
        """Group files in parent_dir into per-detector lists by suffix."""
        file_list = []
        dets = []
        for item in os.scandir(self.parent_dir):
            file_name = os.path.splitext(item.name)[0]
            instance = file_name.rsplit('_', 1)[0]
            if instance == file_name:
                # No underscore suffix at all -- cannot carry a detector tag.
                continue
            det = file_name.rsplit('_', 1)[1]
            if os.path.isfile(item) and instance in fnames:
                file_list.append(item.path)
                if det and det not in dets:
                    dets.append(det)
                    self.files.append([])
        for f in file_list:
            det = f.rsplit('.', 1)[0].rsplit('_', 1)[1]
            if det in dets:
                i = dets.index(det)
                self.files[i].append(f)
        # Display error if equivalent files are not found for ea. detector
        files_per_det = all(len(self.files[0]) == len(elem) for elem in self.files)
        num_det = len(HexrdConfig().get_detector_names())
        if len(self.files) != num_det or not files_per_det:
            msg = ('ERROR - There must be the same number of files for each detector.')
            QMessageBox.warning(None, 'HEXRD', msg)
            self.files = []
            return
        # NOTE(review): the [:len(self.files)] slice is a no-op; this line
        # only sorts the per-detector lists.
        self.files = sorted(self.files)[:len(self.files)]
    def match_dirs_images(self, fnames):
        # Find the images with the same name for the remaining detectors
        for i in range(len(self.directories)):
            self.files.append([])
            for item in os.scandir(self.directories[i]):
                fname = os.path.splitext(item.name)[0]
                if os.path.isfile(item) and fname in fnames:
                    self.files[i].append(item.path)
            # Display error if equivalent files are not found for ea. detector
            if i > 0 and len(self.files[i]) != len(fnames):
                diff = list(set(self.files[i]) - set(self.files[i-1]))
                msg = ('ERROR - No equivalent file(s) found for '
                       + str(diff)[1:-1] + ' in ' + self.directories[i])
                QMessageBox.warning(None, 'HEXRD', msg)
                self.files = []
                break
    def get_yml_files(self):
        """Expand each selected YAML file into its raw image file list."""
        self.yml_files = []
        for det in self.files:
            files = []
            for f in det:
                with open(f, 'r') as yml_file:
                    data = yaml.safe_load(yml_file)['image-files']
                raw_images = data['files'].split()
                for raw_image in raw_images:
                    files.extend(glob.glob(
                        os.path.join(data['directory'], raw_image)))
            self.yml_files.append(files)
    def enable_read(self):
        """Enable the Read button only when required inputs are present.

        NOTE(review): the condition relies on 'and' binding tighter than
        'or', so '.tiff' selections bypass the omega-completeness check --
        confirm that is intentional.
        """
        if (self.ext == '.tiff'
                or '' not in self.omega_min and '' not in self.omega_max):
            if self.state['dark'] == 4 and self.dark_file is not None:
                self.ui.read.setEnabled(len(self.files))
                return
            elif self.state['dark'] != 4 and len(self.files):
                self.ui.read.setEnabled(True)
                return
        self.ui.read.setEnabled(False)
    # Handle table setup and changes
    def create_table(self):
        """(Re)build the file-options table for the selected detector.

        Columns: 0 file name, 1 empty frames, 2 total frames,
        3 omega min, 4 omega max, 5 delta.
        """
        # Create the table if files have successfully been selected
        if not len(self.files):
            return
        if self.ext == '.yml':
            table_files = self.yml_files
        else:
            table_files = self.files
        self.idx = self.ui.detector.currentIndex()
        self.ui.file_options.setRowCount(
            len(table_files[self.idx]))
        # Create the rows
        for row in range(self.ui.file_options.rowCount()):
            for column in range(self.ui.file_options.columnCount()):
                item = QTableWidgetItem()
                item.setTextAlignment(Qt.AlignCenter)
                self.ui.file_options.setItem(row, column, item)
        # Populate the rows
        for i in range(self.ui.file_options.rowCount()):
            curr = table_files[self.idx][i]
            self.ui.file_options.item(i, 0).setText(os.path.split(curr)[1])
            self.ui.file_options.item(i, 1).setText(str(self.empty_frames))
            self.ui.file_options.item(i, 2).setText(str(self.total_frames[i]))
            self.ui.file_options.item(i, 3).setText(str(self.omega_min[i]))
            self.ui.file_options.item(i, 4).setText(str(self.omega_max[i]))
            self.ui.file_options.item(i, 5).setText(str(self.delta[i]))
            # Set tooltips
            self.ui.file_options.item(i, 0).setToolTip(curr)
            self.ui.file_options.item(i, 3).setToolTip('Minimum must be set')
            self.ui.file_options.item(i, 4).setToolTip(
                'Must set either maximum or delta')
            self.ui.file_options.item(i, 5).setToolTip(
                'Must set either maximum or delta')
            # Don't allow editing of file name or total frames
            self.ui.file_options.item(i, 0).setFlags(Qt.ItemIsEnabled)
            self.ui.file_options.item(i, 2).setFlags(Qt.ItemIsEnabled)
            # If raw data offset can only be changed in YAML file
            if self.ext == '.yml':
                self.ui.file_options.item(i, 1).setFlags(Qt.ItemIsEnabled)
        self.ui.file_options.resizeColumnsToContents()
    def contextMenuEvent(self, event):
        # Allow user to delete selected file(s)
        menu = QMenu(self.ui)
        remove = menu.addAction('Remove Selected Files')
        action = menu.exec_(QCursor.pos())
        # Re-selects the current row if context menu is called on disabled cell
        i = self.ui.file_options.indexAt(event)
        self.ui.file_options.selectRow(i.row())
        indices = []
        if action == remove:
            for index in self.ui.file_options.selectedIndexes():
                indices.append(QPersistentModelIndex(index))
            for idx in indices:
                self.ui.file_options.removeRow(idx.row())
            if self.ui.file_options.rowCount():
                # Rebuild self.files from the rows that survived removal.
                for i in range(len(self.files)):
                    self.files[i] = []
                for row in range(self.ui.file_options.rowCount()):
                    f = self.ui.file_options.item(row, 0).text()
                    for i in range(len(self.files)):
                        # NOTE(review): plain concatenation -- no path
                        # separator between directory and file name; confirm
                        # self.directories entries end with one, else this
                        # should be os.path.join.
                        self.files[i].append(self.directories[i] + f)
            else:
                self.directories = []
                self.files = []
        self.enable_read()
    def omega_data_changed(self, row, column):
        # Update the values for equivalent files when the data is changed
        # Signals are blocked so the programmatic setText calls below do
        # not re-trigger this slot; derived cells are updated recursively.
        self.blockSignals(True)
        curr_val = self.ui.file_options.item(row, column).text()
        total_frames = self.total_frames[row] - self.empty_frames
        if curr_val != '':
            if column == 1:
                # Empty-frame count applies to every file in the table.
                self.empty_frames = int(curr_val)
                for r in range(self.ui.file_options.rowCount()):
                    self.ui.file_options.item(r, column).setText(str(curr_val))
                self.omega_data_changed(row, 3)
            # Update delta when min or max omega are changed
            elif column == 3:
                self.omega_min[row] = float(curr_val)
                if self.omega_max[row] or self.delta[row]:
                    self.omega_data_changed(row, 4)
            elif column == 4:
                self.omega_max[row] = float(curr_val)
                if self.omega_min[row] != '':
                    diff = abs(self.omega_max[row] - self.omega_min[row])
                    delta = diff / total_frames
                    self.delta[row] = delta
                    self.ui.file_options.item(row, 5).setText(
                        str(round(delta, 2)))
            elif column == 5:
                # Delta given: derive the maximum instead.
                self.delta[row] = float(curr_val)
                if self.omega_min[row] != '':
                    diff = self.delta[row] * total_frames
                    maximum = self.omega_min[row] + diff
                    self.omega_max[row] = maximum
                    self.ui.file_options.item(row, 4).setText(
                        str(float(maximum)))
        self.enable_read()
        self.blockSignals(False)
    # Process files
    def read_data(self):
        # When this is pressed read in a complete set of data for all detectors.
        # Run the imageseries processing in a background thread and display a
        # loading dialog
        # Create threads and loading dialog
        thread_pool = QThreadPool(self.parent())
        progress_dialog = CalProgressDialog(self.parent())
        progress_dialog.setWindowTitle('Loading Processed Imageseries')
        # Start processing in background
        worker = AsyncWorker(self.process_ims)
        thread_pool.start(worker)
        # On completion load imageseries and close loading dialog
        worker.signals.result.connect(self.finish_processing_ims)
        worker.signals.finished.connect(progress_dialog.accept)
        progress_dialog.exec_()
    def process_ims(self):
        """Open the selected files as imageseries and apply processing.

        Runs on a worker thread (see read_data).
        """
        # Open selected images as imageseries
        det_names = HexrdConfig().get_detector_names()
        if len(self.files[0]) > 1:
            for i, det in enumerate(det_names):
                if self.directories:
                    dirs = self.directories[i]
                else:
                    dirs = self.parent_dir
                ims = ImageFileManager().open_directory(dirs, self.files[i])
                HexrdConfig().imageseries_dict[det] = ims
        else:
            ImageFileManager().load_images(det_names, self.files)
        # Process the imageseries
        self.apply_operations(HexrdConfig().imageseries_dict)
        if self.state['agg']:
            self.display_aggregation(HexrdConfig().imageseries_dict)
        elif '' not in self.omega_min:
            self.add_omega_metadata(HexrdConfig().imageseries_dict)
    def finish_processing_ims(self):
        # Display processed images on completion
        # The setEnabled options will not be needed once the panel
        # is complete - those dialogs will be removed.
        self.parent().action_edit_angles.setEnabled(True)
        self.parent().image_tab_widget.load_images()
        self.new_images_loaded.emit()
    def apply_operations(self, ims_dict):
        # Apply the operations to the imageseries
        for key in ims_dict.keys():
            ops = []
            if self.state['dark'] != 5:
                if not self.empty_frames and self.state['dark'] == 1:
                    msg = ('ERROR: \n No empty frames set. '
                           + 'No dark subtracion will be performed.')
                    QMessageBox.warning(None, 'HEXRD', msg)
                    return
                else:
                    self.get_dark_op(ops, ims_dict[key])
            if self.state['trans']:
                self.get_flip_op(ops)
            frames = self.get_range(ims_dict[key])
            ims_dict[key] = imageseries.process.ProcessedImageSeries(
                ims_dict[key], ops, frame_list=frames)
    def get_dark_op(self, oplist, ims):
        # Create or load the dark image if selected
        # Modes: 0 median, 1 average of empty frames, 2 average of all
        # frames, 3 maximum, 4 median of an external dark file.
        if self.state['dark'] != 4:
            frames = len(ims)
            if self.state['dark'] == 0:
                darkimg = imageseries.stats.median(ims, frames)
            elif self.state['dark'] == 1:
                darkimg = imageseries.stats.average(ims, self.empty_frames)
            elif self.state['dark'] == 2:
                darkimg = imageseries.stats.average(ims, frames)
            else:
                darkimg = imageseries.stats.max(ims, frames)
        else:
            darkimg = imageseries.stats.median(
                ImageFileManager().open_file(self.dark_file))
        oplist.append(('dark', darkimg))
    def get_flip_op(self, oplist):
        # Change the image orientation
        # Index -> flip key: 1 vertical, 2 horizontal, 3 transpose,
        # 4/5/6 rotate 90/180/270 degrees.
        if self.state['trans'] == 0:
            return
        if self.state['trans'] == 1:
            key = 'v'
        elif self.state['trans'] == 2:
            key = 'h'
        elif self.state['trans'] == 3:
            key = 't'
        elif self.state['trans'] == 4:
            key = 'r90'
        elif self.state['trans'] == 5:
            key = 'r180'
        else:
            key = 'r270'
        oplist.append(('flip', key))
    def get_range(self, ims):
        """Return the frame range to keep, skipping leading empty frames."""
        if self.ext == '.yml':
            # YAML series already account for empty frames via the file.
            return range(len(ims))
        else:
            return range(self.empty_frames, len(ims))
    def display_aggregation(self, ims_dict):
        # Display aggregated image from imageseries
        # Collapses each series to a single frame: 1 max, 2 median,
        # otherwise average.
        for key in ims_dict.keys():
            if self.state['agg'] == 1:
                ims_dict[key] = [imageseries.stats.max(
                    ims_dict[key], len(ims_dict[key]))]
            elif self.state['agg'] == 2:
                ims_dict[key] = [imageseries.stats.median(
                    ims_dict[key], len(ims_dict[key]))]
            else:
                ims_dict[key] = [imageseries.stats.average(
                    ims_dict[key], len(ims_dict[key]))]
    def add_omega_metadata(self, ims_dict):
        # Add on the omega metadata if there is any
        files = self.yml_files if self.ext == '.yml' else self.files
        for key in ims_dict.keys():
            nframes = len(ims_dict[key])
            omw = imageseries.omega.OmegaWedges(nframes)
            for i in range(len(files[0])):
                nsteps = self.total_frames[i] - self.empty_frames
                start = self.omega_min[i]
                stop = self.omega_max[i]
                # Don't add wedges if defaults are unchanged
                # (start == stop makes the difference falsy).
                if not (start - stop):
                    return
                omw.addwedge(start, stop, nsteps)
            ims_dict[key].metadata['omega'] = omw.omegas
|
{"hexsha": "6499e3654ac695d909bce9dd12cc93e07f8450b3", "size": 24151, "ext": "py", "lang": "Python", "max_stars_repo_path": "hexrd/ui/load_panel.py", "max_stars_repo_name": "psavery/hexrdgui", "max_stars_repo_head_hexsha": "2a02dff4284bdddc48aa0bb17473cc651e161759", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hexrd/ui/load_panel.py", "max_issues_repo_name": "psavery/hexrdgui", "max_issues_repo_head_hexsha": "2a02dff4284bdddc48aa0bb17473cc651e161759", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hexrd/ui/load_panel.py", "max_forks_repo_name": "psavery/hexrdgui", "max_forks_repo_head_hexsha": "2a02dff4284bdddc48aa0bb17473cc651e161759", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0161550889, "max_line_length": 91, "alphanum_fraction": 0.5745517784, "include": true, "reason": "import numpy", "num_tokens": 5097}
|
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include "featurizer.h"
int main() {
Featurizer* feat = new Featurizer();
ifstream is("feature_index.dat", ios::binary);
boost::archive::binary_iarchive iar(is);
iar >> feat;
vector<string> features;
features.push_back("q0=a");
features.push_back("q0=b");
features.push_back("q0=c");
cout << "Total feature of : " << feat->num << endl;
for (auto i: feat->get_feature_indices(features)) {
cout << i << endl;
}
return 0;
}
|
{"hexsha": "65f491f81bf4e8d314028e088292ea1029eb27e8", "size": 734, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/serializetest.cpp", "max_stars_repo_name": "shurain/codesprint2014r1", "max_stars_repo_head_hexsha": "980b0191e9e90adc54778bdc5dbfbb41538e96f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-12-07T08:13:37.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-07T08:13:37.000Z", "max_issues_repo_path": "src/serializetest.cpp", "max_issues_repo_name": "shurain/codesprint2014r1", "max_issues_repo_head_hexsha": "980b0191e9e90adc54778bdc5dbfbb41538e96f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/serializetest.cpp", "max_forks_repo_name": "shurain/codesprint2014r1", "max_forks_repo_head_hexsha": "980b0191e9e90adc54778bdc5dbfbb41538e96f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6774193548, "max_line_length": 55, "alphanum_fraction": 0.6662125341, "num_tokens": 183}
|
// Copyright John Maddock 2008.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
# include <pch.hpp>
#ifndef BOOST_MATH_TR1_SOURCE
# define BOOST_MATH_TR1_SOURCE
#endif
#include <boost/math/tr1.hpp>
#include <boost/math/special_functions/bessel.hpp>
#include "c_policy.hpp"
// C99/TR1 float overload `cyl_bessel_if`: modified cylindrical Bessel
// function of the first kind, I_nu(x), forwarded to the Boost
// implementation under the library's C-interface policy.  The
// BOOST_PREVENT_MACRO_SUBSTITUTION tokens guard against platforms that
// define function-like macros with these names.
extern "C" float BOOST_MATH_TR1_DECL cyl_bessel_if BOOST_PREVENT_MACRO_SUBSTITUTION(float nu, float x) BOOST_MATH_C99_THROW_SPEC
{
   return c_policies::cyl_bessel_i BOOST_PREVENT_MACRO_SUBSTITUTION(nu, x);
}
|
{"hexsha": "4f4efaa2ba872131fad2d0838858dc348ce8c3b4", "size": 654, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/math/src/tr1/cyl_bessel_if.cpp", "max_stars_repo_name": "zyiacas/boost-doc-zh", "max_stars_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2015-07-12T13:04:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-30T23:23:46.000Z", "max_issues_repo_path": "libs/math/src/tr1/cyl_bessel_if.cpp", "max_issues_repo_name": "sdfict/boost-doc-zh", "max_issues_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/math/src/tr1/cyl_bessel_if.cpp", "max_forks_repo_name": "sdfict/boost-doc-zh", "max_forks_repo_head_hexsha": "689e5a3a0a4dbead1a960f7b039e3decda54aa2c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2015-12-23T01:51:57.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-25T04:58:32.000Z", "avg_line_length": 32.7, "max_line_length": 129, "alphanum_fraction": 0.7629969419, "num_tokens": 173}
|
import numpy
import pylab
import tables
import math
import matplotlib.transforms as mtransforms
pylab.rc('text', usetex=True)
# open HDF5 file
# NOTE(review): tables.openFile was renamed open_file in PyTables >= 3;
# this script targets the older API -- confirm the installed version.
lcFh = tables.openFile("s34-rte-slab_sol.h5")
# Quadrature ordinates mu in (0, 1]; build the mirrored negative half so
# the full [-1, 1] range can be plotted.
mu = lcFh.root.mu.read()
muM = mu*0.0
for i in range(mu.shape[0]):
    # Reverse and negate: muM is -mu in descending order of mu.
    muM[i] = -mu[mu.shape[0]-i-1]
# NOTE(review): numpy.float is a removed alias (NumPy >= 1.24); modern
# NumPy requires plain `float` here.
muExtended = numpy.zeros( (2*mu.shape[0], ), numpy.float )
muExtended[0:mu.shape[0]] = muM
muExtended[mu.shape[0]:] = mu
def computeRadiance(rads, phi):
    """Collapse azimuthal Fourier modes into the total radiance at azimuth phi.

    Parameters
    ----------
    rads : array of shape (n_modes, n_mu)
        Radiance Fourier coefficients, one row per azimuthal mode m.
    phi : float
        Azimuth angle in radians.

    Returns
    -------
    numpy array of shape (n_mu,)
        sum_m f_m * rads[m, :] * cos(m * phi), where f_0 = 0.5 and
        f_m = 1 otherwise (the standard half-weight for the m=0 mode).
    """
    nm, nmu = rads.shape[0], rads.shape[1]
    # Builtin `float` replaces the numpy.float alias, which was removed
    # in NumPy 1.24; the dtype (float64) is unchanged.
    totalRad = numpy.zeros((nmu, ), float)
    for m in range(nm):
        factor = 0.5 if m == 0 else 1.0
        totalRad = totalRad + factor*rads[m, :]*math.cos(m*phi)
    return totalRad
# Per-panel titles for the seven optical-depth slices.  Raw strings keep
# the LaTeX markup intact for pylab's usetex rendering.  (Bug fix: the
# tau_0/2 entry was missing its opening '$'.)
titleList = [r'$\tau=0$', r'$\tau=\tau_0/20$', r'$\tau=\tau_0/10$', \
    r'$\tau=\tau_0/5$', r'$\tau=\tau_0/2$', r'$\tau=3\tau_0/4$', r'$\tau=\tau_0$']
# Azimuth angles to plot, with the matching reference-data CSV for each.
phiVals = [0.0, math.pi/2]
fileNms = ["gs-radiances-phi0.csv", "gs-radiances-phiPi2.csv"]
# Index into phiVals/fileNms for the plotting loop below.
count = 0
# For each azimuth: overlay the reference CSV data (red dots) with the
# solver's radiances (black line) across seven optical-depth panels,
# then save the figure.
for phi in phiVals:
    gsDat = numpy.loadtxt(fileNms[count], delimiter=",")
    # First CSV column is mu; remaining columns are per-depth radiances.
    gsMu = gsDat[:,0]
    fig = pylab.figure(count)
    fig.subplots_adjust(hspace=1.0)
    # NOTE(review): numpy.float is a removed alias (NumPy >= 1.24).
    radiances = numpy.zeros( (2*mu.shape[0], ), numpy.float )
    for d in range(7):
        up = lcFh.root._v_children["upward_radiance_%d" % d].read()
        down = lcFh.root._v_children["downward_radiance_%d" % d].read()
        totalUp = computeRadiance(up, phi)
        totalDown = computeRadiance(down, phi)
        # Reverse the upward half so it lines up with muExtended's
        # negative-mu ordering.
        totalUpM = totalUp*0.0
        for i in range(totalUp.shape[0]):
            totalUpM[i] = totalUp[totalUp.shape[0]-i-1]
        radiances[0:totalUp.shape[0]] = totalUpM
        radiances[totalUp.shape[0]:] = totalDown
        ax = pylab.subplot(7, 1, d+1)
        pylab.plot(gsMu, gsDat[:,d+1], 'ro')
        pylab.plot(muExtended, radiances, '-k')
        if d < 6:
            ax.set_xticklabels([""]) # zap labels from X axis
        # Three evenly spaced ticks: bottom, middle, top of the data range.
        ylims = ax.get_ylim()
        ax.set_yticks([ylims[0], 0.5*(ylims[0]+ylims[1]), ylims[1]])
        pylab.title(titleList[d])
    pylab.savefig("s34-rte-slab-%s.png" % fileNms[count])
    count = count + 1
|
{"hexsha": "eae2e0407983cb003278ce5e5f73d389b78f7ad4", "size": 2125, "ext": "py", "lang": "Python", "max_stars_repo_path": "sims/s34/s34-mkplots.py", "max_stars_repo_name": "ammarhakim/ammar-simjournal", "max_stars_repo_head_hexsha": "85b64ddc9556f01a4fab37977864a7d878eac637", "max_stars_repo_licenses": ["MIT", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-19T16:21:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-19T16:21:13.000Z", "max_issues_repo_path": "sims/s34/s34-mkplots.py", "max_issues_repo_name": "ammarhakim/ammar-simjournal", "max_issues_repo_head_hexsha": "85b64ddc9556f01a4fab37977864a7d878eac637", "max_issues_repo_licenses": ["MIT", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sims/s34/s34-mkplots.py", "max_forks_repo_name": "ammarhakim/ammar-simjournal", "max_forks_repo_head_hexsha": "85b64ddc9556f01a4fab37977864a7d878eac637", "max_forks_repo_licenses": ["MIT", "Unlicense"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-08T06:23:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-08T07:06:50.000Z", "avg_line_length": 28.3333333333, "max_line_length": 90, "alphanum_fraction": 0.5967058824, "include": true, "reason": "import numpy", "num_tokens": 719}
|
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C * *
C * copyright (c) 1999 by UCAR *
C * *
C * UNIVERSITY CORPORATION for ATMOSPHERIC RESEARCH *
C * *
C * all rights reserved *
C * *
C * FISHPACK version 4.1 *
C * *
C * A PACKAGE OF FORTRAN SUBPROGRAMS FOR THE SOLUTION OF *
C * *
C * SEPARABLE ELLIPTIC PARTIAL DIFFERENTIAL EQUATIONS *
C * *
C * BY *
C * *
C * JOHN ADAMS, PAUL SWARZTRAUBER AND ROLAND SWEET *
C * *
C * OF *
C * *
C * THE NATIONAL CENTER FOR ATMOSPHERIC RESEARCH *
C * *
C * BOULDER, COLORADO (80307) U.S.A. *
C * *
C * WHICH IS SPONSORED BY *
C * *
C * THE NATIONAL SCIENCE FOUNDATION *
C * *
C * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C
C PROGRAM TO ILLUSTRATE THE USE OF SUBROUTINE POISTG TO
C SOLVE THE EQUATION
C
C (1/COS(X))(D/DX)(COS(X)(DU/DX)) + (D/DY)(DU/DY) =
C
C 2*Y**2*(6-Y**2)*SIN(X)
C
C ON THE RECTANGLE -PI/2 .LT. X .LT. PI/2 AND
C 0 .LT. Y .LT. 1 WITH THE BOUNDARY CONDITIONS
C
C (DU/DX) (-PI/2,Y) = (DU/DX)(PI/2,Y) = 0 , 0 .LE. Y .LE. 1 (2)
C
C U(X,0) = 0 (3)
C -PI/2 .LE. X .LE. PI/2
C (DU/DY)(X,1) = 4SIN(X) (4)
C
C USING FINITE DIFFERENCES ON A STAGGERED GRID WITH
C DELTAX (= DX) = PI/40 AND DELTAY (= DY) = 1/20 .
C TO SET UP THE FINITE DIFFERENCE EQUATIONS WE DEFINE
C THE GRID POINTS
C
C X(I) = -PI/2 + (I-0.5)DX I=1,2,...,40
C
C               Y(J) = (J-0.5)DY            J=1,2,...,20
C
C AND LET V(I,J) BE AN APPROXIMATION TO U(X(I),Y(J)).
C NUMBERING THE GRID POINTS IN THIS FASHION GIVES THE SET
C OF UNKNOWNS AS V(I,J) FOR I=1,2,...,40 AND J=1,2,...,20.
C HENCE, IN THE PROGRAM M = 40 AND N = 20. AT THE INTERIOR
C GRID POINT (X(I),Y(J)), WE REPLACE ALL DERIVATIVES IN
C EQUATION (1) BY SECOND ORDER CENTRAL FINITE DIFFERENCES,
C MULTIPLY BY DY**2, AND COLLECT COEFFICIENTS OF V(I,J) TO
C GET THE FINITE DIFFERENCE EQUATION
C
C A(I)V(I-1,J) + B(I)V(I,J) + C(I)V(I+1,J)
C
C + V(I,J-1) - 2V(I,J) + V(I,J+1) = F(I,J) (5)
C
C WHERE S = (DY/DX)**2, AND FOR I=2,3,...,39
C
C A(I) = S*COS(X(I)-DX/2)
C
C B(I) = -S*(COS(X(I)-DX/2)+COS(X(I)+DX/2))
C
C C(I) = S*COS(X(I)+DX/2)
C
C F(I,J) = 2DY**2*Y(J)**2*(6-Y(J)**2)*SIN(X(I)) , J=1,2,...,19.
C
C TO OBTAIN EQUATIONS FOR I = 1, WE REPLACE EQUATION (2)
C BY THE SECOND ORDER APPROXIMATION
C
C (V(1,J)-V(0,J))/DX = 0
C
C AND USE THIS EQUATION TO ELIMINATE V(0,J) IN EQUATION (5)
C TO ARRIVE AT THE EQUATION
C
C B(1)V(1,J) + C(1)V(2,J) + V(1,J-1) - 2V(1,J) + V(1,J+1)
C
C = F(1,J)
C
C WHERE
C
C B(1) = -S*(COS(X(1)-DX/2)+COS(X(1)+DX/2))
C
C C(1) = -B(1)
C
C FOR COMPLETENESS, WE SET A(1) = 0.
C TO OBTAIN EQUATIONS FOR I = 40, WE REPLACE THE DERIVATIVE
C IN EQUATION (2) AT X=PI/2 IN A SIMILAR FASHION, USE THIS
C EQUATION TO ELIMINATE THE VIRTUAL UNKNOWN V(41,J) IN EQUATION
C (5) AND ARRIVE AT THE EQUATION
C
C A(40)V(39,J) + B(40)V(40,J)
C
C + V(40,J-1) - 2V(40,J) + V(40,J+1) = F(40,J)
C
C WHERE
C
C A(40) = -B(40) = -S*(COS(X(40)-DX/2)+COS(X(40)+DX/2))
C
C FOR COMPLETENESS, WE SET C(40) = 0. HENCE, IN THE
C PROGRAM MPEROD = 1.
C FOR J = 1, WE REPLACE EQUATION (3) BY THE SECOND ORDER
C APPROXIMATION
C
C (V(I,0) + V(I,1))/2 = 0
C
C TO ARRIVE AT THE CONDITION
C
C V(I,0) = -V(I,1) .
C
C FOR J = 20, WE REPLACE EQUATION (4) BY THE SECOND ORDER
C APPROXIMATION
C
C (V(I,21) - V(I,20))/DY = 4*SIN(X)
C
C AND COMBINE THIS EQUATION WITH EQUATION (5) TO ARRIVE AT
C THE EQUATION
C
C A(I)V(I-1,20) + B(I)V(I,20) + C(I)V(I+1,20)
C
C + V(I,19) - 2V(I,20) + V(I,21) = F(I,20)
C
C WHERE
C
C V(I,21) = V(I,20) AND
C
C F(I,20) = 2*DY**2*Y(J)**2*(6-Y(J)**2)*SIN(X(I)) - 4*DY*SIN(X(I))
C
C HENCE, IN THE PROGRAM NPEROD = 2 .
C THE EXACT SOLUTION TO THIS PROBLEM IS
C
C               U(X,Y) = Y**4*SIN(X) .
C
C
C     DRIVER.  THE COMMENTS ABOVE DERIVE THE COEFFICIENTS AND RIGHT HAND
C     SIDE ASSEMBLED BELOW; POISTG SOLVES THE STAGGERED-GRID SYSTEM AND
C     THE RESULT IS COMPARED AGAINST THE EXACT SOLUTION Y**4*SIN(X).
C
      DIMENSION F(42,20)    ,A(40)      ,B(40)      ,C(40)      ,
     1                W(600)     ,X(40)      ,Y(20)
C
C     FROM DIMENSION STATEMENT WE GET VALUE OF IDIMF = 42.  ALSO
C     NOTE THAT W HAS BEEN DIMENSIONED
C     9M + 4N + M(INT(LOG2(N))) = 360 + 80 + 160 = 600 .
C
      IDIMF = 42
      MPEROD = 1
      M = 40
C
C     PIMACH IS THE FISHPACK UTILITY RETURNING AN APPROXIMATION TO PI.
C
      PI = PIMACH(DUM)
      DX = PI/FLOAT(M)
      NPEROD = 2
      N = 20
      DY = 1./FLOAT(N)
C
C     GENERATE AND STORE GRID POINTS FOR COMPUTATION.
C
      DO 101 I=1,M
         X(I) = -PI/2.+(FLOAT(I)-0.5)*DX
  101 CONTINUE
      DO 102 J=1,N
         Y(J) = (FLOAT(J)-0.5)*DY
  102 CONTINUE
C
C     GENERATE COEFFICIENTS .
C
      S = (DY/DX)**2
      A(1) = 0.
      B(1) = -S*COS(-PI/2.+DX)/COS(X(1))
      C(1) = -B(1)
      DO 103 I=2,M
         A(I) = S*COS(X(I)-DX/2.)/COS(X(I))
         C(I) = S*COS(X(I)+DX/2.)/COS(X(I))
         B(I) = -(A(I)+C(I))
  103 CONTINUE
C
C     NEUMANN CONDITION AT X = PI/2 (SEE DERIVATION ABOVE).
C
      A(40) = -B(40)
      C(40) = 0.
C
C     GENERATE RIGHT SIDE OF EQUATION.
C
      DO 105 I=1,M
         DO 104 J=1,N
            F(I,J) = 2.*DY**2*Y(J)**2*(6.-Y(J)**2)*SIN(X(I))
  104    CONTINUE
  105 CONTINUE
C
C     FOLD THE DERIVATIVE BOUNDARY CONDITION AT Y = 1 INTO THE LAST ROW.
C
      DO 106 I=1,M
         F(I,N) = F(I,N)-4.*DY*SIN(X(I))
  106 CONTINUE
      CALL POISTG (NPEROD,N,MPEROD,M,A,B,C,IDIMF,F,IERROR,W)
C
C     COMPUTE DISCRETIZATION ERROR.  THE EXACT SOLUTION IS
C
C     U(X,Y) = Y**4*SIN(X)
C
      ERR = 0.
      DO 108 I=1,M
         DO 107 J=1,N
            T = ABS(F(I,J)-Y(J)**4*SIN(X(I)))
            IF (T .GT. ERR) ERR = T
  107    CONTINUE
  108 CONTINUE
      PRINT 1001 , IERROR,ERR,W(1)
      STOP
C
 1001 FORMAT (1H1,20X,25HSUBROUTINE POISTG EXAMPLE///
     1        10X,46HTHE OUTPUT FROM THE NCAR CONTROL DATA 7600 WAS//
     2        32X,10HIERROR = 0/
     3        18X,34HDISCRETIZATION ERROR = 5.64171E-04/
     4        12X,32HREQUIRED LENGTH OF W ARRAY = 560//
     5        10X,32HTHE OUTPUT FROM YOUR COMPUTER IS//
     6        32X,8HIERROR =,I2/18X,22HDISCRETIZATION ERROR =,E12.5/
     7        12X,28HREQUIRED LENGTH OF W ARRAY =,F4.0)
C
      END
|
{"hexsha": "0287df6a19803891fd9c3aa621e19c139a2d84d1", "size": 7519, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/omuse/community/qgmodel/src/fishpack4.1/test/tpoistg.f", "max_stars_repo_name": "ipelupessy/omuse", "max_stars_repo_head_hexsha": "83850925beb4b8ba6050c7fa8a1ef2371baf6fbb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-03-25T10:02:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T00:28:35.000Z", "max_issues_repo_path": "src/omuse/community/qgmodel/src/fishpack4.1/test/tpoistg.f", "max_issues_repo_name": "ipelupessy/omuse", "max_issues_repo_head_hexsha": "83850925beb4b8ba6050c7fa8a1ef2371baf6fbb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 45, "max_issues_repo_issues_event_min_datetime": "2020-03-03T16:07:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T09:01:07.000Z", "max_forks_repo_path": "src/omuse/community/qgmodel/src/fishpack4.1/test/tpoistg.f", "max_forks_repo_name": "ipelupessy/omuse", "max_forks_repo_head_hexsha": "83850925beb4b8ba6050c7fa8a1ef2371baf6fbb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-03-03T13:28:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T09:20:02.000Z", "avg_line_length": 34.1772727273, "max_line_length": 71, "alphanum_fraction": 0.4150817928, "num_tokens": 2680}
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
def make_plots():
    """Placeholder entry point; performs no work and returns None."""
    return None
def plot_detected_planet_contrasts(planet_table,wv_index,detected,flux_ratios,instrument,telescope,
    show=True,save=False,ymin=1e-9,ymax=1e-4,xmin=0.,xmax=1.,alt_data=None,alt_label=""):
    '''
    Make a scatter plot of planet-to-star flux ratio vs. angular separation
    at a given wavelength index, highlighting which planets were detected.

    Inputs:
    planet_table - a Universe.planets table
    wv_index     - the index into the instrument.current_wvs wavelength
                   array to consider
    detected     - a boolean array of shape [n_planets,n_wvs] that indicates
                   whether or not a planet was detected at a given wavelength
    flux_ratios  - an array of flux ratios between the planet and the star
                   at the given wavelength, shape [n_planets,n_wvs]
    instrument   - an instance of the psisim.instrument class
    telescope    - an instance of the psisim.telescope class

    Keyword Arguments:
    show      - do you want to show the plot? Boolean
    save      - do you want to save the plot? Boolean
    ymin,ymax,xmin,xmax - the limits on the plot (xmin/xmax are currently
                unused; the set_xlim call below is commented out)
    alt_data  - An optional secondary dataset of shape [n_planets,n_wvs],
                e.g. detection limits or another set of atmospheric models
    alt_label - legend label for alt_data

    Returns:
    fig,ax    - the matplotlib figure and axes for further customization
    '''
    fig,ax = plt.subplots(1,1,figsize=(7,5))

    # Angular separations in arcseconds, one entry per planet.
    # NOTE(review): 'u' (presumably astropy.units) is not imported in this
    # module -- confirm it is supplied elsewhere at import time.
    seps = np.array([planet_table_entry['AngSep'].to(u.arcsec).value for planet_table_entry in planet_table])

    #Plot the non-detections
    ax.scatter(seps[~detected[:,wv_index]],flux_ratios[:,wv_index][~detected[:,wv_index]],
        marker='.',label="Full Sample",s=20)

    # Planet masses (Earth masses) are used to color the detected points.
    masses = np.array([planet_table_entry['PlanetMass'].to(u.earthMass).value for planet_table_entry in planet_table])

    #Plot the detections, colored by planet mass on a log scale (1-1000 M_Earth)
    scat = ax.scatter(seps[detected[:,wv_index]],flux_ratios[:,wv_index][detected[:,wv_index]],marker='o',
        label="Detected",c=masses[detected[:,wv_index]],cmap='gist_heat',edgecolors='k',norm=colors.LogNorm(vmin=1,vmax=1000))
    fig.colorbar(scat,label=r"Planet Mass [$M_{\oplus}$]",ax=ax)

    #Plot vertical markers at 1 and 2 lambda/D (wavelengths are in microns, so
    #1e-6 converts to meters; 206265 converts radians to arcseconds)
    ax.plot([instrument.current_wvs[wv_index]*1e-6/telescope.diameter*206265,instrument.current_wvs[wv_index]*1e-6/telescope.diameter*206265],
        [0,1.],label=r"$\lambda/D$ at $\lambda=${:.3f}$\mu m$".format(instrument.current_wvs[wv_index]),color='k')
    ax.plot([2*instrument.current_wvs[wv_index]*1e-6/telescope.diameter*206265,2*instrument.current_wvs[wv_index]*1e-6/telescope.diameter*206265],
        [0,1.],'-.',label=r"$2\lambda/D$ at $\lambda=${:.3f}$\mu m$".format(instrument.current_wvs[wv_index]),color='k')

    #If alt_data is passed, plot it and connect each secondary point to the
    #corresponding primary flux-ratio point with a faint vertical line
    if alt_data is not None:
        ax.scatter(seps,alt_data[:,wv_index],marker='.',
            label=alt_label,color='darkviolet',s=20)
        for i,sep in enumerate(seps):
            ax.plot([sep,sep],[flux_ratios[i,wv_index],alt_data[i,wv_index]],color='k',alpha=0.1,linewidth=1)

    #Axis title
    ax.set_title("Planet Detection Yield at {:.3}um".format(instrument.current_wvs[wv_index]),fontsize=18)

    #Legend; recolor the last handle so it reads as a "detected" marker
    legend = ax.legend(loc='upper right',fontsize=13)
    legend.legendHandles[-1].set_color('orangered')
    legend.legendHandles[-1].set_edgecolor('k')

    #Plot setup
    ax.set_ylabel("Total Intensity Flux Ratio",fontsize=16)
    ax.set_xlabel("Separation ['']",fontsize=16)
    # ax.set_xlim(xmin,xmax)
    ax.set_ylim(ymin,ymax)
    ax.set_yscale('log')
    ax.set_xscale('log')

    #Do we show it?
    if show:
        plt.show()

    plt.tight_layout()

    #Do we save it?
    if save:
        plt.savefig("Detected_Planets_flux_v_sma.png",bbox_inches="tight")

    #Return the figure so that the user can manipulate it more if they so please
    return fig,ax
def plot_detected_planet_magnitudes(planet_table,wv_index,detected,flux_ratios,instrument,telescope,
    show=True,save=False,ymin=1,ymax=30,xmin=0.,xmax=1.,alt_data=None,alt_label=""):
    '''
    Make a scatter plot of apparent planet magnitude vs. angular separation
    at a given wavelength index, highlighting which planets were detected.

    Inputs:
    planet_table - a Universe.planets table
    wv_index     - the index into the instrument.current_wvs wavelength
                   array to consider
    detected     - a boolean array of shape [n_planets,n_wvs] that indicates
                   whether or not a planet was detected at a given wavelength
    flux_ratios  - an array of flux ratios between the planet and the star
                   at the given wavelength, shape [n_planets,n_wvs]
    instrument   - an instance of the psisim.instrument class
    telescope    - an instance of the psisim.telescope class

    Keyword Arguments:
    show      - do you want to show the plot? Boolean
    save      - do you want to save the plot? Boolean
    ymin,ymax,xmin,xmax - the limits on the plot (xmin/xmax are currently
                unused; the set_xlim call below is commented out)
    alt_data  - An optional secondary dataset of shape [n_planets,n_wvs]
    alt_label - legend label for alt_data

    Returns:
    fig,ax    - the matplotlib figure and axes for further customization

    Raises:
    ValueError - if instrument.current_filter is not one of R,I,J,H,K,L,M
    '''
    fig,ax = plt.subplots(1,1,figsize=(7,5))

    #convert flux ratios to delta-magnitudes relative to the host star
    dMags = -2.5*np.log10(flux_ratios[:,wv_index])

    # Map the instrument filter to the BEX model column and the stellar
    # magnitude column. L and M reuse the K-band stellar magnitude
    # (presumably because no L/M stellar magnitudes exist in the table --
    # TODO confirm).
    band = instrument.current_filter
    band_columns = {
        'R': ('CousinsR', 'StarRmag'),
        'I': ('CousinsI', 'StarImag'),
        'J': ('SPHEREJ', 'StarJmag'),
        'H': ('SPHEREH', 'StarHmag'),
        'K': ('SPHEREKs', 'StarKmag'),
        'L': ('NACOLp', 'StarKmag'),
        'M': ('NACOMp', 'StarKmag'),
    }
    if band not in band_columns:
        raise ValueError("Band needs to be 'R', 'I', 'J', 'H', 'K', 'L', 'M'. Got {0}.".format(band))
    bexlabel, starlabel = band_columns[band]

    stellar_mags = planet_table[starlabel]
    stellar_mags = np.array(stellar_mags)

    # Apparent planet magnitude = stellar magnitude + delta magnitude
    planet_mag = stellar_mags+dMags

    # Angular separations in arcseconds.
    # NOTE(review): 'u' (presumably astropy.units) is not imported in this
    # module -- confirm it is supplied elsewhere at import time.
    seps = np.array([planet_table_entry['AngSep'].to(u.arcsec).value for planet_table_entry in planet_table])

    #Plot the non-detections
    ax.scatter(seps[~detected[:,wv_index]],planet_mag[:][~detected[:,wv_index]],
        marker='.',label="Full Sample",s=20)

    # Planet masses (Earth masses) color the detected points.
    masses = np.array([planet_table_entry['PlanetMass'].to(u.earthMass).value for planet_table_entry in planet_table])

    #Plot the detections, colored by planet mass on a log scale (1-1000 M_Earth)
    scat = ax.scatter(seps[detected[:,wv_index]],planet_mag[:][detected[:,wv_index]],marker='o',
        label="Detected",c=masses[detected[:,wv_index]],cmap='gist_heat',edgecolors='k',norm=colors.LogNorm(vmin=1,vmax=1000))
    fig.colorbar(scat,label=r"Planet Mass [$M_{\oplus}$]",ax=ax)

    # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` here that
    # froze every call to this function at a debugger prompt.

    #Plot vertical markers at 1 and 2 lambda/D (1e-6: microns->meters;
    #206265: radians->arcseconds)
    ax.axvline(instrument.current_wvs[wv_index]*1e-6/telescope.diameter*206265,color='k',)
    ax.axvline(2*instrument.current_wvs[wv_index]*1e-6/telescope.diameter*206265,color='k',linestyle='--')
    # Horizontal reference line at magnitude 19.1.
    # NOTE(review): the meaning of 18.7+0.4 is undocumented and the label is
    # empty (adds a blank legend entry) -- confirm intent.
    ax.axhline(18.7+0.4,color='r',linestyle='-.',label="")

    #If alt_data is passed, plot it and connect each secondary point to the
    #corresponding primary flux-ratio point with a faint vertical line
    if alt_data is not None:
        ax.scatter(seps,alt_data[:,wv_index],marker='.',
            label=alt_label,color='darkviolet',s=20)
        for i,sep in enumerate(seps):
            ax.plot([sep,sep],[flux_ratios[i,wv_index],alt_data[i,wv_index]],color='k',alpha=0.1,linewidth=1)

    #Axis title
    ax.set_title("Planet Detection Yield at {:.3}um".format(instrument.current_wvs[wv_index]),fontsize=18)

    #Legend; recolor the last handle so it reads as a "detected" marker
    legend = ax.legend(loc='upper right',fontsize=13)
    legend.legendHandles[-1].set_color('orangered')
    legend.legendHandles[-1].set_edgecolor('k')

    #Plot setup
    ax.set_ylabel(r"Planet Magnitude at {:.1f}$\mu m$".format(instrument.current_wvs[wv_index]),fontsize=16)
    ax.set_xlabel("Separation ['']",fontsize=16)
    # ax.set_xlim(xmin,xmax)
    ax.set_ylim(ymin,ymax)
    # ax.set_yscale('log')
    ax.set_xscale('log')

    #Do we show it?
    if show:
        plt.show()

    plt.tight_layout()

    #Do we save it?
    if save:
        plt.savefig("Detected_Planets_flux_v_sma.png",bbox_inches="tight")

    #Return the figure so that the user can manipulate it more if they so please
    return fig,ax
def plot_detected_planet_mass(planet_table,detected,show=True,**kwargs):
    '''
    Plot overlaid histograms of the masses of detected and non-detected
    planets (normalized densities, log-scaled mass axis).

    Inputs:
    planet_table - a Universe.planets table
    detected     - boolean array (one entry per planet) marking detections
    show         - do you want to show the plot? Boolean
    kwargs       - forwarded to matplotlib's hist (e.g. bins, alpha)

    Returns:
    fig,ax1      - the matplotlib figure and axes
    '''
    # BUG FIX: masses must be a numpy array -- a plain Python list cannot be
    # indexed with the boolean mask `detected` / `~detected`.
    # NOTE(review): 'u' (presumably astropy.units) is not imported in this
    # module -- confirm it is supplied elsewhere at import time.
    masses = np.asarray([planet_table_entry['PlanetMass'].to(u.earthMass).value for planet_table_entry in planet_table])

    fig = plt.figure(figsize=(7,4))
    ax1 = fig.add_subplot(111)

    ax1.hist(masses[~detected],label="Non-Detections",density=True,**kwargs)
    ax1.hist(masses[detected],label="Detections",density=True,**kwargs)

    ax1.set_xlabel(r"Planet Masses [M$_{Earth}$]")
    ax1.set_ylabel(r"Number of Planets")
    ax1.set_xscale("log")

    # Honor the `show` flag (previously accepted but ignored), matching the
    # behavior of the other plot_* functions in this module.
    if show:
        plt.show()

    return fig,ax1
def plot_detected_planet_mass(planet_table,detected,show=True,**kwargs):
    '''
    Plot overlaid histograms of the masses of detected and non-detected
    planets (normalized densities, log-scaled mass axis).

    NOTE(review): this is a duplicate definition that shadows the earlier
    function of the same name in this module -- consider removing one.

    Inputs:
    planet_table - a Universe.planets table
    detected     - boolean array (one entry per planet) marking detections
    show         - do you want to show the plot? Boolean
    kwargs       - forwarded to matplotlib's hist (e.g. bins, alpha)

    Returns:
    fig,ax1      - the matplotlib figure and axes
    '''
    # BUG FIX: take `.value` so the list holds plain floats (not Quantity
    # objects), and wrap in np.asarray so boolean-mask indexing works.
    # NOTE(review): 'u' (presumably astropy.units) is not imported in this
    # module -- confirm it is supplied elsewhere at import time.
    masses = np.asarray([planet_table_entry['PlanetMass'].to(u.earthMass).value for planet_table_entry in planet_table])

    fig = plt.figure(figsize=(7,4))
    ax1 = fig.add_subplot(111)

    ax1.hist(masses[~detected],label="Non-Detections",density=True,**kwargs)
    ax1.hist(masses[detected],label="Detections",density=True,**kwargs)

    ax1.set_xlabel(r"Planet Masses [M$_{Earth}$]")
    ax1.set_ylabel(r"Number of Planets")
    ax1.set_xscale("log")

    # Honor the `show` flag, matching the other plot_* functions.
    if show:
        plt.show()

    return fig,ax1
|
{"hexsha": "0635b561796402814ff51cd4e08da51d9c081716", "size": 10358, "ext": "py", "lang": "Python", "max_stars_repo_path": "psisim/plots.py", "max_stars_repo_name": "abgibbs/psisim", "max_stars_repo_head_hexsha": "9b0a6ac4f134cabcd2b10a03e20b2fcb58c8afe7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-06-08T01:09:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T21:36:20.000Z", "max_issues_repo_path": "psisim/plots.py", "max_issues_repo_name": "abgibbs/psisim", "max_issues_repo_head_hexsha": "9b0a6ac4f134cabcd2b10a03e20b2fcb58c8afe7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2019-06-26T20:42:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T21:52:44.000Z", "max_forks_repo_path": "psisim/plots.py", "max_forks_repo_name": "abgibbs/psisim", "max_forks_repo_head_hexsha": "9b0a6ac4f134cabcd2b10a03e20b2fcb58c8afe7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-22T21:28:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-29T22:50:08.000Z", "avg_line_length": 39.9922779923, "max_line_length": 146, "alphanum_fraction": 0.652249469, "include": true, "reason": "import numpy", "num_tokens": 2852}
|
Require Export SfLib.
Require Export HelperFunctions.
(* A B+ tree of order [b] over values of type [X]: a leaf stores a list of
   key/value pairs, an internal node stores a list of key/subtree pairs. *)
Inductive bplustree (b: nat) (X:Type) : Type :=
  | bptLeaf : list (nat * X) -> bplustree b X
  | bptNode : list (nat * (bplustree b X)) -> bplustree b X
  .
(* Display/construction notations for leaves and nodes.
   NOTE(review): the node notation applies [bptNode] to an extra first
   argument [f] in addition to the list, while the constructor above takes
   only a list -- confirm this matches the intended (possibly older)
   definition of [bptNode]. *)
Notation "[[ b , X | x , .. , y ]]" := (bptLeaf b X (cons x .. (cons y []) ..)) (at level 100, format
  "[[ b , X | '[v ' x , .. , y ']' ]]").
Notation "{{ b , X | f , x , .. , y }}" := (bptNode b X f (cons x .. (cons y []) ..)) (at level 99, format
  "{{ b , X | '[v ' '//' f , '//' x , .. , y ']' '//' }}").

(* A small example leaf with two key/value pairs. *)
Example test := bptLeaf 2 bool [(1, true), (2, false)].
(* [appears_in_kvl sk l]: the key [sk] occurs somewhere in the
   key/value list [l]. *)
Inductive appears_in_kvl {X:Type} (sk: nat) : list (nat * X) -> Prop :=
  | aik_here: forall v l, appears_in_kvl sk ((sk, v)::l)
  | aik_later: forall k v l, appears_in_kvl sk l -> appears_in_kvl sk ((k, v)::l).

(* [kv_appears_in_kvl sk sv l]: the exact pair (sk, sv) occurs in [l]. *)
Inductive kv_appears_in_kvl {X:Type} (sk: nat) (sv: X) : list (nat * X) -> Prop :=
  | kv_aik_here: forall l, kv_appears_in_kvl sk sv ((sk, sv)::l)
  | kv_aik_later: forall k v l, kv_appears_in_kvl sk sv l -> kv_appears_in_kvl sk sv ((k, v)::l).
(* Pair membership is stronger than key membership: if the pair (k, v)
   appears in [l], then the key [k] appears in [l]. *)
Lemma kv_appears_in_kvl_impl_appears_in_kvl: forall (X: Type) (k: nat) (v: X) (l: list (nat * X)),
  kv_appears_in_kvl k v l -> appears_in_kvl k l.
Proof.
  intros.
  (* Each kv_* constructor maps directly onto the corresponding
     appears_in_kvl constructor. *)
  induction H; constructor; assumption.
Qed.
(* [appears_in_tree sk t]: the key [sk] appears in some leaf of the tree
   [t], reachable along a path consistent with the nodes' split keys. *)
Inductive appears_in_tree {X:Type} {b: nat} (sk: nat) : bplustree b X -> Prop :=
  (* In a leaf: the key appears in the leaf's key/value list. *)
  | ait_leaf: forall l, appears_in_kvl sk l -> appears_in_tree sk (bptLeaf b X l)
  (* Last child of a two-entry node: sk is at or beyond the last split key. *)
  | ait_node_last: forall k1 k2 v1 v2,
      appears_in_tree sk v2 -> k2 <= sk ->
      appears_in_tree sk (bptNode b X [(k1, v1), (k2, v2)])
  (* First child: sk falls in the half-open interval [k1, k2). *)
  | ait_node_here: forall k1 k2 v1 v2 l,
      appears_in_tree sk v1 -> k1 <= sk /\ sk < k2 ->
      appears_in_tree sk (bptNode b X ((k1, v1)::(k2, v2)::l))
  (* Skip the first entry when sk is at or beyond the next split key. *)
  | ait_node_later: forall x k1 k2 v1 v2 l,
      appears_in_tree sk (bptNode b X ((k1, v1)::(k2, v2)::l)) ->
      k1 <= sk ->
      appears_in_tree sk (bptNode b X (x::(k1, v1)::(k2, v2)::l)).
(* [kv_appears_in_tree sk sv t]: the exact pair (sk, sv) appears in some
   leaf of [t]; mirrors [appears_in_tree] constructor-for-constructor. *)
Inductive kv_appears_in_tree {X:Type} {b: nat} (sk: nat) (sv: X) : bplustree b X -> Prop :=
  (* In a leaf: the pair appears in the leaf's key/value list. *)
  | kv_ait_leaf: forall l, kv_appears_in_kvl sk sv l -> kv_appears_in_tree sk sv (bptLeaf b X l)
  (* Last child of a two-entry node: sk is at or beyond the last split key. *)
  | kv_ait_node_last: forall k1 k2 v1 v2,
      kv_appears_in_tree sk sv v2 -> k2 <= sk ->
      kv_appears_in_tree sk sv (bptNode b X [(k1, v1), (k2, v2)])
  (* First child: sk falls in the half-open interval [k1, k2). *)
  | kv_ait_node_here: forall k1 k2 v1 v2 l,
      kv_appears_in_tree sk sv v1 -> k1 <= sk /\ sk < k2 ->
      kv_appears_in_tree sk sv (bptNode b X ((k1, v1)::(k2, v2)::l))
  (* Skip the first entry when sk is at or beyond the next split key. *)
  | kv_ait_node_later: forall x k1 k2 v1 v2 l,
      kv_appears_in_tree sk sv (bptNode b X ((k1, v1)::(k2, v2)::l)) ->
      k1 <= sk ->
      kv_appears_in_tree sk sv (bptNode b X (x::(k1, v1)::(k2, v2)::l)).
(* [kvl_sorted l]: the keys of [l] are in strictly increasing order
   ([blt_nat] is boolean strict less-than). *)
Inductive kvl_sorted {X: Type}: list (nat * X) -> Prop :=
  kvl_sorted_0 : kvl_sorted []
  | kvl_sorted_1 : forall (n: nat) (x: X),
      kvl_sorted [(n, x)]
  | kvl_sorted_cons : forall (n1 n2: nat) (x1 x2: X) (lst: list (nat * X)),
      kvl_sorted ((n2,x2)::lst) ->
      blt_nat n1 n2 = true ->
      kvl_sorted ((n1,x1)::(n2,x2)::lst).
(* Some props for having a prop apply to all elements in a list *)
(* [all_values X P l]: the predicate [P] holds for every value in the
   key/value list [l]. *)
Inductive all_values (X : Type) (P : X -> Prop) : list (nat * X) -> Prop :=
  | av_empty : all_values X P []
  | av_next : forall (x:X) (n: nat) (l: list (nat * X)), all_values X P l -> P x -> all_values X P ((n,x)::l)
  .
(* [all_keys X P l]: [P] holds for every key in the key/value list [l]. *)
Inductive all_keys (X : Type) (P : nat -> Prop) : list (nat * X) -> Prop :=
  | ak_empty : all_keys X P []
  | ak_next : forall (x:X) (n: nat) (l: list (nat * X)), all_keys X P l -> P n -> all_keys X P ((n,x)::l)
  .
(* [all P l]: [P] holds for every element of the plain nat list [l]. *)
Inductive all (P : nat -> Prop) : list nat -> Prop :=
  | a_empty : all P []
  | a_next : forall (n: nat) (l: list nat), all P l -> P n -> all P (n::l)
  .
(* [all_values_eq_prop X P l]: the binary relation [P] holds between every
   pair of adjacent values in [l]. *)
Inductive all_values_eq_prop (X: Type)(P: X -> X -> Prop) : list (nat * X) -> Prop :=
  | alep_0 : all_values_eq_prop X P []
  | alep_1 : forall (x:X) (n: nat), all_values_eq_prop X P [(n, x)]
  | alep_next : forall (x1 x2:X) (n1 n2: nat) l,
      all_values_eq_prop X P ((n2, x2) :: l) ->
      P x1 x2 ->
      all_values_eq_prop X P ((n1, x1) :: (n2, x2) :: l).
(* Some helper functions for checking if a number is above or below a given number *)
(* [below n]: numbers strictly less than [n]. *)
Definition below (n: nat) : nat -> Prop :=
  fun o => blt_nat o n = true.
(* [below_equal n]: numbers less than or equal to [n]. *)
Definition below_equal (n: nat) : nat -> Prop :=
  fun o => ble_nat o n = true.
(* [between n m]: numbers [o] with n <= o < m. *)
Definition between (n m: nat) : nat -> Prop :=
  fun o => andb (ble_nat n o) (blt_nat o m) = true.
(* [above m]: numbers greater than or equal to [m]. *)
Definition above (m: nat) : nat -> Prop :=
  fun o => ble_nat m o = true.
|
{"author": "nicolaidahl", "repo": "BPlusTrees", "sha": "f017e4d3a334f72e1fd1cfb777e5bdd78cd9ca49", "save_path": "github-repos/coq/nicolaidahl-BPlusTrees", "path": "github-repos/coq/nicolaidahl-BPlusTrees/BPlusTrees-f017e4d3a334f72e1fd1cfb777e5bdd78cd9ca49/code/InductiveDataTypes.v"}
|
%!TEX root = ../../report.tex
\subsection{Undiscovered City} % (fold)
\label{sub:undiscovered_city}
In \cite{Greuter2003} Stefan Greuter et al. presented a system that generates, in real time, pseudo-infinite virtual cities which can be interactively explored from a first-person perspective. In their approach ``all geometrical components of the city are generated as they are encountered by the user." As shown in Figure~\ref{fig:viewingRange}, only the part of the city that is inside the viewing range is generated. This method allows the visualization of massive amounts of geometry, buildings in this case, by generating in real time only the geometry that is in sight, and since this subset is usually much smaller than the whole, this results in huge performance benefits.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.85\textwidth]{img/Real-Time-procedural-generation/viewing-range.png}
\caption{Viewing Range}
\label{fig:viewingRange}
\end{figure}
\subsubsection{Road Network} % (fold)
\label{ssub:road_network}
The system uses a 2D grid that divides the terrain into square cells. The cells act as proxies for the content that will be procedurally generated. Before the content of each cell is generated, its potential visibility is tested, and after that, only the visible cells are filled with content.
Then the roads are created in a uniform grid pattern. This grid does not feel very natural, and in the continuation of the work this system evolved into a more realistic one, in which some of the grid cells are joined to create a less uniform distribution of the buildings.
% subsubsection road_network (end)
\subsubsection{Buildings} % (fold)
\label{ssub:buildings}
To compute the form and appearance of each building, the system uses a ``single 32 bit pseudo random number generator seed. The random sequence determines building properties such as width, height and number of floors."
Similar sequences of numbers result in similar buildings. To avoid that, a hash function is used to convert each cell position into a seed.
To generate a building, the first step is to generate a floor plan. To do so, a set of regular polygons and rectangles is randomly selected and merged, and the result is then extruded. This is an iterative process that creates sections from the top to the bottom, by adding more shapes to the initial shape and extruding, as shown in Figure~\ref{fig:UC_buildings}. Starting from the left, first there is a simple polygon that is merged with a rectangle and, after extrusion, forms the first block that will be the top of the building. After that, another extrusion is made to generate the next block, followed by the merge of a rectangle into the floor shape and the generation of a new block, and so on.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.85\textwidth]{img/Real-Time-procedural-generation/Building-Generation.png}
\caption{buildings}
\label{fig:UC_buildings}
\end{figure}
With the application of this method, very complex architectural forms can be generated, depending only on which forms are selected and the order in which they are merged.
% subsubsection buildings (end)
% subsection undiscovered_city (end)
|
{"hexsha": "b56a8502a95cbfdbf3b84e7c80b56b20b256e726", "size": 3204, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sections/Works/2-Undiscovered-City.tex", "max_stars_repo_name": "arturalkaim/v2ProceduralGeneration", "max_stars_repo_head_hexsha": "496ecd2bf9885b6fa634cb958b696dad7a2166b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sections/Works/2-Undiscovered-City.tex", "max_issues_repo_name": "arturalkaim/v2ProceduralGeneration", "max_issues_repo_head_hexsha": "496ecd2bf9885b6fa634cb958b696dad7a2166b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sections/Works/2-Undiscovered-City.tex", "max_forks_repo_name": "arturalkaim/v2ProceduralGeneration", "max_forks_repo_head_hexsha": "496ecd2bf9885b6fa634cb958b696dad7a2166b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 72.8181818182, "max_line_length": 695, "alphanum_fraction": 0.7946317104, "num_tokens": 719}
|
import numpy as np
import cv2
"""使用均值漂移检测目标移动的例子
效果很不好
这种方式存在一个问题,就是窗口的大小不与跟踪帧中的目标大小一起变化
"""
cap = cv2.VideoCapture(0)

# Grab the first frame to initialise the tracking window.
ret, frame = cap.read()

# Region of interest (ROI): top-left corner (c, r), size w x h.
r, h, c, w = 10, 200, 10, 200
track_window = (c, r, w, h)

# Extract the ROI pixels from the first frame.
roi = frame[r:r + h, c:c + w]

# BUG FIX: the reference histogram must be built from the ROI, not from the
# whole frame. Previously `frame` was converted here and `roi` was never
# used, so the tracker matched against the entire image (likely the cause of
# the poor results noted in the module docstring).
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

# Mask keeping only the ROI pixels whose HSV values fall between the bounds.
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)), np.array((180., 120., 255.)))

# Hue histogram of the (masked) ROI.
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])

# Normalise the histogram values into the 0-255 range.
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Mean shift iterates until convergence, which is not guaranteed, so a
# termination criterion is required: stop after 10 iterations or once the
# window centre moves by less than 1 pixel.
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()
    if ret == True:
        # Convert the current frame to HSV colour space.
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Back-projection: per-pixel probability of belonging to the ROI
        # histogram. (A debug `print(dst)` that flooded stdout every frame
        # was removed here.)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)

        # Shift the tracking window to the new target location.
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        # Draw the updated window on the frame.
        x, y, w, h = track_window
        img = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
        cv2.imshow('img', img)

        k = cv2.waitKey(60) & 0xff
        if k == ord("q"):
            break
    else:
        break

cv2.destroyAllWindows()
cap.release()
|
{"hexsha": "4d334a9424fec2560832aa79f7776ad83b3b83af", "size": 1507, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_cv/08/08_04.py", "max_stars_repo_name": "strawsyz/straw", "max_stars_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-06T09:09:19.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-24T03:59:55.000Z", "max_issues_repo_path": "my_cv/08/08_04.py", "max_issues_repo_name": "strawsyz/straw", "max_issues_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_cv/08/08_04.py", "max_forks_repo_name": "strawsyz/straw", "max_forks_repo_head_hexsha": "db313c78c2e3c0355cd10c70ac25a15bb5632d41", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9107142857, "max_line_length": 85, "alphanum_fraction": 0.6416721964, "include": true, "reason": "import numpy", "num_tokens": 710}
|
import csv
import collections
import operator
from csv import DictReader
from datetime import datetime
import argparse
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from itertools import islice
import random
import numpy as np
def setup_seed(seed):
    """Seed NumPy's and the stdlib's PRNGs so runs are reproducible."""
    for seeder in (np.random.seed, random.seed):
        seeder(seed)
def to_time_frac(hour, min, time_frac_dict):
    """Map an (hour, minute) pair to its time-slot id as a string.

    time_frac_dict maps hour -> {(minute_lo, minute_hi): slot_id}; the first
    interval containing `min` (bounds inclusive) wins. Returns None when no
    interval matches, mirroring the original behavior.
    """
    slots = time_frac_dict[hour]
    for (lo, hi), frac in slots.items():
        if lo <= min <= hi:
            return str(frac)
def to_libsvm_encode(datapath, sample_type, time_frac_dict):
    """Build a feature index from the training CSV and encode the
    train/val/test CSVs into comma-separated index files.

    Inputs:
        datapath - directory containing 'train.bid.all.csv',
                   'val.bid.all.csv' and 'test.bid.<sample_type>.csv'
        sample_type - suffix selecting which test file to encode (e.g. 'all')
        time_frac_dict - {hour: {(min_lo, min_hi): slot_id}}, used by
                   to_time_frac to bucket timestamps into 15-minute slots

    Side effects:
        writes 'feat.bid.all.txt' (the feature dictionary) and one '.txt'
        encoded file per input CSV into datapath.

    BUG FIX: the previous version never closed the val output file (train
    got an explicit close, val did not); all files are now managed with
    `with` blocks so they are always flushed and closed.
    """
    print('###### to libsvm encode ######\n')

    oses = ["windows", "ios", "mac", "android", "linux"]
    browsers = ["chrome", "sogou", "maxthon", "safari", "firefox", "theworld", "opera", "ie"]

    # Direct categorical features, indexed verbatim from the CSV.
    f1s = ["weekday", "hour", "IP", "region", "city", "adexchange", "domain", "slotid", "slotwidth", "slotheight",
           "slotvisibility", "slotformat", "creative", "advertiser"]
    # Features that are bucketed by featTrans before indexing.
    f1sp = ["useragent", "slotprice"]

    def featTrans(name, content):
        """Collapse a raw 'useragent'/'slotprice' string into a coarse bucket."""
        content = content.lower()
        if name == "useragent":
            operation = "other"
            for o in oses:
                if o in content:
                    operation = o
                    break
            browser = "other"
            for b in browsers:
                if b in content:
                    browser = b
                    break
            return operation + "_" + browser
        if name == "slotprice":
            price = int(content)
            if price > 100:
                return "101+"
            elif price > 50:
                return "51-100"
            elif price > 10:
                return "11-50"
            elif price > 0:
                return "1-10"
            else:
                return "0"

    def getTags(content):
        # Kept for parity with the (currently disabled) usertag feature.
        if content == '\n' or len(content) == 0:
            return ["null"]
        return content.strip().split(',')[:5]

    namecol = {}    # column name -> column index, taken from the train header
    featindex = {}  # 'col:value' -> integer feature id
    maxindex = 0
    featindex['truncate'] = maxindex
    maxindex += 1

    # ---- pass 1: build the feature dictionary from the training file ----
    with open(datapath + 'train.bid.all.csv', 'r') as fi:
        first = True
        for line in fi:
            s = line.split(',')
            if first:
                first = False
                for i in range(0, len(s)):
                    namecol[s[i].strip()] = i
                    if i > 0:
                        # Fallback bucket for values unseen at training time.
                        featindex[str(i) + ':other'] = maxindex
                        maxindex += 1
                continue
            for f in f1s:
                col = namecol[f]
                feat = str(col) + ':' + s[col]
                if feat not in featindex:
                    featindex[feat] = maxindex
                    maxindex += 1
            for f in f1sp:
                col = namecol[f]
                feat = str(col) + ':' + featTrans(f, s[col])
                if feat not in featindex:
                    featindex[feat] = maxindex
                    maxindex += 1

    print('feature size: ' + str(maxindex))

    # Persist the dictionary: first line is the feature count, then
    # one 'feat<TAB>id' row per feature, in id order.
    featvalue = sorted(featindex.items(), key=operator.itemgetter(1))
    with open(datapath + 'feat.bid.all.txt', 'w') as fo:
        fo.write(str(maxindex) + '\n')
        for fv in featvalue:
            fo.write(fv[0] + '\t' + str(fv[1]) + '\n')

    def index_file(in_path, out_path, warn_short):
        """Encode one CSV into an index file.

        Each output row is: click,winning_price,hour,time_slot,<feature ids>.
        warn_short: when True, print any data row shorter than the header
        (a diagnostic the original applied to val/test only).
        """
        print('indexing ' + in_path)
        with open(in_path, 'r') as fi, open(out_path, 'w') as fo:
            first = True
            for line in fi:
                if first:
                    first = False
                    continue
                s = line.split(',')
                time_frac = s[4][8: 12]  # 'HHMM' portion of the timestamp
                # click + winning price + hour + time slot
                fo.write(s[0] + ',' + s[23] + ',' + s[2] + ',' +
                         to_time_frac(int(time_frac[0:2]), int(time_frac[2:4]), time_frac_dict))
                fo.write(',' + str(featindex['truncate']))
                for f in f1s:  # every direct first-order feature
                    col = namecol[f]
                    if warn_short and col >= len(s):
                        print('col: ' + str(col))
                        print(line)
                    feat = str(col) + ':' + s[col]
                    if feat not in featindex:
                        feat = str(col) + ':other'
                    fo.write(',' + str(featindex[feat]))
                for f in f1sp:  # the transformed features
                    col = namecol[f]
                    feat = str(col) + ':' + featTrans(f, s[col])
                    if feat not in featindex:
                        feat = str(col) + ':other'
                    fo.write(',' + str(featindex[feat]))
                fo.write('\n')

    # ---- pass 2: encode each split with the shared dictionary ----
    index_file(datapath + 'train.bid.all.csv', datapath + 'train.bid.all.txt', warn_short=False)
    index_file(datapath + 'val.bid.all.csv', datapath + 'val.bid.all.txt', warn_short=True)
    index_file(datapath + 'test.bid.' + sample_type + '.csv',
               datapath + 'test.bid.' + sample_type + '.txt', warn_short=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='../../data/')
    parser.add_argument('--dataset_name', default='ipinyou/', help='ipinyou')
    parser.add_argument('--campaign_id', default='1458/', help='1458, 3427')
    parser.add_argument('--is_to_csv', default=True)

    setup_seed(1)

    args = parser.parse_args()
    data_path = args.data_path + args.dataset_name + args.campaign_id

    # Build {hour: {(minute_lo, minute_hi): slot_id}} covering all 24*4
    # quarter-hour slots, numbered 0..95 in chronological order.
    time_frac_dict = {}
    count = 0
    for i in range(24):
        hour_frac_dict = {}
        for item in [(0, 15), (15, 30), (30, 45), (45, 60)]:
            hour_frac_dict.setdefault(item, count)
            count += 1
        time_frac_dict.setdefault(i, hour_frac_dict)

    if args.is_to_csv:
        # Convert the tab-separated raw log into a comma-separated file.
        print('to csv')
        file_name = 'train.log.txt'
        with open(data_path + 'train.all.origin.csv', 'w', newline='') as csvfile:  # newline='' avoids blank rows on Windows
            spamwriter = csv.writer(csvfile, dialect='excel')
            with open(data_path + file_name, 'r') as filein:
                for line in filein:
                    spamwriter.writerow(line.strip('\n').split('\t'))
        print('train-data读写完毕')

    # Record the [weekday, first_row, last_row] index range of each day
    # present in the training log (column 1 is the weekday).
    file_name = 'train.all.origin.csv'
    train_data_path = data_path + file_name
    day_to_weekday = {4: '6', 5: '7', 6: '8', 0: '9', 1: '10', 2: '11', 3: '12'}
    train_data = pd.read_csv(train_data_path)
    train_data.iloc[:, 1] = train_data.iloc[:, 1].astype(int)

    print('###### separate datas from train day ######\n')
    day_data_indexs = []
    for key in day_to_weekday.keys():
        day_datas = train_data[train_data.iloc[:, 1] == key]
        day_indexs = day_datas.index
        day_data_indexs.append([int(day_to_weekday[key]), int(day_indexs[0]), int(day_indexs[-1])])
    day_data_indexs_df = pd.DataFrame(data=day_data_indexs)
    day_data_indexs_df.to_csv(data_path + 'day_indexs.csv', index=None, header=None)

    # Split into train (days 6-10), val (day 11) and test (day 12).
    day_indexs = pd.read_csv(data_path + 'day_indexs.csv', header=None).values.astype(int)
    train_indexs = day_indexs[day_indexs[:, 0] == 11][0]  # [day, first_row, last_row] of day 11
    test_indexs = day_indexs[day_indexs[:, 0] == 12][0]   # [day, first_row, last_row] of day 12

    origin_train_data = pd.read_csv(data_path + 'train.all.origin.csv')
    train_data = origin_train_data.iloc[:train_indexs[1], :]  # days 6-10
    val_data = origin_train_data.iloc[train_indexs[1]: train_indexs[2] + 1, :]  # day 11
    # BUG FIX: start the test split at day 12's first row. The previous
    # `iloc[train_indexs[2]:]` also included the last row of day 11,
    # duplicating it in both the val and test splits.
    test_data = origin_train_data.iloc[test_indexs[1]:, :]  # day 12

    train_data.to_csv(data_path + 'train.bid.all.csv', index=None)
    val_data.to_csv(data_path + 'val.bid.all.csv', index=None)
    test_data.to_csv(data_path + 'test.bid.all.csv', index=None)

    to_libsvm_encode(data_path, 'all', time_frac_dict)
|
{"hexsha": "4d10ef934d657a93e99d031973283906ecbe8345", "size": 11391, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctr/encode/data_.py", "max_stars_repo_name": "JiaXingBinggan/FAB_expr", "max_stars_repo_head_hexsha": "354d274b28f4a9933695b82494d829c87531a772", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ctr/encode/data_.py", "max_issues_repo_name": "JiaXingBinggan/FAB_expr", "max_issues_repo_head_hexsha": "354d274b28f4a9933695b82494d829c87531a772", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ctr/encode/data_.py", "max_forks_repo_name": "JiaXingBinggan/FAB_expr", "max_forks_repo_head_hexsha": "354d274b28f4a9933695b82494d829c87531a772", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-01T02:39:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-01T02:39:12.000Z", "avg_line_length": 34.9417177914, "max_line_length": 172, "alphanum_fraction": 0.5118075674, "include": true, "reason": "import numpy", "num_tokens": 2971}
|
import numpy as np
from sklearn.dummy import DummyClassifier
################################################################################
def array2c(array, fmt=None):
    """Render *array* as a C initializer string.

    ``fmt`` may be a %-style format string, a callable, or None
    (defaults to "%.20f"). Nested sequences become nested braces.
    """
    # Normalize the formatter into a single callable.
    if fmt is None:
        formatter = lambda value: "%.20f" % value
    elif isinstance(fmt, str):
        formatter = lambda value: fmt % value
    else:
        formatter = fmt

    if isinstance(array, (list, tuple, set)):
        array = np.asarray(array)

    # Plain scalars and 0-d numpy arrays are formatted directly.
    if isinstance(array, (int, float, str)) or len(array.shape) == 0:
        return formatter(array)

    # Recurse over the leading axis and wrap the joined rows in braces.
    inner = ", ".join(array2c(row, fmt) for row in array)
    return "{%s}" % inner
################################################################################
def get_n_features(algo):
    """Best-effort lookup of the number of output features of *algo*.

    Checks sklearn-style attributes first (``n_features``/``n_features_``),
    then falls back on class-name dispatch for a few known model types.
    Raises TypeError when the count cannot be determined.
    """
    # sklearn-style attributes take precedence when present.
    for attr in ('n_features', 'n_features_'):
        if hasattr(algo, attr):
            return getattr(algo, attr)

    kind = algo.__class__.__name__
    if kind == 'Sequential':
        # Keras model: width of the final layer's kernel.
        return algo.layers[-1].kernel.shape[-1]
    if kind == 'DecorrTransformer':
        return algo.eig.shape[-1]
    if kind == 'StandardScaler':
        # Either statistic may be None depending on with_mean/with_std.
        return algo.mean_.shape[-1] if algo.mean_ is not None else algo.var_.shape[-1]
    if kind == 'MinMaxScaler':
        return algo.data_min_.shape[-1]
    if kind == 'QuantileTransformer':
        return algo.quantiles_.shape[-1]
    if kind == 'Pipeline':
        # Delegate to the last step.
        # NOTE(review): sklearn Pipeline.steps entries are (name, estimator)
        # tuples; passing the tuple straight through looks suspicious — confirm.
        return get_n_features(algo.steps[-1])

    raise TypeError("Cannot determine output features for %s" % type(algo))
################################################################################
def retrieve_prior(bdt):
    """Retrieve the prior (initial raw prediction) for BDT classifiers."""
    init = bdt.init_
    # 'zero' initializer: the prior is identically zero for every class.
    if init == 'zero':
        return np.zeros(bdt.n_classes_)
    # DummyClassifier initializer: ask the loss object for its initial
    # raw predictions (the feature values of X are irrelevant here).
    if isinstance(init, DummyClassifier):
        X = np.empty([1, bdt.n_classes_])
        raw = bdt.loss_.get_init_raw_predictions(X, init)
        return np.asarray(raw).ravel()
    raise NotImplementedError(
        "Cannot convert initializer %s" % str(init)
    )
################################################################################
def get_interpolation_function(func_name):
    """Return the C source of a piecewise-linear interpolation routine.

    The emitted function binary-searches the sorted abscissae ``xs`` and
    linearly interpolates ``ys``; x outside [xs[0], xs[N-1]] is clamped
    to the endpoint values.
    """
    template = """
extern "C"
FLOAT_T %(func_name)s ( FLOAT_T x, FLOAT_T *xs, FLOAT_T *ys, int N )
{
  int min = 0;
  int max = N;
  int n;
  if (N<=1) return ys[0];
  if (x <= xs[0]) return ys[0];
  if (x >= xs[N-1]) return ys[N-1];
  for (;;)
  {
    n = (min + max)/2;
    if ( x < xs[n] )
      max = n;
    else if ( x >= xs[n+1] )
      min = n;
    else
      break;
  }
  return (x - xs[n])/(xs[n+1]-xs[n])*(ys[n+1]-ys[n]) + ys[n];
}
"""
    return template % {"func_name": func_name}
|
{"hexsha": "fd3e311f7300e9bcd79ba02717b49004a2eeb8af", "size": 2703, "ext": "py", "lang": "Python", "max_stars_repo_path": "scikinC/_tools.py", "max_stars_repo_name": "landerlini/scikinC", "max_stars_repo_head_hexsha": "c408e2b63a32eecefc514193a4483b9d95b8d0fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scikinC/_tools.py", "max_issues_repo_name": "landerlini/scikinC", "max_issues_repo_head_hexsha": "c408e2b63a32eecefc514193a4483b9d95b8d0fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-07-30T16:30:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-15T07:27:38.000Z", "max_forks_repo_path": "scikinC/_tools.py", "max_forks_repo_name": "landerlini/scikinC", "max_forks_repo_head_hexsha": "c408e2b63a32eecefc514193a4483b9d95b8d0fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0689655172, "max_line_length": 82, "alphanum_fraction": 0.5390307066, "include": true, "reason": "import numpy", "num_tokens": 706}
|
import cv2
import numpy as np
from open3d import PointCloud, Vector3dVector, draw_geometries
import open3d as o3d
import time
import random
from interaction import opt
import sys
brush_temp = None
points = np.zeros((1, 3))
color = np.zeros((1, 3))
def pc_cube(pt1, pt2):
    """Sample the six faces of the axis-aligned cuboid spanned by pt1/pt2.

    Each face is a 50x50 grid (np.linspace default count); returns an
    (6 * 2500, 3) array of points.
    """
    xs = np.linspace(pt1[0], pt2[0])
    ys = np.linspace(pt1[1], pt2[1])
    zs = np.linspace(pt1[2], pt2[2])

    def grid(a, b):
        # Flattened meshgrid coordinates for one face.
        ga, gb = np.meshgrid(a, b)
        return ga.ravel(), gb.ravel()

    X, Y = grid(xs, ys)
    bottom = np.c_[X, Y, np.full_like(X, pt1[2])]
    top = np.c_[X, Y, np.full_like(X, pt2[2])]

    Z, Yz = grid(zs, ys)
    side_lo_x = np.c_[np.full_like(Z, pt1[0]), Yz, Z]
    side_hi_x = np.c_[np.full_like(Z, pt2[0]), Yz, Z]

    Xz, Zx = grid(xs, zs)
    side_lo_y = np.c_[Xz, np.full_like(Xz, pt1[1]), Zx]
    side_hi_y = np.c_[Xz, np.full_like(Xz, pt2[1]), Zx]

    return np.concatenate((bottom, top, side_lo_x, side_hi_x, side_lo_y, side_hi_y))
def pc_line(pt1, pt2):
    """Sample points along the segment pt1->pt2, ~5 per unit of length."""
    start = np.asarray(pt1)
    end = np.asarray(pt2)
    # Point count scales with the (integer-truncated) segment length.
    n_samples = 5 * int(np.linalg.norm(start - end))
    coords = [np.linspace(pt1[axis], pt2[axis], n_samples) for axis in range(3)]
    return np.c_[coords[0], coords[1], coords[2]]
def pc_sphere(center, radius):
    """Sample a 100x100 spherical-coordinate grid of points around *center*."""
    azimuth = np.linspace(0, np.pi * 2, 100)
    polar = np.linspace(0, np.pi, 100)
    azimuth, polar = np.meshgrid(azimuth, polar)
    # Standard spherical-to-Cartesian conversion, translated to the center.
    xs = radius * np.cos(azimuth) * np.sin(polar) + center[0]
    ys = radius * np.sin(azimuth) * np.sin(polar) + center[1]
    zs = radius * np.cos(polar) + center[2]
    return np.c_[xs.ravel(), ys.ravel(), zs.ravel()]
def pc_text(pt1, text: str):
    """Rasterize *text* with OpenCV and return a sparse 3-D point cloud of it.

    The text is drawn in black onto a 480x640 image (background value 1),
    the text-pixel coordinates are shifted so the draw anchor maps to pt1,
    and the glyphs are extruded over 10 depth slices using a random subset
    of pixels per slice.
    NOTE(review): output is nondeterministic — random.sample/randint with
    no seed.
    """
    img = np.ones((480, 640, 3), np.uint8)
    cv2.putText(img, text, (100, 100), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 0), 1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Text pixels are the only zero-valued ones; np.where gives (rows, cols).
    x, y = np.where(gray == 0)
    # Translate so the anchor drawn at (100, 100) lands at pt1.
    x += pt1[1] - 100
    y += pt1[0] - 100
    black = np.c_[y, x]
    # Seed row of zeros; removed again before returning (keeps dtype float).
    points = np.zeros((1, 3))
    for i in range(10):
        # Keep a random 1/3..2/3 subset of the glyph pixels for this slice.
        selected_index = random.sample(range(len(black)), random.randint(int(len(black)/3), int(2*len(black)/3)))
        for j in selected_index:
            temp = np.append(black[j], i+pt1[2]).reshape(1, 3)
            points = np.concatenate((points, temp))
    return np.delete(points, 0, axis=0)
def gen_element(info):
    """Translate one trace command into (points, colors) arrays to append.

    *info* is a whitespace-split trace line: a command token followed by
    integer arguments (and trailing text for the 't' command). Returns
    (None, None) for commands that add no geometry ('clear', 'move', and
    the first 'b' stroke). Mutates the module-level brush_temp/points/color.
    """
    global brush_temp, points, color
    append_points = None
    append_color = None
    try:
        # Most commands: every remaining token is an integer argument.
        nums = list(map(int, info[1:]))
    except ValueError:
        # 't' lines carry text after the coordinates; parse only the ints.
        nums = list(map(int, info[1:4]))
    if info[0] == "c":  # cuboid
        append_points = pc_cube(nums[:3], nums[3:])
        append_color = np.ones(append_points.shape) * np.asarray([0.30, 1, 0.96])
        brush_temp = None
    elif info[0] == "d":  # dots
        append_points = np.asarray(nums).reshape(1, 3)
        append_color = np.asarray([0.001, 1, 0]).reshape((1, 3))
        brush_temp = None
    elif info[0] == "s":  # sphere
        append_points = pc_sphere(nums[:3], nums[-1])
        append_color = np.ones(append_points.shape) * np.asarray([1, 1, 0])
        brush_temp = None
    elif info[0] == "b":  # brush
        # Connect to the previous brush point; the first stroke only records
        # the position and draws nothing. Color comes from nums[3:] / 255.
        if brush_temp is not None:
            append_points = pc_line(nums[:3], brush_temp)
            append_color = np.ones(append_points.shape) * (np.asarray(nums[3:]) / 255)
        brush_temp = nums[:3]  # 3d
    elif info[0] == "l":  # line
        append_points = pc_line(nums[:3], nums[3:])
        append_color = np.ones(append_points.shape) * np.asarray([1, 0, 0.8])
        brush_temp = None
    elif info[0] == "t":  # text
        if len(info) == 5:
            append_points = pc_text(nums, info[-1])
        else:
            # Re-join multi-word text that was split on spaces.
            text = " ".join(info[i] for i in range(4, len(info)))
            append_points = pc_text(nums, text)
        append_color = np.ones(append_points.shape) * np.asarray([1, 0.97, 0.4])
        brush_temp = None
    elif info[0] == 'clear':
        # Reset the global cloud back to a single seed row.
        # NOTE(review): module init uses np.zeros seeds but 'clear' uses
        # np.ones — confirm intended.
        points = np.ones((1, 3))
        color = np.ones((1, 3))
        brush_temp = None
    elif info[0] == 'move':
        brush_temp = None
    return append_points, append_color
def gen3d():
    """Replay trace.txt into one point cloud, then export and/or display it.

    First line of trace.txt is the project name; each remaining line is a
    trace command decoded by gen_element(). Export/view behaviour is
    controlled by the `opt` flags from the interaction module.
    """
    global brush_temp, points, color
    with open('trace.txt') as f:
        # First line is the project name (trailing newline stripped).
        project_name = f.readline()[:-1]
        for index, line in enumerate(f):
            info = line.strip().split(' ')
            append_points, append_color = gen_element(info)
            if append_color is None:
                continue
            points = np.concatenate((points, append_points))
            color = np.concatenate((color, append_color))
    # Drop the initial zero seed rows created at module load.
    points = np.delete(points, 0, axis=0)
    color = np.delete(color, 0, axis=0)
    point_cloud = PointCloud()
    point_cloud.points = Vector3dVector(points)
    if opt.pc_color == 'default':
        point_cloud.colors = Vector3dVector(color)
    if opt.export3d:
        o3d.io.write_point_cloud('./output/{}/result.ply'.format(project_name), point_cloud, True)
    if opt.view3d:
        draw_geometries([point_cloud])
def trace3d(project=''):
    """Animate the trace file line-by-line in an Open3D visualizer window.

    With no *project*, reads ./trace.txt; otherwise reads the project's
    saved trace. Re-renders after every decoded command (~10 fps).
    """
    global points, color, brush_temp
    points = np.zeros((1, 3))
    color = np.zeros((1, 3))
    vis = o3d.visualization.Visualizer()
    vis.create_window(left=0, top=0)
    point_cloud = o3d.geometry.PointCloud()
    to_reset = True
    vis.add_geometry(point_cloud)
    if project == '':
        file = 'trace.txt'
    else:
        file = './output/{}/trace.txt'.format(project)
    with open(file) as f:
        # Skip the project-name header line.
        f.readline()
        # Remove the zero seed rows before accumulating geometry.
        points = np.delete(points, 0, axis=0)
        color = np.delete(color, 0, axis=0)
        for index, line in enumerate(f):
            info = line.strip().split(' ')
            append_points, append_color = gen_element(info)
            if append_points is None:
                continue
            points = np.concatenate((points, append_points))
            color = np.concatenate((color, append_color))
            point_cloud.points = o3d.utility.Vector3dVector(points)
            if opt.pc_color == 'default':
                # NOTE(review): mixes o3d.Vector3dVector with
                # o3d.utility.Vector3dVector above — the bare form only exists
                # in old open3d releases; confirm against the pinned version.
                point_cloud.colors = o3d.Vector3dVector(color)
            # NOTE(review): newer open3d requires update_geometry(geometry).
            vis.update_geometry()
            if to_reset:
                vis.reset_view_point(True)
                to_reset = False
            vis.poll_events()
            vis.update_renderer()
            time.sleep(0.1)
        else:
            # for/else with no break: runs after the full replay finishes.
            vis.destroy_window()
if __name__ == '__main__':
    # opt.export3d = True
    # opt.pc_color = 'default'
    # print(sys.argv)
    # Replay trace.txt once; trace3d() is the animated variant (see below).
    gen3d()
    # if len(sys.argv) == 2:
    #     trace3d(sys.argv[1])
|
{"hexsha": "10080abdeb8e06a358a6409d50ab452d2b648c06", "size": 6446, "ext": "py", "lang": "Python", "max_stars_repo_path": "gen3d.py", "max_stars_repo_name": "HarryXD2018/3DPainter", "max_stars_repo_head_hexsha": "d58e705c203eddd6e46007e6640543f7a4bf44d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-26T11:55:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T02:29:41.000Z", "max_issues_repo_path": "gen3d.py", "max_issues_repo_name": "HarryXD2018/3DPainter", "max_issues_repo_head_hexsha": "d58e705c203eddd6e46007e6640543f7a4bf44d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-19T09:46:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-02T08:49:10.000Z", "max_forks_repo_path": "gen3d.py", "max_forks_repo_name": "HarryXD2018/3DPainter", "max_forks_repo_head_hexsha": "d58e705c203eddd6e46007e6640543f7a4bf44d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5555555556, "max_line_length": 113, "alphanum_fraction": 0.5685696556, "include": true, "reason": "import numpy", "num_tokens": 1898}
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.1.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import os
import json
import pydicom
import piexif
import csv
from PIL import Image
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# %%
data_root = Path(os.environ.get('KAGGLE_INPUT_DIR', '.')) / "siim-acr-pneumothorax-segmentation" / "jpeg-images-train"
# %%
def rle2mask(array, width, height):
    """Decode a run-length-encoded mask into a (width, height) array.

    *array* alternates (start-offset, run-length) pairs, where each start
    is a delta from the end of the previous run. A length-1 array is the
    "no mask" encoding and yields all zeros. Mask pixels are set to 255.
    """
    flat = np.zeros(width * height)
    if len(array) != 1:
        starts = array[0::2]
        lengths = array[1::2]
        pos = 0
        for idx, start in enumerate(starts):
            pos += start
            flat[pos:pos + lengths[idx]] = 255
            pos += lengths[idx]
    # Stored row-major as (height, width); transpose to (width, height).
    return flat.reshape(height, width).T
# %%
def read_jpg(path, empty_mask_is_negative=False):
    """Load a JPEG plus the metadata stashed in its EXIF MakerNote tag.

    Returns (pixel_array, attributes, masks). ``masks`` is None when the
    MakerNote carries no 'Masks' entry, unless *empty_mask_is_negative*
    substitutes the [[-1]] "no mask" encoding first. The decoded 'Masks'
    entry is removed from the returned attributes.
    """
    img = Image.open(path)
    # The dataset packs a JSON blob into the EXIF MakerNote.
    makernote = piexif.load(img.info["exif"])["Exif"][piexif.ExifIFD.MakerNote]
    attr = json.loads(makernote.decode("ascii"))
    if empty_mask_is_negative:
        attr.setdefault('Masks', [[-1]])
    masks = None
    if 'Masks' in attr:
        masks = [rle2mask(rle, img.width, img.height) for rle in attr.pop('Masks')]
    return np.asarray(img), attr, masks
# %%
# Preview up to ~10 training JPEGs alongside their embedded masks.
for i, p in enumerate(data_root.glob('*.jpg')):
    pixel_array, attr, masks = read_jpg(p, True)
    # NOTE(review): plt.figure(i) opens a figure that plt.subplots then
    # replaces with a fresh one — the first call looks redundant.
    plt.figure(i)
    fig, axs = plt.subplots(1, 1 + len(masks))
    axs[0].imshow(pixel_array)
    for j, m in enumerate(masks):
        axs[j+1].imshow(masks[j])
    if 8 < i:
        break
|
{"hexsha": "8b6d0ac15ac6918a33e708c221bec294fcc694a1", "size": 1870, "ext": "py", "lang": "Python", "max_stars_repo_path": "kernels/jpeg_io_test/jpeg_io_test.py", "max_stars_repo_name": "ar90n/siim-acr-pneumothorax-segmentation", "max_stars_repo_head_hexsha": "e9b88f0a023798431dfd76704047388ba81a86e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kernels/jpeg_io_test/jpeg_io_test.py", "max_issues_repo_name": "ar90n/siim-acr-pneumothorax-segmentation", "max_issues_repo_head_hexsha": "e9b88f0a023798431dfd76704047388ba81a86e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kernels/jpeg_io_test/jpeg_io_test.py", "max_forks_repo_name": "ar90n/siim-acr-pneumothorax-segmentation", "max_forks_repo_head_hexsha": "e9b88f0a023798431dfd76704047388ba81a86e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6052631579, "max_line_length": 118, "alphanum_fraction": 0.6310160428, "include": true, "reason": "import numpy", "num_tokens": 528}
|
! Solves a banded linear system for a birth-death chain over filament sizes
! n..m, using LAPACK band routines (dgbtrf/dgbtrs) and BLAS ddot, and prints
! the change in the x'y statistic after `shape` repeated solves.
program persist
    implicit none
    ! Explicit types for blas calls
    integer, parameter :: i32 = 4
    integer, parameter :: i64 = 8
    integer, parameter :: f32 = kind(1.e0)
    integer, parameter :: f64 = kind(1.d0)
    real(f64) :: ddot   ! external BLAS double-precision dot product
    ! Iteration variables
    integer(i32) i, j
    ! Physical constants
    ! NOTE(review): `shape` and `scale` shadow the intrinsics of the same
    ! names inside this scope.
    integer(i32), parameter :: shape = 43_i32
    real(f64), parameter :: scale = 1.91163_f64
    ! Specified constants
    real(f64) :: kon, koff, sigma
    integer(i32) :: m, n, d
    ! Program variables
    real(f64), allocatable :: A(:,:), x(:), y(:)
    integer(i32), allocatable :: ipiv(:)
    real(f64) :: z
    ! Parse command-line input
    character(len=128) :: arg
    if (command_argument_count() < 4) then
        print *, "Usage: ./persist mass minsize beta gamma"
        call exit()
    end if
    call get_command_argument(1, arg)
    read(arg,*) m
    call get_command_argument(2, arg)
    read(arg,*) n
    call get_command_argument(3, arg)
    read(arg,*) kon
    call get_command_argument(4, arg)
    read(arg,*) koff
    ! Program constants
    d = m-n+1                         ! number of states (system dimension)
    sigma = 2.0_f64 * kon / koff
    ! Allocate and initialize the memory for the problem.
    ! A uses LAPACK band storage: kl = 1, ku = n-1 in the dgbtrf calls below,
    ! so the leading dimension is 2*kl + ku + 1 = n+2.
    allocate(A(n+2, n:m), x(n:m), y(n:m), ipiv(n:m))
    ! super-diagonals
    A(2:n, n:m) = 1.0_f64
    ! diagonal
    A(n+1, n:m) = (/ (-sigma*(m-i) - (i-1), i=n, m) /)
    ! sub-diagonal
    A(n+2, n:m) = (/ (sigma*(m-i), i=n, m) /)
    ! summation vector
    x(n:(2*n-1)) = 0.0_f64
    x((2*n):m) = (/ (i-2*n+1, i=2*n, m) /)
    ! unit vector
    y(n) = 1.0_f64
    y((n+1):m) = 0.0_f64
    ! LU-factorize the matrix operator (info returned in i, not checked)
    call dgbtrf(d, d, 1, n-1, A, n+2, ipiv, i)
    ! update y <- inv(A)*y
    call dgbtrs('N', d, 1, n-1, 1, A, n+2, ipiv, y, d, i)
    z = ddot(d, x, 1, y, 1)
    ! update A again (maybe could update factors directly if too slow)
    ! super-diagonals
    A(2:n, n:m) = -koff*scale*1.0_f64
    ! diagonal
    A(n+1, n:m) = (/ (1 + (koff*scale)*(sigma*(m-i) + (i-1)), i=n, m) /)
    ! sub-diagonal
    A(n+2, n:m) = (/ (-(koff*scale)*sigma*(m-i), i=n, m) /)
    ! LU-factorize the matrix operator
    call dgbtrf(d, d, 1, n-1, A, n+2, ipiv, i)
    ! Apply the factored solve `shape` times to the same right-hand side y.
    do j = 1, shape
        call dgbtrs('N', d, 1, n-1, 1, A, n+2, ipiv, y, d, i)
    end do
    print *, m, n, kon, koff, ddot(d, x, 1, y, 1)-z
    ! Cleanup the problem
    deallocate(A, x, y, ipiv)
end program persist
|
{"hexsha": "5a643cd6514751b65f971effd4c4dc0b956c2481", "size": 2515, "ext": "f95", "lang": "FORTRAN", "max_stars_repo_path": "persist/persist.f95", "max_stars_repo_name": "jasondark/dissertation", "max_stars_repo_head_hexsha": "3e1117ef0d14aa8d659f80df3edde1c266815856", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "persist/persist.f95", "max_issues_repo_name": "jasondark/dissertation", "max_issues_repo_head_hexsha": "3e1117ef0d14aa8d659f80df3edde1c266815856", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "persist/persist.f95", "max_forks_repo_name": "jasondark/dissertation", "max_forks_repo_head_hexsha": "3e1117ef0d14aa8d659f80df3edde1c266815856", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-18T01:05:58.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-18T01:05:58.000Z", "avg_line_length": 28.2584269663, "max_line_length": 73, "alphanum_fraction": 0.524055666, "num_tokens": 912}
|
[STATEMENT]
lemma Macaulay_list_Nil [simp]: "Macaulay_list [] = ([]::('t \<Rightarrow>\<^sub>0 'b::field) list)" (is "?l = _")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Macaulay_list [] = []
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Macaulay_list [] = []
[PROOF STEP]
have "length ?l \<le> length (mat_to_polys (Keys_to_list ([]::('t \<Rightarrow>\<^sub>0 'b) list))
(row_echelon (Macaulay_mat ([]::('t \<Rightarrow>\<^sub>0 'b) list))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (Macaulay_list []) \<le> length (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat [])))
[PROOF STEP]
unfolding Macaulay_list_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (filter (\<lambda>p. p \<noteq> 0) (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat [])))) \<le> length (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat [])))
[PROOF STEP]
by (fact length_filter_le)
[PROOF STATE]
proof (state)
this:
length (Macaulay_list []) \<le> length (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat [])))
goal (1 subgoal):
1. Macaulay_list [] = []
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
length (Macaulay_list []) \<le> length (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat [])))
goal (1 subgoal):
1. Macaulay_list [] = []
[PROOF STEP]
have "... = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat []))) = 0
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length (mat_to_polys (Keys_to_list []) (row_echelon (Macaulay_mat []))) = 0
goal (1 subgoal):
1. Macaulay_list [] = []
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
length (Macaulay_list []) \<le> 0
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
length (Macaulay_list []) \<le> 0
goal (1 subgoal):
1. Macaulay_list [] = []
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Macaulay_list [] = []
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 912, "file": "Groebner_Bases_Macaulay_Matrix", "length": 11}
|
REBOL [
    Title: "Builds a set of Red/System Float Tests to run on an ARM host"
    File: %build-arm-float-tests.r
    Author: "Peter W A Wood"
    Version: 0.1.0
    License: "BSD-3 - https://github.com/dockimbel/Red/blob/master/BSD-3-License.txt"
]

;; This script must be run from the Red/system/tests dir

;; suppress script messages (restored again at the end of the script)
store-quiet-mode: system/options/quiet
system/options/quiet: true

;; init: parse rules matching "--run-test-file-quiet %file.reds" entries
file-chars: charset [#"a" - #"z" #"A" - #"Z" #"0" - #"9" "-" "/"]
a-file-name: ["%" some file-chars ".reds" ]
a-test-file: ["--run-test-file-quiet " copy file a-file-name]

;; make the Arm-Float dir if needed
arm-dir: %runnable/arm-float-tests/
make-dir arm-dir

;; empty the Arm dir
foreach file read arm-dir [delete join arm-dir file]

;; get the list of test source files from the float test runner script
test-files: copy []
all-tests: read %run-float.r
parse/all all-tests [any [a-test-file (append test-files to file! file) | skip] end]

;; cross-compile each test for Linux-ARM and move the executables
;; to runnable/arm-float-tests
change-dir %../
foreach test-file test-files [
    insert next test-file "tests/"
    do/args %rsc.r join "-t Linux-ARM " test-file
    exe: copy find/last/tail test-file "/"
    exe: replace exe ".reds" ""
    write/binary join %tests/runnable/arm-float-tests/ exe read/binary join %builds/ exe
]
change-dir %tests/

;; copy the bash script and mark it as executable
write/binary %runnable/arm-float-tests/run-all.sh read/binary %run-all.sh
runner: open %runnable/arm-float-tests/run-all.sh
set-modes runner [
    owner-execute: true
    group-execute: true
    world-execute: true
]
close runner

;; tidy up
system/options/quiet: store-quiet-mode
print "ARM Float tests built"
|
{"hexsha": "d34fa8845227e262936e5700882f30fa199340b2", "size": 1655, "ext": "r", "lang": "R", "max_stars_repo_path": "system/tests/build-arm-float-tests.r", "max_stars_repo_name": "7hi4g0/red", "max_stars_repo_head_hexsha": "a9f561552696d9922198bb41a19b2f9fc0052adc", "max_stars_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "system/tests/build-arm-float-tests.r", "max_issues_repo_name": "7hi4g0/red", "max_issues_repo_head_hexsha": "a9f561552696d9922198bb41a19b2f9fc0052adc", "max_issues_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "system/tests/build-arm-float-tests.r", "max_forks_repo_name": "7hi4g0/red", "max_forks_repo_head_hexsha": "a9f561552696d9922198bb41a19b2f9fc0052adc", "max_forks_repo_licenses": ["BSL-1.0", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5833333333, "max_line_length": 86, "alphanum_fraction": 0.6990936556, "num_tokens": 479}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 19 09:55:36 2017
@author: cheers
"""
import scipy.io as sio
import matplotlib.pyplot as plt
import numpy as np
# SVHN constants: 32x32 images, digit classes 0-9.
image_size = 32
num_labels = 10
def display_data():
    """Show the first 10 SVHN training images with their labels.

    NOTE: Python 2 print statements — this file targets Python 2.
    Reads train_32x32.mat from the working directory.
    """
    print 'loading Matlab data...'
    train = sio.loadmat('train_32x32.mat')
    data=train['X']
    label=train['y']
    for i in range(10):
        plt.subplot(2,5,i+1)
        plt.title(label[i][0])
        # Images are stored (H, W, C, N): index the sample axis last.
        plt.imshow(data[...,i])
        plt.axis('off')
    plt.show()
def load_data(one_hot=False):
    """Load the SVHN train/test splits from the local .mat files.

    Returns (train_data, train_label, test_data, test_label). Images are
    reordered from (H, W, C, N) to (N, H, W, C) and scaled into [0, 1].
    Label value 10 (SVHN's encoding of digit zero) is remapped to 0; with
    one_hot=True the labels become one-hot float32 matrices.
    """
    train = sio.loadmat('train_32x32.mat')
    test = sio.loadmat('test_32x32.mat')

    def _prepare(images):
        # Sample axis first: (H, W, C, N) -> (N, H, W, C), then scale.
        return np.transpose(images, (3, 0, 1, 2)) / 255.

    train_data = _prepare(train['X'])
    test_data = _prepare(test['X'])
    train_label = train['y']
    test_label = test['y']

    # SVHN stores digit 0 as class 10; remap in place.
    train_label[train_label == 10] = 0
    test_label[test_label == 10] = 0

    if one_hot:
        train_label = (np.arange(num_labels) == train_label[:,]).astype(np.float32)
        test_label = (np.arange(num_labels) == test_label[:,]).astype(np.float32)

    return train_data, train_label, test_data, test_label
if __name__ == '__main__':
    # Smoke-test: load with one-hot labels, then preview sample images.
    load_data(one_hot = True)
    display_data()
|
{"hexsha": "989114d5e7ea12a03c96522b5a640b3dd6d2221f", "size": 1669, "ext": "py", "lang": "Python", "max_stars_repo_path": "svhn_data.py", "max_stars_repo_name": "Yisongsong/TFlearn-SVHN", "max_stars_repo_head_hexsha": "fd655853a0d8a495cebb35b035f2007581120332", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "svhn_data.py", "max_issues_repo_name": "Yisongsong/TFlearn-SVHN", "max_issues_repo_head_hexsha": "fd655853a0d8a495cebb35b035f2007581120332", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "svhn_data.py", "max_forks_repo_name": "Yisongsong/TFlearn-SVHN", "max_forks_repo_head_hexsha": "fd655853a0d8a495cebb35b035f2007581120332", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6769230769, "max_line_length": 83, "alphanum_fraction": 0.6045536249, "include": true, "reason": "import numpy,import scipy", "num_tokens": 482}
|
#%%
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import model_selection, preprocessing
from sklearn import linear_model
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import binarize
import os
import sys
#%%
#import xgboost as xgb
color = sns.color_palette()
# Experiment configuration: number of monolayer structures, output directory,
# and the minimum number of alphas at which a LASSO coefficient must be
# non-zero for its feature to be kept (used further below).
Number_Monolayers = 773
path = './data/LASSO_BR2_1/'
n_alpha = 2
# Create directory
# Create target Directory if don't exist
if not os.path.exists(path):
    os.mkdir(path)
    os.mkdir(path+'/DATA_SETS/')
    os.mkdir(path+'/Figs/')
    os.mkdir(path+'/SavedModels/')
    os.mkdir(path+'/LASSO_Converged/')
    print("Directory " , path , " Created ")
else:
    print("Directory " , path , " already exists")
#%%
from pathlib import Path
# NOTE(review): `filename` is built but the reads below use the bare
# relative path instead — confirm which location is intended.
filename = Path(path) / ("1l_atomicPLMF_"+str(Number_Monolayers)+"structures.csv")
#%%
monolayer_descriptors = pd.read_csv("1l_atomicPLMF_"+str(Number_Monolayers)+"structures.csv",header=0) # read file with monolayers names and descriptors
# Same file re-read without a header row, so row 0 holds the column titles.
titles = pd.read_csv("1l_atomicPLMF_"+str(Number_Monolayers)+"structures.csv",header=None)
numMonolayerColumns = monolayer_descriptors.shape[1]
numMonolayerRecords = monolayer_descriptors.shape[0]
#%%
#sys.exit(-1)
#print('numMonolayerColumns',numMonolayerColumns)
#print('numMonolayerRecords',numMonolayerRecords)
#
BilayerProperty = pd.read_csv('C33_DFT.csv',header=0) # read file with bilayers names and target values
#Bilayer_Energy_additional
#print(BilayerProperty.iloc[:,1])
#sys.exit(-1)
numBilayerRecords = BilayerProperty.shape[0]
print('numBilayerRecords',numBilayerRecords)
bilayers = BilayerProperty.iloc[:,0]
#print('bilayers',bilayers)
monolayers = monolayer_descriptors.iloc[:,0]
#print('monolayers',monolayers)
#sys.exit(-1)
##############################################################################
#dataset = []
#
#for b in bilayers:
# print(b)
# bt=b.split("_")
# b_d = BilayerProperty.loc[BilayerProperty.Bilayer==b]
# bilayer_record = []
# m1 = monolayer_descriptors.loc[monolayer_descriptors.Monolayer==bt[0]]
# m2 = monolayer_descriptors.loc[monolayer_descriptors.Monolayer==bt[1]]
# for i in range(1,numMonolayerColumns):
## print(bt[:])
# sum = m1.iloc[0,i] + m2.iloc[0,i]
# bilayer_record += [sum]
# bilayer_record += [b_d.iloc[0,1]]
# dataset += [bilayer_record]
##############################################################################
# Precomputed bilayer features (sum of the two monolayers' descriptors, see
# the commented-out construction loop above) with the target in the last column.
df_dataset=pd.read_csv("PLMF.csv",header=0)
#df_dataset=pd.read_csv("PLMF.csv")
#print(dataset)
#
#
#df_dataset=pd.DataFrame(dataset)
#df_dataset.to_csv("PLMF.csv",header=True)
#print(df_dataset.iloc[:,1:numMonolayerColumns])
#print(df_dataset.iloc[:,-1])
# Column 0 is an index/name column; last column is the target (C33).
x = df_dataset.iloc[:,1:-1]
y = df_dataset.iloc[:,-1]
#print(y.iloc[:])
#sys.exit(-1)
#####################Standardization###########################################
#scalerx = preprocessing.StandardScaler().fit(x)
#x = pd.DataFrame(scalerx.transform(x), index=x.index.values, columns=x.columns.values)
#
#scalery = preprocessing.StandardScaler()
#y = scalery.fit_transform(y.values.reshape(-1, 1))
#y = pd.DataFrame(y)
#y = y.iloc[:,-1]
###############################################################################
#sys.exit(-1)
# NOTE(review): random_state=None makes the split non-reproducible run to run.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=.25, random_state=None)
#sys.exit(-1)
############################### Regression ###############################
#1- LASSO ###############################################
#https://www.analyticsvidhya.com/blog/2016/01/complete-tutorial-ridge-lasso-regression-python/
#alphas = [
# 4e-6,
# 5e-6,
# 6e-6,
# 7e-6,
# 8e-6,
# 9e-6,
# 1e-5,
# 2e-5,
# 3e-5,
# 4e-5,
# 5e-5,
# 6e-5
#
#]
# Grid of L1 penalties swept below; the commented-out lists nearby are
# earlier grids kept for reference.
alphas = [
    1e-5,
    2e-5,
    3e-5,
    4e-5,
    5e-5,
    6e-5,
    7e-5,
    8e-5,
    9e-5,
    1e-4,
    2e-4,
    3e-4
]
#alphas = [ #4e-06 0.0035858855970350386 0.6312484080242622
# 2e-1,
# 3e-1,
# 4e-1,
# 5e-1,
# 6e-1,
# 7e-1,
# 8e-1,
# 9e-1,
# 1,
# 2,
# 3,
# 4
#]
#alphas = np.logspace(1e-7, 3e-6, 12)
#print(alphas)
#finding best alpha
# Sweep the alpha grid: fit a LASSO per alpha, report held-out metrics, and
# append the model's coefficient vector as one comma-separated CSV row.
thefilecoeff = open(str(path)+'lasso_coefficients.csv', 'w')
print ('stop0')
for i in range(len(alphas)):
    lassoreg = Lasso(alpha=alphas[i],normalize=True, max_iter=1e9)
    # NOTE(review): fit on ALL of (x, y) but scored on X_test, which is a
    # subset of that same data — the reported r2/MSE are optimistic. Also,
    # Lasso(normalize=...) is removed in recent scikit-learn; confirm pin.
    lassoreg.fit(x,y)
    y_pred = lassoreg.predict(X_test)
    print(alphas[i],mean_squared_error(y_test, y_pred),r2_score(y_test, y_pred))
    for item in lassoreg.coef_:
        thefilecoeff.write("%s," % item)
    thefilecoeff.write("\n")
thefilecoeff.close()
#sys.exit(-1)
#creating the new_training_set.csv
# Re-load the 12 coefficient rows written above (one row per alpha).
coeff = pd.read_csv(str(path)+"lasso_coefficients.csv",header=None)
sub=coeff.iloc[0:12,0:numMonolayerColumns-1]
thefile = open(str(path)+'lasso_fields.csv', 'w')
new_training_set = open(str(path)+'BR2_training-test_set.csv', 'w')
#new_training_set = open('LASSO/new_training_set.csv', 'w')
#sys.exit(-1)
# Keep a feature only if its coefficient is non-zero for at least n_alpha
# of the 12 alphas.
lasso_fields=np.array([])
for i in range(0,numMonolayerColumns-2):
    counter=0
    for j in range(0,12):
        if sub[i][j]!=0:
            counter=counter+1;
    if counter>=n_alpha: ########NUMBER OF NON ZERO LASSO COEFF FOR EACH ALPHA
        print("Found one at ",i)
        lasso_fields=np.append(lasso_fields,i)
        thefile.write("%s\n" % i)
thefile.close()
numFields = lasso_fields.shape[0]
#sys.exit(-1)
# Write the reduced training/test set: selected feature columns + target.
for j in range(0,numBilayerRecords):
    for i in range(0,numMonolayerColumns-1):
        if i in lasso_fields:
            # new_training_set.write("%s," % x[j][i])
            new_training_set.write("%s," % str(x.iloc[j,i]))
    # new_training_set.write("%s\n" % y[j])
    new_training_set.write("%s\n," % str(y.iloc[j]))
new_training_set.close()
# Print the human-readable names of the selected descriptors.
for i in range(0,titles.shape[1]-1):
    if i in lasso_fields:
        print(titles.iloc[0,i+1])
# Export the per-monolayer values of the selected descriptors.
lasso_monolayer_data= open(str(path)+'lasso_monolayer_data.csv', 'w')
for j in range(0,Number_Monolayers-1): #####NUMBER OF MONOLAYERS+1 ######
    lasso_monolayer_data.write("%s," % titles.iloc[j,0])
    for i in range(0,numMonolayerColumns-2):
        if i in lasso_fields:
            lasso_monolayer_data.write("%s," % titles.iloc[j,i+1])
    lasso_monolayer_data.write("\n")
lasso_monolayer_data.close()
#%%
|
{"hexsha": "7c66ce7f899f2c972f26f73fa13fd390b3f90e01", "size": 7184, "ext": "py", "lang": "Python", "max_stars_repo_path": "WR/WR_LASSO.py", "max_stars_repo_name": "mutazag/ilab1", "max_stars_repo_head_hexsha": "c37ae969d0fa13029ee08e7c0e102990e98e65b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "WR/WR_LASSO.py", "max_issues_repo_name": "mutazag/ilab1", "max_issues_repo_head_hexsha": "c37ae969d0fa13029ee08e7c0e102990e98e65b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2019-09-15T09:48:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-28T05:08:17.000Z", "max_forks_repo_path": "WR/WR_LASSO.py", "max_forks_repo_name": "mutazag/ilab1", "max_forks_repo_head_hexsha": "c37ae969d0fa13029ee08e7c0e102990e98e65b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8671096346, "max_line_length": 153, "alphanum_fraction": 0.6295935412, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2071}
|
# Script to replicate figure 4 and 5
using StatsBase, Statistics, LinearAlgebra, StatsPlots, XLSX, PrettyTables, GLM
# FIX: DataFrame, transform!, ByRow, subset, combine and groupby all come from
# DataFrames.jl, which was never loaded — none of the packages above exports them.
using DataFrames
# -------------- Figure 4 --------------------------------------------------------------------------------------------------------------------------------
t_fig_4 = DataFrame(XLSX.readtable("clean/t_fig_2_3.xlsx", "Sheet1")...) # load directly from file
transform!(t_fig_4, :country .=> ByRow(string) .=> :country, renamecols=false) # change type to String
transform!(t_fig_4, :year .=> ByRow(Int) .=> :year, renamecols=false) # change type to Int64
transform!(t_fig_4, [:FD_GO, :VA_GO, :U, :D] .=> ByRow(Float64) .=> [:FD_GO, :VA_GO, :U, :D], renamecols=false) # change type to Float64
# Scatter column `a` against column `b` for a single `year`, annotating each
# point with its country code and overlaying a 45-degree reference line.
# `a` and `b` are column Symbols; `cols(...)` is required to splice runtime
# Symbols into the @df macro.
function plot_fig4(df::DataFrame, a, b, year::Int64)
    lims = ifelse(a == :D, (1.4,3.1), (0.28,0.72)) # different x, y axis limits
    df = subset(df, :year => ByRow(x -> x == year))
    # unfortunately unable to change size of annotations
    p = @df df scatter(cols(a), cols(b), xlims=lims, ylims=lims, legend=:none, series_annotations=:country,
                  xlabel="$(String(a)) ($(year))", ylabel="$(String(b)) ($(year))") # need to use cols(a) to pass from function
    # NOTE(review): `col` is not a standard Plots attribute (usually `color`/`c`);
    # confirm this line actually renders red rather than silently ignoring it.
    plot!(0.1:0.1:3.1,0.1:0.1:3.1, col=:red, lw=2) # 45 degree line
    return p
end
# Assemble Figure 4: the same two variable pairs in 1995 vs 2011, as a 2×2 grid.
l = @layout [a b ; c d] # 2×2 layout
p1 = plot_fig4(t_fig_4, :VA_GO, :FD_GO, 1995)
p2 = plot_fig4(t_fig_4, :VA_GO, :FD_GO, 2011)
p3 = plot_fig4(t_fig_4, :D, :U, 1995)
p4 = plot_fig4(t_fig_4, :D, :U, 2011)
title = "Figure 4: GVC Measures and their Correlation over time"
p_fig4 = plot(p1, p2, p3, p4, layout=l, plot_title=title, plot_titlefontsize=10)
savefig(p_fig4, "images/figure4.png") # export image to folder
# -------------- Figure 5 and 6 --------------------------------------------------------------------------------------------------------------------------------
# data for upper panel: per-year cross-country correlations
gdf = groupby(t_fig_4, :year)
corr_FD_VA = combine(gdf, [:FD_GO, :VA_GO] => ((x, y) -> cor(x, y)) => :correlation)
corr_U_D = combine(gdf, [:U, :D] => ((x, y) -> cor(x, y)) => :correlation)
# data for lower panel: simple OLS grouped per year
corr_confint_FD_VA = DataFrame(year=Int[], beta=Float64[], lower=Float64[], upper=Float64[]) # initializing DataFrame
corr_confint_U_D = DataFrame(year=Int[], beta=Float64[], lower=Float64[], upper=Float64[])
# FIX: the original iterated `years`, which is never defined anywhere in this
# script. Iterate the years actually present in the data, sorted so rows are
# pushed in chronological order for the line plots below.
for i in sort(unique(t_fig_4.year))
    df = subset(t_fig_4, :year => ByRow(x -> x == i))
    ols = lm(@formula(FD_GO ~ VA_GO), df)
    β = coef(ols)[2]                 # slope coefficient
    conf = confint(ols)[2,:]         # its 95% confidence interval
    push!(corr_confint_FD_VA, [i β conf[1] conf[2]]) # push data to the dataframe
    ols = lm(@formula(U ~ D), df)
    β = coef(ols)[2]
    conf = confint(ols)[2,:]
    push!(corr_confint_U_D, [i β conf[1] conf[2]]) # push data to the dataframe
end
# figure 5: FD/GO vs VA/GO — correlation (top) and OLS slope with CI (bottom)
p1 = @df corr_FD_VA plot(:year, :correlation, xlims=(1994,2012), ylims=(0.79,1.01),
            lw=2, color=:blue, label=:none, ylabel="Correlation: FD on VA")
p2 = @df corr_confint_FD_VA scatter(:year, [:beta, :lower, :upper], xlims=(1994,2012), ylims=(0.68,1.32), label=:none,
            ylabel="Slope coeff and conf int")
# connect the slope estimates with a line on top of the scatter
plot!(corr_confint_FD_VA.year, corr_confint_FD_VA.beta, color=:blue, lw=2, label=:none)
l = @layout [a ; b] # 2×1 layout: two stacked panels
title = "Figure 5: FU/GO and VA/GO over time"
p_fig5 = plot(p1, p2, layout=l, plot_title=title, plot_titlefontsize=10)
savefig(p_fig5, "images/figure5.png") # export image to folder
# figure 6: same construction as figure 5, for the U and D measures
p1 = @df corr_U_D plot(:year, :correlation, xlims=(1994,2012), ylims=(0.79,1.01),
            lw=2, color=:blue, label=:none, ylabel="Correlation: U on D")
p2 = @df corr_confint_U_D scatter(:year, [:beta, :lower, :upper], xlims=(1994,2012), ylims=(0.68,1.32), label=:none,
            ylabel="Slope coeff and conf int")
# connect the slope estimates with a line on top of the scatter
plot!(corr_confint_U_D.year, corr_confint_U_D.beta, color=:blue, lw=2, label=:none)
l = @layout [a ; b] # 2×1 layout: two stacked panels
title = "Figure 6: U and D over time"
p_fig6 = plot(p1, p2, layout=l, plot_title=title, plot_titlefontsize=10)
savefig(p_fig6, "images/figure6.png") # export image to folder
|
{"hexsha": "8909334194c4a82f286e03941cc2c5f5717c2210", "size": 3960, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/figure_4_5_6.jl", "max_stars_repo_name": "forsthuber92/antras_chor_2018.jl", "max_stars_repo_head_hexsha": "7c520db82566fa75a6b2eb41ae0cab3ca26f522f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/figure_4_5_6.jl", "max_issues_repo_name": "forsthuber92/antras_chor_2018.jl", "max_issues_repo_head_hexsha": "7c520db82566fa75a6b2eb41ae0cab3ca26f522f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/figure_4_5_6.jl", "max_forks_repo_name": "forsthuber92/antras_chor_2018.jl", "max_forks_repo_head_hexsha": "7c520db82566fa75a6b2eb41ae0cab3ca26f522f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.5172413793, "max_line_length": 160, "alphanum_fraction": 0.6191919192, "num_tokens": 1322}
|
import numpy as np
from research import wrappers
from collections import defaultdict
from torch.utils.tensorboard import SummaryWriter
from research.define_config import env_fn
import gym
from gym.vector.async_vector_env import AsyncVectorEnv
import torch as th
from research.nets import net_map
from jax.tree_util import tree_multimap, tree_map
from research import utils
import PIL
from PIL import Image, ImageDraw, ImageFont
TN = 8
class RLAlgo:
  """Base class for RL algorithms in this repo.

  Builds the test env, the vectorized training/test envs (real or learned
  world-model envs depending on G.lenv), optional preprocessing wrappers,
  and the TensorBoard writer/logger. Subclasses implement get_av().
  """
  def __init__(self, G):
    # G is the global config/flag object (presumably a boxLCD config namespace
    # — attributes used here: lenv, preproc, num_envs, seed, logdir, weightdir,
    # model, device, ep_len, lcd_h, lcd_w, fps).
    self.G = G
    print(G.full_cmd)
    # th.manual_seed(G.seed)
    # np.random.seed(G.seed)
    # Set up logger and save configuration
    self.logger = defaultdict(lambda: [])
    self.writer = SummaryWriter(G.logdir)
    self.tenv = env_fn(G, G.seed)() # test env
    self.obs_space = self.tenv.observation_space
    self.act_space = self.tenv.action_space
    # TN parallel real envs are always built for evaluation rollouts.
    self.real_tvenv = AsyncVectorEnv([env_fn(G) for _ in range(TN)])
    if G.lenv:
      # Learned-env mode: load a frozen world model from disk and wrap it so it
      # presents the vectorized-env interface.
      sd = th.load(G.weightdir / f'{G.model}.pt')
      mG = sd.pop('G')
      mG.device = G.device
      model = net_map[G.model](self.tenv, mG)
      model.to(G.device)
      model.eval()
      # Freeze the world model: the RL agent must not backprop into it.
      for p in model.parameters():
        p.requires_grad = False
      self.env = wrappers.RewardLenv(wrappers.LearnedEnv(G.num_envs, model, G))
      self.tvenv = self.learned_tvenv = wrappers.RewardLenv(wrappers.LearnedEnv(TN, model, G))
      #self.obs_space.spaces = utils.subdict(self.obs_space.spaces, self.env.observation_space.spaces.keys())
      self.obs_space = self.env.observation_space
      # Strip the leading (vector/batch) dimension from every space's shape so
      # obs_space describes a single env, not the batched env.
      # NOTE(review): this mutates the space objects in place — they are shared
      # with self.env.observation_space; confirm that is intended.
      def fx(x):
        x.shape = x.shape[1:]
        return x
      self.obs_space.spaces = tree_map(fx, self.env.observation_space.spaces)
      if G.preproc:
        # Reuse the world model's built-in preprocessor ("ronald") to add a
        # latent zstate (and matching goal latent) to the observation dict.
        preproc = model.ronald
        self.env = wrappers.PreprocVecEnv(preproc, self.env, G)
        self.tvenv = self.learned_tvenv = wrappers.PreprocVecEnv(preproc, self.learned_tvenv, G)
        self.real_tvenv = wrappers.PreprocVecEnv(preproc, self.real_tvenv, G)
        self.obs_space.spaces['zstate'] = gym.spaces.Box(-1, 1, (preproc.z_size,))
        if 'goal:proprio' in self.obs_space.spaces:
          self.obs_space.spaces['goal:zstate'] = gym.spaces.Box(-1, 1, (preproc.z_size,))
    else:
      # Real-env mode: train directly on vectorized real envs.
      self.env = AsyncVectorEnv([env_fn(G) for _ in range(G.num_envs)])
      self.tvenv = self.real_tvenv
      if G.preproc:
        # Load a standalone preprocessor network from the weight dir, frozen.
        sd = th.load(G.weightdir / f'{G.model}.pt')
        mG = sd.pop('G')
        mG.device = G.device
        preproc = net_map[G.model](self.tenv, mG)
        preproc.to(G.device)
        preproc.load(G.weightdir)
        for p in preproc.parameters():
          p.requires_grad = False
        preproc.eval()
        self.env = wrappers.PreprocVecEnv(preproc, self.env, G)
        self.real_tvenv = self.tvenv = wrappers.PreprocVecEnv(preproc, self.tvenv, G)
        self.obs_space.spaces['zstate'] = gym.spaces.Box(-1, 1, (preproc.z_size,))
        if 'goal:proprio' in self.obs_space.spaces:
          self.obs_space.spaces['goal:zstate'] = gym.spaces.Box(-1, 1, (preproc.z_size,))
    # tenv.reset()
    # Pick which observation key holds the goal, based on the env class.
    # NOTE(review): no else branch — for any other env class self.goal_key is
    # never set and later attribute access would raise; confirm only these two
    # env types are used with this base class.
    if self.tenv.__class__.__name__ == 'BodyGoalEnv':
      self.goal_key = 'goal:proprio'
    elif self.tenv.__class__.__name__ == 'CubeGoalEnv':
      self.goal_key = 'goal:object'

  def get_av(self, o):
    """Return (action, value) for observation batch `o`. Subclasses override."""
    raise NotImplementedError()

  def test_agent(self, itr, use_lenv=False):
    """Roll out the agent for one episode-length window on the test envs.

    Runs TN parallel envs (learned ones when use_lenv=True, real otherwise),
    accumulates returns/lengths/success, optionally renders an annotated video
    of the lcd observations to TensorBoard, and appends summary stats to
    self.logger under a 'learned_test/...' or 'real_test/...' prefix.
    """
    # init
    REP = 4
    if use_lenv:
      # Learned env works in torch tensors on G.device.
      pf = th
      _env = self.learned_tvenv
      o, ep_ret, ep_len = _env.reset(), th.zeros(TN).to(self.G.device), th.zeros(TN).to(self.G.device)
    else:
      # Real env works in numpy.
      pf = np
      _env = self.real_tvenv
      o, ep_ret, ep_len = _env.reset(), np.zeros(TN), np.zeros(TN)
    # run
    frames = []
    dones = []
    rs = []
    vs = []
    all_done = pf.zeros_like(ep_ret)
    success = pf.zeros_like(ep_ret)
    for i in range(self.G.ep_len):
      # Take deterministic actions at test time
      a, v = self.get_av(o)
      #a, v, logp = self.ac.step(o)
      # Agent outputs are torch when a learned env exists; convert for real env.
      if not use_lenv and self.G.lenv:
        a = a.detach().cpu().numpy()
        v = v.detach().cpu().numpy()
      o, r, d, info = _env.step(a)
      all_done = pf.logical_or(all_done, d)
      # A done before the final step counts as success (time-limit dones don't).
      if i != (self.G.ep_len - 1):
        success = pf.logical_or(success, d)
      rs += [r]
      vs += [v]
      dones += [d]
      # Stop accumulating return/length once an env has finished.
      ep_ret += r * ~all_done
      ep_len += 1 * ~all_done
      if 'lcd' in o:
        # Visualize current-vs-goal lcd as a signed difference mapped to [0,1].
        delta = (1.0 * o['lcd'] - 1.0 * o['goal:lcd'] + 1) / 2
        #frame = np.concatenate([1.0 * o['goal:lcd'], 1.0 * o['lcd'], delta], axis=-2)
        frame = delta
        frames += [frame]
      else:
        frames = []
    if use_lenv:
      def proc(x): return x.detach().cpu().float()
      prefix = 'learned'
    else:
      def proc(x): return x
      prefix = 'real'
    if len(frames) != 0:
      if use_lenv:
        frames = th.stack(frames)
        frames = frames.detach().cpu().numpy()
      else:
        frames = np.stack(frames)
      # Upsample each lcd frame REP× in both spatial dims and make it 3-channel,
      # then tile the TN envs side by side into one wide strip per timestep.
      frames = frames[..., None].repeat(REP, -3).repeat(REP, -2).repeat(3, -1)
      frames = frames.transpose(0, 2, 1, 3, 4).reshape([-1, self.G.lcd_h * 1 * REP, TN * self.G.lcd_w * REP, 3])
      # make borders
      for k in range(TN):
        if use_lenv:
          frames[:, :, k * REP * self.G.lcd_w] = [0, 0, 1]
        else:
          frames[:, :, k * REP * self.G.lcd_w] = [1, 0, 0]
          #frames[:, :, k * REP * self.G.lcd_w] = 0.0
      dframes = []
      yellow = (255, 255, 50)
      white = (255, 255, 255)
      purple = (75, 0, 130)
      # Annotate each frame with per-env reward/value text and success stars.
      for i in range(len(frames)):
        frame = frames[i]
        pframe = Image.fromarray((frame * 255).astype(np.uint8))
        # get a drawing context
        draw = ImageDraw.Draw(pframe)
        # NOTE(review): font path is Pillow's test-fonts directory — fragile
        # outside a Pillow source checkout; confirm it resolves at runtime.
        fnt = ImageFont.truetype("Pillow/Tests/fonts/FreeMono.ttf", 60)
        for j in range(TN):
          if use_lenv:
            color = yellow if dones[i][j].detach().cpu().numpy() and i != self.G.ep_len - 1 else white
            draw.text((self.G.lcd_w * REP * j + 10, 10), f't: {i} r:{rs[i][j].detach().cpu().numpy():.3f}\nV: {vs[i][j].detach().cpu().numpy():.3f}', fill=color, fnt=fnt)
            draw.text((self.G.lcd_w * REP * j + 5, 5), f'{"*"*int(success[j].detach().cpu().numpy())}', fill=yellow, fnt=fnt)
            #draw.text((self.G.lcd_w * REP * (j+1) - 20, 10), '[]', fill=purple, fnt=fnt)
          else:
            color = yellow if dones[i][j] and i != self.G.ep_len - 1 else white
            draw.text((self.G.lcd_w * REP * j + 10, 10), f't: {i} r:{rs[i][j]:.3f}\nV: {vs[i][j]:.3f}', fill=color, fnt=fnt)
            draw.text((self.G.lcd_w * REP * j + 5, 5), f'{"*"*int(success[j])}', fill=yellow, fnt=fnt)
        dframes += [np.array(pframe)]
      dframes = np.stack(dframes)
      # (T, H, W, C) -> (1, T, C, H, W) as expected by utils.add_video.
      vid = dframes.transpose(0, -1, 1, 2)[None]
      utils.add_video(self.writer, f'{prefix}_rollout', vid, itr + 1, fps=self.G.fps)
      print('wrote video', prefix)
    self.logger[f'{prefix}_test/EpRet'] += [proc(ep_ret).mean()]
    self.logger[f'{prefix}_test/EpLen'] += [proc(ep_len).mean()]
    self.logger[f'{prefix}_test/success_rate'] += [proc(success).mean()]
|
{"hexsha": "7b8e48cc38ef616416d6f26dfb023f9a162cb65a", "size": 6987, "ext": "py", "lang": "Python", "max_stars_repo_path": "research/rl/_base.py", "max_stars_repo_name": "matwilso/boxLCD", "max_stars_repo_head_hexsha": "7505e27f47e6694026303aa6cf12477959fc9fba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-05-17T14:33:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T07:14:03.000Z", "max_issues_repo_path": "research/rl/_base.py", "max_issues_repo_name": "matwilso/boxLCD", "max_issues_repo_head_hexsha": "7505e27f47e6694026303aa6cf12477959fc9fba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "research/rl/_base.py", "max_forks_repo_name": "matwilso/boxLCD", "max_forks_repo_head_hexsha": "7505e27f47e6694026303aa6cf12477959fc9fba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6988636364, "max_line_length": 170, "alphanum_fraction": 0.5933877201, "include": true, "reason": "import numpy,from jax", "num_tokens": 2133}
|
# -*- coding: utf-8 -*-
"""
_____________________________________________________________________________
This file contain code for converting pretrain Pytorch model into TensorRT engine
_____________________________________________________________________________
"""
from icecream import ic
import sys
import os
from pathlib import Path
import torch
import numpy as np
import argparse
from torch2trt_dynamic import torch2trt_dynamic
from utils import experiment_loader, initial_logger, copyStateDict, get_cfg_defaults
sys.path.append("../")
from craft import CRAFT
# Module-level logger shared by the conversion functions below.
logger = initial_logger()
# Prefer GPU when available; the TensorRT conversion path requires CUDA.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def build(args):
    """Load the pretrained CRAFT network and export it to a TensorRT engine.

    Parameters
    ----------
    args.weight : str
        Path to the folder holding the pretrained CRAFT .pth model
        (default is ../weights).
    args.dynamic : bool
        Whether to build the engine with dynamic input shapes (default True).

    Returns
    -------
    None
        The serialized engine is written to ../weights/detec_rt.engine.
    """
    logger.info("Converting CRAFT Pytorch pth to TensorRT engine...")
    model_path, model_config = experiment_loader(model_format='pth', data_path=args.weight)

    # Load config come with pretrain model
    cfg_detec = get_cfg_defaults()
    cfg_detec.merge_from_file(model_config)
    cfg_detec.INFERENCE.TRT_DYNAMIC = args.dynamic

    # Set output path for tensorRT files
    output_path = Path('../weights/')
    # Set name for tensorRT files
    output_detec = os.path.join(output_path, "detec_rt.engine")

    # Dummy input used to trace the model. TensorRT conversion requires CUDA,
    # so send it straight to the GPU.
    # FIX: the original transferred the tensor twice (.cuda() followed by
    # .to(device)); a single transfer is sufficient.
    input_tensor = torch.randn((1, 3, 768, 768), requires_grad=False).cuda()

    # Load net
    net = CRAFT()
    net.load_state_dict(copyStateDict(torch.load(model_path)))
    net = net.cuda()
    net.eval()

    # Convert the model into tensorRT; the shape triple bounds the dynamic axes.
    opt_shape_param = [
        [
            cfg_detec.INFERENCE.TRT_MIN_SHAPE,    # min
            cfg_detec.INFERENCE.TRT_OPT_SHAPE,    # opt
            cfg_detec.INFERENCE.TRT_MAX_SHAPE     # max
        ]
    ]
    if cfg_detec.INFERENCE.TRT_DYNAMIC:
        model_trt = torch2trt_dynamic(net, [input_tensor], fp16_mode=cfg_detec.INFERENCE.TRT_AMP, opt_shape_param=opt_shape_param)
    else:
        model_trt = torch2trt_dynamic(net, [input_tensor], fp16_mode=cfg_detec.INFERENCE.TRT_AMP)

    # Sanity check: the engine should reproduce the Pytorch outputs.
    logger.info("Compare Pytorch output vs TensorRT engine output...")
    y = net(input_tensor)
    y_trt = model_trt(input_tensor)
    ic('Pytorch pth output: ', y)
    ic('TensorRT engine output: ', y_trt)

    with open(output_detec, "wb") as f:
        f.write(model_trt.engine.serialize())
    # FIX: typo in the log message ("sucess" -> "success").
    logger.info("Convert CRAFT Pytorch pth to TensorRT engine success")
def main():
    """Parse CLI arguments and run the CRAFT -> TensorRT conversion."""
    parser = argparse.ArgumentParser(description="Exports model to TensorRT, and post-processes it to insert TensorRT plugins")
    parser.add_argument("--weight", required=False, help="Path to input model folder", default='../weights')
    # BUG FIX: with default=True and no type=, any CLI value (even "False")
    # arrived as a non-empty string, which is truthy — dynamic mode could never
    # be disabled from the command line. Parse the string into a real bool;
    # omitting the flag still yields True.
    parser.add_argument("--dynamic", required=False, help="Use dynamic or not", default=True,
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'))
    args = parser.parse_args()
    build(args)


if __name__ == '__main__':
    main()
|
{"hexsha": "b750d7a0aa7cb01b60126a174c02a9280320c80b", "size": 3259, "ext": "py", "lang": "Python", "max_stars_repo_path": "converters/torch2trt.py", "max_stars_repo_name": "k9ele7en/torch2tensorRT-dynamic-CRAFT-pytorch", "max_stars_repo_head_hexsha": "40191e9ac0d6f3a8d2763ab11d02d391f4880944", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-06T17:56:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T00:09:37.000Z", "max_issues_repo_path": "converters/torch2trt.py", "max_issues_repo_name": "k9ele7en/torch2tensorRT-dynamic-CRAFT-pytorch", "max_issues_repo_head_hexsha": "40191e9ac0d6f3a8d2763ab11d02d391f4880944", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "converters/torch2trt.py", "max_forks_repo_name": "k9ele7en/torch2tensorRT-dynamic-CRAFT-pytorch", "max_forks_repo_head_hexsha": "40191e9ac0d6f3a8d2763ab11d02d391f4880944", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3365384615, "max_line_length": 130, "alphanum_fraction": 0.706044799, "include": true, "reason": "import numpy", "num_tokens": 764}
|
import os
from tqdm import tqdm
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import cv2
import torch
from torchvision import transforms
from models.model_with_tcn_big import Model
from utils.hwdb2_0_chars import char_set
from utils.get_dgrl_data import get_pred_data
from utils.pred_utils import get_ar_cr, get_pred_str, polygon_IOU, normal_leven
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def predict(model, pred_iter, file_path, show=False):
    """Recognize all text lines on the next page sample from `pred_iter`.

    Matches each predicted line polygon to a labeled line box by IoU,
    accumulates character-level CR/AR statistics and line-level TP/FP/FN
    counts, and (optionally) writes a side-by-side visualization image.

    Returns (CR, AR, All, edit_d, char_c, TP, FP, FN).
    """
    with torch.no_grad():
        img_np, img_tensor, boxes, page_label = next(pred_iter)
        # White canvas the same size as the page, used to render label text.
        label_np = np.ones_like(img_np, dtype=np.uint8) * 255
        boxes = boxes[0]
        imgs = img_tensor.to(device)
        kernel, out_chars, sub_img_nums, line_top_lefts, line_contours = model(imgs, None, is_train=False)
        line_contours = line_contours[0]
        prediction_char = out_chars
        prediction_char = prediction_char.log_softmax(-1)
        # Decode per-line character predictions to strings.
        pred_strs = get_pred_str(prediction_char, char_set)
        print(pred_strs)
        pred_str_group = ['' for _ in range(len(page_label))]
        not_in_char = ''
        TP = 0
        FP = 0
        FN = 0
        # Assign each predicted line to the first label box it overlaps
        # (IoU > 0.9); contours are at 1/4 scale, hence boxes / 4.
        for pred_i in range(len(pred_strs)):
            pred_str_poly = line_contours[pred_i]
            pred_str_poly = np.squeeze(pred_str_poly, 1)
            find_flag = 0
            for label_i in range(len(boxes)):
                label_box = boxes[label_i] / 4
                pred_iou = polygon_IOU(pred_str_poly, label_box)
                if pred_iou > 0.9:
                    pred_str_group[label_i] += pred_strs[pred_i]
                    find_flag = 1
                    break
            if find_flag == 0:
                # Unmatched prediction: false positive; keep its text separately.
                FP += 1
                not_in_char += pred_strs[pred_i]
        # Line-level hit counting.
        # NOTE(review): `len(a) / len(b)` is used as a truth value, so ANY
        # non-empty prediction counts as TP regardless of accuracy; this looks
        # like it was meant to be a comparison (e.g. a ratio threshold) —
        # confirm intent.
        for i in range(len(pred_str_group)):
            if len(pred_str_group[i]) / len(page_label[i]):
                TP += 1
            else:
                FN += 1
        pred_strs_s = ''.join(pred_str_group) + not_in_char
        # CR, AR, All = get_ar_cr(pred_strs_s, ''.join(page_label))
        CR, AR, All = 0, 0, 0
        char_c = len(''.join(page_label))
        # Page-level edit distance between all predictions and all labels.
        edit_d = normal_leven(pred_strs_s, ''.join(page_label))
        # Accumulate CR/AR line by line; unmatched chars penalize AR.
        for sub_p, sub_l in zip(pred_str_group, page_label):
            sub_cr, sub_ar, sub_all = get_ar_cr(sub_p, sub_l)
            CR += sub_cr
            AR += sub_ar
            All += sub_all
        AR -= len(not_in_char)
        if show:
            # Draw detected line contours (rescaled back to full resolution).
            line_contours = list(map(lambda x: x*4, line_contours))
            for box in line_contours:
                box = np.int_(box)
                cv2.polylines(img_np, [box], True, 128, 1)
            char_size = int(label_np.shape[1] / len(page_label) / 5)
            if isinstance(label_np, np.ndarray):
                label_np = Image.fromarray(cv2.cvtColor(label_np, cv2.COLOR_BGR2RGB))
            # PIL is used for text so CJK glyphs render (cv2.putText cannot).
            draw = ImageDraw.Draw(label_np)
            fontText = ImageFont.truetype('simfang.ttf', char_size, encoding="utf-8")
            draw.text((0, 0), 'CR:{:.6f} AR:{:.6f}'.format(CR / All, AR / All), (0, 0, 0), font=fontText)
            for i in range(len(pred_str_group)):
                left = boxes[i][0][0]
                top = boxes[i][0][1]
                draw.text((left, top), 'label:' + page_label[i], (0, 0, 0), font=fontText)
                draw.text((left, top + char_size), 'preds:' + pred_str_group[i], (0, 0, 0), font=fontText)
            label_np = cv2.cvtColor(np.asarray(label_np), cv2.COLOR_RGB2BGR)
            # Page image on the left, rendered labels/predictions on the right.
            show_np = np.hstack([img_np, label_np])
            show_np = cv2.resize(show_np, None, fx=0.7, fy=0.7)
            print("labels:", page_label)
            print("predicts:", pred_str_group)
            # cv2.drawContours(img_np, line_contours, -1, (0, 0, 255), 1)
            (path, filename) = os.path.split(file_path)
            save_name = filename.split('.')[0] + '_cl.jpg'
            save_dir = './output/result'
            save_path = os.path.join(save_dir, save_name)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            cv2.imwrite(save_path, show_np)
    return CR, AR, All, edit_d, char_c, TP, FP, FN
if __name__ == '__main__':
    # Evaluate the handwriting recognition model on every .dgrl page in the
    # test directory, accumulating CR/AR, edit-distance accuracy and line-level
    # precision/recall/F1, displayed via tqdm after each page.
    device = torch.device('cuda')
    img_transform = transforms.ToTensor()
    model = Model(num_classes=3000, line_height=32, is_transformer=True, is_TCN=True).to(device)
    model.load_state_dict(torch.load('./output/model.pth', map_location=device))
    model.eval()
    test_file_dir = '../dgrl_test'
    file_paths = [os.path.join(test_file_dir, file_name)
                  for file_name in os.listdir(test_file_dir)
                  if file_name.endswith('dgrl')]
    CR_all, AR_all, All_all = 0, 0, 0
    EDIT_DISTANCE_ALL, CHAR_COUNT_ALL = 0, 0
    TP_all, FP_all, FN_all = 0, 0, 0
    pbar = tqdm(total=len(file_paths))
    pred_iter = iter(get_pred_data(file_paths, 1600))
    for i in range(len(file_paths)):
        # FIX: renamed `all` -> `all_cnt`; the original shadowed the builtin.
        cr, ar, all_cnt, edit_d, char_c, TP, FP, FN = predict(model, pred_iter, file_paths[i], True)
        CR_all += cr
        AR_all += ar
        All_all += all_cnt
        EDIT_DISTANCE_ALL += edit_d
        CHAR_COUNT_ALL += char_c
        TP_all += TP
        FP_all += FP
        FN_all += FN
        # FIX: guard every ratio against a zero denominator (previously a page
        # with no detections or no labels crashed with ZeroDivisionError).
        Precision = TP_all / (TP_all + FP_all) if (TP_all + FP_all) else 0.0
        Recall = TP_all / (TP_all + FN_all) if (TP_all + FN_all) else 0.0
        F1 = 2 / (1 / Precision + 1 / Recall) if (Precision and Recall) else 0.0
        cr_rate = CR_all / All_all if All_all else 0.0
        ar_rate = AR_all / All_all if All_all else 0.0
        char_acc = (CHAR_COUNT_ALL - EDIT_DISTANCE_ALL) / CHAR_COUNT_ALL if CHAR_COUNT_ALL else 0.0
        pbar.display('CR:{:.6f} AR:{:.6f} edit_d:{:.6f} Precision:{:.6f} Recall:{:.6f} F1:{:.6f}\n'.format(
            cr_rate, ar_rate, char_acc, Precision, Recall, F1))
        pbar.update(1)
|
{"hexsha": "fe644e89a72d4bf2d85db3d724b66fedcc34674e", "size": 5641, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval_hwdb_with_center_line.py", "max_stars_repo_name": "BruceHan98/OCHTPS", "max_stars_repo_head_hexsha": "5bee02bcbff36029cd47b4802178216f980a4298", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eval_hwdb_with_center_line.py", "max_issues_repo_name": "BruceHan98/OCHTPS", "max_issues_repo_head_hexsha": "5bee02bcbff36029cd47b4802178216f980a4298", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eval_hwdb_with_center_line.py", "max_forks_repo_name": "BruceHan98/OCHTPS", "max_forks_repo_head_hexsha": "5bee02bcbff36029cd47b4802178216f980a4298", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1148648649, "max_line_length": 107, "alphanum_fraction": 0.5904981386, "include": true, "reason": "import numpy", "num_tokens": 1515}
|
import numpy as np
from scipy import interpolate
import os
import shutil
import cv2
import argparse
import _init_paths
from datasets.json_dataset import JsonDataset
from six.moves import cPickle as pickle
import pdb
# Silence divide-by-zero / invalid-value warnings from the vectorized IoU math.
np.seterr(divide='ignore',invalid='ignore')
# windows: origin and multi-window 1,2,3
windows = [[-1024,3071],[-174,274],[-1493,484],[-534,1425]]
# Detections with confidence below this are ignored throughout the script.
score_threshold = 0.5
def parse_args():
    """Build and parse the command-line options for the lesion FROC case study."""
    arg_parser = argparse.ArgumentParser(description='Test lesion FROC')
    arg_parser.add_argument('--baseline', default='/home/lizihao/baseline.pkl', help='path to the baseline detections.pkl')
    arg_parser.add_argument('--ours', help='path to our detections.pkl')
    arg_parser.add_argument('--dataset', default='lesion_test', help='test dataset name')
    return arg_parser.parse_args()
def windowing(img, window):
    """Linearly map intensities in `img` into [0, 1] relative to `window`.

    `window` is a (low, high) pair; values outside it are NOT clipped, so
    out-of-window intensities map outside [0, 1].
    """
    # FIX: the original made an explicit copy first, but the arithmetic below
    # already allocates a new array and never mutates the input — the copy was
    # pure overhead.
    low, high = window[0], window[1]
    return (img - low) / (high - low)
def draw_det_results(im, boxes, gts):
    """Draw confident detections (red) and ground-truth boxes (green) on `im`.

    Detections are [x1, y1, x2, y2, score]; those scoring below the
    module-level score_threshold are skipped. The image is modified in place
    and also returned.
    """
    for det in boxes:
        if det[4] < score_threshold:
            continue  # low-confidence detection — not drawn
        top_left = (int(det[0]), int(det[1]))
        bottom_right = (int(det[2]), int(det[3]))
        cv2.rectangle(im, top_left, bottom_right, (0,0,255), 1)
    for gt in gts:
        top_left = (int(gt[0]), int(gt[1]))
        bottom_right = (int(gt[2]), int(gt[3]))
        cv2.rectangle(im, top_left, bottom_right, (0,255,0), 1)
    return im
def save_image(boxes1, boxes2, gts, image_path, save_dir):
    """Render one CT slice under the four intensity windows with boxes drawn,
    concatenate the panels horizontally, and save to `save_dir`.

    NOTE(review): panel 0 draws `boxes1` (baseline) while panels 1-3 all draw
    `boxes2` (ours) under the three narrower windows — presumably intentional
    for baseline-vs-ours comparison, but confirm.
    """
    name_split = image_path.split('/')
    # Encode parent folder into the filename to keep saved names unique.
    save_name = name_split[-2] + '__' + name_split[-1]
    # read image (16-bit; shift by 32768 to recover signed HU-like values)
    img = cv2.imread(image_path, -1)
    img = img.astype(np.float32) - 32768
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    # draw detections (windowing returns a fresh array, so each panel starts clean)
    ori_img = windowing(img,windows[0])
    img_0 = draw_det_results(ori_img, boxes1, gts)
    ori_img = windowing(img,windows[1])
    img_1 = draw_det_results(ori_img, boxes2, gts)
    ori_img = windowing(img,windows[2])
    img_2 = draw_det_results(ori_img, boxes2, gts)
    ori_img = windowing(img,windows[3])
    img_3 = draw_det_results(ori_img, boxes2, gts)
    img_concat = np.hstack((img_0, img_1, img_2, img_3))
    # save image (windowed values are in [0,1]; scale up to 8-bit range)
    cv2.imwrite(os.path.join(save_dir, save_name), img_concat*255)
def get_roidb_info(roidb):
    """Split a roidb (list of per-image dicts) into parallel lists.

    Returns (gt_boxes, name_list): the 'boxes' entry and the 'image' path of
    every record, in order.
    """
    # The original pre-filled gt_boxes with empty lists and then overwrote
    # every slot in a manual index loop — dead work; build both lists directly.
    gt_boxes = [entry['boxes'] for entry in roidb]
    name_list = [entry['image'] for entry in roidb]
    return gt_boxes, name_list
def IOU(box1, gts):
    """Vectorized IoU between one box and an array of ground-truth boxes.

    Boxes are [x1, y1, x2, y2] in inclusive pixel coordinates (hence the +1
    in the width/height terms). Returns one overlap value per row of `gts`.
    """
    # Intersection rectangle of box1 with every gt, clamped at zero size when
    # the boxes are disjoint.
    x_lo = np.maximum(gts[:, 0], box1[0])
    y_lo = np.maximum(gts[:, 1], box1[1])
    x_hi = np.minimum(gts[:, 2], box1[2])
    y_hi = np.minimum(gts[:, 3], box1[3])
    inter_w = np.maximum(x_hi - x_lo + 1., 0.)
    inter_h = np.maximum(y_hi - y_lo + 1., 0.)
    inters = inter_w * inter_h
    # Union = area(box1) + area(gt) - intersection.
    area_box1 = (box1[2] - box1[0] + 1.) * (box1[3] - box1[1] + 1.)
    areas_gts = (gts[:, 2] - gts[:, 0] + 1.) * (gts[:, 3] - gts[:, 1] + 1.)
    return inters / (area_box1 + areas_gts - inters)
def get_hits(boxes, gt_boxes, iou_th=0.5):
    """Count ground-truth boxes hit by at least one confident prediction.

    Input: pred boxes ([x1, y1, x2, y2, score] each) and gt boxes of an image.
    Output: number of distinct gt boxes matched (true positives). Predictions
    scoring below the module-level score_threshold are ignored; a gt box is
    hit when some prediction overlaps it with IoU >= iou_th.
    """
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool dtype.
    hit = np.zeros((gt_boxes.shape[0],), dtype=bool)
    for box in boxes:
        if box[4] < score_threshold:
            continue
        overlaps = IOU(box, gt_boxes)
        # (removed an unused `hits = overlaps >= iou_th` temporary)
        hit = np.logical_or(hit, overlaps >= iou_th)
    tp = np.count_nonzero(hit)
    return tp
def find_diff_detections(boxes_all1, boxes_all2, roidb, save_dir, iou_th):
    """Compare baseline vs our detections image by image; save a visualization
    for every image where the two hit counts differ, and print a summary.

    NOTE(review): the loop range `nImg-3000` skips the last 3000 images —
    looks like a leftover debugging limit; confirm before relying on totals.
    """
    gts_all,name_list = get_roidb_info(roidb)
    nImg = len(boxes_all1)
    diff = 0       # images where hit counts differ
    ours = 0       # images where ours hits more gt boxes
    baseline = 0   # images where the baseline hits more
    print('starting ...')
    for i in range(nImg-3000):
        hits1 = get_hits(boxes_all1[i], gts_all[i], iou_th)
        hits2 = get_hits(boxes_all2[i], gts_all[i], iou_th)
        #print(hits1, hits2)
        if not hits1 == hits2:
            diff+=1
            if hits1 > hits2:
                baseline += 1
            else:
                ours += 1
            # Dump a side-by-side image for manual inspection of this case.
            save_image(boxes_all1[i], boxes_all2[i], gts_all[i], name_list[i], save_dir)
    print(len(boxes_all1), len(boxes_all2), len(gts_all),len(name_list))
    # NOTE(review): duplicated 'starting ...' message below, and the summary
    # format string is missing a separator before the second count
    # ("baseline better{}") — cosmetic, left untouched in this doc-only pass.
    print('starting ...')
    print('found {} different cases totally. \
            Ours better:{}, baseline better{}'.format(diff, ours, baseline))
def main():
    """Load the two detections.pkl files and the test roidb, then report and
    visualize the images where baseline and our detections disagree.

    Output images go to ../Outputs/case_study (recreated fresh on every run).
    """
    args = parse_args()
    det_file1 = args.baseline
    det_file2 = args.ours
    dataset_name = args.dataset
    work_dir = os.getcwd()
    save_dir = os.path.join(work_dir, '..' ,'Outputs', 'case_study')
    # Start from an empty output directory: create it, or wipe and recreate.
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    else:
        shutil.rmtree(save_dir)
        os.mkdir(save_dir)
    # detections['all_boxes'][cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)
    # only one class in DeepLesion. 0-background, 1-lesion.
    # all_boxes[image] = a N*5 list.
    with open(det_file1, 'rb') as f:
        detections = pickle.load(f)
    all_boxes1= detections['all_boxes']
    with open(det_file2, 'rb') as f:
        detections = pickle.load(f)
    all_boxes2= detections['all_boxes']
    dataset = JsonDataset(dataset_name)
    roidb = dataset.get_roidb(gt = True)
    # Index [1] selects the single foreground (lesion) class.
    find_diff_detections(all_boxes1[1], all_boxes2[1], roidb, save_dir, iou_th=0.5)


if __name__ == '__main__':
    main()
|
{"hexsha": "56e9761352fb5a83eaa38450dafcda33200d2220", "size": 5439, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/case_study.py", "max_stars_repo_name": "xixiobba/MVP-Net", "max_stars_repo_head_hexsha": "07bf00390080670b5d9a643b99f633419322a1ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-12-17T04:01:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T03:20:12.000Z", "max_issues_repo_path": "tools/case_study.py", "max_issues_repo_name": "xixiobba/MVP-Net", "max_issues_repo_head_hexsha": "07bf00390080670b5d9a643b99f633419322a1ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-02-18T12:14:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-07T01:08:54.000Z", "max_forks_repo_path": "tools/case_study.py", "max_forks_repo_name": "xixiobba/MVP-Net", "max_forks_repo_head_hexsha": "07bf00390080670b5d9a643b99f633419322a1ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-12-17T08:38:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-10T04:56:47.000Z", "avg_line_length": 33.7826086957, "max_line_length": 119, "alphanum_fraction": 0.6184960471, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1671}
|
import numpy as np
import tensorflow as tf
#import cv2
import matplotlib.pyplot as plt
from PIL import Image
import csv
import math
import os
from keras.layers import Dense, Flatten, Lambda, Activation, MaxPooling2D, ELU, Dropout
from keras.layers.convolutional import Conv2D
from keras.models import Sequential, model_from_json
from keras.optimizers import Adam
from keras.preprocessing import image
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import json
from keras import backend as K
# Training hyper-parameters for the traffic-light state classifier.
num_epochs = 30
batch_size = 64
def preporcess_img(img, state):
    """Preprocessing hook: currently an identity pass-through.

    The resize experiments that used to live here are disabled; the function
    is kept so callers have a stable hook point. Returns (img, state)
    unchanged. (Name typo preserved — callers use it.)
    """
    return img, state
# generate training/validation batch
def get_batch(X, batch_size = 64):
    """Yield endless (images, one-hot states) batches randomly sampled from X.

    X is a sequence of (image_filename, state) pairs; each image is loaded
    from ./images/ resized to 96x128x3, and the state (0-2) is one-hot
    encoded. Follows the Keras generator protocol (infinite loop).

    NOTE: if batch_size > len(X) the inner sampling loop can never complete;
    callers keep batch_size below the dataset size.
    """
    while(True):
        X_batch = []
        y_batch = []
        # Indices already used in this batch: sampling without replacement.
        # A set makes the membership test O(1) instead of O(n).
        picked = set()
        n_imgs = 0
        # randomly selected batch size images and light state
        while n_imgs < batch_size:
            i = np.random.randint(0, len(X))
            if i in picked:
                continue  # skip if this image has been picked
            y_state = int(X[i][1])
            picked.add(i)
            img_path = './images/' + X[i][0].strip()
            # FIX: the original loaded every image three times (plt.imread and
            # PIL Image.open results were immediately discarded); only the
            # Keras load that is actually used is kept.
            light_img = image.load_img(img_path, target_size=(96, 128))
            img_array = image.img_to_array(light_img)
            X_batch.append(img_array)
            # One-hot encode the light state (red/yellow/green).
            st_v = [0, 0, 0]
            st_v[y_state] = 1
            y_batch.append(st_v)
            n_imgs += 1
        yield np.array(X_batch), np.array(y_batch)
def get_samples_per_epoch(num_samples, batch_size):
    """Number of generator steps needed to cover num_samples items.

    Equivalent to ceil(num_samples / batch_size).
    """
    full_batches, remainder = divmod(num_samples, batch_size)
    return full_batches + 1 if remainder else full_batches
def get_model():
    """Build the traffic-light classifier: a NVIDIA-style conv stack adapted
    to 96x128 RGB inputs with a 3-way softmax output (one-hot light state).
    """
    model = Sequential()
    # normalization layer: scale pixels from [0, 255] to [-1, 1]
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(96, 128, 3)))
    # convolution 2D with filter 5x5
    #model.add(Convolution2D(24, 5, 5, border_mode='same', subsample=(2, 2)))
    model.add(Conv2D(24, (5, 5), padding='same', strides=(2, 2)))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
    #model.add(Convolution2D(36, 5, 5, border_mode='same', subsample=(2, 2)))
    model.add(Conv2D(36, (5, 5), padding='same', strides=(2, 2)))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
    model.add(Dropout(0.4))
    #model.add(Convolution2D(48, 5, 5, border_mode='same', subsample=(2, 2)))
    model.add(Conv2D(48, (5, 5), padding='same', strides=(2, 2)))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
    model.add(Dropout(0.25))
    #model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
    model.add(Conv2D(64, (3, 3), padding='same', strides=(1, 1)))
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
    model.add(Dropout(0.25))
    #model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
    model.add(Conv2D(64, (3, 3), padding='same', strides=(1, 1)))
    model.add(ELU())
    # Fully-connected head (1164-100-50-10, as in the NVIDIA PilotNet layout).
    model.add(Flatten())
    model.add(Dense(1164))
    model.add(ELU())
    model.add(Dropout(0.5))
    model.add(Dense(100))
    model.add(ELU())
    model.add(Dense(50))
    model.add(ELU())
    model.add(Dense(10))
    model.add(ELU())
    # 3 output classes with softmax -> class probabilities.
    model.add(Dense(3))
    model.add(Activation('softmax'))
    return model
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.  (TensorFlow 1.x API.)
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
                          or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition (a GraphDef proto).
    """
    # Local import: the TF1 graph_util module is only needed when freezing.
    from tensorflow.python.framework.graph_util import convert_variables_to_constants
    graph = session.graph
    with graph.as_default():
        # Variables to fold into constants = all globals minus the kept ones.
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        # NOTE(review): appending every global variable to output_names keeps
        # all of them alive in the pruned graph, which largely defeats the
        # pruning described above -- confirm this is intentional.
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            # Strip device placement directives so the frozen graph is portable.
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = convert_variables_to_constants(session, input_graph_def,
                                                      output_names, freeze_var_names)
        return frozen_graph
if __name__ == "__main__":
    driving_data = []
    # Build a list of (image path, traffic-light state) pairs from the CSV.
    with open('traffic_light_data.csv') as drvfile:
        reader = csv.DictReader(drvfile)
        for row in reader:
            driving_data.append((row['images'], row['state']))
    driving_data = shuffle(driving_data)
    # Split the data, 20% for validation; fixed seed keeps the split reproducible.
    X_train, X_validation = train_test_split(driving_data, test_size=0.2, random_state=7898)
    train_generator = get_batch(X_train)
    val_generator = get_batch(X_validation)
    model = get_model()
    # BUG FIX: the network ends in a 3-way softmax trained against one-hot
    # labels, so the correct loss is categorical cross-entropy, not MSE.
    model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
    print("Start training...")
    h = model.fit_generator(train_generator,
                            steps_per_epoch=get_samples_per_epoch(len(X_train), batch_size),
                            epochs=num_epochs,
                            validation_data=val_generator,
                            validation_steps=get_samples_per_epoch(len(X_validation), batch_size))
    #print ("fit history: ", h.history.keys())
    # save model architecture and weights
    model_json = model.to_json()
    # BUG FIX: model.to_json() already returns a JSON string; json.dump()
    # would JSON-encode it a second time.  Write the string as-is.
    with open("./model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("./model.h5")
    print("Saved model to disk")
    model.save("./k_model.h5")
    #frozen_graph = freeze_session(K.get_session(), output_names=[model.output.op.name])
    #tf.train.write_graph(frozen_graph, "./", "traffic_light_frozen.pb", as_text=False)
    #tf.train.write_graph(K.get_session().graph.as_graph_def(), "./","model_graph.ascii", as_text=True)
|
{"hexsha": "08dd50406111d9a6ed28cdff4de6c4d01b250235", "size": 6420, "ext": "py", "lang": "Python", "max_stars_repo_path": "experimental/model.py", "max_stars_repo_name": "tho15/tfplusplus", "max_stars_repo_head_hexsha": "e151986f7d449ee5ccb440fbb947fbc64fd62f49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2018-02-15T16:33:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T15:30:27.000Z", "max_issues_repo_path": "experimental/model.py", "max_issues_repo_name": "DefTruth/tfplusplus", "max_issues_repo_head_hexsha": "e151986f7d449ee5ccb440fbb947fbc64fd62f49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-08-21T12:02:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-17T03:53:35.000Z", "max_forks_repo_path": "experimental/model.py", "max_forks_repo_name": "DefTruth/tfplusplus", "max_forks_repo_head_hexsha": "e151986f7d449ee5ccb440fbb947fbc64fd62f49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-10-10T20:09:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T11:45:29.000Z", "avg_line_length": 31.7821782178, "max_line_length": 105, "alphanum_fraction": 0.7068535826, "include": true, "reason": "import numpy", "num_tokens": 1746}
|
import AnalysisFunctions as af
import pandas as pd
#defaultdict to use nested dictionaries
from collections import defaultdict
import matplotlib.pyplot as plt
import statsmodels.api as sm
import numpy as np
import dill
"""
-------------------------------------------------------------------------------------------------------------------------------------------------
PRE-PROCESSING: Import data, organize them, set observations subsets, calculate the quantiles:
-------------------------------------------------------------------------------------------------------------------------------------------------
"""
# Dictionary initialization:
df = af.dictionary()
list(df.keys())
# Decide simulation starting time:
sim_start = '2018-10-26 12:00:00'
# Some parameters for the basin:
Verzasca_area = 186*1000.0**2 #m2
conv_factor = Verzasca_area/(1000.0*3600.0)
# Runoff observation: open observation dataframe
obs_pattern = '/home/ciccuz/hydro/prevah/runoff/2605.dat'
obs_columns = ['year','month','day','hour','runoff']
obs_df = pd.DataFrame(pd.read_csv(obs_pattern, names=obs_columns, delim_whitespace=True, header=None))
obs_df['date'] = pd.to_datetime(obs_df[['year', 'month', 'day', 'hour']])
#Precipitation observation: open precipitation observation dataframe obtained by cosmoE forecast in the past
#data concatenated series before the initialization of the model
"""
prec_obs_12nov_28nov = df['2018-11-29 00:00:00']['rm00_pin01'][['P-kor', 'date']].iloc[0:419]
prec_obs_26ott_12nov = df['2018-11-12 12:00:00']['rm00_pin01'][['P-kor', 'date']].iloc[0:420]
prec_obs_9ott_26ott = df['2018-10-26 12:00:00']['rm00_pin01'][['P-kor', 'date']].iloc[0:420]
prec_obs_series = pd.concat([prec_obs_9ott_26ott, prec_obs_26ott_12nov, prec_obs_12nov_28nov]).reset_index(drop=True)
dill.dump(prec_obs_series, open( "prec_obs/prec_obs_series.txt", "wb") )
prec_obs_df_OLD = af.dictionary(pattern="/home/ciccuz/hydro/PrecObs/cosmo1_{simul_time}/{otherstuff}",
folders_pattern = '/home/ciccuz/hydro/PrecObs/cosmo1_*')
prec_obs_series_OLD = prec_obs_df['2018-11-09 12:00:00']['Ver500.'][['P-kor','date']].loc[prec_obs_df['2018-11-09 12:00:00']['Ver500.'].date < '2018-11-09 13:00:00']
"""
prec_obs_series= dill.load( open( "prec_obs/prec_obs_series.txt", "rb" ) )
# Extract from the dictionary the dataframe containing all the different realizations of the same event:
#every ensemble member and parameter set combination for the runoff, every ensemble member for the precipitation.
ens_df_prec = af.ensemble_df(df, sim_start, Verzasca_area, 'P-kor')
ens_df_runoff = af.ensemble_df(df, sim_start, Verzasca_area,'RGES')
# Calculate the quantiles for the variable chosen considering all the different realizations for the 120h ahead.
quant_prec = af.quantiles(ens_df_prec)
quant_runoff = af.quantiles(ens_df_runoff)
# Define the subset of runoff and precipitation observation based on quantiles dataframe date boundaries
obs_indexes_runoff = obs_df.loc[obs_df.index[obs_df['date'] == str(quant_runoff.date[0])] |
obs_df.index[obs_df['date'] == str(quant_runoff.date[119])]]
obs_indexes_prec = prec_obs_series.loc[prec_obs_series.index[prec_obs_series['date'] == str(quant_runoff.date[0])] |
prec_obs_series.index[prec_obs_series['date'] == str(quant_runoff.date[119])]]
obs_subset = obs_df.loc[obs_indexes_runoff.index[0]:obs_indexes_runoff.index[1]]
prec_obs_subset = prec_obs_series.loc[obs_indexes_prec.index[0]:obs_indexes_prec.index[1]]
# Observed runoff plot for 10-11 2018:
fig, ax = plt.subplots(1, 1, figsize=(10,6), dpi=100)
obsss = obs_df.loc[obs_df.year == 2018].loc[obs_df.month >= 10]
plt.plot(obsss.date, obsss.runoff)
plt.ylim(20,200)
plt.show()
# Observed precipitation plot for 10-11 2018:
fig, ax = plt.subplots(1, 1, figsize=(10,6), dpi=100)
plt.plot(prec_obs_series.date, prec_obs_series['P-kor'])
plt.show()
"""
______________________________________________________________________
Spaghetti and hydrograph plotting for the entire set of realizations:
______________________________________________________________________
"""
# Spaghetti plot of all realizations
af.spaghetti_plot(ens_df_runoff, ens_df_prec, obs_subset, prec_obs_subset, sim_start)
# Hydrograph plot for all realizations
af.hydrograph(quant_runoff, quant_prec, obs_subset, prec_obs_subset, sim_start)
"""
________________________
Meteorological medians:
________________________
"""
# Select groups of realizations based on the same ensemble members:
# dictionaries sorted by ensemble members
rm_groups_runoff = af.ens_param_groups(ens_df_runoff)[0]
# Quantiles dictionaries from above rm groups dictionary
quant_rm_dict = lambda: defaultdict(quant_rm_dict)
quant_rm_groups_runoff = quant_rm_dict()
for rm in range(21):
quant_rm_groups_runoff[rm] = af.quantiles(rm_groups_runoff[rm])
# Construct a dataframe having all the medians obtained for every group of realizations
# associated to an ens member
rm_medians = pd.DataFrame(index=range(120))
for rm in range(21):
rm_medians[rm] = quant_rm_groups_runoff[rm]['0.5']
rm_medians['date'] = quant_rm_groups_runoff[rm]['date']
rm_medians.columns = ['rm00','rm01','rm02','rm03','rm04','rm05','rm06','rm07','rm08','rm09','rm10','rm11','rm12',
'rm13','rm14','rm15','rm16','rm17','rm18','rm19','rm20','date']
# Quantiles on rm medians:
quant_rm_medians = af.quantiles(rm_medians)
"""
-------------------------------------------------------------------------------------------------------------------------------------------------
DECOMPOSED SOURCES OF UNCERTAINTIES: meteorological and hydrological uncertainties
-------------------------------------------------------------------------------------------------------------------------------------------------
______________________________
- Meteorological uncertainty
______________________________
"""
#Spaghetti plot with the 21 rm medians:
af.spaghetti_plot(rm_medians, ens_df_prec, obs_subset, prec_obs_subset, sim_start, medians=True)
# Quantify the meteorological uncertainty by plotting the range of spread among all the 21 rm medians obtained:
#af.hydrograph(quant_rm_medians, quant_prec, obs_subset, prec_obs_subset, sim_start, medians=True)
af.comparison_meteo_hydrograph(quant_rm_medians, quant_runoff, quant_prec, obs_subset, prec_obs_subset, sim_start)[1]
plt.savefig('/home/ciccuz/Thesis/AAAAAAAAAAAAAAAAAAAA.pdf', bbox_inches='tight')
"""
______________________________
- Thinning of the spread: remove spread extremes to try to obtain a sharper forecast, more compact around the observation
______________________________
"""
#for every leadtime find maximum and minimum value and its realization of fcst runoff:
max_runoff = pd.DataFrame(index=range(120), columns=['runoff', 'member'])
min_runoff = pd.DataFrame(index=range(120), columns=['runoff', 'member'])
for leadtime in range(120):
max_runoff['runoff'][leadtime] = ens_df_runoff.loc[:, ens_df_runoff.columns != 'date'].loc[[leadtime]].max(axis=1).values[0]
max_runoff['member'][leadtime] = (list(ens_df_runoff.loc[:, ens_df_runoff.columns != 'date'].loc[[leadtime]].stack().idxmax()))[1]
min_runoff['runoff'][leadtime] = ens_df_runoff.loc[:, ens_df_runoff.columns != 'date'].loc[[leadtime]].min(axis=1).values[0]
min_runoff['member'][leadtime] = (list(ens_df_runoff.loc[:, ens_df_runoff.columns != 'date'].loc[[leadtime]].stack().idxmin()))[1]
#remove from dataframe the meteo member related to the maximum/minimum runoff: replace those member with NaNs
ens_df_runoff_thin = ens_df_runoff.copy()
for leadtime in range(120):
ens_df_runoff_thin.replace(ens_df_runoff_thin.loc[[leadtime]].filter(regex=max_runoff.loc[[leadtime]]['member'].values[0][:4]),
np.nan, inplace=True)
ens_df_runoff_thin.replace(ens_df_runoff_thin.loc[[leadtime]].filter(regex=min_runoff.loc[[leadtime]]['member'].values[0][:4]),
np.nan, inplace=True)
#calculate quantiles on this new set of data:
quant_runoff_thin = af.quantiles(ens_df_runoff_thin)
af.comparison_meteo_hydrograph(quant_runoff_thin, quant_runoff, quant_prec, obs_subset, prec_obs_subset, sim_start, thinning=True)[1]
"""
_____________________________________________
- Hydrological uncertainty (of the forecast)
_____________________________________________
"""
# example: spaghetti and hydrograph plots for 1 selected ens member:
rm = 10
af.spaghetti_plot(rm_groups_runoff[rm], ens_df_prec, obs_subset, prec_obs_subset, sim_start,
runoff_label='\n'.join((r'rm = %02d' % rm, r'All pin realizations')))
af.hydrograph(quant_rm_groups_runoff[rm], quant_prec, obs_subset, prec_obs_subset, sim_start)
# Look at different rm realizations how the hydrological spread behaves: detect three realizations having different behaviours
""" For sim_start = '2018-10-27 00:00:00'
rm_high = 19
rm_medium = 11
rm_low = 13
"""
"""For sim_start = '2018-10-30 00:00:00'
"""
rm_high = 7
rm_medium = 6
rm_low = 17
af.hydrograph_rms(rm_high, rm_medium, rm_low, ens_df_prec, quant_rm_groups_runoff, quant_runoff,
obs_subset, prec_obs_subset, sim_start)
# Quantify the hydrological uncertainty considering the quantiles around every rm median:
af.hydro_unc_boxplot(quant_rm_groups_runoff, sim_start, normalized = True)
#plt.savefig('/home/ciccuz/Thesis/hydro_unc_boxplot2.pdf', bbox_inches='tight', dpi=1000)
"""
________________________________________________________________________________________________________
- Hydrological uncertainty (past): needed for a certain sim_start the initialization 5 days ahead of it
________________________________________________________________________________________________________
"""
#Hydrological uncertainty in the past: look at the 5 days before the initialization date
past_sim_start = str(ens_df_runoff.date[119])
past_ens_df = af.past_hydro_unc_ensemble_df(df, past_sim_start, Verzasca_area, 'RGES')
past_quant = af.quantiles(past_ens_df)
past_obs_indexes_runoff = obs_df.loc[obs_df.index[obs_df['date'] == str(past_quant.date[0])] |
obs_df.index[obs_df['date'] == str(past_quant.date[119])]]
past_obs_indexes_prec = prec_obs_series.loc[prec_obs_series.index[prec_obs_series['date'] == str(past_quant.date[0])] |
prec_obs_series.index[prec_obs_series['date'] == str(past_quant.date[119])]]
past_obs_subset = obs_df.loc[past_obs_indexes_runoff.index[0]:past_obs_indexes_runoff.index[1]]
past_prec_obs_subset = prec_obs_series.loc[past_obs_indexes_prec.index[0]:past_obs_indexes_prec.index[1]]
af.spaghetti_plot(past_ens_df, ens_df_prec, past_obs_subset, past_prec_obs_subset, past_sim_start, past=True)
af.hydrograph(past_quant, quant_prec, past_obs_subset, past_prec_obs_subset, past_sim_start, past=True)
"""
-------------------------------------------------------------------------------------------------------------------------------------------------
HYDROLOGICAL PARAMETERS ANALYSIS: use file hydro_parameters.py
-------------------------------------------------------------------------------------------------------------------------------------------------
"""
"""
-------------------------------------------------------------------------------------------------------------------------------------------------
FORECAST VERIFICATION: use file verification.py
-------------------------------------------------------------------------------------------------------------------------------------------------
"""
'''Correlation:'''
X = obs_subset['runoff']
y = quant_runoff['0.5']
X = sm.add_constant(X)
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())
# Correlation plot:
af.correlation_plot(quant_runoff['0.5'], obs_subset, lead_times, title_text = ', all realizations median')
"""
-------------------------------------------------------------------------------------------------------------------------------------------------
PEAK-BOX APPROACH
-------------------------------------------------------------------------------------------------------------------------------------------------
"""
import peakbox_classic_multipeaksV2 as pb
pb.peak_box_multipeaks(rm_medians, obs_subset, sim_start, delta_t=10, gamma=0.6, decreashours=10, beta = 0.8)
#plt.savefig('/home/ciccuz/Thesis/PeakBox/AAAAAAAAAA2.pdf', bbox_inches='tight', dpi=1000)
import peakbox_classic_multipeaksV3_cluster as pbk
pbk.peak_box_multipeaks_kmeans(rm_medians, obs_subset, sim_start, delta_t=10, gamma=0.6)
plt.savefig('/home/ciccuz/Thesis/PeakBox/AAAAAAAAAA2.pdf', bbox_inches='tight', dpi=1000)
"""
-------------------------------------------------------------------------------------------------------------------------------------------------
CLUSTER ANALYSIS
-------------------------------------------------------------------------------------------------------------------------------------------------
"""
"""
_______________________________
- RM extraction and dendrogram
_______________________________
"""
import cluster_funct as cl
#plot the dendrogram
pgf_with_latex = {"pgf.texsystem": "xelatex",
"text.usetex": False}
cl.clustered_dendrogram(ens_df_prec.drop('date', axis=1), sim_start)
#plt.savefig('/home/ciccuz/Thesis/cluster/dendrogram.pdf', bbox_inches='tight', dpi=1000)
#choose how many clusters you want (3, 5 or 7):
Nclusters = 3
#representative members
RM = cl.clustered_RM(ens_df_prec.drop('date', axis=1), sim_start, Nclusters = Nclusters)
#extract the sub-dataframe for prec and runoff forecasts containing only the members related to the new extracted representative members:
clust_ens_df_prec = pd.DataFrame()
clust_ens_df_runoff = pd.DataFrame()
for rm_index in range(Nclusters):
clust_ens_df_prec = pd.concat([clust_ens_df_prec, ens_df_prec.loc[:, ens_df_prec.columns == f'rm{RM[rm_index]:02d}_pin01']], axis=1, sort=False)
for pin in range(1,26):
clust_ens_df_runoff = pd.concat([clust_ens_df_runoff, ens_df_runoff.loc[:, ens_df_runoff.columns == f'rm{RM[rm_index]:02d}_pin{pin:02d}']], axis=1, sort=False)
clust_ens_df_prec = pd.concat([clust_ens_df_prec, ens_df_prec.date], axis=1)
clust_ens_df_runoff = pd.concat([clust_ens_df_runoff, ens_df_runoff.date], axis=1)
# Cluster quantiles:
clust_quant_prec = af.quantiles(clust_ens_df_prec)
clust_quant_runoff = af.quantiles(clust_ens_df_runoff)
"""
___________________________________________________________________________________________________________
- Hydrograph and spaghetti plots for the RMs extracted, compared to the spread obtained without clustering
___________________________________________________________________________________________________________
"""
# Spaghetti plot of clustered forecasts
af.spaghetti_plot(clust_ens_df_runoff, clust_ens_df_prec, obs_subset, prec_obs_subset, sim_start, clustered=True)
# Hydrograph plot of clustered forecasts
cl.cluster_hydrograph(clust_quant_runoff, clust_quant_prec, quant_runoff, quant_prec, obs_subset, prec_obs_subset, sim_start, Nclusters=Nclusters)[2]
plt.savefig(f'/home/ciccuz/Thesis/cluster/cluster_hydrograph_{Nclusters}RMASDASDASD.pdf', bbox_inches='tight')
"""
_________________________________
- Cluster meteorological medians
_________________________________
"""
# Select groups of realizations based on the same ensemble members:
# dictionaries sorted by ensemble members
clust_rm_groups_runoff = af.ens_param_groups(clust_ens_df_runoff)[0]
# Quantiles dictionaries from above rm groups dictionary
clust_quant_rm_dict = lambda: defaultdict(clust_quant_rm_dict)
clust_quant_rm_groups_runoff = clust_quant_rm_dict()
for rm in RM:
clust_quant_rm_groups_runoff[rm] = af.quantiles(clust_rm_groups_runoff[rm])
# Construct a dataframe having all the medians obtained for every group of realizations
# associated to an ens member
clust_rm_medians = pd.DataFrame(index=range(120))
for rm in RM:
clust_rm_medians[rm] = clust_quant_rm_groups_runoff[rm]['0.5']
clust_rm_medians['date'] = clust_quant_rm_groups_runoff[rm]['date']
clust_rm_medians.columns = np.append(['rm' + f'{rm:02d}' for rm in RM], 'date')
# Quantiles on cluster rm medians:
clust_quant_rm_medians = af.quantiles(clust_rm_medians)
#Spaghetti plot with the 5 cluster rm medians:
af.spaghetti_plot(clust_rm_medians, clust_ens_df_prec, obs_subset, prec_obs_subset, sim_start, clustered=True, medians=True)
# Quantify the meteorological uncertainty by plotting the range of spread among all rm medians obtained:
cl.cluster_hydrograph(clust_quant_rm_medians, clust_quant_prec, quant_rm_medians, quant_prec, obs_subset, prec_obs_subset, sim_start, Nclusters=Nclusters,
medians=True)
"""
_________________________________________
- Peak-box approach with clustered subset
_________________________________________
"""
pbk.peak_box_multipeaks_kmeans(clust_rm_medians, obs_subset, sim_start, delta_t=10, gamma=0.6)
|
{"hexsha": "30a2c28ddb8c1fce4d3f3a6e587c15218c9e8515", "size": 16940, "ext": "py", "lang": "Python", "max_stars_repo_path": "AnalysisScriptNEW.py", "max_stars_repo_name": "Ciccuz/ensemble-flood-forecasting", "max_stars_repo_head_hexsha": "a3e1eb5cb3625bd4228513c73a7188c9cfafc3ef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "AnalysisScriptNEW.py", "max_issues_repo_name": "Ciccuz/ensemble-flood-forecasting", "max_issues_repo_head_hexsha": "a3e1eb5cb3625bd4228513c73a7188c9cfafc3ef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "AnalysisScriptNEW.py", "max_forks_repo_name": "Ciccuz/ensemble-flood-forecasting", "max_forks_repo_head_hexsha": "a3e1eb5cb3625bd4228513c73a7188c9cfafc3ef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-07T06:09:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-07T06:09:18.000Z", "avg_line_length": 42.9949238579, "max_line_length": 167, "alphanum_fraction": 0.6963400236, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 4032}
|
#!/usr/bin/env python
import unittest
import numpy as np
from plico.utils.decorator import override
from plico.rpc.dummy_remote_procedure_call import DummyRpcHandler
from plico.rpc.dummy_sockets import DummySockets
from plico_dm.client.deformable_mirror_client import DeformableMirrorClient
from plico_dm.utils.timeout import Timeout
from plico_dm.types.deformable_mirror_status import DeformableMirrorStatus
class TesterRpcHandler(DummyRpcHandler):
    """RPC-handler test double that records every call it receives.

    Each sendRequest/receivePickable invocation is appended to a history
    list, so tests can later inspect the arguments of the most recent call.
    """

    def __init__(self):
        self._sendRequestHistory = []
        self._receivePickableHistory = []
        self._sendRequestCounter = 0
        self._receivePickableCounter = 0

    @override
    def sendRequest(self, socket, command, args, timeout=1):
        # No real request goes out; just record the full argument tuple.
        entry = (socket, command, args, timeout)
        self._sendRequestHistory.append(entry)
        self._sendRequestCounter = self._sendRequestCounter + 1

    @override
    def receivePickable(self, socket, timeout=1):
        self._receivePickableCounter = self._receivePickableCounter + 1
        self._receivePickableHistory.append((socket, timeout))
        # Hand back whatever object the test primed via wantsPickable().
        return self._objToReturnWithReceivePickable

    def getLastSendRequestArguments(self):
        """Return the argument tuple of the most recent sendRequest call."""
        return self._sendRequestHistory[-1]

    def getLastReceivePickableArguments(self):
        """Return the argument tuple of the most recent receivePickable call."""
        return self._receivePickableHistory[-1]

    def wantsPickable(self, objToReturnWithReceivePickable):
        """Prime the object that the next receivePickable() will return."""
        self._objToReturnWithReceivePickable = objToReturnWithReceivePickable
class MyRpcHandler(TesterRpcHandler):
    # Concrete alias used by the tests; all behaviour lives in TesterRpcHandler.
    pass
class MySockets(DummySockets):
    # Concrete alias used by the tests; all behaviour lives in DummySockets.
    pass
class DeformableMirrorClientTest(unittest.TestCase):
    """Verify that DeformableMirrorClient forwards each API call over RPC
    with the expected socket, command name, argument list and timeout, and
    that it unpacks fields from the DeformableMirrorStatus the server sends.
    """

    def setUp(self):
        # Wire the client to recording test doubles instead of real sockets.
        self._rpc = MyRpcHandler()
        self._sockets = MySockets()
        self._client = DeformableMirrorClient(self._rpc, self._sockets)

    def tearDown(self):
        pass

    def testSetShape(self):
        # set_shape must issue a 'setShape' request carrying the shape array.
        mirrorWantedShape = np.identity(4)
        self._client.set_shape(mirrorWantedShape)
        self.assertEqual(
            self._rpc.getLastSendRequestArguments(),
            (self._sockets.serverRequest(),
             'setShape',
             [mirrorWantedShape],
             Timeout.MIRROR_SET_SHAPE))

    def testGetPosition(self):
        # get_shape must issue 'getShape' with the caller-supplied timeout.
        timeoutInSec = 12
        _ = self._client.get_shape(timeoutInSec)
        self.assertEqual(
            self._rpc.getLastSendRequestArguments(),
            (self._sockets.serverRequest(),
             'getShape',
             [],
             timeoutInSec))

    def testGetStatus(self):
        # get_status must read the pickled status from the status socket
        # and return it unchanged.
        wantedInstrumentStatus = DeformableMirrorStatus(
            12, 34, 123, 'cmdtag')
        self._rpc.wantsPickable(wantedInstrumentStatus)
        timeoutInSec = 22
        gotInstrumentStatus = self._client.get_status(timeoutInSec)
        self.assertEqual(
            wantedInstrumentStatus, gotInstrumentStatus)
        self.assertEqual(
            self._rpc.getLastReceivePickableArguments(),
            (self._sockets.serverStatus(),
             timeoutInSec))

    def testSaveCurrentCommandAsReference(self):
        # Tag and timeout must be forwarded verbatim.
        timeoutInSec = 12
        tag = 'asdf'
        self._client.save_current_shape_as_reference(
            tag, timeoutInSec=timeoutInSec)
        self.assertEqual(
            self._rpc.getLastSendRequestArguments(),
            (self._sockets.serverRequest(),
             'save_current_shape_as_reference',
             [tag],
             timeoutInSec))

    def testLoadReference(self):
        timeoutInSec = 12
        tag = 'assss'
        self._client.load_reference(
            tag, timeoutInSec=timeoutInSec)
        self.assertEqual(
            self._rpc.getLastSendRequestArguments(),
            (self._sockets.serverRequest(),
             'load_reference',
             [tag],
             timeoutInSec))

    def testGetReferenceCommand(self):
        timeoutInSec = 12
        _ = self._client.get_reference_shape(
            timeoutInSec=timeoutInSec)
        self.assertEqual(
            self._rpc.getLastSendRequestArguments(),
            (self._sockets.serverRequest(),
             'get_reference_shape',
             [],
             timeoutInSec))

    def testGetReferenceCommandTag(self):
        # The tag is extracted from the status object, not a dedicated RPC.
        wantedInstrumentStatus = DeformableMirrorStatus(
            12, 34, 22, 'cmdtag')
        self._rpc.wantsPickable(wantedInstrumentStatus)
        timeoutInSec = 314
        cmdtag = self._client.get_reference_shape_tag(
            timeoutInSec=timeoutInSec)
        self.assertEqual(cmdtag, 'cmdtag')
        self.assertEqual(
            self._rpc.getLastReceivePickableArguments(),
            (self._sockets.serverStatus(),
             timeoutInSec))

    def testGetNumberOfActuators(self):
        # Actuator count comes from the status object's first field.
        nact = 12
        wantedInstrumentStatus = DeformableMirrorStatus(
            nact, 34, 22, 'cmdtag')
        self._rpc.wantsPickable(wantedInstrumentStatus)
        timeoutInSec = 314
        gotnact = self._client.get_number_of_actuators(
            timeoutInSec=timeoutInSec)
        self.assertEqual(gotnact, nact)
        self.assertEqual(
            self._rpc.getLastReceivePickableArguments(),
            (self._sockets.serverStatus(),
             timeoutInSec))

    def testGetNumberOfModes(self):
        # Mode count comes from the status object's second field.
        nmodes = 11
        wantedInstrumentStatus = DeformableMirrorStatus(
            231, nmodes, 22, 'cmdtag')
        self._rpc.wantsPickable(wantedInstrumentStatus)
        timeoutInSec = 314
        gotnmodes = self._client.get_number_of_modes(
            timeoutInSec=timeoutInSec)
        self.assertEqual(gotnmodes, nmodes)
        self.assertEqual(
            self._rpc.getLastReceivePickableArguments(),
            (self._sockets.serverStatus(),
             timeoutInSec))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "8ebf6336f72b5460565e8a9817db5a0b49af588f", "size": 5661, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/client/deformable_mirror_client_test.py", "max_stars_repo_name": "lbusoni/palpao", "max_stars_repo_head_hexsha": "95ffeb3733437ab9d96ea47c4a266f73142acca6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/client/deformable_mirror_client_test.py", "max_issues_repo_name": "lbusoni/palpao", "max_issues_repo_head_hexsha": "95ffeb3733437ab9d96ea47c4a266f73142acca6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-24T23:03:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T00:31:52.000Z", "max_forks_repo_path": "test/client/deformable_mirror_client_test.py", "max_forks_repo_name": "lbusoni/palpao", "max_forks_repo_head_hexsha": "95ffeb3733437ab9d96ea47c4a266f73142acca6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.625698324, "max_line_length": 75, "alphanum_fraction": 0.6500618265, "include": true, "reason": "import numpy", "num_tokens": 1245}
|
'''
Pan_X tracker
====================
Made by: Jan-Jaap van de Velde
Keys
----
ESC - exit
'''
import numpy as np
import cv2
import datetime
# Parameters for cv2.calcOpticalFlowPyrLK (sparse Lucas-Kanade optical flow):
# small 10x10 search window, 2 pyramid levels, terminate after 10 iterations
# or when the search moves less than 0.03 px.
lk_params = dict( winSize  = (10, 10),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
class video_obj:
    """Wrapper around a video file that measures horizontal camera pan.

    A grid of tracking points is placed over a region of interest; for every
    frame the mean horizontal Lucas-Kanade optical flow of that grid is
    computed and logged as the per-frame pan value (pan_x).
    """

    def __init__(self, filename_video, scan_window=None):
        """Open the video and read its basic properties.

        filename_video -- path of the video file to analyse.
        scan_window    -- optional [x0, x1, y0, y1, num_points] region (the
                          coordinates as fractions of the frame size) used to
                          build the tracking grid; if omitted, the ROI is
                          selected interactively on the first processed frame.
        """
        self.filename_video = filename_video
        self.filename_log = filename_video.rsplit('.', 1)[0] + '.log'
        self.cam = cv2.VideoCapture(filename_video)
        if self.cam is None or not self.cam.isOpened():
            print('Warning: unable to open video source: ', filename_video)
            # BUG FIX: a bare `raise` with no active exception raises a
            # confusing RuntimeError; raise an explicit IOError instead.
            raise IOError('Error opening file: %s' % filename_video)
        # basic video properties
        self.frame_width = int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.frame_heigth = int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.fps = self.cam.get(cv2.CAP_PROP_FPS)
        self.frame_count = int(self.cam.get(cv2.CAP_PROP_FRAME_COUNT))
        self.length = float(self.frame_count - 1) / self.fps
        # BUG FIX: `not scan_window == None` -> identity test (PEP 8).
        if scan_window is not None:
            self.set_scan_window(scan_window)

    def read(self):
        """Return the next frame of the video (None at end of stream)."""
        ret, frame = self.cam.read()
        return frame

    def set_grid(self, roi, num=100):
        """Build roughly `num` tracking points spread uniformly over `roi`.

        roi -- (x0, x1, y0, y1) in pixels.  The point count per axis is
        chosen so the point density matches the ROI aspect ratio.  The grid
        is stored as an (N, 1, 2) float32 array, the layout expected by
        cv2.calcOpticalFlowPyrLK.
        """
        x0, x1, y0, y1 = roi
        dx, dy = x1 - x0, y1 - y0
        x = np.linspace(x0, x1, int(np.sqrt(num * dx / dy)))
        y = np.linspace(y0, y1, int(np.sqrt(num * dy / dx)))
        x, y = np.meshgrid(x, y)
        self.grid = np.float32(np.array([x, y]).T.reshape(-1, 1, 2))

    def convert_rel_coor(self, rel_coor):
        """Convert [x0, x1, y0, y1] percentages of the frame to pixels."""
        abs_coor = np.array(rel_coor) / 100.
        abs_coor[0:2] *= self.frame_width
        abs_coor[2:4] *= self.frame_heigth
        return abs_coor

    def get_roi(self, frame_gray):
        """Let the user drag a rectangle on `frame_gray` and return it.

        Returns [x0, x1, y0, y1] in pixels; also prints the equivalent
        percentage coordinates so they can be reused as a scan_window.
        """
        class mouse_action:
            def on_mouse(self, click, x, y, drag, args):
                if click == 1:      # left button down: anchor corner
                    self.x0, self.y0 = x, y
                elif click == 4:    # left button up: normalize corner order
                    if self.x0 < x:
                        self.x1 = x
                    else:
                        self.x0, self.x1 = x, self.x0
                    if self.y0 < y:
                        self.y1 = y
                    else:
                        self.y0, self.y1 = y, self.y0
                    self.finish_flag = True
                if click == 0 and drag == 1 and hasattr(self, 'x0'):
                    # mouse moved with button held: redraw the rubber band
                    # (hasattr guard avoids a crash if a drag enters the
                    # window before any click was seen)
                    canvas = np.copy(self.frame_gray)
                    cv2.rectangle(canvas, (self.x0, self.y0), (x, y), [255, 150, 150])
                    cv2.imshow('Set Scan Window', canvas)

            def __init__(self):
                self.frame_gray = np.copy(frame_gray)
                cv2.imshow('Set Scan Window', frame_gray)
                # BUG FIX: cv2.SetMouseCallback does not exist in the cv2
                # API; the correct name is cv2.setMouseCallback.
                cv2.setMouseCallback('Set Scan Window', self.on_mouse, 0)
                self.finish_flag = False

        m = mouse_action()
        while not m.finish_flag:    # pump events until the drag finishes
            cv2.waitKey(1)
        cv2.destroyWindow('Set Scan Window')
        # BUG FIX: this was a Python 2 print *statement*, a syntax error on
        # Python 3 (the rest of the file uses print() calls).
        print([m.x0 * 100 / self.frame_width,
               m.x1 * 100 / self.frame_width,
               m.y0 * 100 / self.frame_heigth,
               m.y1 * 100 / self.frame_heigth])
        return [m.x0, m.x1, m.y0, m.y1]

    def set_scan_window(self, scan_window):
        """Convert a relative scan window to pixels and build the grid.

        scan_window -- [x0, x1, y0, y1, num_points] with the coordinates as
        fractions (0..1) of the frame size.
        """
        roi = np.array(scan_window[0:4])
        roi[0:2] *= self.frame_width
        roi[2:4] *= self.frame_heigth
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
        roi = roi.astype(int)
        self.set_grid(roi, scan_window[4])

    def process_video(self, filename_log, video_output=True):
        """Compute pan_x for every frame and write it to `filename_log`.

        Returns the pan_x array (NaNs replaced by 0) and stores it on
        self.pan_x.  When `video_output` is true an annotated preview window
        is shown; pressing ESC aborts the loop early.
        """

        def draw_str(dst, coor, s):
            # white text with a 1-pixel black drop shadow
            x, y = coor
            cv2.putText(dst, s, (x + 1, y + 1), cv2.FONT_HERSHEY_PLAIN, 1.0,
                        (0, 0, 0), thickness=2, lineType=cv2.LINE_AA)
            cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0,
                        (255, 255, 255), lineType=cv2.LINE_AA)

        def calc_flow(prev_gray, frame_gray, p0):
            # forward/backward LK flow; keep only points that round-trip to
            # within 1 px, then average their horizontal displacement
            p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gray, frame_gray, p0, None, **lk_params)
            p0r, st, err = cv2.calcOpticalFlowPyrLK(frame_gray, prev_gray, p1, None, **lk_params)
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)
            good = d < 1
            pan = (p0 - p1)[good, 0][:, 0].mean()  # nan if no point survived
            return pan

        k = 4  # process at 1/k resolution for speed
        frame_width = self.frame_width // k
        frame_heigth = self.frame_heigth // k
        # NOTE(review): this rescales self.grid in place, so calling
        # process_video a second time shrinks the grid again -- the class
        # appears intended for single use per grid; confirm.
        if hasattr(self, 'grid'):
            self.grid /= k
        pan_x = []
        t0 = datetime.datetime.now()
        for frame_nr in range(self.frame_count):
            frame = self.read()
            frame = cv2.resize(frame, (frame_width, frame_heigth))
            try:
                prev_gray = frame_gray
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            except UnboundLocalError:  # raised on the first frame only
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                prev_gray = frame_gray  # first 2 frames equal, so pan_x is 0
            if not hasattr(self, 'grid'):
                # no scan window was configured: let the user pick one now
                roi = self.get_roi(frame_gray)
                self.set_grid(roi, 100)
            pan = calc_flow(prev_gray, frame_gray, self.grid)
            t1 = datetime.datetime.now()
            dt, t0 = t1 - t0, t1
            if video_output:
                for p in self.grid:
                    # BUG FIX: cv2.circle needs integer coordinates; the
                    # grid points are float32.
                    cv2.circle(frame, (int(p[0][0]), int(p[0][1])), 2, (0, 255, 0), -1)
                # horizontal bar visualizing the current pan amount
                start_point = (frame_width // 2, frame_heigth // k - 10)
                end_point = (frame_width // 2 + int(pan * 50), frame_heigth - 20)
                color = (255, 150, 150)
                thickness = 2
                cv2.rectangle(frame, start_point, end_point, color, thickness)
                draw_str(frame, (20, 20), 'frame nr: {0:d}'.format(frame_nr))
                # BUG FIX: `1e6 / dt.microseconds` ignored whole seconds and
                # divided by zero on sub-microsecond frames; use total_seconds().
                elapsed = dt.total_seconds()
                draw_str(frame, (20, 40), 'fps: {0:0.0f}'.format(1.0 / elapsed if elapsed > 0 else 0.0))
                draw_str(frame, (20, 60), 'progress: {0:0.1f}'.format(frame_nr * 100.0 / self.frame_count))
                cv2.imshow('pan_x', frame)
                if 0xFF & cv2.waitKey(1) == 27:  # ESC aborts
                    break
            pan_x.append(pan)
        self.pan_x = np.nan_to_num(pan_x)
        cv2.destroyAllWindows()
        # write the log file: one "frame_number<TAB>pan_x" line per frame
        with open(filename_log, 'w') as f:
            for i, j in enumerate(self.pan_x):
                f.write('{0}\t{1:0.3f}\n'.format(i, j))
        return self.pan_x
|
{"hexsha": "b68ad1d4104abc88397659ce1b8876d1962a8dec", "size": 6889, "ext": "py", "lang": "Python", "max_stars_repo_path": "process_video.py", "max_stars_repo_name": "Jan-Jaap/gps_video_sync", "max_stars_repo_head_hexsha": "dcdbce948a1e37a15fbc497b8fcc8dae831a8b5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-09-14T20:54:04.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-11T23:57:30.000Z", "max_issues_repo_path": "process_video.py", "max_issues_repo_name": "Jan-Jaap/gps_video_sync", "max_issues_repo_head_hexsha": "dcdbce948a1e37a15fbc497b8fcc8dae831a8b5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "process_video.py", "max_forks_repo_name": "Jan-Jaap/gps_video_sync", "max_forks_repo_head_hexsha": "dcdbce948a1e37a15fbc497b8fcc8dae831a8b5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-05-11T23:57:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-03T20:18:44.000Z", "avg_line_length": 34.9695431472, "max_line_length": 121, "alphanum_fraction": 0.5259108724, "include": true, "reason": "import numpy", "num_tokens": 1782}
|
# Short aliases for fixed-size GLSL-style vectors:
#   VecN  -> Float32 components, iVecN -> Cint, uVecN -> Cuint.
const Vec2 = Vec{2, Float32}
const Vec3 = Vec{3, Float32}
const Vec4 = Vec{4, Float32}
const iVec2 = Vec{2, Cint}
const iVec3 = Vec{3, Cint}
const iVec4 = Vec{4, Cint}
const uVec2 = Vec{2, Cuint}
const uVec3 = Vec{3, Cuint}
const uVec4 = Vec{4, Cuint}
# Build textures for a range of element types and check that the float
# variants map to the expected GLSL sampler declaration and dimensions.
function test_textures()
    side = 100

    # Textures created from a format + size (no initial data).
    tex_rgba8  = Texture(RGBA{N0f8}, (512,10), minfilter=:nearest, x_repeat=:clamp_to_edge)
    tex_rg16ui = Texture(Vec{2, GLushort}, (77,91), minfilter=:nearest, x_repeat=:clamp_to_edge)

    # Single-channel integer textures created from data.
    intensity2Di  = Texture(Cint[0 for r=1:side, c=1:side])
    intensity2Dui = Texture(Cuint[0 for r=1:side, c=1:side])

    # Two-, three- and four-channel textures: float / signed / unsigned.
    rg2Df   = Texture([Vec2(0)    for r=1:side, c=1:side])
    rg2Di   = Texture([iVec2(0)   for r=1:side, c=1:side])
    rg2Dui  = Texture([uVec2(0f0) for r=1:side, c=1:side])
    rgb2Df  = Texture([Vec3(0)    for r=1:side, c=1:side])
    rgb2Di  = Texture([iVec3(0)   for r=1:side, c=1:side])
    rgb2Dui = Texture([uVec3(0f0) for r=1:side, c=1:side])
    rgba2Df  = Texture([Vec4(0)    for r=1:side, c=1:side])
    rgba2Di  = Texture([iVec4(0)   for r=1:side, c=1:side])
    rgba2Dui = Texture([uVec4(0f0) for r=1:side, c=1:side])

    # Texture built from an existing Vec4 matrix.
    source = Vec4[Vec4(0f0) for c=1:side, r=1:side]
    arraytexture = Texture(source)

    # Float textures must declare themselves as plain 2D samplers.
    @test toglsltype_string(rg2Df) == "uniform sampler2D"
    @test toglsltype_string(rgb2Df) == "uniform sampler2D"
    @test toglsltype_string(rgba2Df) == "uniform sampler2D"
    @test ndims(rgba2Df) == 2
    @test eltype(rgba2Df) == Vec4
end
# Run the texture checks immediately when this test file is included.
test_textures()
|
{"hexsha": "ce5262246f9399857f7c6d4376ba81f1973142da", "size": 1372, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/texture.jl", "max_stars_repo_name": "Tuebel/GLAbstraction.jl", "max_stars_repo_head_hexsha": "5dd969f1d1313d4ff3fc423de90feaa532d5f608", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2015-03-10T12:43:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T16:25:47.000Z", "max_issues_repo_path": "test/texture.jl", "max_issues_repo_name": "Tuebel/GLAbstraction.jl", "max_issues_repo_head_hexsha": "5dd969f1d1313d4ff3fc423de90feaa532d5f608", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2015-02-27T19:32:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-29T15:39:16.000Z", "max_forks_repo_path": "test/texture.jl", "max_forks_repo_name": "Tuebel/GLAbstraction.jl", "max_forks_repo_head_hexsha": "5dd969f1d1313d4ff3fc423de90feaa532d5f608", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2015-03-10T12:43:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-23T20:13:21.000Z", "avg_line_length": 28.5833333333, "max_line_length": 88, "alphanum_fraction": 0.6319241983, "num_tokens": 573}
|
(* Author: Alexander Bentkamp, Universität des Saarlandes
*)
section \<open>Matrix Rank\<close>
theory DL_Rank
imports VS_Connect DL_Missing_List
Determinant
Missing_VectorSpace
begin
(* If the span of a finite set S has dimension card S, then S is
   linearly independent: S generates its span, and a generating set
   whose cardinality equals the dimension is a basis. *)
lemma (in vectorspace) full_dim_span:
assumes "S \<subseteq> carrier V"
and "finite S"
and "vectorspace.dim K (span_vs S) = card S"
shows "lin_indpt S"
proof -
have "vectorspace K (span_vs S)"
using field.field_axioms vectorspace_def submodule_is_module[OF span_is_submodule[OF assms(1)]] by metis
have "S \<subseteq> carrier (span_vs S)" by (simp add: assms(1) in_own_span)
have "LinearCombinations.module.span K (vs (span S)) S = carrier (vs (span S))"
using module.span_li_not_depend[OF _ span_is_submodule[OF assms(1)]]
by (simp add: assms(1) in_own_span)
(* S is a basis of its own span, hence independent in the subspace,
   and independence transfers back to V. *)
have "vectorspace.basis K (vs (span S)) S"
using vectorspace.dim_gen_is_basis[OF \<open>vectorspace K (span_vs S)\<close> \<open>finite S\<close> \<open>S \<subseteq> carrier (span_vs S)\<close>
\<open>LinearCombinations.module.span K (vs (span S)) S = carrier (vs (span S))\<close>] \<open>vectorspace.dim K (span_vs S) = card S\<close>
by simp
then have "LinearCombinations.module.lin_indpt K (vs (span S)) S"
using vectorspace.basis_def[OF \<open>vectorspace K (span_vs S)\<close>] by blast
then show ?thesis using module.span_li_not_depend[OF _ span_is_submodule[OF assms(1)]]
by (simp add: assms(1) in_own_span)
qed
(* The dimension of span S equals the cardinality of any maximal
   linearly independent subset U of S: such a U spans S (otherwise it
   could be extended, contradicting maximality), so U is a basis of
   span S. *)
lemma (in vectorspace) dim_span:
assumes "S \<subseteq> carrier V"
and "finite S"
and "maximal U (\<lambda>T. T \<subseteq> S \<and> lin_indpt T)"
shows "vectorspace.dim K (span_vs S) = card U"
proof -
have "lin_indpt U" "U \<subseteq> S" by (metis assms(3) maximal_def)+
then have "U \<subseteq> span S" using in_own_span[OF assms(1)] by blast
then have lin_indpt: "LinearCombinations.module.lin_indpt K (span_vs S) U"
using module.span_li_not_depend(2)[OF \<open>U \<subseteq> span S\<close>] \<open>lin_indpt U\<close> assms(1) span_is_submodule by blast
(* Maximality forces span U = span S: any s in S outside span U would
   give the strictly larger independent set U \<union> {s}. *)
have "span U = span S"
proof (rule ccontr)
assume "span U \<noteq> span S"
have "span U \<subseteq> span S" using span_is_monotone \<open>U\<subseteq>S\<close> by metis
then have "\<not> S \<subseteq> span U" by (meson \<open>U \<subseteq> S\<close> \<open>span U \<noteq> span S\<close> assms(1) span_is_submodule
span_is_subset subset_antisym subset_trans)
then obtain s where "s\<in>S" "s \<notin> span U" by blast
then have "lin_indpt (U\<union>{s})" using lindep_span
by (meson \<open>U \<subseteq> S\<close> \<open>lin_indpt U\<close> assms(1) lin_dep_iff_in_span rev_subsetD span_mem subset_trans)
have "s\<notin>U" using \<open>U \<subseteq> S\<close> \<open>s \<notin> span U\<close> assms(1) span_mem by auto
then have "(U\<union>{s}) \<subseteq> S \<and> lin_indpt (U\<union>{s})" using \<open>U \<subseteq> S\<close> \<open>lin_indpt (U \<union> {s})\<close> \<open>s \<in> S\<close> by auto
then have "\<not>maximal U (\<lambda>T. T \<subseteq> S \<and> lin_indpt T)"
unfolding maximal_def using Un_subset_iff \<open>s \<notin> U\<close> insert_subset order_refl by auto
then show False using assms by metis
qed
then have span:"LinearCombinations.module.span K (vs (span S)) U = span S"
using module.span_li_not_depend[OF \<open>U \<subseteq> span S\<close>]
by (simp add: LinearCombinations.module.span_is_submodule assms(1) module_axioms)
have "vectorspace K (vs (span S))"
using field.field_axioms vectorspace_def submodule_is_module[OF span_is_submodule[OF assms(1)]] by metis
then have "vectorspace.basis K (vs (span S)) U" using vectorspace.basis_def[OF \<open>vectorspace K (vs (span S))\<close>]
by (simp add: span \<open>U \<subseteq> span S\<close> lin_indpt)
then show ?thesis
using \<open>U \<subseteq> S\<close> \<open>vectorspace K (vs (span S))\<close> assms(2) infinite_super vectorspace.dim_basis by blast
qed
(* The (column) rank of a matrix: the dimension of the span of its
   columns, viewed as a vector space over the coefficient field. *)
definition (in vec_space) rank ::"'a mat \<Rightarrow> nat"
where "rank A = vectorspace.dim class_ring (span_vs (set (cols A)))"
(* The rank equals the cardinality of any maximal linearly independent
   subset of the columns; direct consequence of dim_span. *)
lemma (in vec_space) rank_card_indpt:
assumes "A \<in> carrier_mat n nc"
assumes "maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)"
shows "rank A = card S"
proof -
have "set (cols A) \<subseteq> carrier_vec n" using cols_dim assms(1) by blast
have "finite (set (cols A))" by blast
show ?thesis using dim_span[OF \<open>set (cols A) \<subseteq> carrier_vec n\<close> \<open>finite (set (cols A))\<close> assms(2)]
unfolding rank_def by blast
qed
(* Any set B satisfying a predicate P that is bounded by a finite set S
   can be extended to a finite maximal set satisfying P.  Proved by
   well-founded induction on the (shrinking) complement S - B. *)
lemma maximal_exists_superset:
assumes "finite S"
assumes maxc: "\<And>A. P A \<Longrightarrow> A \<subseteq> S" and "P B"
shows "\<exists>A. finite A \<and> maximal A P \<and> B \<subseteq> A"
proof -
have "finite (S-B)" using assms(1) assms(3) infinite_super maxc by blast
then show ?thesis using \<open>P B\<close>
proof (induction "S-B" arbitrary:B rule: finite_psubset_induct)
case (psubset B)
then show ?case
proof (cases "maximal B P")
case True
then show ?thesis using order_refl psubset.hyps by (metis assms(1) maxc psubset.prems rev_finite_subset)
next
case False
(* Not maximal: find a strictly larger B' with P B' and recurse on
   the smaller complement S - B'. *)
then obtain B' where "B \<subset> B'" "P B'" using maximal_def psubset.prems by (metis dual_order.order_iff_strict)
then have "B' \<subseteq> S" "B \<subseteq> S" using maxc \<open>P B\<close> by auto
then have "S - B' \<subset> S - B" using \<open>B \<subset> B'\<close> by blast
then show ?thesis using psubset(2)[OF \<open>S - B' \<subset> S - B\<close> \<open>P B'\<close>] using \<open>B \<subset> B'\<close> by fast
qed
qed
qed
(* Any linearly independent subset of the columns has at most rank A
   elements: extend it to a maximal independent set and apply
   rank_card_indpt. *)
lemma (in vec_space) rank_ge_card_indpt:
assumes "A \<in> carrier_mat n nc"
assumes "U \<subseteq> set (cols A)"
assumes "lin_indpt U"
shows "rank A \<ge> card U"
proof -
obtain S where "maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)" "U\<subseteq>S" "finite S"
using maximal_exists_superset[of "set (cols A)" "(\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)" U]
using List.finite_set assms(2) assms(3) maximal_exists_superset by blast
then show ?thesis
unfolding rank_card_indpt[OF \<open>A \<in> carrier_mat n nc\<close> \<open>maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)\<close>]
using card_mono by blast
qed
(* Distinct, linearly independent columns give full column rank:
   the whole column set is itself maximal independent. *)
lemma (in vec_space) lin_indpt_full_rank:
assumes "A \<in> carrier_mat n nc"
assumes "distinct (cols A)"
assumes "lin_indpt (set (cols A))"
shows "rank A = nc"
proof -
have "maximal (set (cols A)) (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)"
by (simp add: assms(3) maximal_def subset_antisym)
then have "rank A = card (set (cols A))" using assms(1) vec_space.rank_card_indpt by blast
then show ?thesis using assms(1) assms(2) distinct_card by fastforce
qed
(* The rank never exceeds the number of columns: a maximal independent
   subset of the columns has at most nc elements. *)
lemma (in vec_space) rank_le_nc:
assumes "A \<in> carrier_mat n nc"
shows "rank A \<le> nc"
proof -
obtain S where "maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)"
using maximal_exists[of "(\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)" "card (set (cols A))" "{}"]
by (meson List.finite_set card_mono empty_iff empty_subsetI finite_lin_indpt2 rev_finite_subset)
then have "card S \<le> card (set (cols A))" by (simp add: card_mono maximal_def)
then have "card S \<le> nc"
using assms(1) cols_length card_length carrier_matD(2) by (metis dual_order.trans)
then show ?thesis
using rank_card_indpt[OF \<open>A \<in> carrier_mat n nc\<close> \<open>maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)\<close>]
by simp
qed
(* Converse of lin_indpt_full_rank: full rank with distinct columns
   implies the columns are linearly independent (via full_dim_span). *)
lemma (in vec_space) full_rank_lin_indpt:
assumes "A \<in> carrier_mat n nc"
assumes "rank A = nc"
assumes "distinct (cols A)"
shows "lin_indpt (set (cols A))"
proof -
have 1:"set (cols A) \<subseteq> carrier_vec n" using assms(1) cols_dim by blast
have 2:"finite (set (cols A))" by simp
have "card (set (cols A)) = nc"
using assms(1) assms(3) distinct_card by fastforce
have 3:"vectorspace.dim class_ring (span_vs (set (cols A))) = card (set (cols A))"
using \<open>rank A = nc\<close>[unfolded rank_def]
using assms(1) assms(3) distinct_card by fastforce
show ?thesis using full_dim_span[OF 1 2 3] .
qed
(* Matrix-vector product as a linear combination of the columns:
   A *v (vec nc (\<lambda>i. a (col A i))) is the lincomb of the column set with
   coefficient function a.  Distinctness lets a column be identified
   with its index via find_first. *)
lemma (in vec_space) mat_mult_eq_lincomb:
assumes "A \<in> carrier_mat n nc"
assumes "distinct (cols A)"
shows "A *\<^sub>v (vec nc (\<lambda>i. a (col A i))) = lincomb a (set (cols A))"
proof (rule eq_vecI)
have "finite (set (cols A))" using assms(1) by simp
then show "dim_vec (A *\<^sub>v (vec nc (\<lambda>i. a (col A i)))) = dim_vec (lincomb a (set (cols A)))"
using assms cols_dim vec_space.lincomb_dim by (metis dim_mult_mat_vec carrier_matD(1))
fix i assume "i < dim_vec (lincomb a (set (cols A)))"
then have "i < n" using \<open>dim_vec (A *\<^sub>v (vec nc (\<lambda>i. a (col A i)))) = dim_vec (lincomb a (set (cols A)))\<close> assms by auto
have "set (cols A) \<subseteq> carrier_vec n" using cols_dim \<open>A \<in> carrier_mat n nc\<close> carrier_matD(1) by blast
(* Re-index the sum over the column set as a sum over column positions,
   using that (nth (cols A)) is a bijection thanks to distinctness. *)
have "bij_betw (nth (cols A)) {..<length (cols A)} (set (cols A))"
unfolding bij_betw_def by (rule conjI, simp add: inj_on_nth \<open>distinct (cols A)\<close>;
metis subset_antisym in_set_conv_nth lessThan_iff rev_image_eqI subsetI
image_subsetI lessThan_iff nth_mem)
then have " (\<Sum>x\<in>set (cols A). a x * x $ i) =
(\<Sum>j\<in>{..<length (cols A)}. a (cols A ! j) * (cols A ! j) $ i)"
using bij_betw_imp_surj_on bij_betw_imp_inj_on by (metis (no_types, lifting) sum.reindex_cong)
also have "... = (\<Sum>j\<in>{..<length (cols A)}. a (col A j) * (cols A ! j) $ i)"
using assms(1) assms(2) find_first_unique[OF \<open>distinct (cols A)\<close>] \<open>i < n\<close> by auto
also have "... = (\<Sum>j\<in>{..<length (cols A)}. (cols A ! j) $ i * a (col A j))" by (metis mult_commute_abs)
also have "... = (\<Sum>j\<in>{..<length (cols A)}. row A i $ j * a (col A j))" using \<open>i < n\<close> assms(1) assms(2) by auto
finally show "(A *\<^sub>v (vec nc (\<lambda>i. a (col A i)))) $ i = lincomb a (set (cols A)) $ i"
unfolding lincomb_index[OF \<open>i < n\<close> \<open>set (cols A) \<subseteq> carrier_vec n\<close>]
unfolding mult_mat_vec_def scalar_prod_def
using \<open>i < n\<close> assms(1) atLeast0LessThan lessThan_def carrier_matD(1) index_vec sum.cong by auto
qed
(* Converse direction of mat_mult_eq_lincomb: a column linear
   combination with coefficients read off a vector v equals A *v v. *)
lemma (in vec_space) lincomb_eq_mat_mult:
assumes "A \<in> carrier_mat n nc"
assumes "v \<in> carrier_vec nc"
assumes "distinct (cols A)"
shows "lincomb (\<lambda>a. v $ find_first a (cols A)) (set (cols A)) = (A *\<^sub>v v)"
proof -
have "\<And>i. i < nc \<Longrightarrow> find_first (col A i) (cols A) = i"
using assms(1) assms(3) find_first_unique by fastforce
then have "vec nc (\<lambda>i. v $ find_first (col A i) (cols A)) = v"
using assms(2) by auto
then show ?thesis
using mat_mult_eq_lincomb[where a = "(\<lambda>a. v $ find_first a (cols A))", OF assms(1) assms(3)] by auto
qed
(* A nonzero kernel vector of A witnesses linear dependence of the
   columns: its nonzero entry provides a nontrivial vanishing linear
   combination. *)
lemma (in vec_space) lin_depI:
assumes "A \<in> carrier_mat n nc"
assumes "v \<in> carrier_vec nc" "v \<noteq> 0\<^sub>v nc" "A *\<^sub>v v = 0\<^sub>v n"
assumes "distinct (cols A)"
shows "lin_dep (set (cols A))"
proof -
have 1: "finite (set (cols A))" by simp
have 2: "set (cols A) \<subseteq> set (cols A)" by auto
have 3: "(\<lambda>a. v $ find_first a (cols A)) \<in> set (cols A) \<rightarrow> UNIV" by simp
(* Pick an index where v is nonzero; the corresponding column gets a
   nonzero coefficient in the vanishing combination. *)
obtain i where "v $ i \<noteq> 0" "i < nc"
using \<open>v \<noteq> 0\<^sub>v nc\<close>
by (metis assms(2) dim_vec carrier_vecD vec_eq_iff zero_vec_def index_zero_vec(1))
then have "i < dim_col A" using assms(1) by blast
have 4:"col A i \<in> set (cols A)"
using cols_nth[OF \<open>i < dim_col A\<close>] \<open>i < dim_col A\<close> in_set_conv_nth by fastforce
have 5:"v $ find_first (col A i) (cols A) \<noteq> 0"
using find_first_unique[OF \<open>distinct (cols A)\<close>] cols_nth[OF \<open>i < dim_col A\<close>] \<open>i < nc\<close> \<open>v $ i \<noteq> 0\<close>
assms(1) by auto
have 6:"lincomb (\<lambda>a. v $ find_first a (cols A)) (set (cols A)) = 0\<^sub>v n"
using assms(1) assms(2) assms(4) assms(5) lincomb_eq_mat_mult by auto
show ?thesis using lin_dep_crit[OF 1 2 _ 4 5 6] by metis
qed
(* Elimination form of lin_depI: from linear dependence of distinct
   columns, extract an explicit nonzero kernel vector of A. *)
lemma (in vec_space) lin_depE:
assumes "A \<in> carrier_mat n nc"
assumes "lin_dep (set (cols A))"
assumes "distinct (cols A)"
obtains v where "v \<in> carrier_vec nc" "v \<noteq> 0\<^sub>v nc" "A *\<^sub>v v = 0\<^sub>v n"
proof -
have "finite (set (cols A))" by simp
obtain a w where "a \<in> set (cols A) \<rightarrow> UNIV" "lincomb a (set (cols A)) = 0\<^sub>v n" "w \<in> set (cols A)" "a w \<noteq> 0"
using finite_lin_dep[OF \<open>finite (set (cols A))\<close> \<open>lin_dep (set (cols A))\<close>]
using assms(1) cols_dim carrier_matD(1) by blast
(* Package the dependence coefficients into the kernel vector v. *)
define v where "v = vec nc (\<lambda>i. a (col A i))"
have 1:"v \<in> carrier_vec nc" by (simp add: v_def)
have 2:"v \<noteq> 0\<^sub>v nc"
proof -
obtain i where "w = col A i" "i < length (cols A)"
by (metis \<open>w \<in> set (cols A)\<close> cols_length cols_nth in_set_conv_nth)
have "v $ i \<noteq> 0"
unfolding v_def
using \<open>a w \<noteq> 0\<close>[unfolded \<open>w = col A i\<close>] index_vec[OF \<open>i < length (cols A)\<close>]
assms(1) cols_length carrier_matD(2) by (metis (no_types) \<open>A \<in> carrier_mat n nc\<close>
\<open>\<And>f. vec (length (cols A)) f $ i = f i\<close> \<open>a (col A i) \<noteq> 0\<close> cols_length carrier_matD(2))
then show ?thesis using \<open>i < length (cols A)\<close> assms(1) by auto
qed
have 3:"A *\<^sub>v v = 0\<^sub>v n" unfolding v_def
using \<open>lincomb a (set (cols A)) = 0\<^sub>v n\<close> mat_mult_eq_lincomb[OF \<open>A \<in> carrier_mat n nc\<close> \<open>distinct (cols A)\<close>] by auto
show thesis using 1 2 3 by (simp add: that)
qed
(* A square matrix with a repeated column cannot have full rank: the
   column set has fewer than n elements, bounding any maximal
   independent subset below n. *)
lemma (in vec_space) non_distinct_low_rank:
assumes "A \<in> carrier_mat n n"
and "\<not> distinct (cols A)"
shows "rank A < n"
proof -
obtain S where "maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)"
using maximal_exists[of "(\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)" "card (set (cols A))" "{}"]
by (meson List.finite_set card_mono empty_iff empty_subsetI finite_lin_indpt2 rev_finite_subset)
then have "card S \<le> card (set (cols A))" by (simp add: card_mono maximal_def)
then have "card S < n"
using assms(1) cols_length card_length \<open>\<not> distinct (cols A)\<close> card_distinct carrier_matD(2) nat_less_le
by (metis dual_order.antisym dual_order.trans)
then show ?thesis
using rank_card_indpt[OF \<open>A \<in> carrier_mat n n\<close> \<open>maximal S (\<lambda>T. T \<subseteq> set (cols A) \<and> lin_indpt T)\<close>]
by simp
qed
text \<open>The theorem "det non-zero $\longleftrightarrow$ full rank" is essentially already proven by
det\_0\_iff\_vec\_prod\_zero\_field, but without an explicit definition of the rank.\<close>
(* Zero determinant implies rank < n.  By contradiction: full rank
   either gives independent columns (contradicting the kernel vector
   from det = 0) or is impossible with duplicate columns. *)
lemma (in vec_space) det_zero_low_rank:
assumes "A \<in> carrier_mat n n"
and "det A = 0"
shows "rank A < n"
proof (rule ccontr)
assume "\<not> rank A < n"
then have "rank A = n" using rank_le_nc assms le_neq_implies_less by blast
obtain v where "v \<in> carrier_vec n" "v \<noteq> 0\<^sub>v n" "A *\<^sub>v v = 0\<^sub>v n"
using det_0_iff_vec_prod_zero_field[OF assms(1)] assms(2) by blast
then show False
proof (cases "distinct (cols A)")
case True
then have "lin_indpt (set (cols A))" using full_rank_lin_indpt using \<open>rank A = n\<close> assms(1) by auto
then show False using lin_depI[OF assms(1) \<open>v \<in> carrier_vec n\<close> \<open>v \<noteq> 0\<^sub>v n\<close> \<open>A *\<^sub>v v = 0\<^sub>v n\<close>] True by blast
next
case False
then show False using non_distinct_low_rank \<open>rank A = n\<close> \<open>\<not> rank A < n\<close> assms(1) by blast
qed
qed
(* Column analogue of det_identical_rows, obtained by transposition. *)
lemma det_identical_cols:
assumes A: "A \<in> carrier_mat n n"
and ij: "i \<noteq> j"
and i: "i < n" and j: "j < n"
and r: "col A i = col A j"
shows "det A = 0"
using det_identical_rows det_transpose
by (metis A i ij j carrier_matD(2) transpose_carrier_mat r row_transpose)
(* Nonzero determinant implies full rank: the columns must be distinct
   (a duplicate column would force det = 0), and a trivial kernel gives
   linear independence via lin_depE. *)
lemma (in vec_space) low_rank_det_zero:
assumes "A \<in> carrier_mat n n"
and "det A \<noteq> 0"
shows "rank A = n"
proof -
have "distinct (cols A)"
proof (rule ccontr)
assume "\<not> distinct (cols A)"
then obtain i j where "i\<noteq>j" "(cols A) ! i = (cols A) ! j" "i<length (cols A)" "j<length (cols A)"
using distinct_conv_nth by blast
then have "col A i = col A j" "i<n" "j<n" using assms(1) by auto
then have "det A = 0" using det_identical_cols using \<open>i \<noteq> j\<close> assms(1) by blast
then show False using \<open>det A \<noteq> 0\<close> by auto
qed
have "\<And>v. v \<in> carrier_vec n \<Longrightarrow> v \<noteq> 0\<^sub>v n \<Longrightarrow> A *\<^sub>v v \<noteq> 0\<^sub>v n"
using det_0_iff_vec_prod_zero_field[OF assms(1)] assms(2) by auto
then have "lin_indpt (set (cols A))" using lin_depE[OF assms(1) _ \<open>distinct (cols A)\<close>] by auto
then show ?thesis using lin_indpt_full_rank[OF assms(1) \<open>distinct (cols A)\<close>] by metis
qed
(* Summary characterisation: nonzero determinant iff full rank. *)
lemma (in vec_space) det_rank_iff:
assumes "A \<in> carrier_mat n n"
shows "det A \<noteq> 0 \<longleftrightarrow> rank A = n"
using assms det_zero_low_rank low_rank_det_zero by force
section "Subadditivity of rank"
text \<open>Subadditivity is the property of the rank that rank (A + B) \<le> rank A + rank B.\<close>
(* The sum of two linear combinations over b1 and b2 is a linear
   combination over b1 \<union> b2: extend each coefficient function by zero
   outside its own basis, then add pointwise. *)
lemma (in Module.module) lincomb_add:
assumes "finite (b1 \<union> b2)"
assumes "b1 \<union> b2 \<subseteq> carrier M"
assumes "x1 = lincomb a1 b1" "a1\<in> (b1\<rightarrow>carrier R)"
assumes "x2 = lincomb a2 b2" "a2\<in> (b2\<rightarrow>carrier R)"
assumes "x = x1 \<oplus>\<^bsub>M\<^esub> x2"
shows "lincomb (\<lambda>v. (\<lambda>v. if v \<in> b1 then a1 v else \<zero>) v \<oplus> (\<lambda>v. if v \<in> b2 then a2 v else \<zero>) v) (b1 \<union> b2) = x"
proof -
have "finite (b1 \<union> (b2-b1))" "finite (b2 \<union> (b1-b2))"
"b1 \<union> (b2 - b1) \<subseteq> carrier M" "b2 \<union> (b1-b2) \<subseteq> carrier M"
"b1 \<inter> (b2 - b1) = {}" "b2 \<inter> (b1 - b2) = {}"
"(\<lambda>b. \<zero>\<^bsub>R\<^esub>) \<in> b2 - b1 \<rightarrow> carrier R" "(\<lambda>b. \<zero>\<^bsub>R\<^esub>) \<in> b1 - b2 \<rightarrow> carrier R"
using \<open>finite (b1 \<union> b2)\<close> \<open>b1 \<union> b2 \<subseteq> carrier M\<close> \<open>a2\<in> (b2\<rightarrow>carrier R)\<close> by auto
(* Zero-extended parts contribute nothing, so each xi can be rewritten
   as a combination over the whole union. *)
have "lincomb (\<lambda>b. \<zero>\<^bsub>R\<^esub>) (b2 - b1) = \<zero>\<^bsub>M\<^esub>" "lincomb (\<lambda>b. \<zero>\<^bsub>R\<^esub>) (b1 - b2) = \<zero>\<^bsub>M\<^esub>"
unfolding lincomb_def using M.finsum_all0 assms(2) lmult_0 subset_iff
by (metis (no_types, lifting) Un_Diff_cancel2 inf_sup_aci(5) le_sup_iff)+
then have "x1 = lincomb (\<lambda>v. if v \<in> b1 then a1 v else \<zero>) (b1 \<union> b2)"
"x2 = lincomb (\<lambda>v. if v \<in> b2 then a2 v else \<zero>) (b1 \<union> b2)"
using lincomb_union2[OF \<open>finite (b1 \<union> (b2-b1))\<close> \<open>b1 \<union> (b2 - b1) \<subseteq> carrier M\<close> \<open>b1 \<inter> (b2 - b1) = {}\<close> \<open>a1\<in> (b1\<rightarrow>carrier R)\<close> \<open>(\<lambda>b. \<zero>\<^bsub>R\<^esub>) \<in> b2 - b1 \<rightarrow> carrier R\<close>]
lincomb_union2[OF \<open>finite (b2 \<union> (b1-b2))\<close> \<open>b2 \<union> (b1-b2) \<subseteq> carrier M\<close> \<open>b2 \<inter> (b1 - b2) = {}\<close> \<open>a2\<in> (b2\<rightarrow>carrier R)\<close> \<open>(\<lambda>b. \<zero>\<^bsub>R\<^esub>) \<in> b1 - b2 \<rightarrow> carrier R\<close>]
using assms(2) assms(3) assms(4) assms(5) assms(6) by (simp_all add:Un_commute)
have "(\<lambda>v. if v \<in> b1 then a1 v else \<zero>) \<in> (b1 \<union> b2) \<rightarrow> carrier R"
"(\<lambda>v. if v \<in> b2 then a2 v else \<zero>) \<in> (b1 \<union> b2) \<rightarrow> carrier R" using assms(4) assms(6) by auto
show "lincomb (\<lambda>v. (\<lambda>v. if v \<in> b1 then a1 v else \<zero>) v \<oplus> (\<lambda>v. if v \<in> b2 then a2 v else \<zero>) v) (b1 \<union> b2) = x"
using lincomb_sum[OF \<open>finite (b1 \<union> b2)\<close> \<open>b1 \<union> b2 \<subseteq> carrier M\<close>
\<open>(\<lambda>v. if v \<in> b1 then a1 v else \<zero>) \<in> (b1 \<union> b2) \<rightarrow> carrier R\<close> \<open>(\<lambda>v. if v \<in> b2 then a2 v else \<zero>) \<in> (b1 \<union> b2) \<rightarrow> carrier R\<close>]
\<open>x1 = lincomb (\<lambda>v. if v \<in> b1 then a1 v else \<zero>) (b1 \<union> b2)\<close> \<open>x2 = lincomb (\<lambda>v. if v \<in> b2 then a2 v else \<zero>) (b1 \<union> b2)\<close> assms(7) by blast
qed
(* Dimension is subadditive on subspace sums:
   dim(W1 + W2) \<le> dim W1 + dim W2.  Proof idea: take finite bases b1,
   b2 of the summands; b1 \<union> b2 generates W1 + W2, so its cardinality
   (\<le> card b1 + card b2) bounds the dimension of the sum. *)
lemma (in vectorspace) dim_subadditive:
assumes "subspace K W1 V"
and "vectorspace.fin_dim K (vs W1)"
assumes "subspace K W2 V"
and "vectorspace.fin_dim K (vs W2)"
shows "vectorspace.dim K (vs (subspace_sum W1 W2)) \<le> vectorspace.dim K (vs W1) + vectorspace.dim K (vs W2)"
proof -
have "vectorspace K (vs W1)" "vectorspace K (vs W2)" "submodule K W1 V" "submodule K W2 V"
by (simp add: \<open>subspace K W1 V\<close> \<open>subspace K W2 V\<close> subspace_is_vs)+
obtain b1 b2 where "vectorspace.basis K (vs W1) b1" "vectorspace.basis K (vs W2) b2" "finite b1" "finite b2"
using vectorspace.finite_basis_exists[OF \<open>vectorspace K (vs W1)\<close> \<open>vectorspace.fin_dim K (vs W1)\<close>]
using vectorspace.finite_basis_exists[OF \<open>vectorspace K (vs W2)\<close> \<open>vectorspace.fin_dim K (vs W2)\<close>]
by blast
then have "LinearCombinations.module.gen_set K (vs W1) b1" "LinearCombinations.module.gen_set K (vs W2) b2"
using \<open>vectorspace K (vs W1)\<close> \<open>vectorspace K (vs W2)\<close> vectorspace.basis_def by blast+
then have "span b1 = W1" "span b2 = W2"
using module.span_li_not_depend(1) \<open>submodule K W1 V\<close> \<open>submodule K W2 V\<close>
\<open>vectorspace K (vs W1)\<close> \<open>vectorspace.basis K (vs W1) b1\<close> \<open>vectorspace K (vs W2)\<close>
\<open>vectorspace.basis K (vs W2) b2\<close> vectorspace.basis_def by force+
have "W1 \<subseteq> carrier V" "W2 \<subseteq> carrier V" using \<open>subspace K W1 V\<close> \<open>subspace K W2 V\<close> subspace_def submodule_def by metis+
have "b1 \<subseteq> carrier V"
using \<open>vectorspace.basis K (vs W1) b1\<close> \<open>vectorspace K (vs W1)\<close> vectorspace.basis_def
\<open>W1 \<subseteq> carrier V\<close> by fastforce
have "b2 \<subseteq> carrier V"
using \<open>vectorspace.basis K (vs W2) b2\<close> \<open>vectorspace K (vs W2)\<close> vectorspace.basis_def
\<open>W2 \<subseteq> carrier V\<close> by fastforce
have "finite (b1 \<union> b2)" "b1 \<union> b2 \<subseteq> carrier V"
by (simp_all add: \<open>finite b1\<close> \<open>finite b2\<close> \<open>b2 \<subseteq> carrier V\<close> \<open>b1 \<subseteq> carrier V\<close>)
(* Every element of the sum decomposes as x1 + x2 and hence is a linear
   combination over b1 \<union> b2 (lincomb_add). *)
have "subspace_sum W1 W2 \<subseteq> span (b1\<union>b2)"
proof (rule subsetI)
fix x assume "x \<in> subspace_sum W1 W2"
obtain x1 x2 where "x1 \<in> W1" "x2 \<in> W2" "x = x1 \<oplus>\<^bsub>V\<^esub> x2"
using imageE[OF \<open>x \<in> subspace_sum W1 W2\<close>[unfolded submodule_sum_def]]
by (metis (no_types, lifting) BNF_Def.Collect_case_prodD split_def)
obtain a1 where "x1 = lincomb a1 b1" "a1\<in> (b1\<rightarrow>carrier K)"
using \<open>span b1 = W1\<close> finite_span[OF \<open>finite b1\<close> \<open>b1 \<subseteq> carrier V\<close>] \<open>x1 \<in> W1\<close> by auto
obtain a2 where "x2 = lincomb a2 b2" "a2\<in> (b2\<rightarrow>carrier K)"
using \<open>span b2 = W2\<close> finite_span[OF \<open>finite b2\<close> \<open>b2 \<subseteq> carrier V\<close>] \<open>x2 \<in> W2\<close> by auto
obtain a where "x = lincomb a (b1 \<union> b2)" using lincomb_add[OF \<open>finite (b1 \<union> b2)\<close> \<open>b1 \<union> b2 \<subseteq> carrier V\<close>
\<open>x1 = lincomb a1 b1\<close> \<open>a1\<in> (b1\<rightarrow>carrier K)\<close> \<open>x2 = lincomb a2 b2\<close> \<open>a2\<in> (b2\<rightarrow>carrier K)\<close> \<open>x = x1 \<oplus>\<^bsub>V\<^esub> x2\<close>] by blast
then show "x \<in> span (b1 \<union> b2)" using finite_span[OF \<open>finite (b1 \<union> b2)\<close> \<open>(b1 \<union> b2) \<subseteq> carrier V\<close>]
using \<open>b1 \<subseteq> carrier V\<close> \<open>b2 \<subseteq> carrier V\<close> \<open>span b1 = W1\<close> \<open>span b2 = W2\<close> \<open>x \<in> subspace_sum W1 W2\<close> span_union_is_sum by auto
qed
have "b1 \<subseteq> W1" "b2 \<subseteq> W2"
using \<open>vectorspace K (vs W1)\<close> \<open>vectorspace K (vs W2)\<close> \<open>vectorspace.basis K (vs W1) b1\<close>
\<open>vectorspace.basis K (vs W2) b2\<close> vectorspace.basis_def local.carrier_vs_is_self by blast+
then have "b1\<union>b2 \<subseteq> subspace_sum W1 W2" using \<open>submodule K W1 V\<close> \<open>submodule K W2 V\<close> in_sum
by (metis assms(1) assms(3) dual_order.trans sup_least vectorspace.vsum_comm vectorspace_axioms)
(* b1 \<union> b2 is a generating set of the sum viewed as a vector space. *)
have "subspace_sum W1 W2 = LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1\<union>b2)"
proof (rule subset_antisym)
have "submodule K (subspace_sum W1 W2) V" by (simp add: \<open>submodule K W1 V\<close> \<open>submodule K W2 V\<close> sum_is_submodule)
show "subspace_sum W1 W2 \<subseteq> LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1\<union>b2)"
using module.span_li_not_depend(1)[OF \<open>b1\<union>b2 \<subseteq> subspace_sum W1 W2\<close> \<open>submodule K (subspace_sum W1 W2) V\<close>]
by (simp add: \<open>subspace_sum W1 W2 \<subseteq> span (b1 \<union> b2)\<close>)
show "subspace_sum W1 W2 \<supseteq> LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1\<union>b2)"
using \<open>b1\<union>b2 \<subseteq> subspace_sum W1 W2\<close> by (metis (full_types) LinearCombinations.module.span_is_subset2
LinearCombinations.module.submodule_is_module \<open>submodule K (subspace_sum W1 W2) V\<close> local.carrier_vs_is_self submodule_def)
qed
have "vectorspace K (vs (subspace_sum W1 W2))" using assms(1) assms(3) subspace_def sum_is_subspace vectorspace.subspace_is_vs by blast
then have "vectorspace.dim K (vs (subspace_sum W1 W2)) \<le> card (b1 \<union> b2)"
using vectorspace.gen_ge_dim[OF \<open>vectorspace K (vs (subspace_sum W1 W2))\<close> \<open>finite (b1 \<union> b2)\<close>]
\<open>b1 \<union> b2 \<subseteq> subspace_sum W1 W2\<close>
\<open>subspace_sum W1 W2 = LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1 \<union> b2)\<close>
local.carrier_vs_is_self by blast
also have "... \<le> card b1 + card b2" by (simp add: card_Un_le)
also have "... = vectorspace.dim K (vs W1) + vectorspace.dim K (vs W2)"
by (metis \<open>finite b1\<close> \<open>finite b2\<close> \<open>vectorspace K (vs W1)\<close> \<open>vectorspace K (vs W2)\<close>
\<open>vectorspace.basis K (vs W1) b1\<close> \<open>vectorspace.basis K (vs W2) b2\<close> vectorspace.dim_basis)
finally show ?thesis by auto
qed
(* A submodule X contained in another submodule W is also a submodule
   of W itself. *)
lemma (in Module.module) nested_submodules:
assumes "submodule R W M"
assumes "submodule R X M"
assumes "X \<subseteq> W"
shows "submodule R X (md W)"
unfolding submodule_def
using \<open>X \<subseteq> W\<close> submodule_is_module[OF \<open>submodule R W M\<close>] using \<open>submodule R X M\<close>[unfolded submodule_def] by auto
(* Vector-space version of nested_submodules. *)
lemma (in vectorspace) nested_subspaces:
assumes "subspace K W V"
assumes "subspace K X V"
assumes "X \<subseteq> W"
shows "subspace K X (vs W)"
using assms nested_submodules subspace_def subspace_is_vs by blast
(* A finite-dimensional subspace has dimension at most that of the
   ambient space: a basis of X stays linearly independent in V. *)
lemma (in vectorspace) subspace_dim:
assumes "subspace K X V" "fin_dim" "vectorspace.fin_dim K (vs X)"
shows "vectorspace.dim K (vs X) \<le> dim"
proof -
have "vectorspace K (vs X)" using assms(1) subspace_is_vs by auto
then obtain b where "vectorspace.basis K (vs X) b" using vectorspace.finite_basis_exists
using assms(3) by blast
then have "b \<subseteq> carrier V" "LinearCombinations.module.lin_indpt K (vs X) b"
using vectorspace.basis_def[OF \<open>vectorspace K (vs X)\<close>] \<open>subspace K X V\<close>[unfolded subspace_def submodule_def] by auto
(* Independence in the subspace transfers to independence in V. *)
then have "lin_indpt b"
by (metis LinearCombinations.module.span_li_not_depend(2) \<open>vectorspace K (vs X)\<close> \<open>vectorspace.basis K (vs X) b\<close>
assms(1) is_module local.carrier_vs_is_self submodule_def vectorspace.basis_def)
show ?thesis using li_le_dim(2)[OF \<open>fin_dim\<close> \<open>b \<subseteq> carrier V\<close> \<open>lin_indpt b\<close>]
using \<open>b \<subseteq> carrier V\<close> \<open>lin_indpt b\<close> \<open>vectorspace K (vs X)\<close> \<open>vectorspace.basis K (vs X) b\<close> assms(2)
fin_dim_li_fin vectorspace.dim_basis by fastforce
qed
(* The sum of two finite-dimensional subspaces is finite-dimensional: the union
   of finite generating sets b1 of W1 and b2 of W2 generates W1 + W2. *)
lemma (in vectorspace) fin_dim_subspace_sum:
assumes "subspace K W1 V"
assumes "subspace K W2 V"
assumes "vectorspace.fin_dim K (vs W1)" "vectorspace.fin_dim K (vs W2)"
shows "vectorspace.fin_dim K (vs (subspace_sum W1 W2))"
proof -
(* Finite generating sets exist by the fin_dim assumptions. *)
obtain b1 where "finite b1" "b1 \<subseteq> W1" "LinearCombinations.module.gen_set K (vs W1) b1"
using assms vectorspace.fin_dim_def subspace_is_vs by force
obtain b2 where "finite b2" "b2 \<subseteq> W2" "LinearCombinations.module.gen_set K (vs W2) b2"
using assms vectorspace.fin_dim_def subspace_is_vs by force
have 1:"finite (b1 \<union> b2)" by (simp add: \<open>finite b1\<close> \<open>finite b2\<close>)
have 2:"b1 \<union> b2 \<subseteq> subspace_sum W1 W2"
by (metis (no_types, lifting) \<open>b1 \<subseteq> W1\<close> \<open>b2 \<subseteq> W2\<close> assms(1) assms(2)
le_sup_iff subset_Un_eq vectorspace.in_sum_vs vectorspace.vsum_comm vectorspace_axioms)
(* Key step: b1 \<union> b2 spans all of W1 + W2. *)
have 3:"LinearCombinations.module.gen_set K (vs (subspace_sum W1 W2)) (b1 \<union> b2)"
proof (rule subset_antisym)
have 0:"LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1 \<union> b2) = span (b1 \<union> b2)"
using span_li_not_depend(1)[OF \<open>b1 \<union> b2 \<subseteq> subspace_sum W1 W2\<close>] sum_is_subspace[OF assms(1) assms(2)] by auto
then show "LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1 \<union> b2) \<subseteq> carrier (vs (subspace_sum W1 W2))"
using \<open>b1 \<union> b2 \<subseteq> subspace_sum W1 W2\<close> span_is_subset sum_is_subspace[OF assms(1) assms(2)] by auto
show "carrier (vs (subspace_sum W1 W2)) \<subseteq> LinearCombinations.module.span K (vs (subspace_sum W1 W2)) (b1 \<union> b2)"
unfolding 0
proof
(* Any x in W1 + W2 decomposes as x1 + x2 with x1 \<in> W1, x2 \<in> W2, each of
   which lies in the span of b1 \<union> b2. *)
fix x assume assumption:"x \<in> carrier (vs (subspace_sum W1 W2))"
then have "x\<in>subspace_sum W1 W2" by auto
then obtain x1 x2 where "x = x1 \<oplus>\<^bsub>V\<^esub> x2" "x1\<in>W1" "x2\<in>W2"
using imageE[OF \<open>x \<in> subspace_sum W1 W2\<close>[unfolded submodule_sum_def]]
by (metis (no_types, lifting) BNF_Def.Collect_case_prodD split_def)
have "x1\<in>span b1" "x2\<in>span b2"
using \<open>LinearCombinations.module.span K (vs W1) b1 = carrier (vs W1)\<close> \<open>b1 \<subseteq> W1\<close> \<open>x1 \<in> W1\<close>
\<open>LinearCombinations.module.span K (vs W2) b2 = carrier (vs W2)\<close> \<open>b2 \<subseteq> W2\<close> \<open>x2 \<in> W2\<close>
assms(1) assms(2) span_li_not_depend(1) by auto
then have "x1\<in>span (b1 \<union> b2)" "x2\<in>span (b1 \<union> b2)" by (meson le_sup_iff subsetD span_is_monotone subsetI)+
then show "x \<in> span (b1 \<union> b2)" unfolding \<open>x = x1 \<oplus>\<^bsub>V\<^esub> x2\<close>
by (meson \<open>b1 \<union> b2 \<subseteq> subspace_sum W1 W2\<close> assms(1) assms(2) is_module submodule.subset
subset_trans sum_is_submodule vectorspace.span_add1 vectorspace_axioms)
qed
qed
show ?thesis using 1 2 3 vectorspace.fin_dim_def
by (metis assms(1) assms(2) local.carrier_vs_is_self subspace_def sum_is_subspace vectorspace.subspace_is_vs)
qed
(* Rank is subadditive: rank (A + B) \<le> rank A + rank B.  The column space of
   A + B is contained in the sum of the column spaces W1 and W2 of A and B, so
   its dimension is bounded by dim (W1 + W2) \<le> dim W1 + dim W2. *)
lemma (in vec_space) rank_subadditive:
assumes "A \<in> carrier_mat n nc"
assumes "B \<in> carrier_mat n nc"
shows "rank (A + B) \<le> rank A + rank B"
proof -
define W1 where "W1 = span (set (cols A))"
define W2 where "W2 = span (set (cols B))"
(* Each column of A + B is col A i + col B i, hence lies in W1 + W2. *)
have "set (cols (A + B)) \<subseteq> subspace_sum W1 W2"
proof
fix x assume "x \<in> set (cols (A + B))"
obtain i where "x = col (A + B) i" "i < length (cols (A + B))"
using \<open>x \<in> set (cols (A + B))\<close> nth_find_first cols_nth find_first_le by (metis cols_length)
then have "x = col A i + col B i" using \<open>i < length (cols (A + B))\<close> assms(1) assms(2) by auto
have "col A i \<in> span (set (cols A))" "col B i \<in> span (set (cols B))"
using \<open>i < length (cols (A + B))\<close> assms(1) assms(2) in_set_conv_nth
by (metis cols_dim cols_length cols_nth carrier_matD(1) carrier_matD(2) index_add_mat(3) span_mem)+
then show "x \<in> subspace_sum W1 W2"
unfolding W1_def W2_def \<open>x = col A i + col B i\<close> submodule_sum_def by blast
qed
have "subspace class_ring (subspace_sum W1 W2) V"
by (metis W1_def W2_def assms(1) assms(2) cols_dim carrier_matD(1) span_is_submodule subspace_def sum_is_submodule vec_vs)
then have "span (set (cols (A + B))) \<subseteq> subspace_sum W1 W2"
by (simp add: \<open>set (cols (A + B)) \<subseteq> subspace_sum W1 W2\<close> span_is_subset)
have "subspace class_ring (span (set (cols (A + B)))) V" by (metis assms(2) cols_dim add_carrier_mat carrier_matD(1) span_is_subspace)
have subspace:"subspace class_ring (span (set (cols (A + B)))) (vs (subspace_sum W1 W2))"
using nested_subspaces[OF \<open>subspace class_ring (subspace_sum W1 W2) V\<close> \<open>subspace class_ring (span (set (cols (A + B)))) V\<close>
\<open>span (set (cols (A + B))) \<subseteq> subspace_sum W1 W2\<close>] .
have "vectorspace.fin_dim class_ring (vs W1)" "vectorspace.fin_dim class_ring (vs W2)"
"subspace class_ring W1 V" "subspace class_ring W2 V"
using span_is_subspace W1_def W2_def assms(1) assms(2) cols_dim carrier_matD fin_dim_span_cols by auto
then have fin_dim: "vectorspace.fin_dim class_ring (vs (subspace_sum W1 W2))" using fin_dim_subspace_sum by auto
have "vectorspace.fin_dim class_ring (span_vs (set (cols (A + B))))" using assms(2) add_carrier_mat vec_space.fin_dim_span_cols by blast
(* Chain the two dimension bounds via subspace_dim and dim_subadditive. *)
then have "rank (A + B) \<le> vectorspace.dim class_ring (vs (subspace_sum W1 W2))" unfolding rank_def
using vectorspace.subspace_dim[OF subspace_is_vs[OF \<open>subspace class_ring (subspace_sum W1 W2) V\<close>] subspace fin_dim] by auto
also have "vectorspace.dim class_ring (vs (subspace_sum W1 W2)) \<le> rank A + rank B" unfolding rank_def
using W1_def W2_def \<open>subspace class_ring W1 V\<close> \<open>subspace class_ring W2 V\<close> \<open>vectorspace.fin_dim class_ring (vs W1)\<close>
\<open>vectorspace.fin_dim class_ring (vs W2)\<close> subspace_def vectorspace.dim_subadditive by blast
finally show ?thesis by auto
qed
(* The span of the zero vector is just the zero vector. *)
lemma (in vec_space) span_zero: "span {zero V} = {zero V}"
by (metis (no_types, lifting) empty_subsetI in_own_span span_is_submodule span_is_subset
span_is_subset2 subset_antisym vectorspace.span_empty vectorspace_axioms)
(* The span of the empty set, viewed as a vector space, has dimension 0:
   the empty set is a basis of it. *)
lemma (in vec_space) dim_zero_vs: "vectorspace.dim class_ring (span_vs {}) = 0"
proof -
have "vectorspace class_ring (span_vs {})" using field.field_axioms span_is_submodule submodule_is_module vectorspace_def by auto
have "{} \<subseteq> carrier_vec n \<and> lin_indpt {}"
by (metis (no_types) empty_subsetI fin_dim finite_basis_exists subset_li_is_li vec_vs vectorspace.basis_def)
then have "vectorspace.basis class_ring (span_vs {}) {}" using vectorspace.basis_def
by (simp add: \<open>vectorspace class_ring (vs (span {}))\<close> span_is_submodule span_li_not_depend(1) span_li_not_depend(2) vectorspace.basis_def)
then show ?thesis using \<open>vectorspace class_ring (vs (span {}))\<close> vectorspace.dim_basis by fastforce
qed
(* The zero matrix has rank 0: its columns are all 0\<^sub>v n, whose span
   collapses to the zero subspace of dimension 0. *)
lemma (in vec_space) rank_0I: "rank (0\<^sub>m n nc) = 0"
proof -
have "set (cols (0\<^sub>m n nc)) \<subseteq> {0\<^sub>v n}"
by (metis col_zero cols_length cols_nth in_set_conv_nth insertCI index_zero_mat(3) subsetI)
have "set (cols (0\<^sub>m n nc::'a mat)) = {} \<or> set (cols (0\<^sub>m n nc)) = {0\<^sub>v n::'a vec}"
by (meson \<open>set (cols (0\<^sub>m n nc)) \<subseteq> {0\<^sub>v n}\<close> subset_singletonD)
then have "span (set (cols (0\<^sub>m n nc))) = {0\<^sub>v n}"
by (metis (no_types) span_empty span_zero vectorspace.span_empty vectorspace_axioms)
then show ?thesis unfolding rank_def \<open>span (set (cols (0\<^sub>m n nc))) = {0\<^sub>v n}\<close>
using span_empty dim_zero_vs by simp
qed
(* A matrix whose entries factor as A $$ (r,c) = f r * g c has rank at most 1:
   every column is a scalar multiple (by g c) of the single vector vec n f. *)
lemma (in vec_space) rank_le_1_product_entries:
fixes f g::"nat \<Rightarrow> 'a"
assumes "A \<in> carrier_mat n nc"
assumes "\<And>r c. r<dim_row A \<Longrightarrow> c<dim_col A \<Longrightarrow> A $$ (r,c) = f r * g c"
shows "rank A \<le> 1"
proof -
have "set (cols A) \<subseteq> span {vec n f}"
proof
fix v assume "v \<in> set (cols A)"
then obtain c where "c < dim_col A" "v = col A c" by (metis cols_length cols_nth in_set_conv_nth)
(* Column c of A equals g c \<cdot> vec n f, checked entrywise. *)
have "g c \<cdot>\<^sub>v vec n f = v"
proof (rule eq_vecI)
show "dim_vec (g c \<cdot>\<^sub>v Matrix.vec n f) = dim_vec v" using \<open>v = col A c\<close> assms(1) by auto
fix r assume "r < dim_vec v"
then have "r < dim_vec (Matrix.vec n f)" using \<open>dim_vec (g c \<cdot>\<^sub>v Matrix.vec n f) = dim_vec v\<close> by auto
then have "r < n" "r < dim_row A"using index_smult_vec(2) \<open>A \<in> carrier_mat n nc\<close> by auto
show "(g c \<cdot>\<^sub>v Matrix.vec n f) $ r = v $ r"
unfolding \<open>v = col A c\<close> col_def index_smult_vec(1)[OF \<open>r < dim_vec (Matrix.vec n f)\<close>]
index_vec[OF \<open>r < n\<close>] index_vec[OF \<open>r < dim_row A\<close>] by (simp add: \<open>c < dim_col A\<close> \<open>r < dim_row A\<close> assms(2))
qed
then show "v \<in> span {vec n f}" using submodule.smult_closed[OF span_is_submodule]
using UNIV_I empty_subsetI insert_subset span_self dim_vec module_vec_simps(4) by auto
qed
have "vectorspace class_ring (vs (span {Matrix.vec n f}))" using span_is_subspace[THEN subspace_is_vs, of "{vec n f}"] by auto
have "submodule class_ring (span {Matrix.vec n f}) V" by (simp add: span_is_submodule)
have "subspace class_ring(span (set (cols A))) (vs (span {Matrix.vec n f}))"
using vectorspace.span_is_subspace[OF \<open>vectorspace class_ring (vs (span {Matrix.vec n f}))\<close>, of "set (cols A)", unfolded
span_li_not_depend(1)[OF \<open>set (cols A) \<subseteq> span {vec n f}\<close> \<open>submodule class_ring (span {Matrix.vec n f}) V\<close>]]
\<open>set (cols A) \<subseteq> span {vec n f}\<close> by auto
have fin_dim:"vectorspace.fin_dim class_ring (vs (span {Matrix.vec n f}))"
"vectorspace.fin_dim class_ring (vs (span {Matrix.vec n f})\<lparr>carrier := span (set (cols A))\<rparr>)"
using fin_dim_span fin_dim_span_cols \<open>A \<in> carrier_mat n nc\<close> by auto
(* The span of one vector has dimension at most 1; the column space of A sits
   inside it, so rank A \<le> 1 by subspace_dim. *)
have "vectorspace.dim class_ring (vs (span {Matrix.vec n f})) \<le> 1"
using vectorspace.dim_le1I[OF \<open>vectorspace class_ring (vs (span {Matrix.vec n f}))\<close>]
span_mem span_li_not_depend(1)[OF _ \<open>submodule class_ring (span {Matrix.vec n f}) V\<close>] by simp
then show ?thesis unfolding rank_def using "vectorspace.subspace_dim"[OF
\<open>vectorspace class_ring (vs (span {Matrix.vec n f}))\<close> \<open>subspace class_ring (span (set (cols A))) (vs (span {Matrix.vec n f}))\<close>
fin_dim(1) fin_dim(2)] by simp
qed
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Evaluation/Jordan_Normal_Form/DL_Rank.thy"}
|
# -*- coding: utf-8 -*-
"""
Helper functions to visualize the data in plotly
"""
import plotly.graph_objs as go
import numpy as np
"""Visualization functions do the scatter plots in plotly since it seems to be more efficient."""
def get_plotly_scatter_plot(
    data_in: np.ndarray,
    lat_mat: np.ndarray,
    skips: int = 5,
    logcolor: bool = False,
    mask: np.ndarray = None,
    opacity: float = 0.5,
    marker_size: int = 5,
) -> go.Figure:
    """
    Build a plotly figure that renders structured grid data as a scatter plot.

    Args:
        data_in: Structured grid data to be plotted (1, 2, or 3 dimensions).
        lat_mat: Lattice vectors of the cell; fractional grid coordinates are
            mapped to Cartesian positions via ``lat_mat.T``.
        skips: Subsampling stride — only every ``skips``-th grid point along
            each axis is shown.
        logcolor: If True, color markers by the natural log of the data.
        mask: Optional boolean array (same shape as ``data_in``) selecting
            which points to draw; all points are drawn when None.
        opacity: Marker opacity (used for the 2D and 3D traces).
        marker_size: Marker size for the scatter trace.

    Returns:
        A plotly ``go.Figure`` containing a single scatter trace.
    """
    ndim = data_in.ndim
    if ndim > 3:
        raise NotImplementedError("Can only render data of 1, 2, or 3 dimensions.")

    # Subsample every `skips`-th point along each axis of the (real part of
    # the) data, and build the matching flattened point filter.
    sub = (slice(0, None, skips),) * ndim
    values = np.real(data_in).copy()[sub]
    if mask is not None:
        keep = mask[sub].flatten()
    else:
        keep = np.ones_like(values, dtype=bool).flatten()

    # Fractional coordinates along each axis, mapped through the lattice
    # vectors to obtain Cartesian positions for every grid point.
    fracs = [np.linspace(0, 1, values.shape[axis], endpoint=False) for axis in range(ndim)]
    grids = np.meshgrid(*fracs, indexing="ij")  # "ij" keeps axis order aligned with the data
    coords = np.dot(lat_mat.T, [g.flatten() for g in grids])

    colors = np.log(values.flatten()) if logcolor else values.flatten()

    # Pull out the coordinate components for the surviving points.
    if ndim == 1:
        xx = coords[keep]
    elif ndim > 1:
        xx = coords[0, keep]
        yy = coords[1, keep]
        if ndim > 2:
            zz = coords[2, keep]
    colors = colors[keep]

    if ndim == 1:
        trace = go.Scatter(x=xx, y=colors, mode="markers", marker=dict(size=marker_size, color="red"))
    if ndim == 2:
        trace = go.Scatter(
            x=xx,
            y=yy,
            mode="markers",
            marker=dict(
                size=marker_size,
                color=colors,  # per-point color values
                colorscale="Viridis",
                opacity=opacity,
            ),
        )
    if ndim == 3:
        trace = go.Scatter3d(
            x=xx,
            y=yy,
            z=zz,
            mode="markers",
            marker=dict(size=marker_size, color=colors, colorscale="Viridis", opacity=opacity),
        )

    fig = go.Figure(data=[trace])
    fig.update_layout(template="plotly_white")
    # Keep the aspect ratio true to the data for 2D and 3D renderings.
    if ndim == 2:
        fig.update_layout(width=800, height=800, yaxis=dict(scaleanchor="x", scaleratio=1))
    if ndim == 3:
        fig.update_layout(width=800, height=800, scene_aspectmode="data")
    return fig
|
{"hexsha": "2467ce2f52a85b3063ff76cb21746cd18dd90592", "size": 2951, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyrho/vis/plotly.py", "max_stars_repo_name": "mattmcdermott/pyrho", "max_stars_repo_head_hexsha": "7ab3bd893a8b310b8be61f33a1105b090a46cd32", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-02-26T21:12:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T01:34:54.000Z", "max_issues_repo_path": "src/pyrho/vis/plotly.py", "max_issues_repo_name": "mattmcdermott/pyrho", "max_issues_repo_head_hexsha": "7ab3bd893a8b310b8be61f33a1105b090a46cd32", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 61, "max_issues_repo_issues_event_min_datetime": "2021-02-27T00:55:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-20T17:18:07.000Z", "max_forks_repo_path": "src/pyrho/vis/plotly.py", "max_forks_repo_name": "mattmcdermott/pyrho", "max_forks_repo_head_hexsha": "7ab3bd893a8b310b8be61f33a1105b090a46cd32", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-15T18:38:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T00:06:56.000Z", "avg_line_length": 30.112244898, "max_line_length": 102, "alphanum_fraction": 0.5926804473, "include": true, "reason": "import numpy", "num_tokens": 790}
|
\section{Moves in Detail}
\section{Multiclass Moves}
\section{Multiclass Dabbler}
\section{Multiclass Initiate}
\section{Multiclass Master}
For the purposes of these multiclass moves the cleric's Commune and Cast a Spell count as one move. Likewise for the wizard's Spellbook, Prepare Spells, and Cast a Spell.
When you first select a multiclass move that grants you the ability to cast spells you prepare and cast spells as if you had one level in the casting class. Every additional time you level up you increase the level you prepare and cast spells at by one.
When Ajax gains 3rd level he takes Multiclass Dabbler to get Commune and Cast a Spell from the Cleric class. He casts and prepares spells like a first level Cleric: first level spells and rotes only, a total of 2 levels of spells prepared. When he later gains 4th level, he prepares and casts spells as a second level Cleric.
\section{Bard Moves}
\section{Bardic Lore}
Treat the areas of your lore like books. Is the upwards-flowing waterfall you just came across something important that would be covered in a book or college course called ``On Spells and Magicks?'' If so, your Bardic Lore of that name applies.
If you care enough to ask a question about it then it's probably important. Don't second guess yourself: if you care enough to want to know more about it then it has some importance.
\section{Charming and Open}
Speaking frankly means you really are being open with them, not just giving the appearance of openness. It's your true sincerity that puts others at ease and lets you get information out of them; if you're trying to maintain a lie at the same time you won't get very far.
\section{It Goes To Eleven}
Of course, the creature you affect must have some way of harming your target of choice. Spurring a wolf into a frenzy to attack the eagle lord circling above doesn't do any good, the wolf doesn't have a way to attack it.
\section{An Ear for Magic}
Acting on the answers can mean acting against them or taking advantage of them. Either way you take +1 forward.
\section{Cleric}
\section{Commune}
If you like you can prepare the same spell more than once.
\section{Cleric Spells}
\section{Guidance}
It's up to the creativity of your deity (and the GM) to communicate as much as possible through the motions and gestures of your deity's symbol. You don't get visions or a voice from heaven, just some visual cue of what your deity would have you do (even if it's not in your best interest).
\section{Magic Weapon}
Casting Magic Weapon on the same weapon again has no effect. No matter how many times you cast it on the same weapon it's still just magic +1d4 damage.
That said, even a weak enchantment is nothing to be scoffed at. Having a magic weapon may give you an advantage against some of the stranger beasts of Dungeon World, ghosts and the like. The exact effects depend on the monster and circumstances, so make the most of it.
\section{Animate Dead}
Treating the zombie as your character means you make moves with its stats based on the fiction, just like always. Unless its brain is functioning on its own, the zombie can't do much besides follow the last order it was given, so you'd better stay close. Even if its brain works it's still bound to follow your orders.
\section{Fighter Moves}
\section{Signature Weapon}
The base description you choose is just a description. Choosing a spear doesn't give you Close range, for example. You could choose a spear as the description, then Hand as the range. Your spear is something special, or your technique with it is different, just describe why your weapon has the tags you've chosen.
\section{Heirloom}
The exact nature of the spirits (and therefore what knowledge they can offer to you) is up to you and the GM to decide. Maybe they're dead ancestors, echoes of people you've slain, or a minor demon. Up to you.
\section{Armor Mastery}
Armor and shields that are reduced to 0 armor are effectively destroyed. You'll pretty much be paying for a new one anyway, so you might as well drop them and haul out some gold instead.
\section{Paladin Moves}
\section{Evidence of Faith}
Your +1 forward applies to anything you do based on your knowledge of the spell's effects: defying it, defending against it, using it to your advantage, etc.
\section{Ranger Moves}
\section{Command}
Your bonuses only apply when your animal is doing something it's trained in. An animal not trained to attack monsters won't be any help when you're attacking an otyugh.
\section{Thief Moves}
\section{Backstab}
Reducing armor until they repair it means that they lose armor until they do something that compensates for your damage. If you're fighting an armored knight that might mean a fresh suit of armor, but for a thick-hided ogre it's until they've had time to heal up (or protect the wound you left).
\section{Poisoner}
In order to make more doses of your chosen poison you need to be reasonably able to gather the required materials. If you're locked up at the top of a tower you're not going to be able to get the materials you need.
\section{Wealth and Taste}
In order to use this move it's really got to be your most valuable possession. It's the honest value you place on it that draws others, no lies.
\section{Disguise}
Your disguise covers your appearance and any basics like accents and limps. It doesn't grant you any special knowledge of the target, so if someone asks you what your favorite color is you'd better think fast. Defying danger with CHA is a common part of maintaining a disguise.
\section{Wizard Moves}
\section{Prepare Spells}
You can prepare the same spell more than once if you like.
\section{Empowered Magic}
Maximizing the effects of a spell is simple for spells that involve a roll: a maximized Magic Missile does 8 damage. In other cases it's down to the circumstances. A maximized Identify might result in far more information than expected. If there's no clear way to maximize it you can't choose that option.
Likewise for doubling the targets. If the spell doesn't have targets you can't choose to double them.
\section{Wizard Spells}
\section{Dispel Magic}
The exact effects depend on the circumstances. A goblin orkaster's spell might just be ended, while a deity's consecration is probably just dimmed. The GM will tell you the likely effects of Dispelling a given effect before you cast.
\section{Fireball}
``Nearby'' depends on context; a few paces or so in an open space, considerably more in an enclosed room. Be careful!
\section{Polymorph}
In some cases the GM may choose the last option more than once to list each unexpected benefit or weakness.
\section{Summon Monster}
The exact type of monster you get is up to the GM, based on your choices. If you want a non-reckless swimming creature you might get a water elemental, a 1d8 damage +2 Str creature might be a barbed devil. Whatever the creature is you still get to play it.
|
{"hexsha": "0f15b67febb612633ff99459fec42fa5fc0b8132", "size": 6990, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/Class_Moves_Discussion.tex", "max_stars_repo_name": "Hegz/DW-Latex", "max_stars_repo_head_hexsha": "49a230f82fdeab7faa7c736ef81ef13266ac399d", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2015-04-27T22:54:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-15T21:25:14.000Z", "max_issues_repo_path": "tex/Class_Moves_Discussion.tex", "max_issues_repo_name": "Hegz/DW-Latex", "max_issues_repo_head_hexsha": "49a230f82fdeab7faa7c736ef81ef13266ac399d", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/Class_Moves_Discussion.tex", "max_forks_repo_name": "Hegz/DW-Latex", "max_forks_repo_head_hexsha": "49a230f82fdeab7faa7c736ef81ef13266ac399d", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-09-01T13:27:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-27T03:56:49.000Z", "avg_line_length": 55.0393700787, "max_line_length": 326, "alphanum_fraction": 0.7799713877, "num_tokens": 1569}
|
import numpy as np
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set, \
get_test_or_val_set_id_from_train
from Utils.Data.Dictionary.TweetTextFeaturesDictArray import TweetTokenLengthFeatureDictArray, \
TweetTokenLengthUniqueFeatureDictArray
from Utils.Data.Features.Generated.TweetFeature.IsEngagementType import *
from Utils.Data.Features.MappedFeatures import MappedFeatureEngagerId, MappedFeatureCreatorId, MappedFeatureTweetId
def find_ratio_and_update(creator_id, creator_length_array, creator_length_unique_array,
                          current_tweet_length, current_tweet_length_unique):
    """
    Return the unique/total token ratio accumulated so far for ``creator_id``,
    then fold the current tweet's counts into the running totals.

    Args:
        creator_id: Index of the creator in both accumulator arrays.
        creator_length_array: Per-creator running total of token counts
            (mutated in place).
        creator_length_unique_array: Per-creator running total of unique-token
            counts (mutated in place).
        current_tweet_length: Token count of the tweet being processed.
        current_tweet_length_unique: Unique-token count of the same tweet.

    Returns:
        The ratio unique/total recorded *before* this tweet, or 0 when no
        tokens have been recorded yet for this creator (avoids division by zero).
    """
    # Bug fix: the original tested `creator_length_array == 0`, i.e. the truth
    # value of the whole numpy array, which raises ValueError for arrays of
    # size > 1. Only this creator's accumulated total must be checked.
    if creator_length_array[creator_id] == 0:
        current_ratio = 0
    else:
        current_ratio = creator_length_unique_array[creator_id] / creator_length_array[creator_id]
    # Update the running totals in place with the current tweet.
    creator_length_array[creator_id] += current_tweet_length
    creator_length_unique_array[creator_id] += current_tweet_length_unique
    return current_ratio
class CreatorFrequencyUniqueTokens(GeneratedFeaturePickle):
    """
    Per-row feature: the ratio of unique tokens to total tokens accumulated
    over all tweets the creator posted *before* the current one (rows are
    processed in chronological order of tweet creation timestamp).

    The feature is computed once over the train set and then continued over
    the matching test/validation set, carrying the per-creator accumulators
    forward so test-set ratios include the train-set history.
    """

    def __init__(self, dataset_id: str):
        super().__init__("creator_feature_frequency_of_unique_tokens", dataset_id)
        self.pck_path = pl.Path(
            f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/creator_features/{self.feature_name}.pck.gz")
        self.csv_path = pl.Path(
            f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/creator_features/{self.feature_name}.csv.gz")

    def create_feature(self):
        # Resolve the train/test pair regardless of which id this instance holds.
        if is_test_or_val_set(self.dataset_id):
            train_dataset_id = get_train_set_id_from_test_or_val_set(self.dataset_id)
            test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)
        else:
            train_dataset_id = self.dataset_id
            test_dataset_id = get_test_or_val_set_id_from_train(train_dataset_id)

        # Load the raw features needed for the train pass.
        creation_timestamps_feature = RawFeatureTweetTimestamp(train_dataset_id)
        creators_feature = MappedFeatureCreatorId(train_dataset_id)
        tweet_id_feature = MappedFeatureTweetId(train_dataset_id)

        # Column names used to iterate the concatenated dataframe.
        creators_col = creators_feature.feature_name
        tweet_id_col = tweet_id_feature.feature_name

        # Tweet-id -> token-count lookup arrays (shared by both passes).
        length_dict = TweetTokenLengthFeatureDictArray().load_or_create()
        length_unique_dict = TweetTokenLengthUniqueFeatureDictArray().load_or_create()

        dataframe = pd.concat([
            creators_feature.load_or_create(),
            creation_timestamps_feature.load_or_create(),
            tweet_id_feature.load_or_create()
        ], axis=1)

        # Chronological order so each row only sees the creator's past tweets.
        dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)

        # Per-creator accumulators, indexed by (mapped) creator id.
        creator_length_array = np.zeros(dataframe[creators_feature.feature_name].max() + 1, dtype=int)
        creator_length_unique_array = np.zeros(dataframe[creators_feature.feature_name].max() + 1, dtype=int)

        result = pd.DataFrame(
            [find_ratio_and_update(creator_id, creator_length_array, creator_length_unique_array,
                                   length_dict[tweet_id], length_unique_dict[tweet_id])
             for creator_id, tweet_id in zip(dataframe[creators_col], dataframe[tweet_id_col])],
            index=dataframe.index
        )

        if not CreatorFrequencyUniqueTokens(train_dataset_id).has_feature():
            # Restore the original row order before persisting.
            result.sort_index(inplace=True)
            CreatorFrequencyUniqueTokens(train_dataset_id).save_feature(result)

        if not CreatorFrequencyUniqueTokens(test_dataset_id).has_feature():
            # Second pass: continue over the test/validation set with the
            # accumulators carried over from the train pass.
            creation_timestamps_feature = RawFeatureTweetTimestamp(test_dataset_id)
            creators_feature = MappedFeatureCreatorId(test_dataset_id)
            tweet_id_feature = MappedFeatureTweetId(test_dataset_id)

            creators_col = creators_feature.feature_name
            tweet_id_col = tweet_id_feature.feature_name

            dataframe = pd.concat([
                creation_timestamps_feature.load_or_create(),
                creators_feature.load_or_create(),
                tweet_id_feature.load_or_create(),
            ], axis=1)

            dataframe.sort_values(creation_timestamps_feature.feature_name, inplace=True)

            # The test set may contain creators unseen in train: grow both
            # accumulator arrays with zero-padding to fit the new max id.
            if dataframe[creators_col].max() + 1 > creator_length_array.size:
                creator_length_array = np.pad(
                    creator_length_array,
                    pad_width=(0, dataframe[creators_col].max() + 1 - creator_length_array.size),
                    mode='constant',
                    constant_values=0
                )
                # Bug fix: the original padded `creator_length_array` here and
                # assigned it to `creator_length_unique_array`, overwriting the
                # unique-token totals with the total-token counts.
                creator_length_unique_array = np.pad(
                    creator_length_unique_array,
                    pad_width=(0, dataframe[creators_col].max() + 1 - creator_length_unique_array.size),
                    mode='constant',
                    constant_values=0
                )

            result = pd.DataFrame(
                [find_ratio_and_update(creator_id, creator_length_array, creator_length_unique_array,
                                       length_dict[tweet_id], length_unique_dict[tweet_id])
                 for creator_id, tweet_id in zip(dataframe[creators_col], dataframe[tweet_id_col])],
                index=dataframe.index
            )

            result.sort_index(inplace=True)
            CreatorFrequencyUniqueTokens(test_dataset_id).save_feature(result)
|
{"hexsha": "7e52ebc151d6aff155b7245a742b18311eaaabe7", "size": 5593, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/Data/Features/Generated/CreatorFeature/CreatorFrequencyUniqueTokens.py", "max_stars_repo_name": "MaurizioFD/recsys-challenge-2020-twitter", "max_stars_repo_head_hexsha": "95dc024fb4f8777aa62e1304536daece640428de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2020-07-09T11:31:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T05:50:48.000Z", "max_issues_repo_path": "Utils/Data/Features/Generated/CreatorFeature/CreatorFrequencyUniqueTokens.py", "max_issues_repo_name": "kiminh/recsys-challenge-2020-twitter", "max_issues_repo_head_hexsha": "567f0db40be7db3d21c360f2ca6cdf2addc7c698", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-02T18:55:21.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-13T22:13:58.000Z", "max_forks_repo_path": "Utils/Data/Features/Generated/CreatorFeature/CreatorFrequencyUniqueTokens.py", "max_forks_repo_name": "kiminh/recsys-challenge-2020-twitter", "max_forks_repo_head_hexsha": "567f0db40be7db3d21c360f2ca6cdf2addc7c698", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-08-08T14:55:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-06T09:17:03.000Z", "avg_line_length": 44.0393700787, "max_line_length": 115, "alphanum_fraction": 0.6906847846, "include": true, "reason": "import numpy", "num_tokens": 1096}
|
[STATEMENT]
lemma InvariantWatchesElNotifyWatchesLoop:
fixes literal :: Literal and Wl :: "nat list" and newWl :: "nat list" and state :: State
assumes
"InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)" and
"\<forall> (c::nat). c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)"
shows
"let state' = (notifyWatches_loop literal Wl newWl state) in
InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (induct Wl arbitrary: newWl state)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set [] \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal [] newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set [] \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (2 subgoals):
1. \<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set [] \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal [] newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set [] \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal [] newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal [] newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case (Cons clause Wl')
[PROOF STATE]
proof (state)
this:
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from \<open>\<forall> (c::nat). c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<close>
[PROOF STATE]
proof (chain)
picking this:
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
[PROOF STEP]
have "0 \<le> clause" and "clause < length (getF state)"
[PROOF STATE]
proof (prove)
using this:
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. 0 \<le> clause &&& clause < length (getF state)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> clause
clause < length (getF state)
goal (1 subgoal):
1. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 \<le> clause
clause < length (getF state)
[PROOF STEP]
obtain wa::Literal and wb::Literal
where "getWatch1 state clause = Some wa" and "getWatch2 state clause = Some wb"
[PROOF STATE]
proof (prove)
using this:
0 \<le> clause
clause < length (getF state)
goal (1 subgoal):
1. (\<And>wa wb. \<lbrakk>getWatch1 state clause = Some wa; getWatch2 state clause = Some wb\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
0 \<le> clause
clause < length (getF state)
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. (\<And>wa wb. \<lbrakk>getWatch1 state clause = Some wa; getWatch2 state clause = Some wb\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
0 \<le> clause
clause < length (getF state)
\<lbrakk>\<forall>clause. 0 \<le> clause \<and> clause < length (getF ?state) \<longrightarrow> (\<exists>w1 w2. getWatch1 ?state clause = Some w1 \<and> getWatch2 ?state clause = Some w2 \<and> w1 el getF ?state ! clause \<and> w2 el getF ?state ! clause); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in \<forall>clause. 0 \<le> clause \<and> clause < length (getF state') \<longrightarrow> (\<exists>w1 w2. getWatch1 state' clause = Some w1 \<and> getWatch2 state' clause = Some w2 \<and> w1 el getF state' ! clause \<and> w2 el getF state' ! clause)
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. (\<And>wa wb. \<lbrakk>getWatch1 state clause = Some wa; getWatch2 state clause = Some wb\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
goal (1 subgoal):
1. \<And>a Wl newWl state. \<lbrakk>\<And>newWl state. \<lbrakk>InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set Wl \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state'); InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state); \<forall>c. c \<in> set (a # Wl) \<longrightarrow> 0 \<le> c \<and> c < length (getF state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal (a # Wl) newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "Some literal = getWatch1 state clause")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
Some literal = getWatch1 state clause
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state' = "swapWatches clause state"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?w1 = wb
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getWatch1 ?state' clause = Some ?w1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getWatch1 (swapWatches clause state) clause = Some wb
[PROOF STEP]
using \<open>getWatch2 state clause = Some wb\<close>
[PROOF STATE]
proof (prove)
using this:
getWatch2 state clause = Some wb
goal (1 subgoal):
1. getWatch1 (swapWatches clause state) clause = Some wb
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
getWatch2 state clause = Some wb
goal (1 subgoal):
1. getWatch1 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) clause = Some wb
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
getWatch1 (swapWatches clause state) clause = Some wb
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?w2 = wa
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getWatch2 ?state' clause = Some ?w2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getWatch2 (swapWatches clause state) clause = Some wa
[PROOF STEP]
using \<open>getWatch1 state clause = Some wa\<close>
[PROOF STATE]
proof (prove)
using this:
getWatch1 state clause = Some wa
goal (1 subgoal):
1. getWatch2 (swapWatches clause state) clause = Some wa
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
getWatch1 state clause = Some wa
goal (1 subgoal):
1. getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) clause = Some wa
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
getWatch2 (swapWatches clause state) clause = Some wa
goal (2 subgoals):
1. Some literal = getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "literalTrue ?w1 (elements (getM ?state'))")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
literalTrue wb (elements (getM (swapWatches clause state)))
goal (2 subgoals):
1. literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons(2)
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state') (getWatch1 ?state') (getWatch2 ?state')"
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (swapWatches clause state)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (swapWatches clause state) clausea = Some w1 \<and> getWatch2 (swapWatches clause state) clausea = Some w2 \<and> w1 el getF (swapWatches clause state) ! clausea \<and> w2 el getF (swapWatches clause state) ! clausea)
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
goal (2 subgoals):
1. literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
goal (2 subgoals):
1. literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (swapWatches clause state) = getF state
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (swapWatches clause state) = getF state
goal (2 subgoals):
1. literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal = getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal = getWatch1 state clause
getWatch1 (swapWatches clause state) clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal = getWatch1 state clause
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state)) (getWatch1 (swapWatches clause state)) (getWatch2 (swapWatches clause state))
getF (swapWatches clause state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal = getWatch1 state clause
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
literalTrue wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. \<not> literalTrue wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "getNonWatchedUnfalsifiedLiteral (nth (getF ?state') clause) ?w1 ?w2 (getM ?state')")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case (Some l')
[PROOF STATE]
proof (state)
this:
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some l'
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
hence "l' el (nth (getF ?state') clause)"
[PROOF STATE]
proof (prove)
using this:
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some l'
goal (1 subgoal):
1. l' el getF (swapWatches clause state) ! clause
[PROOF STEP]
using getNonWatchedUnfalsifiedLiteralSomeCharacterization
[PROOF STATE]
proof (prove)
using this:
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some l'
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> ?l el ?clause
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> ?l \<noteq> ?w1.0
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> ?l \<noteq> ?w2.0
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> \<not> literalFalse ?l (elements ?M)
goal (1 subgoal):
1. l' el getF (swapWatches clause state) ! clause
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
l' el getF (swapWatches clause state) ! clause
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state'' = "setWatch2 clause l' ?state'"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons(2)
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state'') (getWatch1 ?state'') (getWatch2 ?state'')"
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
[PROOF STEP]
using \<open>l' el (nth (getF ?state') clause)\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
l' el getF (swapWatches clause state) ! clause
goal (1 subgoal):
1. InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
l' el getF (swapWatches clause state) ! clause
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (setWatch2 clause l' (swapWatches clause state))) \<longrightarrow> (\<exists>w1 w2. getWatch1 (setWatch2 clause l' (swapWatches clause state)) clausea = Some w1 \<and> getWatch2 (setWatch2 clause l' (swapWatches clause state)) clausea = Some w2 \<and> w1 el getF (setWatch2 clause l' (swapWatches clause state)) ! clausea \<and> w2 el getF (setWatch2 clause l' (swapWatches clause state)) ! clausea)
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
l' el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) ! clause
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (setWatch2 clause l' (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))) \<longrightarrow> (\<exists>w1 w2. getWatch1 (setWatch2 clause l' (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)) clausea = Some w1 \<and> getWatch2 (setWatch2 clause l' (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)) clausea = Some w2 \<and> w1 el getF (setWatch2 clause l' (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)) ! clausea \<and> w2 el getF (setWatch2 clause l' (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)) ! clausea)
[PROOF STEP]
unfolding setWatch2_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
l' el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) ! clause
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getWatch2 := getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)(clause \<mapsto> l'), getWatchList := (getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))(l' := clause # getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) l')\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getWatch2 := getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)(clause \<mapsto> l'), getWatchList := (getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))(l' := clause # getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) l')\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getWatch2 := getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)(clause \<mapsto> l'), getWatchList := (getWatchList (state\<lparr>getWatch1 := 
(getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))(l' := clause # getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) l')\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getWatch2 := getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)(clause \<mapsto> l'), getWatchList := (getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))(l' := clause # getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) l')\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getWatch2 := getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)(clause \<mapsto> l'), getWatchList := (getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))(l' := clause # getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) l')\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state'' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (setWatch2 clause l' (swapWatches clause state)) = getF state
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (setWatch2 clause l' (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)) = getF state
[PROOF STEP]
unfolding setWatch2_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getWatch2 := getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>)(clause \<mapsto> l'), getWatchList := (getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>))(l' := clause # getWatchList (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) l')\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Some
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' (swapWatches clause state))) (getWatch1 (setWatch2 clause l' (swapWatches clause state))) (getWatch2 (setWatch2 clause l' (swapWatches clause state)))
getF (setWatch2 clause l' (swapWatches clause state)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = Some l'
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case None
[PROOF STATE]
proof (state)
this:
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None
goal (1 subgoal):
1. getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "literalFalse ?w1 (elements (getM ?state'))")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
literalFalse wb (elements (getM (swapWatches clause state)))
goal (2 subgoals):
1. literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state'' = "?state'\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons(2)
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state'') (getWatch1 ?state'') (getWatch2 ?state'')"
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) clausea = Some w1 \<and> getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) clausea = Some w2 \<and> w1 el getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) ! clausea \<and> w2 el getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) ! clausea)
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getConflictFlag := True, getConflictClause := clause\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getConflictFlag := True, getConflictClause := clause\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getConflictFlag := True, getConflictClause := clause\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getConflictFlag := True, getConflictClause := clause\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getConflictFlag := True, getConflictClause := clause\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
goal (2 subgoals):
1. literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
goal (2 subgoals):
1. literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state'' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
goal (2 subgoals):
1. literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using None
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>literalFalse ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (swapWatches clause state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None
literalFalse wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> literalFalse wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state'' = "setReason ?w1 clause (?state'\<lparr>getQ := (if ?w1 el (getQ ?state') then (getQ ?state') else (getQ ?state') @ [?w1])\<rparr>)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons(2)
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state'') (getWatch1 ?state'') (getWatch2 ?state'')"
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) \<longrightarrow> (\<exists>w1 w2. getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) clausea = Some w1 \<and> getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) clausea = Some w2 \<and> w1 el getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) ! clausea \<and> w2 el getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) ! clausea)
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (setReason wb clause (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>))) \<longrightarrow> (\<exists>w1 w2. getWatch1 (setReason wb clause (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)) clausea = Some w1 \<and> getWatch2 (setReason wb clause (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := 
getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)) clausea = Some w2 \<and> w1 el getF (setReason wb clause (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)) ! clausea \<and> w2 el getF (setReason wb clause (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)) ! clausea)
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb], getReason := getReason (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)(wb \<mapsto> clause)\<rparr>)) \<longrightarrow> (\<exists>w1 w2. 
getWatch1 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb], getReason := getReason (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)(wb \<mapsto> clause)\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), 
getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb], getReason := getReason (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)(wb \<mapsto> clause)\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb], getReason := getReason (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state 
clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)(wb \<mapsto> clause)\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb], getReason := getReason (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)(wb \<mapsto> clause)\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state'' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (setReason wb clause (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)) = getF state
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb], getReason := getReason (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause), getQ := if wb el getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) then getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) else getQ (state\<lparr>getWatch1 := (getWatch1 state)(clause := getWatch2 state clause), getWatch2 := (getWatch2 state)(clause := getWatch1 state clause)\<rparr>) @ [wb]\<rparr>)(wb \<mapsto> clause)\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
goal (1 subgoal):
1. \<not> literalFalse wb (elements (getM (swapWatches clause state))) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using None
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalFalse ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch1 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>))) (getWatch2 (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)))
getF (setReason wb clause (swapWatches clause state\<lparr>getQ := if wb el getQ (swapWatches clause state) then getQ (swapWatches clause state) else getQ (swapWatches clause state) @ [wb]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 (swapWatches clause state) clause = Some wb
getWatch2 (swapWatches clause state) clause = Some wa
Some literal = getWatch1 state clause
\<not> literalTrue wb (elements (getM (swapWatches clause state)))
getNonWatchedUnfalsifiedLiteral (getF (swapWatches clause state) ! clause) wb wa (getM (swapWatches clause state)) = None
\<not> literalFalse wb (elements (getM (swapWatches clause state)))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
Some literal \<noteq> getWatch1 state clause
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state' = state
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?w1 = wa
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getWatch1 ?state' clause = Some ?w1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getWatch1 state clause = Some wa
[PROOF STEP]
using \<open>getWatch1 state clause = Some wa\<close>
[PROOF STATE]
proof (prove)
using this:
getWatch1 state clause = Some wa
goal (1 subgoal):
1. getWatch1 state clause = Some wa
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
getWatch1 state clause = Some wa
goal (1 subgoal):
1. getWatch1 state clause = Some wa
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
getWatch1 state clause = Some wa
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?w2 = wb
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getWatch2 ?state' clause = Some ?w2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getWatch2 state clause = Some wb
[PROOF STEP]
using \<open>getWatch2 state clause = Some wb\<close>
[PROOF STATE]
proof (prove)
using this:
getWatch2 state clause = Some wb
goal (1 subgoal):
1. getWatch2 state clause = Some wb
[PROOF STEP]
unfolding swapWatches_def
[PROOF STATE]
proof (prove)
using this:
getWatch2 state clause = Some wb
goal (1 subgoal):
1. getWatch2 state clause = Some wb
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
getWatch2 state clause = Some wb
goal (1 subgoal):
1. Some literal \<noteq> getWatch1 state clause \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "literalTrue ?w1 (elements (getM ?state'))")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
literalTrue wa (elements (getM state))
goal (2 subgoals):
1. literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
literalTrue wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
literalTrue wa (elements (getM state))
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
literalTrue wa (elements (getM state))
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal \<noteq> getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
literalTrue wa (elements (getM state))
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal \<noteq> getWatch1 state clause
getWatch1 state clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
literalTrue wa (elements (getM state))
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal \<noteq> getWatch1 state clause
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
literalTrue wa (elements (getM state))
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
Some literal \<noteq> getWatch1 state clause
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
literalTrue wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add:Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. \<not> literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> literalTrue wa (elements (getM state))
goal (1 subgoal):
1. \<not> literalTrue wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "getNonWatchedUnfalsifiedLiteral (nth (getF ?state') clause) ?w1 ?w2 (getM ?state')")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case (Some l')
[PROOF STATE]
proof (state)
this:
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some l'
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
hence "l' el (nth (getF ?state') clause)"
[PROOF STATE]
proof (prove)
using this:
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some l'
goal (1 subgoal):
1. l' el getF state ! clause
[PROOF STEP]
using getNonWatchedUnfalsifiedLiteralSomeCharacterization
[PROOF STATE]
proof (prove)
using this:
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some l'
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> ?l el ?clause
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> ?l \<noteq> ?w1.0
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> ?l \<noteq> ?w2.0
getNonWatchedUnfalsifiedLiteral ?clause ?w1.0 ?w2.0 ?M = Some ?l \<Longrightarrow> \<not> literalFalse ?l (elements ?M)
goal (1 subgoal):
1. l' el getF state ! clause
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
l' el getF state ! clause
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state'' = "setWatch2 clause l' ?state'"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state'') (getWatch1 ?state'') (getWatch2 ?state'')"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
[PROOF STEP]
using \<open>l' el (nth (getF ?state') clause)\<close>
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
l' el getF state ! clause
goal (1 subgoal):
1. InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<forall>clause. 0 \<le> clause \<and> clause < length (getF ?state) \<longrightarrow> (\<exists>w1 w2. getWatch1 ?state clause = Some w1 \<and> getWatch2 ?state clause = Some w2 \<and> w1 el getF ?state ! clause \<and> w2 el getF ?state ! clause); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in \<forall>clause. 0 \<le> clause \<and> clause < length (getF state') \<longrightarrow> (\<exists>w1 w2. getWatch1 state' clause = Some w1 \<and> getWatch2 state' clause = Some w2 \<and> w1 el getF state' ! clause \<and> w2 el getF state' ! clause)
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
l' el getF state ! clause
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (setWatch2 clause l' state)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (setWatch2 clause l' state) clausea = Some w1 \<and> getWatch2 (setWatch2 clause l' state) clausea = Some w2 \<and> w1 el getF (setWatch2 clause l' state) ! clausea \<and> w2 el getF (setWatch2 clause l' state) ! clausea)
[PROOF STEP]
unfolding setWatch2_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<forall>clause. 0 \<le> clause \<and> clause < length (getF ?state) \<longrightarrow> (\<exists>w1 w2. getWatch1 ?state clause = Some w1 \<and> getWatch2 ?state clause = Some w2 \<and> w1 el getF ?state ! clause \<and> w2 el getF ?state ! clause); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in \<forall>clause. 0 \<le> clause \<and> clause < length (getF state') \<longrightarrow> (\<exists>w1 w2. getWatch1 state' clause = Some w1 \<and> getWatch2 state' clause = Some w2 \<and> w1 el getF state' ! clause \<and> w2 el getF state' ! clause)
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
l' el getF state ! clause
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getWatch2 := getWatch2 state(clause \<mapsto> l'), getWatchList := (getWatchList state)(l' := clause # getWatchList state l')\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (state\<lparr>getWatch2 := getWatch2 state(clause \<mapsto> l'), getWatchList := (getWatchList state)(l' := clause # getWatchList state l')\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getWatch2 := getWatch2 state(clause \<mapsto> l'), getWatchList := (getWatchList state)(l' := clause # getWatchList state l')\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getWatch2 := getWatch2 state(clause \<mapsto> l'), getWatchList := (getWatchList state)(l' := clause # getWatchList state l')\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getWatch2 := getWatch2 state(clause \<mapsto> l'), getWatchList := (getWatchList state)(l' := clause # getWatchList state l')\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state'' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (setWatch2 clause l' state) = getF state
[PROOF STEP]
unfolding setWatch2_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getWatch2 := getWatch2 state(clause \<mapsto> l'), getWatchList := (getWatchList state)(l' := clause # getWatchList state l')\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (setWatch2 clause l' state) = getF state
goal (2 subgoals):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<And>a. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some a \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Some
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setWatch2 clause l' state)) (getWatch1 (setWatch2 clause l' state)) (getWatch2 (setWatch2 clause l' state))
getF (setWatch2 clause l' state) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = Some l'
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case None
[PROOF STATE]
proof (state)
this:
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None
goal (1 subgoal):
1. getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
proof (cases "literalFalse ?w1 (elements (getM ?state'))")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
literalFalse wa (elements (getM state))
goal (2 subgoals):
1. literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state'' = "?state'\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state'') (getWatch1 ?state'') (getWatch2 ?state'')"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<forall>clause. 0 \<le> clause \<and> clause < length (getF ?state) \<longrightarrow> (\<exists>w1 w2. getWatch1 ?state clause = Some w1 \<and> getWatch2 ?state clause = Some w2 \<and> w1 el getF ?state ! clause \<and> w2 el getF ?state ! clause); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in \<forall>clause. 0 \<le> clause \<and> clause < length (getF state') \<longrightarrow> (\<exists>w1 w2. getWatch1 state' clause = Some w1 \<and> getWatch2 state' clause = Some w2 \<and> w1 el getF state' ! clause \<and> w2 el getF state' ! clause)
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
goal (2 subgoals):
1. literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
goal (2 subgoals):
1. literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state'' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
goal (2 subgoals):
1. literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
2. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using None
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>literalFalse ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch1 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>)) (getWatch2 (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>))
getF (state\<lparr>getConflictFlag := True, getConflictClause := clause\<rparr>) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None
literalFalse wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> literalFalse wa (elements (getM state))
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
let ?state'' = "setReason ?w1 clause (?state'\<lparr>getQ := (if ?w1 el (getQ ?state') then (getQ ?state') else (getQ ?state') @ [?w1])\<rparr>)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
from Cons(2)
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
[PROOF STEP]
have "InvariantWatchesEl (getF ?state'') (getWatch1 ?state'') (getWatch2 ?state'')"
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
goal (1 subgoal):
1. InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
[PROOF STEP]
unfolding InvariantWatchesEl_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) \<longrightarrow> (\<exists>w1 w2. getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) clausea = Some w1 \<and> getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) clausea = Some w2 \<and> w1 el getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) ! clausea \<and> w2 el getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) ! clausea)
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
using this:
\<forall>clause. 0 \<le> clause \<and> clause < length (getF state) \<longrightarrow> (\<exists>w1 w2. getWatch1 state clause = Some w1 \<and> getWatch2 state clause = Some w2 \<and> w1 el getF state ! clause \<and> w2 el getF state ! clause)
goal (1 subgoal):
1. \<forall>clausea. 0 \<le> clausea \<and> clausea < length (getF (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa], getReason := getReason (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)(wa \<mapsto> clause)\<rparr>)) \<longrightarrow> (\<exists>w1 w2. getWatch1 (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa], getReason := getReason (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)(wa \<mapsto> clause)\<rparr>) clausea = Some w1 \<and> getWatch2 (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa], getReason := getReason (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)(wa \<mapsto> clause)\<rparr>) clausea = Some w2 \<and> w1 el getF (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa], getReason := getReason (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)(wa \<mapsto> clause)\<rparr>) ! clausea \<and> w2 el getF (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa], getReason := getReason (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)(wa \<mapsto> clause)\<rparr>) ! clausea)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
have "getF ?state'' = getF state"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
[PROOF STEP]
unfolding setReason_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. getF (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa], getReason := getReason (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)(wa \<mapsto> clause)\<rparr>) = getF state
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
goal (1 subgoal):
1. \<not> literalFalse wa (elements (getM state)) \<Longrightarrow> let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using Cons
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch1 ?state' clause = Some ?w1\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>getWatch2 ?state' clause = Some ?w2\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> Some literal = getWatch1 state clause\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalTrue ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using None
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
using \<open>\<not> literalFalse ?w1 (elements (getM ?state'))\<close>
[PROOF STATE]
proof (prove)
using this:
InvariantWatchesEl (getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch1 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>))) (getWatch2 (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)))
getF (setReason wa clause (state\<lparr>getQ := if wa el getQ state then getQ state else getQ state @ [wa]\<rparr>)) = getF state
\<lbrakk>InvariantWatchesEl (getF ?state) (getWatch1 ?state) (getWatch2 ?state); \<forall>c. c \<in> set Wl' \<longrightarrow> 0 \<le> c \<and> c < length (getF ?state)\<rbrakk> \<Longrightarrow> let state' = notifyWatches_loop literal Wl' ?newWl ?state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
InvariantWatchesEl (getF state) (getWatch1 state) (getWatch2 state)
\<forall>c. c \<in> set (clause # Wl') \<longrightarrow> 0 \<le> c \<and> c < length (getF state)
getWatch1 state clause = Some wa
getWatch2 state clause = Some wb
Some literal \<noteq> getWatch1 state clause
\<not> literalTrue wa (elements (getM state))
getNonWatchedUnfalsifiedLiteral (getF state ! clause) wa wb (getM state) = None
\<not> literalFalse wa (elements (getM state))
goal (1 subgoal):
1. let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
let state' = notifyWatches_loop literal (clause # Wl') newWl state in InvariantWatchesEl (getF state') (getWatch1 state') (getWatch2 state')
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 60965, "file": "SATSolverVerification_AssertLiteral", "length": 232}
|
import numpy as np
import scipy as sp
import openmdao.api as om
import random
from . import VariableType
def hyperplane_coefficients(points):
    """Fit a hyperplane to *points* by least squares.

    Each row of *points* is one sample; the last column is treated as the
    dependent value and the preceding columns as coordinates.  Returns the
    coefficient vector (one slope per coordinate plus an intercept term).
    """
    n_points = points.shape[0]
    # Design matrix: coordinate columns plus a constant column for the bias.
    design = np.column_stack((points[:, :-1], np.ones(n_points)))
    targets = points[:, -1]
    solution = sp.linalg.lstsq(design, targets)[0]
    return solution
def is_assertion_error(err, *args):
    """Return True when the exception-info triple *err* describes an
    AssertionError (err[0] is the exception class)."""
    exc_type = err[0]
    return issubclass(exc_type, AssertionError)
class NoiseComponent(om.ExplicitComponent):
    """Input-free component whose single output "y" is a fresh uniform sample.

    Handy in tests that need a nondeterministic source inside a model.
    """

    def setup(self):
        # One scalar output, no inputs.
        self.add_output("y")

    def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
        # Draw a new sample from [0, 1) on every evaluation.
        sample = random.random()
        outputs["y"] = sample
class PassthroughComponent(om.ExplicitComponent):
    """Copies its single input "in" straight to its single output "out".

    Either side may independently be continuous or discrete, so this
    component can bridge between the two variable kinds.
    """

    def initialize(self):
        self.options.declare("shape")
        self.options.declare("in_type", types=VariableType)
        self.options.declare("out_type", types=VariableType)

    def setup(self):
        shape = self.options["shape"]
        # Continuous variables get the declared shape; discrete ones start
        # with a None default value.
        if self.options["in_type"] is VariableType.CONTINUOUS:
            self.add_input("in", shape=shape)
        else:
            self.add_discrete_input("in", None)
        if self.options["out_type"] is VariableType.CONTINUOUS:
            self.add_output("out", shape=shape)
        else:
            self.add_discrete_output("out", None)

    def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
        # Pick the container matching each side's declared variable type.
        if self.options["in_type"] is VariableType.CONTINUOUS:
            source = inputs
        else:
            source = discrete_inputs
        if self.options["out_type"] is VariableType.CONTINUOUS:
            sink = outputs
        else:
            sink = discrete_outputs
        sink["out"] = source["in"]
|
{"hexsha": "e7ad3830bdb61702b4b8538d6abce23f65e4562f", "size": 1558, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/openmdao_omt/testing.py", "max_stars_repo_name": "ovidner/openmdao-utils", "max_stars_repo_head_hexsha": "dfc4041cac48bf7d1d4537c23a6d18c29339f6f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-17T14:15:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-17T14:15:44.000Z", "max_issues_repo_path": "src/openmdao_omt/testing.py", "max_issues_repo_name": "ovidner/openmdao-utils", "max_issues_repo_head_hexsha": "dfc4041cac48bf7d1d4537c23a6d18c29339f6f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/openmdao_omt/testing.py", "max_forks_repo_name": "ovidner/openmdao-utils", "max_forks_repo_head_hexsha": "dfc4041cac48bf7d1d4537c23a6d18c29339f6f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.16, "max_line_length": 103, "alphanum_fraction": 0.676508344, "include": true, "reason": "import numpy,import scipy", "num_tokens": 359}
|
"""Methods for computing, reading, and writing occlusion maps."""
import numpy
import netCDF4
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from ml4tc.machine_learning import neural_net
from ml4tc.machine_learning import gradcam
# Batch size used whenever the model is (re-)applied to predictors.
NUM_EXAMPLES_PER_BATCH = 32

# Dimension names in the output NetCDF file.
EXAMPLE_DIMENSION_KEY = 'example'
CYCLONE_ID_CHAR_DIM_KEY = 'cyclone_id_char'
GRID_ROW_DIMENSION_KEY = 'grid_row'
GRID_COLUMN_DIMENSION_KEY = 'grid_column'

# Global-attribute names in the output NetCDF file.
MODEL_FILE_KEY = 'model_file_name'
TARGET_CLASS_KEY = 'target_class'
HALF_WINDOW_SIZE_KEY = 'half_window_size_px'
STRIDE_LENGTH_KEY = 'stride_length_px'
FILL_VALUE_KEY = 'fill_value'

# Variable names in the output NetCDF file.
CYCLONE_IDS_KEY = 'cyclone_id_strings'
INIT_TIMES_KEY = 'init_times_unix_sec'
OCCLUSION_PROBS_KEY = 'occlusion_prob_matrix'
NORMALIZED_OCCLUSION_KEY = 'normalized_occlusion_matrix'
def get_occlusion_maps(
        model_object, predictor_matrices, target_class, half_window_size_px,
        stride_length_px, fill_value=0.):
    """Computes occlusion map for each example.

    For every grid position, the occlusion window centred there is filled with
    `fill_value` in the brightness-temperature tensor, the model is re-applied,
    and the resulting probability of `target_class` is recorded.

    E = number of examples
    T = number of input tensors to model
    M = number of rows in brightness-temperature grid
    N = number of columns in brightness-temperature grid

    :param model_object: Trained model (instance of `keras.models.Model` or
        `keras.models.Sequential`).
    :param predictor_matrices: length-T list of numpy arrays, formatted in the
        same way as the training data.  The first axis (i.e., the example axis)
        of each numpy array should have length E.
    :param target_class: Occlusion maps will be created for this class.
        Must be an integer in 0...(K - 1), where K = number of classes.
    :param half_window_size_px: Half-size of occlusion window (pixels).  If
        half-size is P, the full window will (2 * P + 1) rows by (2 * P + 1)
        columns.
    :param stride_length_px: Stride length for occlusion window (pixels).
    :param fill_value: Fill value.  Inside the occlusion window, all brightness
        temperatures will be assigned this value, to simulate missing data.
    :return: occlusion_prob_matrix: E-by-M-by-N numpy array of predicted
        probabilities after occlusion.
    :return: original_probs: length-E numpy array of predicted probabilities
        before occlusion.
    """

    # Check input args.  predictor_matrices[0] is the 5-D
    # brightness-temperature tensor; rows/columns are axes 1 and 2
    # (trailing axes are presumably time/channel -- TODO confirm).
    error_checking.assert_is_numpy_array(
        predictor_matrices[0], num_dimensions=5
    )
    error_checking.assert_is_integer(target_class)
    error_checking.assert_is_geq(target_class, 0)
    error_checking.assert_is_integer(half_window_size_px)
    error_checking.assert_is_geq(half_window_size_px, 0)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)
    error_checking.assert_is_not_nan(fill_value)

    num_examples = predictor_matrices[0].shape[0]
    num_grid_rows_orig = predictor_matrices[0].shape[1]
    num_grid_columns_orig = predictor_matrices[0].shape[2]

    # With stride S, only every S-th grid position gets its own window;
    # ceiling division covers the ragged edge of the grid.
    num_grid_rows_occluded = int(numpy.ceil(
        float(num_grid_rows_orig) / stride_length_px
    ))
    num_grid_columns_occluded = int(numpy.ceil(
        float(num_grid_columns_orig) / stride_length_px
    ))

    dimensions = (
        num_examples, num_grid_rows_occluded, num_grid_columns_occluded
    )
    occlusion_prob_matrix = numpy.full(dimensions, numpy.nan)

    for i in range(num_grid_rows_occluded):
        # Clamp the window centre to the last valid row.
        orig_row_index = min([
            i * stride_length_px,
            num_grid_rows_orig - 1
        ])

        print('Occluding windows centered on row {0:d} of {1:d}...'.format(
            orig_row_index, num_grid_rows_orig
        ))

        for j in range(num_grid_columns_occluded):
            orig_column_index = min([
                j * stride_length_px,
                num_grid_columns_orig - 1
            ])

            # Window bounds, clipped to the grid edges.
            first_row = max([orig_row_index - half_window_size_px, 0])
            last_row = min([
                orig_row_index + half_window_size_px + 1,
                num_grid_rows_orig
            ])
            first_column = max([orig_column_index - half_window_size_px, 0])
            last_column = min([
                orig_column_index + half_window_size_px + 1,
                num_grid_columns_orig
            ])

            # `+ 0.` makes a copy, so the caller's matrix is never mutated.
            new_brightness_temp_matrix = predictor_matrices[0] + 0.
            # Occlude all trailing axes wholesale within the window.
            new_brightness_temp_matrix[
                :, first_row:last_row, first_column:last_column, ...
            ] = fill_value

            if len(predictor_matrices) > 1:
                new_predictor_matrices = (
                    [new_brightness_temp_matrix] + predictor_matrices[1:]
                )
            else:
                new_predictor_matrices = [new_brightness_temp_matrix]

            this_prob_array = neural_net.apply_model(
                model_object=model_object,
                predictor_matrices=new_predictor_matrices,
                num_examples_per_batch=NUM_EXAMPLES_PER_BATCH,
                verbose=True
            )
            this_prob_array = numpy.squeeze(this_prob_array)

            if len(this_prob_array.shape) == 1:
                # Binary case: model outputs only the positive-class
                # probability; the negative class is its complement.
                error_checking.assert_is_leq(target_class, 1)

                if target_class == 1:
                    occlusion_prob_matrix[:, i, j] = this_prob_array
                else:
                    occlusion_prob_matrix[:, i, j] = 1. - this_prob_array
            else:
                # Multi-class case: one column per class.
                num_classes = this_prob_array.shape[1]
                error_checking.assert_is_less_than(target_class, num_classes)

                occlusion_prob_matrix[:, i, j] = (
                    this_prob_array[:, target_class]
                )

    if stride_length_px > 1:
        # Upsample the strided (coarse) maps back to predictor resolution.
        occlusion_prob_matrix_coarse = occlusion_prob_matrix + 0.
        dimensions = (num_examples, num_grid_rows_orig, num_grid_columns_orig)
        occlusion_prob_matrix = numpy.full(dimensions, numpy.nan)

        for i in range(num_examples):
            if numpy.mod(i, 100) == 0:
                print((
                    'Have upsampled {0:d} of {1:d} occlusion maps to predictor '
                    'resolution...'
                ).format(
                    i, num_examples
                ))

            occlusion_prob_matrix[i, ...] = gradcam._upsample_cam(
                class_activation_matrix=occlusion_prob_matrix_coarse[i, ...],
                new_dimensions=numpy.array(
                    [num_grid_rows_orig, num_grid_columns_orig], dtype=int
                )
            )

        # Interpolation may overshoot slightly; clip back into [0, 1].
        occlusion_prob_matrix = numpy.maximum(occlusion_prob_matrix, 0.)
        occlusion_prob_matrix = numpy.minimum(occlusion_prob_matrix, 1.)

        print((
            'Have upsampled all {0:d} occlusion maps to predictor resolution!'
        ).format(
            num_examples
        ))

        del occlusion_prob_matrix_coarse

    # Baseline (unoccluded) probabilities for the same examples.
    original_prob_array = neural_net.apply_model(
        model_object=model_object,
        predictor_matrices=predictor_matrices,
        num_examples_per_batch=NUM_EXAMPLES_PER_BATCH,
        verbose=True
    )

    if len(original_prob_array.shape) == 1:
        error_checking.assert_is_leq(target_class, 1)

        if target_class == 1:
            original_probs = original_prob_array
        else:
            original_probs = 1. - original_prob_array
    else:
        original_probs = original_prob_array[:, target_class]

    return occlusion_prob_matrix, original_probs
def normalize_occlusion_maps(occlusion_prob_matrix, original_probs):
    """Normalizes occlusion maps (scales to range -inf...1).

    Each output value is the normalized *decrease* in probability,
    (original - occluded) / original.  A value of 1 means that probability
    decreases all the way to zero; a value of 0 means that probability does
    not decrease at all; a value of -1 means that probability doubles; ...;
    etc.  Examples whose original probability is zero get 0 everywhere,
    since the ratio is undefined there.

    :param occlusion_prob_matrix: See output doc for `get_occlusion_maps`.
    :param original_probs: Same.
    :return: normalized_occlusion_matrix: numpy array with same shape as
        `occlusion_prob_matrix`.
    """

    error_checking.assert_is_numpy_array(
        occlusion_prob_matrix, num_dimensions=3
    )
    error_checking.assert_is_geq_numpy_array(occlusion_prob_matrix, 0.)
    error_checking.assert_is_leq_numpy_array(occlusion_prob_matrix, 1.)

    num_examples = occlusion_prob_matrix.shape[0]
    expected_dim = numpy.array([num_examples], dtype=int)

    error_checking.assert_is_numpy_array(
        original_probs, exact_dimensions=expected_dim
    )
    error_checking.assert_is_geq_numpy_array(original_probs, 0.)
    error_checking.assert_is_leq_numpy_array(original_probs, 1.)

    # Zero denominators are replaced by NaN so the division yields NaN there
    # instead of raising/inf; those entries are zeroed out at the end.
    denominators = numpy.where(
        original_probs == 0, numpy.nan, original_probs.astype(float)
    )

    # Broadcast the per-example denominator over the spatial grid.  This is
    # the vectorized equivalent of looping over examples one at a time.
    denominators = denominators[:, numpy.newaxis, numpy.newaxis]
    normalized_occlusion_matrix = (
        (denominators - occlusion_prob_matrix) / denominators
    )

    normalized_occlusion_matrix[numpy.isnan(normalized_occlusion_matrix)] = 0.
    return normalized_occlusion_matrix
def write_file(
        netcdf_file_name, occlusion_prob_matrix, normalized_occlusion_matrix,
        cyclone_id_strings, init_times_unix_sec, model_file_name, target_class,
        half_window_size_px, stride_length_px, fill_value):
    """Writes occlusion maps to NetCDF file.

    E = number of examples
    M = number of rows in brightness-temperature grid
    N = number of columns in brightness-temperature grid

    :param netcdf_file_name: Path to output file.
    :param occlusion_prob_matrix: E-by-M-by-N numpy array of predicted
        probabilities after occlusion.
    :param normalized_occlusion_matrix: E-by-M-by-N numpy array of normalized
        *decreases* in probability.  For more details, see output doc for
        `normalize_occlusion_maps`.
    :param cyclone_id_strings: length-E list of cyclone IDs.
    :param init_times_unix_sec: length-E numpy array of forecast-init times.
    :param model_file_name: Path to file with neural net used to create
        occlusion maps (readable by `neural_net.read_model`).
    :param target_class: See doc for `get_occlusion_maps`.
    :param half_window_size_px: Same.
    :param stride_length_px: Same.
    :param fill_value: Same.
    """

    # Check input args.
    error_checking.assert_is_geq_numpy_array(occlusion_prob_matrix, 0.)
    error_checking.assert_is_leq_numpy_array(occlusion_prob_matrix, 1.)
    error_checking.assert_is_numpy_array(
        occlusion_prob_matrix, num_dimensions=3
    )

    # Normalized values have no lower bound (they may be arbitrarily
    # negative), so only the upper bound is checked.
    error_checking.assert_is_leq_numpy_array(normalized_occlusion_matrix, 1.)
    error_checking.assert_is_numpy_array(
        normalized_occlusion_matrix,
        exact_dimensions=numpy.array(occlusion_prob_matrix.shape, dtype=int)
    )

    num_examples = occlusion_prob_matrix.shape[0]
    expected_dim = numpy.array([num_examples], dtype=int)

    error_checking.assert_is_string_list(cyclone_id_strings)
    error_checking.assert_is_numpy_array(
        numpy.array(cyclone_id_strings), exact_dimensions=expected_dim
    )
    error_checking.assert_is_integer_numpy_array(init_times_unix_sec)
    error_checking.assert_is_numpy_array(
        init_times_unix_sec, exact_dimensions=expected_dim
    )

    error_checking.assert_is_string(model_file_name)
    error_checking.assert_is_integer(target_class)
    error_checking.assert_is_geq(target_class, 0)
    error_checking.assert_is_integer(half_window_size_px)
    error_checking.assert_is_geq(half_window_size_px, 0)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)
    error_checking.assert_is_not_nan(fill_value)

    # Write to NetCDF file.
    file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
    dataset_object = netCDF4.Dataset(
        netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET'
    )

    # Scalar metadata goes into global attributes.
    dataset_object.setncattr(MODEL_FILE_KEY, model_file_name)
    dataset_object.setncattr(TARGET_CLASS_KEY, target_class)
    dataset_object.setncattr(HALF_WINDOW_SIZE_KEY, half_window_size_px)
    dataset_object.setncattr(STRIDE_LENGTH_KEY, stride_length_px)
    dataset_object.setncattr(FILL_VALUE_KEY, fill_value)

    num_grid_rows = occlusion_prob_matrix.shape[1]
    num_grid_columns = occlusion_prob_matrix.shape[2]

    dataset_object.createDimension(EXAMPLE_DIMENSION_KEY, num_examples)
    dataset_object.createDimension(GRID_ROW_DIMENSION_KEY, num_grid_rows)
    dataset_object.createDimension(GRID_COLUMN_DIMENSION_KEY, num_grid_columns)

    # Cyclone IDs are stored as a fixed-width char matrix (NetCDF3 has no
    # variable-length string type).  Loop variable renamed from `id`, which
    # shadowed the builtin.
    if num_examples == 0:
        num_id_characters = 1
    else:
        num_id_characters = numpy.max(numpy.array([
            len(cyclone_id_string) for cyclone_id_string in cyclone_id_strings
        ]))

    dataset_object.createDimension(CYCLONE_ID_CHAR_DIM_KEY, num_id_characters)

    this_string_format = 'S{0:d}'.format(num_id_characters)
    cyclone_ids_char_array = netCDF4.stringtochar(numpy.array(
        cyclone_id_strings, dtype=this_string_format
    ))

    dataset_object.createVariable(
        CYCLONE_IDS_KEY, datatype='S1',
        dimensions=(EXAMPLE_DIMENSION_KEY, CYCLONE_ID_CHAR_DIM_KEY)
    )
    dataset_object.variables[CYCLONE_IDS_KEY][:] = numpy.array(
        cyclone_ids_char_array
    )

    dataset_object.createVariable(
        INIT_TIMES_KEY, datatype=numpy.int32, dimensions=EXAMPLE_DIMENSION_KEY
    )
    dataset_object.variables[INIT_TIMES_KEY][:] = init_times_unix_sec

    these_dim = (
        EXAMPLE_DIMENSION_KEY, GRID_ROW_DIMENSION_KEY,
        GRID_COLUMN_DIMENSION_KEY
    )

    dataset_object.createVariable(
        OCCLUSION_PROBS_KEY, datatype=numpy.float32, dimensions=these_dim
    )
    dataset_object.variables[OCCLUSION_PROBS_KEY][:] = occlusion_prob_matrix

    dataset_object.createVariable(
        NORMALIZED_OCCLUSION_KEY, datatype=numpy.float32, dimensions=these_dim
    )
    dataset_object.variables[NORMALIZED_OCCLUSION_KEY][:] = (
        normalized_occlusion_matrix
    )

    dataset_object.close()
def read_file(netcdf_file_name):
    """Reads occlusion maps from NetCDF file.

    :param netcdf_file_name: Path to input file.
    :return: occlusion_dict: Dictionary with the following keys.
    occlusion_dict['occlusion_prob_matrix']: See doc for `write_file`.
    occlusion_dict['normalized_occlusion_matrix']: Same.
    occlusion_dict['cyclone_id_strings']: Same.
    occlusion_dict['init_times_unix_sec']: Same.
    occlusion_dict['model_file_name']: Same.
    occlusion_dict['target_class']: Same.
    occlusion_dict['half_window_size_px']: Same.
    occlusion_dict['stride_length_px']: Same.
    occlusion_dict['fill_value']: Same.
    """

    dataset_object = netCDF4.Dataset(netcdf_file_name)

    # Comprehension variable renamed from `id`, which shadowed the builtin.
    occlusion_dict = {
        OCCLUSION_PROBS_KEY: dataset_object.variables[OCCLUSION_PROBS_KEY][:],
        NORMALIZED_OCCLUSION_KEY:
            dataset_object.variables[NORMALIZED_OCCLUSION_KEY][:],
        CYCLONE_IDS_KEY: [
            str(this_id) for this_id in
            netCDF4.chartostring(dataset_object.variables[CYCLONE_IDS_KEY][:])
        ],
        INIT_TIMES_KEY: dataset_object.variables[INIT_TIMES_KEY][:],
        MODEL_FILE_KEY: str(getattr(dataset_object, MODEL_FILE_KEY)),
        TARGET_CLASS_KEY: int(getattr(dataset_object, TARGET_CLASS_KEY)),
        HALF_WINDOW_SIZE_KEY:
            int(getattr(dataset_object, HALF_WINDOW_SIZE_KEY)),
        STRIDE_LENGTH_KEY: int(getattr(dataset_object, STRIDE_LENGTH_KEY)),
        FILL_VALUE_KEY: float(getattr(dataset_object, FILL_VALUE_KEY))
    }

    dataset_object.close()
    return occlusion_dict
|
{"hexsha": "333ed325682e2682481bc098cdaccb1e3fee3957", "size": 15608, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml4tc/machine_learning/occlusion.py", "max_stars_repo_name": "NOAA-GSL/ml4tc", "max_stars_repo_head_hexsha": "e9f8faa51e5bfb86b2a78648d7b1d0e61d09b6c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ml4tc/machine_learning/occlusion.py", "max_issues_repo_name": "NOAA-GSL/ml4tc", "max_issues_repo_head_hexsha": "e9f8faa51e5bfb86b2a78648d7b1d0e61d09b6c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml4tc/machine_learning/occlusion.py", "max_forks_repo_name": "NOAA-GSL/ml4tc", "max_forks_repo_head_hexsha": "e9f8faa51e5bfb86b2a78648d7b1d0e61d09b6c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5382716049, "max_line_length": 80, "alphanum_fraction": 0.7059841107, "include": true, "reason": "import numpy", "num_tokens": 3537}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 29 16:30:36 2020
@author: aparravi
"""
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import matplotlib.lines as lines
import pandas as pd
import numpy as np
import scipy.stats as st
from matplotlib.patches import Patch, Rectangle
from plot_exec_time import get_exp_label, get_upper_ci_size
from matplotlib.collections import PatchCollection, LineCollection
from matplotlib.lines import Line2D
from plot_utils import *
# Shared plot colors.
bt1 = "#55819E"
bt2 = "#538F6F"

# Result folders; the active pair selects which experiment run is plotted.
# DAC
FPGA_RESULT_FOLDER = "../../../../data/results/fpga/2020_11_21_15_07_03"
# Thesis
# FPGA_RESULT_FOLDER = "../../../../data/results/fpga/2021_08_14_19_20_18"
GPU_RESULT_FOLDER = "../../../../data/results/gpu/2020_11_19_15_39_53"
# FPGA_RESULT_FOLDER = "../../../../data/results/fpga/2020_11_22"
# GPU_RESULT_FOLDER = "../../../../data/results/gpu/2020_11_22"

# Date tag used in output file names.
DATE = "2021_08_14"

# Top-K cutoffs at which precision / Kendall's tau / NDCG are computed.
THRESHOLDS = [8, 16, 32, 50, 75, 100]

# Which distribution subset to plot.
# KIND = "uniform"  # Plot uniform + glove
# KIND = "gamma"  # Plot gamma
KIND = "all"  # Plot all
def read_data_fpga():
    """Parse FPGA Top-K SpMV result CSVs into DataFrames.

    Reads every CSV in FPGA_RESULT_FOLDER, decoding the experiment
    configuration from each file name and per-iteration measurements from the
    rows.  Precision, Kendall's tau, and NDCG are computed at each cutoff in
    THRESHOLDS by comparing hardware results against the software baseline.

    :return: tuple (res, aggregated) where `res` is the per-iteration
        DataFrame with outliers removed and `aggregated` holds per-configuration
        means.
    """
    result_list = []

    for f in os.listdir(FPGA_RESULT_FOLDER):
        res_file = os.path.join(FPGA_RESULT_FOLDER, f)
        if res_file.endswith(".csv"):
            with open(res_file) as file:
                result = file.readlines()[1:]

            # Parse the file name;
            hardware, rows, max_cols, distribution, nnz_per_row, n_bit, n_cores, mhz, k, n_iter = os.path.splitext(f)[0].split("_")
            n_cores = int(n_cores.replace("core", ""))

            # Fixed-point runs encode the bit-width (e.g. "32bit");
            # floating-point runs are labelled "float".
            try:
                n_bit = int(n_bit[:-3]) if n_bit != "float" else "F32"
            except ValueError:
                pass

            for r in result:
                # Rows come in three formats: 16 fields (leading empty
                # field), 15 fields (with full HW execution time), and 14
                # fields (without it).  BUG FIX: the original code had two
                # sibling `except ValueError` clauses on one `try`, so the
                # second fallback was unreachable and 14-field rows crashed;
                # the fallbacks are now nested.
                try:
                    _, iteration, error_idx, error_val, sw_full_time_ms, sw_topk_time_ms, \
                        hw_setup_time_ms, hw_exec_time_ms, full_hw_exec_time_ms, readback_time_ms, k, sw_res_idx, \
                        sw_res_val, hw_res_idx, hw_res_val = r.split(",")
                except ValueError:
                    try:
                        iteration, error_idx, error_val, sw_full_time_ms, sw_topk_time_ms, \
                            hw_setup_time_ms, hw_exec_time_ms, full_hw_exec_time_ms, readback_time_ms, k, sw_res_idx, \
                            sw_res_val, hw_res_idx, hw_res_val = r.split(",")
                    except ValueError:
                        iteration, error_idx, error_val, sw_full_time_ms, sw_topk_time_ms, \
                            hw_setup_time_ms, hw_exec_time_ms, readback_time_ms, k, sw_res_idx, \
                            sw_res_val, hw_res_idx, hw_res_val = r.split(",")
                k = int(k)

                # Process results;
                sw_res_idx = [int(x) for x in sw_res_idx.split(";")]
                sw_res_val = [float(x) for x in sw_res_val.split(";")]
                hw_res_idx = [int(x) for x in hw_res_idx.split(";")][:k]
                hw_res_val = [float(x) for x in hw_res_val.split(";")][:k]

                assert(len(sw_res_idx) == k)
                assert(len(sw_res_val) == k)
                assert(len(hw_res_idx) == k)
                assert(len(hw_res_val) == k)

                # Ranking-quality metrics at each Top-K cutoff.
                prec = []
                kendall = []
                ndcg_vals = []
                for t in THRESHOLDS:
                    set_cpu = set(sw_res_idx[:t])
                    set_fpga = set(hw_res_idx[:t])
                    prec += [len(set_cpu.intersection(set_fpga)) / t]
                    kendall += [kendall_tau(sw_res_idx[:t], hw_res_idx[:t])]
                    ndcg_vals += [ndcg(sw_res_idx[:t], sw_res_val[:t], hw_res_idx[:t], hw_res_val[:t])[0]]

                # Add the result line to the list;
                new_res_line = [hardware, int(rows), int(max_cols), distribution, int(nnz_per_row), str(n_bit), int(n_cores), int(iteration), int(n_iter), int(error_idx), int(error_val),
                                float(sw_full_time_ms), float(sw_topk_time_ms), float(hw_setup_time_ms),
                                float(hw_exec_time_ms), float(readback_time_ms), int(k)] + prec + kendall + ndcg_vals
                # Keep only plausible execution times (drops runs that hit a
                # slow outlier; the floating-point kernel is allowed more).
                if float(hw_exec_time_ms) <= (100 if n_bit != "F32" else 300):
                    result_list += [new_res_line]

    # Create a dataframe;
    result_df = pd.DataFrame(result_list,
                             columns=["hardware", "rows", "max_cols", "distribution", "nnz_per_row", "n_bit", "n_cores", "n_iter", "max_iter", "error_idx", "error_val",
                                      "sw_full_time_ms", "sw_topk_time_ms", "hw_setup_time_ms",
                                      "hw_exec_time_ms", "readback_time_ms", "k"]
                             + [f"prec_{x}" for x in THRESHOLDS]
                             + [f"kendall_{x}" for x in THRESHOLDS]
                             + [f"ndcg_{x}" for x in THRESHOLDS])

    # Remove outliers;
    res = remove_outliers_df_grouped(result_df, "hw_exec_time_ms", ["hardware", "rows", "max_cols", "distribution", "nnz_per_row", "n_bit", "n_cores"], reset_index=True, drop_index=True, sigmas=2)
    return res, res.groupby(["hardware", "rows", "max_cols", "distribution", "nnz_per_row", "n_bit", "n_cores"]).aggregate(np.mean).reset_index()
def read_data_gpu():
    """Load the GPU Top-K SpMV benchmark results from GPU_RESULT_FOLDER.

    Each CSV file name encodes the experiment configuration
    (hardware_rows_maxcols_distribution_nnzperrow_impl_halfprecision_k_niter);
    each CSV row holds per-iteration timings plus the software (golden) and
    hardware Top-K index/value lists, from which Precision, Kendall's tau
    and NDCG are computed for every threshold in THRESHOLDS.

    Returns:
        (res, agg): the per-iteration DataFrame with execution-time outliers
        removed, and the same data aggregated (mean) per configuration.
    """
    result_list = []
    for f in os.listdir(GPU_RESULT_FOLDER):
        res_file = os.path.join(GPU_RESULT_FOLDER, f)
        if not res_file.endswith(".csv"):
            continue
        with open(res_file) as file:
            result = file.readlines()[1:]  # Skip the CSV header line;
        # Parse the experiment configuration from the file name;
        hardware, rows, max_cols, distribution, nnz_per_row, impl, half_precision, k, n_iter = os.path.splitext(f)[0].split("_")
        n_cores = 56  # Fixed value used for all GPU runs;
        # FIX: the original wrapped this in try/except ValueError, but a string
        # comparison cannot raise ValueError, so the guard was dead code;
        n_bit = "F16" if half_precision == "True" else "F32"
        for r in result:
            iteration, error_idx, error_val, sw_full_time_ms, sw_topk_time_ms, \
                hw_setup_time_ms, hw_spmv_only_time_ms, hw_exec_time_ms, readback_time_ms, k, sw_res_idx, \
                sw_res_val, hw_res_idx, hw_res_val = r.split(",")
            k = int(k)  # The per-row k shadows the value parsed from the file name;
            # Decode the ";"-separated Top-K index/value lists; hardware lists
            # are truncated to k entries before validation;
            sw_res_idx = [int(x) for x in sw_res_idx.split(";")]
            sw_res_val = [float(x) for x in sw_res_val.split(";")]
            hw_res_idx = [int(x) for x in hw_res_idx.split(";")][:k]
            hw_res_val = [float(x) for x in hw_res_val.split(";")][:k]
            assert len(sw_res_idx) == k
            assert len(sw_res_val) == k
            assert len(hw_res_idx) == k
            assert len(hw_res_val) == k
            # Accuracy metrics, one value per Top-K threshold;
            prec = []
            kendall = []
            ndcg_vals = []
            for t in THRESHOLDS:
                set_cpu = set(sw_res_idx[:t])
                set_fpga = set(hw_res_idx[:t])
                prec += [len(set_cpu.intersection(set_fpga)) / t]
                kendall += [kendall_tau(sw_res_idx[:t], hw_res_idx[:t])]
                ndcg_vals += [ndcg(sw_res_idx[:t], sw_res_val[:t], hw_res_idx[:t], hw_res_val[:t])[0]]
            # Add the result line to the list;
            new_res_line = [hardware, int(rows), int(max_cols), distribution, int(nnz_per_row), str(n_bit), int(n_cores), impl, int(iteration), int(n_iter), int(error_idx), int(error_val),
                            float(sw_full_time_ms), float(sw_topk_time_ms), float(hw_setup_time_ms), float(hw_spmv_only_time_ms),
                            float(hw_exec_time_ms), float(readback_time_ms), int(k)] + prec + kendall + ndcg_vals
            result_list += [new_res_line]
    # Create a dataframe;
    result_df = pd.DataFrame(result_list,
                             columns=["hardware", "rows", "max_cols", "distribution", "nnz_per_row", "n_bit", "n_cores", "impl", "n_iter", "max_iter", "error_idx", "error_val",
                                      "sw_full_time_ms", "sw_topk_time_ms", "hw_setup_time_ms", "hw_spmv_only_time_ms",
                                      "hw_exec_time_ms", "readback_time_ms", "k"]
                             + [f"prec_{x}" for x in THRESHOLDS]
                             + [f"kendall_{x}" for x in THRESHOLDS]
                             + [f"ndcg_{x}" for x in THRESHOLDS])
    # Remove execution-time outliers (2 sigma) within each configuration;
    res = remove_outliers_df_grouped(result_df, "hw_exec_time_ms", ["hardware", "rows", "max_cols", "distribution", "nnz_per_row", "n_bit", "n_cores", "impl"], reset_index=True, drop_index=True, sigmas=2)
    return res, res.groupby(["hardware", "rows", "max_cols", "distribution", "nnz_per_row", "n_bit", "n_cores", "impl"]).aggregate(np.mean).reset_index()
def kendall_tau(reference_rank, predicted_rank):
# Items with correct relative rank;
c_plus = 0
# Items without correct relative rank;
c_minus = 0
# Items for which a ranking exists in the predicted rank;
c_s = 0
# Items for which a ranking exists in the reference rank;
c_u = 0
item_set = set(reference_rank + predicted_rank)
reference_rank_dict = {item: pos for pos, item in enumerate(reference_rank)}
predicted_rank_dict = {item: pos for pos, item in enumerate(predicted_rank)}
for i, item_1 in enumerate(item_set):
for j, item_2 in enumerate(item_set):
# Consider each pair exactly once;
if i >= j:
continue
else:
ref_found = False
pred_found = False
if item_1 in reference_rank_dict and item_2 in reference_rank_dict:
ref_found = True
c_u += 1
if item_1 in predicted_rank_dict and item_2 in predicted_rank_dict:
pred_found = True
c_s += 1
if ref_found and pred_found:
if (reference_rank_dict[item_1] - reference_rank_dict[item_2]) * (predicted_rank_dict[item_1] - predicted_rank_dict[item_2]) > 0:
c_plus += 1
else:
c_minus += 1
return (c_plus - c_minus) / (np.sqrt(c_u) * np.sqrt(c_s))
def ndcg(sw_res_idx, sw_res_val, hw_res_idx, hw_res_val):
    """Normalized Discounted Cumulative Gain of the hardware Top-K result
    against the software (golden) result.

    The software values act as relevance scores: each hardware index earns
    the software value associated with it (0 if the index does not appear
    in the software result), discounted by log2(position + 2). The ideal
    DCG is the same sum computed over the software result itself.

    Returns the tuple (dcg / idcg, dcg, idcg).
    """
    relevance_of = dict(zip(sw_res_idx, sw_res_val))
    dcg = 0
    for position, (idx, _unused) in enumerate(zip(hw_res_idx, hw_res_val)):
        dcg += relevance_of.get(idx, 0) / np.log2(position + 2)
    idcg = 0
    for position, (_unused, value) in enumerate(zip(sw_res_idx, sw_res_val)):
        idcg += value / np.log2(position + 2)
    return dcg / idcg, dcg, idcg
def plot_errors(agg_in):
    """Plot Top-K SpMV accuracy: one row of subplots per metric (Precision,
    Kendall's tau, NDCG) and one column per matrix size, comparing the FPGA
    bit-width configurations against the GPU F16 baseline.

    `agg_in` is copied, so the caller's DataFrame is not modified. Styling
    (y-limits, margins, legend) depends on the module-level KIND, and the
    figure is written out through save_plot at the end.
    """
    agg = agg_in.copy()
    z_added = False  # guard so the "z_" prefix below is applied only once;
    # Setup plot;
    plt.rcdefaults()
    sns.set_style("white", {"ytick.left": True})
    plt.rcParams["font.family"] = ["Latin Modern Roman Demi"]
    plt.rcParams['axes.titlepad'] = 40
    plt.rcParams['axes.labelpad'] = 4
    plt.rcParams['axes.titlesize'] = 22
    plt.rcParams['axes.labelsize'] = 14
    plt.rcParams['xtick.major.pad'] = 10
    plt.rcParams['mathtext.fontset'] = "cm"
    error_metrics = ["Precision", "Kendall's " + r"$\mathbf{\tau}$", "NDCG"]
    error_metrics_raw = ["prec", "kendall", "ndcg"]
    error_max = [1, 1, 1]
    # Per-metric lower y-limits, tuned by distribution KIND;
    # NOTE(review): KIND values other than "uniform"/"all"/"gamma" leave
    # error_min unbound — confirm KIND is restricted to those values;
    # DAC
    if KIND == "uniform" or KIND == "all":
        error_min = [0.96, 0.95, 0.96]
    elif KIND == "gamma":
        error_min = [0.80, 0.80, 0.80]
    # Thesis
    # error_min = [0.99, 0.99, 0.99]
    sizes = sorted(agg["rows"].unique())
    # Rotate the smallest size to the last column (it is the one annotated
    # "Sparse GloVe" below);
    sizes = sizes[1:] + [sizes[0]]
    num_col = len(sizes)
    num_rows = len(error_metrics)
    fig = plt.figure(figsize=(1.7 * num_col, (1.4 if KIND == "uniform" else 1.6) * num_rows))
    gs = gridspec.GridSpec(num_rows, num_col)
    plt.subplots_adjust(top=0.9,
                    bottom=0.15 if KIND == "uniform" else 0.25,
                    left=0.13 if KIND == "gamma" else 0.11,
                    right=0.97,
                    hspace=0.5,
                    wspace=0.5)
    # markers = ["o", "X", "D", "P", "P"]
    # palette = [COLORS["peach1"], COLORS["bb2"], "#A5E6C6", COLORS["bb5"], COLORS["bb5"]]
    # palette = [COLORS["bb2"], "#A5E6C6", COLORS["bb5"], COLORS["peach1"]]
    palette = ["#E7F7DF", "#B5E8B5", "#71BD9D", "#469C94"]
    # NOTE: the second assignment below immediately replaces this palette_dict;
    palette_dict = {"z_F16": COLORS["peach1"], "F32": COLORS["bb5"], "32":"#A5E6C6", "26": COLORS["bb3"], "20": COLORS["bb2"]}
    palette_dict = {"z_F16": "#ED9E6F", "F32": palette[0], "32": palette[1], "26": palette[2], "20": palette[3]}
    markers_dict = {"z_F16": "P", "F32": "D", "32": "X", "26": "^", "20": "o"}
    palette = list(palette_dict.values())[::-1]
    markers = list(markers_dict.values())[::-1]
    # Prefix GPU rows with "z_" so "F16" sorts after the FPGA bit-widths in
    # the alphabetical hue order used below;
    if not z_added:
        z_added = True
        agg.loc[agg["hardware"] == "gpu", "n_bit"] = "z_" + agg.loc[agg["hardware"] == "gpu", "n_bit"]
    # One row per graph;
    for i, size in enumerate(sizes):
        data = agg[agg["rows"] == size]
        # Long format: one row per (n_bit, metric_threshold) combination;
        data = data.melt(id_vars=["n_bit"], value_vars=[e + "_" + str(d) for e in error_metrics_raw for d in THRESHOLDS])
        data["error_type"] = [s.split("_")[0] for s in data["variable"]]
        data["error_size"] = [int(s.split("_")[1]) for s in data["variable"]]
        # One column per error metric;
        for j, e in enumerate(error_metrics_raw):
            curr_data = data[data["error_type"] == e]
            # NOTE(review): the next line discards the error_type filter just
            # applied (it filters `data`, not `curr_data`) and compares the
            # threshold value (8..100) with error_min (< 1), keeping every row;
            # presumably it was meant to be
            # curr_data = curr_data[curr_data["value"] >= error_min[j]] — confirm;
            curr_data = data[data["error_size"] >= error_min[j]]
            curr_data["error_size"] = curr_data["error_size"].astype(str)
            # data = group[1].sort_values(["n_bit"], ascending=False).reset_index(drop=True)
            order = sorted(data["n_bit"].unique(), reverse=False)
            ax = fig.add_subplot(gs[j, i])
            colors = len(curr_data["n_bit"].unique())
            ax = sns.lineplot(x="error_size", y="value", hue="n_bit", data=curr_data, ax=ax, sort=False, palette=palette_dict,
                              err_style="bars", linewidth=2, legend=False, zorder=2, ci=None, hue_order=order, clip_on=False)
            data_averaged = curr_data.groupby(["n_bit", "error_size"], as_index=False).mean()
            ax = sns.scatterplot(x="error_size", y="value", hue="n_bit", data=data_averaged, ax=ax, edgecolor="#0f0f0f", palette=palette_dict,
                                 size_norm=30, legend=False, zorder=3, ci=None, markers=markers_dict, style="n_bit", linewidth=0.05, hue_order=order, style_order=order, clip_on=False)
            ax.set_ylim([error_min[j], error_max[j]])
            # ax.set_xlim([min(curr_data["n_bit"]), max(curr_data["n_bit"])])
            ax.set_xlabel(None)
            if i == 0:
                ax.set_ylabel(f"{error_metrics[j]}", fontsize=12)
            else:
                ax.set_ylabel(None)
            # Matrix name (last column is the GloVe matrix);
            if j == 0:
                ax.annotate(f"{get_exp_label(sizes[i], 'N=', True)}" if i < 3 else "Sparse GloVe", xy=(0.5 if i < 3 else 0.4, 1), xycoords="axes fraction", fontsize=12, textcoords="offset points", xytext=(0, 15),
                            horizontalalignment="center", verticalalignment="center")
            ax.yaxis.set_major_locator(plt.LinearLocator(5))
            # sns.despine(ax=ax)
            ax.xaxis.grid(False)
            # if i > 0:
            #     sns.despine(ax=ax, left=False, top=True, right=True)
            ax.yaxis.grid(True)
            # Percentage labels for Precision, raw values for the other metrics;
            if j == 0:
                ax.set_yticklabels(labels=[f"{int(l * 100)}%" for l in ax.get_yticks()], ha="right")
            else:
                ax.set_yticklabels(labels=[f"{l:.3f}" for l in ax.get_yticks()], ha="right")
            # sns.despine(ax=ax)
            ax.tick_params(labelcolor="black", labelsize=9, pad=2)
            ax.tick_params(axis='x', which='major', rotation=0, labelcolor="black", labelsize=9, pad=2)
            for tic in ax.xaxis.get_major_ticks():
                tic.tick1line.set_visible(True)
    plt.annotate("Top-K (from 8 to 100)", fontsize=12, xy=(0.5, 0.04 if KIND == "uniform" else 0.17), xycoords="figure fraction", ha="center")
    # fig.suptitle("Top-K SpMV accuracy for\ndifferent architectures",
    #              fontsize=16, ha="left", x=0.03)
    # plt.annotate("(higher is better)", fontsize=14, xy=(0.03, 0.86), xycoords="figure fraction", ha="left")
    # Legend;
    if KIND != "uniform":
        # NOTE(review): label says "FPGA 25b" while the data kept in __main__
        # includes the "26"-bit configuration — confirm which is correct;
        labels = ["FPGA 20b", "FPGA 25b", "FPGA 32b", "FPGA F32", "GPU F16"]
        custom_lines = [
            Line2D([], [], color="white", marker=markers[i],
                   markersize=10, label=labels[i], markerfacecolor=palette[i], markeredgecolor="#2f2f2f") for i in range(len(markers))
            ]
        labels, custom_lines = transpose_legend_labels(labels, custom_lines)
        leg = fig.legend(custom_lines,labels,
                         bbox_to_anchor=(0.5, 0), fontsize=12, ncol=3, handletextpad=0.3, loc="lower center", columnspacing=0.4)
        leg.set_title(None)
        leg._legend_box.align = "left"
    save_plot("../../../../data/plots", f"errors_{KIND}_{DATE}" + ".{}")
if __name__ == "__main__":
    # Load FPGA and GPU results (per-iteration frame + per-config means);
    res_fpga, agg_fpga = read_data_fpga()
    res_gpu, agg_gpu = read_data_gpu()

    #%%
    # Filter wrong output data;
    old_size = len(agg_fpga)
    # agg_fpga = agg_fpga[agg_fpga["prec_100"] > 0.2]
    if len(agg_fpga) < old_size:
        # FIX: report the removed-row count as a positive number
        # (the original printed len(agg_fpga) - old_size, which is <= 0);
        print(f"warning: removed {old_size - len(agg_fpga)} rows with low precision")
    # Keep only the configurations shown in the plots;
    agg_fpga = agg_fpga[agg_fpga["n_bit"].isin(["20", "26", "32", "F32"])]
    agg_gpu = agg_gpu[agg_gpu["n_bit"] == "F16"]
    agg_gpu = agg_gpu[agg_gpu["impl"] == "0"]
    agg = pd.concat([agg_fpga, agg_gpu], ignore_index=True).reset_index(drop=True)
    agg = agg[agg["max_cols"] != 512]
    old_size = len(agg)
    # agg = agg[agg["prec_100"] > 0.9]
    if len(agg) < old_size:
        # FIX: same sign error as above;
        print(f"warning: removed {old_size - len(agg)} rows with low precision")
    # Use only uniform + glove;
    if KIND == "uniform":
        agg = agg[agg["distribution"].isin(["uniform", "glove"])]
    elif KIND == "gamma":
        # Use only gamma;
        agg = agg[agg["distribution"] == "gamma"]

    #%%
    # NOTE(review): this loop rebinds the module-level KIND read by
    # plot_errors (axis limits, legend), but `agg` was filtered above using
    # the KIND value active at that time and is NOT re-filtered per
    # iteration — confirm this is intended;
    for KIND in ["uniform", "gamma", "all"]:
        plot_errors(agg)
#%%
# Setup plot;
# plt.rcParams['mathtext.fontset'] = "cm"
# error_metrics = ["Precision", "Kendall's " + r"$\mathbf{\tau}$", "NDCG"]
# error_metrics_raw = ["prec", "kendall", "ndcg"]
# error_max = [1, 1, 1]
# error_min = [0.4, 0.4, 0.8]
# sizes = sorted(agg["rows"].unique())[-3:]
# num_col = len(sizes) * 2
# num_rows = len(error_metrics)
# fig = plt.figure(figsize=(1.1 * num_col, 1.8 * num_rows))
# gs = gridspec.GridSpec(num_rows, num_col)
# plt.subplots_adjust(top=0.72,
# bottom=0.12,
# left=0.2,
# right=0.95,
# hspace=0.5,
# wspace=0.1)
# markers = [["o", "X", "D", "P"], ["X", "D", "P"]]
# palette = [[COLORS["peach1"], COLORS["bb2"], "#A5E6C6", COLORS["bb5"]], [COLORS["bb2"], "#A5E6C6", COLORS["bb5"]]]
# agg["group"] = [1 if (x[0] == 16) else 0 for x in zip(agg["n_cores"], agg["n_bit"])]
# # One row per graph;
# for i in range(num_col):
# g = i % 2
# size = sizes[i // 2]
# data = agg[agg["group"] == g]
# data = data[data["rows"] == size]
# data = data.melt(id_vars=["n_bit", "n_cores"], value_vars=[e + "_" + str(d) for e in error_metrics_raw for d in THRESHOLDS])
# data["error_type"] = [s.split("_")[0] for s in data["variable"]]
# data["error_size"] = [int(s.split("_")[1]) for s in data["variable"]]
# # One column per error metric;
# for j, e in enumerate(error_metrics_raw):
# curr_data = data[data["error_type"] == e]
# curr_data["error_size"] = curr_data["error_size"].astype(str)
# # data = group[1].sort_values(["n_bit"], ascending=False).reset_index(drop=True)
# order = sorted(data["n_bit"].unique(), reverse=True)
# ax = fig.add_subplot(gs[j, i])
# ax = sns.lineplot(x="error_size", y="value", hue="n_bit", data=curr_data, ax=ax, sort=False, palette=palette[g],
# err_style="bars", linewidth=2, legend=False, zorder=2, ci=None, hue_order=order)
# data_averaged = curr_data.groupby(["n_bit", "error_size"], as_index=False).mean()
# ax = sns.scatterplot(x="error_size", y="value", hue="n_bit", data=data_averaged, ax=ax, edgecolor="#0f0f0f", palette=palette[g],
# size_norm=30, legend=False, zorder=3, ci=None, markers=markers[g], style="n_bit", linewidth=0.05, hue_order=order, style_order=order)
# ax.set_ylim([error_min[j], error_max[j]])
# # ax.set_xlim([min(curr_data["n_bit"]), max(curr_data["n_bit"])])
# ax.set_xlabel(None)
# if i == 0:
# ax.set_ylabel(f"{error_metrics[j]}", fontsize=12)
# else:
# ax.set_ylabel(None)
# # Matrix name;
# if j == 0:
# ax.annotate(r"$\mathdefault{N=10^" + f"{int(np.log10(sizes[i // 2]))}" + r"}$",
# xy=(0.5, 1), xycoords="axes fraction", fontsize=14, textcoords="offset points", xytext=(0, 15),
# horizontalalignment="center", verticalalignment="center")
# ax.yaxis.set_major_locator(plt.LinearLocator(5))
# sns.despine(ax=ax)
# ax.xaxis.grid(False)
# if i > 0: # Hide tick markers;
# for tic in ax.yaxis.get_major_ticks():
# tic.tick1line.set_visible(False)
# tic.tick2line.set_visible(False)
# ax.set_yticklabels([])
# # ax.get_yaxis().set_visible(False)
# sns.despine(ax=ax, left=True, top=True, right=True)
# ax.yaxis.grid(True)
# # if j == 2:
# # ax.set_yticklabels(labels=[f"{int(l * 100)}%" for l in ax.get_yticks()], ha="right")
# # Turn off tick lines;
# # sns.despine(ax=ax)
# ax.tick_params(labelcolor="black", labelsize=10, pad=6)
# ax.tick_params(axis='x', which='major', labelsize=10, rotation=0)
# plt.annotate("Top-K Value", fontsize=14, xy=(0.5, 0.015), xycoords="figure fraction", ha="center")
# fig.suptitle("Top-K SpMV accuracy for\ndifferent architectures",
# fontsize=16, ha="left", x=0.03)
# plt.annotate("(higher is better)", fontsize=14, xy=(0.03, 0.86), xycoords="figure fraction", ha="left")
# # Legend;
# # labels = ["Float 32, 16 cores", "32 bits, 16 cores", "24 bits, 28 cores", "20 bits, 32 cores", ]
# # custom_lines = [
# # Line2D([], [], color="white", marker=markers[0],
# # markersize=10, label=labels[0], markerfacecolor=palette[0], markeredgecolor="#2f2f2f"),
# # Line2D([], [], color="white", marker=markers[1],
# # markersize=10, label=labels[1], markerfacecolor=palette[1], markeredgecolor="#2f2f2f"),
# # Line2D([], [], color="white", marker=markers[2],
# # markersize=10, label=labels[2], markerfacecolor=palette[2], markeredgecolor="#2f2f2f"),
# # Line2D([], [], color="white", marker=markers[3],
# # markersize=10, label=labels[3], markerfacecolor=palette[3], markeredgecolor="#2f2f2f"),
# # ]
# # leg = fig.legend(custom_lines,labels,
# # bbox_to_anchor=(0.98, 1), fontsize=12, ncol=1)
# # leg.set_title(None)
# # leg._legend_box.align = "left"
# plt.savefig(f"../../../../data/plots/errors_2_{DATE}.pdf")
|
{"hexsha": "47104339ad9a180f917bb6bdcd546e859db0c97b", "size": 24781, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/resources/python/plotting/plot_errors.py", "max_stars_repo_name": "AlbertoParravicini/approximate-spmv-topk", "max_stars_repo_head_hexsha": "f98dd1846d81a5c21faa9af48bb5b531543424c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-04-06T08:41:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T22:19:37.000Z", "max_issues_repo_path": "src/resources/python/plotting/plot_errors.py", "max_issues_repo_name": "necst/approximate-spmv-topk", "max_issues_repo_head_hexsha": "f98dd1846d81a5c21faa9af48bb5b531543424c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-01T02:10:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-29T13:36:57.000Z", "max_forks_repo_path": "src/resources/python/plotting/plot_errors.py", "max_forks_repo_name": "necst/approximate-spmv-topk", "max_forks_repo_head_hexsha": "f98dd1846d81a5c21faa9af48bb5b531543424c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-03-07T17:45:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T01:44:04.000Z", "avg_line_length": 48.2120622568, "max_line_length": 212, "alphanum_fraction": 0.5363786772, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6643}
|
"Update the header bounding box and count based on point data"
function update!(h::LasHeader, pvec::Vector{T}) where T <: LasPoint
    # Running extremes of the point coordinates seen so far.
    lo_x, lo_y, lo_z = Inf, Inf, Inf
    hi_x, hi_y, hi_z = -Inf, -Inf, -Inf
    for p in pvec
        px, py, pz = xcoord(p, h), ycoord(p, h), zcoord(p, h)
        # Explicit comparisons (rather than min/max) keep the original
        # behavior, including how non-finite coordinates are treated.
        px < lo_x && (lo_x = px)
        py < lo_y && (lo_y = py)
        pz < lo_z && (lo_z = pz)
        px > hi_x && (hi_x = px)
        py > hi_y && (hi_y = py)
        pz > hi_z && (hi_z = pz)
    end
    # Write the bounding box and the point count back into the header.
    h.x_min = lo_x
    h.y_min = lo_y
    h.z_min = lo_z
    h.x_max = hi_x
    h.y_max = hi_y
    h.z_max = hi_z
    h.records_count = length(pvec)
    nothing
end
|
{"hexsha": "098dd6dee516e084703810f38109d567ca5ee747", "size": 800, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/util.jl", "max_stars_repo_name": "vernimmen/LasIO.jl", "max_stars_repo_head_hexsha": "bf4ae3f87c77fa2628da36c47e9bbdc049d2687e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2016-10-04T02:08:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T15:42:40.000Z", "max_issues_repo_path": "src/util.jl", "max_issues_repo_name": "vernimmen/LasIO.jl", "max_issues_repo_head_hexsha": "bf4ae3f87c77fa2628da36c47e9bbdc049d2687e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2016-11-10T09:11:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-27T18:01:29.000Z", "max_forks_repo_path": "src/util.jl", "max_forks_repo_name": "vernimmen/LasIO.jl", "max_forks_repo_head_hexsha": "bf4ae3f87c77fa2628da36c47e9bbdc049d2687e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2016-11-10T08:03:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-09T06:57:09.000Z", "avg_line_length": 22.8571428571, "max_line_length": 67, "alphanum_fraction": 0.48625, "num_tokens": 261}
|
import numpy as np
from sigmoid import sigmoid
def predict(Theta1, Theta2, X):
    """Return the predicted class labels for X, given the trained weight
    matrices (Theta1, Theta2) of a two-layer neural network.

    X may be a single example (1-D array) or a batch (2-D array, one
    example per row). Labels are 1-based: the index of the strongest
    output unit plus one. For a single example a scalar is returned.
    """
    # Promote a single example to a one-row batch;
    if X.ndim == 1:
        X = X.reshape(1, -1)
    m = X.shape[0]
    # Forward propagation: a bias column of ones is prepended before
    # each weighted layer.
    a1 = np.hstack((np.ones((m, 1)), X))                        # input layer
    a2 = np.hstack((np.ones((m, 1)), sigmoid(a1 @ Theta1.T)))   # hidden layer
    scores = sigmoid(a2 @ Theta2.T)                             # output layer
    p = np.argmax(scores, axis=1)
    if m == 1:
        p = p.squeeze()  # scalar result for a single example
    return p + 1  # add 1 to offset index of maximum in A row
|
{"hexsha": "2bdf2fc934208963eb168970500b5c921c3b8f4c", "size": 1320, "ext": "py", "lang": "Python", "max_stars_repo_path": "ex3/predict.py", "max_stars_repo_name": "junwon1994/Coursera-ML", "max_stars_repo_head_hexsha": "91e96c3c14c058cd6d745a4fada1baf40d91458f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-16T01:48:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-14T09:52:58.000Z", "max_issues_repo_path": "ex3/predict.py", "max_issues_repo_name": "junwon1994/Coursera-ML", "max_issues_repo_head_hexsha": "91e96c3c14c058cd6d745a4fada1baf40d91458f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex3/predict.py", "max_forks_repo_name": "junwon1994/Coursera-ML", "max_forks_repo_head_hexsha": "91e96c3c14c058cd6d745a4fada1baf40d91458f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9387755102, "max_line_length": 79, "alphanum_fraction": 0.5340909091, "include": true, "reason": "import numpy", "num_tokens": 353}
|
from sage.matrix.constructor import matrix
from sage.matrix.matrix import is_Matrix
from sage.rings.arith import legendre_symbol
from sage.rings.integer_ring import ZZ
def is_triangular_number(n):
    """
    Determine whether the integer n is triangular, i.e. whether
    n = a*(a+1)/2 for some natural number a, and return that a;
    otherwise return False.

    Note: as a convention, n=0 is considered triangular for the
    number a=0 only (and not for a=-1).

    WARNING: the value returned for n=0 is the integer zero, which
    tests as False, so one must test

        if is_triangular_number(n) != False:

    instead of

        if is_triangular_number(n):

    to get zero to appear triangular.

    INPUT:

    an integer

    OUTPUT:

    either False or a non-negative integer

    EXAMPLES:

        sage: is_triangular_number(3)
        2
        sage: is_triangular_number(1)
        1
        sage: is_triangular_number(2)
        False
        sage: is_triangular_number(0)
        0
        sage: is_triangular_number(-1)
        False
        sage: is_triangular_number(-11)
        False
        sage: is_triangular_number(-1000)
        False
        sage: is_triangular_number(-0)
        0
        sage: is_triangular_number(10^6 * (10^6 +1)/2)
        1000000
    """
    if n < 0:
        return False
    if n == 0:
        return ZZ(0)
    from sage.functions.all import sqrt
    ## n is triangular iff 1 + 8*n is a perfect (odd) square; in that
    ## case the root d yields a = (d - 1) / 2.
    try:
        d = ZZ(sqrt(1 + 8 * n))
    except Exception:
        return False
    return ZZ((d - ZZ(1)) / ZZ(2))
def extend_to_primitive(A_input):
    """
    Given a matrix (resp. list of vectors), extend it to a square
    matrix (resp. list of vectors), such that its determinant is the
    gcd of its minors (i.e. extend the basis of a lattice to a
    "maximal" one in Z^n).
    Author(s): Gonzalo Tornaria and Jonathan Hanke.
    INPUT:
    a matrix, or a list of length n vectors (in the same space)
    OUTPUT:
    a square matrix, or a list of n vectors (resp.)
    EXAMPLES:
        sage: A = Matrix(ZZ, 3, 2, range(6))
        sage: extend_to_primitive(A)
        [ 0  1  0]
        [ 2  3  0]
        [ 4  5 -1]
        sage: extend_to_primitive([vector([1,2,3])])
        [(1, 2, 3), (0, 1, 0), (0, 0, 1)]
    """
    ## Deal with a list of vectors: remember the input form so the
    ## output can be returned in the same form at the end.
    if not is_Matrix(A_input):
        A = matrix(A_input)      ## Make a matrix A with the given rows.
        vec_output_flag = True
    else:
        A = A_input
        vec_output_flag = False
    ## Arrange for A to have more columns than rows.
    if A.is_square():
        return A
    if A.nrows() > A.ncols():
        return extend_to_primitive(A.transpose()).transpose()
    ## Setup
    k = A.nrows()
    n = A.ncols()
    R = A.base_ring()
    # Smith normal form transformation, assuming more columns than rows
    # (smith_form() returns (D, U, V) with U*A*V in Smith form — see the
    # Sage Matrix.smith_form documentation);
    V = A.smith_form()[2]
    ## Extend the matrix in new coordinates, then switch back.
    B = A * V
    ## B_new fills the missing n-k rows with unit rows placed in the
    ## trailing coordinates (anti-diagonal ones);
    B_new = matrix(R, n-k, n)
    for i in range(n-k):
        B_new[i, n-i-1] = 1
    C = B.stack(B_new)
    D = C * V**(-1)
    ## DIAGNOSTIC
    #print "A = ", A, "\n"
    #print "B = ", B, "\n"
    #print "C = ", C, "\n"
    #print "D = ", D, "\n"
    # Normalize for a positive determinant by flipping the last row;
    if D.det() < 0:
        D.rescale_row(n-1, -1)
    ## Return the current information, matching the input form.
    if vec_output_flag:
        return D.rows()
    else:
        return D
def least_quadratic_nonresidue(p):
    """
    Returns the smallest positive integer quadratic non-residue in Z/pZ for primes p>2.
    EXAMPLES::
        sage: least_quadratic_nonresidue(5)
        2
        sage: [least_quadratic_nonresidue(p) for p in prime_range(3,100)]
        [2, 2, 3, 2, 2, 3, 2, 5, 2, 3, 2, 3, 2, 5, 2, 2, 2, 2, 7, 5, 3, 2, 3, 5]
    TESTS:
    Raises an error if input is a positive composite integer.
    ::
        sage: least_quadratic_nonresidue(20)
        Traceback (most recent call last):
        ...
        ValueError: Oops! p must be a prime number > 2.
    Raises an error if input is 2. This is because every integer is a
    quadratic residue modulo 2.
    ::
        sage: least_quadratic_nonresidue(2)
        Traceback (most recent call last):
        ...
        ValueError: Oops! There are no quadratic non-residues in Z/2Z.
    """
    from sage.functions.all import floor
    p1 = abs(p)
    ## Deal with the prime p = 2 and |p| <= 1.
    if p1 == 2:
        raise ValueError("Oops! There are no quadratic non-residues in Z/2Z.")
    if p1 < 2:
        raise ValueError("Oops! p must be a prime number > 2.")
    ## Find the smallest non-residue mod p
    ## For 7/8 of primes the answer is 2, 3 or 5 (supplementary laws of
    ## quadratic reciprocity); these checks come before the primality
    ## test for speed.
    ## NOTE(review): a composite p that matches one of these shortcuts
    ## (e.g. p=35, since 35 % 8 == 3) returns here without the
    ## "p must be a prime" error promised in the docstring — confirm
    ## callers only pass primes.
    if p%8 in (3,5):
        return ZZ(2)
    if p%12 in (5,7):
        return ZZ(3)
    if p%5 in (2,3):
        return ZZ(5)
    ## default case (first needed for p=71):
    if not p.is_prime():
        raise ValueError("Oops! p must be a prime number > 2.")
    ## Exhaustive search from 7 upward using the Legendre symbol;
    from sage.misc.misc import xsrange
    for r in xsrange(7,p):
        if legendre_symbol(r, p) == -1:
            return ZZ(r)
|
{"hexsha": "5d145b7688666198fa9ccc2f0dcf4576ea2b870f", "size": 5301, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/quadratic_forms/extras.py", "max_stars_repo_name": "bopopescu/classic_diff_geom", "max_stars_repo_head_hexsha": "2b1d88becbc8cb30962e0995cc78e429e0f5589f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sage/quadratic_forms/extras.py", "max_issues_repo_name": "bopopescu/classic_diff_geom", "max_issues_repo_head_hexsha": "2b1d88becbc8cb30962e0995cc78e429e0f5589f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/quadratic_forms/extras.py", "max_forks_repo_name": "bopopescu/classic_diff_geom", "max_forks_repo_head_hexsha": "2b1d88becbc8cb30962e0995cc78e429e0f5589f", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-24T12:08:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-24T12:08:30.000Z", "avg_line_length": 26.2425742574, "max_line_length": 87, "alphanum_fraction": 0.5804565176, "include": true, "reason": "from sage", "num_tokens": 1540}
|
\documentclass[8pt]{beamer}
\usepackage[utf8]{inputenc}
\usetheme{default}
\fontfamily{ppl}
\usetheme{Antibes}
\usecolortheme{spruce}
\usefonttheme{serif}
\title{APC Project: SimpleQuadTree}
\author{Thomas Bellotti}
\date{24 - 05 - 2019}
%\usepackage{mathpazo} % add possibly `sc` and `osf` options
\begin{document}
\begin{frame}
\maketitle
\end{frame}
\begin{frame}
\tableofcontents
\end{frame}
\section{Introduction}
\begin{frame}
\begin{center}
\begin{huge}
Introduction
\end{huge}
\end{center}
\end{frame}
\subsection{Aims of the project}
\begin{frame}
\frametitle{Aims of the project}
\pause
Original target:
\begin{block}{Aim 1}
Construct \textbf{highly adaptive Cartesian non-uniform meshes} on which we can perform \textbf{numerical quadrature} and eventually implement \textbf{numerical solvers for PDEs}, mainly based on the Finite Volume method.
\end{block}
\pause
After some work...
\begin{block}{Aim 2}
Implement a simple method to \textbf{compress digital images}, which recognizes large areas of almost uniform color. Could be eventually used also for shape recognition.
\end{block}
\pause
These two objectives may seem quite far from each other, but they can actually be linked by... \textbf{Quadtrees}.
\end{frame}
\subsection{Quadtrees}
\begin{frame}
\frametitle{What is a quadtree}
\pause
Basically just an unbalanced tree structure in which each node can have up to \textbf{four children}. It is generalized in 3D by octrees.
\pause
It has a clear geometrical counterpart in terms of AMR (Adaptive Mesh Refinement), where they are built by \textbf{recursively splitting} larger cells.
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.8\textwidth]{./figures/quadtree.eps}
\begin{tiny}
Image from: T. Bellotti, M. Theillard - A coupled level-set and reference map method for interface representation with applications to two-phase flows simulation - Volume 392, 2019, J. Comput. Phys.
\end{tiny}
\end{center}
\end{figure}
\end{frame}
\subsection{Some terminology}
\begin{frame}
\frametitle{Some terminology}
\pause
\begin{itemize}
\item \textbf{Leaf}: cell without children.\pause
\item \textbf{Level} (of a cell): number of times the largest cell has been split to generate the current cell.\pause
\item \textbf{Maximum level} ($\text{max}_{\text{level}}$): maximum number of allowed splits.
\item \textbf{Minimum level} ($\text{min}_{\text{level}}$): minimum number of necessary splits.
\end{itemize}
\end{frame}
\section{Models and tools to understand what follows}
\begin{frame}
\begin{center}
\begin{huge}
Models and tools to understand what follows
\end{huge}
\end{center}
\end{frame}
\subsection{Level-set theory}
\begin{frame}
\frametitle{Level-set theory}
\pause
In this work, we only see it as a \textbf{strategy to build} beautiful and meaningful \textbf{adaptive meshes} (there is more than this).\pause
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.7\textwidth]{./figures/levelset}
\begin{tiny}
Image from: T. Bellotti, M. Theillard - A coupled level-set and reference map method for interface representation with applications to two-phase flows simulation - Volume 392, 2019, J. Comput. Phys.
\end{tiny}
\end{center}
\end{figure}
\begin{flalign*}
\Gamma &= \left \{ \mathbf{x} \in \Omega ~ : ~ \phi(\mathbf{x}) = 0 \right \}, \nonumber \\
\Omega^- &= \left \{ \mathbf{x} \in \Omega ~ : ~ \phi(\mathbf{x}) < 0 \right \}, \label{eq:levelsetdef} \\
\Omega^+ &= \left \{ \mathbf{x} \in \Omega ~ : ~ \phi(\mathbf{x}) > 0 \right \}.\nonumber
\end{flalign*}
\end{frame}
\begin{frame}
\frametitle{Level-set theory}
It is probably the easiest way of \textbf{representing an interface} in a computationally efficient manner.
\pause
\begin{block}{Example}
In 2D, a circle centered in $(x_0, y_0)$ with radius $R$ can be represented by:
\begin{equation*}
\phi(x,y) = \sqrt{(x-x_0)^2 + (y-y_0)^2} - R.
\end{equation*}
\pause
\end{block}
Notice that, given $\Gamma$, the level-set is not unique, but this is not our problem now. It is unique if we assume (and we will do so) that $\phi(x,y)$ is nothing but the \textbf{signed distance} of $(x,y)$ from $\Gamma$.
\pause
\begin{equation*}
\text{split ~}\mathcal{C} ~ \text{if} \quad : \quad \left | \phi(\mathbf{x}_{\mathcal{C}}) \right | \leq \text{Lip}(\phi) \cdot \text{diag}(\mathcal{C}) \qquad \text{and} \qquad \text{level}(\mathcal{C}) \leq \textrm{max}_{\textrm{level}},
\end{equation*}
Informally, this means that we split a cell whenever its diagonal length exceeds the distance from the interface $\Gamma$ represented by the level-set $\phi$.
\end{frame}
\begin{frame}
And the result can be\dots Very nice!
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.5\textwidth]{./figures/integrator/pi_1.pdf}
\end{center}
\end{figure}
\end{frame}
\subsection{Gaussian quadrature}
\begin{frame}
\frametitle{Gaussian quadrature}
\pause
Considering a standard quadrilateral domain $\Omega = [-1,1]^2$ and given a possibly smooth function $f : \Omega \to \mathbb{R}$, we want to compute:
\begin{equation*}
\int_{\Omega} f(x,y)dxdy.
\end{equation*}
This can be done by quadrature formulae, based on simple evaluation of the function $f$ at certain points of the domain.
For our purpose, we use two choices:\pause
\begin{enumerate}
\item \textbf{``Naive'' formula}, with only one function evaluation.
\begin{equation*}
\int_{\Omega}f(x,y)dx dy \simeq |\Omega| f(0,0) = 4 f(0,0)
\end{equation*}\pause
\item \textbf{3rd order Gaussian formula}, with nine function evaluations (plus extra computations to adapt to a generic domain).
\begin{align*}
\int_{\Omega} f(x,y)dx dy &\simeq \frac{5}{81} \left [ 5 f \left (-\sqrt{\frac{3}{5}},-\sqrt{\frac{3}{5}} \right ) + 8 f \left (0,-\sqrt{\frac{3}{5}} \right ) + 5 f \left (\sqrt{\frac{3}{5}},-\sqrt{\frac{3}{5}} \right )\right ] \\
&+\frac{8}{81} \left [ 5 f \left (-\sqrt{\frac{3}{5}},0 \right ) + 8 f \left (0,0 \right ) + 5 f \left (\sqrt{\frac{3}{5}},0 \right )\right ] \\
&+\frac{5}{81} \left [ 5 f \left (-\sqrt{\frac{3}{5}},\sqrt{\frac{3}{5}} \right ) + 8 f \left (0,\sqrt{\frac{3}{5}} \right ) + 5 f \left (\sqrt{\frac{3}{5}},\sqrt{\frac{3}{5}} \right )\right ].
\end{align*}
\end{enumerate}
\end{frame}
\subsection{Rgb colors and their ``distance''}
\begin{frame}
\frametitle{Rgb colors and their ``distance''}
\pause
It is probably the simplest way of representing colors, as a combination of three components, \textbf{\textcolor{red}{red}, \textcolor{green}{green} and \textcolor{blue}{blue}}.
Thus, a color $C$ is nothing else than a triplet:
\begin{equation*}
C = \left [ \textcolor{red}{C_{\text{red}}}, \textcolor{green}{C_{\text{green}}}, \textcolor{blue}{C_{\text{blue}}} \right ],
\end{equation*}
where $\textcolor{red}{C_{\text{red}}},\textcolor{green}{C_{\text{green}}},\textcolor{blue}{C_{\text{blue}}} \in \{0,\dots, 255 \}$. In this way, we can represent 16,777,216 different tones.
\pause
For our purpose, it is useful to define the notion of \textbf{distance} between two colors, which can be defined in many ways. In our case, we use the simple ``corrected'' formula:
\begin{equation*}
d(C^1,C^2) = \sqrt{\textcolor{red}{2 \left (C^1_{\text{red}}-C^2_{\text{red}} \right )^2} + \textcolor{green}{4 \left (C^1_{\text{green}}-C^2_{\text{green}} \right )^2} + \textcolor{blue}{3 \left (C^1_{\text{blue}}-C^2_{\text{blue}} \right )^2}}.
\end{equation*}\pause
The mean color between $\left \{ C^1, \dots, C^N\right \} $ is defined by:
\begin{equation*}
\overline{C} \left ( \left \{ C^1, \dots, C^N\right \} \right ) = \left[ \textcolor{red}{\frac{1}{N} \sum_{n=1}^N C_{\text{red}}^n}, \textcolor{green}{\frac{1}{N} \sum_{n=1}^N C_{\text{green}}^n},\textcolor{blue}{ \frac{1}{N} \sum_{n=1}^N C_{\text{blue}}^n}\right ],
\end{equation*}\pause
and a sort of standard deviation as:
\begin{equation*}
\sigma \left ( \left \{ C^1, \dots, C^N\right \} \right ) = \frac{1}{N} \sqrt{\sum_{n=1}^N d(C_n,\overline{C})^2} \in [0,765].
\end{equation*}
\end{frame}
\begin{frame}
\frametitle{How to compress an image?}
\pause
Let $\mathcal{P}$ be a pre-leaf (a cell whose children are all leaves), with children $\mathcal{L}(\mathcal{P})$. We define a tolerance $0< \epsilon \ll 1$. Then:
\begin{equation*}
\text{merge} \quad \mathcal{P} \quad \text{if} ~ : ~ \sigma \left ( \mathcal{L}(\mathcal{P}) \right ) \leq 765 \cdot \epsilon.
\end{equation*}\pause
In the case of merging, we consider:
\begin{equation*}
C_{\mathcal{P}} = \overline{C} \left ( \mathcal{L}(\mathcal{P}) \right ).
\end{equation*}
\end{frame}
\section{Implementation}
\begin{frame}
\begin{center}
\begin{huge}
Implementation
\end{huge}
\end{center}
\end{frame}
\subsection{Classes}
\begin{frame}
\frametitle{The Cell class}
\pause
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.75\textwidth]{./figures/cell_h.eps}
\end{center}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{The Pixel class inheriting from Cell}\pause
The ``generalized'' pixel is basically a Cell...
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.6\textwidth]{./figures/pixel_h.eps}
\end{center}
\end{figure}
plus a \textbf{color}.
We have to be careful to \textbf{cast pointers} when we need to extract information from the Pixel.
\end{frame}
\begin{frame}
\frametitle{The QuadTree class}\pause
This class is mostly a \textbf{wrapper} of the Cell class, but it is what the user interacts with.
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.9\textwidth]{./figures/quadtree_h.eps}
\end{center}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{The Image class}\pause
This class is mostly a \textbf{wrapper} of the Pixel class.
We distinguished it from the QuadTree class (it does not inherit from it) because there are many features that they do not share.
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.5\textwidth]{./figures/image_h.eps}
\end{center}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{The way we update the mesh: the RefinementCriterion class}\pause
This is a very simple \textbf{abstract class} with an operator telling us if we have to split a Cell or not.
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.6\textwidth]{./figures/refinementcriterion_h.eps}
\end{center}
\end{figure}\pause
Many important criteria inherit from it:
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.6\textwidth]{./figures/RefineAlwaysCriterion_h.eps}
\includegraphics[width=0.6\textwidth]{./figures/LevelSetCriterion_h.eps}
\includegraphics[width=0.6\textwidth]{./figures/CriterionVariance_h.eps}
\end{center}
\end{figure}
\end{frame}
\subsection{Parallelization of the quadrature}
\begin{frame}
\frametitle{How we parallelize the quadrature}\pause
We want to take advantage of the \textbf{modular nature} of the quadtree structure in order to \textbf{avoid communications} between processes.
\pause
The key idea is to have a number of cores which is a power of 4 and a minimum level large enough, so that we can avoid communications between cores and each of them has a local tree (no shared memory).
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.65\textwidth]{./figures/parallel_integration.eps}
\end{center}
\end{figure}
And each subtree (core) integrates independently. At the very end, the sub-integrals are summed with a \textbf{reduce} procedure.
\end{frame}
\section{Tests and results}
\begin{frame}
\begin{center}
\begin{huge}
Tests and results
\end{huge}
\end{center}
\end{frame}
\subsection{Construction of highly adaptive grids}
\begin{frame}
\frametitle{We are able to construct very general meshes}\pause
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.35\textwidth]{./figures/level_set_color.pdf}
\includegraphics[width=0.52\textwidth]{./figures/mandelbrot_criterion_color.pdf} \\
\includegraphics[angle=90, width=0.6\textwidth]{./figures/space_criterion_color.pdf}
\end{center}
\end{figure}
\end{frame}
\subsection{Numerical quadrature}
\begin{frame}
\frametitle{We are able to perform parallel quadratures}
\pause
Computations have been tested on \textbf{4 cores} with the following specifications:
\begin{itemize}
\item Product: Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz
\end{itemize}
Taking:
\begin{equation*}
\text{min}_{\text{level}} = 3 \qquad \text{max}_{\text{level}} = 11,
\end{equation*}
so, for the uniform mesh, we deal with
\begin{equation*}
2^{11} \times 2^{11} \quad \text{cells} = 4'194'304 \quad \text{cells}.
\end{equation*}
Each time, we perform two tests and take the average time in order to avoid spurious effects.
\end{frame}
\begin{frame}\begin{footnotesize}
\begin{gather*}
\Omega = [-2,2]^2 \quad \phi(x,y) = \sqrt{x^2+y^2}-1 \quad f(x,y) = \mathbb{I}_{\{\phi(x,y) \leq 0\}} \quad
\int_{\Omega} f(x,y) dx dy = \pi.
\end{gather*}\end{footnotesize}
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.32\textwidth]{./figures/integrator/pi_1.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/pi_2.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/pi_3.pdf}
\end{center}
\end{figure}
\begin{footnotesize}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{Naive} & \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 6.585 & - & 6.639 & - & 7.279 & - \\
4 & 1.721 & \textbf{3.826} & 1.774 & \textbf{3.742} & 1.912 & \textbf{3.807} \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{3rd Gaussian}& \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 7.138 & - & 6.859 & - & 73.234 & - \\
4 & 1.856 & \textbf{3.846} & 1.917 & \textbf{3.578} & 18.980 & \textbf{3.858} \\
\hline
\end{tabular}
\end{center}
\end{footnotesize}
\end{frame}
\begin{frame}\begin{footnotesize}
\begin{gather*}
\Omega = [-2,2]^2 \quad f(x,y) = \frac{1}{2 \pi \sigma_x \sigma_y} e^{-\frac{1}{2} \left ( \frac{x^2}{\sigma_x^2}+\frac{y^2}{\sigma_y^2} \right )} \quad \sigma_x = 0.1 \quad \sigma_y = 0.05 \quad \int_{\Omega}f(x,y)dx dy \simeq 1.
\end{gather*}\end{footnotesize}
We refine in the ellipse within 10 standard deviations.
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.32\textwidth]{./figures/integrator/gauss_1.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/gauss_2.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/gauss_3.pdf}
\end{center}
\end{figure}
\begin{footnotesize}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{Naive} & \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 7.376 & - & 7.061 & - & 7.397 & - \\
4 & 1.922 & \textbf{3.838} & 1.975 & \textbf{3.575} & 2.007 & \textbf{3.686} \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{3rd Gaussian}& \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 13.873 & - & 10.410 & - & 74.364 & - \\
4 & 3.633 & \textbf{3.819} & 3.679 & \textbf{2.830} & 19.418 & \textbf{3.830} \\
\hline
\end{tabular}
\end{center}
\end{footnotesize}
\end{frame}
\begin{frame}\begin{footnotesize}
\begin{gather*}
\Omega = [-2,2]^2 \quad \phi(x,y) = \max{ \left \{ |x-0.25|-0.75, |y-0.25|-0.75 \right \} } \\
f(x,y) = (x^2+y^2) \left[ \cos{(\pi x)} + \sin{(\pi y) }\right ]\mathbb{I}_{ \{ \phi(x,y) \leq 0 \}} \quad \int_{\Omega} f(x,y) dx dy = \frac{3(-16 - 12 \pi + 7 \pi^2)}{8 \pi^3} \simeq 0.186109
\end{gather*}\end{footnotesize}
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.32\textwidth]{./figures/integrator/square_1.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/square_2.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/square_3.pdf}
\end{center}
\end{figure}
\begin{footnotesize}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{Naive} & \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 6.426 & - & 6.634 & - & 7.532 & - \\
4 & 1.686 & \textbf{3.811} & 1.757 & \textbf{3.776} & 1.948 & \textbf{3.867} \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{3rd Gaussian}& \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 7.057 & - & 6.914 & - & 75.551 & - \\
4 & 1.902 & \textbf{3.710} & 1.987 & \textbf{3.480} & 20.245 & \textbf{3.732} \\
\hline
\end{tabular}
\end{center}
\end{footnotesize}
\end{frame}
\begin{frame}
\begin{footnotesize}
\begin{gather*}
\Omega = [0,8]^2 \quad \phi(x,y) = \min_{i,j=0,\dots,3} \left ( \sqrt{(x-(2i+1))^2 + (y-(2j+1))^2}-0.15\right ) \quad f(x,y) = \mathbb{I}_{\{ \phi(x,y) \leq 0\}} \\
\int_{\Omega} f(x,y) dx dy = \frac{9 \pi}{25}
\end{gather*}
\end{footnotesize}
\begin{figure}[!h]
\begin{center}
\includegraphics[width=0.32\textwidth]{./figures/integrator/unif_1.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/unif_2.pdf}
\includegraphics[width=0.32\textwidth]{./figures/integrator/unif_3.pdf}
\end{center}
\end{figure}
\begin{footnotesize}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{Naive} & \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 8.145 & - & 7.445 & - & 12.063 & - \\
4 & 2.146 & \textbf{3.795} & 2.186 & \textbf{3.406} & 3.167 & \textbf{3.809} \\
\hline
\end{tabular}
\end{center}
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|}
\hline
\textbf{3rd Gaussian}& \multicolumn{2}{|c|}{Mesh 1} & \multicolumn{2}{|c|}{Mesh 2} & \multicolumn{2}{|c|}{Mesh 3}\\
\hline
\# of cores & Time [s] & Speedup & Time[s] & Speedup & Time [s] & Speedup \\
\hline
1 & 9.165 & - & 7.956 & - & 116.295 & - \\
4 & 2.375 & \textbf{3.859} & 2.449 & \textbf{3.249} & 30.707 & \textbf{3.787} \\
\hline
\end{tabular}
\end{center}
\end{footnotesize}
\end{frame}
\begin{frame}
\pause
We observe that:
\\
The inhomogeneity of the mesh does not play a very huge role since the time needed to construct and refine the mesh dominates over the time needed to integrate on it.
Nevertheless, where the inhomogeneity is really strong, we observe the most important differences.
\end{frame}
\subsection{Image compression}
\begin{frame}
Three different $\epsilon$: $\epsilon = 0.012, 0.024, 0.048$.
\pause
\begin{center}
\begin{figure}[!h]
\begin{footnotesize}
Original figures size: 262144 px.
\end{footnotesize}\\
\includegraphics[width=0.9\textwidth]{./figures/s1_comp_small} \\
\begin{footnotesize}131509 px - comp. ratio = 1.99 \hfill 84808 px - comp. ratio = 3.09 \hfill 38152 px - comp. ratio = 6.87 \end{footnotesize} \\
\pause
\includegraphics[width=0.9\textwidth]{./figures/s2_comp_small}\\
\begin{footnotesize}115438 px - comp. ratio = 2.27 \hfill 69259 px - comp. ratio = 3.78 \hfill 27394 px - comp. ratio = 9.57 \end{footnotesize} \\
\end{figure}
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\begin{figure}[!h]
\includegraphics[width=0.9\textwidth]{./figures/s3_comp_small} \\
\begin{footnotesize}32188 px - comp. ratio = 8.14 \hfill 16405 px - comp. ratio = 15.98 \hfill 6571 px - comp. ratio = 39.89 \end{footnotesize} \\
\pause
\includegraphics[width=0.9\textwidth]{./figures/s4_comp_small}\\
\begin{footnotesize}75172 px - comp. ratio = 3.49 \hfill 29890 px - comp. ratio = 8.77 \hfill 8902 px - comp. ratio = 29.45 \end{footnotesize} \\
\end{figure}
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\begin{figure}[!h]
\includegraphics[width=0.9\textwidth]{./figures/s6_comp_small}\\
\begin{footnotesize}171517 px - comp. ratio = 1.53 \hfill 85258 px - comp. ratio = 3.07 \hfill 24466 px - comp. ratio = 10.71
\end{footnotesize} \\
\end{figure}
\end{center}
\end{frame}
\section{Conclusions and perspectives}
\begin{frame}
\begin{center}
\begin{huge}
Conclusions and perspectives
\end{huge}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Conclusions}
\pause
\begin{itemize}
\item We are able to construct \textbf{highly adaptive meshes} with virtually \textbf{any criterion}, achieving a selective refinement where actually needed. \pause
\item We are capable of \textbf{integrating in a parallel fashion} on the quadtree using \textbf{different quadrature rules}. \pause
\item We can perform \textbf{image compression} in a naive way based on the \textbf{color variance}, which significantly reduces the size of the images.
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Perspective and main possible improvements}
\pause
\begin{itemize}
\item \textbf{Different way of storing the tree}: it is a trade-off between time-efficiency, memory-efficiency and possibility of recovering neighbors. \pause
\item Implement a \textbf{way of finding neighbors} (in our implementation, every cell should store a pointer to its father). \pause
\item Improve \textbf{data distribution between cores} when performing parallel quadratures, in order to exploit any number of physical processors. \pause
\item \textbf{Use external libraries} to import and export more ``user-friendly'' image formats (see .png)
\end{itemize}
\end{frame}
\begin{frame}
\begin{center}
\begin{Huge}
Thank you!
\end{Huge}
\end{center}
\end{frame}
\section{Bibliography}
\end{document}
|
{"hexsha": "b9a3b6ed69fd98bfd928a77ef16a48216f75733e", "size": 22159, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "presentation/slides.tex", "max_stars_repo_name": "tbellotti/SimpleQuadTree", "max_stars_repo_head_hexsha": "227b0ed368ca71849f9cdc0a58788d5b0dcfd2a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "presentation/slides.tex", "max_issues_repo_name": "tbellotti/SimpleQuadTree", "max_issues_repo_head_hexsha": "227b0ed368ca71849f9cdc0a58788d5b0dcfd2a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "presentation/slides.tex", "max_forks_repo_name": "tbellotti/SimpleQuadTree", "max_forks_repo_head_hexsha": "227b0ed368ca71849f9cdc0a58788d5b0dcfd2a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4457236842, "max_line_length": 267, "alphanum_fraction": 0.6831987003, "num_tokens": 7896}
|
"""Scarf Algotihm util functions."""
import numpy as np
def check_single_preflist(s, pair_list):
    """Sanity-check single doctor `s`'s preference list.

    Every entry must be a pair whose first element is `s`, and the list
    must end with the sentinel "unmatched" pair (s, -1).
    """
    assert all(len(entry) == 2 and entry[0] == s for entry in pair_list)
    assert pair_list[-1] == (s, -1)
def check_couple_preflist(c, pair_list):
    """Sanity-check couple `c`'s preference list.

    Every entry must be a triple whose first element is `c`, and the list
    must end with the sentinel "unmatched" triple (c, -1, -1).
    """
    for entry in pair_list:
        assert len(entry) == 3 and entry[0] == c
    assert pair_list[-1] == (c, -1, -1)
def check_hospital_preflist(h, pair_list):
    """Sanity-check hospital `h`'s preference list over pairs.

    Every entry must mention hospital `h`, and the list must end with the
    sentinel "empty slot" pair (-1, h).
    """
    # Bug fix: the original passed a generator expression to np.all, which
    # treats the generator object itself as a single truthy scalar, so the
    # membership check never actually ran.  Use the builtin all() instead.
    assert all(h in p for p in pair_list)
    assert pair_list[-1] == (-1, h)
def recover_pref_lists(num_single, num_couple, num_hospital, U,
                       pair_list):
    """Recover preference lists from a utility matrix.

    Rows of `U` are ordered singles first, then couples, then hospitals.
    A column (pair) is acceptable to an agent iff its utility entry is
    negative; larger (closer to zero) entries are more preferred.  Each
    agent's list ends with a sentinel "unmatched" pair, which is
    validated and then stripped.

    Args:
        num_single: number of single doctors.
        num_couple: number of couples.
        num_hospital: number of hospitals.
        U: utility matrix of shape
            (num_single + num_couple + num_hospital, len(pair_list)).
        pair_list: pair tuples indexing the columns of `U`.

    Returns:
        Tuple (single_pref_list, couple_pref_list, hospital_pref_list) of
        per-agent preference lists, most preferred first, sentinels
        removed.
    """
    assert U.shape[0] == num_single + num_couple + num_hospital
    assert U.shape[1] == len(pair_list)
    # (Removed unused local `num_hospital_pair` from the original.)
    # argsort of -U orders each row's columns by descending utility.
    orders_U = np.argsort(-U)
    single_pref_list, couple_pref_list, hospital_pref_list = [], [], []
    for s in range(num_single):
        cols = orders_U[s]
        # Keep only acceptable pairs (negative utility), best first.
        single_s_pair_list = [pair_list[col] for col in cols if U[s][col] < 0]
        check_single_preflist(s, single_s_pair_list)
        # Drop the trailing sentinel (s, -1); keep the hospital component.
        single_pref_list.append([p[1] for p in single_s_pair_list[:-1]])
    for c in range(num_couple):
        cols = orders_U[num_single + c]
        couple_c_pair_list = [pair_list[col] for col in cols
                              if U[num_single + c][col] < 0]
        check_couple_preflist(c, couple_c_pair_list)
        # Drop the trailing sentinel (c, -1, -1); keep the hospital pair.
        couple_pref_list.append([p[1:] for p in couple_c_pair_list[:-1]])
    for h in range(num_hospital):
        cols = orders_U[num_single + num_couple + h]
        hospital_h_pref_list = [pair_list[col] for col in cols
                                if U[num_single + num_couple + h][col] < 0]
        check_hospital_preflist(h, hospital_h_pref_list)
        # Drop the trailing sentinel (-1, h).
        hospital_pref_list.append(hospital_h_pref_list[:-1])
    return single_pref_list, couple_pref_list, hospital_pref_list
def create_hospital_pref_on_pairs(num_hospital, h, one_hospital_pref_list,
                                  single_pref_list, couple_pref_list):
    """Create hospital h's preference on pairs given its preference on
    individuals.

    Walks `one_hospital_pref_list` (hospital h's ranking of individuals:
    either a single doctor's int index, or a (couple, member) tuple) and
    expands each entry into the pairs involving hospital h.
    """
    result = []
    for member in one_hospital_pref_list:
        if isinstance(member, int):
            # Single doctor: include the pair only if the doctor also
            # finds hospital h acceptable.
            if h in single_pref_list[member]:
                result.append((member, h))
        else:
            c, j = member  # couple c, member j
            # Member j is the "better" member iff hospital h ranks (c, j)
            # ahead of the other member (c, 1 - j).
            pos_j = one_hospital_pref_list.index((c, j))
            pos_other = one_hospital_pref_list.index((c, 1 - j))
            better = pos_j < pos_other
            # Keep the couple assignments where member j goes to h, except
            # (h, h) when j is the better member (the worse member "owns"
            # the both-at-h pair, avoiding duplicates).
            for assignment in couple_pref_list[c]:
                if assignment[j] == h and not (assignment == (h, h) and better):
                    result.append((c,) + assignment)
    return result
def check_stable(U, basis):
    """Check if a basis is ordinal basis for a utility matrix.

    Args:
        U: (m, n) utility matrix.
        basis: a list of m column indices

    Returns:
        True if `basis` is an ordinal basis of `U`
    """
    # NOTE(review): U_pt (U plus a small column-dependent tie-breaking
    # perturbation) is computed but never used below -- the dominance test
    # reads the unperturbed U.  Possibly a latent bug; confirm whether
    # rowmins/U_dom were meant to be computed from U_pt.
    U_pt = U + np.tile(
        np.linspace(start=0.5, stop=0.0, num=U.shape[1]),
        (U.shape[0], 1))
    # Row-wise minimum of U restricted to the basis columns.
    rowmins = np.min(U[:, basis], axis=1, keepdims=True)
    # Entry (i, j) is True iff column j is dominated in row i by the basis.
    U_dom = U <= rowmins
    # Ordinal basis <=> every column is dominated in at least one row.
    return np.all(np.any(U_dom, axis=0))
def check_feasible(A, basis, alloc, b, tol=1e-6):
    """Check if a basis is a feasible basis for a polytope.

    Args:
        A: (m, n) constraint matrix.
        basis: a list of m column indices
        alloc: allocation vector
        b: the right hand side vector of size (m,)
        tol: numerical slack added to `b` before comparing.

    Returns:
        True if `basis` is a feasible basis of the polytope `Ax=b, x>=0`.
    """
    lhs = A[:, basis].dot(alloc)
    rhs = np.array(b) + tol
    return np.all(lhs <= rhs)
|
{"hexsha": "061d1f7a3ad6bd582a627a3b8030780a704907e4", "size": 3980, "ext": "py", "lang": "Python", "max_stars_repo_path": "scarf/utils.py", "max_stars_repo_name": "dwtang/scarf", "max_stars_repo_head_hexsha": "62b58c62b2bca0552aec95500fad2c6d9555a0d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scarf/utils.py", "max_issues_repo_name": "dwtang/scarf", "max_issues_repo_head_hexsha": "62b58c62b2bca0552aec95500fad2c6d9555a0d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scarf/utils.py", "max_forks_repo_name": "dwtang/scarf", "max_forks_repo_head_hexsha": "62b58c62b2bca0552aec95500fad2c6d9555a0d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1818181818, "max_line_length": 78, "alphanum_fraction": 0.6570351759, "include": true, "reason": "import numpy", "num_tokens": 1124}
|
/*
BSD 3-Clause License
Copyright (c) 2017, Alibaba Cloud
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "random.hpp"
#include "tablestore/util/assert.hpp"
#include "tablestore/util/timestamp.hpp"
#include <boost/random/mersenne_twister.hpp>
#include <string>
#include <deque>
using namespace std;
namespace aliyun {
namespace tablestore {
namespace util {
namespace random {
namespace {
// Default pseudo-random source backed by boost::mt19937.
// Not cryptographically secure.  Seeded either explicitly or from the
// current UTC wall-clock time in microseconds.
class Default : public Random
{
public:
    explicit Default(unsigned int seed)
      : mSeed(seed)
    {
        init();
    }

    // Seeds from the current UTC time in microseconds.
    explicit Default()
      : mSeed(UtcTime::now().toUsec())
    {
        init();
    }

    // Number of distinct values next() can produce: mt19937::max() + 1,
    // i.e. 2^32.  Fits in uint64_t without overflow.
    uint64_t upperBound() const
    {
        uint64_t up = mRng.max();
        return up + 1;
    }

    // Next raw draw, in [0, upperBound()).
    uint64_t next()
    {
        return mRng();
    }

    // The seed this generator was constructed with (as stored in mSeed).
    uint64_t seed() const
    {
        return mSeed;
    }

private:
    void init()
    {
        // NOTE(review): only the low 32 bits of mSeed reach the engine.
        mRng.seed(static_cast<uint32_t>(mSeed));
    }

private:
    uint64_t mSeed;
    boost::mt19937 mRng;
};
// Draws an integer in [0, upper).
// When `upper` exceeds the generator's native range, several draws are
// combined as base-`bound` digits to cover the larger range.
// NOTE(review): both branches use plain modulo, which introduces a small
// bias whenever the sampled range is not an exact multiple of `upper`.
uint64_t nextUint(Random& rng, uint64_t upper)
{
    OTS_ASSERT(upper > 0)(upper);
    const uint64_t bound = rng.upperBound();
    if (upper <= bound) {
        // A single draw covers the requested range.
        return rng.next() % upper;
    } else {
        // Collect one full base-`bound` digit per division step.
        uint64_t shiftUpper = upper;
        deque<uint64_t> segs;
        for(; shiftUpper > bound; shiftUpper /= bound) {
            segs.push_back(rng.next());
        }
        // Most-significant digit, bounded by the remaining range.
        segs.push_back(rng.next() % shiftUpper);
        // Recombine the digits, most significant (back of deque) first.
        uint64_t res = 0;
        for(; !segs.empty(); segs.pop_back()) {
            res *= bound;
            res += segs.back();
        }
        return res % upper;
    }
}
} // namespace
// Creates a time-seeded default Random.  Caller owns the result.
Random* newDefault()
{
    return new Default();
}
// Creates a default Random with an explicit seed.  Caller owns the result.
// NOTE(review): Default's constructor takes unsigned int, so only the low
// 32 bits of `seed` are kept -- confirm this truncation is intended.
Random* newDefault(uint64_t seed)
{
    return new Default(seed);
}
// Returns an integer drawn from [0, exclusiveUpper).
// Requires exclusiveUpper > 0 (asserted).
int64_t nextInt(Random& rng, int64_t exclusiveUpper)
{
    OTS_ASSERT(exclusiveUpper > 0)(exclusiveUpper);
    return nextUint(rng, exclusiveUpper);
}
// Returns an integer drawn from [inclusiveLower, exclusiveUpper).
// The width is computed in uint64_t so that ranges wider than INT64_MAX
// (e.g. a very negative lower bound with a positive upper bound) do not
// overflow; adding inclusiveLower back relies on two's-complement
// wraparound of the signed result.
int64_t nextInt(Random& rng, int64_t inclusiveLower, int64_t exclusiveUpper)
{
    OTS_ASSERT(exclusiveUpper > inclusiveLower)
        (inclusiveLower)
        (exclusiveUpper);
    uint64_t range = static_cast<uint64_t>(exclusiveUpper) - static_cast<uint64_t>(inclusiveLower);
    int64_t res = nextUint(rng, range);
    res += inclusiveLower;
    return res;
}
} // namespace random
} // namespace util
} // namespace tablestore
} // namespace aliyun
|
{"hexsha": "17c716e8cc87f22fde9446aa5518712c4f2df168", "size": 3745, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tablestore/util/random.cpp", "max_stars_repo_name": "TimeExceed/aliyun-tablestore-cpp-sdk", "max_stars_repo_head_hexsha": "f8d2fdf500badf70073dff4e21a5d2d7aa7d3853", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-02-24T06:51:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-24T14:40:10.000Z", "max_issues_repo_path": "src/tablestore/util/random.cpp", "max_issues_repo_name": "TimeExceed/aliyun-tablestore-cpp-sdk", "max_issues_repo_head_hexsha": "f8d2fdf500badf70073dff4e21a5d2d7aa7d3853", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tablestore/util/random.cpp", "max_forks_repo_name": "TimeExceed/aliyun-tablestore-cpp-sdk", "max_forks_repo_head_hexsha": "f8d2fdf500badf70073dff4e21a5d2d7aa7d3853", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-02-24T06:51:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-24T06:51:57.000Z", "avg_line_length": 26.0069444444, "max_line_length": 99, "alphanum_fraction": 0.6926568758, "num_tokens": 867}
|
\section{Performance Notes}
|
{"hexsha": "cc7f0ab35eacafd27440606861e7035bb0b1a31a", "size": 27, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "day-4/trio_score/trio_score/builds/assets/performance-notes.tex", "max_stars_repo_name": "DaviRaubach/intensive", "max_stars_repo_head_hexsha": "f2abce0f888f1231d0f8da6f24d41c921ea6aca1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-29T17:59:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-29T17:59:54.000Z", "max_issues_repo_path": "day-4/trio_score/trio_score/builds/assets/performance-notes.tex", "max_issues_repo_name": "DaviRaubach/intensive", "max_issues_repo_head_hexsha": "f2abce0f888f1231d0f8da6f24d41c921ea6aca1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day-4/trio_score/trio_score/builds/assets/performance-notes.tex", "max_forks_repo_name": "DaviRaubach/intensive", "max_forks_repo_head_hexsha": "f2abce0f888f1231d0f8da6f24d41c921ea6aca1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0, "max_line_length": 27, "alphanum_fraction": 0.8518518519, "num_tokens": 6}
|
||| ported from https://github.com/pepijnkokke/FirstOrderUnificationInAgda
module Unification
import Data.Fin
%default total
%access public export
||| An identifier: a wrapped string.
data Name = MkName String

-- Names compare equal iff their underlying strings do.
Eq Name where
  (MkName x) == (MkName y) = x == y
||| A term in an untyped lambda-calculus with variables indexed by `v`.
||| Either a variable, an identifier, or a function applied to an argument.
||| We keep track of the depth `d` to ensure terms are finite.
||| @ v the type `Var`s are indexed by
||| @ d the depth of the AST
data TermD : (v : Type) -> (d : Nat) -> Type where
  ||| A variable with index `i` (depth zero)
  Var : (i : v) -> TermD v Z
  ||| An identifier with name `n` (depth zero)
  Identifier : (n : Name) -> TermD v Z
  ||| Function application, `f` applied to `x`;
  ||| one deeper than the deeper of the two subterms
  App : (f : TermD v d1) -> (x : TermD v d2) -> TermD v (S (max d1 d2))
||| A TermD with `d` as a reified witness instead of a type param:
||| `Exists` packs the depth (witness) with the term (proof).
data Term : (v : Type) -> Type where
  MkTerm : Exists (TermD v) -> Term v
namespace Term
  ||| Extract the depth witness carried by a packed term.
  depth : Term v -> Nat
  depth (MkTerm t) = getWitness t

  {- implicit conversions to/from TermD -}
  implicit
  toTermD : (t : Term v) -> TermD v (depth t)
  toTermD (MkTerm t) = getProof t

  implicit
  fromTermD : TermD v d -> Term v
  fromTermD {d=d} t = MkTerm (Evidence d t)
{- constructors for Term (making use of implicit conversions to/from TermD) -}
||| Wrap a variable as a Term.
var : v -> Term v
var v = Var v

||| Wrap an identifier as a Term.
identifier : {v : Type} -> Name -> Term v
identifier c = Identifier c

||| Apply one Term to another.
app : Term v -> Term v -> Term v
app left right = App left right
||| The catamorphism for Term.
||| We unpack and use `d` to convince idris `fold'` is total,
||| so callers of `fold` can ignore `d` and not worry about totality.
||| @ v how to handle a variable leaf
||| @ n how to handle an identifier leaf
||| @ f how to combine the two results of an application
fold : (v -> r) -> (Name -> r) -> (r -> r -> r) -> Term v -> r
fold v n f (MkTerm (Evidence d t)) = fold' v n f d t where
  fold' v n f d t = case t of
    Var i => v i
    Identifier c => n c
    App {d1=d1} {d2=d2} l r => f (fold' v n f d1 l) (fold' v n f d2 r)

-- Standard interfaces, all expressed via `fold`.
Functor Term where
  map f t = fold (var . f) identifier app t

Applicative Term where
  pure = var
  mf <*> ma = fold (\f => map f ma) identifier app mf

Monad Term where
  -- substituting terms for variables is exactly monadic bind
  t >>= f = fold f identifier app t

Foldable Term where
  -- compose the per-leaf updates right-to-left, then apply to `z`
  foldr op z t = fold op (const id) (.) t z

Traversable Term where
  traverse f = fold
    (\y => [| var (f y) |])
    (\c => pure (identifier c))
    (\l, r => [| app l r |])
{- Now for some unification... -}
||| A substitution of a `Term (Fin n)` for a variable in `Fin (S n)`.
||| @ n the number of vars after applying the substitution.
data Subst : (n : Nat) -> Type where
  ||| Make a Subst
  ||| @ x the variable to replace
  ||| @ t the term (not mentioning `x`) to replace `x` with
  MkSubst : (x : Fin (S n)) -> (t : Term (Fin n)) -> Subst n

{-
thick and thin help keep track of var indexes when vars are
introduced or removed.
-}
||| `thick x` maps each `Fin (S n)` that is not `x` to a unique `Fin n`.
||| Useful when removing `x` from the set of vars (e.g. with a `Subst`).
|||   y |-> Just y,        if y < x,
|||         Nothing,       if y = x,
|||         Just (pred y), if y > x
thick : (x, y : Fin (S n)) -> Maybe (Fin n)
-- NOTE(review): the two inline comments below were swapped relative to the
-- doc block above; corrected to match the actual clauses.
thick {n=S k} FZ (FS y) = Just y -- y > x: pred y (stripping FS)
thick {n=S k} (FS x) FZ = Just FZ -- y < x: y unchanged
thick {n=S k} (FS x) (FS y) = FS <$> thick x y -- recurse
thick _ _ = Nothing -- y = x

||| `thin x` maps each `Fin n` to a unique `Fin (S n)` that is not `x`.
||| The inverse of `y |-> thick x y` for `y`s s.t. `y != x`.
||| Useful for undoing a `Subst`.
|||   y |-> FS y, if y >= x,
|||         y,    if y < x
thin : (x : Fin (S n)) -> (y : Fin n) -> Fin (S n)
thin FZ y = FS y
thin x FZ = FZ
thin (FS x) (FS y) = FS (thin x y)

||| replaces any occurrance of a `Var y` in `t` with `r` if
||| `x == y` or `Var (thick x y)` otherwise.
||| @ s the substitution to make
||| @ t the term to search and replace in
subst : (s : Subst m) -> (t : Term (Fin (S m))) -> Term (Fin m)
subst (MkSubst x r) t = map (thick x) t >>= (maybe r var)
||| A list of substitutions, of the form
||| `[Subst m d, Subst m-1 d, Subst m-2 d, ...]`.
||| Substitutions are applied left to right.
||| Each `s` in a SubstList reduces the number of variables by one, so
||| successive substitutions operate on the reduced set of var idxs.
||| @ m the number of vars before applying the substitutions.
||| @ n the number of vars remaining after applying the substitutions.
data SubstList : (m : Nat) -> (n : Nat) -> Type where
  Nil : {n : Nat} -> SubstList n n -- no op, number of vars is unchanged
  (::) : (s : Subst m) ->
         (tail : SubstList m n)
         -> SubstList (S m) n

||| Check that `x` doesn't appear in `t` (the "occurs check").
||| If it does, return Nothing.
||| If it doesn't, return `t` but over a smaller set of vars.
check : (x : Fin (S n)) -> (t : Term (Fin (S n))) -> Maybe (Term (Fin n))
check x = traverse (thick x)

||| As long as the var x doesn't appear in t, we can unify x and t
||| by substituting t for x.
flexRigid : Fin n -> Term (Fin n) -> Maybe (Exists (SubstList n))
flexRigid {n=Z} _ _ = Nothing -- impossible, Fin Z is uninhabited
flexRigid {n=S n'} x t = map (\t' => Evidence n' [MkSubst x t']) (check x t)

||| we can always unify two variables
flexFlex : (x, y : Fin n) -> Exists (SubstList n)
flexFlex {n=Z} _ _ = Evidence Z [] -- impossible, Fin Z is uninhabited
flexFlex {n=S n'} x y = case thick x y of
  Nothing => Evidence (S n') [] -- x = y, no subst needed
  Just y' => Evidence n' [MkSubst x (Var y')] -- x != y, sub y' for x
||| helper function for unify
||| Applies any accumulated substitutions to both terms before comparing,
||| then dispatches on the shapes of the two terms.
unify' : (t1 : TermD (Fin m) d1) ->
         (t2 : TermD (Fin m) d2) ->
         (acc : Exists (SubstList m))
         -> Maybe (Exists (SubstList m))
-- Non-empty `acc`: apply the head substitution to both terms, recurse on
-- the tail, then cons the head back onto whatever the recursion produced.
unify' t1 t2 (Evidence n (s :: tail)) = -- non-empty `acc`
  map consS $ unify' (subst s t1) (subst s t2) (Evidence n tail) where
    consS (Evidence n' sl) = Evidence n' (s :: sl)
unify' (Var x1) (Var x2) _ = Just (flexFlex x1 x2)
unify' (Var x1) t2 _ = flexRigid x1 t2
unify' t1 (Var x2) _ = flexRigid x2 t1
unify' {m=m} (Identifier c1) (Identifier c2) acc =
  if c1 == c2 then Just acc else Nothing
unify' (App la ra) (App lb rb) acc =
  Just acc >>= unify' la lb >>= unify' ra rb
unify' _ _ _ = Nothing -- can't unify identifier with function application
                       -- (in first-order unification, anyway)

||| Finds the most general substitutions of terms for variables that makes
||| the two terms equal.
unify : (t1 : Term (Fin m)) -> (t2 : Term (Fin m)) -> Maybe (Exists (SubstList m))
unify {m=m} t1 t2 = unify' t1 t2 (Evidence m [])
|
{"hexsha": "45e1bd5f7bc9b301080e069c7045b0e343afbfa9", "size": 6629, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Unification.idr", "max_stars_repo_name": "sammthomson/IdrisUnification", "max_stars_repo_head_hexsha": "c3f2a8b63d4d57f0668f18caf505baf632e9b363", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Unification.idr", "max_issues_repo_name": "sammthomson/IdrisUnification", "max_issues_repo_head_hexsha": "c3f2a8b63d4d57f0668f18caf505baf632e9b363", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Unification.idr", "max_forks_repo_name": "sammthomson/IdrisUnification", "max_forks_repo_head_hexsha": "c3f2a8b63d4d57f0668f18caf505baf632e9b363", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8324324324, "max_line_length": 82, "alphanum_fraction": 0.5877206215, "num_tokens": 2240}
|
/*****************************************************************************
* Licensed to Qualys, Inc. (QUALYS) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* QUALYS licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
****************************************************************************/
/**
* @file
* @brief Predicate --- Validate Graph Implementation
*
* @author Christopher Alfeld <calfeld@qualys.com>
*/
#include <ironbee/predicate/validate_graph.hpp>
#include <ironbee/predicate/bfs.hpp>
#include <ironbee/predicate/leaves.hpp>
#include <ironbee/predicate/merge_graph.hpp>
#include <boost/bind.hpp>
#include <boost/function_output_iterator.hpp>
using namespace std;
namespace IronBee {
namespace Predicate {
namespace {
//! Call pre_transform on node.
class validate_graph_helper
{
public:
validate_graph_helper(validation_e which, reporter_t reporter) :
m_which(which),
m_reporter(reporter)
{
// nop
}
void operator()(const node_cp& n)
{
if (m_which == VALIDATE_PRE) {
n->pre_transform(NodeReporter(m_reporter, n));
}
else {
n->post_transform(NodeReporter(m_reporter, n));
}
}
private:
validation_e m_which;
reporter_t m_reporter;
};
}
void validate_graph(
    validation_e which,
    reporter_t reporter,
    const MergeGraph& graph
)
{
    // Gather the graph's leaves; find_leaves() guarantees no duplicates.
    node_list_t leaves;
    find_leaves(graph.roots().first, graph.roots().second,
                back_inserter(leaves));

    // Walk upward from the leaves, validating every reachable node once.
    validate_graph_helper validator(which, reporter);
    bfs_up(
        leaves.begin(), leaves.end(),
        boost::make_function_output_iterator(validator)
    );
}
} // Predicate
} // IronBee
|
{"hexsha": "778c620c8edf54dcd141f5f2119a7acdacc522e1", "size": 2418, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "predicate/validate_graph.cpp", "max_stars_repo_name": "crustymonkey/ironbee", "max_stars_repo_head_hexsha": "8350b383244e33b18c7a7b6ba989f67ffcbd945a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-12-22T21:08:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-22T21:08:35.000Z", "max_issues_repo_path": "predicate/validate_graph.cpp", "max_issues_repo_name": "crustymonkey/ironbee", "max_issues_repo_head_hexsha": "8350b383244e33b18c7a7b6ba989f67ffcbd945a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predicate/validate_graph.cpp", "max_forks_repo_name": "crustymonkey/ironbee", "max_forks_repo_head_hexsha": "8350b383244e33b18c7a7b6ba989f67ffcbd945a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 78, "alphanum_fraction": 0.6406120761, "num_tokens": 512}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 22 13:13:26 2017
@author: wd
"""
import io
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import math
import tensorflow as tf
import numpy as np
class CNN_SL:
    """Supervised CNN with two output heads built on the TF1 graph API.

    The network maps a flattened 28x28 grayscale input to:
      * a 5-way action prediction (trigger, right, down, left, up), and
      * a 2-way class prediction (landmark / not landmark).

    Construct once (graph is built in __init__), then drive it through the
    supplied session via predict()/update().
    """

    def __init__(self, sess, input_size, output_size, name='main'):
        """Store session/config and build the graph under scope `name`.

        sess: tf.Session used for every subsequent run() call.
        input_size: flattened input dimension (expected 784 = 28*28).
        output_size: kept for caller compatibility; not used internally.
        name: variable scope for all network variables.
        """
        self.sess = sess
        self.input_size = input_size
        self.output_size = output_size
        # Filter edge length for each of the four conv layers below.
        self.filter_sizes = [5, 5, 7, 1]
        self.net_name = name
        self._build_network()

    def _build_network(self, h_size=256, l_rate=1e-5):
        """Assemble the conv stack, FC heads, joint loss and Adam train op."""
        with tf.variable_scope(self.net_name):
            self._X = tf.placeholder(tf.float32, [None, self.input_size])
            self._Y = tf.placeholder(tf.float32, [None, 2])  # class labels
            self._A = tf.placeholder(tf.float32, [None, 5])  # action labels
            self.x_tensor = tf.reshape(self._X, [-1, 28, 28, 1])

            # ===================================================================
            # convolution layers: two SAME conv + 2x2 max-pool blocks, then
            # two VALID convs (no pooling); relu + dropout after every layer.
            # ===================================================================
            self.current_input = self.x_tensor
            self.cnn_weight = []
            for layer_i, nf_output in enumerate([64, 128, 256, 512]):
                nf_input = self.current_input.get_shape().as_list()[3]
                # Uniform init scaled by fan-in.
                W = tf.Variable(tf.random_uniform(
                    [self.filter_sizes[layer_i], self.filter_sizes[layer_i], nf_input, nf_output],
                    -1.0 / math.sqrt(nf_input),
                    1.0 / math.sqrt(nf_input)),
                    name='CNN')
                b = tf.Variable(tf.zeros([nf_output]))
                self.cnn_weight.append(W)
                if layer_i < 2:
                    filter_res = tf.nn.max_pool(
                        tf.add(tf.nn.conv2d(self.current_input, W, strides=[1, 1, 1, 1], padding='SAME'), b),
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')
                else:
                    filter_res = tf.add(
                        tf.nn.conv2d(self.current_input, W, strides=[1, 1, 1, 1], padding='VALID'), b)
                self.current_input = tf.nn.dropout(tf.nn.relu(filter_res), keep_prob=0.7)

            # ===================================================================
            # fully connected layers
            # ===================================================================
            # fc trunk
            self.current_input = tf.reshape(self.current_input, [-1, 512])
            self.Wc1 = tf.get_variable("Wc1", shape=[512, h_size],
                                       initializer=tf.contrib.layers.xavier_initializer())
            layer1 = tf.nn.dropout(tf.nn.tanh(tf.matmul(self.current_input, self.Wc1)), keep_prob=0.7)
            # fc head for action (trigger, right, down, left, up)
            self.Wc2 = tf.get_variable("Wc2", shape=[h_size, 5],
                                       initializer=tf.contrib.layers.xavier_initializer())
            self.action_pred = tf.matmul(layer1, self.Wc2)
            # fc head for class (landmark / not landmark)
            self.Wc3 = tf.get_variable("Wc3", shape=[h_size, 2],
                                       initializer=tf.contrib.layers.xavier_initializer())
            self.class_pred = tf.matmul(layer1, self.Wc3)

            # ===================================================================
            # loss: sum of both heads' cross-entropies, averaged over the batch
            # ===================================================================
            self.loss1 = tf.nn.softmax_cross_entropy_with_logits(logits=self.action_pred, labels=self._A)
            self.loss2 = tf.nn.softmax_cross_entropy_with_logits(logits=self.class_pred, labels=self._Y)
            self._loss = tf.reduce_mean(self.loss1 + self.loss2)
            self.scalar_summary = tf.summary.scalar("loss", self._loss)

            # ===================================================================
            # optimizer
            # ===================================================================
            self._train = tf.train.AdamOptimizer(learning_rate=l_rate).minimize(self._loss)

    def predict(self, state):
        """Run the action head on `state`, reshaped to rows of 784 pixels."""
        # BUGFIX: the original used np.size(state) / 784, which is float
        # division under Python 3 and makes np.reshape raise; letting numpy
        # infer the batch dimension with -1 is equivalent and robust.
        x = np.reshape(state, [-1, 784])
        return self.sess.run(self.action_pred, feed_dict={self._X: x})

    def update(self, input_state_batch, action_batch, class_label_batch):
        """Run one optimization step; returns (loss, train-op result, summary)."""
        return self.sess.run(
            [self._loss, self._train, self.scalar_summary],
            feed_dict={self._X: input_state_batch, self._A: action_batch, self._Y: class_label_batch})

    def get_cnn_weights(self):
        """Evaluate and return the list of conv filter tensors."""
        return self.sess.run(self.cnn_weight)

    def get_wc1(self):
        """Evaluate and return the FC trunk weight matrix."""
        return self.sess.run(self.Wc1)

    def get_wc2(self):
        """Evaluate and return the action-head weight matrix."""
        return self.sess.run(self.Wc2)

    def get_wc3(self):
        """Evaluate and return the class-head weight matrix."""
        return self.sess.run(self.Wc3)
|
{"hexsha": "852fdec407bba33173502a37cf55fa3f6eac9ae4", "size": 5014, "ext": "py", "lang": "Python", "max_stars_repo_path": "PG_supervised_CNN_action_class.py", "max_stars_repo_name": "blackpigg/RL_landmark_finder_scale_action", "max_stars_repo_head_hexsha": "2005741bc0d54d4361331342d522859231ad1955", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PG_supervised_CNN_action_class.py", "max_issues_repo_name": "blackpigg/RL_landmark_finder_scale_action", "max_issues_repo_head_hexsha": "2005741bc0d54d4361331342d522859231ad1955", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PG_supervised_CNN_action_class.py", "max_forks_repo_name": "blackpigg/RL_landmark_finder_scale_action", "max_forks_repo_head_hexsha": "2005741bc0d54d4361331342d522859231ad1955", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0983606557, "max_line_length": 118, "alphanum_fraction": 0.4958117272, "include": true, "reason": "import numpy", "num_tokens": 1069}
|
# -*- coding: utf-8 -*-
"""
Deep Learning with Python by Francois Chollet
4. Fundamentals of machine learning
4.4 Overfitting and underfitting
"""
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.regularizers import l2
import matplotlib.pyplot as plt
import numpy as np
# Keep only the 10,000 most frequent words so each review fits a fixed-size vector.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode each sequence of word indices into a 0/1 row vector."""
    encoded = np.zeros((len(sequences), dimension))
    for row, word_indices in enumerate(sequences):
        encoded[row, word_indices] = 1.0
    return encoded
# Multi-hot encode the reviews; labels become float vectors for binary crossentropy.
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')

# Hold out the first 10k training samples for validation.
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]

# Original model (16-unit baseline; its curves are the reference in every plot below)
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(x_train.shape[1],)))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
original_loss_values = history.history['loss']
original_val_loss_values = history.history['val_loss']
epochs = range(1, len(original_val_loss_values) + 1)

# Version of the model with lower capacity (4 units per layer)
model = Sequential()
model.add(Dense(4, activation='relu', input_shape=(x_train.shape[1],)))
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
val_loss_values = history.history['val_loss']

# Compare validation loss: baseline vs smaller model.
fig = plt.figure()
ax = fig.gca()
ax.plot(epochs, original_val_loss_values, '+', label='Original model')
ax.plot(epochs, val_loss_values, 'o', label='Smaller model')
ax.set_xlabel('Epochs')
ax.set_ylabel('Validation Loss')
ax.legend()

# Version of the model with higher capacity (512 units per layer)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(x_train.shape[1],)))
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
loss_values = history.history['loss']
val_loss_values = history.history['val_loss']

# Compare validation loss: baseline vs bigger model.
fig = plt.figure()
ax = fig.gca()
ax.plot(epochs, original_val_loss_values, '+', label='Original model')
ax.plot(epochs, val_loss_values, 'o', label='Bigger model')
ax.set_xlabel('Epochs')
ax.set_ylabel('Validation Loss')
ax.legend()

# Compare training loss: baseline vs bigger model.
fig = plt.figure()
ax = fig.gca()
ax.plot(epochs, original_loss_values, '+', label='Original model')
ax.plot(epochs, loss_values, 'o', label='Bigger model')
ax.set_xlabel('Epochs')
ax.set_ylabel('Training Loss')
ax.legend()

# Adding L2 weight regularization to the model
model = Sequential()
model.add(Dense(16, kernel_regularizer=l2(0.001), activation='relu',
                input_shape=(x_train.shape[1],)))
model.add(Dense(16, kernel_regularizer=l2(0.001), activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
val_loss_values = history.history['val_loss']

# Compare validation loss: baseline vs L2-regularized model.
fig = plt.figure()
ax = fig.gca()
ax.plot(epochs, original_val_loss_values, '+', label='Original model')
ax.plot(epochs, val_loss_values, 'o', label='L2-regularized model')
ax.set_xlabel('Epochs')
ax.set_ylabel('Validation Loss')
ax.legend()

# Adding dropout to the IMDB network
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(x_train.shape[1],)))
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
val_loss_values = history.history['val_loss']

# Compare validation loss: baseline vs dropout-regularized model.
fig = plt.figure()
ax = fig.gca()
ax.plot(epochs, original_val_loss_values, '+', label='Original model')
ax.plot(epochs, val_loss_values, 'o', label='Dropout-regularized model')
ax.set_xlabel('Epochs')
ax.set_ylabel('Validation Loss')
ax.legend()
|
{"hexsha": "0f465a43dafca4730f6b979607782409bb607bea", "size": 5294, "ext": "py", "lang": "Python", "max_stars_repo_path": "4_fundamentals_of_machine_learning/overfitting_and_underfitting.py", "max_stars_repo_name": "agaitanis/deep_learning_with_python", "max_stars_repo_head_hexsha": "590e4171c4e4e83136a8633665586e07f0d4c2e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-28T16:03:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-28T16:03:05.000Z", "max_issues_repo_path": "4_fundamentals_of_machine_learning/overfitting_and_underfitting.py", "max_issues_repo_name": "agaitanis/deep_learning_with_python", "max_issues_repo_head_hexsha": "590e4171c4e4e83136a8633665586e07f0d4c2e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "4_fundamentals_of_machine_learning/overfitting_and_underfitting.py", "max_forks_repo_name": "agaitanis/deep_learning_with_python", "max_forks_repo_head_hexsha": "590e4171c4e4e83136a8633665586e07f0d4c2e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2514285714, "max_line_length": 86, "alphanum_fraction": 0.6717038156, "include": true, "reason": "import numpy", "num_tokens": 1204}
|
from ast import Mod
import numpy as np
import scipy
from multiprocessing import Pool
from enum import Enum
class Direction(Enum):
    """Eight compass directions, numbered clockwise from north (N == 0)."""
    N, NE, E, SE, S, SW, W, NW = range(8)
# (row, col) grid offset for each compass direction.
# Row axis grows downward (S = +1) and column axis grows rightward (E = +1).
transform = {
    'N':  (-1, 0),
    'NE': (-1, 1),
    'E':  (0, 1),
    'SE': (1, 1),
    'S':  (1, 0),
    'SW': (1, -1),   # BUGFIX: was (-1, 1), duplicating NE
    'W':  (0, -1),   # BUGFIX: was (-1, 0), duplicating N
    'NW': (-1, -1),
}
"""def _is_border(kernel):
if kernel[0][1] and kernel[1][0] and kernel[2][1] and kernel[1][2]:
return True
else:
return False"""
# [[0,1,0],
# [1,1,1],
# [0,1,0]]
def form_boundary_mask(mask):
    """Return a boolean array marking the boundary pixels of `mask`.

    Convolves `mask` with the 4-connected cross kernel (out-of-bounds treated
    as 0 via mode="constant"). A pixel counts as boundary when the convolved
    sum is strictly between 0 and 5, i.e. the neighborhood is touched but not
    fully filled — image-edge pixels of a filled region therefore count too.

    NOTE(review): call with a numeric (int/float) mask; a bool array would be
    clipped by the convolution — confirm against callers.
    """
    # BUGFIX: the module only does `import scipy`, which does not reliably
    # expose scipy.ndimage; import the submodule explicitly.
    import scipy.ndimage
    kernel = [[0, 1, 0],
              [1, 1, 1],
              [0, 1, 0]]
    neighborhood_sum = scipy.ndimage.convolve(mask, kernel, mode="constant", cval=0.0)
    return (neighborhood_sum < 5) & (neighborhood_sum > 0)
def _transform_dir(direction):
    """Map a direction (a Direction member or its 0-7 value) to its grid offset.

    `Direction(direction)` accepts either a member or its raw value, so both
    `_transform_dir(Direction.E)` and `_transform_dir(2)` resolve to the same
    entry in the module-level `transform` table.
    """
    return transform[Direction(direction).name]
def _find_next_dir(last_dir):
    """Return the Direction three steps counter-clockwise of `last_dir`.

    `last_dir` is a direction name (string key into Direction).

    BUGFIX: the original computed `Direction[last_dir] - 3`, which raises
    TypeError because plain Enum members do not support arithmetic; the
    subtraction must operate on the member's `.value`.
    """
    return Direction((Direction[last_dir].value - 3) % 8)
def _find_highest_pixel(boundary):
y = min(np.where(boundary == 1)[0])
x = min(np.where(boundary[y] == 1)[0])
return (x,y)
def order_boundary_pixels(boundary):
    """Order the pixels of a boundary mask by walking along the contour.

    NOTE(review): this is an unfinished stub — it locates the starting pixel
    and picks an initial direction but never walks the contour; it always
    returns an empty list. `cur_list` and `next_dir` are currently unused.
    """
    #TODO: Find lowest y and x mask pixel (Highest Pixel)
    #TODO: There might be inside pixels as in the case of a tennis racket and as such this process needs to be verified every time
    ordered_pixels = []
    cur_list = []
    cur_pixel = _find_highest_pixel(boundary)
    cur_list.append(cur_pixel)
    next_dir = Direction.E  # intended initial walk direction; not consumed yet
    return ordered_pixels
def gen_sliding_window():
    """Placeholder for sliding-window generation; currently yields nothing."""
    return []
|
{"hexsha": "f5ffc2ccc7aed4db119be4e39e6c925fdd5f7d69", "size": 1789, "ext": "py", "lang": "Python", "max_stars_repo_path": "boundary_refinement.py", "max_stars_repo_name": "Yusoi/mmdetection", "max_stars_repo_head_hexsha": "cbb5fb00f6e124fbb2c15e7e3438d7fa76b8850a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "boundary_refinement.py", "max_issues_repo_name": "Yusoi/mmdetection", "max_issues_repo_head_hexsha": "cbb5fb00f6e124fbb2c15e7e3438d7fa76b8850a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boundary_refinement.py", "max_forks_repo_name": "Yusoi/mmdetection", "max_forks_repo_head_hexsha": "cbb5fb00f6e124fbb2c15e7e3438d7fa76b8850a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5542168675, "max_line_length": 130, "alphanum_fraction": 0.6143096702, "include": true, "reason": "import numpy,import scipy", "num_tokens": 549}
|
#include <huaweicloud/eip/v2/EipClient.h>
#include <huaweicloud/core/utils/MultipartFormData.h>
#include <unordered_set>
#include <boost/algorithm/string/replace.hpp>
// Format `value` in fixed notation at its type's full decimal precision
// (std::numeric_limits<T>::digits10 digits after the point for floats).
template <typename T>
std::string toString(const T value)
{
    std::ostringstream stream;
    stream.precision(std::numeric_limits<T>::digits10);
    stream << std::fixed << value;
    return stream.str();
}
namespace HuaweiCloud {
namespace Sdk {
namespace Eip {
namespace V2 {
using namespace HuaweiCloud::Sdk::Eip::V2::Model;
// Default-constructible: all request state is carried per-call.
EipClient::EipClient()
{
}
EipClient::~EipClient()
{
}
// Entry point for constructing a configured client; the EIP service uses
// project-scoped ("Basic") credentials.
ClientBuilder<EipClient> EipClient::newBuilder()
{
    return ClientBuilder<EipClient>("BasicCredentials");
}
// POST /v2.0/{project_id}/bandwidths/{bandwidth_id}/insert
// Adds public IPs into an existing shared bandwidth. Serializes
// request.getBody() as JSON and parses the JSON reply into the response;
// status code, headers and raw body are always attached.
std::shared_ptr<AddPublicipsIntoSharedBandwidthResponse> EipClient::addPublicipsIntoSharedBandwidth(AddPublicipsIntoSharedBandwidthRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/bandwidths/{bandwidth_id}/insert";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarFormParams;
    std::map<std::string, std::string> localVarPathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> localVarFileParams;
    // Fill the {bandwidth_id} path segment from the request.
    localVarPathParams["bandwidth_id"] = parameterToString(request.getBandwidthId());
    // getContentType presumably reports back via these flags — confirm its signature.
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    if (isJson) {
        // Serialize the typed request body to a UTF-8 JSON string.
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<AddPublicipsIntoSharedBandwidthResponse> localVarResult = std::make_shared<AddPublicipsIntoSharedBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        // Deserialize the JSON payload into the typed response object.
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// POST /v2.0/{project_id}/batch-bandwidths
// Creates multiple shared bandwidths in one call; JSON request body,
// JSON response parsed into the typed response object.
std::shared_ptr<BatchCreateSharedBandwidthsResponse> EipClient::batchCreateSharedBandwidths(BatchCreateSharedBandwidthsRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/batch-bandwidths";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarFormParams;
    std::map<std::string, std::string> localVarPathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> localVarFileParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<BatchCreateSharedBandwidthsResponse> localVarResult = std::make_shared<BatchCreateSharedBandwidthsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// POST /v2.0/{project_id}/bandwidths
// Creates a single shared bandwidth; JSON request body, JSON response
// parsed into the typed response object.
std::shared_ptr<CreateSharedBandwidthResponse> EipClient::createSharedBandwidth(CreateSharedBandwidthRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/bandwidths";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarFormParams;
    std::map<std::string, std::string> localVarPathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> localVarFileParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<CreateSharedBandwidthResponse> localVarResult = std::make_shared<CreateSharedBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// DELETE /v2.0/{project_id}/bandwidths/{bandwidth_id}
// Deletes a shared bandwidth. No request body is sent; any JSON in the
// reply is still parsed into the typed response object.
std::shared_ptr<DeleteSharedBandwidthResponse> EipClient::deleteSharedBandwidth(DeleteSharedBandwidthRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/bandwidths/{bandwidth_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarFormParams;
    std::map<std::string, std::string> localVarPathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> localVarFileParams;
    // Fill the {bandwidth_id} path segment from the request.
    localVarPathParams["bandwidth_id"] = parameterToString(request.getBandwidthId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("DELETE", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<DeleteSharedBandwidthResponse> localVarResult = std::make_shared<DeleteSharedBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// GET /v1/{project_id}/bandwidths
// Lists bandwidths with optional pagination (marker/limit) and filters
// (enterprise_project_id, share_type); only set fields become query params.
std::shared_ptr<ListBandwidthsResponse> EipClient::listBandwidths(ListBandwidthsRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/bandwidths";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarFormParams;
    std::map<std::string, std::string> localVarPathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> localVarFileParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    // Optional query parameters: only added when the caller set them.
    if (request.markerIsSet()) {
        localVarQueryParams["marker"] = parameterToString(request.getMarker());
    }
    if (request.limitIsSet()) {
        localVarQueryParams["limit"] = parameterToString(request.getLimit());
    }
    if (request.enterpriseProjectIdIsSet()) {
        localVarQueryParams["enterprise_project_id"] = parameterToString(request.getEnterpriseProjectId());
    }
    if (request.shareTypeIsSet()) {
        localVarQueryParams["share_type"] = parameterToString(request.getShareType());
    }
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ListBandwidthsResponse> localVarResult = std::make_shared<ListBandwidthsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Lists resource quotas: GET /v1/{project_id}/quotas.
// Optional "type" query parameter is sent only when set on the request.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ListQuotasResponse> EipClient::listQuotas(ListQuotasRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/quotas";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    if (request.typeIsSet()) {
        localVarQueryParams["type"] = parameterToString(request.getType());
    }
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ListQuotasResponse> localVarResult = std::make_shared<ListQuotasResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Removes EIPs from a shared bandwidth: POST /v2.0/{project_id}/bandwidths/{bandwidth_id}/remove.
// @param request carries the bandwidth_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<RemovePublicipsFromSharedBandwidthResponse> EipClient::removePublicipsFromSharedBandwidth(RemovePublicipsFromSharedBandwidthRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/bandwidths/{bandwidth_id}/remove";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["bandwidth_id"] = parameterToString(request.getBandwidthId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<RemovePublicipsFromSharedBandwidthResponse> localVarResult = std::make_shared<RemovePublicipsFromSharedBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Queries a single bandwidth: GET /v1/{project_id}/bandwidths/{bandwidth_id}.
// @param request carries the bandwidth_id path parameter.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ShowBandwidthResponse> EipClient::showBandwidth(ShowBandwidthRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/bandwidths/{bandwidth_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["bandwidth_id"] = parameterToString(request.getBandwidthId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ShowBandwidthResponse> localVarResult = std::make_shared<ShowBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Updates a bandwidth: PUT /v1/{project_id}/bandwidths/{bandwidth_id}.
// @param request carries the bandwidth_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<UpdateBandwidthResponse> EipClient::updateBandwidth(UpdateBandwidthRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/bandwidths/{bandwidth_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["bandwidth_id"] = parameterToString(request.getBandwidthId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("PUT", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<UpdateBandwidthResponse> localVarResult = std::make_shared<UpdateBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Updates a prepaid bandwidth: PUT /v2.0/{project_id}/bandwidths/{bandwidth_id}.
// @param request carries the bandwidth_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<UpdatePrePaidBandwidthResponse> EipClient::updatePrePaidBandwidth(UpdatePrePaidBandwidthRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/bandwidths/{bandwidth_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["bandwidth_id"] = parameterToString(request.getBandwidthId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("PUT", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<UpdatePrePaidBandwidthResponse> localVarResult = std::make_shared<UpdatePrePaidBandwidthResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Batch-creates tags on an EIP: POST /v2.0/{project_id}/publicips/{publicip_id}/tags/action.
// @param request carries the publicip_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<BatchCreatePublicipTagsResponse> EipClient::batchCreatePublicipTags(BatchCreatePublicipTagsRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/{publicip_id}/tags/action";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<BatchCreatePublicipTagsResponse> localVarResult = std::make_shared<BatchCreatePublicipTagsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Batch-deletes tags from an EIP: POST /v2.0/{project_id}/publicips/{publicip_id}/tags/action.
// @param request carries the publicip_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<BatchDeletePublicipTagsResponse> EipClient::batchDeletePublicipTags(BatchDeletePublicipTagsRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/{publicip_id}/tags/action";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<BatchDeletePublicipTagsResponse> localVarResult = std::make_shared<BatchDeletePublicipTagsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Creates a prepaid EIP: POST /v2.0/{project_id}/publicips.
// @param request carries the JSON request body; no path or query parameters.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<CreatePrePaidPublicipResponse> EipClient::createPrePaidPublicip(CreatePrePaidPublicipRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<CreatePrePaidPublicipResponse> localVarResult = std::make_shared<CreatePrePaidPublicipResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Creates an EIP: POST /v1/{project_id}/publicips.
// @param request carries the JSON request body; no path or query parameters.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<CreatePublicipResponse> EipClient::createPublicip(CreatePublicipRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/publicips";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<CreatePublicipResponse> localVarResult = std::make_shared<CreatePublicipResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Creates a single tag on an EIP: POST /v2.0/{project_id}/publicips/{publicip_id}/tags.
// @param request carries the publicip_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<CreatePublicipTagResponse> EipClient::createPublicipTag(CreatePublicipTagRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/{publicip_id}/tags";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<CreatePublicipTagResponse> localVarResult = std::make_shared<CreatePublicipTagResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Deletes an EIP: DELETE /v1/{project_id}/publicips/{publicip_id}.
// @param request carries the publicip_id path parameter.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<DeletePublicipResponse> EipClient::deletePublicip(DeletePublicipRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/publicips/{publicip_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("DELETE", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<DeletePublicipResponse> localVarResult = std::make_shared<DeletePublicipResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Deletes one tag from an EIP: DELETE /v2.0/{project_id}/publicips/{publicip_id}/tags/{key}.
// @param request carries the publicip_id and key path parameters.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<DeletePublicipTagResponse> EipClient::deletePublicipTag(DeletePublicipTagRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/{publicip_id}/tags/{key}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    localVarPathParams["key"] = parameterToString(request.getKey());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("DELETE", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<DeletePublicipTagResponse> localVarResult = std::make_shared<DeletePublicipTagResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Lists all EIP tags in a project: GET /v2.0/{project_id}/publicips/tags.
// @param request unused beyond its type; the endpoint takes no parameters.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ListPublicipTagsResponse> EipClient::listPublicipTags(ListPublicipTagsRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/tags";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ListPublicipTagsResponse> localVarResult = std::make_shared<ListPublicipTagsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Lists EIPs: GET /v1/{project_id}/publicips.
// Optional query parameters (sent only when set on the request): marker, limit,
// ip_version, enterprise_project_id, port_id, public_ip_address,
// private_ip_address, id, allow_share_bandwidth_type_any.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ListPublicipsResponse> EipClient::listPublicips(ListPublicipsRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/publicips";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    if (request.markerIsSet()) {
        localVarQueryParams["marker"] = parameterToString(request.getMarker());
    }
    if (request.limitIsSet()) {
        localVarQueryParams["limit"] = parameterToString(request.getLimit());
    }
    if (request.ipVersionIsSet()) {
        localVarQueryParams["ip_version"] = parameterToString(request.getIpVersion());
    }
    if (request.enterpriseProjectIdIsSet()) {
        localVarQueryParams["enterprise_project_id"] = parameterToString(request.getEnterpriseProjectId());
    }
    if (request.portIdIsSet()) {
        localVarQueryParams["port_id"] = parameterToString(request.getPortId());
    }
    if (request.publicIpAddressIsSet()) {
        localVarQueryParams["public_ip_address"] = parameterToString(request.getPublicIpAddress());
    }
    if (request.privateIpAddressIsSet()) {
        localVarQueryParams["private_ip_address"] = parameterToString(request.getPrivateIpAddress());
    }
    if (request.idIsSet()) {
        localVarQueryParams["id"] = parameterToString(request.getId());
    }
    if (request.allowShareBandwidthTypeAnyIsSet()) {
        localVarQueryParams["allow_share_bandwidth_type_any"] = parameterToString(request.getAllowShareBandwidthTypeAny());
    }
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ListPublicipsResponse> localVarResult = std::make_shared<ListPublicipsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Queries EIPs by tag filter: POST /v2.0/{project_id}/publicips/resource_instances/action.
// @param request carries the JSON request body describing the tag filter/action.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ListPublicipsByTagsResponse> EipClient::listPublicipsByTags(ListPublicipsByTagsRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/resource_instances/action";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ListPublicipsByTagsResponse> localVarResult = std::make_shared<ListPublicipsByTagsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Queries a single EIP: GET /v1/{project_id}/publicips/{publicip_id}.
// @param request carries the publicip_id path parameter.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ShowPublicipResponse> EipClient::showPublicip(ShowPublicipRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/publicips/{publicip_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ShowPublicipResponse> localVarResult = std::make_shared<ShowPublicipResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Lists tags on one EIP: GET /v2.0/{project_id}/publicips/{publicip_id}/tags.
// @param request carries the publicip_id path parameter.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<ShowPublicipTagsResponse> EipClient::showPublicipTags(ShowPublicipTagsRequest &request)
{
    std::string localVarPath = "/v2.0/{project_id}/publicips/{publicip_id}/tags";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<ShowPublicipTagsResponse> localVarResult = std::make_shared<ShowPublicipTagsResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Updates an EIP: PUT /v1/{project_id}/publicips/{publicip_id}.
// @param request carries the publicip_id path parameter and the JSON request body.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<UpdatePublicipResponse> EipClient::updatePublicip(UpdatePublicipRequest &request)
{
    std::string localVarPath = "/v1/{project_id}/publicips/{publicip_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["publicip_id"] = parameterToString(request.getPublicipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("PUT", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<UpdatePublicipResponse> localVarResult = std::make_shared<UpdatePublicipResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Creates a floating IP (Neutron API): POST /v2.0/floatingips.
// @param request carries the JSON request body; no path or query parameters.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<NeutronCreateFloatingIpResponse> EipClient::neutronCreateFloatingIp(NeutronCreateFloatingIpRequest &request)
{
    std::string localVarPath = "/v2.0/floatingips";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    // Serialize the request body only when getContentType flagged a JSON payload.
    if (isJson) {
        web::json::value localVarJson;
        localVarJson = ModelBase::toJson(request.getBody());
        localVarHttpBody = utility::conversions::to_utf8string(localVarJson.serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("POST", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<NeutronCreateFloatingIpResponse> localVarResult = std::make_shared<NeutronCreateFloatingIpResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
// Deletes a floating IP (Neutron API): DELETE /v2.0/floatingips/{floatingip_id}.
// @param request carries the floatingip_id path parameter.
// @return response wrapper populated with status code, headers, and the parsed JSON body (if any).
// NOTE(review): removed localVarFormParams/localVarFileParams — declared but never used.
std::shared_ptr<NeutronDeleteFloatingIpResponse> EipClient::neutronDeleteFloatingIp(NeutronDeleteFloatingIpRequest &request)
{
    std::string localVarPath = "/v2.0/floatingips/{floatingip_id}";
    std::map<std::string, std::string> localVarQueryParams;
    std::map<std::string, std::string> localVarHeaderParams;
    std::map<std::string, std::string> localVarPathParams;
    localVarPathParams["floatingip_id"] = parameterToString(request.getFloatingipId());
    bool isJson = false;
    bool isMultiPart = false;
    std::string contentType = getContentType("application/json", isJson, isMultiPart);
    localVarHeaderParams["Content-Type"] = contentType;
    std::string localVarHttpBody;
    std::unique_ptr<HttpResponse> res = callApi("DELETE", localVarPath, localVarPathParams, localVarQueryParams, localVarHeaderParams, localVarHttpBody);
    std::shared_ptr<NeutronDeleteFloatingIpResponse> localVarResult = std::make_shared<NeutronDeleteFloatingIpResponse>();
    if (!res->getHttpBody().empty()) {
        utility::string_t localVarResponse = utility::conversions::to_string_t(res->getHttpBody());
        web::json::value localVarJson = web::json::value::parse(localVarResponse);
        localVarResult->fromJson(localVarJson);
    }
    localVarResult->setStatusCode(res->getStatusCode());
    localVarResult->setHeaderParams(res->getHeaderParams());
    localVarResult->setHttpBody(res->getHttpBody());
    return localVarResult;
}
std::shared_ptr<NeutronListFloatingIpsResponse> EipClient::neutronListFloatingIps(NeutronListFloatingIpsRequest &request)
{
    // GET /v2.0/floatingips — list floating IPs; every filter the caller
    // explicitly set on the request is forwarded as a query parameter.
    std::string resourcePath = "/v2.0/floatingips";
    std::map<std::string, std::string> queryParams;
    std::map<std::string, std::string> headerParams;
    std::map<std::string, std::string> formParams;
    std::map<std::string, std::string> pathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> fileParams;

    bool isJson = false;
    bool isMultiPart = false;
    headerParams["Content-Type"] = getContentType("application/json", isJson, isMultiPart);

    // Optional pagination parameters.
    if (request.limitIsSet()) {
        queryParams["limit"] = parameterToString(request.getLimit());
    }
    if (request.markerIsSet()) {
        queryParams["marker"] = parameterToString(request.getMarker());
    }
    if (request.pageReverseIsSet()) {
        queryParams["page_reverse"] = parameterToString(request.isPageReverse());
    }
    // Optional attribute filters.
    if (request.idIsSet()) {
        queryParams["id"] = parameterToString(request.getId());
    }
    if (request.floatingIpAddressIsSet()) {
        queryParams["floating_ip_address"] = parameterToString(request.getFloatingIpAddress());
    }
    if (request.routerIdIsSet()) {
        queryParams["router_id"] = parameterToString(request.getRouterId());
    }
    if (request.portIdIsSet()) {
        queryParams["port_id"] = parameterToString(request.getPortId());
    }
    if (request.fixedIpAddressIsSet()) {
        queryParams["fixed_ip_address"] = parameterToString(request.getFixedIpAddress());
    }
    if (request.tenantIdIsSet()) {
        queryParams["tenant_id"] = parameterToString(request.getTenantId());
    }
    if (request.floatingNetworkIdIsSet()) {
        queryParams["floating_network_id"] = parameterToString(request.getFloatingNetworkId());
    }

    std::string requestBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", resourcePath, pathParams, queryParams, headerParams, requestBody);

    // Deserialize the response payload (if any) into the typed model.
    auto result = std::make_shared<NeutronListFloatingIpsResponse>();
    if (!res->getHttpBody().empty()) {
        web::json::value parsed = web::json::value::parse(utility::conversions::to_string_t(res->getHttpBody()));
        result->fromJson(parsed);
    }
    result->setStatusCode(res->getStatusCode());
    result->setHeaderParams(res->getHeaderParams());
    result->setHttpBody(res->getHttpBody());
    return result;
}
std::shared_ptr<NeutronShowFloatingIpResponse> EipClient::neutronShowFloatingIp(NeutronShowFloatingIpRequest &request)
{
    // GET /v2.0/floatingips/{floatingip_id} — fetch a single floating IP.
    std::string resourcePath = "/v2.0/floatingips/{floatingip_id}";
    std::map<std::string, std::string> queryParams;
    std::map<std::string, std::string> headerParams;
    std::map<std::string, std::string> formParams;
    std::map<std::string, std::string> pathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> fileParams;

    pathParams["floatingip_id"] = parameterToString(request.getFloatingipId());

    bool isJson = false;
    bool isMultiPart = false;
    headerParams["Content-Type"] = getContentType("application/json", isJson, isMultiPart);

    std::string requestBody;
    std::unique_ptr<HttpResponse> res = callApi("GET", resourcePath, pathParams, queryParams, headerParams, requestBody);

    // Deserialize the response payload (if any) into the typed model.
    auto result = std::make_shared<NeutronShowFloatingIpResponse>();
    if (!res->getHttpBody().empty()) {
        web::json::value parsed = web::json::value::parse(utility::conversions::to_string_t(res->getHttpBody()));
        result->fromJson(parsed);
    }
    result->setStatusCode(res->getStatusCode());
    result->setHeaderParams(res->getHeaderParams());
    result->setHttpBody(res->getHttpBody());
    return result;
}
std::shared_ptr<NeutronUpdateFloatingIpResponse> EipClient::neutronUpdateFloatingIp(NeutronUpdateFloatingIpRequest &request)
{
    // PUT /v2.0/floatingips/{floatingip_id} — update one floating IP.
    std::string resourcePath = "/v2.0/floatingips/{floatingip_id}";
    std::map<std::string, std::string> queryParams;
    std::map<std::string, std::string> headerParams;
    std::map<std::string, std::string> formParams;
    std::map<std::string, std::string> pathParams;
    std::map<std::string, std::shared_ptr<HttpContent>> fileParams;

    pathParams["floatingip_id"] = parameterToString(request.getFloatingipId());

    // getContentType sets isJson when the negotiated payload type is JSON;
    // only then is the request body serialized and sent.
    bool isJson = false;
    bool isMultiPart = false;
    headerParams["Content-Type"] = getContentType("application/json;charset=UTF-8", isJson, isMultiPart);

    std::string requestBody;
    if (isJson) {
        requestBody = utility::conversions::to_utf8string(ModelBase::toJson(request.getBody()).serialize());
    }
    std::unique_ptr<HttpResponse> res = callApi("PUT", resourcePath, pathParams, queryParams, headerParams, requestBody);

    // Deserialize the response payload (if any) into the typed model.
    auto result = std::make_shared<NeutronUpdateFloatingIpResponse>();
    if (!res->getHttpBody().empty()) {
        web::json::value parsed = web::json::value::parse(utility::conversions::to_string_t(res->getHttpBody()));
        result->fromJson(parsed);
    }
    result->setStatusCode(res->getStatusCode());
    result->setHeaderParams(res->getHeaderParams());
    result->setHttpBody(res->getHttpBody());
    return result;
}
#if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER)
// Windows-only overload: utility::string_t is a wide string there, so it is
// converted to UTF-8 before being used as an HTTP request parameter.
std::string EipClient::parameterToString(utility::string_t value)
{
    return utility::conversions::to_utf8string(value);
}
#endif
// Identity overload: std::string parameters need no conversion.
std::string EipClient::parameterToString(std::string value)
{
    return value;
}
// Render a 64-bit integer as its decimal string representation.
std::string EipClient::parameterToString(int64_t value)
{
    return std::to_string(value);
}
// Render a 32-bit integer as its decimal string representation.
std::string EipClient::parameterToString(int32_t value)
{
    return std::to_string(value);
}
// Float overload: delegates to the SDK's shared toString helper (defined
// elsewhere in the SDK) for consistent floating-point formatting.
std::string EipClient::parameterToString(float value)
{
    return toString(value);
}
// Double overload: delegates to the SDK's shared toString helper (defined
// elsewhere in the SDK) for consistent floating-point formatting.
std::string EipClient::parameterToString(double value)
{
    return toString(value);
}
// Serialize a datetime as its ISO 8601 string form, converted to UTF-8.
std::string EipClient::parameterToString(const utility::datetime &value)
{
    return utility::conversions::to_utf8string(value.to_string(utility::datetime::ISO_8601));
}
}
}
}
}
|
{"hexsha": "99f10ca0c0be0b2bf0d1e4c6a90870074167307b", "size": 51440, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "eip/src/v2/EipClient.cpp", "max_stars_repo_name": "huaweicloud/huaweicloud-sdk-cpp-v3", "max_stars_repo_head_hexsha": "d3b5e07b0ee8367d1c7f6dad17be0212166d959c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2021-03-03T08:23:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T02:16:39.000Z", "max_issues_repo_path": "eip/src/v2/EipClient.cpp", "max_issues_repo_name": "ChenwxJay/huaweicloud-sdk-cpp-v3", "max_issues_repo_head_hexsha": "f821ec6d269b50203e0c1638571ee1349c503c41", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eip/src/v2/EipClient.cpp", "max_forks_repo_name": "ChenwxJay/huaweicloud-sdk-cpp-v3", "max_forks_repo_head_hexsha": "f821ec6d269b50203e0c1638571ee1349c503c41", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2021-02-26T13:53:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T02:36:43.000Z", "avg_line_length": 43.0100334448, "max_line_length": 157, "alphanum_fraction": 0.7347006221, "num_tokens": 12127}
|
import os, sys
from csv import DictReader
import numpy as np
from cupcake.io.BioReaders import GMAPSAMReader
REF_LENGTH = 29903
def process_sam_to_wig(sam_filename, output_wig, cov_threshold=200, meta_info=None):
    """Convert a SAM alignment file into a wiggle (variableStep) coverage track.

    Also writes ``<prefix>.metainfo.sam``: a copy of the input SAM where each
    record gains an ``sg:i:`` tag (segment count, multi-segment reads only)
    and, when metadata is supplied, an ``st:A:`` tag (sequencing technology).

    :param sam_filename: input SAM produced by GMAP alignment
    :param output_wig: path of the wiggle file to write
    :param cov_threshold: positions with coverage below this are reported as 0
    :param meta_info: optional dict mapping accession ID -> metadata row
    """
    cov = np.zeros(REF_LENGTH)
    reader = GMAPSAMReader(sam_filename, True)
    f_sam = open(sam_filename[:sam_filename.rfind('.')] + '.metainfo.sam', 'w')
    f_sam.write(reader.header)
    bad_count = 0
    for r in reader:
        tags = ''
        # NOTE(review): only multi-segment (spliced) reads contribute to the
        # coverage track here — presumably intentional; confirm with author.
        if len(r.segments) > 1:
            for e in r.segments:
                cov[e.start:e.end] += 1
            bad_count += 1
            tags += "\tsg:i:{0}".format(len(r.segments))  # sg: number of segments
        if meta_info is not None:
            seqid = r.qID.split('|')[0]
            if seqid in meta_info:
                tags += "\tst:A:{0}".format(meta_info[seqid]['Sequencing technology'][0])  # st: sequencing technology
            else:
                print("WARNING: Could not find {0} in metadata. Skipping.".format(seqid))
        f_sam.write(r.record_line + tags + '\n')
    f_sam.close()
    # Zero out low-coverage positions (vectorized; replaces the element loop).
    cov[cov < cov_threshold] = 0
    with open(output_wig, 'w') as f:
        # BUG FIX: the variableStep header previously lacked a trailing
        # newline, so the first data row was fused onto the declaration line,
        # producing an invalid wiggle file.
        f.write("variableStep chrom=NC_045512v2 start=1\n")
        for i in range(len(cov)):
            f.write("{0} {1}\n".format(i + 1, cov[i]))
if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("sam_filename")
    parser.add_argument("output_wig")
    parser.add_argument("-c", "--coverage_threshold", type=int, default=200,
                        help="Threshold coverage under which to show 0")
    parser.add_argument("-m", "--metadata_csv", help="Metadata CSV (optional)")
    args = parser.parse_args()

    # BUG FIX: meta_info was previously only assigned inside the `if`, so
    # running without -m/--metadata_csv raised NameError on the call below.
    meta_info = None
    if args.metadata_csv is not None:
        # Map 'Accession ID' -> full metadata row.
        meta_info = dict((r['Accession ID'], r)
                         for r in DictReader(open(args.metadata_csv), delimiter=','))
    process_sam_to_wig(args.sam_filename, args.output_wig, args.coverage_threshold, meta_info)
|
{"hexsha": "6a46def310fa45132982b11000a1623fe140ef33", "size": 1970, "ext": "py", "lang": "Python", "max_stars_repo_path": "cosa/utils/process_sam_to_wig.py", "max_stars_repo_name": "Zuhayr-PacBio/CoSA", "max_stars_repo_head_hexsha": "a8bfccd301a367aac2858b48f13ca708833f6c8f", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-03-28T14:46:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T20:24:24.000Z", "max_issues_repo_path": "cosa/utils/process_sam_to_wig.py", "max_issues_repo_name": "Zuhayr-PacBio/CoSA", "max_issues_repo_head_hexsha": "a8bfccd301a367aac2858b48f13ca708833f6c8f", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-20T01:18:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-13T15:49:25.000Z", "max_forks_repo_path": "cosa/utils/process_sam_to_wig.py", "max_forks_repo_name": "Zuhayr-PacBio/CoSA", "max_forks_repo_head_hexsha": "a8bfccd301a367aac2858b48f13ca708833f6c8f", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-05-08T22:20:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-14T16:46:43.000Z", "avg_line_length": 38.6274509804, "max_line_length": 125, "alphanum_fraction": 0.6406091371, "include": true, "reason": "import numpy", "num_tokens": 531}
|
#!/usr/bin/env python3
import numpy as np
import h5py as h5
import argparse
import os
import subprocess
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('--npts', type=int, default=201,
                    help='Number of gridpoints per dimension.')
parser.add_argument('--target', type=float, nargs=2, default=[0.0, 0.0],
                    help='Coordinates of the target point.')
parser.add_argument('--no_plot', action='store_true',
                    help='Don\'t plot the result.')
args = parser.parse_args()

# Write a fresh HDF5 input file containing the cost-function grid for the
# external solver; any stale file from a previous run is removed first.
filename = '../data/eikonal2d_periodic.h5'
if os.path.isfile(filename):
    os.remove(filename)
f = h5.File(filename, "w")
data = np.ones((args.npts, args.npts))
# Map the target point from [-1, 1] coordinates to grid indices.
i = int(np.floor((args.npts - 1) * (1 + args.target[0]) / 2.0))
j = int(np.floor((args.npts - 1) * (1 + args.target[1]) / 2.0))
# The target cell gets cost -1 (all others are 1) — presumably how the
# solver identifies the target; confirm against the solver's docs.
data[i, j] = -1
f.create_dataset('cost_function', shape=data.shape, data=data)
f.close()

# Run the compiled solver, which reads 'cost_function' from the file above
# and writes a 'value_function' dataset back into it.
subprocess.run(["../../build/examples/eikonal2d_periodic"])

f = h5.File(filename, "r")
data = f['value_function'][()]
f.close()

# Rebuild the [-1, 1] x [-1, 1] grid used for plotting.
t = np.linspace(-1, 1, data.shape[0])
x, y = np.meshgrid(t, t, indexing='ij')

if not args.no_plot:
    # Heatmap of the value function with dashed level contours.
    plt.pcolormesh(x, y, data, cmap='jet')
    plt.colorbar()
    plt.contour(x, y, data, levels=10, linestyles='dashed', colors='k')
    plt.axis('equal')
    plt.show()
|
{"hexsha": "17e7efd7f4fc01716cdf2e6e2bb61659678e5793", "size": 1345, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/scripts/eikonal2d_periodic.py", "max_stars_repo_name": "mcpca/fsm", "max_stars_repo_head_hexsha": "df4081fa0e595284ddbb1f30f20c5fb2063aa41f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-18T14:07:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T11:35:29.000Z", "max_issues_repo_path": "examples/scripts/eikonal2d_periodic.py", "max_issues_repo_name": "mcpca/fsm", "max_issues_repo_head_hexsha": "df4081fa0e595284ddbb1f30f20c5fb2063aa41f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/scripts/eikonal2d_periodic.py", "max_forks_repo_name": "mcpca/fsm", "max_forks_repo_head_hexsha": "df4081fa0e595284ddbb1f30f20c5fb2063aa41f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-31T07:50:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T17:30:14.000Z", "avg_line_length": 26.3725490196, "max_line_length": 72, "alphanum_fraction": 0.6453531599, "include": true, "reason": "import numpy", "num_tokens": 382}
|
# Testcase from example given in Mocking.jl's README
@testset "readme" begin
    # Note: Function only works in UNIX environments.
    function randdev(n::Integer)
        # @mock makes this `open` call patchable inside `apply` blocks.
        @mock open("/dev/urandom") do fp
            reverse(read(fp, n))
        end
    end

    n = 10

    if Sys.isunix()
        result = randdev(n)  # Reading /dev/urandom only works on UNIX environments
        @test eltype(result) == UInt8
        @test length(result) == n
    end

    # Produces a string with sequential UInt8 values from 1:n
    data = unsafe_string(pointer(convert(Array{UInt8}, 1:n)))

    # Generate a alternative method of `open` which call we wish to mock
    patch = @patch open(fn::Function, f::AbstractString) = fn(IOBuffer(data))

    # Apply the patch which will modify the behaviour for our test
    apply(patch) do
        # With the patch active, randdev reads the fixed buffer, so the
        # reversed result is exactly n:-1:1.
        @test randdev(n) == convert(Array{UInt8}, n:-1:1)
    end

    if Sys.isunix()
        # Outside of the scope of the patched environment `@mock` is essentially a no-op
        @test randdev(n) != convert(Array{UInt8}, n:-1:1)
    end
end
|
{"hexsha": "c6ad21d2610bf5868e5c7cd5026b3ab135b9beeb", "size": 1065, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/readme.jl", "max_stars_repo_name": "invenia/Patchwork.jl", "max_stars_repo_head_hexsha": "1eb1e7fb3783c0a78900d6880bcac368905a464f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2015-12-01T12:55:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T09:02:35.000Z", "max_issues_repo_path": "test/readme.jl", "max_issues_repo_name": "invenia/Patchwork.jl", "max_issues_repo_head_hexsha": "1eb1e7fb3783c0a78900d6880bcac368905a464f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 93, "max_issues_repo_issues_event_min_datetime": "2016-01-04T14:42:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T17:19:53.000Z", "max_forks_repo_path": "test/readme.jl", "max_forks_repo_name": "invenia/Patchwork.jl", "max_forks_repo_head_hexsha": "1eb1e7fb3783c0a78900d6880bcac368905a464f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:15:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:35:28.000Z", "avg_line_length": 32.2727272727, "max_line_length": 88, "alphanum_fraction": 0.6384976526, "num_tokens": 287}
|
import pyrtools as pyr
import numpy
class FakeSFPyr(pyr.SFpyr):
    """Steerable pyramid reconstructed from a flat coefficient vector.

    Splits the 1-D vector back into per-band arrays using the band shapes
    recorded in `pind`, without running SFpyr's own construction.
    """

    def __init__(self, pyr, pind):
        # Rebuild the band list by slicing the flat vector; each band's
        # element count and final shape come from its entry in `pind`.
        self.pyr = []
        self.pyrSize = pind
        offset = 0
        for shape in pind:
            size = numpy.prod(shape)
            self.pyr.append(pyr[offset:offset + size].reshape(*shape))
            offset += size
|
{"hexsha": "3167f427e2164b14ca66b2d26c2992f16cad2225", "size": 377, "ext": "py", "lang": "Python", "max_stars_repo_path": "texturesynth/fakesfpyr.py", "max_stars_repo_name": "tochikuji/pyTextureSynth", "max_stars_repo_head_hexsha": "6e1746fa1cc931ea083e3f04004a42a4894c762e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "texturesynth/fakesfpyr.py", "max_issues_repo_name": "tochikuji/pyTextureSynth", "max_issues_repo_head_hexsha": "6e1746fa1cc931ea083e3f04004a42a4894c762e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "texturesynth/fakesfpyr.py", "max_forks_repo_name": "tochikuji/pyTextureSynth", "max_forks_repo_head_hexsha": "6e1746fa1cc931ea083e3f04004a42a4894c762e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.5625, "max_line_length": 67, "alphanum_fraction": 0.575596817, "include": true, "reason": "import numpy", "num_tokens": 104}
|
import numpy as np
import gym
from gym import wrappers
# Discretization resolution: each of the two observation dimensions
# (position, velocity) is split into n_states buckets.
n_states = 40
max_episodes = 10000
initial_lr = 1.0  # initial learning rate; decays during training
min_lr = 0.003  # floor for the decayed learning rate
discount_factor = 1.0
max_iterations = 10000  # step cap per episode
eps = 0.02  # epsilon for epsilon-greedy exploration
env_name = 'MountainCar-v0'
env = gym.make(env_name)
# Fixed seeds for reproducible training runs.
env.seed(0)
np.random.seed(0)
# Q-table over the discretized 2-D state grid x available actions.
q_table = np.zeros((n_states, n_states, env.action_space.n))
def train(render=False):
    """Train the global Q-table with tabular Q-learning.

    Action selection is epsilon-greedy; the greedy branch samples from a
    softmax over the state's Q-values. Progress is printed every 100 episodes.
    """
    for episode in range(max_episodes):
        obs = env.reset()
        total_reward = 0
        # Learning rate decays every 100 episodes, floored at min_lr.
        eta = max(min_lr, initial_lr * (0.85 ** (episode // 100)))
        for _ in range(max_iterations):
            if render:
                env.render()
            state = obs_to_state(obs)
            if np.random.uniform(0, 1) < eps:
                # Explore: uniformly random action.
                action = np.random.choice(env.action_space.n)
            else:
                # Exploit: sample an action from a softmax over Q-values.
                exp_q = np.exp(q_table[state])
                action = np.random.choice(env.action_space.n, p=exp_q / np.sum(exp_q))
            obs, reward, done, info = env.step(action)
            total_reward += reward
            # Q-learning update toward the TD target.
            new_state = obs_to_state(obs)
            td_target = reward + discount_factor * np.max(q_table[new_state])
            q_table[state + (action,)] += eta * (td_target - q_table[state + (action,)])
            if done:
                break
        if episode % 100 == 0:
            print('Iteration #{} -- Total reward = {}.'.format(episode + 1, total_reward))
def run(render=True, policy=None):
    """Roll out a single episode and return its discounted return.

    With policy=None actions are sampled uniformly; otherwise the given
    state-indexed policy table is followed.
    """
    obs = env.reset()
    total_reward = 0
    step_idx = 0
    for _ in range(max_iterations):
        if render:
            env.render()
        if policy is None:
            action = env.action_space.sample()
        else:
            action = policy[obs_to_state(obs)]
        obs, reward, done, info = env.step(action)
        # Accumulate the gamma-discounted reward.
        total_reward += discount_factor ** step_idx * reward
        step_idx += 1
        if done:
            break
    return total_reward
def obs_to_state(obs):
    """Map a continuous observation onto a discrete (a, b) grid cell."""
    low = env.observation_space.low
    # Width of one bucket in each dimension.
    cell = (env.observation_space.high - low) / n_states
    a = int((obs[0] - low[0]) / cell[0])
    b = int((obs[1] - low[1]) / cell[1])
    return a, b
if __name__ == '__main__':
    train(render=False)
    # Greedy policy: best action for each discretized state.
    solution_policy = np.argmax(q_table, axis=2)
    print("Solution policy")
    # BUG FIX: the raw Q-table was previously printed under the
    # "Solution policy" heading instead of the derived policy.
    print(solution_policy)
    # Evaluate the policy over 100 silent episodes, then animate one run.
    solution_policy_scores = [run(render=False, policy=solution_policy) for _ in range(100)]
    print("Average score of solution = ", np.mean(solution_policy_scores))
    run(render=True, policy=solution_policy)
|
{"hexsha": "cbe170ff2c5dd74b0db866aaf5a0e6be34700e3b", "size": 2733, "ext": "py", "lang": "Python", "max_stars_repo_path": "MountainCar-v0/mountainCar-v0.py", "max_stars_repo_name": "spirosbax/HittingTheGym", "max_stars_repo_head_hexsha": "9bac19cb159d8e62d9518ee3703e6f18d32e5cb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-03-05T12:38:04.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-26T18:41:33.000Z", "max_issues_repo_path": "MountainCar-v0/mountainCar-v0.py", "max_issues_repo_name": "spirosbax/HittingTheGym", "max_issues_repo_head_hexsha": "9bac19cb159d8e62d9518ee3703e6f18d32e5cb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MountainCar-v0/mountainCar-v0.py", "max_forks_repo_name": "spirosbax/HittingTheGym", "max_forks_repo_head_hexsha": "9bac19cb159d8e62d9518ee3703e6f18d32e5cb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-20T17:26:12.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-20T17:26:12.000Z", "avg_line_length": 32.9277108434, "max_line_length": 162, "alphanum_fraction": 0.5967800951, "include": true, "reason": "import numpy", "num_tokens": 685}
|
%\documentclass[ebook,12pt,openany]{memoir} %ebook
\documentclass[ebook,12pt,openany,oneside]{memoir} %physical book
\usepackage[utf8x]{inputenc}
\usepackage[english]{babel}
\usepackage{url}
\usepackage{graphicx}
\usepackage{imakeidx} % for how to use the index see https://www.sharelatex.com/learn/Indices
\usepackage{hyperref}
\usepackage{afterpage}
\newcommand\blankpage{%
\null
\thispagestyle{empty}%
\addtocounter{page}{-1}%
\newpage}
\makeindex
\title{Trash Magic}
\author{Trash Robot}
\begin{document}
\frontmatter
%\begin{figure}
%\centering
%\includegraphics{cover.png}
%\end{figure}
\clearpage
\clearpage
\newpage
\thispagestyle{empty}
\mbox{}
\maketitle
%\tableofcontents
%\listoffigures
%civilizations
%
\mainmatter
\chapter{Trash Magic}
\input{scrolls/trashmagic.tex}
\chapter{Magic Book}
\input{scrolls/magicbook.tex}
\chapter{Street Network}
\input{scrolls/streetnetwork.tex}
\chapter{Trash Robot}
\input{scrolls/trashrobot.tex}
\chapter{Action Geometry}
\input{scrolls/actiongeometry.tex}
%\printindex
%https://www.sharelatex.com/learn/Glossaries
\end{document}
|
{"hexsha": "89180b261c73205cbbb89ccf1a38953fd5bf544c", "size": 1113, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "book6x9.tex", "max_stars_repo_name": "LafeLabs/trashmagic", "max_stars_repo_head_hexsha": "2eb29db9e29c6f90ac7cc7a0b477b0e00fec5eab", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "book6x9.tex", "max_issues_repo_name": "LafeLabs/trashmagic", "max_issues_repo_head_hexsha": "2eb29db9e29c6f90ac7cc7a0b477b0e00fec5eab", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "book6x9.tex", "max_forks_repo_name": "LafeLabs/trashmagic", "max_forks_repo_head_hexsha": "2eb29db9e29c6f90ac7cc7a0b477b0e00fec5eab", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.1230769231, "max_line_length": 93, "alphanum_fraction": 0.7619047619, "num_tokens": 347}
|
import TimeZones: TimeZone, localzone
import Compat: is_linux
# Ensure that the current system's local time zone is supported. If this test fails make
# sure to report it as an issue.
@test isa(localzone(), TimeZone)

if is_linux()
    # Bad TZ environmental variables
    withenv("TZ" => "") do
        @test_throws ErrorException localzone()
    end
    withenv("TZ" => "Europe/Warsaw") do
        @test_throws ErrorException localzone()
    end

    # Absolute filespec: TZ of ":" followed by an absolute tzfile path.
    warsaw_path = joinpath(TZFILE_DIR, "Europe", "Warsaw")
    warsaw_from_file = open(warsaw_path) do f
        TimeZones.read_tzfile(f, "local")
    end
    withenv("TZ" => ":" * abspath(warsaw_path)) do
        @test localzone() == warsaw_from_file
    end

    # Relative filespec: resolved to a known time zone name.
    warsaw = TimeZone("Europe/Warsaw")
    withenv("TZ" => ":Europe/Warsaw") do
        @test localzone() == warsaw
    end

    # Set TZDIR and use time zone unrecognized by TimeZone
    @test_throws ArgumentError TimeZone("Etc/UTC")
    utc = open(joinpath(TZFILE_DIR, "Etc", "UTC")) do f
        TimeZones.read_tzfile(f, "Etc/UTC")
    end
    withenv("TZ" => ":Etc/UTC", "TZDIR" => TZFILE_DIR) do
        @test localzone() == utc
    end

    # Use system installed files
    @test_throws ArgumentError TimeZone("Etc/GMT-9")
    gmt_minus_9 = FixedTimeZone("Etc/GMT-9", 9 * 3600)
    withenv("TZ" => ":Etc/GMT-9") do
        @test localzone() == gmt_minus_9
    end

    # Unable to locate time zone on system
    withenv("TZ" => ":") do
        @test_throws SystemError localzone()
    end
    withenv("TZ" => ":Etc/Foo") do
        @test_throws SystemError localzone()
    end
end
|
{"hexsha": "2178c48ee033c733781997acd9334558dadcf863", "size": 1647, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/local.jl", "max_stars_repo_name": "JuliaPackageMirrors/TimeZones.jl", "max_stars_repo_head_hexsha": "d997abb7398ca8514007d1cbc77a031f721c727b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/local.jl", "max_issues_repo_name": "JuliaPackageMirrors/TimeZones.jl", "max_issues_repo_head_hexsha": "d997abb7398ca8514007d1cbc77a031f721c727b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-09-21T20:48:24.000Z", "max_issues_repo_issues_event_max_datetime": "2015-09-21T20:48:24.000Z", "max_forks_repo_path": "test/local.jl", "max_forks_repo_name": "invenia/TimeZones.jl", "max_forks_repo_head_hexsha": "4844e17e1706fe4004876345c712e8e8324eb7c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8947368421, "max_line_length": 88, "alphanum_fraction": 0.6381299332, "num_tokens": 493}
|
# Copyright 2016-2019 David Robillard <d@drobilla.net>
# Copyright 2013 Kaspar Emanuel <kaspar.emanuel@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import lilv
import os
import sys
import unittest
# Build a file:// URI for the test plugin bundle, ending in '/' as LV2
# bundle URIs require.
path = os.path.abspath("bindings/bindings_test_plugin.lv2/")

# BUG FIX: the old Python-2 branch imported urllib.request/urllib.parse,
# which exist only on Python 3, so it could never have worked (a leftover
# from a 2to3 conversion). Use the Python 3 API unconditionally.
from urllib.parse import urljoin
from urllib.request import pathname2url

location = urljoin("file:", pathname2url(path) + "/")
class NodeTests(unittest.TestCase):
    """Tests for lilv.World node constructors and Node predicates/conversions."""

    def setUp(self):
        self.world = lilv.World()

    def testNodes(self):
        # One node of each primitive kind.
        aint = self.world.new_int(1)
        aint2 = self.world.new_int(1)
        aint3 = self.world.new_int(3)
        afloat = self.world.new_float(2.0)
        atrue = self.world.new_bool(True)
        afalse = self.world.new_bool(False)
        auri = self.world.new_uri("http://example.org")
        afile = self.world.new_file_uri(None, "/foo/bar")
        astring = self.world.new_string("hello")
        self.assertEqual(auri.get_turtle_token(), "<http://example.org>")
        # Type predicates.
        self.assertTrue(aint.is_int())
        self.assertTrue(afloat.is_float())
        self.assertTrue(auri.is_uri())
        self.assertTrue(astring.is_string())
        self.assertTrue(astring.is_literal())
        self.assertFalse(auri.is_blank())
        # Conversions to Python primitives.
        self.assertTrue(int(aint) == 1)
        self.assertTrue(float(afloat) == 2.0)
        self.assertTrue(bool(atrue))
        self.assertFalse(bool(afalse))
        self.assertEqual(afile.get_path(), "/foo/bar")
        # Equality: same type and value compare equal; different values or
        # different types do not.
        self.assertTrue(aint == aint2)
        self.assertTrue(aint != aint3)
        self.assertTrue(aint != afloat)
        # Converting a node to a mismatched primitive type raises ValueError.
        with self.assertRaises(ValueError):
            int(atrue)
        with self.assertRaises(ValueError):
            float(aint)
        with self.assertRaises(ValueError):
            bool(astring)
class UriTests(unittest.TestCase):
    """Tests for URI construction and plugin lookup by URI."""

    def setUp(self):
        self.world = lilv.World()
        self.world.load_all()

    def testInvalidURI(self):
        # Syntactically invalid URIs are rejected at construction time.
        with self.assertRaises(ValueError):
            self.plugin_uri = self.world.new_uri("invalid_uri")

    def testNonExistentURI(self):
        # A well-formed URI matching no plugin yields None, not an error.
        self.plugin_uri = self.world.new_uri("exist:does_not")
        self.plugin = self.world.get_all_plugins().get_by_uri(self.plugin_uri)
        self.assertEqual(self.plugin, None)

    def testPortTypes(self):
        self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_INPUT_PORT))

    def testPortTypes2(self):
        self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_OUTPUT_PORT))

    def testPortTypes3(self):
        self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_AUDIO_PORT))

    def testPortTypes4(self):
        self.assertIsNotNone(self.world.new_uri(lilv.LILV_URI_CONTROL_PORT))
class PluginClassTests(unittest.TestCase):
    """Tests for the root plugin class and iteration over its children."""

    def setUp(self):
        self.world = lilv.World()

    def testPluginClasses(self):
        pclass = self.world.get_plugin_class()
        self.assertIsNotNone(pclass)
        # The root plugin class has no parent.
        self.assertIsNone(pclass.get_parent_uri())
        self.assertIsNotNone(pclass.get_uri())
        self.assertIsNotNone(pclass.get_label())
        # str() of a plugin class is its URI string.
        self.assertEqual(str(pclass.get_uri()), str(pclass))
        # Every child class also exposes a URI and a label.
        for i in pclass.get_children():
            self.assertIsNotNone(i)
            self.assertIsNotNone(i.get_uri())
            self.assertIsNotNone(i.get_label())
class PluginClassesTests(unittest.TestCase):
    """Tests for the PluginClasses collection: membership and indexing."""

    def setUp(self):
        self.world = lilv.World()
        self.world.load_all()

    def testPluginClasses(self):
        classes = self.world.get_plugin_classes()
        pclass = self.world.get_plugin_class()
        self.assertIsNotNone(classes)
        self.assertIsNotNone(pclass)
        # Membership works with both the class object and its URI.
        self.assertTrue(pclass in classes)
        self.assertTrue(pclass.get_uri() in classes)
        self.assertGreater(len(classes), 1)
        # Indexing by position and by URI.
        self.assertIsNotNone(classes[0])
        self.assertIsNotNone(classes[pclass.get_uri()])
        # Unknown URIs raise KeyError rather than returning None.
        with self.assertRaises(KeyError):
            classes["http://example.org/notaclass"].get_uri()
class LoadTests(unittest.TestCase):
    """Tests for loading and unloading a bundle and its resources."""

    def setUp(self):
        self.world = lilv.World()
        # `location` is the file:// URI of the test plugin bundle (module level).
        self.bundle_uri = self.world.new_uri(location)
        self.world.load_specifications()
        self.world.load_plugin_classes()

    def testLoadUnload(self):
        self.world.load_bundle(self.bundle_uri)
        plugins = self.world.get_all_plugins()
        plugin = plugins.get(plugins.begin())
        # Load then unload the plugin's resource data, then the bundle itself.
        self.world.load_resource(plugin)
        self.world.unload_resource(plugin)
        self.world.unload_bundle(self.bundle_uri)
class PluginTests(unittest.TestCase):
    """Exercise the Plugin query API against the bundled test plugin.

    setUp loads the bundle at `location` (defined elsewhere in this file)
    and asserts basic sanity of the plugin collection before each test.
    """

    def setUp(self):
        self.world = lilv.World()
        # Restrict localized values to a single language.
        self.world.set_option(
            lilv.OPTION_FILTER_LANG, self.world.new_bool(True)
        )
        self.bundle_uri = self.world.new_uri(location)
        self.assertIsNotNone(
            self.bundle_uri, "Invalid URI: '" + location + "'"
        )
        self.world.load_bundle(self.bundle_uri)
        self.plugins = self.world.get_all_plugins()
        self.plugin = self.plugins.get(self.plugins.begin())
        self.assertTrue(self.plugin.verify())
        # Collection membership works with the plugin object or its URI,
        # and URI lookups of unknown plugins raise KeyError.
        self.assertTrue(self.plugin in self.plugins)
        self.assertTrue(self.plugin.get_uri() in self.plugins)
        self.assertEqual(self.plugins[self.plugin.get_uri()], self.plugin)
        with self.assertRaises(KeyError):
            self.plugins["http://example.org/notaplugin"].get_uri()
        self.assertIsNotNone(
            self.plugin,
            msg="Test plugin not found at location: '" + location + "'",
        )
        self.assertEqual(location, str(self.plugin.get_bundle_uri()))
        self.plugin_uri = self.plugin.get_uri()
        self.assertEqual(
            self.plugin.get_uri(), self.plugin_uri, "URI equality broken"
        )
        # Port-class URI nodes used by testPorts/testPortCount below.
        self.lv2_InputPort = self.world.new_uri(lilv.LILV_URI_INPUT_PORT)
        self.lv2_OutputPort = self.world.new_uri(lilv.LILV_URI_OUTPUT_PORT)
        self.lv2_AudioPort = self.world.new_uri(lilv.LILV_URI_AUDIO_PORT)
        self.lv2_ControlPort = self.world.new_uri(lilv.LILV_URI_CONTROL_PORT)

    def testGetters(self):
        """Plugin-level getters: symbol, URIs, name, class, and features."""
        self.assertEqual(
            self.world.get_symbol(self.plugin), "lilv_bindings_test_plugin"
        )
        self.assertIsNotNone(self.plugin.get_bundle_uri())
        self.assertGreater(len(self.plugin.get_data_uris()), 0)
        self.assertIsNotNone(self.plugin.get_library_uri())
        self.assertTrue(self.plugin.get_name().is_string())
        self.assertTrue(self.plugin.get_class().get_uri().is_uri())
        self.assertEqual(
            len(self.plugin.get_value(self.world.ns.doap.license)), 1
        )
        licenses = self.plugin.get_value(self.world.ns.doap.license)
        features = self.plugin.get_value(self.world.ns.lv2.optionalFeature)
        self.assertEqual(len(licenses), 1)
        self.assertTrue(licenses[0] in licenses)
        # Indexing past the end raises IndexError rather than returning None.
        with self.assertRaises(IndexError):
            self.assertIsNone(licenses[len(licenses)])
        # merge() concatenates two node collections.
        self.assertEqual(
            len(licenses) + len(features), len(licenses.merge(features))
        )
        self.assertEqual(
            licenses.get(licenses.begin()),
            self.world.new_uri("http://opensource.org/licenses/isc"),
        )
        self.assertEqual(licenses[0], licenses.get(licenses.begin()))
        self.assertTrue(
            self.plugin.has_feature(self.world.ns.lv2.hardRTCapable)
        )
        self.assertEqual(len(self.plugin.get_supported_features()), 1)
        self.assertEqual(len(self.plugin.get_optional_features()), 1)
        self.assertEqual(len(self.plugin.get_required_features()), 0)
        self.assertFalse(
            self.plugin.has_extension_data(
                self.world.new_uri("http://example.org/nope")
            )
        )
        self.assertEqual(len(self.plugin.get_extension_data()), 0)
        # NOTE(review): duplicate of the previous assertion — presumably it
        # checks that the call is repeatable; confirm intent or remove.
        self.assertEqual(len(self.plugin.get_extension_data()), 0)
        self.assertFalse(self.plugin.has_latency())
        self.assertIsNone(self.plugin.get_latency_port_index())

    def testPorts(self):
        """Port lookup by index, symbol, and designation, plus port getters."""
        self.assertEqual(self.plugin.get_num_ports(), 4)
        self.assertIsNotNone(self.plugin.get_port(0))
        self.assertIsNotNone(self.plugin.get_port(1))
        self.assertIsNotNone(self.plugin.get_port(2))
        self.assertIsNotNone(self.plugin.get_port(3))
        # Out-of-range indices and unknown symbols return None.
        self.assertIsNone(self.plugin.get_port_by_index(4))
        self.assertIsNotNone(self.plugin.get_port("input"))
        self.assertIsNotNone(self.plugin.get_port("output"))
        self.assertIsNotNone(self.plugin.get_port("audio_input"))
        self.assertIsNotNone(self.plugin.get_port("audio_output"))
        self.assertIsNone(self.plugin.get_port_by_symbol("nonexistent"))
        self.assertIsNone(
            self.plugin.get_port_by_designation(
                self.world.ns.lv2.InputPort, self.world.ns.lv2.control
            )
        )
        # The test plugin declares no project/author metadata.
        self.assertIsNone(self.plugin.get_project())
        self.assertIsNone(self.plugin.get_author_name())
        self.assertIsNone(self.plugin.get_author_email())
        self.assertIsNone(self.plugin.get_author_homepage())
        self.assertFalse(self.plugin.is_replaced())
        self.assertEqual(
            0,
            len(
                self.plugin.get_related(
                    self.world.new_uri("http://example.org/Type")
                )
            ),
        )
        self.assertEqual(
            1,
            self.plugin.get_num_ports_of_class(
                self.lv2_InputPort, self.lv2_AudioPort
            ),
        )
        # Inspect the control input port in detail.
        port = self.plugin.get_port("input")
        self.assertEqual(self.world.get_symbol(port), "input")
        self.assertTrue(port.get_node().is_blank())
        self.assertEqual(0, port.get(self.world.ns.lv2.index))
        self.assertEqual(1, len(port.get_value(self.world.ns.lv2.symbol)))
        self.assertEqual(port.get_value(self.world.ns.lv2.symbol)[0], "input")
        self.assertFalse(port.has_property(self.world.ns.lv2.latency))
        self.assertFalse(port.supports_event(self.world.ns.midi.MidiEvent))
        self.assertEqual(0, port.get_index())
        self.assertEqual("input", port.get_symbol())
        self.assertEqual("Input", port.get_name())
        self.assertEqual(
            [
                str(self.world.ns.lv2.ControlPort),
                str(self.world.ns.lv2.InputPort),
            ],
            sorted(list(map(str, port.get_classes()))),
        )
        self.assertTrue(port.is_a(self.world.ns.lv2.ControlPort))
        self.assertFalse(port.is_a(self.world.ns.lv2.AudioPort))
        # get_range() returns (default, minimum, maximum).
        self.assertEqual((0.5, 0.0, 1.0), port.get_range())
        self.assertEqual(0, len(port.get_properties()))

    def testScalePoints(self):
        """The control input port defines scale points 0.0="off", 1.0="on"."""
        port = self.plugin.get_port("input")
        points = port.get_scale_points()
        point_dict = {
            float(points[0].get_value()): points[0].get_label(),
            float(points[1].get_value()): points[1].get_label(),
        }
        self.assertEqual(point_dict, {0.0: "off", 1.0: "on"})

    def testPortCount(self):
        """There is exactly one port of each direction/type combination."""
        self.assertEqual(
            1,
            self.plugin.get_num_ports_of_class(
                self.lv2_OutputPort, self.lv2_AudioPort
            ),
        )
        self.assertEqual(
            1,
            self.plugin.get_num_ports_of_class(
                self.lv2_OutputPort, self.lv2_ControlPort
            ),
        )
        self.assertEqual(
            1,
            self.plugin.get_num_ports_of_class(
                self.lv2_InputPort, self.lv2_AudioPort
            ),
        )
        self.assertEqual(
            1,
            self.plugin.get_num_ports_of_class(
                self.lv2_InputPort, self.lv2_ControlPort
            ),
        )
class QueryTests(unittest.TestCase):
    """World-level RDF queries: namespaces, ask, find_nodes, and get."""

    def setUp(self):
        self.world = lilv.World()
        self.world.load_all()
        self.bundle_uri = self.world.new_uri(location)
        self.world.load_bundle(self.bundle_uri)
        self.plugins = self.world.get_all_plugins()
        self.plugin = self.plugins.get(self.plugins.begin())

    def testNamespaces(self):
        # Namespace objects compare equal to their URI strings, and
        # attribute access appends the suffix to the namespace URI.
        lv2_ns = self.world.ns.lv2
        self.assertEqual(lv2_ns, "http://lv2plug.in/ns/lv2core#")
        self.assertEqual(lv2_ns.Plugin, "http://lv2plug.in/ns/lv2core#Plugin")

    def testQuery(self):
        rdf_type = self.world.ns.rdf.type
        lv2_Plugin = self.world.ns.lv2.Plugin
        # At least one resource in the model is typed as an lv2:Plugin.
        self.assertTrue(self.world.ask(None, rdf_type, lv2_Plugin))
        matches = self.world.find_nodes(None, rdf_type, lv2_Plugin)
        self.assertLess(0, len(matches))
        # get() returns a single matching node: the test plugin's URI.
        self.assertEqual(
            self.plugin.get_uri(),
            self.world.get(None, rdf_type, lv2_Plugin),
        )
class InstanceTests(unittest.TestCase):
    """Instantiating the test plugin and running it over a buffer."""

    def setUp(self):
        self.world = lilv.World()
        self.bundle_uri = self.world.new_uri(location)
        self.world.load_bundle(self.bundle_uri)
        self.plugins = self.world.get_all_plugins()
        self.plugin = self.plugins[0]
        self.instance = lilv.Instance(self.plugin, 48000)
        self.assertEqual(self.plugin.get_uri(), self.instance.get_uri())
        # Unknown extension URIs yield no extension data, whether passed
        # as a node or as a plain string.
        self.assertIsNone(
            self.instance.get_extension_data(
                self.world.new_uri("http://example.org/ext")
            )
        )
        self.assertIsNone(
            self.instance.get_extension_data("http://example.org/ext")
        )

    def testRun(self):
        """Connect all four ports, then activate/run/deactivate."""
        try:
            import numpy
        except ImportError:
            sys.stderr.write("warning: Missing numpy, not testing instance\n")
            return
        n_samples = 100
        buf = numpy.zeros(n_samples)
        # Non-buffer objects are rejected by connect_port.
        with self.assertRaises(Exception):
            self.instance.connect_port(0, "hello")
        self.instance.connect_port(0, None)
        # BUG FIX: this call previously connected port 0 a second time,
        # leaving control port 1 unconnected before run().
        self.instance.connect_port(1, None)
        self.instance.connect_port(2, buf)
        self.instance.connect_port(3, buf)
        self.instance.activate()
        self.instance.run(n_samples)
        self.instance.deactivate()
class UITests(unittest.TestCase):
    """Checks on the plugin's UI descriptions."""

    def setUp(self):
        self.world = lilv.World()
        self.bundle_uri = self.world.new_uri(location)
        self.world.load_bundle(self.bundle_uri)
        self.plugins = self.world.get_all_plugins()
        self.plugin = self.plugins[0]

    def testUI(self):
        uis = self.plugin.get_uis()
        expected_uri = self.world.new_uri(
            "http://example.org/lilv-bindings-test-plugin-ui"
        )
        self.assertEqual(1, len(uis))
        first = uis[0]
        self.assertEqual(str(first), str(expected_uri))
        # Unknown UI URIs raise KeyError on lookup.
        with self.assertRaises(KeyError):
            uis["http://example.org/notaui"].get_uri()
        # A UI compares equal to its URI string and exposes its URIs.
        self.assertEqual(first, str(expected_uri))
        self.assertEqual(first.get_uri(), expected_uri)
        self.assertEqual(first.get_bundle_uri(), self.bundle_uri)
        self.assertEqual(
            first.get_binary_uri(), str(self.bundle_uri) + "TODO"
        )
        self.assertEqual(uis[first.get_uri()], first)
        self.assertTrue(first.is_a(self.world.ns.ui.GtkUI))
        # Membership works with the UI object or its URI node.
        self.assertIn(first, uis)
        self.assertIn(first.get_uri(), uis)
        self.assertEqual([self.world.ns.ui.GtkUI], list(first.get_classes()))
|
{"hexsha": "3db89182ac4d77a0c5252df3aa6111b3e90c9d90", "size": 16224, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib-src/lv2/lilv/bindings/test/python/test_api.py", "max_stars_repo_name": "ECSE437-Audacity/audacity", "max_stars_repo_head_hexsha": "eff572509488f2df25f244f71cf45564b62027c0", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-27T21:38:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-27T21:38:45.000Z", "max_issues_repo_path": "lib-src/lv2/lilv/bindings/test/python/test_api.py", "max_issues_repo_name": "ECSE437-Audacity/audacity", "max_issues_repo_head_hexsha": "eff572509488f2df25f244f71cf45564b62027c0", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib-src/lv2/lilv/bindings/test/python/test_api.py", "max_forks_repo_name": "ECSE437-Audacity/audacity", "max_forks_repo_head_hexsha": "eff572509488f2df25f244f71cf45564b62027c0", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-17T19:54:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-17T19:54:58.000Z", "avg_line_length": 37.9953161593, "max_line_length": 85, "alphanum_fraction": 0.6336291913, "include": true, "reason": "import numpy", "num_tokens": 3416}
|
\documentclass[Physics.tex]{subfiles}
\begin{document}
\chapter{Lasers and Semiconductors}
\section{Lasers}
The word `\sldef{laser}' is an acronym meaning light amplification by stimulated emission of radiation. Light emitted from a laser is monochromatic, coherent, unidirectional and focused.
\subsection{Principles of the laser}
Lasers work based on (stimulated) absorption, spontaneous emission, stimulated emission and population inversion.
\sldef{Absorption} occurs when an atom is excited from a lower energy level to a higher energy level due to an electron absorbing an external photon with energy equal to the energy difference between the two energy levels.
\sldef{Spontaneous emission} occurs when an excited atom transits on its own accord to a lower energy level, releasing a photon with energy equal to the energy difference between the initial and final energy levels.
\sldef{Stimulated emission} occurs when an excited atom is induced to transit to a lower energy level through interaction with an incident photon of energy equal to the energy difference between the initial and final energy levels, releasing a photon that has the same energy, phase, polarisation and direction of travel as the incident photon.
\sldef{Population inversion} occurs when the population of atoms at a higher energy level exceeds the population of atoms at a lower energy level. This can be achieved through optical pumping or pumping by electrical discharge.
\subsection{Laser mechanism}
The \sldef{laser mechanism} starts with an external energy source, which is used to excite atoms from ground state to an excited state. These atoms de-excite to a metastable state through spontaneous emission, where they stay for a long period of time, such that more atoms are in the higher lasing level than the lower lasing level i.e. population inversion.
When an atom in the metastable state falls back to a lower energy level, a photon is emitted. This photon interacts with another atom in the metastable state, stimulating the atom to de-excite to a lower energy state, emitting another photon in the process. This photon emitted by stimulated emission has the same phase, energy, frequency, polarisation and direction of travel as the incoming photon. The incoming photon is not absorbed in the process. These photons then go on to cause other excited atoms in the metastable state to de-excite, resulting in the stimulated emission of more photons.
This chain reaction will continue and light of high intensity is produced. Mirrors at both ends reflect the photons through the lasing medium, causing more stimulated emission of photons in the axis parallel to the laser tube. The partially reflective mirror at one end allows a small fraction of laser light to escape to form a useful laser beam that is coherent, collimated and monochromatic.
\section{Semiconductors}
\subsection{Energy bands}
Isolated atoms have discrete electron energy levels. However, when two atoms are brought close, the electric interaction between them causes these two energy levels to split into two levels with energies similar to the original.
When many atoms come together, like in a solid, the original discrete energy level splits into an exceedingly large number of energy levels with similar energies, which coalesce to form a continuous \sldef{energy band}.
Similar to energy levels in an isolated atom, most energy bands are either filled or empty. Like the valence shell, the \sldef{valence band} is the outermost band containing electrons. The \sldef{conduction band} refers to the next higher energy band after the valence band.
\subsection{Band theory: electrical conduction}
The energy gap between the valence and conduction band is termed the \sldef{band gap}. The width of the band gap determines whether a solid is a conductor, semiconductor or insulator.
In metals, the valence band either is incompletely filled or overlaps with the conduction band. Valence electrons can thus easily move to higher unfilled energy levels in the valence or conduction band using little to no energy, and so are free and able to move when there is an external electric field to conduct electricity. Metals are thus good conductors of electricity.
In insulators, the valence band is completely filled while the conduction band is completely vacant; there is also a large band gap. At room temperature, a negligible number of atoms have sufficient energy to cross over to the conduction band. Most electrons cannot move to conduct electricity and the material is an insulator.
However, at higher temperatures or in strong electric fields, enough electrons can move to the conduction band and the material starts to conduct. When this happens, the material has `broken down'.
A material that acts as a semiconductor when pure is an \sldef{intrinsic semiconductor}. They have conductivities and band gaps (on the order of \SI{1}{\electronvolt}) between those of conductors and insulators.
At \SI{0}{\kelvin}, there are no free electrons to conduct as all valence electrons are bound in the completely filled valence band and there is no energy to excite them across the energy gap, and so semiconductors are poor conductors at low temperatures.
At higher temperatures, thermal excitation of electrons across the band gap becomes more possible, and so conductivity increases.
When electrons move from the valence band to the conduction band, they become free to move when there is an external electric field in order to conduct a charge. The \sldef{hole} they leave behind in the valence band acts as a charge carrier as a nearby free electron can move into the hole, creating a new hole at the original site of the electron. The hole thus acts as a positively charged particle moving opposite to the electrons.
Both electrons and holes in the conduction and valence bands respectively are responsible for electrical conduction in semiconductors.
\subsection{Doping}
The electrical conductivity of an intrinsic semiconductor can be increased by adding impurities called \sldef{dopants}. Doped semiconductors are termed \sldef{extrinsic semiconductors}.
There are two types of dopants. \sldef{N-type dopants} are pentavalent elements from group V like phosphorus and arsenic; they are electron donors. \sldef{P-type dopants} are trivalent elements from group III like boron; they are electron acceptors.
When phosphorus is used as a dopant in silicon, its `extra' electron (compared to silicon) is weakly bonded and can break free and move into the conduction band, helping to conduct. This type of doped semiconductor has mostly electrons as charge carriers, so it is called an n-type semiconductor.
In band theory, the addition of a donor creates \sldef{donor levels} just below the conduction band, which contains the extra electrons from donor atoms. At room temperature, lattice vibrations easily provide the small amount of energy required for donated electrons to cross to the conduction band where the electrons can conduct in the presence of an electric field.
When boron is used as a dopant in silicon, its missing electron forms a hole that can be filled by an electron from a neighbouring atom; when this happens, it creates a hole at the position the electron originated. This movement of electrons conducts electricity; equivalently, this can be seen as holes moving in the opposite direction. Since the charge carriers are mostly holes, this kind of doped semiconductor is a p-type semiconductor.
In band theory, the addition of an acceptor creates \sldef{acceptor levels} just above the valence band, representing the holes created by the acceptor atoms. At room temperature, acceptor levels are occupied by electrons thermally excited from the valence band, which leave holes in the valence band that can move to conduct electricity.
The addition of dopants does not disrupt the regular lattice of the silicon atoms, nor does it cause the material as a whole to have an overall charge.
\section{p-n junction}
A \sldef{p-n junction} is formed when a p-type conductor is joined to an n-type. The difference in concentration of electrons between the n-type and p-type causes electrons to diffuse across the junction from the n-type filling the holes in the p-type. This depletes the electrons and holes in the n-type and p-type respectively, creating a \sldef{depletion region} almost devoid of charge carriers.
When electrons diffuse away from the n-type, they leave behind immobile cations, while the filling of holes in the p-type creates anions. This results in an internal electric field being created across the depletion region.
As the diffusion continues, the depletion region widens and the charge difference across the junction increases, and eventually the internal electric field becomes so strong that further diffusion across the junction is prevented, and an equilibrium is reached.
When the p-type is connected to the positive terminal and the n-type to the negative terminal of a voltage source, the p-n junction is under forward bias. The internal potential difference decreases as its polarity is opposite to that of the external voltage source, and so the depletion region becomes narrower and stops inhibiting the flow of electrons from n-type to p-type. Electrons can now flow to conduct the current and current increases exponentially with increasing forward voltage.
When the p-type is connected to the negative terminal and the n-type is connected to the positive terminal of a voltage source, the p-n junction is under reverse bias. The internal potential difference increases with increasing reverse bias, widening the depletion region and further inhibiting the flow of current across the junction. Only a small reverse current can flow through, carried by the small number of n-type electrons in the conduction band and p-type holes in the valence band.
The p-n junction thus conducts current in one direction and resists current in the other; this can be used to rectify alternating currents. When the AC is flowing in the forward bias direction of the diode, the diode has low resistance and allows the current to flow. When the AC is flowing in the reverse bias direction of the diode, the diode has very high resistance, only allowing negligible current to flow. Effectively, the current is forced to flow in only one direction.
\end{document}
|
{"hexsha": "3f9e054c661218c9b5c5a2a2633ca0774f160846", "size": 10456, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "TeX/Physics/ch19_materials.tex", "max_stars_repo_name": "oliverli/A-Level-Notes", "max_stars_repo_head_hexsha": "5afdc9a71c37736aacf3ae1db9d0384cdb6a0348", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-05T11:44:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-05T11:44:33.000Z", "max_issues_repo_path": "TeX/Physics/ch19_materials.tex", "max_issues_repo_name": "oliverli/A-Level-Notes", "max_issues_repo_head_hexsha": "5afdc9a71c37736aacf3ae1db9d0384cdb6a0348", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TeX/Physics/ch19_materials.tex", "max_forks_repo_name": "oliverli/A-Level-Notes", "max_forks_repo_head_hexsha": "5afdc9a71c37736aacf3ae1db9d0384cdb6a0348", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 143.2328767123, "max_line_length": 597, "alphanum_fraction": 0.8074789594, "num_tokens": 2114}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: load-cpm.py
# Author: Yuxin Wu
import argparse
import numpy as np
import cv2
import tensorflow as tf
from tensorpack import *
from tensorpack.utils import viz
from tensorpack.utils.argtools import memoized
"""
15 channels:
0-1 head, neck
2-4 right shoulder, right elbow, right wrist
5-7 left shoulder, left elbow, left wrist
8-10 right hip, right knee, right ankle
11-13 left hip, left knee, left ankle
14: background
"""
def colorize(img, heatmap):
    """Blend a heatmap onto an image.

    Args:
        img: BGR image with values in [0, 255].
        heatmap: single-channel map with values in [0, 1].

    Returns:
        An equal 50/50 blend of the image and the jet-colored heatmap.
    """
    colored = viz.intensity_to_rgb(heatmap, cmap='jet')[:, :, ::-1]
    return 0.5 * img + 0.5 * colored
@memoized
def get_gaussian_map():
    """Return a (1, 368, 368, 1) float32 Gaussian "center map".

    The map peaks at the image center (184, 184) with sigma 21 and is
    computed only once (cached by @memoized). Vectorized replacement for
    the original 368x368 per-pixel Python double loop; float64 arithmetic
    followed by a float32 cast matches the original scalar computation.
    """
    # Signed distance of each coordinate from the center (368 / 2 == 184.0).
    coords = np.arange(368, dtype='float64') - 368 / 2
    # Broadcast to a full grid of squared distances: dist_sq[y, x].
    dist_sq = coords[np.newaxis, :] ** 2 + coords[:, np.newaxis] ** 2
    exponent = dist_sq / 2.0 / (21 ** 2)
    gaussian_map = np.exp(-exponent).astype('float32')
    return gaussian_map.reshape((1, 368, 368, 1))
def CPM(image):
    """Build the 6-stage Convolutional Pose Machine inference graph.

    Args:
        image: (N, 368, 368, 3) float tensor with pixel values in [0, 255].

    Side effect: adds an op named 'resized_map' holding the stage-6 belief
    maps upsampled back to 368x368 (15 channels; layout documented in the
    module docstring). The function itself returns None; callers fetch the
    output by op name.
    """
    # Normalize pixels to roughly [-0.5, 0.5].
    image = image / 256.0 - 0.5

    # Fixed centered Gaussian "center map", padded by one pixel and
    # average-pooled down to the shared feature-map resolution; it is
    # concatenated into every refinement stage below.
    gmap = tf.constant(get_gaussian_map())
    gmap = tf.pad(gmap, [[0, 0], [0, 1], [0, 1], [0, 0]])
    pool_center = AvgPooling('mappool', gmap, 9, strides=8, padding='VALID')
    with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu):
        # VGG-style shared feature extractor; the inline numbers note the
        # spatial resolution after each pooling step (input is 368).
        shared = (LinearWrap(image)
                  .Conv2D('conv1_1', 64)
                  .Conv2D('conv1_2', 64)
                  .MaxPooling('pool1', 2)
                  # 184
                  .Conv2D('conv2_1', 128)
                  .Conv2D('conv2_2', 128)
                  .MaxPooling('pool2', 2)
                  # 92
                  .Conv2D('conv3_1', 256)
                  .Conv2D('conv3_2', 256)
                  .Conv2D('conv3_3', 256)
                  .Conv2D('conv3_4', 256)
                  .MaxPooling('pool3', 2)
                  # 46
                  .Conv2D('conv4_1', 512)
                  .Conv2D('conv4_2', 512)
                  .Conv2D('conv4_3_CPM', 256)
                  .Conv2D('conv4_4_CPM', 256)
                  .Conv2D('conv4_5_CPM', 256)
                  .Conv2D('conv4_6_CPM', 256)
                  .Conv2D('conv4_7_CPM', 128)())

        def add_stage(stage, l):
            # One refinement stage: concatenate the previous stage's belief
            # maps with the shared features and the pooled center map, then
            # refine through five 7x7 convs and two 1x1 convs (final one
            # linear, producing 15 belief-map channels).
            l = tf.concat([l, shared, pool_center], 3,
                          name='concat_stage{}'.format(stage))
            for i in range(1, 6):
                l = Conv2D('Mconv{}_stage{}'.format(i, stage), l, 128, 7, activation=tf.nn.relu)
            l = Conv2D('Mconv6_stage{}'.format(stage), l, 128, 1, activation=tf.nn.relu)
            l = Conv2D('Mconv7_stage{}'.format(stage), l, 15, 1, activation=tf.identity)
            return l

        # Stage 1 predicts initial belief maps directly from the shared
        # features; stages 2-6 each refine the previous stage's output.
        out1 = (LinearWrap(shared)
                .Conv2D('conv5_1_CPM', 512, 1, activation=tf.nn.relu)
                .Conv2D('conv5_2_CPM', 15, 1, activation=tf.identity)())
        out2 = add_stage(2, out1)
        out3 = add_stage(3, out2)
        out4 = add_stage(4, out3)
        out5 = add_stage(5, out4)
        out6 = add_stage(6, out5)
        # Return value is intentionally discarded; the named op is fetched
        # by name (see run_test's output_names=['resized_map']).
        tf.image.resize_bilinear(out6, [368, 368], name='resized_map')
def run_test(model_path, img_file):
    """Run the CPM on one image and write the blended heatmap to output.jpg.

    Args:
        model_path: path to the .npz file of pretrained weights.
        img_file: path to the input image (any size; resized to 368x368).
    """
    param_dict = dict(np.load(model_path))
    predict_func = OfflinePredictor(PredictConfig(
        input_signature=[tf.TensorSpec((None, 368, 368, 3), tf.float32, 'input')],
        tower_func=CPM,
        session_init=DictRestore(param_dict),
        input_names=['input'],
        output_names=['resized_map']
    ))

    im = cv2.imread(img_file, cv2.IMREAD_COLOR).astype('float32')
    im = cv2.resize(im, (368, 368))
    out = predict_func(im[None, :, :, :])[0][0]
    # Sum the 14 joint channels (channel 14 is background) into one map.
    hm = out[:, :, :14].sum(axis=2)
    # BUG FIX: the local was previously named `viz`, shadowing the
    # tensorpack.utils.viz module imported at the top of this file.
    overlay = colorize(im, hm)
    cv2.imwrite("output.jpg", overlay)
if __name__ == '__main__':
    # Command-line entry point: load pretrained weights and visualize
    # the pose heatmap for a single input image.
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', required=True, help='.npz model file')
    parser.add_argument('--input', required=True, help='input image')
    cli_args = parser.parse_args()
    run_test(cli_args.load, cli_args.input)
|
{"hexsha": "0a5dd42e9266d4c405aea8ed1df2ec534d452c8b", "size": 3989, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/CaffeModels/load-cpm.py", "max_stars_repo_name": "Neovairis/tensorpack", "max_stars_repo_head_hexsha": "ca0969089847c37a893a8e99317214c5899278db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-05-21T15:13:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-24T07:25:41.000Z", "max_issues_repo_path": "examples/CaffeModels/load-cpm.py", "max_issues_repo_name": "lkn123/tensorpack", "max_issues_repo_head_hexsha": "d7a13cb74c9066bc791d7aafc3b744b60ee79a9f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-12-16T21:58:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:17:01.000Z", "max_forks_repo_path": "examples/CaffeModels/load-cpm.py", "max_forks_repo_name": "lkn123/tensorpack", "max_forks_repo_head_hexsha": "d7a13cb74c9066bc791d7aafc3b744b60ee79a9f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-04T00:02:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-06T20:27:04.000Z", "avg_line_length": 32.9669421488, "max_line_length": 92, "alphanum_fraction": 0.5617949361, "include": true, "reason": "import numpy", "num_tokens": 1231}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.