import multiprocessing as mp
import os
import shutil
import cairo
import numpy as np
import tqdm
CUR_DIR = os.path.dirname(__file__)
OUTPUT_DIR = os.path.join(CUR_DIR, 'output')
WIDTH = 1920 * 2
HEIGHT = 1080 * 2
FPS = 60
# The dtype can be changed to use a different level of precision for
# calculating the points used to draw the lines.
DTYPE = np.float64
# The position and size of the circle.
CENTER = np.array([WIDTH / 2, HEIGHT / 2], dtype=DTYPE)
RADIUS = np.array(HEIGHT / 3, dtype=DTYPE)
# The width of each line.
LINE_WIDTH = 3
# Some helper constants used in the calculations below.
TAU = np.array(np.pi * 2, dtype=DTYPE)
MIN_ANGLE_DELTA = np.finfo(DTYPE).eps * 1e6
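# (For the default float64 dtype, this works out to roughly 2.2e-10 radians.)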
LINE_EXTENSION_LENGTH = np.array(WIDTH * 3 / 4, dtype=DTYPE)
# The number of points around the circle (which will also be the number
# of lines drawn on each frame).
NUM_STARTING_POINTS = 512
# This is the granularity of the multiples. For example, if
# `MULTIPLES_PER_STARTING_POINT` is 100, then the multiples would be:
# 1.00, 1.01, 1.02, etc., and one frame would be generated for each
# multiple, so the first frame would be for multiple 1.00, and the
# second frame for multiple 1.01, and so on. If we are generating a
# video at 30 frames per second, then setting this value to 30 will
# cycle through one multiple per second.
MULTIPLES_PER_STARTING_POINT = 30
# Angle of hue 0 (red).
# STARTING_ANGLE = TAU / 4
STARTING_ANGLE = TAU / 8
# If `OVERWRITE` is True, the output directory will be cleared and a new
# render will be started from scratch. If `OVERWRITE` is False, the
# script will try to resume any previously started render by skipping
# any frames that already exist in the output directory. Note that if
# you are trying to resume a previous render that did not completely
# finish, you should delete some of the last few frames in the output
# directory beforehand, since some of the last few frames in the output
directory may not have been completely written to disk, meaning
# the PNG files may be corrupt or only part of the pattern may be shown
# in that frame. To be safe, I just delete the last 100 or so frames
# in the output directory before trying to resume a render.
OVERWRITE = True
def write_frame(frame_data):
frame_num, start_points, end_points, colors = frame_data
frame_path = os.path.join(OUTPUT_DIR, f'frame_{frame_num}.png')
if os.path.exists(frame_path):
return
# Initialize our surface and context.
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
cr = cairo.Context(surface)
# Paint the frame black.
cr.set_source_rgb(0, 0, 0)
cr.set_operator(cairo.Operator.SOURCE)
cr.paint()
# Set the operator mode to screen so that any overlapping lines will
# be blended.
cr.set_operator(cairo.Operator.SCREEN)
# Set the line width.
cr.set_line_width(LINE_WIDTH)
# Draw each line.
for i in range(len(start_points)):
cr.set_source_rgb(*colors[i])
cr.move_to(*start_points[i])
cr.line_to(*end_points[i])
cr.stroke()
# Write the frame to disk.
surface.write_to_png(frame_path)
def main():
if OVERWRITE and os.path.exists(OUTPUT_DIR):
shutil.rmtree(OUTPUT_DIR)
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
# Get the angles for each of our starting points.
start_angles = np.arange(NUM_STARTING_POINTS, dtype=DTYPE) / NUM_STARTING_POINTS * TAU
# Get a color for each starting angle. Angle 0 will be fully
# saturated red, and then it will cycle through all the fully
# saturated colors (orange, yellow, green, ...) as the angle
# increases clockwise, with angle tau coming back to fully saturated
# red again.
colors = np.clip(
np.stack(
[
(-np.abs((-start_angles + TAU / 2) % TAU - TAU / 2) + TAU / 3) * 6 / TAU,
(-np.abs((-start_angles - TAU / 6) % TAU - TAU / 2) + TAU / 3) * 6 / TAU,
(-np.abs((-start_angles + TAU / 6) % TAU - TAU / 2) + TAU / 3) * 6 / TAU,
],
axis=1,
),
0,
1,
)
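# Hedged sanity check (added for illustration, not in the original script):
# the first starting angle is 0, which the mapping above should send to
# pure red after clipping.
assert np.allclose(colors[0], [1.0, 0.0, 0.0])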
# Get the list of multiples; each multiple will be used to
# generate a different frame.
multiples = np.linspace(
1, NUM_STARTING_POINTS + 1, NUM_STARTING_POINTS * MULTIPLES_PER_STARTING_POINT + 1,
)
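# With the defaults this yields 15,361 values: 1.0, 1.0333..., 1.0667...,
# up to 513.0, i.e. one step of 1/MULTIPLES_PER_STARTING_POINT per frame.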
# If I only want to render a section of the sequence, I reduce the
# `multiples` array here to the section that should be rendered. For
# example, I rendered the last part of the video at a higher
# framerate to slow it down, so I only rendered the last part of the
# sequence for that section.
# multiples = multiples[-MULTIPLES_PER_STARTING_POINT * 2 :]
multiples = multiples[:1]
# Expand the first dimension of `multiples` and the last dimension
# of `starting_angles` so that they can be broadcast together when
# multiplied.
multiples = multiples[None, ...]
start_angles = start_angles[..., None]
# Calculate the difference in angle between the starting point and
# the ending point, and make sure they are at least
# `MIN_ANGLE_DELTA` apart (this makes it so that if the points would
# have landed at the same point, they will instead be just slightly
# apart so that a line will be drawn tangent to the circle).
delta_angles = (multiples * start_angles - start_angles) % TAU
delta_angles = np.clip(delta_angles, MIN_ANGLE_DELTA, TAU - MIN_ANGLE_DELTA)
# Get the starting and ending angles. These are rotated so
# that the pattern is drawn in the desired orientation.
start_angles -= STARTING_ANGLE
end_angles = start_angles + delta_angles
# Calculate the starting and ending points on the circle from the
# starting and ending angles.
start_points = CENTER + RADIUS * np.stack([np.cos(start_angles), np.sin(start_angles)], axis=2)
end_points = CENTER + RADIUS * np.stack([np.cos(end_angles), np.sin(end_angles)], axis=2)
# Extend the line that connects the points so that it goes off the
# edge of the screen.
diffs = end_points - start_points
unit_diffs = diffs / np.linalg.norm(diffs, axis=2, keepdims=True)
start_points = start_points - unit_diffs * LINE_EXTENSION_LENGTH
end_points = end_points + unit_diffs * LINE_EXTENSION_LENGTH
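# `unit_diffs` are unit vectors along each chord, so this shifts both
# endpoints outward by a fixed LINE_EXTENSION_LENGTH pixels.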
# Get the number of frames and generate a list of frame data tuples,
# where each tuple contains all the data needed to generate that
# frame.
num_frames = start_points.shape[1]
frame_data_list = [
(frame_num, start_points[:, frame_num], end_points[:, frame_num], colors)
for frame_num in range(num_frames)
]
print(f'frames: {len(frame_data_list)}, mins: {len(frame_data_list) / FPS / 60}')
# We create a pool of processes and for each tuple of frame data in
# our frame data list, we pass that tuple of frame data to a process
# and that process calls `write_frame()` passing it the tuple of
# frame data as the argument. Then the `write_frame()` function
# draws the frame and writes it to disk.
with mp.Pool(mp.cpu_count()) as p:
for _ in tqdm.tqdm(p.imap_unordered(write_frame, frame_data_list), total=num_frames):
# This loop is just a simple way to wrap our pool iterator
# with tqdm so that we can track the progress of our script.
# Since we don't do anything with the data returned from the
# `write_frame()` calls, we just write `pass` here.
pass
if __name__ == '__main__':
main()
#include "potgen.hpp"
#include "vector.hpp"
#include "interpolation.hpp"
#include "fileIO.hpp"
#include "correlation.hpp"
#include "config.h"
#include <fstream>
#define BOOST_TEST_MODULE potgen_test
#include <boost/test/unit_test.hpp>
// declaration of compare function
/// \todo define some kind of test utility header
bool compare_files_binary(std::string f1, std::string f2, bool verbose);
// helper function
static Potential generatePotential( int N, std::size_t size, const PGOptions& opt )
{
std::vector<std::size_t> sizes(N, size);
return generatePotential( sizes, std::vector<double>(N, 1.0), opt);
}
BOOST_AUTO_TEST_SUITE(potgen_test)
BOOST_AUTO_TEST_CASE( potential_properties )
{
PGOptions opt;
opt.randomSeed = rand();
opt.maxDerivativeOrder = 2;
opt.corrlength = 0.1;
opt.cor_fun = makeGaussianCorrelation(0.01);
auto result = generatePotential(2, 128, opt);
BOOST_CHECK_EQUAL( opt.randomSeed, result.getSeed() );
BOOST_CHECK_EQUAL( result.getExtents()[0], 128);
BOOST_CHECK_EQUAL( result.getExtents()[1], 128);
BOOST_CHECK_EQUAL( result.getSupport()[0], 1.0);
BOOST_CHECK_EQUAL( result.getSupport()[1], 1.0);
BOOST_CHECK_EQUAL( result.getCorrelationLength(), opt.corrlength);
BOOST_CHECK( result.hasDerivativesOfOrder( opt.maxDerivativeOrder ) );
}
// check determinism
BOOST_AUTO_TEST_CASE( check_potgen_determinism )
{
auto f = makeGaussianCorrelation(0.01);
PGOptions opt;
opt.randomSeed = rand();
opt.cor_fun = f;
auto result1 = generatePotential(2, 128, opt);
auto result2 = generatePotential(2, 128, opt);
BOOST_CHECK_EQUAL( opt.randomSeed, result1.getSeed() );
for(unsigned int i = 0; i < result1.getPotential().getElementCount(); ++i)
{
BOOST_CHECK_EQUAL( result1.getPotential()[i], result2.getPotential()[i] );
}
// check consistency across multiple revisions
std::cout << TEST_DATA_DIRECTORY"/pot_2d_128_ref" << "\n";
std::fstream rfile(TEST_DATA_DIRECTORY"/pot_2d_128_ref", std::fstream::in | std::fstream::binary);
if(!rfile.is_open()) {
BOOST_FAIL("Could not open reference file " TEST_DATA_DIRECTORY "/pot_2d_128_ref");
}
auto ref = Potential::readFromFile(rfile);
opt.randomSeed = ref.getSeed();
opt.cor_fun = f;
opt.corrlength = 0.01;
auto result3 = generatePotential(2, 128, opt);
std::fstream generated("pot_2d_128_cmp", std::fstream::out | std::fstream::binary);
result3.writeToFile(generated);
BOOST_CHECK(compare_files_binary(TEST_DATA_DIRECTORY"/pot_2d_128_ref", "pot_2d_128_cmp", true));
for(int x = 0; x < 128*128; ++x)
{
BOOST_REQUIRE_EQUAL(ref.getPotential()[x], result3.getPotential()[x]);
}
/// \todo error check for too high sizes
/// \todo this belongs in another test case
opt.cor_fun = makeGaussianCorrelation(1);
BOOST_CHECK_THROW( generatePotential(3, std::size_t(-1), opt), boost::exception );
}
BOOST_AUTO_TEST_CASE( check_potgen_derivatives )
{
/// \todo for this check to make sense, we should actually generate a larger potential and scale down by hand, then make comparisons
constexpr int size = 5120;
auto f = makeGaussianCorrelation(0.01);
/// \todo allow the direct use of lambdas
PGOptions opt;
opt.randomSeed = rand();
opt.maxDerivativeOrder = 2;
opt.cor_fun = f;
auto result1 = generatePotential(1, size, opt);
result1.setSupport(std::vector<double>{1.0});
auto pgrid = result1.getPotential().shallow_copy();
pgrid.setAccessMode(TransformationType::PERIODIC);
auto dgrid = result1.getDerivative( makeIndexVector(1, {0}) ).shallow_copy();
dgrid.setAccessMode(TransformationType::PERIODIC);
constexpr double STEP = 0.01;
double p_int = pgrid(std::vector<int>{2});
/// \todo use an integrator and check that results are consistent
int c = 0;
double max_dev = 0;
double avg_dev = 0;
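// Sketch of the check (comment added for clarity, assuming the support is
// 1.0 over `size` grid cells): integrating the stored derivative along x,
// p_int += (dV/dx) * STEP / size, should reproduce the interpolated
// potential up to interpolation error, which max_dev/avg_dev quantify.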
for(double x = 2; x < size - 2; x += STEP)
{
gen_vect pos(1);
pos[0] = x;
double p_here = linearInterpolate( pgrid, pos );
double dx_here = linearInterpolate( dgrid, pos );
p_int += dx_here * STEP / size;
double dev = std::abs( p_int - p_here );
if ( dev > max_dev )
max_dev = dev;
avg_dev += dev;
c++;
}
avg_dev /= c;
/// \todo 1e-3 and 2e-4 seem quite high here
BOOST_CHECK_SMALL( max_dev, 1e-3 );
BOOST_CHECK_SMALL( avg_dev, 2e-4 );
std::cout << "average deviation: " << avg_dev << "\n";
}
BOOST_AUTO_TEST_SUITE_END()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: J.A. de Jong - ASCEE
Description:
Class for plotting bars on a QGraphicsScene.
"""
from ..lasp_gui_tools import ASCEEColors, Branding
from PySide.QtGui import (
QGraphicsScene, QPen, QBrush, QGraphicsRectItem,
QGraphicsTextItem, QPainter, QImage, QPrinter
)
from PySide.QtCore import Qt, QRectF, QLineF, QSize, QRect, QPointF, QSizeF
import numpy as np
import os
leftoffset = 120 # Left offset of the figure
rightoffset = 60
topoffset = 30
bottomoffset = 80
xticklabeloffset = 55
xlabelbottomoffset = 30
ylabelleftoffset = 30
nyticks = 11
ticklength = 10
# Distance between two bar groups in units of bar thicknesses
dxbars = 2
DEFAULT_COLORS = [ASCEEColors.blue, ASCEEColors.green, Qt.red, Qt.cyan,
Qt.darkYellow,
Qt.darkMagenta]
def graphicsTextItem(label):
item = QGraphicsTextItem(label)
item.setFont(Branding.figureFont())
return item
class BarScene(QGraphicsScene):
"""
Graphics scene for plotting bars.
"""
def __init__(self, parent, xvals, G, ylim=(0, 1),
grid=True,
xlabel=None,
ylabel=None,
title=None,
colors=DEFAULT_COLORS, size=(1200, 600),
legend=None,
legendpos=None):
"""
Initialize a bar scene
Args:
xvals: labels and x positions of the bars
G: Number of bars per x value
ylim: y limits of the figure
xlabel: label below x-axis
ylabel: label on left side of the y-axis
title: figure title
colors: color cycler
size: size of the plot in pixels
legend: list of legend strings to show.
legendpos: position of legend w.r.t. default position, in pixels
"""
super().__init__(parent=parent)
self.setSceneRect(QRect(0, 0, *size))
# self.setBackgroundBrush(ASCEEColors.bgBrush(0, size[0]))
self.ylim = ylim
N = len(xvals)
self.N = N
self.G = G
self.bgs = []
self.size = size
xsize, ysize = size
self.xsize = xsize
self.ysize = ysize
self.colors = colors
# Size of the frame
Lx = xsize - rightoffset - leftoffset
Ly = ysize - topoffset - bottomoffset
# The main frame that the bars are drawn in.
mainframe = self.createRect(leftoffset,
bottomoffset,
Lx,
Ly)
# Set the y ticks and ticklabels
self.yticks = []
txtmaxwidth = 0
for i in range(nyticks):
y = bottomoffset+Ly*i/(nyticks-1)
ytick = self.addLine(leftoffset,
y,
leftoffset-ticklength,
y)
if grid:
ygrid = self.addLine(leftoffset,
y,
xsize-rightoffset,
y, pen=QPen(Qt.gray))
range_ = ylim[1]-ylim[0]
ytickval = i/(nyticks-1)*range_ + ylim[0]
yticklabel = f'{ytickval:.0f}'
txt = graphicsTextItem(yticklabel)
txtwidth = txt.boundingRect().width()
txtmaxwidth = max(txtmaxwidth, txtwidth)
txt.setPos(leftoffset-10-txtwidth,
ysize - y-.022*self.ysize)
self.addItem(txt)
self.yticks.append(ytick)
# Main frame added after grid lines, to get the color right
self.addItem(mainframe)
# Create the bars
for g in range(G):
bg = []
for n in range(N):
barrect = self.getBarRect(n, g, 0)
baritem = QGraphicsRectItem(barrect, brush=QBrush(Qt.blue))
self.addItem(baritem)
bg.append(baritem)
self.bgs.append(bg)
# Add x ticks and ticklabels
xticklabels = []
for n in range(N):
xticklabel = f'{xvals[n]}'
txt = graphicsTextItem(xticklabel)
txtxpos = self.getBarGroupMidPos(n)-12
txt.setPos(txtxpos,
self.ysize-bottomoffset+xticklabeloffset)
txt.rotate(-90)
self.addItem(txt)
xticklabels.append(txt)
# Set xlabel
if xlabel is not None:
xlabel = graphicsTextItem(xlabel)
width = xlabel.boundingRect().width()
txtxpos = xsize/2-width/2
txtypos = ysize - xlabelbottomoffset
xlabel.setPos(txtxpos, txtypos)
self.addItem(xlabel)
# Set ylabel
if ylabel is not None:
ylabel = graphicsTextItem(ylabel)
ylabel.setPos(ylabelleftoffset,
(ysize-topoffset-bottomoffset)/2+topoffset)
ylabel.rotate(-90)
self.addItem(ylabel)
# Set title
if title is not None:
title = graphicsTextItem(title)
width = title.boundingRect().width()
txtxpos = self.xsize/2-width/2
txtypos = (1-.998)*self.ysize
title.setPos(txtxpos, txtypos)
self.addItem(title)
if legend is not None:
maxlegtxtwidth = 0
legposx, legposy = (0, 0) if legendpos is None else legendpos
legpos = (xsize-rightoffset-300+legposx,
ysize-topoffset-30+legposy)
dyleg = 15
dylegtxt = dyleg
Lylegrect = 10
Lxlegrect = 20
legrectmargin = 5
boxtopleft = QPointF(legpos[0]-legrectmargin,
ysize-legpos[1]-Lylegrect-legrectmargin)
legbox = self.addRect(QRectF(0, 0, 0, 0),
pen=QPen(), brush=QBrush(Qt.white))
for i, leg in enumerate(legend):
leglabel = legend[i]
# The position of the legend, in screen coordinates
pos = (legpos[0], legpos[1] - i*dyleg)
color = self.colors[i % len(self.colors)]
legrect = self.createRect(*pos, Lxlegrect, Lylegrect)
legrect.setBrush(QBrush(color))
legtxt = graphicsTextItem(leglabel)
maxlegtxtwidth = max(maxlegtxtwidth,
legtxt.boundingRect().width())
self.addItem(legrect)
self.addItem(legtxt)
legtxt.setPos(legpos[0]+Lxlegrect,
ysize-pos[1]-dylegtxt-3)
legboxsize = QSize(maxlegtxtwidth+Lxlegrect+2*legrectmargin,
(i+1)*dyleg+legrectmargin)
legboxrect = QRectF(boxtopleft, legboxsize)
legbox.setRect(legboxrect)
def saveAsBitmap(self, fn):
"""
Save the bar plot as a bitmap image (the format is inferred from the
filename extension). Overwrites a file already existing in the filesystem.
https://stackoverflow.com/questions/7451183/how-to-create-image-file\
-from-qgraphicsscene-qgraphicsview#11642517
Args:
fn: Filename
Returns:
True on success
"""
size = self.size
pixelsx = max(1200, size[0])
pixelsy = int(pixelsx*size[1]/size[0])
imagesize = (pixelsx, pixelsy)
image = QImage(pixelsx,
pixelsy,
QImage.Format_ARGB32_Premultiplied)
painter = QPainter(image)
painter.setRenderHint(QPainter.Antialiasing)
painter.setBrush(Qt.white)
painter.setPen(Qt.white)
painter.drawRect(QRect(0, 0, *imagesize))
targetrect = QRectF(0, 0, *imagesize)
sourcerect = QRectF(0, 0, *size)
self.render(painter, targetrect, sourcerect)
painter.end()
return image.save(fn)
def saveAsPdf(self, fn, force=False):
"""
Save bar image as a PDF file.
Args:
fn: Filename
force: if True, overwrites an existing file. If false, raises a
RuntimeError if file already exists.
"""
if not force and os.path.exists(fn): raise RuntimeError(f'File already exists: {fn}')
printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName(fn)
printer.setFullPage(True)
printer.setPageSize(QPrinter.Custom)
printer.setPaperSize(QSizeF(*self.size), QPrinter.Millimeter)
printer.setPageMargins(0, 0, 0, 0, QPrinter.Millimeter)
painter = QPainter(printer)
painter.setRenderHint(QPainter.Antialiasing)
painter.setBrush(Qt.white)
painter.setPen(Qt.white)
painter.drawRect(QRect(0, 0, *self.size))
targetrect = QRectF(0, 0, printer.width(), printer.height())
sourcerect = QRectF(0, 0, *self.size)
self.render(painter, targetrect, sourcerect)
painter.end()
return True
def getBarGroupMidPos(self, n):
"""
Returns the mid x position below each bar group
"""
Lx = self.xsize-rightoffset-leftoffset
# Ly = self.ysize - topoffset - bottomoffset
start = 10
S = Lx - 2*start
L = S/(self.N*self.G+dxbars*(self.N-1))
xL = leftoffset+start
return (n*(self.G*L+dxbars*L) + xL + self.G*L/2)
def getBarRect(self, n, g, yval):
"""
Returns a bar QRectF.
Args:
n: Bar index (i.e. corresponding to a certain frequency band)
g: Bar group (i.e. corresponding to a certain quantity)
yval: Height of bar, 1 for full length, 0 for no length
Returns:
QRectF corresponding to the bar at the right place in the scene
"""
assert yval >= 0 and yval <= 1, "Invalid yval"
Lx = self.xsize-rightoffset-leftoffset
Ly = self.ysize-topoffset - bottomoffset
start = 10
S = Lx - 2*start
assert S > 0, "Size of bar field is too small."
# Width of a single bar
L = S/(self.N*self.G+dxbars*(self.N-1))
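# The available span S holds N groups of G bars of width L each, plus
# N-1 inter-group gaps of width dxbars*L, hence
# S = L*(N*G + dxbars*(N-1)), which gives the expression above.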
xL = leftoffset+start
x = g*L + n*(self.G*L+dxbars*L) + xL
return QRectF(x,
self.ysize-bottomoffset-yval*Ly,
L,
yval*Ly)
def addLine(self, x1, y1, x2, y2, pen=QPen(), brush=QBrush()):
line = QLineF(x1,
self.ysize - y1,
x2,
self.ysize - y2)
return super().addLine(line, pen=pen, brush=brush)
def createRect(self, x, y, Lx, Ly, pen=QPen(), brush=QBrush()):
"""
Create a rectangle somewhere, in relative coordinates originating
from the lower left position.
"""
x1 = x
# Y-position from the top, these are the coordinates used to create a
# rect item.
y1 = self.ysize-y-Ly
return QGraphicsRectItem(x1,
y1,
Lx,
Ly,
pen=pen,
brush=brush)
def set_ydata(self, newydata):
G = len(self.bgs)
N = len(self.bgs[0])
assert newydata.shape[0] == N
assert newydata.shape[1] == G
# Y-values of the bars should be between 0 and 1.
scalefac = self.ylim[1]-self.ylim[0]
yvals = (newydata - self.ylim[0])/scalefac
# Clip values to be between 0 and 1
yvals = np.clip(yvals, 0, 1)
for g in range(G):
color = self.colors[g % len(self.colors)]
for n in range(N):
bar = self.bgs[g][n]
bar.setRect(self.getBarRect(n, g, yvals[n, g]))
bar.setBrush(color)
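# Hedged usage sketch (not part of the original module; assumes a running
# QApplication and hypothetical data):
#
#   scene = BarScene(None, xvals=[63, 125, 250], G=2, ylim=(0, 100),
#                    xlabel='Frequency [Hz]', ylabel='Level [dB]')
#   scene.set_ydata(np.array([[40., 60.], [55., 70.], [30., 45.]]))
#
# set_ydata rescales the (N, G) array to the ylim range, clips to [0, 1],
# and resizes/recolors the existing bar rectangles in place.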
#!/usr/bin/python
# -*- coding: utf-8 -*-
# __author__ : stray_camel
# __description__ : reproduction of the DIEN paper
# __date__: 2020/09/15 16
import csv
import io
import os
import pickle
import random
from concurrent.futures import (ALL_COMPLETED, ThreadPoolExecutor,
as_completed, wait)
import numpy as np
import pandas as pd
from openpyxl import Workbook
from apps.api_exception import Fail, ParameterException
from apps.constants import MAX_CPUS
from apps.data.handler import (META_ELECTRONICS, REVIEWS_ELECTRONICS_5,
TMP_PATH)
def build_map(df:"reviews_df / meta_df", col_name:"列名"):
"""
制作一个映射,键为列名,值为序列数字
:return: 字典,键
"""
key = sorted(df[col_name].unique().tolist())
m = dict(zip(key, range(len(key))))
df[col_name] = df[col_name].map(lambda x: m[x])
return m, key
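# Illustrative example (hypothetical data): for a column containing
# ['b', 'a', 'b'], build_map returns ({'a': 0, 'b': 1}, ['a', 'b']) and
# rewrites the column in place to [1, 0, 1].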
def get_building_dataset(params=None):
"""
Return the processed training and test sets.
"""
if not os.path.exists(os.path.join(TMP_PATH, 'dataset.p')):
train_set, test_set, cate_list, (user_count, item_count,
cate_count, max_sl) = building_dataset(params)
else:
# load from the pickle cache
with open(os.path.join(TMP_PATH, 'dataset.p'), 'rb') as f:
train_set = np.array(pickle.load(f),dtype=object)
test_set = pickle.load(f)
cate_list = pickle.load(f)
(user_count, item_count, cate_count, max_sl) = pickle.load(f)
return np.array(train_set,dtype=object), test_set, cate_list, (user_count, item_count, cate_count, max_sl)
def building_dataset(params=None):
"""
Load the remapped data (cached with pickle after the first run) and
build the training and test sets from it.
NOTE: for each user with n browsed items, the first n-1 items form the
training set (positive samples), with matching negative samples
generated; each user contributes n-2 training examples (the 1st item
has no browsing history), and the n-th item becomes the test example.
Hence there are 192,403 test examples (one per user) and 2,608,764
training examples.
"""
if not os.path.exists(os.path.join(TMP_PATH, 'remap.p')):
# process the raw data
reviews_df, cate_list, (user_count, item_count, cate_count,
example_count), _ = re_maping(params=None)
else:
# load from the pickle cache
with open(os.path.join(TMP_PATH, 'remap.p'), 'rb') as f:
reviews_df = pickle.load(f)
cate_list = pickle.load(f)
user_count, item_count, cate_count, example_count = pickle.load(f)
train_set, test_set = [], []
# maximum sequence length
max_sl = 0
for reviewerID, hist in reviews_df.groupby('reviewerID'):
# items the user has browsed, i.e. the positive samples
pos_list = hist['asin'].tolist()
max_sl = max(max_sl, len(pos_list))
# generate a negative sample (rejection-sample an item id the user never browsed)
def gen_neg():
neg = pos_list[0]
while neg in pos_list:
neg = random.randint(0, item_count - 1)
return neg
# 1:1 ratio of positive to negative samples
neg_list = [gen_neg() for i in range(len(pos_list))]
for i in range(1, len(pos_list)):
# the history up to this point, i.e. the previous browsing records
hist = pos_list[:i]
sl = len(hist)
if i != len(pos_list) - 1:
# save positive/negative samples in the format: user id, positive/negative item id, browsing history, history length, label (1/0)
train_set.append((reviewerID, pos_list[i], hist, sl, 1))
train_set.append((reviewerID, neg_list[i], hist, sl, 0))
else:
# the last interaction is saved as the test example
label = (pos_list[i], neg_list[i])
test_set.append((reviewerID, hist, sl, label))
# shuffle
random.shuffle(train_set)
random.shuffle(test_set)
assert len(test_set) == user_count
# write to the dataset.p file
with open(os.path.join(TMP_PATH, 'dataset.p'), 'wb') as f:
pickle.dump(np.array(train_set, dtype=object), f, pickle.HIGHEST_PROTOCOL)
pickle.dump(test_set, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(cate_list, f, pickle.HIGHEST_PROTOCOL)
pickle.dump((user_count, item_count, cate_count, max_sl),
f, pickle.HIGHEST_PROTOCOL)
return np.array(train_set, dtype=object), test_set, cate_list, (user_count, item_count, cate_count, max_sl)
def re_maping(params=None):
"""
reviews_df keeps ['reviewerID' (user id), 'asin' (product id), 'unixReviewTime' (review time)]
meta_df keeps ['asin' (product id), 'categories']
"""
# reviews,meta
pred_data = get_pred_data(params)
reviews_df, meta_df = pred_data.get('meta'), pred_data.get('reviews')
reviews_df = reviews_df[['reviewerID', 'asin', 'unixReviewTime']]
meta_df = meta_df[['asin', 'categories']]
# keep only the last (most specific) category
meta_df['categories'] = meta_df['categories'].map(lambda x: x[-1][-1])
# map the item ids in meta_df
asin_map, asin_key = build_map(meta_df, 'asin')
# map the item categories in meta_df
cate_map, cate_key = build_map(meta_df, 'categories')
# map the user ids in reviews_df
revi_map, revi_key = build_map(reviews_df, 'reviewerID')
# user_count: 192403 item_count: 63001 cate_count: 801 example_count: 1689188
user_count, item_count, cate_count, example_count = \
len(revi_map), len(asin_map), len(cate_map), reviews_df.shape[0]
# print('user_count: %d\titem_count: %d\tcate_count: %d\texample_count: %d' %
# (user_count, item_count, cate_count, example_count))
# sort by item id and reset the index
meta_df = meta_df.sort_values('asin')
meta_df = meta_df.reset_index(drop=True)
# map the item ids in reviews_df, sort by user id and review time, and reset the index
reviews_df['asin'] = reviews_df['asin'].map(lambda x: asin_map[x])
reviews_df = reviews_df.sort_values(['reviewerID', 'unixReviewTime'])
reviews_df = reviews_df.reset_index(drop=True)
reviews_df = reviews_df[['reviewerID', 'asin', 'unixReviewTime']]
# the category of each item
cate_list = np.array(meta_df['categories'], dtype='int32')
# cache the needed data as a pickle file
with open(os.path.join(TMP_PATH, 'remap.p'), 'wb') as f:
pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(cate_list, f, pickle.HIGHEST_PROTOCOL)
pickle.dump((user_count, item_count, cate_count, example_count),
f, pickle.HIGHEST_PROTOCOL)
pickle.dump((asin_key, cate_key, revi_key), f, pickle.HIGHEST_PROTOCOL)
return reviews_df, cate_list, (user_count, item_count, cate_count, example_count), (asin_key, cate_key, revi_key)
def get_pred_data(params=None) -> dict:
"""
Preload the data; after the first load it is cached with pickle and read from the cache thereafter.
"""
# NOTE: any incoming `params` is overridden here with the default file locations.
params = {'meta': META_ELECTRONICS,
'reviews': REVIEWS_ELECTRONICS_5}
def json2df(file_path):
with open(file_path, 'r') as fin:
df = {}
i = 0
all_task = []
# with ThreadPoolExecutor(MAX_CPUS) as executor:
# all_task += [executor.submit(ant, (df,i,line))
# for line in fin]
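# The raw Amazon files are lines of Python-literal dicts (single-quoted
# keys), which is presumably why eval() is used below; ast.literal_eval
# would be a safer drop-in.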
for line in fin:
df[i] = eval(line)
i += 1
df = pd.DataFrame.from_dict(df, orient='index')
return df
meta_pickle = os.path.join(TMP_PATH, 'meta.p')
reviews_pickle = os.path.join(TMP_PATH, 'reviews.p')
if not os.path.exists(reviews_pickle):
reviews_df = json2df(params.get('reviews'))
with open(reviews_pickle, 'wb') as f:
pickle.dump(reviews_df, f, pickle.HIGHEST_PROTOCOL)
else:
# load the previously filtered data from the cache
reviews_df = pickle.load(
open(reviews_pickle, mode='rb'))
if not os.path.exists(meta_pickle):
meta_df = json2df(params.get('meta'))
meta_df = meta_df[meta_df['asin'].isin(reviews_df['asin'].unique())]
meta_df = meta_df.reset_index(drop=True)
with open(meta_pickle, 'wb') as f:
pickle.dump(meta_df, f, pickle.HIGHEST_PROTOCOL)
else:
# load the previously filtered data from the cache
meta_df = pickle.load(
open(meta_pickle, mode='rb'))
res = {'meta': meta_df,
'reviews': reviews_df}
return res
def hello_word_handler(params=None):
res = {
'name': 'stray_camel',
'age': '25',
'patient_id': '19000347',
}
return res
#Load app and configuration
# create config variables (to be cleaned in the future)
from flasky import db
from flask_login import login_required, current_user
from config import config as config_set
from app.models import User
config=config_set['tinymrp'].__dict__
folderout=config['FOLDEROUT']
fileserver_path=config['FILESERVER_PATH']
datasheet_folder=config['DATASHEET_FOLDER']
deliverables_folder=config['DELIVERABLES_FOLDER']
variables_conf=config['VARIABLES_CONF']
webfileserver=config['WEBFILESERVER']
maincols=config['MAINCOLS']
refcols=config['REFCOLS']
deliverables=config['DELIVERABLES']
webserver=config['WEBSERVER']
process_conf=config['PROCESS_CONF']
lowercase_properties=config['LOWERCASE_PROPERTIES']
property_conf=config['PROPERTY_CONF']
hardware_folder=config['HARDWARE_FOLDER']
from flask import (
Blueprint, flash, g, redirect, session, render_template, request, url_for
)
#To generate qr codes
import qrcode
#Other libraries
from datetime import datetime, date #for timestamps
import chardet #for getting the encoding of files
import sys, os
import re
from shutil import copyfile
import glob
import pickle #To save bom object session
from pathlib import Path, PureWindowsPath, PurePosixPath
import pandas as pd
import numpy as np
import math
#PDF libraries
import PyPDF2
from PyPDF2 import PdfFileWriter, PdfFileReader, PdfFileMerger
import pdfkit #TO EXPORT WEBPAGES TO PDF
# import openpyxl #to manipulate excelfiles
import xlsxwriter
from openpyxl import load_workbook
from openpyxl.drawing.image import Image as openpyxlIm
from openpyxl.styles import Color, PatternFill, Font, Border
from openpyxl.utils import get_column_letter
from PIL import Image #to process thumbnails
#SQL libraries
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.types import Integer, Float, Boolean,String,Text,NVARCHAR, Date
from flask import render_template, jsonify, request, redirect, url_for, jsonify
from sqlalchemy import create_engine, ForeignKey,select, or_, and_
from sqlalchemy.orm import relationship, backref
#To check if a connection to the file can be made.
import urllib
#Tinylib internal imports
from .imageprocess import cropandbackground
#Mongorelated
#from mongoengine import *
from mongoengine import EmbeddedDocument,EmbeddedDocumentField, \
StringField,ListField,IntField, DynamicDocument,\
ReferenceField,DynamicField
import pymongo
import json
from pymongo import MongoClient
from werkzeug.utils import secure_filename
client = pymongo.MongoClient("localhost", 27017)
mongodb=client.TinyMRP
partcol=mongodb["part"]
#Core inventory class
class mongoPart(DynamicDocument):
meta = {'collection': 'part'}
# partnumber=StringField(required=True)
partnumber=StringField()
description=StringField()
revision=StringField()
finish=StringField()
children=ListField(ReferenceField("self"))
childrenqty=ListField(IntField())
#Uploader
approved=StringField()
uploader=DynamicField()
#Procurement/customer
category=DynamicField()
supplier=DynamicField()
customer=DynamicField()
#Related files locations
modelpath=StringField()
pngpath=StringField()
pdfpath=StringField()
dxfpath=StringField()
edrpath=StringField()
edr_dpath=StringField()
steppath=StringField()
threemfpath=StringField()
datasheetpath=StringField()
qrpath=StringField()
#Real pictures of the object
picpath=ListField(StringField())
#pics for drawings; allows several pages (several images)
png_dpath=StringField()
#For another attachements
otherspath=ListField(StringField())
#Processes
process=DynamicField()
process_colors=DynamicField()
process_icons=DynamicField()
tag=StringField()
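# NOTE: hasConsumingProcess() and isMainProcess() are defined a second
# time further down in this class body; in Python the later definitions
# rebind the names, so those are the ones that take effect.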
def hasConsumingProcess(self):
consume=False
processlist=self.process
processlist=[x for x in processlist if x in process_conf.keys()]
for process in processlist:
if int(process_conf[process]['priority'])<20:
consume=True
return consume
def isMainProcess(self,process):
mainprocess_bool=False
if self.hasProcess(process):
processlist=self.process
processlist=[x for x in processlist if x in process_conf.keys()]
if len(processlist)>0:
for x in processlist:
if int(process_conf[process]['priority'])<= int(process_conf[x]['priority']):
mainprocess_bool=True
else:
mainprocess_bool=False
return mainprocess_bool
def MainProcess(self):
processlist=self.process
print(self.partnumber)
print(processlist)
if len(processlist)>0: processlist=[x for x in processlist if x in process_conf.keys()]
if len(processlist)>0:
mainprocess=processlist[0]
for x in processlist:
if int(process_conf[x]['priority'])<= int(process_conf[mainprocess]['priority']):
mainprocess=x
else:
mainprocess='other'
self.mainprocess=mainprocess
return mainprocess
#Get tree for tree representation:
def treeDict(self, count=0,qty=1):
refkids=[]
i=0
for child in self.children:
kid={}
count+=1
kid=mongoPart.objects(pk=self.children[i]['id']).first().to_dict()
kid['qty']=self.childrenqty[i]
kid['value']=kid['qty']
kid['children']=self.children[i].treeDict(count=count)['children']
kid['name']=self.children[i].treeDict(count=count)['partnumber']
refkids.append(kid)
i+=1
refdict=self.to_dict()
refdict['children']=refkids
refdict['name']=self.partnumber
try:
refdict['value']=self.qty
except:
refdict['value']=qty
#test['children']['0']['part']['children']
# refkids=[]
# for kid in refdict['children']:
# kid['part']['children']=mongoPart.objects(id=kid['part']['id']).first().treeDict()
# print(kid)
# refkids.append(kid)
# # refkids.append({'part': mongoPart.objects(id=t).first().to_dict())
# refdict['children']=refkids
return refdict
#For the print outs on terminal of the object
def __repr__(self):
return f'P/N_{self.partnumber}_REV_{self.revision}_DES_{self.description}'
def __str__ (self):
return f'P/N_{self.partnumber}_REV_{self.revision}_DES_{self.description}'
#To get the children with quantities
def getchildren (self):
outlist=[]
i=0
for child in self.children:
kid={}
# kid['part']=partcol.find_one({"_id":child.id})
kid['part']=mongoPart.objects(pk=child.id)[0]
kid['qty']=self.childrenqty[i]
outlist.append(kid)
i+=1
return outlist
def children_with_qty (self):
outlist=[]
i=0
for child in self.children:
kid=child
# kid['part']=partcol.find_one({"_id":child.id})
kid['part']=mongoPart.objects(pk=child.id)[0]
kid['qty']=self.childrenqty[i]
outlist.append(kid)
i+=1
return outlist
#To get the parents with quantities in those parents
def getparents (self):
outlist=[]
i=0
parents=mongoPart.objects(children=self.pk)
i=0
for parent in parents:
father={}
father['part']=parent
j=0
for kid in parent.children:
if kid.pk==self.pk:
father['qty']=parent.childrenqty[j]
j+=1
outlist.append(father)
i+=1
return outlist
def parents_with_qty(self):
outlist=[]
parents=mongoPart.objects(children=self.pk)
for parent in parents:
j=0
for kid in parent.children:
if kid.pk==self.pk:
parent['qty']=parent.childrenqty[j]
j+=1
outlist.append(parent)
return outlist
def get_process_icons (self,persist=False):
self['process_icons']=[]
self['process_colors']=[]
if not "process2" in self.to_dict().keys(): self['process2']=""
if not "process3" in self.to_dict().keys(): self['process3']=""
if type(self['process'])==str:
self['process']=[self['process']]
if self['process2'] and self['process2']!="" and self['process2']!=" ":
self['process'].append(self['process2'])
if self['process3'] and self['process3']!="" and self['process3']!=" ":
self['process'].append(self['process3'])
persist=True
if type(self['process'])==list:
self['process']=[x for x in self['process'] if x and x!="" and x!=" "]
#Put all to lowercase
self['process']=list(map(lambda x: x.lower(), self['process']))
#Remove duplicates
self['process']=list(dict.fromkeys(self['process']))
for process in self['process']:
if process in process_conf.keys() :
self['process_icons'].append('images/'+(process_conf[process]['icon']))
self['process_colors'].append(process_conf[process]['color'])
else:
self['process_icons'].append('images/'+process_conf['others']['icon'])
self['process_colors'].append(process_conf['others']['color'])
if persist: self.save()
#To return a dictinary with the parts attributes
def to_dict(self):
print(self.partnumber)
# dirtydict=self.to_mongo()#.to_dict()
#
dirtydict=self.to_mongo().to_dict()
dirtydict['_id']=str(dirtydict['_id'])
print(dirtydict)
cleanchildren=[]
for child in dirtydict['children']:
cleanchildren.append(str(child))
dirtydict['children']=cleanchildren
return dirtydict
#Generates a time stamp tag with partnumber and rev
def get_tag(self):
self.tag=self.partnumber+"_REV_"+self.revision+"-"+date.today().strftime('%d_%m_%Y')
return self.tag
#Checks if a process is present
def hasProcess(self,process):
if process in self.process:
return True
else:
return False
#Checks if it has a consuming process
#That is to say a special process that should hide
#the children as they are subcomponents that cannot
#be supplied without creating the parent
def hasConsumingProcess(self):
processlist=[x for x in self.process if x in process_conf.keys()]
consume=False
for process in processlist:
if int(process_conf[process]['priority'])<20:
consume=True
return consume
#Checks whether the given process is the part's main process (the one with the lowest priority value)
def isMainProcess(self,process):
mainprocess_bool=False
if self.hasProcess(process):
processlist=[self.process,self.process2,self.process3]
processlist=[x for x in processlist if x in process_conf.keys()]
if len(processlist)>0:
for x in processlist:
if int(process_conf[process]['priority'])<= int(process_conf[x]['priority']):
mainprocess_bool=True
else:
mainprocess_bool=False
return mainprocess_bool
#Check for all available files in the Fileserver folders for a part
def updateFileset(self,web=False,persist=False):
self.get_tag()
parttag=self.partnumber+"_REV_"+self.revision
save=False
for filetype in config['DELIVERABLES']:
filelist=[]
for extension in config['DELIVERABLES'][filetype]['extension']:
filetag=config['DELIVERABLES'][filetype]['path']+parttag+str(config['DELIVERABLES'][filetype]['filemod'])+"."+extension
if file_exists(filetag):
if config['DELIVERABLES'][filetype]['list']!="yes":
try:
if self[filetype+'path']!=filetag:
self[filetype+'path']=filetag
# print(filetype,extension)
# print("string- " ,filetag)
save=True
except:
print("couldnt save - ", "string- " ,filetag)
else:
if self[filetype+'path']==[] or self[filetype+'path']==None:
self[filetype+'path']=[]
if not ( filetag in self[filetype+'path'] ):
self[filetype+'path'].append(filetag)
# print(filetype,extension)
# print("list- ", filetag)
save=True
else:
pass
# if filetype+'path'=='pngpath':
# self[filetype+'path']=url_for('static', filename='images/logo.png')
# save=True
if web:
try:
self[filetype+'path']=self[filetype+'path'].replace(fileserver_path,webfileserver)
print( (self[filetype+'path']))
print( secure_filename(self[filetype+'path']))
except:
pass
if persist:
if save: self.save()
def getweblinks(self,checkfiles=False):
if self.partnumber ==None:
pass
else:
parttag=self.partnumber+"_REV_"+self.revision
if checkfiles:
self.updateFileset(web=True)
else:
for filetype in config['DELIVERABLES']:
filelist=[]
for extension in config['DELIVERABLES'][filetype]['extension']:
filetag=config['DELIVERABLES'][filetype]['path']+parttag+str(config['DELIVERABLES'][filetype]['filemod'])+"."+extension
try:
self[filetype+'path']=self[filetype+'path'].replace(fileserver_path,webfileserver)
# print(self[filetype+'path'])
except:
pass
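# Roll up the multi-level BOM into a flat component list. Quantities
# multiply down the tree: e.g. (hypothetical) if assembly A contains 2x B
# and B contains 3x C, get_components on A yields B with qty 2 and C with
# qty 6 (unless B has a consuming process and components_only is set, in
# which case B's children are not expanded).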
def get_components(self, components_only=True,qty=1):
reflist=[]
flatbom=[]
def loopchildren(partnumber,revision,qty,reflist):
part=mongoPart.objects(partnumber=partnumber,revision=revision)[0]
children=part.getchildren()
for child in children:
refqty=child['qty']*qty
if len(child['part']['children'])>0:
if child['part'].hasConsumingProcess() and components_only:
reflist.append((child['part'],refqty))
else:
reflist.append((child['part'],refqty))
loopchildren(child['part']['partnumber'],child['part']['revision'],refqty,reflist)
else:
reflist.append((child['part'],refqty))
loopchildren(self.partnumber,self.revision,qty,reflist)
#Sum up all quantities and compile flatbom
resdict={}
for item,q in reflist:
total=resdict.get(item,0)+q
resdict[item]=total
for part in resdict.keys():
part.qty=resdict[part]
flatbom.append(part)
#Range flatbom by partnumber
#flatbom.sort(key=lambda x: x.partnumber)
print(flatbom)
#input("stop")
# flatbom.sort(key=lambda x: (x.category,x.supplier,x.approved,x.partnumber))
totalqty=0
for item in flatbom:
totalqty=totalqty + item.qty
print("Total components ", totalqty)
print("Unique components", len(flatbom))
return flatbom
def file_exists(location):
if "http" in location:
request = urllib.request.Request(location)
request.get_method = lambda : 'HEAD'
try:
response = urllib.request.urlopen(request)
return True
except urllib.error.HTTPError:
return False
else:
if os.path.isfile(location):
return True
else:
return False
def web_to_pdf(url,fileout):
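# NOTE: path_wkhtmltopdf is not defined in this module; it must be
# provided elsewhere (e.g. loaded from the config) before this is called.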
config= pdfkit.configuration(wkhtmltopdf=path_wkhtmltopdf)
options = {
'quiet': ''
}
try:
pdfkit.from_url(url, fileout, options=options)
except:
print("Couldn't export to pdf ",url)
#To find the encoding of a particular file
def find_encoding(fname):
r_file = open(fname, 'rb').read()
result = chardet.detect(r_file)
charenc = result['encoding']
return charenc
def create_folder_ifnotexists(path):
#Check if outputfolder exists otherwise create it
foldercheck=os.path.isdir(path)
if not foldercheck:
os.makedirs(path)
#To create thumbnails of images
def thumbnail(infile, size=(100, 100)):
outfile = os.path.splitext(infile)[0] + ".thumbnail.png"
if file_exists(outfile):
if os.path.getmtime(infile)>os.path.getmtime(outfile):  # source modified after thumbnail was generated
try:
os.remove(outfile)
im = Image.open(infile)
im.thumbnail(size, Image.ANTIALIAS)
im.save(outfile, "PNG")
#print(outfile)
return outfile
except:
print("Couldnt update existing OLD thumbnail - ",outfile)
return outfile
else:
#print(outfile)
return(outfile)
else:
try:
im = Image.open(infile)
im.thumbnail(size, Image.ANTIALIAS)
im.save(outfile, "PNG")
#print(outfile)
return outfile
except IOError:
print ("cannot create thumbnail for '%s'" % infile)
return ""
#To create a QR code to point to part link in tiny:
def qr_code(part,persist=True):
# flash(part.partnumber)
qrfile=fileserver_path+"/Deliverables/png/"+part.partnumber+"_REV_"+part.revision+".qr.jpg"
if file_exists(qrfile):
try:
os.remove(qrfile)
# flash("erased"+ qrfile)
except:
# flash("couldnt earse" + qrfile)
pass
image_url="http://"+webserver+"/part/"
image_url+= part.partnumber+"_rev_"
if part.revision=="":
image_url+= "%25"
else:
image_url+=part.revision
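# e.g. http://<webserver>/part/PN-100_rev_A (hypothetical part number);
# '%25' is a URL-encoded '%', presumably acting as a wildcard when the
# revision is empty.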
image_url=image_url.replace(" ","%20")
#Creating an instance of qrcode
qr = qrcode.QRCode(
version=1,
box_size=10,
border=5)
qr.add_data(image_url)
qr.make(fit=True)
img = qr.make_image(fill='black', back_color='white')
img.save(qrfile)
img.close()
if persist:
part['qrpath']=qrfile
part.save()
return qrfile
#To find the children of a part, based on a flat BOM and a BOM as defined by the solidbom class below
def get_children(father_partnumber,father_rev,bom,flatbom, qty="total"):
children=bom.loc[(bom['father_partnumber']==father_partnumber) & (bom['father_revision']==father_rev)]
children_rename_dict={}; children_rename_dict['child_partnumber']='partnumber'
children_rename_dict['child_revision']='revision'
children=children.rename(columns=children_rename_dict)
children_flatbom=flatbom.merge(children,on=['partnumber','revision'],
how='left',indicator=True).query('_merge == "both"').drop(columns='_merge').reset_index(drop=True).sort_values(by='partnumber')
if len(children)>0:
return children_flatbom
else:
return []
# class User(db.Model):
# # Defines the Table comment
# __tablename__ = "user"
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# username = db.Column(db.String,unique=True , nullable=False)
# password = db.Column(db.String, nullable=False)
# email = db.Column(db.String, nullable=False)
# password = db.Column(db.String)
# authenticated = db.Column(db.Boolean, default=False)
# def is_active(self):
# """True, as all users are active."""
# return True
# def get_id(self):
# """Return the email address to satisfy Flask-Login's requirements."""
# return self.email
# def is_authenticated(self):
# """Return True if the user is authenticated."""
# return self.authenticated
# def is_anonymous(self):
# """False, as anonymous users aren't supported."""
# return False
# class Comment(db.Model):
# # Defines the Table comment
# __tablename__ = "comment"
# id = db.Column(db.Integer, primary_key=True, autoincrement=True)
# part_id = db.Column(db.Integer, ForeignKey('part.id'), nullable=False)
# user_id = db.Column(db.Integer,ForeignKey('user.id'), nullable=False)
# body =db.Column(db.String)
# category =db.Column(db.String)
# created=db.Column(db.Date)
# pic_path=db.Column(db.String)
# def __init__(self, part_id="",user_id="",body="",category="", created="",pic_path=""):
# self.part_id = part_id
# self.user_id = user_id
# self.body=body
# self.category =category
# self.pic_path=pic_path
# self.created=created
###########################################################################################################
###########################################################################################################
###########################################################################################################
###########################################################################################################
class mongoJob(DynamicDocument):
meta = {'collection': 'job'}
jobnumber=StringField(unique=True )
description=StringField( )
customer=StringField( )
user_id=StringField( )
date_create=StringField( )
date_due=StringField( )
date_modify=StringField( )
date_finish=StringField( )
bom=ListField(DynamicField())
#For the print outs on terminal of the object
def __repr__(self):
return f'Job/n_{self.jobnumber}_DES_{self.description}'
def __str__ (self):
return f'Job/n_{self.jobnumber}_DES_{self.description}'
def to_dict(self):
dirtydict=self.to_mongo().to_dict()
try:
dirtydict['_id']=str(dirtydict['_id'])
except:
pass
return dirtydict
class mongoSupplier (DynamicDocument):
meta = {'collection': 'supplier'}
suppliername=StringField(unique=True )
description=StringField( )
location=StringField()
address= StringField()
processes= ListField(StringField())
contact= StringField()
#For the print outs on terminal of the object
def __repr__(self):
return f'Supplier_{self.suppliername}_DES_{self.description}'
def __str__ (self):
return f'Supplier_{self.suppliername}_DES_{self.description}'
def to_dict(self):
dirtydict=self.to_mongo().to_dict()
try:
dirtydict['_id']=str(dirtydict['_id'])
except:
pass
return dirtydict
class mongoOrder (DynamicDocument):
meta = {'collection': 'order'}
ordernumber=StringField(unique=True )
description=StringField( )
job=StringField()
supplier=ReferenceField(mongoSupplier)
parts=ListField(ReferenceField(mongoPart))
user_id=StringField( )
date_create=StringField( )
date_due=StringField( )
date_modify=StringField( )
date_finish=StringField( )
#For the print outs on terminal of the object
def __repr__(self):
return f'Job/n_{self.ordernumber}_DES_{self.description}'
def __str__ (self):
return f'Job/n_{self.ordernumber}_DES_{self.description}'
def to_dict(self):
dirtydict=self.to_mongo().to_dict()
try:
dirtydict['_id']=str(dirtydict['_id'])
except:
pass
return dirtydict
class solidbom():
def __init__(self, bomfile, flatfile,deliverableslocation,outputfolder,toppart=None):
###Load configuration
[self.process_conf,self.property_conf, self.variables_conf] = [process_conf , property_conf,variables_conf]
### List of invalid chars, names, etc...
### this is to be set up from a conf file or something
self.file=bomfile
self.invalid_col_chars=['.','-',' ']
self.renamelist={'part number':'partnumber',
'sw-configuration name(configuration name)':'sw_configuration',
'sw-folder name(folder name)':'folder',
'sw-file name(file name)':'file',
'item no.':'item_no'}
#to rename the cols
self.renamedict={}
#to drop the cols that are not required
self.dropcols=[]
self.col_clean_list=[]
#Variable types
self.int_cols=['qty']
# self.float_cols=['mass','thickness']
# self.bool_cols=['spare_part']
#Add timestamp of bom creation
self.timestamp=date.today().strftime('%d_%m_%Y')
#Default folder out
self.folderout=outputfolder
self.deliverables_folder=deliverableslocation
##### Load the input file and create the dataframe
if bomfile!="" and flatfile!="":
print(flatfile)
#Insert all the individual part files; later entries override the properties of earlier ones
with open(flatfile) as f:
lines=f.readlines()
print(lines)
for line in lines:
print(line)
partdict=json.loads(line)
print(partdict)
#Add uploader user
partdict['uploader']=current_user.to_dict()
partnumber=partdict["partnumber"]
revision=partdict["revision"]
print("IMPORTED DICT - ",partnumber,revision)
existing=partcol.find_one({"partnumber":partnumber,"revision":revision})
print(existing)
# flash(existing)
if existing==None:
partcol.insert_one(partdict)
# print(input("test"))
else:
fieldsdrop={}
for field in existing.keys():
if field != '_id' and field!='partnumber' and field!='revision':
fieldsdrop[field]=""
# partcol.update ( { "_id": mongoid },{ "$unset":fieldsdrop})
# partcol.update ( { "_id": mongoid },{ "$set":partdict})
partcol.update_one({"partnumber":partnumber, "revision":revision},{ "$unset":fieldsdrop})
partcol.update_one({"partnumber":partnumber, "revision":revision},{ "$set":partdict})
print("-----------------------------------------------------")
print("-----------------------------------------------------")
print(partdict)
print("-----------------------------------------------------")
print("-----------------------------------------------------")
part=mongoPart.objects(partnumber=partnumber,revision=revision)[0]
part.updateFileset(persist=True)
if part.pngpath:
cropandbackground(part.pngpath)
part.get_process_icons(persist=True)
#To add the coating process if specified
if "zinc" in part.finish.lower():
part.process.append("zinc")
part.get_process_icons(persist=True)
if "gal" in part.finish.lower():
part.process.append("galvanize")
part.get_process_icons(persist=True)
if "nickel" in part.finish.lower():
part.process.append("nickel")
part.get_process_icons(persist=True)
qr_code(part, persist=True)
#if "pngpath" in part.keys():
####BOM IMPORT STARTS ######
file_enc=find_encoding(bomfile)
self.filedata=pd.read_csv(bomfile, encoding =file_enc, sep='\t', lineterminator='\r',dtype=str)
#Copy data to manipulate
self.data=self.filedata.fillna("").copy()
print("************** Afterimport filling nan with "" ")
print(self.data)
#Rename cols, not dependent on excel config file, check for future
self.data=self.data.rename(columns={"ITEM NO.":"item_no","PART NUMBER":"partnumber","Approved":"approved","QTY.":"qty"})
#Remove leading spaces from all cols
for col in self.data:
if col!='qty': self.data[col]=self.data[col].str.lstrip()
print("************** After removing spacess")
print(self.data)
#Drop rows with no quantity and transform the col to int
self.data.dropna(subset = ["qty"], inplace=True)
self.data["qty"]=pd.to_numeric(self.data["qty"], errors='coerce').fillna(0).astype(int)
print("************** after as rename, drop no quantities, and to int")
print(self.data)
#Drop empty partnumbers rows
self.data=self.data[self.data.partnumber!=""]
print("************** after dropping no partnumber rows ")
print(self.data)
#Modify the revision entry to account for the approved status
#self.data.loc[self.data['event'].eq('add_rd') & self.data['environment']=="", 'environment'] = 'RD'
self.data['revision']=np.where(self.data['approved']=="",self.data['revision']+"MOD",self.data['revision'])
self.data = self.data.reset_index(drop=True)
print("************** after modifying revision based on approved status")
print(self.data)
################################################
######################################################
#Apply company customization
#no need with mongodb
# self.customize()
#Clean the entry data
# self.clean_data()
#Get the root component definition
print(self.data)
self.root_definition()
#Create bom objects
self.createbom()
#Create bom objects
self.uploadbom()
print("**********************************")
print("**********************************")
#Screening based on custom properties
# self.property_screening()
#Find related files/deliverables
# self.find_deliverables()
#Dataframe to database
#self.createdatabase()
elif toppart!=None:
self.part=toppart
self.partnumber=toppart.partnumber
self.revision=toppart.revision
self.description=toppart.description
#self.flatbom=get_flatbom(self.partnumber,self.revision)
self.tag=self.partnumber+"-REV-"+self.revision+"-"+self.timestamp
def solidbom_from_flatbom(object_list,part,outputfolder="",sort=False):
if outputfolder=="":
folderout="temp"
else:
folderout="outputfolder"
print("---obj in--- for " , part.partnumber)
for pepe in object_list:
print(pepe.partnumber)
bomout=solidbom ( "","",deliverables_folder,folderout+"/" , toppart=part)
bomout.flatbom=pd.DataFrame([x.to_dict() for x in object_list])
print("---bom out --")
for i,row in bomout.flatbom.iterrows():
print(row.partnumber)
return bomout
print("---end --")
print(" ")
# def find_deliverables(self):
# #Create the empty cols for storing the info
# for extension in deliverables:
# self.flatbom[extension]=False
# self.flatbom[extension+'path']=''
# for i in range(len(self.flatbom)):
# partstring=self.flatbom.at[i,'file']+"_REV_"+self.flatbom.at[i,'revision']
# for extension in deliverables:
# extension_folder=self.deliverables_folder+extension+"/"
# file_string=extension_folder+partstring+"."+extension
# if os.path.isfile(file_string):
# self.flatbom.at[i,extension+'path']=file_string
# self.flatbom.at[i,extension]=True
def root_definition(self):
self.partnumber=self.data.at[0,'partnumber']
self.revision=self.data.at[0,'revision']
self.tag=self.partnumber+"-REV-"+self.revision+"-"+self.timestamp
#Set output location
self.folderout=self.folderout+self.tag+"/"
create_folder_ifnotexists(self.folderout)
# #no need with mongodb
# def customize(self):
# #Build the rename dictionary and rename cols
# # for prop in property_conf.keys():
# # self.renamedict[property_conf[prop]['custom_property']]=prop
# # self.data=self.data.rename(columns=self.renamedict)
# #Drop the non required properties from the dataframe
# # for col in self.data.columns:
# # if not col in [*property_conf]:
# # self.dropcols.append(col)
# # self.data= self.data.drop(self.dropcols, axis = 1)
# #Rename cols, see above to use the input from the excel file later on
# self.data=self.data.rename(columns={"ITEM NO.":"item_no","PART NUMBER":"partnumber","Approved":"approved","QTY.":"qty"})
def clean_data(self):
###Remove all the entries with no file related to it
###This will have to be revised if the cut list and sheet metal properties
### are to be counted
# self.data.dropna(subset = ["qty"], inplace=True)
#Transform to right data type based on init lists:
#Nan values will be filled with empty string or 0
#no need with mongodb
for col in self.data:
#Remove spaces
self.data[col]=self.data[col].str.lstrip()
if col in self.int_cols:
try:
self.data[col]=pd.to_numeric(self.data[col].fillna(0).astype(int))
except:
print("Problems tranforming to int " , col)
print("forcing and replacing non valid with 0.0, check impact on output files")
self.data[col]=pd.to_numeric(self.data[col], errors='coerce').fillna(0).astype(int)
# elif col in self.float_cols:
# try:
# self.data[col]=pd.to_numeric(self.data[col].fillna(0.0).astype(float))
# except:
# print("Problems tranforming to float " , col)
# print("forcing and replacing non valid with 0.0, check impact on output files")
# self.data[col]=pd.to_numeric(self.data[col], errors='coerce').fillna(0.0).astype(float)
# elif col in self.bool_cols:
# self.data[col]=self.data[col].fillna(False).astype(bool)
# else:
# self.data[col]=self.data[col].fillna("").astype(str)
#no need with mongodb
# #Remove empty rows, spaces and and dirty data
# self.data['revision']=self.data['revision'].astype(str)
#Remove non alphanumeric characters from revision and approved and replace by "" (in case empty or malformed)
self.data.loc[self.data.revision.str.isalnum()==False,'revision']=''
self.data.loc[self.data.revision=="nan",'revision']=''
self.data.loc[self.data.approved.str.isalnum()==False,'approved']=''
self.data.loc[self.data.approved=="nan",'approved']=''
#Remove leading spaces in part number
print(self.data)
self.data['partnumber']=self.data['partnumber'].str.lstrip()
#Remove all the empty partnumber lines (to include in the future )
self.data.dropna(subset = ["partnumber"], inplace=True)
print(self.data)
#no need with mongodb
#Reorder the columns for easier debugging and manipulations
# newcols=maincols+refcols
# for col in self.data.columns.to_list():
# if not col in newcols: newcols.append(col)
# self.data=self.data.reindex(columns=newcols)
#no need with mongodb
#Put process, finish and treatment as lowercase
# for prop in lowercase_properties:
# self.data[prop]=self.data[prop].str.lower()
#Remove all non desired properties
def property_screening(self):
#Mark parts stored in hardware folders with the hardware process
for folder in hardware_folder:
self.flatbom.loc[self.flatbom['folder'].str.lower().str.contains(folder),['process','process2','process3']]=['hardware',"",""]
def createdatabase(self):
# #Add parts to database
# #The overwrite must be revised
# for index, row in self.flatbom.iterrows():
# database_part=Part()
# database_part.partfromlist(row)
# #Find if the part is already in database
# check_part=db.session.query(Part).filter(and_(Part.partnumber==row['partnumber'] ,
# Part.revision==row['revision'])
# ).first()
# #Create or update qrcode
# qr_code(database_part)
# #IF part exists overwrite attributes
# if check_part==None:
# db.session.add(database_part)
# else:
# for prop in property_conf.keys():
# if hasattr(database_part, prop):
# setattr(check_part,prop,getattr(database_part,prop))
# #Commit changes on part table
# db.session.commit()
# db.session.close()
#Add bom to database
for index, row in self.flatbom.iterrows():
database_part=Part()
database_part=db.session.query(Part).filter(and_(Part.partnumber==row['partnumber'],
Part.revision==row['revision'])
).first()
#Erase all bom entries of the related part downstream, i.e. erase child entries
if database_part.partnumber!="":
bomentries=db.session.query(Bom).filter(Bom.father_id==database_part.id)
for bomline in bomentries:
db.session.delete(bomline)
db.session.commit()
#Get children from solidbom
children=get_children(database_part.partnumber,database_part.revision,self.bom, self.flatbom)
#Add children to database
if len(children)>0:
for i, childrow in children.iterrows():
childpart=db.session.query(Part).filter(and_(Part.partnumber==childrow['partnumber'] ,
Part.revision==childrow['revision'])
).first()
if childpart!=None:
bomentry=Bom(database_part.id,childpart.id,childrow['qty'])
db.session.add(bomentry)
#Commit changes on bom table
db.session.commit()
db.session.close()
def createbom(self):
self.flatbom=self.data.copy()
#Remove qty and tree reference to get unique entries
#And add the total quantity
del self.flatbom['item_no']
del self.flatbom['qty']
self.flatbom['totalqty']=0
# #Drop flatbom duplicates
self.flatbom= self.flatbom.drop_duplicates()
self.flatbom=self.flatbom.reset_index(drop=True)
## Create bom dataframe
self.bom= pd.DataFrame({'father_partnumber':"",
'father_revision':"",
'child_partnumber':self.data['partnumber'],
'child_revision':self.data['revision'],
'qty':self.data['qty'],
'ref': self.data ['item_no']
})
#self.bom.reset_index(drop=True)
#Filter bom ref for pointing only to father for
#accounting of duplicated configurations
for index, row in self.data.iterrows():
self.bom.at[index,'ref']=re.sub(r"\..?[0-9]$", "",self.bom.at[index,'ref'])
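# A worked example of the parent-reference regex (hypothetical item numbers):
#   re.sub(r"\..?[0-9]$", "", "1.2.3") -> "1.2"  (child 1.2.3 hangs off father 1.2)
#   re.sub(r"\..?[0-9]$", "", "1")     -> "1"    (top level maps to itself; marked 'root' below)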
#Build bom table finding referenced father part number
for index, row in self.data.iterrows():
temp=self.data.loc[self.data['item_no'] ==re.sub(r"\..?[0-9]$", "", row ['item_no'])].reset_index(drop=True)
self.bom.at[index,'father_partnumber']=temp.at[0,'partnumber']
self.bom.at[index,'father_revision']=temp.at[0,'revision']
#If it couldn't be replaced, we are at the top level, so mark the father as root
if self.bom.at[index,'father_partnumber']==self.bom.at[index,'child_partnumber']:
self.bom.at[index,'father_partnumber']='root'
#Combine duplicate entries quantities and reset index
self.bom=self.bom.groupby(['child_partnumber','child_revision','father_partnumber','father_revision','ref'])['qty'].sum().to_frame().reset_index()
self.bom=self.bom.reset_index(drop=True)
# #Compute the total quantity for each partnumber
self.data['branchqty']=self.data['qty']
self.data=self.data.set_index('item_no')
for index, row in self.data.iterrows():
father_index=re.sub(r"\..?[0-9]$", "", index)
while len(father_index.split('.'))>1:
self.data.at[index,'branchqty']=self.data.at[index,'branchqty']*self.data.at[father_index,'branchqty']
father_index=re.sub(r"\..?[0-9]$", "", father_index)
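# Worked example of the multiplicative branch quantity (hypothetical tree):
# if item 1.2 has qty 3 and its child 1.2.1 has qty 4, branchqty of 1.2.1 = 4 * 3 = 12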
for index, row in self.flatbom.iterrows():
self.flatbom.at[index,'totalqty']=self.data[(self.data['partnumber']==row['partnumber'] )& (self.data['revision']==row['revision'])]['branchqty'].sum()
print("************** BOM ")
print (self.bom)
print("************** FLATBOM ")
print (self.flatbom)
print("************** DATA AFTER bOM ")
print(self.data)
def uploadbom(self):
for index, row in self.bom.iterrows():
fatherPN=row['father_partnumber']
childPN=row['child_partnumber']
fatherREV=row['father_revision']
childREV=row['child_revision']
qty=row['qty']
father=partcol.find_one({"partnumber":fatherPN,"revision":fatherREV})
child=partcol.find_one({"partnumber":childPN,"revision":childREV})
# father=partcol.find_one({"partnumber":fatherPN})
# child=partcol.find_one({"partnumber":childPN})
# print("*"+fatherPN+"*","*"+ fatherREV+"*")
# print("*"+childPN+"*","*"+ childREV+"*")
# print(father['partnumber'], child['partnumber'])
# print(father['partnumber'], child['partnumber'])
if father!=None and child!=None:
if "children" in father.keys():
partcol.update_one({"partnumber":fatherPN, "revision":fatherREV},{ "$push":{'children': child['_id'],'childrenqty': qty}})
else:
partcol.update_one({"partnumber":fatherPN, "revision":fatherREV},{ "$set":{'children':[ child['_id']],'childrenqty':[qty]}})
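# After upload, a father document ends up shaped roughly like this (hypothetical values):
#   {"partnumber": "A-100", "revision": "B",
#    "children": [ObjectId("..."), ObjectId("...")], "childrenqty": [2, 8]}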
def solidbom_to_excel(self,process=""):
redFill = PatternFill(start_color='FFEE1111',
end_color='FFEE1111',
fill_type='solid')
yellowFill = PatternFill(start_color='00FFFF00',
end_color='00FFFF00',
fill_type='solid')
bom_in=self.flatbom.loc[(self.flatbom['process']!='hardware') & (self.flatbom['process2']!='hardware') & (self.flatbom['process3']!='hardware')].copy()
#Rearrange the cols
notmain_cols=[]
last_cols=[]
for col in bom_in:
if col in maincols or col in refcols:
pass
elif "path" in col or col in deliverables or col in ['eprt','edrw','easm','threemf']:
last_cols.append(col)
elif col!="_sa_instance_state":
notmain_cols.append(col)
#Add screenshot col
bom_in["Screenshot"]=""
#Copy with only the columns needed for inserting the images (avoids carrying image path columns further)
bom_image=bom_in[['partnumber']+['revision']+['pngpath']+['png']]
#Replace datasheet location by web link:
bom_in['datasheet']=bom_in['datasheet'].str.replace("//","/")
bom_in['datasheet']=bom_in['datasheet'].str.replace(fileserver_path,"http://"+webfileserver)
if process!="":
bom_in=bom_in[["Screenshot"]+process_conf[process]['fields']]
else:
bom_in=bom_in[["Screenshot"]+maincols+refcols+notmain_cols+last_cols]
self.excel_file=self.folderout + "BOM_tables-" +self.tag +".xlsx"
#Prepare file and format wrap
writer = pd.ExcelWriter(self.excel_file, engine = 'xlsxwriter')
workbook = writer.book
wrap_format = workbook.add_format({'text_wrap': True})
#wrap_format.set_border(6)
#Dump dataframe to excel
if process!="":
sheet_name = process.upper()+ ' scope of supply'
else:
sheet_name = 'Flatbom'
bom_in.to_excel(writer, sheet_name = sheet_name)
# Set the worksheet and autofilter.
worksheet = writer.sheets[sheet_name]
(max_row, max_col) = bom_in.shape
worksheet.autofilter(0, 0, max_row, max_col )
worksheet.set_column(2,max_col,None,wrap_format)
#Width of cols
worksheet.set_column(0,0,4,wrap_format)
worksheet.set_column(1,1,8,wrap_format)
worksheet.set_column(2,2,20,wrap_format)
worksheet.set_column(3,3,4,wrap_format)
worksheet.set_column(4,4,30,wrap_format)
worksheet.set_column(5,5,4,wrap_format)
worksheet.set_column(6,max_col,20,wrap_format)
# Add images section
i=-1
for index, row in bom_image.iterrows():
i=i+1
thumb=thumbnail(row['pngpath'])
print(thumb)
#Adjust row height
worksheet.set_row(i+1,30)
#Add image
cell='B'+str(i+2)
image_url="http://"+webserver+"/part/"
image_url+= row['partnumber']+"_rev_"
if row['revision']=="":
image_url+= "%25"
else:
image_url+= row['revision']
print(row['png'])
if (row['png']!="FALSE" and row['png']!=False ) or thumb!="":
worksheet.insert_image(cell, thumb, {'x_offset': 1,
'y_offset': 1,
'x_scale': 0.5,
'y_scale': 0.5,
'object_position': 1,
'url': image_url})
if process=="":
##Generate basic lists:
#with pd.ExcelWriter(self.excel_file) as writer:
listofprocess=[*process_conf]
for process in listofprocess:
property_list=['partnumber','revision','description','material']
values_list=["totalqty"]
process_df=self.flatbom.loc[(self.flatbom['process']==process) | (self.flatbom['process2']==process) | (self.flatbom['process3']==process)]
if process=='folding' or process=='lasercut' or process=='profile cut' :
property_list.append('thickness')
if process=='purchase':
property_list=['partnumber','description','oem','supplier','supplier_partnumber', 'category','datasheet','pdfpath']
if process=='hardware':
property_list.remove('revision')
process_df=pd.pivot_table(process_df,index=property_list,values=values_list)
if len(process_df)>0:
process_df.to_excel(writer,sheet_name=process)
#print (process_df)
# Set the autofilter.
worksheet = writer.sheets[process]
(max_row, max_col) = process_df.shape
worksheet.autofilter(0, 0, max_row, max_col +len (property_list)-1)
worksheet.set_column(0,max_col +len (property_list)-1,15,wrap_format)
#Close workbook with xlsxwriter so openpyxl can open it
workbook.close()
return self.excel_file
def get_parents(self,partnumber,revision):
parents=self.bom.loc[(self.bom['child_partnumber']==partnumber) & (self.bom['child_revision']==revision)]
parents_rename_dict={} ; parents_rename_dict['father_partnumber']='partnumber'
parents_rename_dict['father_revision']='revision'
parents=parents.rename(columns=parents_rename_dict)
parents=self.flatbom.merge(parents,on=['partnumber','revision'],
how='left',indicator=True).query('_merge == "both"').drop(columns='_merge').reset_index(drop=True)
if len(parents)>0:
return parents
else:
return []
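# The indicator merge above acts as an inner-join style filter: it keeps only flatbom
# rows whose (partnumber, revision) pair appears among the renamed parent references.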
def gather_datasheet(self):
#Check if outputfolder exists otherwise create it
outputfolder=self.folderout+"datasheets/"
create_folder_ifnotexists(outputfolder)
if len(self.flatbom[(self.flatbom['process']=='purchase') |( self.flatbom['process2']=='purchase')|( self.flatbom['process3']=='purchase')]):
purchasefolder=self.folderout+"purchase/"
create_folder_ifnotexists(purchasefolder)
for i in range(len(self.flatbom)):
targetfile=outputfolder + self.flatbom['partnumber'][i]+"-datasheet"
if os.path.isfile(self.flatbom.at[i,"datasheet"]):
sourcefile=self.flatbom["datasheet"][i]
fileName, fileExtension = os.path.splitext(sourcefile)
if "png" in fileExtension.lower() or "jpg" in fileExtension.lower() :
try:
image1=Image.open(sourcefile)
im1=image1.convert('RGB')
targetfile=targetfile+".pdf"
im1.save(targetfile)
self.flatbom.at[i,'datasheet']=targetfile
except:
print (self.flatbom['partnumber'][i] + " PROBLEMS COMPILING DATASHEET ", sourcefile)
elif "pdf" in fileExtension.lower():
targetfile=targetfile + fileExtension
sourcefile=PureWindowsPath(sourcefile)
targetfile=PureWindowsPath(targetfile)
#print(sourcefile, " copy to ", targetfile)
try:
copyfile(sourcefile,targetfile)
self.flatbom.at[i,'datasheet']=targetfile
except:
print (self.flatbom['partnumber'][i] + " PROBLEMS COMPILING DATASHEET ", sourcefile, targetfile)
if self.flatbom['process'][i]=='purchase' or self.flatbom['process2'][i]=='purchase' or self.flatbom['process3'][i]=='purchase' :
purchasefile=purchasefolder + self.flatbom['partnumber'][i]+".pdf"
try:
copyfile(targetfile,purchasefile)
except:
print("Couldn't copy ", purchasefile , " to ", targetfile)
elif self.flatbom.at[i,"datasheet"]!="":
try:
web_to_pdf(self.flatbom['link'][i],targetfile+".pdf")
print(self.flatbom['partnumber'][i] , " DATASHEET FROM WEB")
except:
try:
print(self.flatbom['partnumber'][i] , " NO DATASHEET - ", sourcefile)
except:
print(self.flatbom['partnumber'][i] , " invalid SOURCEFILE !!!!!!!!!!")
# self.flatbom.at[i,'notes'].append("Invalid datasheet source")
def gather_deliverables(self):
#Loops over the processes in the excel configurator file
#but excludes the last one (others) that is why [*process_conf][:-1]
# so be careful when adding more
for process in [*process_conf][:-1]:
bom_in=self.flatbom.loc[(self.flatbom['process']==process) | (self.flatbom['process2']==process) | (self.flatbom['process3']==process)]
if len(bom_in)==0:
continue
musthave=[]
if process_conf[process]['pdf']==1: musthave.append("pdf")
if process_conf[process]['dxf']==1: musthave.append("dxf")
if process_conf[process]['step']==1: musthave.append("step")
#Check if outputfolder exists otherwise create it
outputfolder=self.folderout+process+"/"
print(outputfolder)
if len(musthave)>0:create_folder_ifnotexists(outputfolder)
for index, row in bom_in.iterrows():
#print(index,row['partnumber'], row["pdfpath"])
filenamebit=row["partnumber"]+"_REV_"+row["revision"]
for extension in musthave:
sourcefile=self.deliverables_folder+extension+"/"+filenamebit+"."+extension
if process=='folding' or process=='lasercut' or process=='profile cut':
targetfile=outputfolder+filenamebit
targetfile=targetfile+"-"+row["material"]+"_"+str(row["thickness"])+"mm."+extension
# if row["thickness"]==0 or str(row["thickness"])=='':
# self.flatbom.at[index,'notes'].append("Missing thickness")
else:
targetfile=outputfolder+filenamebit+"."+extension
if os.path.isfile(sourcefile):
copyfile(sourcefile,targetfile)
print(sourcefile,targetfile)
class Bom(db.Model):
# Defines the table name
__tablename__ = "bom"
# Defines the variables
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
father_id = db.Column(Integer, ForeignKey('part.id'), nullable=False)
child_id = db.Column(Integer, ForeignKey('part.id') , nullable=False)
qty=db.Column(Integer, nullable=False)
def __init__(self, father_id, child_id,qty):
self.father_id = father_id
self.child_id = child_id
self.qty = qty
self.child=Part.query.filter_by(id=self.child_id).first()
def getchild(self):
self.child=Part.query.filter_by(id=self.child_id).first()
return self.child
def __repr__(self):
self.getchild()
return f'BOM( {self.child.partnumber}, {self.child.revision} , quantity {self.qty})'
def __str__ (self):
self.getchild()
return f'BOM( {self.child.partnumber} , {self.child.revision} ,quantity {self.qty})'
def deletepart(database_part,echo=False):
bomentries=db.session.query(Bom).filter(or_(Bom.father_id==database_part.id,Bom.child_id==database_part.id))
for bomline in bomentries:
this=bomline.id
db.session.delete(bomline)
if echo: print("deleted-",this)
db.session.commit()
# bomentries=db.session.query(Bom).filter(Bom.child_id==database_part.id)
# for bomline in bomentries:
# this=bomline.id
# db.session.delete(bomline)
# if echo: print("deleted-",this)
# db.session.commit()
this=database_part.id
db.session.delete(database_part)
if echo: print("part deleted-",this)
db.session.commit()
if echo: print("all deleted from ",this)
#db.session.close()
class Part(db.Model):
# Defines the table name
__tablename__ = "part"
# Defines the table columns
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
partnumber = db.Column(db.String, nullable=False)
revision = db.Column(db.String)
approved = db.Column(db.String)
author = db.Column(db.String)
category = db.Column(db.String)
configuration = db.Column(db.String)
colour = db.Column(db.String)
datasheet = db.Column(db.String)
description = db.Column(db.String)
drawndate = db.Column(db.String)
file = db.Column(db.String)
finish = db.Column(db.String)
folder = db.Column(db.String)
link = db.Column(db.String)
mass = db.Column(db.Float)
material = db.Column(db.String)
oem = db.Column(db.String)
process = db.Column(db.String)
process2 = db.Column(db.String)
process3 = db.Column(db.String)
spare_part = db.Column(db.Boolean)
supplier = db.Column(db.String)
supplier_partnumber = db.Column(db.String)
thickness = db.Column(db.Float)
treatment = db.Column(db.String)
notes = db.Column(db.String)
asset = db.Column(db.String)
children= relationship("Part",
secondary="bom",
primaryjoin=id==Bom.father_id,
secondaryjoin=id==Bom.child_id,
backref="parent" )
# comments=relationship("Comment")
def as_dict(self,folder=""):
if not hasattr(self,'qty'):
self.qty=0
self.totalqty=self.qty
else:
self.totalqty=self.qty
self.datasheet_available=False
self.png=False
self.pdf=False
self.eprt=False
self.edrw=False
self.easm=False
self.edr=False
self.png_d=False
self.dxf=False
self.step=False
self.threemf=False
self.datasheet_link=""
self.modelpath=""
self.pngpath=""
self.pdfpath=""
self.dxfpath=""
self.edrpath=""
self.steppath=""
self.threemfpath=""
self.png_dpath=""
if folder!="":
self.updatefilespath(folder,local=True)
return self.__dict__
def to_dict(self):
self.updatefilespath(webfileserver)
if self.revision=="":
urllink=url_for('tinylib.partnumber',partnumber=self.partnumber,revision="%25",detail="quick")
else:
urllink=url_for('tinylib.partnumber',partnumber=self.partnumber,revision=self.revision,detail="quick")
weblink= '<a href="'+ urllink + '">' + """<img src="http://""" + self.pngpath + """" width=auto height=30rm></a>"""
return {
'id': self.id,
'pngpath': weblink,
'partnumber': self.partnumber,
'revision': self.revision,
'description': self.description,
'process': self.process,
'process2': self.process2,
'process3': self.process3,
'finish': self.finish
}
def getchildren (self):
outlist=[]
for child in self.children:
kid={}
kid['part']=child
kid['qty']=Bom.query.filter_by(father_id=self.id,child_id=child.id).first().qty
outlist.append(kid)
return outlist
def children_with_qty (self):
outlist=[]
for child in self.children:
child.qty=Bom.query.filter_by(father_id=self.id,child_id=child.id).first().qty
outlist.append(child)
return outlist
def get_tag(self):
self.tag=self.partnumber+"_REV_"+self.revision+"-"+date.today().strftime('%d_%m_%Y')
return self.tag
def parents_with_qty (self):
outlist=[]
for parent in self.parent:
parent.qty=Bom.query.filter_by(father_id=parent.id,child_id=self.id).first().qty
outlist.append(parent)
return outlist
def hasProcess(self,process):
if process in self.process or process in self.process2 or process in self.process3:
return True
else:
return False
def hasConsumingProcess(self):
processlist=[self.process,self.process2,self.process3]
processlist=[x for x in processlist if x in process_conf.keys()]
consume=False
for process in processlist:
if int(process_conf[process]['priority'])<20:
consume=True
return consume
def isMainProcess(self,process):
mainprocess_bool=False
if self.hasProcess(process):
processlist=[self.process,self.process2,self.process3]
processlist=[x for x in processlist if x in process_conf.keys()]
if len(processlist)>0:
for x in processlist:
if int(process_conf[process]['priority'])<= int(process_conf[x]['priority']):
mainprocess_bool=True
else:
mainprocess_bool=False
return mainprocess_bool
def MainProcess(self):
processlist=[self.process,self.process2,self.process3]
processlist=[x for x in processlist if x in process_conf.keys()]
mainprocess=self.process
if len(processlist)>0:
for x in processlist:
if int(process_conf[x]['priority'])<= int(process_conf[mainprocess]['priority']):
mainprocess=x
else:
mainprocess='other'
self.mainprocess=mainprocess
return mainprocess
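# Example with hypothetical process_conf priorities {'lasercut': 10, 'folding': 12}:
# a part with process='folding' and process2='lasercut' resolves to 'lasercut',
# since the lower priority number wins the comparison above.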
def __repr__(self):
return f'part( {self.partnumber} , {self.revision} , {self.description})'
def __str__ (self):
return f'part( {self.partnumber} , {self.revision} , {self.description})'
def updatefilespath(self,folderin,local=False, png_thumbnail=False):
#This function checks the file server local path first and then adds the http link
folder=folderin
# folder=webfileserver
self.tag=self.partnumber+"_REV_"+self.revision
pngfolder=fileserver_path+variables_conf['deliverables_folder']['value']+"png/"
pdffolder=fileserver_path+variables_conf['deliverables_folder']['value']+"pdf/"
edrfolder=fileserver_path+variables_conf['deliverables_folder']['value']+"edr/"
stepfolder=fileserver_path+variables_conf['deliverables_folder']['value']+"step/"
dxffolder=fileserver_path+variables_conf['deliverables_folder']['value']+"dxf/"
threemffolder=fileserver_path+variables_conf['deliverables_folder']['value']+"3mf/"
#Same for datasheets
datasheetfolder=fileserver_path+variables_conf['deliverables_folder']['value']+"/datasheet/"
self.pdfpath=pdffolder+self.tag+".pdf"
self.steppath=stepfolder+self.tag+".step"
self.dxfpath=dxffolder+self.tag+".dxf"
self.eprtpath=edrfolder+self.tag+".eprt"
self.easmpath=edrfolder+self.tag+".easm"
self.edrwpath=edrfolder+self.tag+".edrw"
self.png_dpath=pngfolder+self.tag+"_DWG.png"
self.pngpath=pngfolder+self.tag+".png"
self.qrpath=pngfolder+self.tag+".qr.jpg"
self.threemfpath=threemffolder+self.tag+".3mf"
if self.hasProcess("hardware") or self.hasProcess("purchase") and not file_exists(self.pngpath):
self.pngpath=pngfolder+self.file+"_REV_"+self.revision+".png"
#if self.hasProcess("hardware") and not file_exists(self.pngpath):
# if not local:
# self.pngpath=pngfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".png"
# else:
# self.pngpath=pngfolder+self.file+"_REV_"+self.revision+".png"
#Adding boolean value to false for all files
self.datasheet_available=False
self.png=False
self.qr=False
self.pdf=False
self.eprt=False
self.edrw=False
self.easm=False
self.edr=False
self.png_d=False
self.dxf=False
self.step=False
self.threemf=False
#Add the link for datasheets directly, without checking that the file exists
if self.datasheet:
#print(self.datasheet)
self.datasheet.replace('"', '')
self.datasheet.replace("file:///",'')
self.datasheet.replace('%20',' ')
# path, filename = os.path.split(self.datasheet)
# flash(PureWindowsPath(self.datasheet).name)
# flash(filename)
filename=PureWindowsPath(self.datasheet).name
self.datasheet=datasheetfolder+filename
if file_exists(self.datasheet):
self.datasheet_link=folder+variables_conf['deliverables_folder']['value']+"/datasheet/"+ filename
self.datasheet_link=self.datasheet_link.replace(' ','%20')
self.datasheet_available=True
else:
if file_exists(self.datasheet+".pdf"):
self.datasheet_link=folder+variables_conf['deliverables_folder']['value']+"/datasheet/"+ filename+".pdf"
self.datasheet_link=self.datasheet_link.replace(' ','%20')
self.datasheet_available=True
#if hasattr(self,"datasheet_link"):
# print(self.datasheet_link)
#Model paths added directly: try the part file first, then the assembly file
self.modelpath=self.folder+self.file
if file_exists(self.modelpath+".SLDPRT"):
self.modelpath=self.modelpath+".SLDPRT"
elif file_exists(self.modelpath+".SLDASM"):
self.modelpath=self.modelpath+".SLDASM"
#Update the folder location to the one given to the function; when not local,
#the folder path will normally be the file server address
if not local:
pngfolder=folder+variables_conf['deliverables_folder']['value']+"png/"
pdffolder=folder+variables_conf['deliverables_folder']['value']+"pdf/"
edrfolder=folder+variables_conf['deliverables_folder']['value']+"edr/"
stepfolder=folder+variables_conf['deliverables_folder']['value']+"step/"
dxffolder=folder+variables_conf['deliverables_folder']['value']+"dxf/"
threemffolder=folder+variables_conf['deliverables_folder']['value']+"3mf/"
#To reduce the image quality on the flatbom if needed (too much bandwidth)
#Png path
#print(self.pngpath)
if file_exists(self.pngpath):
# print("exist")
if png_thumbnail:
pngfolder=fileserver_path+variables_conf['deliverables_folder']['value']+"png/"
self.pngpath=pngfolder+self.file+"_REV_"+self.revision+".png"
self.pngpath=thumbnail(self.pngpath)
try:
path, filename = os.path.split(self.pngpath)
self.pngpath=folder+variables_conf['deliverables_folder']['value']+"png/"+filename
self.png=True
except:
print("Problems with ", self.partnumber)
print("pngfolder ", fileserver_path)
print("folder ", folder)
print("fileserver_path ", fileserver_path)
self.png=False
else:
self.pngpath=pngfolder+self.file+"_REV_"+self.revision+".png"
if not local:
self.pngpath=self.pngpath.replace(' ','%20')
else:
self.pngpath=pngfolder+self.file+"_REV_"+self.revision+".png"
self.png=True
# #qr path
# self.qrpath=qr_code(self)
self.qrpath=pngfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".qr.jpg"
self.qr=True
#png_d path
if file_exists(self.png_dpath):
self.png_dpath=pngfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+"_DWG.png"
self.png_d=True
#pdf path
if file_exists(self.pdfpath):
self.pdfpath=pdffolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".pdf"
self.pdf=True
#dxf path
if file_exists(self.dxfpath):
self.dxfpath=dxffolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".dxf"
self.dxf=True
#step path
if file_exists(self.steppath):
self.steppath=stepfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".step"
self.step=True
#threemf path
if file_exists(self.threemfpath):
self.threemfpath=threemffolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".3mf"
self.threemf=True
#Model Edrawings path
if file_exists(self.eprtpath):
self.eprtpath=edrfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".eprt"
self.eprt=True
self.edr=True
self.edrpath=self.eprtpath
elif file_exists(self.easmpath):
self.easmpath=edrfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".easm"
self.easm=True
self.edr=True
self.edrpath=self.easmpath
#Drawing - Edrawings path
if file_exists(self.edrwpath):
self.edrwpath=edrfolder+self.file.replace(' ','%20')+"_REV_"+self.revision+".edrw"
self.edrw=True
self.edr_d=True
self.edr_dpath=self.edrwpath
self.get_process_icons()
def get_process_icons (self):
self.process_icons=[]
self.process_colors=[]
if self.process!="" and self.process in process_conf.keys() :
self.process_icons.append('images/'+(process_conf[self.process]['icon']))
self.process_colors.append(process_conf[self.process]['color'])
if self.process2 in process_conf.keys() :
self.process_icons.append('images/'+(process_conf[self.process2]['icon']))
self.process_colors.append(process_conf[self.process2]['color'])
if self.process3 in process_conf.keys() :
self.process_icons.append('images/'+(process_conf[self.process3]['icon']))
self.process_colors.append(process_conf[self.process3]['color'])
else:
self.process_icons.append('images/'+process_conf['others']['icon'])
self.process_colors.append(process_conf['others']['color'])
def __init__(self, partnumber="",revision="",description="",
process="",process2="",process3="",
finish="",path_to_model_file="",material="",matspec="",partType="",
pdfpath="",edrpath="",edr_dpath="",jpgpath="",jpg_dpath="",pngpath="",png_dpath="",
dxfpath="",dwgpath="",
pdf="",edr="",edr_d="",jpg="",jpg_d="",png="",png_d="",dxf="",dwg="",check="",notes="",
pdfindex="",pdfpages="",qty="",totalqty="",colour="",asset=""):
self.partnumber=partnumber
self.revision=revision
self.description=description
self.process=process
self.process2=process2
self.process3=process3
self.finish=finish
self.material=material
self.colour=colour
self.notes=notes
self.asset=asset
#outputfiles
self.pdfpath=pdfpath
self.pngpath=pngpath
#Bom and index related
self.qty=qty
self.totalqty=totalqty
self.pdfindex=pdfindex
def ispartprocess(self,process):
if process in self.process or process in self.process2 or process in self.process3:
return True
else:
return False
#To create a part from a dataframe row
def partfromlist(self,datalist):
self.spare_part=datalist['spare_part']
self.mass=datalist['mass']
self.thickness=datalist['thickness']
self.category=datalist['category']
self.datasheet=datalist['datasheet']
self.partnumber=datalist['partnumber']
self.process=datalist['process']
self.process2=datalist['process2']
self.process3=datalist['process3']
self.configuration=datalist['configuration']
self.file=datalist['file']
self.folder=datalist['folder']
self.supplier_partnumber=datalist['supplier_partnumber']
self.description=datalist['description']
self.finish=datalist['finish']
self.material=datalist['material']
self.revision=datalist['revision']
self.approved=datalist['approved']
self.author=datalist['author']
self.supplier=datalist['supplier']
self.link=datalist['link']
self.oem=datalist['oem']
self.treatment=datalist['treatment']
self.drawndate=datalist['drawndate']
self.colour=datalist['colour']
#self.notes=datalist['notes']
self.category=datalist['category']
self.asset=datalist['asset']
##Extra values only needed for pdf list exports
self.pdfpath=datalist['pdfpath']
self.pngpath=datalist['pngpath']
try:
self.qty=str(int(datalist[ 'qty']))
except:
self.qty=0
self.totalqty=str(int(datalist[ 'totalqty']))
self.png=False
if os.path.isfile(self.pngpath): self.png=True
try:
self.pdfindex=str(int(datalist[ 'pdfindex']))
except:
self.pdfindex=""
return self
def get_components(self, components_only=True):
reflist=[]
flatbom=[]
def loopchildren(partnumber,revision,qty,reflist):
part=Part.query.filter_by(partnumber=partnumber,revision=revision).first()
children=part.children_with_qty()
for child in children:
refqty=child.qty*qty
if len(child.children)>0:
if child.hasConsumingProcess() and components_only:
reflist.append((child,refqty))
else:
reflist.append((child,refqty))
loopchildren(child.partnumber,child.revision,refqty,reflist)
else:
reflist.append((child,refqty))
loopchildren(self.partnumber,self.revision,1,reflist)
#Sum up all quantities and compile flatbom
resdict={}
for item,q in reflist:
total=resdict.get(item,0)+q
resdict[item]=total
for part in resdict.keys():
part.qty=resdict[part]
flatbom.append(part)
#Sort flatbom by category, supplier, oem, approved status and partnumber
#flatbom.sort(key=lambda x: x.partnumber)
flatbom.sort(key=lambda x: (x.category,x.supplier,x.oem,x.approved,x.partnumber))
print(len(flatbom))
return flatbom
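# A shape sketch of the traversal above (hypothetical three-level tree):
# loopchildren walks root -> sub-assembly (qty 2) -> screw (qty 4) and appends
# (screw, 8); resdict then sums duplicates so each part appears once with its total qty.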
def partlist(bom_list):
part_list=[]
for i in range (len(bom_list)):
part_in=Part()
part_in=part_in.partfromlist(bom_list.iloc[i])
part_list.append(part_in)
return part_list
def get_tree(partnumber,revision,partlist,qty=1):
refpart=Part.query.filter_by(revision=revision,partnumber=partnumber).first()
#print(partlist)
if len (refpart.children)>0:
for i in refpart.children:
partlist.append((i.getchild(),i.qty*qty))
get_tree(i.child.partnumber,i.child.revision,partlist,qty=i.qty*qty)
return partlist
def get_flatbom(partnumber,revision,qty=1):
refpart=Part.query.filter_by(revision=revision,partnumber=partnumber).first()
flatlist=[]
flatlist.append((refpart,qty))
dictlist=[]
checklist=[]
get_tree(partnumber,revision,flatlist)
for part,part_qty in flatlist:
part_dict={}
part_dict=part.__dict__.copy()
# part_dict.pop('_sa_instance_state')
if part_dict['partnumber']+part_dict['revision'] in checklist:
ref_index=checklist.index(part_dict['partnumber']+part_dict['revision'])
dictlist[ref_index]['totalqty']+=part_qty
else:
part_dict['totalqty']=part_qty
dictlist.append(part_dict)
checklist.append(part_dict['partnumber']+part_dict['revision'])
#print(dictlist)
flatbom=pd.DataFrame(dictlist)
flatbom['pdfpath']=deliverables_folder+"pdf\\"+flatbom['file']+"_REV_"+flatbom['revision']+".pdf"
flatbom['pngpath']=deliverables_folder+"png\\"+flatbom['file']+"_REV_"+flatbom['revision']+".png"
return flatbom
################
##### POLISH THE DATABASE CREATION
#db.create_all()
#Job class definition
class Job(db.Model):
# Defines the table name
__tablename__ = "job"
# Defines the table columns
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
jobnumber = db.Column(db.String, nullable=False,unique=True)
description = db.Column(db.String)
customer =db.Column(db.String)
user_id =db.Column(db.Integer, ForeignKey('users.id') , nullable=False)
date_create=db.Column(db.DateTime, index=True, default=datetime.utcnow)
date_due=db.Column(db.DateTime)
date_modify=db.Column(db.DateTime)
date_finish=db.Column(db.DateTime)
#scope=relationship('Jobbom',
# backref='job',
# lazy='dynamic')
#user=relationship('User', backref = "job")
def __init__(self, id="",jobnumber="",description="",user="",customer="",user_id="",date_create="",date_due="",date_modify="",date_finish="", **kwargs):
self.jobnumber=jobnumber
self.description=description
self.customer=customer
self.user_id=user_id
self.user=user
# self.date_create=datetime.now()
# self.date_due=date_due
# self.date_modify=date_modify
# self.date_finish=date_finish
class Jobbom(db.Model):
# Defines the table name
__tablename__ = "jobbom"
# Defines the table columns
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
job_id = db.Column(db.Integer, ForeignKey('job.id') , nullable=False)
part_id = db.Column(db.Integer, ForeignKey('part.id') , nullable=False)
#user_id = db.Column(db.Integer, ForeignKey('users.id') , nullable=False)
qty =db.Column(db.Integer)
def __init__(self, job_id="",part_id="",user_id="",qty="", **kwargs):
self.job_id=job_id
self.part_id=part_id
self.user_id=user_id
self.qty=qty
|
{"hexsha": "01e784ed1695ad2df6634e0e001743acb8a3a8af", "size": 86585, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/tinylib/models.py", "max_stars_repo_name": "pzetairoi/TinyMRP", "max_stars_repo_head_hexsha": "2d113a7ebc747d5a9cf082b4c6fad2ddb6b59ba8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app/tinylib/models.py", "max_issues_repo_name": "pzetairoi/TinyMRP", "max_issues_repo_head_hexsha": "2d113a7ebc747d5a9cf082b4c6fad2ddb6b59ba8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app/tinylib/models.py", "max_forks_repo_name": "pzetairoi/TinyMRP", "max_forks_repo_head_hexsha": "2d113a7ebc747d5a9cf082b4c6fad2ddb6b59ba8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3408163265, "max_line_length": 164, "alphanum_fraction": 0.5351157822, "include": true, "reason": "import numpy", "num_tokens": 17997}
|
#! /usr/bin/env python2
import roslib
import sys
import rospy
import cv2
import numpy as np
from sensor_msgs.msg import Image
from rospy_tutorials.msg import Floats
from rospy.numpy_msg import numpy_msg
from cv_bridge import CvBridge, CvBridgeError
import dynamic_reconfigure.client
class threshold_finder:
def __init__(self):
self.frame = None
self.colors = []
self.hasNewFrame = False
self.thresholds = np.array([0,0,0,0,0,0], dtype= np.float32)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/stereo/right/image_raw", Image, self.callback)
self.image_thresholds = rospy.Publisher("/threshold_values",numpy_msg(Floats), queue_size=10)
self.client = dynamic_reconfigure.client.Client("vision_server", timeout=30)
def pick_colors(self, event, x, y, flags, frame):
if event == cv2.EVENT_LBUTTONUP:
self.colors.append(self.frame[y,x].tolist())
minb = min(c[0] for c in self.colors)
ming = min(c[1] for c in self.colors)
minr = min(c[2] for c in self.colors)
maxb = max(c[0] for c in self.colors)
maxg = max(c[1] for c in self.colors)
maxr = max(c[2] for c in self.colors)
# Pack the min and max colors as a 1x2 BGR "image" so OpenCV can convert them
thresholds = np.array([[(minb, ming, minr), (maxb, maxg, maxr)]], dtype=np.uint8)
# OpenCV's constant is COLOR_BGR2HLS; the converted channel order is H, L, S
hls = cv2.cvtColor(thresholds, cv2.COLOR_BGR2HLS).reshape(2, 3)
self.client.update_configuration({"lowH":hls[0][0], "lowS":hls[0][2], "lowL":hls[0][1], "highH":hls[1][0], "highS":hls[1][2], "highL":hls[1][1]})
print (minr, ming, minb, maxr, maxg, maxb)
lb = [minb,ming,minr]
ub = [maxb,maxg,maxr]
print (lb, ub)
def callback(self,data):
try:
self.frame = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
if self.colors:
cv2.putText(self.frame, str(self.colors[-1]), (10, 50), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 0), 2)
self.hasNewFrame=True
try:
# Six values: channel-wise maxima first (B, G, R), then the channel minima
for x in range(6):
if x < 3:
self.thresholds[x] = max(c[x] for c in self.colors)
else:
self.thresholds[x] = min(c[x % 3] for c in self.colors)
except Exception:
pass
try:
self.image_thresholds.publish(self.thresholds)
except Exception:
pass
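# A minimal subscriber sketch (hypothetical node) for the published thresholds,
# whose layout as built above is [maxB, maxG, maxR, minB, minG, minR]:
#   rospy.Subscriber("/threshold_values", numpy_msg(Floats), lambda msg: print(msg.data))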
def main(args):
tf = threshold_finder()
rospy.init_node('threshold_finder', anonymous=False)
cv2.namedWindow('Sub Camera')
cv2.setMouseCallback('Sub Camera', tf.pick_colors, tf.frame)
try:
while not rospy.is_shutdown():
if tf.frame is not None and tf.hasNewFrame:
cv2.imshow('Sub Camera',tf.frame)
key=cv2.waitKey(1)
if key ==27:
sys.exit()
tf.hasNewFrame = False
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == "__main__":
main(sys.argv)
|
{"hexsha": "2e7bb58f9eb19d558f101e82375845db918c5698", "size": 2834, "ext": "py", "lang": "Python", "max_stars_repo_path": "ucf_sub_catkin_ros/src/sub_utils/src/picker.py", "max_stars_repo_name": "RoboticsClubatUCF/RoboSub", "max_stars_repo_head_hexsha": "47304c620f963a8762db57a7ed248d1df90190fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ucf_sub_catkin_ros/src/sub_utils/src/picker.py", "max_issues_repo_name": "RoboticsClubatUCF/RoboSub", "max_issues_repo_head_hexsha": "47304c620f963a8762db57a7ed248d1df90190fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2016-09-16T19:52:57.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-14T18:16:17.000Z", "max_forks_repo_path": "ucf_sub_catkin_ros/src/sub_utils/src/picker.py", "max_forks_repo_name": "RoboticsClubatUCF/RoboSub", "max_forks_repo_head_hexsha": "47304c620f963a8762db57a7ed248d1df90190fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-01-06T20:56:45.000Z", "max_forks_repo_forks_event_max_datetime": "2017-02-26T02:49:17.000Z", "avg_line_length": 31.4888888889, "max_line_length": 145, "alphanum_fraction": 0.6206774876, "include": true, "reason": "import numpy", "num_tokens": 788}
|
# -*- coding: utf-8 -*-
'''
<Lenet Neural Network High Level Synthesis Lenet>
MIT License
Copyright (c) 2020 Filipe Maciel Lins
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions: The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import pre_data
import os
import numpy as np
X_train,y_train,X_validation,y_validation,X_test,y_test = pre_data.pre_data()
num_images = 10000
filename = "test_img"
dir_name='../lenet_hls/ref/testdata/'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
for i in range(0, num_images):
filename2 = filename + str(i)
a_file = open(os.path.join(dir_name, filename2 + ".out"), "w")
for row in X_test[i]:
np.savetxt(a_file, row, '%1.23f')
a_file.close()
dir_name='../lenet_hls/ref/'
a_file = open(os.path.join(dir_name, "filenames" + ".out"), "w")
dir_name2='../../../ref/testdata/'
for i in range(0, num_images):
filename2 = filename + str(i)
a_file.write( os.path.join(dir_name2, filename2 + ".out"))
a_file.write("\n")
a_file.close()
filename = "test_result"
dir_name_results='../lenet_hls/ref/testresult/'
if not os.path.exists(dir_name_results):
os.makedirs(dir_name_results)
a_file = open(os.path.join(dir_name_results, filename + ".out"), "w")
np.savetxt(a_file, y_test[0:num_images], '%d')
a_file.close()
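# Resulting layout on disk (sketch, for the num_images setting above):
#   ../lenet_hls/ref/testdata/test_img0.out ... test_img9999.out  (pixel values, one per line)
#   ../lenet_hls/ref/filenames.out               (one relative test-image path per line)
#   ../lenet_hls/ref/testresult/test_result.out  (one integer label per line)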
|
{"hexsha": "736b2bc3e3775faf4127d3a6303917ccf65742b2", "size": 2251, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/generate_testdata.py", "max_stars_repo_name": "filipemlins/Neural-Network-High-Level-Synthesis-Lenet", "max_stars_repo_head_hexsha": "7c787ffb103880b55a6e9397cd4a24a82a69d45a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-25T12:27:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-25T12:27:43.000Z", "max_issues_repo_path": "python/generate_testdata.py", "max_issues_repo_name": "filipemlins/Neural-Network-High-Level-Synthesis-Lenet", "max_issues_repo_head_hexsha": "7c787ffb103880b55a6e9397cd4a24a82a69d45a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-11T16:19:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T16:19:31.000Z", "max_forks_repo_path": "python/generate_testdata.py", "max_forks_repo_name": "filipemlins/Neural-Network-High-Level-Synthesis-Lenet", "max_forks_repo_head_hexsha": "7c787ffb103880b55a6e9397cd4a24a82a69d45a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2337662338, "max_line_length": 101, "alphanum_fraction": 0.7334517992, "include": true, "reason": "import numpy", "num_tokens": 539}
|
# -*- coding: utf-8 -*-
"""emocoes-em-video-comentado.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nDpT7CZsvulmYnL4wfA9Y-T1wb62e-_s
# **Emotion Detection in Videos**
# **Importing the libraries**
"""
import cv2
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
from google.colab.patches import cv2_imshow
import zipfile
cv2.__version__
# Commented out IPython magic to ensure Python compatibility.
# %tensorflow_version 2.x
import tensorflow
tensorflow.__version__
"""# **Conectando com o Drive e acessando os arquivos**
Faça o download do arquivo aqui:
e importe para a sua pasta do Google Drive.
o meu caso eu inclui o arquivo em : Colab/Material.zip
Caso faça o upload para um caminho diferente, lembre-se de alterar o caminho no código abaixo
"""
from google.colab import drive
drive.mount('/content/gdrive')
path = "/content/gdrive/My Drive/Colab/Material.zip"
zip_object = zipfile.ZipFile(file=path, mode="r")
zip_object.extractall("./")
"""# **Carregando o modelo**"""
from tensorflow.keras.models import load_model
diretorio = 'Material/' # directory containing the course files
# Architecture 2 model, since it performed best in the tests
model = load_model(diretorio + "modelo_02_expressoes.h5")
"""# **Carregando o vídeo**
Envie seu video a pasta Material/Videos
ou use alguns dos videos de testes.
"""
arquivo_video = diretorio + "Videos/video_teste06.MOV"
cap = cv2.VideoCapture(arquivo_video)
conectado, video = cap.read()
print(video.shape) # shows the video dimensions
"""# **Redimensionando o tamanho**
Recomendado quando o tamanho do vídeo é muito grande.
Se o vídeo tiver a resolução muito alta então pode demorar muito o processamento.
"""
redimensionar = True
# leave True to shrink the saved video if it exceeds the maximum width we specify below.
# to keep the original size, set False
largura_maxima = 600 # pixels. defines the (maximum) width of the saved video. the height will be proportional and is computed below
# if redimensionar = True then the saved video will have its pixel size reduced IF it is wider than largura_maxima
if (redimensionar and video.shape[1]>largura_maxima):
# we need to keep the width and height proportional (preserving the original video's aspect ratio) so the image does not look stretched
proporcao = video.shape[1] / video.shape[0]
# to do that we compute the ratio (width/height) and use it to derive the height (from the width defined above)
video_largura = largura_maxima
video_altura = int(video_largura / proporcao)
else:
video_largura = video.shape[1]
video_altura = video.shape[0]
# if redimensionar = False then the width and height keep the original video's values
"""# **Definindo as configurações do vídeo**"""
# nome do arquivo de vídeo que será salvo
nome_arquivo = diretorio+'/Videos/resultado_final.mp4'
# definição do codec
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
# FourCC é um código de 4 bytes usado para especificar o codec de vídeo. A lista de códigos disponíveis pode ser encontrada no site fourcc.org
# Codecs mais usados: XVID, MP4V, MJPG, DIVX, X264...
# Por exemplo, para salvar em formato mp4 utiliza-se o codec mp4v (o nome do arquivo também precisa possuir a extensão .mp4)
# fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# FPS - frames por segundo
fps = 24
# se quiser deixar o video um pouco mais lento pode diminuir o numero de frames por segundo para 20
saida_video = cv2.VideoWriter(nome_arquivo, fourcc, fps, (video_largura, video_altura))
"""# **Processamento do vídeo e gravação do resultado**"""
from tensorflow.keras.preprocessing.image import img_to_array
# define se deixa no modo detalhado, que exibirá no canto da tela as barras com as probabilidades de cada emoção
# esse modo é pra ser usado de preferência com vídeos onde tem apenas um rosto em foco
# (não tem problema se aparecer outros rostos menores no fundo, pois abaixo colocamos uma condição para que seja considerado apenas a face com maior área.
# o importante é que não tenha mais de um rosto em foco disputando a atenção da cena)
unica_face = False
# se deixar False então o programa vai detectar a emoção de todas as faces na imagem e não vai exibir os gráficos com as probabilidades no canto
haarcascade_faces = diretorio + 'haarcascade_frontalface_default.xml' # arquivo haarcascade
# define os tamanhos para as fontes
fonte_pequena, fonte_media = 0.4, 0.7
fonte = cv2.FONT_HERSHEY_SIMPLEX
expressoes = ["Raiva", "Nojo", "Medo", "Feliz", "Triste", "Surpreso", "Neutro"]
while (cv2.waitKey(1) < 0):
conectado, frame = cap.read()
if not conectado:
break # if there was a problem loading the frame, stop the program
t = time.time() # current time, before starting (we will use it to measure how long the operations take)
# frame_video = np.copy(frame) # makes a copy of the video frame
if redimensionar: # if redimensionar = True then resize the frame to the new dimensions
frame = cv2.resize(frame, (video_largura, video_altura))
face_cascade = cv2.CascadeClassifier(haarcascade_faces)
cinza = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert to grayscale
faces = face_cascade.detectMultiScale(cinza,scaleFactor=1.2, minNeighbors=5,minSize=(30,30))
if len(faces) > 0:
for (x, y, w, h) in faces:
# if more than one face is detected, consider the one with the largest area in the image
if unica_face and len(faces) > 1:
max_area_face = faces[0]
for face in faces:
if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
max_area_face = face
face = max_area_face
(x,y,w,h) = max_area_face # returns the coordinates and size of the largest face detected in this frame
frame = cv2.rectangle(frame,(x,y),(x+w,y+h+10),(255,50,50),2) # draws a rectangle around the face
roi = cinza[y:y + h, x:x + w] # extracts only the region of interest (ROI), which is where the face is
roi = cv2.resize(roi, (48, 48)) # resizes to the training image size before feeding the neural network
roi = roi.astype("float") / 255.0 # normalizes
roi = img_to_array(roi) # converts to an array so the network can process it
roi = np.expand_dims(roi, axis=0) # changes the shape of the array
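# Shape sketch for the step above: grayscale ROI (48, 48) -> img_to_array (48, 48, 1)
# -> expand_dims (1, 48, 48, 1), the batch-of-one input the Keras model expects.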
# makes the prediction - computes the probabilities
result = model.predict(roi)[0]
print(result)
if result is not None:
if unica_face:
for (index, (emotion, prob)) in enumerate(zip(expressoes, result)):
# emotion names
text = "{}: {:.2f}%".format(emotion, prob * 100)
barra = int(prob * 150) # computes the bar size, based on the probability
espaco_esquerda = 7 # the x coordinate where the bar starts. defines how many pixels of spacing the bars have on the left, so they are not too close to the edge.
if barra <= espaco_esquerda:
barra = espaco_esquerda + 1
# if the bar size is smaller than the left spacing, keep the bar 1 pixel wide
# this is done because we use this value as a coordinate; if it were smaller the bar would grow to the left
cv2.rectangle(frame, (espaco_esquerda, (index * 18) + 7), (barra, (index * 18) + 18), (200, 250, 20), -1)
cv2.putText(frame, text, (15, (index * 18) + 15), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 0, 0), 1, cv2.LINE_AA)
resultado = np.argmax(result) # finds the emotion with the highest probability
cv2.putText(frame,expressoes[resultado],(x,y-10), fonte, fonte_media,(255,255,255),1,cv2.LINE_AA) # writes the emotion above the face
if unica_face and len(faces) > 1:
break
# once the largest face has been processed there is no need to loop over the others, since they would do the same thing (the same largest-face computation)
# so we use break to skip to the next frame
# processing time = current time (time.time()) - start time (t)
cv2.putText(frame, " frame processed in {:.2f} seconds".format(time.time() - t), (20, video_altura-20), fonte, fonte_pequena, (250, 250, 250), 0, lineType=cv2.LINE_AA)
cv2_imshow(frame)
saida_video.write(frame) # writes the current frame
print("The process has finished")
saida_video.release()
cv2.destroyAllWindows()
|
{"hexsha": "6b51349c69c430f6b22673d4598fcacd3c9a017a", "size": 8896, "ext": "py", "lang": "Python", "max_stars_repo_path": "emocoes_em_video.py", "max_stars_repo_name": "thelesson/Deteccao-de-Emocoes-em-Videos-com-TensorFlow", "max_stars_repo_head_hexsha": "1ed1b2c77ff0d70fbc983c13baf5c62dd1b31a47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "emocoes_em_video.py", "max_issues_repo_name": "thelesson/Deteccao-de-Emocoes-em-Videos-com-TensorFlow", "max_issues_repo_head_hexsha": "1ed1b2c77ff0d70fbc983c13baf5c62dd1b31a47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "emocoes_em_video.py", "max_forks_repo_name": "thelesson/Deteccao-de-Emocoes-em-Videos-com-TensorFlow", "max_forks_repo_head_hexsha": "1ed1b2c77ff0d70fbc983c13baf5c62dd1b31a47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7035175879, "max_line_length": 177, "alphanum_fraction": 0.6987410072, "include": true, "reason": "import numpy", "num_tokens": 2457}
|
from astropy import units as u
from astropy.coordinates import SkyCoord
__all__ = ['get_point_data']
def get_point_data(data, longitude_attribute, latitude_attribute, alternative_attribute=None,
frame=None, alternative_unit=None):
x_coordinates = ""
y_coordinates = ""
z_coordinates = ""
longitude = data[longitude_attribute]
latitude = data[latitude_attribute]
if alternative_attribute is None:
# Get cartesian coordinates on unit galactic sphere
coordinates = SkyCoord(longitude, latitude, unit='deg', frame=frame.lower())
x, y, z = coordinates.galactic.cartesian.xyz
# Convert to be on a sphere of radius 100pc
radius = 100
x *= radius
y *= radius
z *= radius
else:
distance = data[alternative_attribute]
# Get cartesian coordinates on unit galactic sphere
coordinates = SkyCoord(longitude * u.deg, latitude * u.deg,
distance=distance * u.Unit(alternative_unit),
frame=frame.lower())
x, y, z = coordinates.galactic.cartesian.xyz
x = x.to_value(u.pc)
y = y.to_value(u.pc)
z = z.to_value(u.pc)
n_points = len(x)
for i in range(n_points):
x_coordinates += (str(x[i]) + ",")
y_coordinates += (str(y[i]) + ",")
z_coordinates += (str(z[i]) + ",")
number_of_points = str(format(n_points, "09"))
point_data_string = number_of_points + x_coordinates + y_coordinates + z_coordinates
return point_data_string
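# The packed string therefore looks like (hypothetical 2-point example):
#   "000000002" + "x1,x2," + "y1,y2," + "z1,z2,"
# i.e. a 9-digit zero-padded point count followed by comma-terminated coordinate runs.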
# # DOESN'T WORK! USED FOR TESTING FOR FUTURE WORK
# def get_luminosity_data(data, luminosity_attribute):
# luminosity_data = ""
# luminosity_values = data[luminosity_attribute]
# for i in range(len(luminosity_values)):
# luminosity_data += (str(luminosity_values[i]) + ",")
# length_luminosity_data = str(format(len(luminosity_data), "09"))
# luminosity_data_string = length_luminosity_data + luminosity_data
# return luminosity_data_string
# # DOESN'T WORK! USED FOR TESTING FOR FUTURE WORK
# def get_velocity_data(data, velocity_attribute):
# velocity_data = ""
# velocity_values = data[velocity_attribute]
# for i in range(len(velocity_values)):
# velocity_data += (str(velocity_values[i]) + ",")
# length_velocity_data = str(format(len(velocity_data), "09"))
# velocity_data_string = length_velocity_data + velocity_data
# return velocity_data_string
|
{"hexsha": "955df38fb0703a4b62e33efaa8a5942f9acb3c9d", "size": 2565, "ext": "py", "lang": "Python", "max_stars_repo_path": "glue_openspace_thesis/utils.py", "max_stars_repo_name": "aniisabihi/glue-openspace", "max_stars_repo_head_hexsha": "853a61e0d1b0b2e5ed9919379b0a9db6ed39b1d9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "glue_openspace_thesis/utils.py", "max_issues_repo_name": "aniisabihi/glue-openspace", "max_issues_repo_head_hexsha": "853a61e0d1b0b2e5ed9919379b0a9db6ed39b1d9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "glue_openspace_thesis/utils.py", "max_forks_repo_name": "aniisabihi/glue-openspace", "max_forks_repo_head_hexsha": "853a61e0d1b0b2e5ed9919379b0a9db6ed39b1d9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-08T01:35:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T01:35:29.000Z", "avg_line_length": 31.2804878049, "max_line_length": 93, "alphanum_fraction": 0.6557504873, "include": true, "reason": "from astropy", "num_tokens": 588}
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
class NQueensProblem:
"""This class encapsulates the N-Queens problem
"""
def __init__(self, numOfQueens):
"""
:param numOfQueens: the number of queens in the problem
"""
self.numOfQueens = numOfQueens
def __len__(self):
"""
:return: the number of queens
"""
return self.numOfQueens
def getViolationsCount(self, positions):
"""
Calculates the number of violations in the given solution
        Since the input contains unique column indices for each row, no row or column violations are possible;
        only the diagonal violations need to be counted.
:param positions: a list of indices corresponding to the positions of the queens in each row
:return: the calculated value
"""
if len(positions) != self.numOfQueens:
raise ValueError("size of positions list should be equal to ", self.numOfQueens)
violations = 0
# iterate over every pair of queens and find if they are on the same diagonal:
for i in range(len(positions)):
for j in range(i + 1, len(positions)):
# first queen in pair:
column1 = i
row1 = positions[i]
# second queen in pair:
column2 = j
row2 = positions[j]
                # look for a diagonal threat for the current pair:
if abs(column1 - column2) == abs(row1 - row2):
violations += 1
return violations
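    # Hedged sketch (added, not in the original): the same diagonal count can be
    # computed with NumPy broadcasting instead of the O(n^2) Python loop. The
    # method name below is our own addition.
    def getViolationsCountVectorized(self, positions):
        """Vectorized equivalent of getViolationsCount"""
        pos = np.asarray(positions)
        indices = np.arange(len(pos))
        # pairwise absolute differences of column indices and of row positions:
        index_diff = np.abs(indices[:, None] - indices[None, :])
        pos_diff = np.abs(pos[:, None] - pos[None, :])
        # a pair shares a diagonal when the two differences match; count each
        # pair once via the strict upper triangle:
        return int(np.triu(index_diff == pos_diff, k=1).sum())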
def plotBoard(self, positions):
"""
Plots the positions of the queens on the board according to the given solution
:param positions: a list of indices corresponding to the positions of the queens in each row.
"""
if len(positions) != self.numOfQueens:
raise ValueError("size of positions list should be equal to ", self.numOfQueens)
fig, ax = plt.subplots()
# start with the board's squares:
board = np.zeros((self.numOfQueens, self.numOfQueens))
# change color of every other square:
board[::2, 1::2] = 1
board[1::2, ::2] = 1
# draw the squares with two different colors:
ax.imshow(board, interpolation='none', cmap=mpl.colors.ListedColormap(['#ffc794', '#4c2f27']))
# read the queen image thumbnail and give it a spread of 70% of the square dimensions:
queenThumbnail = plt.imread('queen-thumbnail.png')
thumbnailSpread = 0.70 * np.array([-1, 1, -1, 1]) / 2 # spread is [left, right, bottom, top]
# iterate over the queen positions - i is the row, j is the column:
for i, j in enumerate(positions):
# place the thumbnail on the matching square:
ax.imshow(queenThumbnail, extent=[j, j, i, i] + thumbnailSpread)
# show the row and column indexes:
ax.set(xticks=list(range(self.numOfQueens)), yticks=list(range(self.numOfQueens)))
ax.axis('image') # scale the plot as square-shaped
return plt
# testing the class:
def main():
# create a problem instance:
nQueens = NQueensProblem(8)
# a known good solution:
#solution = [5, 0, 4, 1, 7, 2, 6, 3]
# a solution with 3 violations:
solution = [1, 2, 7, 5, 0, 3, 4, 6]
print("Number of violations = ", nQueens.getViolationsCount(solution))
plot = nQueens.plotBoard(solution)
plot.savefig("img_queens.png")
if __name__ == "__main__":
main()
|
{"hexsha": "e6fe7abf70c28005711da2e69f2f8c762575890d", "size": 3595, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter05/queens.py", "max_stars_repo_name": "KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python", "max_stars_repo_head_hexsha": "ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter05/queens.py", "max_issues_repo_name": "KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python", "max_issues_repo_head_hexsha": "ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter05/queens.py", "max_forks_repo_name": "KonstantinKlepikov/Hands-On-Genetic-Algorithms-with-Python", "max_forks_repo_head_hexsha": "ee5e7c5f8274a7ce22c3b528f86fa2bb1695e686", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6818181818, "max_line_length": 114, "alphanum_fraction": 0.6089012517, "include": true, "reason": "import numpy", "num_tokens": 868}
|
from __future__ import division, unicode_literals, print_function
import sys
import os
import copy
import operator
import traceback
from functools import cmp_to_key
import pandas as pd
import numpy as np
from itertools import groupby, combinations
from collections import OrderedDict, defaultdict
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
from multiprocessing import Process
try:
from profilestats import profile # noqa: F401
from memory_profiler import profile as memory_profiler # noqa: F401
except ImportError:
pass
from scipy import integrate
from scipy.ndimage.filters import gaussian_filter1d
from pythomics.proteomics import config
from . import PEAK_RESOLUTION_RT_MODE
from . import peaks
from .utils import (
calculate_theoretical_distribution,
find_scan,
find_prior_scan,
find_next_scan,
nanmean,
find_common_peak_mean,
get_scan_resolution,
)
class Worker(Process):
def __init__(
self,
queue=None,
results=None,
precision=6,
raw_name=None,
mass_labels=None,
isotope_ppms=None,
debug=False,
html=False,
mono=False,
precursor_ppm=5.0,
isotope_ppm=2.5,
quant_method="integrate",
reader_in=None,
reader_out=None,
thread=None,
fitting_run=False,
msn_rt_map=None,
reporter_mode=False,
spline=None,
isotopologue_limit=-1,
labels_needed=1,
overlapping_mz=False,
min_resolution=0,
min_scans=3,
quant_msn_map=None,
mrm=False,
mrm_pair_info=None,
peak_cutoff=0.05,
ratio_cutoff=0,
replicate=False,
ref_label=None,
max_peaks=4,
parser_args=None,
scans_to_skip=None,
):
super(Worker, self).__init__()
self.precision = precision
self.precursor_ppm = precursor_ppm
self.isotope_ppm = isotope_ppm
self.queue = queue
self.reader_in, self.reader_out = reader_in, reader_out
self.msn_rt_map = pd.Series(msn_rt_map)
self.msn_rt_map.sort_values(inplace=True)
self.results = results
self.mass_labels = {"Light": {}} if mass_labels is None else mass_labels
self.shifts = {0: "Light"}
self.shifts.update(
{
sum(silac_masses.keys()): silac_label
for silac_label, silac_masses in self.mass_labels.items()
}
)
self.raw_name = raw_name
self.filename = os.path.split(self.raw_name)[1]
self.rt_tol = 0.2 # for fitting
self.debug = debug
self.html = html
self.mono = mono
self.thread = thread
self.fitting_run = fitting_run
self.isotope_ppms = isotope_ppms
self.quant_method = quant_method
self.reporter_mode = reporter_mode
self.spline = spline
self.isotopologue_limit = isotopologue_limit
self.labels_needed = labels_needed
self.overlapping_mz = overlapping_mz
self.min_resolution = min_resolution
self.min_scans = min_scans
self.quant_msn_map = quant_msn_map
self.mrm = mrm
self.mrm_pair_info = mrm_pair_info
self.peak_cutoff = peak_cutoff
self.replicate = replicate
self.ratio_cutoff = ratio_cutoff
self.ref_label = ref_label
self.max_peaks = max_peaks
self.parser_args = parser_args
if mrm:
self.quant_mrm_map = {
label: list(group)
for label, group in groupby(
self.quant_msn_map, key=operator.itemgetter(0)
)
}
self.peaks_n = self.parser_args.peaks_n
self.rt_guide = not self.parser_args.no_rt_guide
self.filter_peaks = not self.parser_args.disable_peak_filtering
self.report_ratios = not self.parser_args.no_ratios
self.bigauss_stepsize = 6 if self.parser_args.fit_baseline else 4
self.xic_missing_ion_count = self.parser_args.xic_missing_ion_count
self.scans_to_skip = scans_to_skip or {}
# This is a convenience object to pass to the findAllPeaks function since it is called quite a few times
self.peak_finding_kwargs = {
"max_peaks": self.max_peaks,
"debug": self.debug,
"snr": self.parser_args.snr_filter,
"amplitude_filter": self.parser_args.intensity_filter,
"min_dist": self.parser_args.min_peak_separation,
"fit_baseline": self.parser_args.fit_baseline,
"zscore": self.parser_args.zscore_filter,
"local_filter_size": self.parser_args.filter_width,
"percentile_filter": self.parser_args.percentile_filter,
"smooth": self.parser_args.xic_smooth,
"r2_cutoff": self.parser_args.r2_cutoff,
"gap_interpolation": self.parser_args.gap_interpolation,
"fit_mode": self.parser_args.peak_find_mode,
}
def get_calibrated_mass(self, mass):
return mass / (1 - self.spline(mass) / 1e6) if self.spline else mass
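    # Note (added): get_calibrated_mass applies a ppm-scale correction -- if
    # spline(m) is the instrument error in ppm at mass m, then
    # m_true = m / (1 - err / 1e6); e.g. a +5 ppm error at m/z 1000 moves the
    # value by about 0.005 Th. (Illustrative numbers, not from pyquant.)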
def low_snr(self, scan_intensities, thresh=0.3):
std = np.std(scan_intensities)
last_point = nanmean(scan_intensities[-3:])
        # check the SNR of the last points; if it's bad, get out
return (last_point / std) < thresh
def replaceOutliers(self, common_peaks, combined_data, debug=False):
x = []
y = []
tx = []
ty = []
ty2 = []
hx = []
hy = []
keys = []
hkeys = []
y2 = []
hy2 = []
for i, v in common_peaks.items():
for isotope, found_peaks in v.items():
for peak_index, peak in enumerate(found_peaks):
keys.append((i, isotope, peak_index))
mean, std, std2 = peak["mean"], peak["std"], peak["std2"]
x.append(mean)
y.append(std)
y2.append(std2)
if peak.get("valid"):
tx.append(mean)
ty.append(std)
ty2.append(std2)
if self.mrm and i != "Light":
hx.append(mean)
hy.append(std)
hy2.append(std2)
hkeys.append((i, isotope, peak_index))
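        # Note (added): EllipticEnvelope fits a robust (MCD) covariance model
        # and labels points outside the fitted ellipse as outliers
        # (predict() == -1); support_fraction=0.75 bases the fit on the
        # densest 75% of the points.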
classifier = EllipticEnvelope(support_fraction=0.75, random_state=0)
if len(x) == 1:
return x[0]
data = np.array([x, y, y2]).T
true_data = np.array([tx, ty, ty2]).T
false_pred = (False, -1)
true_pred = (True, 1)
to_delete = set([])
fitted = False
true_data = (
np.vstack({tuple(row) for row in true_data}) if true_data.shape[0] else None
)
if true_data is not None and true_data.shape[0] >= 3:
fit_data = true_data
else:
fit_data = np.vstack({tuple(row) for row in data})
if len(hx) >= 3 or fit_data.shape[0] >= 3:
if debug:
print(common_peaks)
try:
classifier.fit(np.array([hx, hy, hy2]).T if self.mrm else fit_data)
fitted = True
# x_mean, x_std1, x_std2 = classifier.location_
except Exception as e:
try:
classifier = OneClassSVM(
nu=0.95 * 0.15 + 0.05,
kernel=str("linear"),
degree=1,
random_state=0,
)
classifier.fit(np.array([hx, hy, hy2]).T if self.mrm else fit_data)
fitted = True
except Exception as e:
if debug:
print(traceback.format_exc(), data)
x_mean, x_std1, x_std2 = np.median(data, axis=0)
if fitted:
classes = classifier.predict(data)
try:
if hasattr(classifier, "location_"):
x_mean, x_std1, x_std2 = classifier.location_
else:
x_mean, x_std1, x_std2 = np.median(data[classes == 1], axis=0)
except IndexError:
x_mean, x_std1, x_std2 = np.median(data, axis=0)
else:
x_inlier_indices = [
i
for i, v in enumerate(classes)
if v in true_pred
or common_peaks[keys[i][0]][keys[i][1]][keys[i][2]].get("valid")
]
x_inliers = set([keys[i][:2] for i in sorted(x_inlier_indices)])
x_outliers = [
i
for i, v in enumerate(classes)
if keys[i][:2] not in x_inliers
and (
v in false_pred
or common_peaks[keys[i][0]][keys[i][1]][keys[i][2]].get(
"interpolate"
)
)
]
if debug:
print("inliers", x_inliers)
print("outliers", x_outliers)
# print('x1o', x1_outliers)
min_x = x_mean - x_std1
max_x = x_mean + x_std2
for index in x_inlier_indices:
indexer = keys[index]
peak_info = common_peaks[indexer[0]][indexer[1]][indexer[2]]
peak_min = peak_info["mean"] - peak_info["std"]
peak_max = peak_info["mean"] + peak_info["std2"]
if peak_min < min_x:
min_x = peak_min
if peak_max > max_x:
max_x = peak_max
if x_inliers:
for index in x_outliers:
indexer = keys[index]
if x_inliers is not None and indexer[:2] in x_inliers:
# this outlier has a valid inlying value in x1_inliers, so we delete it
to_delete.add(indexer)
else:
# there is no non-outlying data point. If this data point is > 1 sigma away, delete it
peak_info = common_peaks[indexer[0]][indexer[1]][indexer[2]]
if debug:
print(indexer, peak_info, x_mean, x_std1, x_std2)
if not (min_x < peak_info["mean"] < max_x):
to_delete.add(indexer)
else:
            # we do not have enough data for ML; if we have scenarios with a
            # 'valid' peak, keep those over the others
            for quant_label, isotope_peaks in common_peaks.items():
                for isotope, found_peaks in isotope_peaks.items():
                    to_keep = []
                    to_remove = []
                    for peak_index, peak in enumerate(found_peaks):
                        if peak.get("valid"):
                            to_keep.append(peak_index)
                        else:
                            to_remove.append(peak_index)
                    if to_keep:
                        # drop the invalid peaks from this isotope's list
                        for i in sorted(to_remove, reverse=True):
                            del found_peaks[i]
            # fall back to the median so the return value below is defined
            x_mean, x_std1, x_std2 = np.median(data, axis=0)
if debug:
print("to remove", to_delete)
for i in sorted(set(to_delete), key=operator.itemgetter(0, 1, 2), reverse=True):
del common_peaks[i[0]][i[1]][i[2]]
return x_mean
    def convertScan(self, scan):
scan_vals = scan["vals"]
res = pd.Series(
scan_vals[:, 1].astype(np.uint64),
index=np.round(scan_vals[:, 0], self.precision),
name=int(scan["title"]) if self.mrm else scan["rt"],
dtype="uint64",
)
# mz values can sometimes be not sorted -- rare but it happens
res = res.sort_index()
del scan_vals
# due to precision, we have multiple m/z values at the same place. We can eliminate this by grouping them and summing them.
# Summation is the correct choice here because we are combining values of a precision higher than we care about.
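        # Illustration (added): duplicates introduced by the rounding collapse
        # to one entry, e.g. pd.Series([10, 5], index=[100.0, 100.0])
        # .groupby(level=0).sum() yields a single value 15 at index 100.0.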
try:
return res.groupby(level=0).sum() if not res.empty else None
except Exception as e:
print(
"Converting scan error {}\n{}\n{}\n".format(
traceback.format_exc(), res, scan
)
)
def getScan(self, ms1, start=None, end=None):
self.reader_in.put((self.thread, ms1, start, end))
scan = self.reader_out.get()
if scan is None:
print("Unable to fetch scan {}.\n".format(ms1))
return (
(self.convertScan(scan), {"centroid": scan.get("centroid", False)})
if scan is not None
else (None, {})
)
# @memory_profiler
# @line_profiler(extra_view=[peaks.findEnvelope, peaks.findAllPeaks, peaks.findMicro])
def quantify_peaks(self, params):
result_dict = {}
try:
html_images = {}
scan_info = params.get("scan_info")
target_scan = scan_info.get("id_scan")
quant_scan = scan_info.get("quant_scan")
scanId = target_scan.get("id")
ms1 = quant_scan["id"]
scans_to_quant = quant_scan.get("scans")
if scans_to_quant:
scans_to_quant.pop(scans_to_quant.index(ms1))
charge = target_scan["charge"]
mass = target_scan["mass"]
combine_xics = scan_info.get("combine_xics")
precursor = target_scan["precursor"]
calibrated_precursor = self.get_calibrated_mass(precursor)
theor_mass = target_scan.get("theor_mass", calibrated_precursor)
# this will be the RT of the target_scan, which is not always equal to the RT of the quant_scan
rt = target_scan["rt"]
peptide = target_scan.get("peptide")
if self.debug:
sys.stderr.write(
"thread {4} on ms {0} {1} {2} {3}\n".format(
ms1, rt, precursor, scan_info, id(self)
)
)
result_dict.update(
{
"peptide": target_scan.get("mod_peptide")
or target_scan.get("peptide"),
"scan": scanId,
"ms1": ms1,
"charge": charge,
"modifications": target_scan.get("modifications"),
"rt": rt,
"accession": target_scan.get("accession"),
}
)
if float(charge) == 0:
# We cannot proceed with a zero charge
self.results.put(result_dict)
return
precursors = defaultdict(dict)
silac_dict = {
"data": None,
"df": pd.DataFrame(),
"precursor": "NA",
"isotopes": {},
"peaks": OrderedDict(),
"intensity": "NA",
}
data = OrderedDict()
# data['Light'] = copy.deepcopy(silac_dict)
combined_data = pd.DataFrame()
if self.mrm:
mrm_labels = [
i
for i in self.mrm_pair_info.columns
if i.lower() not in ("retention time")
]
mrm_info = None
for index, values in self.mrm_pair_info.iterrows():
if values["Light"] == mass:
mrm_info = values
for ion in target_scan.get("ion_set", []):
precursors[str(ion)]["uncalibrated_mz"] = ion
precursors[str(ion)]["calibrated_mz"] = self.get_calibrated_mass(ion)
precursors[str(ion)]["theoretical_mz"] = ion
data[str(ion)] = copy.deepcopy(silac_dict)
for silac_label, silac_masses in self.mass_labels.items():
silac_shift = 0
global_mass = None
added_residues = set([])
cterm_mass = 0
nterm_mass = 0
mass_keys = list(silac_masses.keys())
if self.reporter_mode:
silac_shift = sum(mass_keys)
label_mz = silac_shift
theo_mz = silac_shift
else:
if peptide:
for label_mass, label_masses in silac_masses.items():
if "X" in label_masses:
global_mass = label_mass
if "]" in label_masses:
cterm_mass = label_mass
if "[" in label_masses:
nterm_mass = label_mass
added_residues = added_residues.union(label_masses)
labels = [
label_mass
for mod_aa in peptide
if mod_aa in label_masses
]
silac_shift += sum(labels)
else:
# no mass, just assume we have one of the labels
silac_shift += mass_keys[0]
if global_mass is not None:
silac_shift += sum(
[
global_mass
for mod_aa in peptide
if mod_aa not in added_residues
]
)
silac_shift += cterm_mass + nterm_mass
label_mz = precursor + (silac_shift / float(charge))
theo_mz = theor_mass + (silac_shift / float(charge))
precursors[silac_label]["uncalibrated_mz"] = label_mz
precursors[silac_label]["calibrated_mz"] = self.get_calibrated_mass(
label_mz
)
precursors[silac_label]["theoretical_mz"] = theo_mz
data[silac_label] = copy.deepcopy(silac_dict)
if not precursors:
precursors[""]["uncalibrated_mz"] = precursor
precursors[""]["calibrated_mz"] = self.get_calibrated_mass(precursor)
precursors[""]["theoretical_mz"] = precursor
data[""] = copy.deepcopy(silac_dict)
precursors = OrderedDict(
sorted(
precursors.items(),
key=cmp_to_key(
lambda x, y: int(
x[1]["uncalibrated_mz"] - y[1]["uncalibrated_mz"]
)
),
)
)
shift_maxes = {
i: max([j["uncalibrated_mz"], j["calibrated_mz"], j["theoretical_mz"]])
for i, j in zip(precursors.keys(), list(precursors.values())[1:])
}
lowest_precursor_mz = min(
[
label_val
for label, label_info in precursors.items()
for label_info_key, label_val in label_info.items()
if label_info_key.endswith("mz")
]
)
highest_precursor_mz = (
max(shift_maxes.values()) if shift_maxes else lowest_precursor_mz
)
# do these here, remember when you tried to do this in one line with () and spent an hour debugging it?
lowest_precursor_mz -= 5
highest_precursor_mz += 5
finished_isotopes = {i: set([]) for i in precursors.keys()}
ms_index = 0
delta = -1
theo_dist = (
calculate_theoretical_distribution(peptide=peptide.upper())
if peptide
else None
)
spacing = config.NEUTRON / float(charge)
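            # Note (added): isotopologues of a charge-z ion are separated by
            # one neutron mass divided by z on the m/z axis, e.g. roughly
            # 0.5 Th apart for a 2+ precursor.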
isotope_labels = {}
isotopes_chosen = {}
last_precursors = {-1: {}, 1: {}}
# our rt might sometimes be an approximation, such as from X!Tandem which requires some transformations
initial_scan = find_scan(self.quant_msn_map, ms1)
current_scan = None
not_found = 0
if self.mrm:
mrm_label = mrm_labels.pop() if mrm_info is not None else "Light"
mass = mass if mrm_info is None else mrm_info[mrm_label]
last_peak_height = {i: defaultdict(int) for i in precursors.keys()}
low_int_isotopes = defaultdict(int)
all_data_intensity = {-1: [], 1: []}
while True:
map_to_search = (
self.quant_mrm_map[mass] if self.mrm else self.quant_msn_map
)
if current_scan is None:
current_scan = initial_scan
else:
if scans_to_quant:
current_scan = scans_to_quant.pop(0)
elif scans_to_quant is None:
current_scan = (
find_prior_scan(map_to_search, current_scan)
if delta == -1
else find_next_scan(map_to_search, current_scan)
)
else:
# we've exhausted the scans we are supposed to quantify
break
found = set([])
current_scan_intensity = 0
if current_scan is not None:
if current_scan in self.scans_to_skip:
continue
else:
if self.min_resolution:
full_scan, scan_params = self.getScan(current_scan)
# check if it's a low res scan, if so skip it
if full_scan is not None:
scan_resolution = get_scan_resolution(full_scan)
if scan_resolution < self.min_resolution:
self.scans_to_skip[current_scan] = True
continue
if self.mrm:
df = full_scan
else:
                                df = full_scan.loc[
                                    (full_scan.index >= lowest_precursor_mz)
                                    & (full_scan.index <= highest_precursor_mz)
                                ]
else:
df, scan_params = self.getScan(
current_scan,
start=None if self.mrm else lowest_precursor_mz,
end=None if self.mrm else highest_precursor_mz,
)
if df is not None:
labels_found = set([])
xdata = df.index.values.astype(float)
ydata = df.fillna(0).values.astype(float)
iterator = (
precursors.items() if not self.mrm else [(mrm_label, 0)]
)
for precursor_label, precursor_info in iterator:
selected = {}
if self.mrm:
labels_found.add(precursor_label)
for i, j in zip(xdata, ydata):
selected[i] = j
isotope_labels[df.name] = {
"label": precursor_label,
"isotope_index": target_scan.get("product_ion", 0),
}
key = (df.name, xdata[-1])
isotopes_chosen[key] = {
"label": precursor_label,
"isotope_index": target_scan.get("product_ion", 0),
"amplitude": ydata[-1],
}
else:
uncalibrated_precursor = precursor_info[
"uncalibrated_mz"
]
measured_precursor = precursor_info["calibrated_mz"]
theoretical_precursor = precursor_info["theoretical_mz"]
data[precursor_label][
"calibrated_precursor"
] = measured_precursor
data[precursor_label][
"precursor"
] = uncalibrated_precursor
shift_max = (
shift_maxes.get(precursor_label)
if self.overlapping_mz is False
else None
)
is_fragmented_scan = (
current_scan == initial_scan
) and (precursor == measured_precursor)
envelope = peaks.findEnvelope(
xdata,
ydata,
measured_mz=measured_precursor,
theo_mz=theoretical_precursor,
max_mz=shift_max,
charge=charge,
contaminant_search=not self.parser_args.no_contaminant_detection,
precursor_ppm=self.precursor_ppm,
isotope_ppm=self.isotope_ppm,
reporter_mode=self.reporter_mode,
isotope_ppms=self.isotope_ppms
if self.fitting_run
else None,
quant_method=self.quant_method,
debug=self.debug,
theo_dist=theo_dist
if (self.mono or precursor_label not in shift_maxes)
else None,
label=precursor_label,
skip_isotopes=finished_isotopes[precursor_label],
last_precursor=last_precursors[delta].get(
precursor_label, measured_precursor
),
isotopologue_limit=self.isotopologue_limit,
fragment_scan=is_fragmented_scan,
centroid=scan_params.get("centroid", False),
)
if not envelope["envelope"]:
if self.debug:
print(
"envelope empty",
envelope,
measured_precursor,
initial_scan,
current_scan,
last_precursors,
)
if self.parser_args.msn_all_scans:
selected[measured_precursor] = 0
isotope_labels[measured_precursor] = {
"label": precursor_label,
"isotope_index": 0,
}
isotopes_chosen[
(df.name, measured_precursor)
] = {
"label": precursor_label,
"isotope_index": 0,
"amplitude": 0,
}
else:
continue
if (
not self.parser_args.msn_all_scans
and 0 in envelope["micro_envelopes"]
and envelope["micro_envelopes"][0].get("int")
):
if ms_index == 0:
last_precursors[delta * -1][
precursor_label
] = envelope["micro_envelopes"][0]["params"][1]
last_precursors[delta][precursor_label] = envelope[
"micro_envelopes"
][0]["params"][1]
added_keys = []
for isotope, vals in envelope[
"micro_envelopes"
].items():
if isotope in finished_isotopes[precursor_label]:
continue
peak_intensity = vals.get("int")
if peak_intensity == 0 or (
self.peak_cutoff
and peak_intensity
< last_peak_height[precursor_label][isotope]
* self.peak_cutoff
):
low_int_isotopes[
(precursor_label, isotope)
] += 1
if (
not self.parser_args.msn_all_scans
and low_int_isotopes[
(precursor_label, isotope)
]
>= 2
):
if self.debug:
print(
"finished with isotope",
precursor_label,
envelope,
)
finished_isotopes[precursor_label].add(
isotope
)
else:
labels_found.add(precursor_label)
continue
else:
low_int_isotopes[(precursor_label, isotope)] = 0
found.add(precursor_label)
labels_found.add(precursor_label)
if (
current_scan == initial_scan
or last_peak_height[precursor_label][isotope]
== 0
):
last_peak_height[precursor_label][
isotope
] = peak_intensity
selected[
measured_precursor + isotope * spacing
] = peak_intensity
current_scan_intensity += peak_intensity
vals["isotope"] = isotope
isotope_labels[
measured_precursor + isotope * spacing
] = {
"label": precursor_label,
"isotope_index": isotope,
}
key = (
df.name,
measured_precursor + isotope * spacing,
)
added_keys.append(key)
isotopes_chosen[key] = {
"label": precursor_label,
"isotope_index": isotope,
"amplitude": peak_intensity,
}
del envelope
selected = pd.Series(selected, name=df.name).to_frame()
if df.name in combined_data.columns:
combined_data = combined_data.add(
selected, axis="index", fill_value=0
)
else:
combined_data = pd.concat(
[combined_data, selected], axis=1, sort=True
).fillna(0)
del selected
if not self.mrm and (
(len(labels_found) < self.labels_needed)
or (
self.parser_args.require_all_ions
and len(labels_found) < len(precursors)
)
):
if self.parser_args.msn_all_scans:
if self.parser_args.require_all_ions:
if self.debug:
print(
"Not all ions found, setting",
df.name,
"to zero",
)
combined_data[df.name] = 0
else:
found.discard(precursor_label)
if df is not None and df.name in combined_data.columns:
del combined_data[df.name]
for i in isotopes_chosen.keys():
if i[0] == df.name:
del isotopes_chosen[i]
del df
all_data_intensity[delta].append(current_scan_intensity)
if not found or (
(
np.abs(ms_index) > 7
and self.low_snr(
all_data_intensity[delta], thresh=self.parser_args.xic_snr
)
)
or (
self.parser_args.xic_window_size != -1
and np.abs(ms_index) >= self.parser_args.xic_window_size
)
):
not_found += 1
if current_scan is None or (
not_found > self.xic_missing_ion_count
and not self.parser_args.msn_all_scans
):
not_found = 0
if delta == -1:
delta = 1
current_scan = initial_scan
finished_isotopes = {i: set([]) for i in precursors.keys()}
last_peak_height = {
i: defaultdict(int) for i in precursors.keys()
}
ms_index = 0
else:
if self.mrm:
if mrm_info is not None and mrm_labels:
mrm_label = (
mrm_labels.pop()
if mrm_info is not None
else "Light"
)
mass = (
mass
if mrm_info is None
else mrm_info[mrm_label]
)
delta = -1
current_scan = self.quant_mrm_map[mass][0][1]
last_peak_height = {
i: defaultdict(int) for i in precursors.keys()
}
initial_scan = current_scan
finished_isotopes = {
i: set([]) for i in precursors.keys()
}
ms_index = 0
else:
break
else:
break
else:
not_found = 0
if self.reporter_mode:
break
ms_index += delta
rt_figure = {}
isotope_figure = {}
if self.parser_args.merge_isotopes:
new_labels = {}
labels = set(v["label"] for i, v in isotope_labels.items())
for label in labels:
to_merge = [
(i, v["isotope_index"], v)
for i, v in isotope_labels.items()
if v["label"] == label
]
to_merge.sort(key=operator.itemgetter(1))
new_labels[to_merge[0][0]] = to_merge[0][2]
if len(to_merge) > 1:
combined_data.loc[to_merge[0][0], :] = combined_data.loc[
[i[0] for i in to_merge], :
].sum(axis=0)
combined_data.drop([i[0] for i in to_merge[1:]], inplace=True)
isotope_labels = new_labels
if self.parser_args.merge_labels or combine_xics:
label_name = "_".join(map(str, combined_data.index))
combined_data = combined_data.sum(axis=0).to_frame(name=label_name).T
isotope_labels = {
label_name: {"isotope_index": 0, "label": label_name,}
}
data[label_name] = {}
data[label_name]["calibrated_precursor"] = "_".join(
map(
str,
(
data[i].get("calibrated_precursor")
for i in sorted(data.keys())
if i != label_name
),
)
)
data[label_name]["precursor"] = "_".join(
map(
str,
(
data[i].get("precursor")
for i in sorted(data.keys())
if i != label_name
),
)
)
if isotopes_chosen and isotope_labels and not combined_data.empty:
if self.mrm:
combined_data = combined_data.T
# bookend with zeros if there aren't any, do the right end first because pandas will by default append there
combined_data = combined_data.sort_index().sort_index(axis="columns")
start_rt = rt
rt_guide = self.rt_guide and start_rt
if len(combined_data.columns) == 1:
if combined_data.columns[-1] == self.msn_rt_map.iloc[-1]:
new_col = combined_data.columns[-1] + (
combined_data.columns[-1] - self.msn_rt_map.iloc[-2]
)
else:
new_col = self.msn_rt_map.iloc[
self.msn_rt_map.searchsorted(combined_data.columns[-1]) + 1
]
else:
new_col = combined_data.columns[-1] + (
combined_data.columns[-1] - combined_data.columns[-2]
)
combined_data[new_col] = 0
new_col = combined_data.columns[0] - (
combined_data.columns[1] - combined_data.columns[0]
)
combined_data[new_col] = 0
combined_data = combined_data[sorted(combined_data.columns)]
combined_data = combined_data.sort_index().sort_index(axis="columns")
quant_vals = defaultdict(dict)
isotope_labels = pd.DataFrame(isotope_labels).T
isotopes_chosen = pd.DataFrame(isotopes_chosen).T
isotopes_chosen.index.names = ["RT", "MZ"]
if self.html:
# make the figure of our isotopes selected
all_x = sorted(
isotopes_chosen.index.get_level_values("MZ").drop_duplicates()
)
isotope_group = isotopes_chosen.groupby(level="RT")
isotope_figure = {
"data": [],
"plot-multi": True,
"common-x": ["x"] + all_x,
"max-y": isotopes_chosen["amplitude"].max(),
}
isotope_figure_mapper = {}
rt_figure = {
"data": [],
"plot-multi": True,
"common-x": ["x"]
+ ["{0:0.4f}".format(i) for i in combined_data.columns],
"rows": len(precursors),
"max-y": combined_data.max().max(),
}
rt_figure_mapper = {}
for counter, (index, row) in enumerate(isotope_group):
try:
title = "Scan {} RT {}".format(
self.msn_rt_map[self.msn_rt_map == index].index[0],
index,
)
except Exception as e:
title = "{}".format(index)
if index in isotope_figure_mapper:
isotope_base = isotope_figure_mapper[index]
else:
isotope_base = {
"data": {"x": "x", "columns": [], "type": "bar"},
"axis": {
"x": {"label": "M/Z"},
"y": {"label": "Intensity"},
},
}
isotope_figure_mapper[index] = isotope_base
isotope_figure["data"].append(isotope_base)
for group in precursors.keys():
label_df = row[row["label"] == group]
x = (
label_df["amplitude"]
.index.get_level_values("MZ")
.tolist()
)
y = label_df["amplitude"].values.tolist()
isotope_base["data"]["columns"].append(
["{} {}".format(title, group)]
+ [y[x.index(i)] if i in x else 0 for i in all_x]
)
if not self.reporter_mode:
combined_peaks = defaultdict(dict)
peak_location = None
# If we are searching for a particular RT, we look for it in the data and remove other larger peaks
# until we find it. To help with cases where we are fitting multiple datasets for the same XIC, we
# combine the data to increase the SNR in case some XICs of a given ion are weak
if rt_guide and not self.parser_args.msn_all_scans:
merged_data = combined_data.sum(axis=0)
merged_x = merged_data.index.astype(float).values
merged_y = merged_data.values.astype(float)
res, residual = peaks.targeted_search(
merged_x,
merged_y,
start_rt,
attempts=4,
peak_finding_kwargs=self.peak_finding_kwargs,
)
if self.debug:
if res is not None:
print("peak used for sub-fitting", res)
else:
print(peptide, "is dead")
if res is not None:
rt_means = res[1 :: self.bigauss_stepsize]
rt_amps = res[:: self.bigauss_stepsize]
rt_std = res[2 :: self.bigauss_stepsize]
rt_std2 = res[3 :: self.bigauss_stepsize]
m_std = np.std(merged_y)
m_mean = nanmean(merged_y)
valid_peaks = [
{
"mean": i,
"amp": j,
"std": l,
"std2": k,
"total": merged_y.sum(),
"snr": m_mean / m_std,
"residual": residual,
}
for i, j, l, k in zip(
rt_means, rt_amps, rt_std, rt_std2
)
]
valid_peaks.sort(key=lambda x: np.abs(x["mean"] - start_rt))
peak_index = peaks.find_nearest_index(
merged_x, valid_peaks[0]["mean"]
)
peak_location = merged_x[peak_index]
if self.debug:
print("peak location is", peak_location)
merged_lb = peaks.find_nearest_index(
merged_x,
valid_peaks[0]["mean"] - valid_peaks[0]["std"] * 2,
)
merged_rb = peaks.find_nearest_index(
merged_x,
valid_peaks[0]["mean"] + valid_peaks[0]["std2"] * 2,
)
merged_rb = (
len(merged_x) if merged_rb == -1 else merged_rb + 1
)
else:
merged_lb = 0
merged_rb = combined_data.shape[1]
peak_location = start_rt
else:
merged_x = xdata
merged_y = ydata
merged_lb = 0
merged_rb = combined_data.shape[1]
potential_peaks = defaultdict(list)
for row_num, (index, values) in enumerate(combined_data.iterrows()):
quant_label = isotope_labels.loc[index, "label"]
xdata = values.index.values.astype(float)
ydata = values.fillna(0).values.astype(float)
# Setup the HTML first in case we do not fit any peaks, we still want to report the raw data
if self.html:
# ax = fig.add_subplot(subplot_rows, subplot_columns, fig_index)
if quant_label in rt_figure_mapper:
rt_base = rt_figure_mapper[(quant_label, index)]
else:
rt_base = {
"data": {"x": "x", "columns": []},
"grid": {
"x": {
"lines": [
{
"value": rt,
"text": "Initial RT {0:0.4f}".format(
rt
),
"position": "middle",
}
]
}
},
"subchart": {"show": True},
"axis": {
"x": {"label": "Retention Time"},
"y": {"label": "Intensity"},
},
}
rt_figure_mapper[(quant_label, index)] = rt_base
rt_figure["data"].append(rt_base)
rt_base["data"]["columns"].append(
["{0} {1} raw".format(quant_label, index)]
+ ydata.tolist()
)
if sum(ydata > 0) >= self.min_scans:
                        # this step adds a term on the border when possible;
                        # otherwise there is no penalty on the variance at the
                        # border since the data does not exist there. We only add
                        # for lower values to avoid including monster peaks we may
                        # be explicitly excluding above
fit_lb = merged_lb
fit_rb = merged_rb
while (
fit_rb + 1 < len(ydata)
and ydata[fit_rb + 1] <= ydata[fit_rb - 1]
):
fit_rb += 1
while fit_lb != 0 and ydata[fit_lb] >= ydata[fit_lb - 1]:
fit_lb -= 1
peak_x = np.copy(xdata[fit_lb:fit_rb])
peak_y = np.copy(ydata[fit_lb:fit_rb])
if peak_x.size <= 1 or sum(peak_y > 0) < self.min_scans:
continue
if rt_guide:
peak_positive_y = peak_y > 0
if (
peak_location is None
and self.parser_args.msn_all_scans
):
peak_location = start_rt
nearest_positive_peak = peaks.find_nearest(
peak_x[peak_positive_y], peak_location
)
sub_peak_location = peaks.find_nearest_index(
peak_x, nearest_positive_peak
)
sub_peak_index = (
sub_peak_location
if peak_y[sub_peak_location]
else np.argmax(peak_y)
)
else:
nearest_positive_peak = None
# fit, residual = peaks.fixedMeanFit2(peak_x, peak_y, peak_index=sub_peak_index, debug=self.debug)
if self.debug:
print("fitting XIC for", quant_label, index)
print("raw data is", xdata.tolist(), ydata.tolist())
fit, residual = peaks.findAllPeaks(
xdata,
ydata,
bigauss_fit=True,
filter=self.filter_peaks,
rt_peak=nearest_positive_peak,
**self.peak_finding_kwargs
)
if not fit.any():
continue
rt_amps = fit[:: self.bigauss_stepsize] # * ydata.max()
rt_means = fit[1 :: self.bigauss_stepsize]
rt_std = fit[2 :: self.bigauss_stepsize]
rt_std2 = fit[3 :: self.bigauss_stepsize]
xic_peaks = []
positive_y = ydata[ydata > 0]
if len(positive_y) > 5:
positive_y = gaussian_filter1d(
positive_y, 3, mode="constant"
)
for i, j, l, k in zip(rt_means, rt_amps, rt_std, rt_std2):
d = {
"mean": i,
"amp": j,
"std": l,
"std2": k,
"total": values.sum(),
"residual": residual,
}
mean_index = peaks.find_nearest_index(
xdata[ydata > 0], i
)
window_size = (
5
if len(positive_y) < 15
else int(len(positive_y) / 3)
)
lb, rb = (
mean_index - window_size,
mean_index + window_size + 1,
)
if lb < 0:
lb = 0
if rb > len(positive_y):
rb = -1
data_window = positive_y[lb:rb]
if data_window.any():
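                                # Note (added): np.percentile takes values in
                                # 0-100, so 0.8 here is the 0.8th percentile
                                # (near the minimum); this may be intentional as
                                # a background floor, but is worth flagging.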
try:
background = np.percentile(data_window, 0.8)
except Exception as e:
background = np.percentile(ydata, 0.8)
mean = nanmean(data_window)
if background < mean:
background = mean
d["sbr"] = nanmean(
j
/ (
np.array(
sorted(data_window, reverse=True)[:5]
)
)
) # (j-np.mean(positive_y[lb:rb]))/np.std(positive_y[lb:rb])
if len(data_window) > 2:
d["snr"] = (j - background) / np.std(
data_window
)
else:
d["snr"] = np.NaN
else:
d["sbr"] = np.NaN
d["snr"] = np.NaN
xic_peaks.append(d)
potential_peaks[(quant_label, index)] = xic_peaks
                        # if we have any peaks containing our retention time, keep them and throw out ones not containing it
if (
self.parser_args.peak_resolution_mode
== PEAK_RESOLUTION_RT_MODE
):
to_remove = []
to_keep = []
if rt_guide:
peak_location_index = peaks.find_nearest_index(
merged_x, peak_location
)
for i, v in enumerate(xic_peaks):
mu = v["mean"]
s1 = v["std"]
s2 = v["std2"]
if mu - s1 * 2 < start_rt < mu + s2 * 2:
# these peaks are considered true and will help with the machine learning
if mu - s1 * 1.5 < start_rt < mu + s2 * 1.5:
v["valid"] = True
to_keep.append(i)
elif (
np.abs(
peaks.find_nearest_index(merged_x, mu)
- peak_location_index
)
> 2
):
to_remove.append(i)
if not to_keep:
                                # we have no peaks at our RT, only contaminating peaks; remove all the noise except the peak closest to our RT
if not self.mrm:
# for i in to_remove:
# xic_peaks[i]['interpolate'] = True
valid_peak = sorted(
[
(i, np.abs(i["mean"] - start_rt))
for i in xic_peaks
],
key=operator.itemgetter(1),
)[0][0]
for i in reversed(range(len(xic_peaks))):
if xic_peaks[i] == valid_peak:
continue
else:
del xic_peaks[i]
# valid_peak['interpolate'] = True
# else:
# valid_peak = [j[0] for j in sorted([(i, i['amp']) for i in xic_peaks], key=operator.itemgetter(1), reverse=True)[:3]]
else:
# if not to_remove:
# xic_peaks = [xic_peaks[i] for i in to_keep]
# else:
for i in reversed(to_remove):
del xic_peaks[i]
if self.debug:
print(quant_label, index)
print(fit)
print(to_remove, to_keep, xic_peaks)
combined_peaks[quant_label][
index
] = xic_peaks # if valid_peak is None else [valid_peak]
peak_info = (
{i: {} for i in self.mrm_pair_info.columns}
if self.mrm
else {i: {} for i in precursors.keys()}
)
if self.reporter_mode or combined_peaks:
if self.reporter_mode:
for row_num, (index, values) in enumerate(
combined_data.iterrows()
):
quant_label = isotope_labels.loc[index, "label"]
isotope_index = isotope_labels.loc[index, "isotope_index"]
int_val = sum(values)
quant_vals[quant_label][isotope_index] = int_val
else:
# common_peak = self.replaceOutliers(combined_peaks, combined_data, debug=self.debug)
common_peak = find_common_peak_mean(
combined_peaks, tie_breaker_time=start_rt
)
common_loc = peaks.find_nearest_index(
xdata, common_peak
) # np.where(xdata==common_peak)[0][0]
for quant_label, quan_values in combined_peaks.items():
for index, values in quan_values.items():
if not values:
continue
isotope_index = isotope_labels.loc[
index, "isotope_index"
]
rt_values = combined_data.loc[index]
xdata = rt_values.index.values.astype(float)
ydata = rt_values.fillna(0).values.astype(float)
# pick the biggest within a rt cutoff of 0.2, otherwise pick closest
# closest_rts = sorted([(i, i['amp']) for i in values if np.abs(i['peak']-common_peak) < 0.2], key=operator.itemgetter(1), reverse=True)
closest_rts = sorted(
[
(i, np.abs(i["mean"] - common_peak))
for i in values
],
key=operator.itemgetter(1),
)
xic_peaks = [i[0] for i in closest_rts]
pos_x = xdata[ydata > 0]
if rt_guide:
xic_peaks = [xic_peaks[0]]
else:
# unguided, sort by amplitude
xic_peaks.sort(
key=operator.itemgetter("amp"), reverse=True
)
for xic_peak_index, xic_peak in enumerate(xic_peaks):
if (
self.peaks_n != -1
and xic_peak_index >= self.peaks_n
): # xic_peak index is 0 based, peaks_n is 1 based, hence the >=
break
# if we move more than a # of ms1 to the dominant peak, update to our known peak
gc = "k"
nearest = peaks.find_nearest_index(
pos_x, xic_peak["mean"]
)
peak_loc = np.where(xdata == pos_x[nearest])[0][0]
mean = xic_peak["mean"]
amp = xic_peak["amp"]
mean_diff = mean - xdata[common_loc]
mean_diff = np.abs(
mean_diff / xic_peak["std"]
if mean_diff < 0
else mean_diff / xic_peak["std2"]
)
std = xic_peak["std"]
std2 = xic_peak["std2"]
snr = xic_peak["snr"]
sbr = xic_peak["sbr"]
residual = xic_peak["residual"]
if (
False
and len(xdata) >= 3
and (
mean_diff > 2
or (
np.abs(peak_loc - common_loc) > 2
and mean_diff > 2
)
)
):
# fixed mean fit
if self.debug:
print(quant_label, index)
print(common_loc, peak_loc)
nearest = peaks.find_nearest_index(pos_x, mean)
nearest_index = np.where(
xdata == pos_x[nearest]
)[0][0]
res = peaks.fixedMeanFit(
xdata,
ydata,
peak_index=nearest_index,
debug=self.debug,
)
if res is None:
if self.debug:
print(
quant_label,
index,
"has no values here",
)
continue
amp, mean, std, std2 = res
amp *= ydata.max()
gc = "g"
# var_rat = closest_rt['var']/common_var
peak_params = np.array([amp, mean, std, std2])
# int_args = (res.x[rt_index]*mval, res.x[rt_index+1], res.x[rt_index+2])
left, right = (
xdata[0] - 4 * std,
xdata[-1] + 4 * std2,
)
xr = np.linspace(left, right, 1000)
left_index, right_index = (
peaks.find_nearest_index(xdata, left),
peaks.find_nearest_index(xdata, right) + 1,
)
if left_index < 0:
left_index = 0
if right_index >= len(xdata) or right_index <= 0:
right_index = len(xdata)
# check that we have at least 2 positive values
if sum(ydata[left_index:right_index] > 0) < 2:
continue
try:
int_val = (
integrate.simps(
peaks.bigauss_ndim(xr, peak_params),
x=xr,
)
if self.quant_method == "integrate"
else ydata[
(xdata > left) & (xdata < right)
].sum()
)
except Exception as e:
if self.debug:
print(traceback.format_exc())
print(xr, peak_params)
try:
total_int = integrate.simps(
ydata[left_index:right_index],
x=xdata[left_index:right_index],
)
except Exception as e:
if self.debug:
print(traceback.format_exc())
print(left_index, right_index, xdata, ydata)
sdr = np.log2(int_val * 1.0 / total_int + 1.0)
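                        # Note (added): 'sdr' reads as a log2-scaled measure of
                        # how much of the locally integrated signal the fitted
                        # peak explains (int_val/total_int); the +1 keeps it
                        # defined when int_val is 0. (Our interpretation only.)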
if int_val and not pd.isnull(int_val) and gc != "c":
try:
quant_vals[quant_label][
isotope_index
] += int_val
except KeyError:
try:
quant_vals[quant_label][
isotope_index
] = int_val
except KeyError:
quant_vals[quant_label] = {
isotope_index: int_val
}
cleft, cright = mean - 2 * std, mean + 2 * std2
curve_indices = (xdata >= cleft) & (xdata <= cright)
cf_data = ydata[curve_indices]
# Buffer cf_data with 0's to reflect that the data is nearly zero outside the fit
# and to prevent areas with 2 data points from having negative R^2
cf_data = np.hstack((0, cf_data, 0))
ss_tot = np.sum((cf_data - nanmean(cf_data)) ** 2)
if ss_tot == 0:
continue
ss_res = np.sum(
(
cf_data
- np.hstack(
(
0,
peaks.bigauss_ndim(
xdata[curve_indices],
peak_params,
),
0,
)
)
)
** 2
)
coef_det = 1 - ss_res / ss_tot
peak_info_dict = {
"peak_mean": mean,
"std": std,
"std2": std2,
"amp": amp,
"mean_diff": mean_diff,
"snr": snr,
"sbr": sbr,
"sdr": sdr,
"auc": int_val,
"peak_width": std + std2,
"coef_det": coef_det,
"residual": residual,
"label": quant_label,
}
try:
peak_info[quant_label][isotope_index][
xic_peak_index
] = peak_info_dict
except KeyError:
try:
peak_info[quant_label][isotope_index] = {
xic_peak_index: peak_info_dict
}
except KeyError:
peak_info[quant_label] = {
isotope_index: {
xic_peak_index: peak_info_dict
}
}
try:
data[quant_label]["residual"].append(residual)
except KeyError:
data[quant_label]["residual"] = [residual]
if self.html:
rt_base = rt_figure_mapper[(quant_label, index)]
key = "{} {}".format(quant_label, index)
for i, v in enumerate(
rt_base["data"]["columns"]
):
if key in v[0]:
break
rt_base["data"]["columns"].insert(
i,
[
"{0} {1} fit {2}".format(
quant_label, index, xic_peak_index
)
]
+ np.nan_to_num(
peaks.bigauss_ndim(xdata, peak_params)
).tolist(),
)
del combined_peaks
            write_html = self.ratio_cutoff == 0
# # Some experimental code that tries to compare the XIC with the theoretical distribution
# # Currently disabled as it reduces the number of datapoints to infer SILAC ratios and results in poorer
# # comparisons -- though there might be merit to intensity based estimates with this.
if self.parser_args.theo_xic and self.mono and theo_dist is not None:
# Compare the extracted XIC with the theoretical abundance of each isotope:
# To do this, we take the residual of all combinations of isotopes
for quant_label in quant_vals:
isotopes = quant_vals[quant_label].keys()
isotope_ints = {i: quant_vals[quant_label][i] for i in isotopes}
isotope_residuals = []
for num_of_isotopes in range(2, len(isotopes) + 1):
for combo in combinations(isotopes, num_of_isotopes):
chosen_isotopes = np.array(
[isotope_ints[i] for i in combo]
)
chosen_isotopes /= chosen_isotopes.max()
chosen_dist = np.array([theo_dist[i] for i in combo])
chosen_dist /= chosen_dist.max()
res = sum((chosen_dist - chosen_isotopes) ** 2)
isotope_residuals.append((res, combo))
# this weird sorting is to put the favorable values as the lowest values
if isotope_residuals:
kept_keys = sorted(
isotope_residuals,
key=lambda x: (
0 if x[0] < 0.1 else 1,
len(isotopes) - len(x[1]),
x[0],
),
)[0][1]
# print(quant_label, kept_keys)
for i in isotopes:
if i not in kept_keys:
del quant_vals[quant_label][i]
for silac_label1 in data.keys():
# TODO: check if peaks overlap before taking ratio
qv1 = quant_vals.get(silac_label1, {})
result_dict.update(
{"{}_intensity".format(silac_label1): sum(qv1.values())}
)
if self.report_ratios:
for silac_label2 in data.keys():
if (
self.ref_label is not None
and str(silac_label2.lower()) != self.ref_label.lower()
):
continue
if silac_label1 == silac_label2:
continue
qv2 = quant_vals.get(silac_label2, {})
ratio = "NA"
if qv1 is not None and qv2 is not None:
if self.mono:
common_isotopes = set(qv1.keys()).intersection(
qv2.keys()
)
x = []
y = []
l1, l2 = 0, 0
for i in common_isotopes:
q1 = qv1.get(i)
q2 = qv2.get(i)
if (
q1 > 100
and q2 > 100
and q1 > l1 * 0.15
and q2 > l2 * 0.15
):
x.append(i)
y.append(q1 / q2)
l1, l2 = q1, q2
# fit it and take the intercept
if len(x) >= 3 and np.std(np.log2(y)) > 0.3:
classifier = EllipticEnvelope(
contamination=0.25, random_state=0
)
fit_data = np.log2(
np.array(y).reshape(len(y), 1)
)
true_pred = (True, 1)
classifier.fit(fit_data)
ratio = nanmean(
[
y[i]
for i, v in enumerate(
classifier.predict(fit_data)
)
if v in true_pred
]
)
else:
ratio = nanmean(np.array(y))
else:
common_isotopes = set(qv1.keys()).union(qv2.keys())
quant1 = sum(
[qv1.get(i, 0) for i in common_isotopes]
)
quant2 = sum(
[qv2.get(i, 0) for i in common_isotopes]
)
ratio = (
quant1 / quant2 if quant1 and quant2 else "NA"
)
try:
if (
self.ratio_cutoff
and not pd.isnull(ratio)
and np.abs(np.log2(ratio)) > self.ratio_cutoff
):
write_html = True
except Exception as e:
pass
result_dict.update(
{
"{}_{}_ratio".format(
silac_label1, silac_label2
): ratio
}
)
if write_html:
result_dict.update({"html_info": html_images})
for peak_label, peak_data in peak_info.items():
result_dict.update(
{
"{}_peaks".format(peak_label): peak_data,
"{}_isotopes".format(peak_label): sum(
(isotopes_chosen["label"] == peak_label)
& (isotopes_chosen["amplitude"] > 0)
),
}
)
for silac_label, silac_data in data.items():
precursor = silac_data["precursor"]
calc_precursor = silac_data.get(
"calibrated_precursor", silac_data["precursor"]
)
result_dict.update(
{
"{}_residual".format(silac_label): nanmean(
pd.Series(silac_data.get("residual", [])).replace(
[np.inf, -np.inf, np.nan], 0
)
),
"{}_precursor".format(silac_label): precursor,
"{}_calibrated_precursor".format(silac_label): calc_precursor,
}
)
result_dict.update(
{
"ions_found": target_scan.get("ions_found"),
"html": {"xic": rt_figure, "isotope": isotope_figure,},
}
)
self.results.put(result_dict)
except Exception as e:
print(
"ERROR encountered. Please report at https://github.com/Chris7/pyquant/issues:\n {}\nParameters: {}".format(
traceback.format_exc(), params
)
)
try:
self.results.put(result_dict)
except Exception as e:
pass
return
def run(self):
for index, params in enumerate(iter(self.queue.get, None)):
self.params = params
self.quantify_peaks(params)
self.results.put(None)
|
{"hexsha": "9288cb1485da29db4ecf3607797348f730fb1fc2", "size": 86728, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyquant/worker.py", "max_stars_repo_name": "Chris7/pyquant", "max_stars_repo_head_hexsha": "56410060546bcdafdba83232d8119f23a28cac56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2016-04-26T14:19:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T19:38:15.000Z", "max_issues_repo_path": "pyquant/worker.py", "max_issues_repo_name": "Chris7/pyquant", "max_issues_repo_head_hexsha": "56410060546bcdafdba83232d8119f23a28cac56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2016-01-11T17:48:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-19T17:50:30.000Z", "max_forks_repo_path": "pyquant/worker.py", "max_forks_repo_name": "Chris7/pyquant", "max_forks_repo_head_hexsha": "56410060546bcdafdba83232d8119f23a28cac56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-05-12T17:39:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-30T18:12:22.000Z", "avg_line_length": 50.5702623907, "max_line_length": 179, "alphanum_fraction": 0.3616479107, "include": true, "reason": "import numpy,from scipy", "num_tokens": 13941}
|
import unittest
import numpy as np
import pandas as pd
import baobab.sim_utils.metadata_utils as metadata_utils
class TestMetadataUtils(unittest.TestCase):
"""Tests for the metadata utils module used to convert between parameter definitions
"""
def test_g1g2_vs_gamma_psi_symmetry(self):
n_data = 1000
data = {
'external_shear_gamma_ext': np.abs(np.random.randn(n_data)*0.01 + 0.025),
'external_shear_psi_ext': np.random.rand(n_data)*np.pi - np.pi*0.5,
}
only_gamma_psi = pd.DataFrame(data)
g1g2_added = metadata_utils.add_g1g2_columns(only_gamma_psi)
del only_gamma_psi
only_g1g2 = g1g2_added[['external_shear_gamma1', 'external_shear_gamma2']].copy()
gamma_psi_added = metadata_utils.add_gamma_psi_ext_columns(only_g1g2)
del only_g1g2
np.testing.assert_array_almost_equal(g1g2_added['external_shear_gamma1'].values, gamma_psi_added['external_shear_gamma1'].values, err_msg="gamma1")
np.testing.assert_array_almost_equal(g1g2_added['external_shear_gamma2'].values, gamma_psi_added['external_shear_gamma2'].values, err_msg="gamma2")
np.testing.assert_array_almost_equal(g1g2_added['external_shear_gamma_ext'].values, gamma_psi_added['external_shear_gamma_ext'].values, err_msg="gamma_ext")
np.testing.assert_array_almost_equal(g1g2_added['external_shear_psi_ext'].values, gamma_psi_added['external_shear_psi_ext'].values, err_msg="psi_ext")
    @unittest.skip("incomplete test stub -- q/phi round-trip not implemented yet")
    def test_e1e2_vs_qphi_symmetry(self):
        n_data = 1000
        data = {
            'a_e1': np.random.randn(n_data)*0.1,
            'a_e2': np.random.randn(n_data)*0.1,
        }
        only_e1e2 = pd.DataFrame(data)
        # TODO: convert to q/phi and back, mirroring the shear round-trip test
        # above; without an assertion this test silently passes
        pass
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "ca32da26fef819b8461ec0f047f34b924d80359d", "size": 1804, "ext": "py", "lang": "Python", "max_stars_repo_path": "baobab/tests/test_sim_utils/test_metadata_utils.py", "max_stars_repo_name": "aymgal/baobab", "max_stars_repo_head_hexsha": "960ddbd55fc4391f2b857f2232af38c45c809ae8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-09-11T15:11:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T08:24:52.000Z", "max_issues_repo_path": "baobab/tests/test_sim_utils/test_metadata_utils.py", "max_issues_repo_name": "aymgal/baobab", "max_issues_repo_head_hexsha": "960ddbd55fc4391f2b857f2232af38c45c809ae8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2019-08-29T00:39:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-02T22:49:41.000Z", "max_forks_repo_path": "baobab/tests/test_sim_utils/test_metadata_utils.py", "max_forks_repo_name": "aymgal/baobab", "max_forks_repo_head_hexsha": "960ddbd55fc4391f2b857f2232af38c45c809ae8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-26T23:38:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-18T10:07:04.000Z", "avg_line_length": 48.7567567568, "max_line_length": 164, "alphanum_fraction": 0.7050997783, "include": true, "reason": "import numpy", "num_tokens": 471}
|
from sklearn.neural_network import MLPClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import learning_curve
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import fbeta_score
import os
import numpy as np
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
# **********************************************************Tuning section*************************************************************************
# Model hyperparameters
CHOICE = 1 # preprocessing method -- 0: robust scaling, 1: min-max normalization, 2: standardization
HIDDEN_LAYER_SIZES = [(100,)] # shapes of the hidden layers
ACTIVATION = ['relu','identity'] # activation function {'identity', 'logistic', 'tanh', 'relu'}
SOLVER = ['adam'] # weight optimizer {'lbfgs', 'sgd', 'adam'}
ALPHA = [0.0001] # L2 regularization strength
MAX_ITER = [200] # maximum number of iterations
TOL = [1e-4] # convergence tolerance
VERBOSE = [False] # whether to print training progress
BATCH_SIZE = ['auto']
RANDOM_STATE = 28
BETA = 0.5 # BETA < 1 favors precision, BETA > 1 favors recall
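# Added note: F_beta = (1 + beta^2) * P * R / (beta^2 * P + R); with BETA = 0.5,
# precision 0.8 and recall 0.4 give F0.5 = 1.25*0.8*0.4 / (0.25*0.8 + 0.4) ~= 0.67,
# so precision is weighted more heavily. (Illustrative numbers only.)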
# Files
FEATURE_FILE_PATH = r"D:\Pycharm\A_20200629.xlsx"  # feature file: everything in this file is treated as a feature! Do not include descriptive columns such as sample IDs
LABEL_FILE_PATH = r"D:\Pycharm\B_20200629.xlsx"  # label file
# **********************************************************Code section*************************************************************************
parameters = {'hidden_layer_sizes':HIDDEN_LAYER_SIZES,
'activation':ACTIVATION,
'solver':SOLVER,
'alpha':ALPHA,
'batch_size':BATCH_SIZE,
'max_iter':MAX_ITER,
'tol':TOL,
'verbose':VERBOSE}
# Read the files and extract the features and labels
def get_data(feature_file_path=FEATURE_FILE_PATH,label_file_path=LABEL_FILE_PATH):
    if not os.path.exists(feature_file_path):
        raise ValueError("Feature file does not exist")
    if not os.path.exists(label_file_path):
        raise ValueError("Label file does not exist")
df_x = pd.read_excel(feature_file_path)
df_y = pd.read_excel(label_file_path)
return {'X':df_x,'y':df_y}
# Data preprocessing
def data_process(X,choice):
if choice==0:
X=preprocessing.RobustScaler().fit_transform(X)
elif choice==1:
X=preprocessing.MinMaxScaler().fit_transform(X)
elif choice==2:
X=preprocessing.StandardScaler().fit_transform(X)
return X
# Split the dataset
def split_data(X,y):
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=7)
return {'X_train':X_train,'X_test':X_test,'y_train':y_train,'y_test':y_test}
# Model evaluation
def evaluate_model(clf,X_test,y_test,beta=BETA):
    y_pred = clf.predict(X_test)
    acc_score = accuracy_score(y_test, y_pred) # accuracy
    recall = recall_score(y_test, y_pred) # recall
    matrix = confusion_matrix(y_test, y_pred) # confusion matrix
    f_score = fbeta_score(y_test, y_pred, beta=beta) # F-beta score
return {'acc_score':acc_score,'recall':recall,'matrix':matrix,'f_score':f_score,'test_num':len(y_pred)}
# Train the neural-network classification model
def train_model(X_train,X_test,y_train,y_test):
    # model training
mlp = MLPClassifier()
clf = GridSearchCV(estimator=mlp,param_grid=parameters,cv=5).fit(X_train,y_train)
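    # Note (added): with this grid, GridSearchCV fits cv * n_combinations
    # models (5 folds x 2 activation choices = 10 fits, plus one refit on the
    # full training set); the winner is exposed via clf.best_estimator_ and
    # clf.best_params_.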
    # model evaluation
# res = evaluate_model(clf,X_test,y_test,beta=BETA)
return {'results':clf.cv_results_,'clf':clf}
# Save the experiment results
def save_res(eval_model,TIMESTAMP):
res = {}
res['TIMESTAMP'] = TIMESTAMP
res['acc_score'] = [eval_model['acc_score']]
res['recall'] = [eval_model['recall']]
res['f_beta'] = [BETA]
res['f_score'] = [eval_model['f_score']]
(tn, fp, fn, tp) = eval_model['matrix'].ravel()
res['tn'] = [tn]
res['fp'] = [fp]
res['fn'] = [fn]
res['tp'] = [tp]
res['test_num'] = [eval_model['test_num']]
df = pd.DataFrame(res)
if os.path.exists("result.csv"):
df.to_csv("result.csv",mode='a',index=None,header=None)
else:
df.to_csv("result.csv",index=None)
def main():
TIMESTAMP = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S").replace("'","")
    # load the data
    print("Reading files.........")
    print("Feature file: {}".format(FEATURE_FILE_PATH))
    print("Label file: {}".format(LABEL_FILE_PATH))
    data = get_data(feature_file_path=FEATURE_FILE_PATH,label_file_path=LABEL_FILE_PATH)
    X = data['X'].fillna(0)
    y = data['y'].fillna(0)
    if X.isnull().any().any():
        raise ValueError("Feature file contains missing data")
    if y.isnull().any().any():
        raise ValueError("Label file contains missing data")
    # data preprocessing (use the CHOICE constant rather than a hard-coded method)
    X = data_process(X,CHOICE)
    # split the dataset
    X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=RANDOM_STATE)
    # the parameter grid is defined at module level
    # train the model and collect the results
    print("Training model.........")
    res = train_model(X_train,X_test,y_train,y_test)
    clf = res['clf'] # best model
    results = pd.DataFrame(res['results']) # comparison of all grid-search results
    results.to_csv("{}.csv".format(TIMESTAMP),index=None)
    # evaluate the best model
    eval_model = evaluate_model(clf,X_test,y_test,beta=BETA)
    # display the results
    print("Best model evaluation results.........")
for key,value in eval_model.items():
print("{}:{}".format(key,value))
plt.plot(clf.best_estimator_.loss_curve_,c='red', linestyle= '-')
plt.ylabel('loss')
plt.title('loss curve')
plt.grid()
# plt.show()
    # collate the experiment results and save them to file
save_res(eval_model,TIMESTAMP)
return
if __name__ == '__main__':
main()
|
{"hexsha": "60a0b97b2d1faa50dba80f2b8c5ba8d35f68d0c6", "size": 5775, "ext": "py", "lang": "Python", "max_stars_repo_path": "chinese/mlp_dnn/MLP_CLASSFIER/MLP.py", "max_stars_repo_name": "Lyuyangdaisy/DS_package", "max_stars_repo_head_hexsha": "ca0f220598ee156028646fbefccde08b2ece62ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-13T10:33:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T07:22:40.000Z", "max_issues_repo_path": "chinese/mlp_dnn/MLP_CLASSFIER/MLP.py", "max_issues_repo_name": "Lyuyangdaisy/DS_package", "max_issues_repo_head_hexsha": "ca0f220598ee156028646fbefccde08b2ece62ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chinese/mlp_dnn/MLP_CLASSFIER/MLP.py", "max_forks_repo_name": "Lyuyangdaisy/DS_package", "max_forks_repo_head_hexsha": "ca0f220598ee156028646fbefccde08b2ece62ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-05T17:47:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-10T16:13:53.000Z", "avg_line_length": 35.6481481481, "max_line_length": 138, "alphanum_fraction": 0.5991341991, "include": true, "reason": "import numpy", "num_tokens": 1677}
|
from __future__ import (print_function, division, unicode_literals, absolute_import)
import numpy as np
import matplotlib.pyplot as plot
import matplotlib as mpl
import copy
import scipy
from scipy import ndimage
from astropy import log
from glob import glob
from nicer.values import *
from os import path
from astropy.table import Table, vstack
from astropy.coordinates import SkyCoord
from astropy.stats import mad_std, sigma_clipped_stats
import astropy.io.fits as pyfits
import astropy.units as u
# Python 2 (xrange) and Python 3 (range) compatibility
try:
xrange
except NameError:
xrange = range
#------------------------THIS MAKES THE TOTAL COUNT HISTOGRAM---------------------------
def event_counter(etable):
'Count events by DET_ID'
IDevents = np.zeros_like(IDS)
for i, det_id in enumerate(IDS):
IDevents[i] = np.count_nonzero(etable['DET_ID'] == det_id)
return IDevents
def find_hot_detectors(etable):
# Compute which detectors to mask on the fly
det_events = event_counter(etable)
# Remove detector with no events
#temp_events = np.delete(det_events, np.where(det_events == 0))
stats = sigma_clipped_stats(det_events,iters=3,sigma_lower=3,sigma_upper=3)
bad_dets = IDS[det_events > stats[0]+3.0*stats[2]]
log.info('Detector Count Mean {0}, std {1}'.format(stats[0],stats[2]))
if len(bad_dets) > 0:
log.warning('!!! Found hot detectors {0}'.format(bad_dets))
return bad_dets
return None
def hist_use(etable):
'Creates array of event count per ID and colors hot detectors red'
# Make array of event counts by DET_ID
IDevents = event_counter(etable)
colors = np.array(['k']*len(IDevents))
# Color using auto identified hot detectors
bad_dets = find_hot_detectors(etable)
if bad_dets is not None:
for i, det_id in enumerate(IDS):
if det_id in bad_dets:
colors[i] = 'r'
# Remove any that have 0 counts and compute std dev
#temp = np.delete(IDevents, np.where(IDevents == 0))
#stdev = np.std(temp)
# Set points that are off by more than 2 sigma to red
#diff = np.array(IDevents,dtype=np.float)-np.mean(temp)
#idx = np.where(diff>2.0*stdev)[0]
#colors[idx] = 'r'
return IDevents, colors
def plot_total_count_hist(etable, ax_rate, ax_counts):
'Plots event count per ID as a histogram with event count and countrate on y axes'
num_events, colors = hist_use(etable)
tc = ax_counts.bar(IDS, num_events, color = colors)
ax_rate.set_ylabel('c/s')
cntmin, cntmax = ax_counts.get_ylim()
ax_rate.set_ylim((cntmin/etable.meta['EXPOSURE'],cntmax/etable.meta['EXPOSURE']))
#countrate.set_ylim([np.min(rate)-20,np.max(rate)+20])
ax_counts.set_xlabel('DET_ID')
ax_counts.set_ylabel('# of Events')
    plot.locator_params(nbins = 20)
plot.title('Total (Filtered) Event Count by Detector')
#total_counts.set_ylim([np.min(num_events)-20, np.max(num_events)+20])
return num_events
#----------------------THIS MAKES THE GRAYSCALE ID/EVENT COUNT CHART---------------------
def structure(etable, num_events):
'Creates a grid where the xy pair corresponds to RAWX,RAWY and value at each entry is event count'
    rawx = np.zeros_like(IDS, dtype=int)
    rawy = np.zeros_like(IDS, dtype=int)
#getting RAWX and RAWY vals for each ID
for count, detid in enumerate(IDS):
idx = np.where(etable['DET_ID']==detid)[0]
if len(idx) > 0:
rawx[count] = etable['RAWX'][idx][0]
rawy[count] = etable['RAWY'][idx][0]
else:
log.info("No counts for det {0}".format(detid))
rawx[count] = -1
rawy[count] = -1
#In STRUCTURE, each element corresponds to the geometric position
# of each detector, while the value is the # of counts
structure = np.zeros(shape = (7,8))
for i in xrange(len(rawx)):
if rawx[i] >= 0 and rawy[i] >= 0:
structure[rawy[i]][rawx[i]] = num_events[i]
return structure
def plot_detector_chart(etable, num_events, ax_map):
'Plots the structure created in structure() above as a grayscale grid'
#WANT TO GET THE ORIGIN IN THE TOP RIGHT HAND CORNER
struct = structure(etable, num_events)
#plot.style.use('grayscale')
ax_img = plot.imshow(struct, origin = 'lower')
plot.gca().invert_yaxis()
plot.gca().invert_xaxis()
plot.rcParams.update({'font.size' : 8})
plot.title('Filtered Event Count by Detector Location')
plot.xlabel('Raw X')
plot.ylabel('Raw Y')
plot.colorbar(ax_img, orientation = 'horizontal')
return
#----------------------THIS MAKES THE LIGHT CURVE---------------------------
def light_curve(etable, startmet, stopmet, binsize):
    'Bins events as a histogram to be plotted as the light curve. Returns bins and the histogram'
if startmet is None and stopmet is None:
startmet = etable['MET'][0]
t = etable['MET'] - startmet
stopmet = etable['MET'][-1]
else:
t = etable['MET'][np.where(np.logical_and(etable['MET'] < stopmet, etable['MET'] > startmet))] - startmet
duration = stopmet-startmet
# Add 1 bin to make sure last bin covers last events
bins = np.arange(0.0,duration+binsize,binsize)
sums, edges = np.histogram(t, bins=bins, range=(0.0,duration))
# Chop off last bin edge, which is only for computing histogram, not plotting
return bins[:-1], sums
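# Worked micro-example (hypothetical event times, illustration only): events at
# MET = [0.4, 1.2, 2.7] with startmet=0, stopmet=3, binsize=1.0 yield
# bins = [0., 1., 2.] and sums = [1, 1, 1].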
def gti_colormap():
colornames = ['black','green','red','blue','magenta','orange','cyan','yellow','gray']
colorlevels = np.arange(len(colornames))
cmap, norm = mpl.colors.from_levels_and_colors(levels=colorlevels, colors=colornames, extend='max')
return colornames, cmap, norm
def plot_light_curve(etable, lclog, gtitable, binsize=1.0, noplot=False, plot_pos=None):
    'Compute binned light curve of events; plots the light curve and returns the mean rate and bin sums'
#EDGE CASE FOR FIRST INSTANCE
bins, sums = light_curve(etable, gtitable['START'][0], gtitable['STOP'][0], binsize=binsize)
    cc = np.zeros_like(bins, dtype=float)
cumtime = bins[-1]+binsize
#THE REST OF THE GOOD INTERVALS
for i in xrange(1,len(gtitable['START'])):
mybins, mysums = light_curve(etable, gtitable['START'][i], gtitable['STOP'][i], binsize=binsize)
bins = np.append(bins, mybins+cumtime)
cumtime += mybins[-1]+binsize
sums = np.append(sums, mysums)
        mycolors = np.zeros_like(mybins, dtype=float) + float(i)
cc = np.append(cc,mycolors)
#Compute mean rate
rate = sums/binsize
mean_rate = rate.mean()
if not noplot:
colornames, cmap, norm = gti_colormap()
plot.scatter(bins, rate, c=np.fmod(cc,len(colornames)), cmap=cmap,norm=norm,marker='+', label='Light Curve')
label = 'Mean Rate: {0:.3f} c/s'.format(mean_rate)
# Plot line at mean counts per bin
        plot.axhline(y=mean_rate, linestyle='dashed', label = label)
bininfo = "bin size = {} sec".format(binsize)
plot.annotate(bininfo, xy=(0.02, 0.85), xycoords='axes fraction')
#plot.legend(loc = 4)
if lclog:
plot.yscale('log')
            plot.ylim(bottom=0.1)
# Default option
if plot_pos is None:
plot.ylabel('c/s')
## Options for plot_all_spec
if plot_pos is "corner":
plot.ylabel('c/s')
plot.tick_params(axis='x',which='both',bottom='on',labelbottom='on')
if plot_pos is "left":
plot.ylabel('c/s')
plot.tick_params(axis='x',which='both',labelbottom='off')
if plot_pos is "center":
plot.tick_params(axis='x',which='both',labelbottom='off')
return mean_rate, sums
#-------------------------------THIS PLOTS THE FAST TO SLOW___------------------
def plot_slowfast(etable,args):
'Scatter plot of PI and fast PHA, highlighting points above ratio cut'
log.info('Counting slow and fast')
# First do some counts
nfastonly = np.count_nonzero(np.logical_and(etable['EVENT_FLAGS'][:,FLAG_FAST],
np.logical_not(etable['EVENT_FLAGS'][:,FLAG_SLOW])))
nslowonly = np.count_nonzero(np.logical_and(etable['EVENT_FLAGS'][:,FLAG_SLOW],
np.logical_not(etable['EVENT_FLAGS'][:,FLAG_FAST])))
nboth = np.count_nonzero(np.logical_and(etable['EVENT_FLAGS'][:,FLAG_SLOW],
etable['EVENT_FLAGS'][:,FLAG_FAST]))
log.info('Using only SLOW+FAST events for ratio plot')
# Only compute ratio for events with both triggers
etable = etable[np.logical_and(etable['EVENT_FLAGS'][:,FLAG_SLOW],etable['EVENT_FLAGS'][:,FLAG_FAST])]
downsampfac = None
if len(etable) > 50000:
log.warning('Too many events for ratio plot. Plotting subset of points')
downsampfac = len(etable)//50000
etable = etable[::downsampfac]
log.info('Computing ratio')
# Ratio is SLOW to FAST. Edge events should have ratio bigger than cut
    ratio = np.array(etable['PI'], dtype=float)/np.array(etable['PI_FAST'], dtype=float)
# fastconst = 1.0
# fastsig = 250.0
# fastquart = 4.0e-11
fastconst = 1.1
fastsig = 1200.0
fastquart = 0.0
    x = np.array(etable['PI'], dtype=float)
ratio_cut = fastconst + (fastsig/10.0)/x + fastquart*x**3
colors = np.array(['k']*len(ratio))
idx = np.where(ratio>ratio_cut)[0]
colors[idx] = 'r'
log.debug('Plotting the points')
plot.scatter(etable['PI']*PI_TO_KEV,ratio, s=.4, c = colors)
if downsampfac is None:
plot.title('PI Slow to Fast Ratio vs Energy')
else:
plot.title('PI Slow to Fast Ratio vs Energy (SUBSET)')
plot.xlabel('Energy')
plot.ylabel('PI Ratio')
plot.ylim([0.5,2.5])
fast_str = "# of fast only : " + str(nfastonly)
slow_str = "# of slow only : " + str(nslowonly)
total = "# of both : " + str(nboth)
bad = "# of bad points : " + str(len(idx))
plot.annotate(fast_str, xy=(0.03, 0.85), xycoords='axes fraction')
plot.annotate(slow_str, xy=(0.03, 0.8), xycoords='axes fraction')
plot.annotate(total, xy=(0.03, 0.75), xycoords='axes fraction')
plot.annotate(bad, xy = (.03, .7), xycoords='axes fraction')
#plot.annotate("Ratio cut = {0:.2f}".format(ratio_cut),xy=(0.65,0.85),xycoords='axes fraction')
    x = np.linspace(min(etable['PI']), max(etable['PI']), 100, dtype=float)
cutline = fastconst + (fastsig/10.0)/x + fastquart*x**3
plot.plot(x*PI_TO_KEV, cutline, 'g--', linewidth = 1.5)
return
#-------------------------------THIS PLOTS THE ENERGY SPECTRUM------------------
def calc_pi(etable, calfile):
'Compute PI from PHA (slow) using approximate linear calibration'
# Load calibration file
det_ids, e0s, gains = np.loadtxt(calfile,unpack=True)
det_ids = np.array(det_ids,dtype=int)
    e_keV = np.zeros_like(etable['PHA'], dtype=float)
for d, e0, g in zip(det_ids,e0s,gains):
idx = np.where(etable['DET_ID'] == d)[0]
e_keV[idx] = e0 + g*etable['PHA'][idx]
    pi = np.array(e_keV/PI_TO_KEV, dtype=int)
return pi
def plot_energy_spec(etable,binscale=1.0,plot_pos=None):
'plots the energy spectrum of PI'
bb = np.concatenate((np.arange(0.0,2.0,0.02*binscale),np.arange(2.0,15.0,0.1*binscale)))
hh, hh_bins = np.histogram(etable['PI']*PI_TO_KEV, bins=bb)
widths = bb[1:] - bb[:-1]
plot.step(bb[:-1],hh/widths,where='post')
# plot.hist(etable['PI']*PI_TO_KEV, bins=bb,
# histtype='step',log=False,density=False)
plot.yscale('log')
plot.xscale('log')
plot.xlim((0.1,20.0))
# Default option
if plot_pos is None:
plot.title('PI Spectrum')
plot.xlabel('Energy (keV)')
plot.ylabel('Counts/keV')
## Options for plot_all_spec
if plot_pos is "corner":
plot.xlabel('Energy (keV)')
plot.ylabel('Counts/keV')
plot.tick_params(axis='x',which='both',bottom='on',labelbottom='on')
if plot_pos is "left":
plot.ylabel('Counts/keV')
plot.tick_params(axis='x',which='both',labelbottom='off')
if plot_pos is "bottom":
plot.xlabel('Energy (keV)')
if plot_pos is "center":
plot.tick_params(axis='x',which='both',labelbottom='off')
return
#-------------------------------THIS PLOTS THE POWER SPECTRUM (FFT)--------------
def choose_N(orig_N):
"""
choose_N(orig_N):
Choose a time series length that is larger than
the input value but that is highly factorable.
Note that the returned value must be divisible
by at least the maximum downsample factor * 2.
Currently, this is 8 * 2 = 16.
"""
# A list of 4-digit numbers that are highly factorable by small primes
goodfactors = [1008, 1024, 1056, 1120, 1152, 1200, 1232, 1280, 1296,
1344, 1408, 1440, 1536, 1568, 1584, 1600, 1680, 1728,
1760, 1792, 1920, 1936, 2000, 2016, 2048, 2112, 2160,
2240, 2304, 2352, 2400, 2464, 2560, 2592, 2640, 2688,
2800, 2816, 2880, 3024, 3072, 3136, 3168, 3200, 3360,
3456, 3520, 3584, 3600, 3696, 3840, 3872, 3888, 3920,
4000, 4032, 4096, 4224, 4320, 4400, 4480, 4608, 4704,
4752, 4800, 4928, 5040, 5120, 5184, 5280, 5376, 5488,
5600, 5632, 5760, 5808, 6000, 6048, 6144, 6160, 6272,
6336, 6400, 6480, 6720, 6912, 7040, 7056, 7168, 7200,
7392, 7680, 7744, 7776, 7840, 7920, 8000, 8064, 8192,
8400, 8448, 8624, 8640, 8800, 8960, 9072, 9216, 9408,
9504, 9600, 9680, 9856]
if orig_N < 1024:
return 1024
# Get the number represented by the first 4 digits of orig_N
first4 = int(str(orig_N)[:4])
# Now get the number that is just bigger than orig_N
# that has its first 4 digits equal to "factor"
for factor in goodfactors:
if factor > first4: break
new_N = factor
while new_N < orig_N:
new_N *= 10
# Finally, compare new_N to the closest power_of_two
# greater than orig_N. Take the closest.
two_N = 2
while two_N < orig_N:
two_N *= 2
if two_N < new_N: return two_N
else: return new_N
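# Illustrative values (following the rules above):
#   choose_N(1000)  -> 1024   (inputs below 1024 round up to 1024)
#   choose_N(5000)  -> 5040   (first "good factor" above 5000)
#   choose_N(10000) -> 10080  (1008 scaled by 10, smaller than 16384)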
def plot_fft_of_power(etable,nyquist, pslog, writeps):
'plots the power spectrum'
dt = 0.5/nyquist
METmin = etable['MET'].min()
T = etable['MET'].max() - etable['MET'].min()
# Choose good number of bins for efficient FFT
n = choose_N(T/float(dt))
bins = np.arange(n)*dt
log.info('{0} {1}'.format(T/dt,n))
log.info('Computing FFT with {0} bins of {1} s, covering {2} total time (Nyquist = {3})'.format(n,dt,T, nyquist))
ts, edges = np.histogram(etable['MET']-METmin,bins)
ft = np.fft.rfft(ts)
power = (ft * ft.conj()).real
power /= len(etable['MET'])
power[0:50] = 0.0
x = np.fft.rfftfreq(len(ts), dt)
#idx = np.where(power>20)
idx = np.argmax(power)
print(x[idx], power[idx])
if pslog:
plot.semilogy(x,power)
else:
plot.plot(x,power)
if writeps:
data = np.array([x,power])
data = data.T
        np.savetxt('powspec.txt', data, fmt=['%f','%f'])
plot.title('Power Spectrum')
plot.xlabel('Frequency')
plot.ylabel('Power')
return
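# Sanity sketch (illustrative only, not part of the pipeline): a periodic event
# series should peak at its pulse frequency. For example, with dt = 0.01 s:
#   t = np.arange(4096) * dt
#   ts = (np.sin(2 * np.pi * 5.0 * t) > 0.9).astype(float)  # crude 5 Hz "events"
#   p = np.abs(np.fft.rfft(ts))**2
#   np.fft.rfftfreq(len(ts), dt)[np.argmax(p[1:]) + 1]      # ~5.0 Hz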
#-------------------------------THIS PLOTS THE DEADTIME HISTOGRAM------------------
def plot_deadtime(etable):
'Plot histogram of detector deadtime in microseconds.'
us = etable['DEADTIME']*1.0e6
# Bin at 1 us resolution
    max_us = np.floor(us.max())+1
    plot.hist(us, range=(0.0, max_us), bins=int(max_us), log=True)
#plot.ticklabel_format(style = 'sci', axis='x', scilimits = (0,0))
plot.title('Histogram of deadtime')
plot.xlabel('Deadtime [microseconds]')
plot.ylabel('Frequency [occurrences]')
return
#-------------------------PULSE PROFILE----------------------------------
def pulse_profile_fixed(etable, F0):
phase = np.fmod((etable['MET']-etable['MET'][0])*F0,1.0)
plot.hist(phase,bins=32)
plot.ylabel('Counts')
plot.xlabel('Pulse Phase')
plot.title('Pulse Profile (F0={0:.6f})'.format(F0))
def pulse_profile(ax, etable, args):
if (args.orb is None) or (args.par is None):
log.warning('You did not specify orbfile or parfile')
log.info('Please input files for orb and par with --orb and --par')
return
import pint
import astropy.io.fits as pyfits
from astropy.time import TimeDelta
import pint.toa, pint.models
from pint.plot_utils import phaseogram_binned
from pint.observatory.nicer_obs import NICERObs
from pint.eventstats import hm
### Make arguments for parfile and orbfile and only do this if both are present
log.info('Event file TELESCOPE = {0}, INSTRUMENT = {1}'.format(etable.meta['TELESCOP'],
etable.meta['INSTRUME']))
# Instantiate NICERObs once so it gets added to the observatory registry
log.info('Setting up NICER observatory')
NICERObs(name='NICER',FPorbname=args.orb)
log.info('Reading model from PARFILE')
# Load PINT model objects
modelin = pint.models.get_model(args.par)
log.info(str(modelin))
# Read event file and return list of TOA objects
log.info('doing the load_toas thing')
#tl = load_NICER_TOAs(pulsefilename[0])
# Create TOA list
tl = []
for t in etable['T']:
tl.append(pint.toa.TOA(t, obs='NICER'))
planets=False
if 'PLANET_SHAPIRO' in modelin.params:
if modelin.PLANET_SHAPIRO.value:
planets=True
ts = pint.toa.get_TOAs_list(tl,planets=planets,include_bipm=False,include_gps=False)
# No longer needed, since Feb 28 reprocessing
# log.warning('Applying -1.0s time correction to event time TOAs for pulse phase plot')
# ts.adjust_TOAs(TimeDelta(np.ones(len(ts.table))*-1.0*u.s,scale='tt'))
# Note: adjust_TOAs recomputes TDBs and posvels so no need to do again.
# ts.compute_TDBs()
# ts.compute_posvels(ephem='DE421',planets=True)
# Compute phases
phss = modelin.phase(ts,abs_phase=True)[1]
# Strip the units, because PINT may return u.cycle
phss = np.array(phss)
    # ensure all phases are positive
phases = np.where(phss < 0.0, phss + 1.0, phss)
mjds = ts.get_mjds()
h = hm(phases)
if not np.isfinite(h):
log.error("H not finite, using {0} phases!".format(len(phases)))
print("Phases from {0} to {1}\n".format(h.min(),h.max()))
else:
log.info("H = {0} from {1} phases".format(h,len(phases)))
ax.hist(phases, bins = 32)
ax.text(0.1, 0.1, 'H = {0:.2f}'.format(h), transform=ax.transAxes)
#np.savetxt('{0}.phases'.format(args.basename),np.transpose([etable['MET'], etable['PI'],phases]))
plot.ylabel('Counts')
plot.xlabel('Pulse Phase')
plot.title('Pulse Profile')
return
#-------------------------OVERSHOOT RATE FOR RATIO------------------------------
def apply_gti(etable, gtitable):
mets = etable['MET']
idx = np.where(np.logical_and(mets>gtitable['START'][0],mets<gtitable['STOP'][0]))
goodlist = [ etable[idx] ]
for ii in range(1,len(gtitable['START'])):
idx = np.where(np.logical_and(mets>gtitable['START'][ii],mets<gtitable['STOP'][ii]))
goodlist.append(etable[idx])
return vstack(goodlist,metadata_conflicts='silent')
def convert_from_elapsed_goodtime(elapsedstarts, elapsedstops, gtitable):
'Given a set of elapsed time starts and stops will convert to MET'
startmets = np.array([])
stopmets = np.array([])
for timex in xrange(0,len(elapsedstarts)):
starttime = elapsedstarts[timex]
        stoptime = elapsedstops[timex]
for idx in xrange(0,len(gtitable)):
if idx < len(gtitable)-1:
if np.logical_and(starttime >= gtitable['CUMTIME'][idx], stoptime < gtitable['CUMTIME'][idx + 1]):
startmets = np.append(startmets, starttime + gtitable['START'][idx])
                    stopmets = np.append(stopmets, stoptime + gtitable['START'][idx])
else:
continue
else:
if starttime > gtitable['CUMTIME'][idx]:
startmets = np.append(startmets, starttime + gtitable['START'][idx])
                    stopmets = np.append(stopmets, stoptime + gtitable['START'][idx])
return startmets, stopmets
def convert_to_elapsed_goodtime(mets, vals, gtitable):
'Given a set of values at METs, extract the values during the GTIs and return times that are in elapsed good time for plotting'
mets = np.asarray(mets)
vals = np.asarray(vals)
idx = np.where(np.logical_and(mets>gtitable['START'][0],mets<gtitable['STOP'][0]))
goodvals = vals[idx]
etimes = mets[idx] - gtitable['START'][0]
    cc = np.zeros_like(goodvals, dtype=float)
for ii in range(1,len(gtitable['START'])):
idx = np.where(np.logical_and(mets>gtitable['START'][ii],mets<gtitable['STOP'][ii]))
goodvals = np.append(goodvals, vals[idx])
etimes = np.append(etimes, mets[idx]-gtitable['START'][ii] + gtitable['CUMTIME'][ii])
        cc = np.append(cc, np.zeros_like(vals[idx], dtype=float) + float(ii))
# Returns the arrays of elapsed times, values, and an array of what segment it is in, used for setting plot colors by GTI segment
return etimes, goodvals, cc
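# Worked micro-example (hypothetical GTI table):
#   gtis = Table({'START': [0.0, 20.0], 'STOP': [10.0, 30.0], 'CUMTIME': [0.0, 10.0]})
#   convert_to_elapsed_goodtime([5.0, 25.0], [1.0, 2.0], gtis)
# returns (array([5., 15.]), array([1., 2.]), array([0., 1.])): the MET=25.0
# sample falls in the second GTI and maps to elapsed good time 25 - 20 + 10 = 15.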
#def plot_overshoot(etable, overshootrate, gtitable, args, hkmet, bothrate, mktable):
def plot_overshoot(mktable, ovbintable, gtitable, args):
#etime, overshoot, cc = convert_to_elapsed_goodtime(hkmet, overshootrate, gtitable)
etime, overshoot, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['NUM_FPM_ON']*mktable['FPM_OVERONLY_COUNT'], gtitable)
    # Delete the NaN values from overshoot and cc so they don't plot
etimeNan = np.where(np.isnan(overshoot))
ovtime=np.delete(etime,etimeNan)
overshoot=np.delete(overshoot,etimeNan)
cc=np.delete(cc,etimeNan)
colornames, cmap, norm = gti_colormap()
plot.scatter(ovtime, overshoot, c=np.fmod(cc,len(colornames)), cmap=cmap,
norm=norm, marker='+',label='Overshoot rate')
if ovbintable is not None:
etime, binnedOV, cc = convert_to_elapsed_goodtime(ovbintable['TIME'], mktable['NUM_FPM_ON'].max()*ovbintable['FPM_OVERONLY_COUNT'], gtitable)
plot.plot(etime, binnedOV, linewidth=2.0)
else:
# if bothrate is not None:
# etime, both, cc = convert_to_elapsed_goodtime(hkmet, bothrate, gtitable)
etime, both, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['NUM_FPM_ON']*mktable['FPM_DOUBLE_COUNT'], gtitable)
plot.scatter(etime, both, color='c', marker='.', label='Both Under and Over Flags')
    plot.yscale('symlog', linthresh=10.0)
plot.legend(loc = 2)
plot.ylabel('Overshoot rate')
plot.grid(True)
return
#def plot_SAA(mktable, gtitable, overshootrate):
def plot_SAA(mktable, gtitable):
time, insaa, colors = convert_to_elapsed_goodtime(mktable['TIME'], mktable['NICER_SAA'], gtitable)
time = np.delete(time, np.where(insaa == 0))
insaa = np.delete(insaa, np.where(insaa == 0))
# Get Overshoot Max, just to put the SAA on the same plot
ovtime, overshoot, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['NUM_FPM_ON']*mktable['FPM_OVERONLY_COUNT'], gtitable)
#insaa[np.where(insaa == 1)] = max(overshootrate)
insaa[np.where(insaa == 1)] = np.nanmax(overshoot)
plot.scatter(time, insaa, color = 'y', label = 'In the SAA',marker = '_')
plot.legend(loc = 2)
return
#-------------------------UNDERSHOOT RATE FOR RATIO------------------------------
#def plot_undershoot(etable, undershootrate, gtitable, args, hkmet, mktable):
def plot_undershoot(mktable, gtitable, args):
#etime, undershoot, cc = convert_to_elapsed_goodtime(hkmet, undershootrate, gtitable)
etime, undershoot, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['NUM_FPM_ON']*mktable['FPM_UNDERONLY_COUNT'], gtitable)
# Delete the nan values from undershoot and cc so they don't plot
etimeNan = np.where(np.isnan(undershoot))
udtime=np.delete(etime,etimeNan)
undershoot=np.delete(undershoot,etimeNan)
cc=np.delete(cc,etimeNan)
colornames, cmap, norm = gti_colormap()
plot.scatter(udtime, undershoot, c=np.fmod(cc,len(colornames)), cmap=cmap,
norm=norm, marker='+')
# Add sunshine
sunmet = mktable['TIME']
sunshine = mktable['SUNSHINE']
sunt, suny, suncc = convert_to_elapsed_goodtime(sunmet, sunshine, gtitable)
sidx = np.where(suny==0)
# Delete the 0 values so they don't plot
sunt=np.delete(sunt,sidx)
suny=np.delete(suny,sidx)
plot.scatter(sunt,1.1*suny*undershoot.max(), color='y', label='Sunshine',
marker = '_')
plot.legend(loc = 2)
plot.grid(True)
plot.ylabel('Undershoot rate')
if args.lclog:
plot.yscale('log')
return
#-------------------------SUN / EARTH / MOON ANGLES-----------------------------
def plot_angles(mktable, gtitable):
sun = mktable['SUN_ANGLE']
earth = mktable['BR_EARTH']
moon = mktable['MOON_ANGLE']
elv = mktable['ELV']
met = mktable['TIME']
goodtime, sunangle, cc = convert_to_elapsed_goodtime(met, sun, gtitable)
goodtime, earthangle, cc = convert_to_elapsed_goodtime(met, earth, gtitable)
goodtime, moonangle, cc = convert_to_elapsed_goodtime(met, moon, gtitable)
goodtime, elvangle, cc = convert_to_elapsed_goodtime(met, elv, gtitable)
plot.scatter(goodtime, sunangle, marker = '.', color = 'y', alpha=0.5, label = 'Sun')
plot.scatter(goodtime, earthangle, marker ='.', color = 'b', alpha=0.5, label = 'Bright Earth')
plot.scatter(goodtime, moonangle, marker = '.', color = 'grey', alpha = 0.5, label = 'Moon')
plot.scatter(goodtime, elvangle, marker = '.', color = 'm', alpha = 0.5, label = 'ELV')
plot.legend(loc = 2)
plot.ylim((0.0,180.0))
plot.grid(True)
plot.yticks([0.0, 45.0, 90.0, 135.0, 180.0])
plot.ylabel('Angle (deg)')
return
#--------------------------POINTING---------------------------------------------
def plot_pointing(mktable, gtitable):
time, pointing, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['ANG_DIST'], gtitable)
colornames, cmap, norm = gti_colormap()
plot.scatter(time, pointing, c = np.fmod(cc,len(colornames)), cmap = cmap,
norm=norm, marker = '+', label='Pointing Offset')
plot.legend(loc = 2)
plot.ylabel('Angle (deg)')
plot.yscale('log')
plot.axhline(1.0/60.0,c='r')
plot.ylim((0.0001,100.0))
return
#--------------------------LAT / LON---------------------------------------------
def plot_latlon(mktable, gtitable):
time, lat, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['SAT_LAT'], gtitable)
#time, lon, cc2 = convert_to_elapsed_goodtime(mktable['TIME'], mktable['SAT_LON'], gtitable)
colornames, cmap, norm = gti_colormap()
plot.scatter(time, lat, c=np.fmod(cc,len(colornames)), norm=norm,
cmap=cmap, marker='^', label='Latitude')
#plot.scatter(time, lon, c = colors, cmap = cmap, marker = '_', label = 'Longitude')
plot.legend(loc = 2)
plot.ylim((-60.0,60.0))
plot.xlabel('Elapsed Time (s)', labelpad = 1)
plot.grid(True)
plot.ylabel('Degrees')
return
def plot_cor(mktable, gtitable):
time, cor, cc = convert_to_elapsed_goodtime(mktable['TIME'], mktable['COR_SAX'], gtitable)
#time, lon, cc2 = convert_to_elapsed_goodtime(mktable['TIME'], mktable['SAT_LON'], gtitable)
colornames, cmap, norm = gti_colormap()
plot.scatter(time, cor, c=np.fmod(cc,len(colornames)), norm=norm,
cmap=cmap, marker='^', label='COR_SAX')
#plot.scatter(time, lon, c = colors, cmap = cmap, marker = '_', label = 'Longitude')
plot.legend(loc = 2)
plot.ylim((0.0,20.0))
plot.axhline(5.0,linestyle='--',color='r')
plot.xlabel('Elapsed Time (s)', labelpad = 1)
plot.grid(True)
plot.ylabel('GeV')
return
#-------------------------THIS PLOTS USEFUL TEXT AT THE TOP OF THE SUPLOT-------
# def calc_nresets(etable, IDS):
def calc_nresets(mktable, IDS):
'Count resets (detector undershoots) for each detector, from the mktable'
nresets = np.zeros_like(IDS)
# # For each DET_ID count the number of events with undershoot flag set
# for i in range(len(IDS)):
# idx = np.where(np.logical_and(etable['DET_ID'] == IDS[i],
# etable['EVENT_FLAGS'][:,FLAG_UNDERSHOOT]))[0]
# nresets[i] = len(idx)
MPU_ud_only = mktable['MPU_UNDERONLY_COUNT'].reshape((len(mktable['MPU_UNDERONLY_COUNT']),56))
# Reshaping is necessary in the case of MPU_UNDERONLY_COUNT
# given as a 7x8 matrix instead of 56 array in the mktable
for i in range(len(IDS)):
nresets[i] = MPU_ud_only[:,i].sum()
return nresets
def plot_resetrate(IDS, reset_rates):
'Plots reset rates'
reset = plot.bar(IDS, reset_rates, width = .85)
plot.title('Reset Rate by Detector')
plot.ylabel('Reset Rate [Hz]')
plot.xlabel('DET_ID')
#plot.ylim([0, np.max(reset_rates)+2])
#----------------------Filter Ratio Cut-----------------------------------------
def filt_ratio(etable, ratiocut):
#Filters out the points > filtratio
    ratio = np.zeros_like(etable['PI'], dtype=float)
    idx = np.where(np.logical_and(etable['PHA']>0, etable['PHA_FAST']>0))[0]
    ratio[idx] = np.asarray(etable['PHA'][idx], dtype=float)/np.asarray(etable['PHA_FAST'][idx], dtype=float)
etable = etable[np.where(ratio < ratiocut)[0]]
return etable
def filt_ratio_trumpet(etable):
#Filters out the points > filtratio
    ratio = np.zeros_like(etable['PI'], dtype=float)
    idx = np.where(np.logical_and(etable['PI']>0, etable['PI_FAST']>0))[0]
    ratio[idx] = np.asarray(etable['PI'][idx], dtype=float)/np.asarray(etable['PI_FAST'][idx], dtype=float)
# Old version
#fastconst = 1.0
#fastsig = 250.0
#fastquart = 4.0e-11
# New version 2018 Feb
fastconst = 1.1
fastsig = 1200.0
fastquart = 0.0
    x = np.array(etable['PI'], dtype=float)
ratio_cut = fastconst + (fastsig/10.0)/x + fastquart*x**3
etable = etable[np.where(ratio < ratio_cut)[0]]
return etable
|
{"hexsha": "429c325559e81185db4b35fa8b78581f562ac760", "size": 30495, "ext": "py", "lang": "Python", "max_stars_repo_path": "nicer/plotutils.py", "max_stars_repo_name": "ZaynabGhazi/NICERsoft", "max_stars_repo_head_hexsha": "c1e467b807226f091e82cd0e3ab0ce6b7a476610", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nicer/plotutils.py", "max_issues_repo_name": "ZaynabGhazi/NICERsoft", "max_issues_repo_head_hexsha": "c1e467b807226f091e82cd0e3ab0ce6b7a476610", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nicer/plotutils.py", "max_forks_repo_name": "ZaynabGhazi/NICERsoft", "max_forks_repo_head_hexsha": "c1e467b807226f091e82cd0e3ab0ce6b7a476610", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1778656126, "max_line_length": 149, "alphanum_fraction": 0.6302016724, "include": true, "reason": "import numpy,import scipy,from scipy,import astropy,from astropy", "num_tokens": 8797}
|
using Omega: withkernel, kseα
"Replica Exchange (Parallel Tempering)"
struct ReplicaAlg <: SamplingAlgorithm end
"Single Site Metropolis Hastings"
const Replica = ReplicaAlg()
softhard(::Type{ReplicaAlg}) = IsSoft{ReplicaAlg}()
defΩ(::ReplicaAlg) = Omega.LinearΩ{ID, UnitRange{Int64}, Vector{Real}}
defΩ(x, ::ReplicaAlg; inneralg...) = defΩ(inneralg)
isapproximate(::ReplicaAlg) = true
function swap!(v, i, j)
temp = v[i]
v[i] = v[j]
v[j] = temp
end
"Swap adjacent chains"
function exchange!(rng, logdensity, ωs, temps, kernel)
for i in length(ωs):-1:2
j = i - 1
E_i_x = withkernel(kernel(temps[i])) do
logdensity(ωs[i])
end
E_j_x = withkernel(kernel(temps[j])) do
logdensity(ωs[i])
end
E_i_y = withkernel(kernel(temps[i])) do
logdensity(ωs[j])
end
E_j_y = withkernel(kernel(temps[j])) do
logdensity(ωs[j])
end
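    # Metropolis criterion on the joint tempered density: the swap of ωs[i]
    # and ωs[j] is accepted with probability min(1, exp(k)), where
    # k = (E_i(y) + E_j(x)) - (E_i(x) + E_j(y)).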
k = (E_i_y + E_j_x) - (E_i_x + E_j_y)
doswap = log(rand(rng)) < k
if doswap
swap!(ωs, i, j)
end
end
end
"Logarithmically spaced temperatures"
logtemps(n, k = 10) = exp.(k * range(-2.0, stop = 1.0, length = n))
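# For example, with the default k = 10:
#   logtemps(4) == exp.(10 .* [-2.0, -1.0, 0.0, 1.0]) ≈ [2.1e-9, 4.5e-5, 1.0, 2.2e4]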
struct PreSwap end
struct PostSwap end
struct InLoop end
"""Sample from `density` using Replica Exchange
$(SIGNATURES)
Replica exchange (aka parallel tempering) runs `nreplicas` independent MCMC
chains in parallel.
Returns samples from lowest temperature chain
# Arguments
- `ΩT`: Type of Ω (the sample space) used for each replica chain
- `logdensity`: Real-valued `RandVar`
- `n`: Number of samples
- `swapevery` : performs swap every swapevery iterations
- `nreplicas` : number of replica chains to run
- `temps` : temperatures of different chains
- `inneralg` : Algorithm used for each chain
- `algargs::NamedTuple` : keyword arguments to be passed to `inneralg` in:
`rand(ΩT, density, swapevery, inneralg; algargs...)`
- `kernel`: Kernel to use for soft constraints (DEPRECATE ME)
# Returns
- `ωsamples`: `Vector` of Ω samples drawn from the lowest-temperature chain
"""
function Base.rand(rng,
ΩT::Type{OT},
logdensity::RandVar,
n::Integer,
alg::ReplicaAlg;
inneralg = SSMH,
algargs = NamedTuple(),
swapevery = 1,
nreplicas = 4,
temps = logtemps(nreplicas),
kernel = Omega.kseα) where {OT <: Ω}
@pre issorted(temps)
@pre n % swapevery == 0
@pre nreplicas == length(temps)
@show temps
ωsamples = OT[]
ωs = [ΩT() for i = 1:nreplicas]
# Do swapevery steps for each chain, then swap ωs
lowesti = nreplicas
for j = 1:div(n, swapevery)
for i = 1:nreplicas
withkernel(kernel(temps[i])) do
try
ωst = rand(rng, ΩT, logdensity, swapevery, inneralg;
ωinit = ωs[i],
offset = (j - 1) * swapevery,
algargs...)
          if i == lowesti # keep lowest temperature
append!(ωsamples, ωst)
end
lens(InLoop, (ωs = ωst, j = j, i = i, temp = temps[i]))
ωs[i] = ωst[end]
catch e
# rethrow(e)
println("Chain at temp $(temps[i]) Failed due to:", e)
end
end
end
lens(PreSwap, (ωs = ωs, temps = temps))
exchange!(rng, logdensity, ωs, temps, kernel)
lens(PostSwap, (ωs = ωs, temps = temps))
end
ωsamples
end
|
{"hexsha": "bf3a250840939f9cf5bfff25eed4d9399b5dec7d", "size": 3290, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/inference/replica.jl", "max_stars_repo_name": "mrakgr/Omega.jl", "max_stars_repo_head_hexsha": "7338c5f4c5c6931d676db2f43f6fe41d05eb19ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 112, "max_stars_repo_stars_event_min_datetime": "2018-08-02T22:30:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T18:00:22.000Z", "max_issues_repo_path": "src/inference/replica.jl", "max_issues_repo_name": "mrakgr/Omega.jl", "max_issues_repo_head_hexsha": "7338c5f4c5c6931d676db2f43f6fe41d05eb19ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 137, "max_issues_repo_issues_event_min_datetime": "2018-07-16T10:05:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T18:45:35.000Z", "max_forks_repo_path": "src/inference/replica.jl", "max_forks_repo_name": "mrakgr/Omega.jl", "max_forks_repo_head_hexsha": "7338c5f4c5c6931d676db2f43f6fe41d05eb19ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-08-07T21:04:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T17:27:46.000Z", "avg_line_length": 27.1900826446, "max_line_length": 76, "alphanum_fraction": 0.5954407295, "num_tokens": 1010}
|
"""Revised simplex method for linear programming
The *revised simplex* method uses the method described in [1]_, except
that a factorization [2]_ of the basis matrix, rather than its inverse,
is efficiently maintained and used to solve the linear systems at each
iteration of the algorithm.
.. versionadded:: 1.3.0
References
----------
.. [1] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
programming." Athena Scientific 1 (1997): 997.
.. [2] Bartels, Richard H. "A stabilization of the simplex method."
       Numerische Mathematik 16.5 (1971): 414-434.
"""
# Author: Matt Haberland
import numpy as np
from scipy.linalg import solve
from .optimize import _check_unknown_options
from ._bglu_dense import LU
from ._bglu_dense import BGLU as BGLU
from scipy.linalg import LinAlgError
from numpy.linalg.linalg import LinAlgError as LinAlgError2
from ._linprog_util import _postsolve
from .optimize import OptimizeResult
def _phase_one(A, b, x0, callback, postsolve_args, maxiter, tol, disp,
maxupdate, mast, pivot):
"""
The purpose of phase one is to find an initial basic feasible solution
(BFS) to the original problem.
Generates an auxiliary problem with a trivial BFS and an objective that
minimizes infeasibility of the original problem. Solves the auxiliary
problem using the main simplex routine (phase two). This either yields
a BFS to the original problem or determines that the original problem is
infeasible. If feasible, phase one detects redundant rows in the original
constraint matrix and removes them, then chooses additional indices as
necessary to complete a basis/BFS for the original problem.
"""
m, n = A.shape
status = 0
# generate auxiliary problem to get initial BFS
A, b, c, basis, x, status = _generate_auxiliary_problem(A, b, x0, tol)
if status == 6:
residual = c.dot(x)
iter_k = 0
return x, basis, A, b, residual, status, iter_k
# solve auxiliary problem
phase_one_n = n
iter_k = 0
x, basis, status, iter_k = _phase_two(c, A, x, basis, callback,
postsolve_args,
maxiter, tol, disp,
maxupdate, mast, pivot,
iter_k, phase_one_n)
# check for infeasibility
residual = c.dot(x)
if status == 0 and residual > tol:
status = 2
# drive artificial variables out of basis
# TODO: test redundant row removal better
# TODO: make solve more efficient with BGLU? This could take a while.
keep_rows = np.ones(m, dtype=bool)
for basis_column in basis[basis >= n]:
B = A[:, basis]
try:
basis_finder = np.abs(solve(B, A)) # inefficient
pertinent_row = np.argmax(basis_finder[:, basis_column])
eligible_columns = np.ones(n, dtype=bool)
eligible_columns[basis[basis < n]] = 0
eligible_column_indices = np.where(eligible_columns)[0]
index = np.argmax(basis_finder[:, :n]
[pertinent_row, eligible_columns])
new_basis_column = eligible_column_indices[index]
if basis_finder[pertinent_row, new_basis_column] < tol:
keep_rows[pertinent_row] = False
else:
basis[basis == basis_column] = new_basis_column
except (LinAlgError, LinAlgError2):
status = 4
# form solution to original problem
A = A[keep_rows, :n]
basis = basis[keep_rows]
x = x[:n]
m = A.shape[0]
return x, basis, A, b, residual, status, iter_k
def _get_more_basis_columns(A, basis):
"""
Called when the auxiliary problem terminates with artificial columns in
the basis, which must be removed and replaced with non-artificial
columns. Finds additional columns that do not make the matrix singular.
"""
m, n = A.shape
# options for inclusion are those that aren't already in the basis
a = np.arange(m+n)
bl = np.zeros(len(a), dtype=bool)
bl[basis] = 1
options = a[~bl]
options = options[options < n] # and they have to be non-artificial
# form basis matrix
B = np.zeros((m, m))
B[:, 0:len(basis)] = A[:, basis]
if (basis.size > 0 and
np.linalg.matrix_rank(B[:, :len(basis)]) < len(basis)):
raise Exception("Basis has dependent columns")
rank = 0 # just enter the loop
for i in range(n): # somewhat arbitrary, but we need another way out
# permute the options, and take as many as needed
new_basis = np.random.permutation(options)[:m-len(basis)]
B[:, len(basis):] = A[:, new_basis] # update the basis matrix
rank = np.linalg.matrix_rank(B) # check the rank
if rank == m:
break
return np.concatenate((basis, new_basis))
def _generate_auxiliary_problem(A, b, x0, tol):
"""
Modifies original problem to create an auxiliary problem with a trivial
initial basic feasible solution and an objective that minimizes
infeasibility in the original problem.
Conceptually, this is done by stacking an identity matrix on the right of
the original constraint matrix, adding artificial variables to correspond
with each of these new columns, and generating a cost vector that is all
zeros except for ones corresponding with each of the new variables.
    An initial basic feasible solution is trivial: all variables are zero
except for the artificial variables, which are set equal to the
corresponding element of the right hand side `b`.
    Running the simplex method on this auxiliary problem drives all of the
artificial variables - and thus the cost - to zero if the original problem
is feasible. The original problem is declared infeasible otherwise.
Much of the complexity below is to improve efficiency by using singleton
columns in the original problem where possible, thus generating artificial
variables only as necessary, and using an initial 'guess' basic feasible
solution.
"""
status = 0
m, n = A.shape
if x0 is not None:
x = x0
else:
x = np.zeros(n)
r = b - A@x # residual; this must be all zeros for feasibility
A[r < 0] = -A[r < 0] # express problem with RHS positive for trivial BFS
b[r < 0] = -b[r < 0] # to the auxiliary problem
r[r < 0] *= -1
# Rows which we will need to find a trivial way to zero.
# This should just be the rows where there is a nonzero residual.
# But then we would not necessarily have a column singleton in every row.
# This makes it difficult to find an initial basis.
if x0 is None:
nonzero_constraints = np.arange(m)
else:
nonzero_constraints = np.where(r > tol)[0]
# these are (at least some of) the initial basis columns
basis = np.where(np.abs(x) > tol)[0]
if len(nonzero_constraints) == 0 and len(basis) <= m: # already a BFS
c = np.zeros(n)
basis = _get_more_basis_columns(A, basis)
return A, b, c, basis, x, status
elif (len(nonzero_constraints) > m - len(basis) or
np.any(x < 0)): # can't get trivial BFS
c = np.zeros(n)
status = 6
return A, b, c, basis, x, status
# chooses existing columns appropriate for inclusion in initial basis
cols, rows = _select_singleton_columns(A, r)
# find the rows we need to zero that we _can_ zero with column singletons
i_tofix = np.isin(rows, nonzero_constraints)
# these columns can't already be in the basis, though
# we are going to add them to the basis and change the corresponding x val
i_notinbasis = np.logical_not(np.isin(cols, basis))
i_fix_without_aux = np.logical_and(i_tofix, i_notinbasis)
rows = rows[i_fix_without_aux]
cols = cols[i_fix_without_aux]
# indices of the rows we can only zero with auxiliary variable
# these rows will get a one in each auxiliary column
arows = nonzero_constraints[np.logical_not(
np.isin(nonzero_constraints, rows))]
n_aux = len(arows)
acols = n + np.arange(n_aux) # indices of auxiliary columns
basis_ng = np.concatenate((cols, acols)) # basis columns not from guess
basis_ng_rows = np.concatenate((rows, arows)) # rows we need to zero
# add auxiliary singleton columns
A = np.hstack((A, np.zeros((m, n_aux))))
A[arows, acols] = 1
# generate initial BFS
x = np.concatenate((x, np.zeros(n_aux)))
x[basis_ng] = r[basis_ng_rows]/A[basis_ng_rows, basis_ng]
# generate costs to minimize infeasibility
c = np.zeros(n_aux + n)
c[acols] = 1
# basis columns correspond with nonzeros in guess, those with column
# singletons we used to zero remaining constraints, and any additional
# columns to get a full set (m columns)
basis = np.concatenate((basis, basis_ng))
basis = _get_more_basis_columns(A, basis) # add columns as needed
return A, b, c, basis, x, status
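# Illustrative micro-example (hypothetical data): for
#   A = [[1., 1.], [2., 0.]], b = [3., 4.], x0 = None,
# column 1 is a usable singleton for row 0, so only one artificial column is
# added (for row 1), giving
#   A -> [[1., 1., 0.], [2., 0., 1.]], c = [0., 0., 1.],
#   x = [0., 3., 4.], basis = [1, 2].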
def _select_singleton_columns(A, b):
"""
Finds singleton columns for which the singleton entry is of the same sign
as the right-hand side; these columns are eligible for inclusion in an
initial basis. Determines the rows in which the singleton entries are
located. For each of these rows, returns the indices of the one singleton
column and its corresponding row.
"""
    # find indices of all singleton columns and corresponding row indices
column_indices = np.nonzero(np.sum(np.abs(A) != 0, axis=0) == 1)[0]
columns = A[:, column_indices] # array of singleton columns
row_indices = np.zeros(len(column_indices), dtype=int)
nonzero_rows, nonzero_columns = np.nonzero(columns)
    row_indices[nonzero_columns] = nonzero_rows # corresponding row indices
# keep only singletons with entries that have same sign as RHS
# this is necessary because all elements of BFS must be non-negative
same_sign = A[row_indices, column_indices]*b[row_indices] >= 0
column_indices = column_indices[same_sign][::-1]
row_indices = row_indices[same_sign][::-1]
# Reversing the order so that steps below select rightmost columns
# for initial basis, which will tend to be slack variables. (If the
# guess corresponds with a basic feasible solution but a constraint
# is not satisfied with the corresponding slack variable zero, the slack
# variable must be basic.)
# for each row, keep rightmost singleton column with an entry in that row
unique_row_indices, first_columns = np.unique(row_indices,
return_index=True)
return column_indices[first_columns], unique_row_indices
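# Illustrative micro-example (hypothetical data):
#   A = np.array([[1., 2., 0.],
#                 [0., 3., 1.]])
#   b = np.array([4., 5.])
#   _select_singleton_columns(A, b)  # -> (array([0, 2]), array([0, 1]))
# i.e., column 0 is the eligible singleton for row 0 and column 2 for row 1.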
def _find_nonzero_rows(A, tol):
"""
Returns logical array indicating the locations of rows with at least
one nonzero element.
"""
return np.any(np.abs(A) > tol, axis=1)
def _select_enter_pivot(c_hat, bl, a, rule="bland", tol=1e-12):
"""
Selects a pivot to enter the basis. Currently Bland's rule - the smallest
index that has a negative reduced cost - is the default.
"""
if rule.lower() == "mrc": # index with minimum reduced cost
return a[~bl][np.argmin(c_hat)]
else: # smallest index w/ negative reduced cost
return a[~bl][c_hat < -tol][0]
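# Example: with nonbasic columns a[~bl] = [3, 7] and reduced costs
# c_hat = [-0.5, -2.0], "bland" returns 3 (smallest eligible index) while
# "mrc" returns 7 (most negative reduced cost); Bland's rule converges more
# slowly but cannot cycle.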
def _display_iter(phase, iteration, slack, con, fun):
"""
Print indicators of optimization status to the console.
"""
header = True if not iteration % 20 else False
if header:
print("Phase",
"Iteration",
"Minimum Slack ",
"Constraint Residual",
"Objective ")
# :<X.Y left aligns Y digits in X digit spaces
fmt = '{0:<6}{1:<10}{2:<20.13}{3:<20.13}{4:<20.13}'
try:
slack = np.min(slack)
except ValueError:
slack = "NA"
print(fmt.format(phase, iteration, slack, np.linalg.norm(con), fun))
def _phase_two(c, A, x, b, callback, postsolve_args, maxiter, tol, disp,
maxupdate, mast, pivot, iteration=0, phase_one_n=None):
"""
    The heart of the simplex method. Beginning with a basic feasible solution,
    moves to adjacent basic feasible solutions with successively lower
    objective value. Terminates when there are no adjacent basic feasible
    solutions with a lower objective value or if the problem is determined to
    be unbounded.
This implementation follows the revised simplex method based on LU
decomposition. Rather than maintaining a tableau or an inverse of the
basis matrix, we keep a factorization of the basis matrix that allows
efficient solution of linear systems while avoiding stability issues
associated with inverted matrices.
"""
m, n = A.shape
status = 0
a = np.arange(n) # indices of columns of A
ab = np.arange(m) # indices of columns of B
if maxupdate:
# basis matrix factorization object; similar to B = A[:, b]
B = BGLU(A, b, maxupdate, mast)
else:
B = LU(A, b)
for iteration in range(iteration, iteration + maxiter):
if disp or callback is not None:
if phase_one_n is not None:
phase = 1
x_postsolve = x[:phase_one_n]
else:
phase = 2
x_postsolve = x
x_o, fun, slack, con = _postsolve(x_postsolve,
postsolve_args)
if callback is not None:
res = OptimizeResult({'x': x_o, 'fun': fun, 'slack': slack,
'con': con, 'nit': iteration,
'phase': phase, 'complete': False,
'status': 0, 'message': "",
'success': False})
callback(res)
else:
_display_iter(phase, iteration, slack, con, fun)
bl = np.zeros(len(a), dtype=bool)
bl[b] = 1
xb = x[b] # basic variables
cb = c[b] # basic costs
try:
v = B.solve(cb, transposed=True) # similar to v = solve(B.T, cb)
except LinAlgError:
status = 4
break
# TODO: cythonize?
c_hat = c - v.dot(A) # reduced cost
c_hat = c_hat[~bl]
# Above is much faster than:
# N = A[:, ~bl] # slow!
# c_hat = c[~bl] - v.T.dot(N)
# Can we perform the multiplication only on the nonbasic columns?
if np.all(c_hat >= -tol): # all reduced costs positive -> terminate
break
j = _select_enter_pivot(c_hat, bl, a, rule=pivot, tol=tol)
u = B.solve(A[:, j]) # similar to u = solve(B, A[:, j])
i = u > tol # if none of the u are positive, unbounded
if not np.any(i):
status = 3
break
th = xb[i]/u[i]
l = np.argmin(th) # implicitly selects smallest subscript
th_star = th[l] # step size
x[b] = x[b] - th_star*u # take step
x[j] = th_star
B.update(ab[i][l], j) # modify basis
b = B.b # similar to b[ab[i][l]] = j
else:
status = 1
return x, b, status, iteration
def _linprog_rs(c, c0, A, b, x0, callback, postsolve_args,
maxiter=5000, tol=1e-12, disp=False,
maxupdate=10, mast=False, pivot="mrc",
**unknown_options):
"""
Solve the following linear programming problem via a two-phase
revised simplex algorithm.::
minimize: c @ x
subject to: A @ x == b
0 <= x < oo
Parameters
----------
c : 1-D array
Coefficients of the linear objective function to be minimized.
c0 : float
Constant term in objective function due to fixed (and eliminated)
variables. (Currently unused.)
A : 2-D array
2-D array which, when matrix-multiplied by ``x``, gives the values of
the equality constraints at ``x``.
b : 1-D array
1-D array of values representing the RHS of each equality constraint
(row) in ``A_eq``.
x0 : 1-D array, optional
Starting values of the independent variables, which will be refined by
the optimization algorithm. For the revised simplex method, these must
correspond with a basic feasible solution.
callback : callable, optional
If a callback function is provided, it will be called within each
iteration of the algorithm. The callback function must accept a single
`scipy.optimize.OptimizeResult` consisting of the following fields:
x : 1-D array
Current solution vector.
fun : float
Current value of the objective function ``c @ x``.
success : bool
True only when an algorithm has completed successfully,
so this is always False as the callback function is called
only while the algorithm is still iterating.
slack : 1-D array
The values of the slack variables. Each slack variable
corresponds to an inequality constraint. If the slack is zero,
the corresponding constraint is active.
con : 1-D array
The (nominally zero) residuals of the equality constraints,
that is, ``b - A_eq @ x``.
phase : int
The phase of the algorithm being executed.
status : int
For revised simplex, this is always 0 because if a different
status is detected, the algorithm terminates.
nit : int
The number of iterations performed.
message : str
A string descriptor of the exit status of the optimization.
postsolve_args : tuple
Data needed by _postsolve to convert the solution to the standard-form
problem into the solution to the original problem.
Options
-------
maxiter : int
The maximum number of iterations to perform in either phase.
tol : float
The tolerance which determines when a solution is "close enough" to
zero in Phase 1 to be considered a basic feasible solution or close
enough to positive to serve as an optimal solution.
disp : bool
Set to ``True`` if indicators of optimization status are to be printed
to the console each iteration.
maxupdate : int
The maximum number of updates performed on the LU factorization.
        After this many updates, the basis matrix is refactorized
        from scratch.
mast : bool
Minimize Amortized Solve Time. If enabled, the average time to solve
a linear system using the basis factorization is measured. Typically,
the average solve time will decrease with each successive solve after
initial factorization, as factorization takes much more time than the
solve operation (and updates). Eventually, however, the updated
factorization becomes sufficiently complex that the average solve time
begins to increase. When this is detected, the basis is refactorized
from scratch. Enable this option to maximize speed at the risk of
nondeterministic behavior. Ignored if ``maxupdate`` is 0.
pivot : "mrc" or "bland"
Pivot rule: Minimum Reduced Cost (default) or Bland's rule. Choose
Bland's rule if iteration limit is reached and cycling is suspected.
unknown_options : dict
Optional arguments not used by this particular solver. If
`unknown_options` is non-empty a warning is issued listing all
unused options.
Returns
-------
x : 1-D array
Solution vector.
status : int
An integer representing the exit status of the optimization::
0 : Optimization terminated successfully
1 : Iteration limit reached
2 : Problem appears to be infeasible
3 : Problem appears to be unbounded
4 : Numerical difficulties encountered
5 : No constraints; turn presolve on
6 : Guess x0 cannot be converted to a basic feasible solution
message : str
A string descriptor of the exit status of the optimization.
iteration : int
The number of iterations taken to solve the problem.
"""
_check_unknown_options(unknown_options)
messages = ["Optimization terminated successfully.",
"Iteration limit reached.",
"The problem appears infeasible, as the phase one auxiliary "
"problem terminated successfully with a residual of {0:.1e}, "
"greater than the tolerance {1} required for the solution to "
"be considered feasible. Consider increasing the tolerance to "
"be greater than {0:.1e}. If this tolerance is unnaceptably "
"large, the problem is likely infeasible.",
"The problem is unbounded, as the simplex algorithm found "
"a basic feasible solution from which there is a direction "
"with negative reduced cost in which all decision variables "
"increase.",
"Numerical difficulties encountered; consider trying "
"method='interior-point'.",
"Problems with no constraints are trivially solved; please "
"turn presolve on.",
"The guess x0 cannot be converted to a basic feasible "
"solution. "
]
if A.size == 0: # address test_unbounded_below_no_presolve_corrected
return np.zeros(c.shape), 5, messages[5], 0
x, basis, A, b, residual, status, iteration = (
_phase_one(A, b, x0, callback, postsolve_args,
maxiter, tol, disp, maxupdate, mast, pivot))
if status == 0:
x, basis, status, iteration = _phase_two(c, A, x, basis, callback,
postsolve_args,
maxiter, tol, disp,
maxupdate, mast, pivot,
iteration)
return x, status, messages[status].format(residual, tol), iteration
|
{"hexsha": "7c535e2d792fe45341e84fbf086cc7e6c64a2fc1", "size": 22622, "ext": "py", "lang": "Python", "max_stars_repo_path": "scipy/optimize/_linprog_rs.py", "max_stars_repo_name": "Corallus-Caninus/scipy", "max_stars_repo_head_hexsha": "c734dacd61c5962a86ab3cc4bf2891fc94b720a6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-25T08:49:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-25T08:49:10.000Z", "max_issues_repo_path": "scipy/optimize/_linprog_rs.py", "max_issues_repo_name": "Corallus-Caninus/scipy", "max_issues_repo_head_hexsha": "c734dacd61c5962a86ab3cc4bf2891fc94b720a6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-09-01T01:19:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-11T01:06:05.000Z", "max_forks_repo_path": "scipy/optimize/_linprog_rs.py", "max_forks_repo_name": "Corallus-Caninus/scipy", "max_forks_repo_head_hexsha": "c734dacd61c5962a86ab3cc4bf2891fc94b720a6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-19T12:49:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-19T12:49:26.000Z", "avg_line_length": 40.6140035907, "max_line_length": 79, "alphanum_fraction": 0.6224913801, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 5209}
|
import pytest
import numpy as np
def test_ndarray():
from rfweblab.serialize import pack_ndarray, unpack_ndarray
from rfweblab.serialize import dtype_to_fmt
arr = np.random.rand(10) - 0.5
for dtype in dtype_to_fmt:
if not dtype.isbuiltin:
continue
obj = arr.astype(dtype)
enc = pack_ndarray(obj)
dec, pos = unpack_ndarray(enc, dtype, pos=0)
assert np.allclose(dec, obj)
arr = np.random.randn(10, 2, 3, 7) * 10
for dtype in dtype_to_fmt:
if not dtype.isbuiltin:
continue
obj = arr.astype(dtype)
enc = pack_ndarray(obj)
dec, pos = unpack_ndarray(enc, dtype, pos=0)
assert np.allclose(dec, obj)
def test_shape():
from rfweblab.serialize import pack_shape, unpack_shape
with pytest.raises(AssertionError):
pack_shape(257 * [129], fmt="I")
pack_shape(251 * [129], fmt="I")
shape = tuple(range(255))
assert shape == unpack_shape(pack_shape(shape, "I"), 0, "I")[0]
assert shape == unpack_shape(pack_shape(shape, "B"), 0, "B")[0]
def test_fields():
from rfweblab.serialize import pack_fields, unpack_fields
with pytest.raises(AssertionError):
fields = tuple(map(str, range(257)))
pack_fields(fields)
fields = tuple(map(str, range(255)))
assert fields == unpack_fields(pack_fields(fields), 0)[0]
def test_serialization():
from rfweblab.serialize import serialize, deserialize
def sede_helper(obj, pos=0):
res, pos = deserialize(serialize(obj), pos)
return res
assert sede_helper({}) == {}
assert sede_helper([]) == []
obj = "dfsg*N**N ( _URU(U£ª^¨º•˙ ∫ †ƒ#¶ªç€¶¢"
assert sede_helper(obj) == obj
obj = {"foo": "foo", "bar": 12_34_56, "baz": np.euler_gamma}
assert sede_helper(obj) == obj
obj = [["foo", "bar"], ["bar", "baz"]]
assert (sede_helper(obj) == obj).all()
|
{"hexsha": "333d30518f7ceb55aefdf5d94cffdc94c57a4746", "size": 1925, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_serialize.py", "max_stars_repo_name": "ivannz/pyRFWebLab", "max_stars_repo_head_hexsha": "2f0d8edcd7ee2396243ba06f2e0ae7d500b2111c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-09-27T17:30:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T15:19:17.000Z", "max_issues_repo_path": "tests/test_serialize.py", "max_issues_repo_name": "ivannz/pyRFWebLab", "max_issues_repo_head_hexsha": "2f0d8edcd7ee2396243ba06f2e0ae7d500b2111c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_serialize.py", "max_forks_repo_name": "ivannz/pyRFWebLab", "max_forks_repo_head_hexsha": "2f0d8edcd7ee2396243ba06f2e0ae7d500b2111c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-03T10:42:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-03T10:42:11.000Z", "avg_line_length": 27.1126760563, "max_line_length": 67, "alphanum_fraction": 0.6223376623, "include": true, "reason": "import numpy", "num_tokens": 537}
|
import numpy as np
class NMF:
"""
    Computes NMF and stores the resulting values.
    Attributes
    ----------
    W, H: numpy.ndarray
        Matrices used in the computation
    loss_LOG: list
        Log of the objective-function values over iterations
    epsilon: float
        Small constant to avoid division by zero
"""
W = None
H = None
loss_LOG = None
# probdist_V = None
epsilon = None
def __init__(self, epsilon=1e-7, seed=None):
"""
Parameters
----------
        epsilon: float
            Small constant to avoid division by zero
        seed: int
            Random seed
"""
self.epsilon = epsilon
np.random.seed(seed)
def calc(self, V, r, iteration=500, save=False, use_cache=False, filename=None):
"""
        Run the NMF computation.
        Parameters
        ----------
        V: numpy.ndarray
            Original data matrix
        r: int
            Number of basis vectors (factorization rank)
        iteration: int
            Number of update iterations
        save: boolean
            Whether to save the results after computation
        use_cache: boolean
            Whether to reuse a previously saved result
        filename: str
            Filename string used when saving
"""
        if use_cache:
            self.load_data(filename)
            return
if (r <= 0):
print("NMF calc error!!")
print("r must be greater than zero.")
exit()
# self.probdist_V = self.prob_dist(V)
self.W = np.random.rand(V.shape[0], r)
self.H = np.random.rand(r, V.shape[1])
self.loss_LOG = []
for i in range(iteration):
self.update(V)
loss = self.kl_divergence(V)
# loss = self.frobenius_norm(V)
self.loss_LOG.append(loss)
        if save:
            self.save_data(filename)
def update(self, V):
"""
        NMF update step
        (Lee & Seung multiplicative-update algorithm)
        Parameters
        ----------
        V: numpy.ndarray
            Original data matrix
"""
self.W = self.W * np.dot(V / (np.dot(self.W, self.H) + self.epsilon), self.H.T)
self.W = self.W / np.tile(np.sum(self.W, axis=0), (self.W.shape[0], 1))
self.H = self.H * np.dot(self.W.T, (V / (np.dot(self.W, self.H) + self.epsilon)))
def kl_divergence(self, V):
"""
KL-divergenceを計算する.
Parameters
----------
V: numpy.adarray
オリジナルのデータ
Returns
----------
F: float
KL-divergence
"""
WH = np.dot(self.W, self.H) + self.epsilon
F = np.sum(np.multiply(V, np.log(WH)) - WH)
# F = np.sum(np.multiply(V, np.log(WH)) - WH) / V.shape[1]
# F = np.sum(np.multiply(V, np.log(WH)) - WH) / (V.shape[0] * V.shape[1])
return F
def frobenius_norm(self, V):
"""
frobeniusノルムを計算する.
Parameters
----------
V: numpy.adarray
オリジナルのデータ
Returns
----------
F: float
frobeniusノルム
"""
WH = np.dot(self.W, self.H) + self.epsilon
F = np.linalg.norm(V - WH)
return F
def prob_dist(self, V):
"""
行列をaxis=0の方向で確率分布の行列にする.
Parameters
----------
V: numpy.adarray
行列
Returns
----------
numpy.adarray
確率分布になった行列
"""
return V / np.tile(np.sum(V, axis=0), (V.shape[0], 1))
def save_data(self, filename=None):
"""
計算後のW,H,lossのログを保存する.
連続して実験するときに使う.
Parameters
----------
filename: str
ファイル名文字列
"""
if filename is None:
fn = 'tmp'
else:
fn = str(filename)
np.savez(fn, W=self.W, H=self.H, loss=np.array(self.loss_LOG))
def load_data(self, filename=None):
"""
計算後のW,H,lossのログを読み込む.
連続して実験するときに使う.
Parameters
----------
filename: str
ファイル名文字列
"""
if filename is None:
fn = 'tmp'
else:
fn = str(filename)
load_array = np.load(fn + '.npz')
self.W = load_array['W']
self.H = load_array['H']
self.loss_LOG = load_array['loss'].tolist()
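# A minimal usage sketch (hypothetical, not part of the module): factor a
# random non-negative matrix into r = 4 bases and inspect the objective log.
if __name__ == "__main__":
    V = np.abs(np.random.randn(64, 100))
    nmf = NMF(epsilon=1e-7, seed=0)
    nmf.calc(V, r=4, iteration=200)
    print("final objective:", nmf.loss_LOG[-1])
    print("reconstruction error:", nmf.frobenius_norm(V))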
|
{"hexsha": "92f58d61681f301e8c459ff407f23529b74f0b78", "size": 4063, "ext": "py", "lang": "Python", "max_stars_repo_path": "nmf/modules/nmf.py", "max_stars_repo_name": "nssuperx/irl334-research-srcs", "max_stars_repo_head_hexsha": "7b1cf0ca16541613740e22aad373c5169abd246e", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nmf/modules/nmf.py", "max_issues_repo_name": "nssuperx/irl334-research-srcs", "max_issues_repo_head_hexsha": "7b1cf0ca16541613740e22aad373c5169abd246e", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nmf/modules/nmf.py", "max_forks_repo_name": "nssuperx/irl334-research-srcs", "max_forks_repo_head_hexsha": "7b1cf0ca16541613740e22aad373c5169abd246e", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3241758242, "max_line_length": 89, "alphanum_fraction": 0.4568053163, "include": true, "reason": "import numpy", "num_tokens": 1240}
|
/*
* Copyright 2019 GridGain Systems, Inc. and Contributors.
*
* Licensed under the GridGain Community Edition License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <boost/test/unit_test.hpp>
#include "sql_test_suite_fixture.h"
using namespace ignite;
using namespace boost::unit_test;
BOOST_FIXTURE_TEST_SUITE(SqlSystemFunctionTestSuite, ignite::SqlTestSuiteFixture)
BOOST_AUTO_TEST_CASE(TestSystemFunctionDatabase)
{
CheckSingleResult<std::string>("SELECT {fn DATABASE()}");
}
BOOST_AUTO_TEST_CASE(TestSystemFunctionUser)
{
CheckSingleResult<std::string>("SELECT {fn USER()}");
}
BOOST_AUTO_TEST_CASE(TestSystemFunctionIfnull)
{
CheckSingleResult<SQLINTEGER>("SELECT {fn IFNULL(NULL, 42)}", 42);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "aff1903591ebf866ece0ce3005b855d48a07317c", "size": 1277, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp", "max_stars_repo_name": "FedorUporov/gridgain", "max_stars_repo_head_hexsha": "883125f943743fa8198d88be98dfe61bde86ad96", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 218.0, "max_stars_repo_stars_event_min_datetime": "2015-01-04T13:20:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T05:28:55.000Z", "max_issues_repo_path": "modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp", "max_issues_repo_name": "FedorUporov/gridgain", "max_issues_repo_head_hexsha": "883125f943743fa8198d88be98dfe61bde86ad96", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 175.0, "max_issues_repo_issues_event_min_datetime": "2015-02-04T23:16:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T18:34:24.000Z", "max_forks_repo_path": "modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp", "max_forks_repo_name": "FedorUporov/gridgain", "max_forks_repo_head_hexsha": "883125f943743fa8198d88be98dfe61bde86ad96", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 93.0, "max_forks_repo_forks_event_min_datetime": "2015-01-06T20:54:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:09:00.000Z", "avg_line_length": 29.6976744186, "max_line_length": 102, "alphanum_fraction": 0.7729052467, "num_tokens": 274}
|
#include <sstream>
#include <array>
#include "bw64/bw64.hpp"
#define BOOST_TEST_MODULE ChunkTests
#include <boost/test/included/unit_test.hpp>
using namespace bw64;
BOOST_AUTO_TEST_CASE(rect_16bit) {
Bw64Reader bw64File("testfiles/rect_16bit.wav");
BOOST_TEST(bw64File.bitDepth() == 16);
BOOST_TEST(bw64File.sampleRate() == 44100);
BOOST_TEST(bw64File.channels() == 2);
BOOST_TEST(bw64File.numberOfFrames() == 22050);
}
BOOST_AUTO_TEST_CASE(rect_24bit) {
Bw64Reader bw64File("testfiles/rect_24bit.wav");
BOOST_TEST(bw64File.bitDepth() == 24);
BOOST_TEST(bw64File.sampleRate() == 44100);
BOOST_TEST(bw64File.channels() == 2);
BOOST_TEST(bw64File.numberOfFrames() == 22050);
}
BOOST_AUTO_TEST_CASE(rect_32bit) {
Bw64Reader bw64File("testfiles/rect_32bit.wav");
BOOST_TEST(bw64File.bitDepth() == 32);
BOOST_TEST(bw64File.sampleRate() == 44100);
BOOST_TEST(bw64File.channels() == 2);
BOOST_TEST(bw64File.numberOfFrames() == 22050);
}
BOOST_AUTO_TEST_CASE(rect_24bit_rf64) {
Bw64Reader bw64File("testfiles/rect_24bit_rf64.wav");
BOOST_TEST(bw64File.bitDepth() == 24);
BOOST_TEST(bw64File.sampleRate() == 44100);
BOOST_TEST(bw64File.channels() == 2);
BOOST_TEST(bw64File.numberOfFrames() == 22050);
}
BOOST_AUTO_TEST_CASE(rect_24bit_noriff) {
BOOST_CHECK_THROW(Bw64Reader("testfiles/rect_24bit_noriff.wav"),
std::runtime_error);
}
BOOST_AUTO_TEST_CASE(rect_24bit_nowave) {
BOOST_CHECK_THROW(Bw64Reader("testfiles/rect_24bit_nowave.wav"),
std::runtime_error);
}
BOOST_AUTO_TEST_CASE(rect_24bit_wrong_fmt_size) {
BOOST_CHECK_THROW(Bw64Reader("testfiles/rect_24bit_wrong_fmt_size.wav"),
std::runtime_error);
}
BOOST_AUTO_TEST_CASE(noise_24bit_uneven_data_chunk_size) {
Bw64Reader bw64File("testfiles/noise_24bit_uneven_data_chunk_size.wav");
BOOST_TEST(bw64File.bitDepth() == 24);
BOOST_TEST(bw64File.sampleRate() == 44100);
BOOST_TEST(bw64File.channels() == 1);
BOOST_TEST(bw64File.numberOfFrames() == 13);
BOOST_CHECK(bw64File.chnaChunk() != nullptr);
}
BOOST_AUTO_TEST_CASE(can_read_all_frames) {
Bw64Reader bw64File("testfiles/noise_24bit_uneven_data_chunk_size.wav");
BOOST_TEST(bw64File.numberOfFrames() == 13);
std::array<float, 13> readBuffer;
int readSamples = bw64File.read(readBuffer.data(), 13);
BOOST_TEST(readSamples == 13);
}
|
{"hexsha": "de1ea1de0686c8dfed70542ee5f15345cb447363", "size": 2371, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/file_tests.cpp", "max_stars_repo_name": "rsjtaylor/libbw64", "max_stars_repo_head_hexsha": "487efc9cc421e93128f775597a77f8596eddd586", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/file_tests.cpp", "max_issues_repo_name": "rsjtaylor/libbw64", "max_issues_repo_head_hexsha": "487efc9cc421e93128f775597a77f8596eddd586", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/file_tests.cpp", "max_forks_repo_name": "rsjtaylor/libbw64", "max_forks_repo_head_hexsha": "487efc9cc421e93128f775597a77f8596eddd586", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4794520548, "max_line_length": 74, "alphanum_fraction": 0.7448334036, "num_tokens": 707}
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
data = pd.read_csv('../ECGFiveDays_TRAIN', sep=',', header=None)
label = data.pop(data.columns[0])
def plot_motif(Ta, Tb, values, indexes, m):
from matplotlib import gridspec
plt.figure(figsize=(8,4))
plt.subplot(211)
plt.plot(Ta, linestyle='--', alpha=0.5)
plt.xlim((0, len(Ta)))
print(np.argmax(values))
plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g', label='Top Motif')
plt.plot(range(np.argmax(values), np.argmax(values) + m), Ta[np.argmax(values):np.argmax(values) + m], c='r', label='Top Discord')
plt.legend(loc='best')
plt.title('Time-Series')
plt.subplot(212)
plt.title('Matrix Profile')
plt.plot(range(0, len(values)), values, '#ff5722')
plt.plot(np.argmax(values), np.max(values), marker='x', c='r', ms=10)
plt.plot(np.argmin(values), np.min(values), marker='^', c='g', ms=10)
plt.xlim((0, len(Ta)))
plt.xlabel('Index')
plt.ylabel('Value')
plt.show()
print(data.head())
ts = data.iloc[:10].values.flatten()  # concatenate the first 10 series of the training set into one time series
from owlpy.core import *
Pab, Iab = stamp(ts,ts,100) # run the STAMP algorithm to compute the Matrix Profile
plot_motif(ts,ts,Pab,Iab,100)
|
{"hexsha": "a9436286290799560b5be585eae342860d99921e", "size": 1346, "ext": "py", "lang": "Python", "max_stars_repo_path": "owlpy/test_motif_discovery.py", "max_stars_repo_name": "dschowta/owlpy", "max_stars_repo_head_hexsha": "c614aa89ae55256727db65867915126da585aa3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2017-06-30T13:07:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T21:29:08.000Z", "max_issues_repo_path": "owlpy/test_motif_discovery.py", "max_issues_repo_name": "dschowta/owlpy", "max_issues_repo_head_hexsha": "c614aa89ae55256727db65867915126da585aa3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-03-26T11:27:24.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-26T11:27:24.000Z", "max_forks_repo_path": "owlpy/test_motif_discovery.py", "max_forks_repo_name": "dschowta/owlpy", "max_forks_repo_head_hexsha": "c614aa89ae55256727db65867915126da585aa3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2017-11-15T06:12:52.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-11T22:45:21.000Z", "avg_line_length": 32.8292682927, "max_line_length": 134, "alphanum_fraction": 0.6374442793, "include": true, "reason": "import numpy", "num_tokens": 378}
|
[STATEMENT]
lemma set_partition_by_median:
"(l, m, r) = partition_by_median k ps \<Longrightarrow> set ps = set l \<union> set r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (l, m, r) = partition_by_median k ps \<Longrightarrow> set ps = set l \<union> set r
[PROOF STEP]
unfolding partition_by_median_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (l, m, r) = (let m = axis_median k ps; (l, r) = partition (\<lambda>p. p $ k \<le> m) ps in (l, m, r)) \<Longrightarrow> set ps = set l \<union> set r
[PROOF STEP]
by (auto simp: Let_def)
|
{"llama_tokens": 223, "file": "KD_Tree_Build", "length": 2}
|
module f_python
use f_precisions, only: f_address
implicit none
  !> Equivalent type to the numpy ndarray, used to export
  ! a Fortran array into Python space.
type ndarray
integer(f_address) :: data
integer :: ndims
integer, dimension(7) :: shapes
character(len = 2) :: kind
end type ndarray
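  !> Note: the kind component follows the numpy dtype string convention,
  !  e.g. "i4" for 4-byte integers and "f8" for double precision reals.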
interface toNdArray
module procedure i0_to_ndarray, i1_to_ndarray, i2_to_ndarray
module procedure f0_to_ndarray, f1_to_ndarray, f2_to_ndarray
end interface toNdArray
interface toNdArray_ptr
module procedure pi0_to_ndarray, pi1_to_ndarray, pi2_to_ndarray
module procedure pf0_to_ndarray, pf1_to_ndarray, pf2_to_ndarray
end interface toNdArray_ptr
interface
subroutine f_python_initialize(iproc, nproc, igroup, ngroup)
implicit none
integer, intent(in) :: iproc, nproc, igroup, ngroup
end subroutine f_python_initialize
end interface
interface
subroutine f_python_finalize()
implicit none
end subroutine f_python_finalize
end interface
interface
subroutine f_python_execute_dict(dict, status)
use dictionaries, only: dictionary
implicit none
type(dictionary), pointer :: dict
integer, intent(out) :: status
end subroutine f_python_execute_dict
end interface
interface
subroutine f_python_execute(script, status)
implicit none
character(len = *), intent(in) :: script
integer, intent(out) :: status
end subroutine f_python_execute
end interface
contains
subroutine ndarray_new(ptr, obj)
use f_precisions, only: f_loc
implicit none
type(ndarray), pointer :: ptr
integer(f_address), intent(out) :: obj
allocate(ptr)
ptr = ndarray_null()
obj = f_loc(ptr)
end subroutine ndarray_new
subroutine ndarray_free(ptr)
implicit none
type(ndarray), pointer :: ptr
deallocate(ptr)
end subroutine ndarray_free
function ndarray_null() result(arr)
type(ndarray) :: arr
arr%data = int(0, f_address)
arr%ndims = 0
end function ndarray_null
function i0_to_ndarray(data) result(arr)
use f_precisions, only: f_loc
implicit none
integer, intent(in) :: data
type(ndarray) :: arr
arr%data = f_loc(data)
arr%ndims = 0
!arr%shapes = shape(data)
arr%kind = "i4"
end function i0_to_ndarray
function i1_to_ndarray(data) result(arr)
use f_precisions, only: f_loc
implicit none
integer, dimension(:), intent(in) :: data
type(ndarray) :: arr
arr%data = f_loc(data(1))
arr%ndims = 1
arr%shapes(1:1) = shape(data)
arr%kind = "i4"
end function i1_to_ndarray
function i2_to_ndarray(data) result(arr)
use f_precisions, only: f_loc
implicit none
integer, dimension(:, :), intent(in) :: data
type(ndarray) :: arr
arr%data = f_loc(data(1,1))
arr%ndims = 2
arr%shapes(1:2) = shape(data)
arr%kind = "i4"
end function i2_to_ndarray
function f0_to_ndarray(data) result(arr)
use f_precisions, only: f_loc
implicit none
double precision, intent(in) :: data
type(ndarray) :: arr
arr%data = f_loc(data)
arr%ndims = 0
!arr%shapes = shape(data)
arr%kind = "f8"
end function f0_to_ndarray
function f1_to_ndarray(data) result(arr)
use f_precisions, only: f_loc
implicit none
double precision, dimension(:), intent(in) :: data
type(ndarray) :: arr
arr%data = f_loc(data(1))
arr%ndims = 1
arr%shapes(1:1) = shape(data)
arr%kind = "f8"
end function f1_to_ndarray
function f2_to_ndarray(data) result(arr)
use f_precisions, only: f_loc
implicit none
double precision, dimension(:, :), intent(in) :: data
type(ndarray) :: arr
arr%data = f_loc(data(1,1))
arr%ndims = 2
arr%shapes(1:2) = shape(data)
arr%kind = "f8"
end function f2_to_ndarray
function pi0_to_ndarray(data) result(arr)
implicit none
integer, pointer :: data
type(ndarray) :: arr
if (associated(data)) then
arr = i0_to_ndarray(data)
else
arr = ndarray_null()
end if
end function pi0_to_ndarray
function pi1_to_ndarray(data) result(arr)
implicit none
integer, dimension(:), pointer :: data
type(ndarray) :: arr
if (associated(data)) then
arr = i1_to_ndarray(data)
else
arr = ndarray_null()
end if
end function pi1_to_ndarray
function pi2_to_ndarray(data) result(arr)
implicit none
integer, dimension(:, :), pointer :: data
type(ndarray) :: arr
if (associated(data)) then
arr = i2_to_ndarray(data)
else
arr = ndarray_null()
end if
end function pi2_to_ndarray
function pf0_to_ndarray(data) result(arr)
implicit none
double precision, pointer :: data
type(ndarray) :: arr
if (associated(data)) then
arr = f0_to_ndarray(data)
else
arr = ndarray_null()
end if
end function pf0_to_ndarray
function pf1_to_ndarray(data) result(arr)
implicit none
double precision, dimension(:), pointer :: data
type(ndarray) :: arr
if (associated(data)) then
arr = f1_to_ndarray(data)
else
arr = ndarray_null()
end if
end function pf1_to_ndarray
function pf2_to_ndarray(data) result(arr)
implicit none
double precision, dimension(:, :), pointer :: data
type(ndarray) :: arr
if (associated(data)) then
arr = f2_to_ndarray(data)
else
arr = ndarray_null()
end if
end function pf2_to_ndarray
end module f_python
subroutine f_python_ndarray_init()
use f_python
call f_object_add_method("class", "ndarray_new", ndarray_new, 1)
call f_object_add_method("class", "ndarray_free", ndarray_free, 0)
end subroutine f_python_ndarray_init
subroutine f_python_ndarray_get(arr, data, ndims, shapes, kind)
use f_python, only: ndarray
use f_precisions, only: f_address
implicit none
type(ndarray), intent(in) :: arr
integer(f_address), intent(out) :: data
integer, intent(out) :: ndims
integer, dimension(7), intent(out) :: shapes
character(len = 2), intent(out) :: kind
data = arr%data
ndims = arr%ndims
shapes = arr%shapes
kind = arr%kind
end subroutine f_python_ndarray_get
|
{"hexsha": "f77326f02b36daa4f53b2b7fb3de31916215f2fe", "size": 6252, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "aiida_bigdft/futile/flib/fpython.f90", "max_stars_repo_name": "adegomme/aiida-bigdft-plugin", "max_stars_repo_head_hexsha": "dfd17f166a8cd547d3e581c7c3c9f4eb32bd2aab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-10T02:45:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-05T18:55:05.000Z", "max_issues_repo_path": "aiida_bigdft/futile/flib/fpython.f90", "max_issues_repo_name": "mikiec84/aiida-bigdft-plugin", "max_issues_repo_head_hexsha": "ce6ddc69def97977fe0209861ea7f1637090b60f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-12-15T19:35:34.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-07T15:32:18.000Z", "max_forks_repo_path": "aiida_bigdft/futile/flib/fpython.f90", "max_forks_repo_name": "mikiec84/aiida-bigdft-plugin", "max_forks_repo_head_hexsha": "ce6ddc69def97977fe0209861ea7f1637090b60f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-05T18:55:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T18:55:21.000Z", "avg_line_length": 24.9083665339, "max_line_length": 68, "alphanum_fraction": 0.6748240563, "num_tokens": 1748}
|
using Distributions, StatsBase
using JLD, PyPlot
for set in ["set1", "set2", "set3", "set4"]
file = jldopen("res/power_$(set).jld", "r")
power = read(file, "power")
close(file)
fig = figure(figsize=(3.14961, 3.14961), dpi=1000)
scatter(-.75:.05:.75, power[1, :], s=5, marker="o")
scatter(-.75:.05:.75, power[2, :], s=5, marker="v")
minorticks_on()
grid()
grid(which="minor", ls="dotted", lw=".5")
xlabel("δ", size="xx-small")
ylabel("proportion of rejections", size="xx-small")
ylim(0, 1.05)
tick_params("both", labelsize="xx-small")
tight_layout()
savefig("res/exper2_$(set).pdf")
close(fig)
end
|
{"hexsha": "5a6bee7e88d4ebb3f76256344e4be0dd76a3c019", "size": 671, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "exper/exper2/plot_res.jl", "max_stars_repo_name": "mlakolar/KLIEPInference.jl", "max_stars_repo_head_hexsha": "56b2979791b1b6d4e0c1f2f66ed27dfe86aa52e8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-05T11:01:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-05T11:01:01.000Z", "max_issues_repo_path": "exper/exper2/plot_res.jl", "max_issues_repo_name": "mlakolar/KLIEPInference.jl", "max_issues_repo_head_hexsha": "56b2979791b1b6d4e0c1f2f66ed27dfe86aa52e8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-29T18:44:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-29T18:44:07.000Z", "max_forks_repo_path": "exper/exper2/plot_res.jl", "max_forks_repo_name": "mlakolar/KLIEPInference.jl", "max_forks_repo_head_hexsha": "56b2979791b1b6d4e0c1f2f66ed27dfe86aa52e8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6451612903, "max_line_length": 55, "alphanum_fraction": 0.5871833085, "num_tokens": 227}
|
header {* \isaheader{SDG} *}
theory SDG imports CFGExit_wf Postdomination begin
subsection {* The nodes of the SDG *}
datatype 'node SDG_node =
CFG_node 'node
| Formal_in "'node \<times> nat"
| Formal_out "'node \<times> nat"
| Actual_in "'node \<times> nat"
| Actual_out "'node \<times> nat"
fun parent_node :: "'node SDG_node \<Rightarrow> 'node"
where "parent_node (CFG_node n) = n"
| "parent_node (Formal_in (m,x)) = m"
| "parent_node (Formal_out (m,x)) = m"
| "parent_node (Actual_in (m,x)) = m"
| "parent_node (Actual_out (m,x)) = m"
locale SDG = CFGExit_wf sourcenode targetnode kind valid_edge Entry
get_proc get_return_edges procs Main Exit Def Use ParamDefs ParamUses +
Postdomination sourcenode targetnode kind valid_edge Entry
get_proc get_return_edges procs Main Exit
for sourcenode :: "'edge \<Rightarrow> 'node" and targetnode :: "'edge \<Rightarrow> 'node"
and kind :: "'edge \<Rightarrow> ('var,'val,'ret,'pname) edge_kind"
and valid_edge :: "'edge \<Rightarrow> bool"
and Entry :: "'node" ("'('_Entry'_')") and get_proc :: "'node \<Rightarrow> 'pname"
and get_return_edges :: "'edge \<Rightarrow> 'edge set"
and procs :: "('pname \<times> 'var list \<times> 'var list) list" and Main :: "'pname"
and Exit::"'node" ("'('_Exit'_')")
and Def :: "'node \<Rightarrow> 'var set" and Use :: "'node \<Rightarrow> 'var set"
and ParamDefs :: "'node \<Rightarrow> 'var list" and ParamUses :: "'node \<Rightarrow> 'var set list"
begin
fun valid_SDG_node :: "'node SDG_node \<Rightarrow> bool"
where "valid_SDG_node (CFG_node n) \<longleftrightarrow> valid_node n"
| "valid_SDG_node (Formal_in (m,x)) \<longleftrightarrow>
(\<exists>a Q r p fs ins outs. valid_edge a \<and> (kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs) \<and> targetnode a = m \<and>
(p,ins,outs) \<in> set procs \<and> x < length ins)"
| "valid_SDG_node (Formal_out (m,x)) \<longleftrightarrow>
(\<exists>a Q p f ins outs. valid_edge a \<and> (kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f) \<and> sourcenode a = m \<and>
(p,ins,outs) \<in> set procs \<and> x < length outs)"
| "valid_SDG_node (Actual_in (m,x)) \<longleftrightarrow>
(\<exists>a Q r p fs ins outs. valid_edge a \<and> (kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs) \<and> sourcenode a = m \<and>
(p,ins,outs) \<in> set procs \<and> x < length ins)"
| "valid_SDG_node (Actual_out (m,x)) \<longleftrightarrow>
(\<exists>a Q p f ins outs. valid_edge a \<and> (kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f) \<and> targetnode a = m \<and>
(p,ins,outs) \<in> set procs \<and> x < length outs)"
lemma valid_SDG_CFG_node:
"valid_SDG_node n \<Longrightarrow> valid_node (parent_node n)"
by(cases n) auto
lemma Formal_in_parent_det:
assumes "valid_SDG_node (Formal_in (m,x))" and "valid_SDG_node (Formal_in (m',x'))"
and "get_proc m = get_proc m'"
shows "m = m'"
proof -
from `valid_SDG_node (Formal_in (m,x))` obtain a Q r p fs ins outs
where "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "targetnode a = m"
and "(p,ins,outs) \<in> set procs" and "x < length ins" by fastforce
from `valid_SDG_node (Formal_in (m',x'))` obtain a' Q' r' p' f' ins' outs'
where "valid_edge a'" and "kind a' = Q':r'\<hookrightarrow>\<^bsub>p'\<^esub>f'" and "targetnode a' = m'"
and "(p',ins',outs') \<in> set procs" and "x' < length ins'" by fastforce
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `targetnode a = m`
have "get_proc m = p" by(fastforce intro:get_proc_call)
moreover
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p'\<^esub>f'` `targetnode a' = m'`
have "get_proc m' = p'" by(fastforce intro:get_proc_call)
ultimately have "p = p'" using `get_proc m = get_proc m'` by simp
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p'\<^esub>f'`
`targetnode a = m` `targetnode a' = m'`
show ?thesis by(fastforce intro:same_proc_call_unique_target)
qed
lemma valid_SDG_node_parent_Entry:
assumes "valid_SDG_node n" and "parent_node n = (_Entry_)"
shows "n = CFG_node (_Entry_)"
proof(cases n)
case CFG_node with `parent_node n = (_Entry_)` show ?thesis by simp
next
case (Formal_in z)
with `parent_node n = (_Entry_)` obtain x
where [simp]:"z = ((_Entry_),x)" by(cases z) auto
with `valid_SDG_node n` Formal_in obtain a where "valid_edge a"
and "targetnode a = (_Entry_)" by auto
hence False by -(rule Entry_target,simp+)
thus ?thesis by simp
next
case (Formal_out z)
with `parent_node n = (_Entry_)` obtain x
where [simp]:"z = ((_Entry_),x)" by(cases z) auto
with `valid_SDG_node n` Formal_out obtain a Q p f where "valid_edge a"
and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f" and "sourcenode a = (_Entry_)" by auto
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` have "get_proc (sourcenode a) = p"
by(rule get_proc_return)
with `sourcenode a = (_Entry_)` have "p = Main"
by(auto simp:get_proc_Entry)
with `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` have False
by(fastforce intro:Main_no_return_source)
thus ?thesis by simp
next
case (Actual_in z)
with `parent_node n = (_Entry_)` obtain x
where [simp]:"z = ((_Entry_),x)" by(cases z) auto
with `valid_SDG_node n` Actual_in obtain a Q r p fs where "valid_edge a"
and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "sourcenode a = (_Entry_)" by fastforce
hence False by -(rule Entry_no_call_source,auto)
thus ?thesis by simp
next
case (Actual_out z)
with `parent_node n = (_Entry_)` obtain x
where [simp]:"z = ((_Entry_),x)" by(cases z) auto
with `valid_SDG_node n` Actual_out obtain a where "valid_edge a"
"targetnode a = (_Entry_)" by auto
hence False by -(rule Entry_target,simp+)
thus ?thesis by simp
qed
lemma valid_SDG_node_parent_Exit:
assumes "valid_SDG_node n" and "parent_node n = (_Exit_)"
shows "n = CFG_node (_Exit_)"
proof(cases n)
case CFG_node with `parent_node n = (_Exit_)` show ?thesis by simp
next
case (Formal_in z)
with `parent_node n = (_Exit_)` obtain x
where [simp]:"z = ((_Exit_),x)" by(cases z) auto
with `valid_SDG_node n` Formal_in obtain a Q r p fs where "valid_edge a"
and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "targetnode a = (_Exit_)" by fastforce
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "get_proc (targetnode a) = p"
by(rule get_proc_call)
with `targetnode a = (_Exit_)` have "p = Main"
by(auto simp:get_proc_Exit)
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have False
by(fastforce intro:Main_no_call_target)
thus ?thesis by simp
next
case (Formal_out z)
with `parent_node n = (_Exit_)` obtain x
where [simp]:"z = ((_Exit_),x)" by(cases z) auto
with `valid_SDG_node n` Formal_out obtain a where "valid_edge a"
and "sourcenode a = (_Exit_)" by auto
hence False by -(rule Exit_source,simp+)
thus ?thesis by simp
next
case (Actual_in z)
with `parent_node n = (_Exit_)` obtain x
where [simp]:"z = ((_Exit_),x)" by(cases z) auto
with `valid_SDG_node n` Actual_in obtain a where "valid_edge a"
and "sourcenode a = (_Exit_)" by auto
hence False by -(rule Exit_source,simp+)
thus ?thesis by simp
next
case (Actual_out z)
with `parent_node n = (_Exit_)` obtain x
where [simp]:"z = ((_Exit_),x)" by(cases z) auto
with `valid_SDG_node n` Actual_out obtain a Q p f where "valid_edge a"
and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f" and "targetnode a = (_Exit_)" by auto
hence False by -(erule Exit_no_return_target,auto)
thus ?thesis by simp
qed
subsection {* Data dependence *}
inductive SDG_Use :: "'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool" ("_ \<in> Use\<^bsub>SDG\<^esub> _")
where CFG_Use_SDG_Use:
"\<lbrakk>valid_node m; V \<in> Use m; n = CFG_node m\<rbrakk> \<Longrightarrow> V \<in> Use\<^bsub>SDG\<^esub> n"
| Actual_in_SDG_Use:
"\<lbrakk>valid_SDG_node n; n = Actual_in (m,x); V \<in> (ParamUses m)!x\<rbrakk> \<Longrightarrow> V \<in> Use\<^bsub>SDG\<^esub> n"
| Formal_out_SDG_Use:
"\<lbrakk>valid_SDG_node n; n = Formal_out (m,x); get_proc m = p; (p,ins,outs) \<in> set procs;
V = outs!x\<rbrakk> \<Longrightarrow> V \<in> Use\<^bsub>SDG\<^esub> n"
abbreviation notin_SDG_Use :: "'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool" ("_ \<notin> Use\<^bsub>SDG\<^esub> _")
where "V \<notin> Use\<^bsub>SDG\<^esub> n \<equiv> \<not> V \<in> Use\<^bsub>SDG\<^esub> n"
lemma in_Use_valid_SDG_node:
"V \<in> Use\<^bsub>SDG\<^esub> n \<Longrightarrow> valid_SDG_node n"
by(induct rule:SDG_Use.induct,auto intro:valid_SDG_CFG_node)
lemma SDG_Use_parent_Use:
"V \<in> Use\<^bsub>SDG\<^esub> n \<Longrightarrow> V \<in> Use (parent_node n)"
proof(induct rule:SDG_Use.induct)
case CFG_Use_SDG_Use thus ?case by simp
next
case (Actual_in_SDG_Use n m x V)
from `valid_SDG_node n` `n = Actual_in (m, x)` obtain a Q r p fs ins outs
where "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "sourcenode a = m"
and "(p,ins,outs) \<in> set procs" and "x < length ins" by fastforce
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `(p,ins,outs) \<in> set procs`
have "length(ParamUses (sourcenode a)) = length ins"
by(fastforce intro:ParamUses_call_source_length)
with `x < length ins`
have "(ParamUses (sourcenode a))!x \<in> set (ParamUses (sourcenode a))" by simp
with `V \<in> (ParamUses m)!x` `sourcenode a = m`
have "V \<in> Union (set (ParamUses m))" by fastforce
with `valid_edge a` `sourcenode a = m` `n = Actual_in (m, x)` show ?case
by(fastforce intro:ParamUses_in_Use)
next
case (Formal_out_SDG_Use n m x p ins outs V)
from `valid_SDG_node n` `n = Formal_out (m, x)` obtain a Q p' f ins' outs'
where "valid_edge a" and "kind a = Q\<hookleftarrow>\<^bsub>p'\<^esub>f" and "sourcenode a = m"
and "(p',ins',outs') \<in> set procs" and "x < length outs'" by fastforce
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p'\<^esub>f` have "get_proc (sourcenode a) = p'"
by(rule get_proc_return)
with `get_proc m = p` `sourcenode a = m` have [simp]:"p = p'" by simp
with `(p',ins',outs') \<in> set procs` `(p,ins,outs) \<in> set procs` unique_callers
have [simp]:"ins' = ins" "outs' = outs" by(auto dest:distinct_fst_isin_same_fst)
from `x < length outs'` `V = outs ! x` have "V \<in> set outs" by fastforce
with `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p'\<^esub>f` `(p,ins,outs) \<in> set procs`
have "V \<in> Use (sourcenode a)" by(fastforce intro:outs_in_Use)
with `sourcenode a = m` `valid_SDG_node n` `n = Formal_out (m, x)`
show ?case by simp
qed
inductive SDG_Def :: "'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool" ("_ \<in> Def\<^bsub>SDG\<^esub> _")
where CFG_Def_SDG_Def:
"\<lbrakk>valid_node m; V \<in> Def m; n = CFG_node m\<rbrakk> \<Longrightarrow> V \<in> Def\<^bsub>SDG\<^esub> n"
| Formal_in_SDG_Def:
"\<lbrakk>valid_SDG_node n; n = Formal_in (m,x); get_proc m = p; (p,ins,outs) \<in> set procs;
V = ins!x\<rbrakk> \<Longrightarrow> V \<in> Def\<^bsub>SDG\<^esub> n"
| Actual_out_SDG_Def:
"\<lbrakk>valid_SDG_node n; n = Actual_out (m,x); V = (ParamDefs m)!x\<rbrakk> \<Longrightarrow> V \<in> Def\<^bsub>SDG\<^esub> n"
abbreviation notin_SDG_Def :: "'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool" ("_ \<notin> Def\<^bsub>SDG\<^esub> _")
where "V \<notin> Def\<^bsub>SDG\<^esub> n \<equiv> \<not> V \<in> Def\<^bsub>SDG\<^esub> n"
lemma in_Def_valid_SDG_node:
"V \<in> Def\<^bsub>SDG\<^esub> n \<Longrightarrow> valid_SDG_node n"
by(induct rule:SDG_Def.induct,auto intro:valid_SDG_CFG_node)
lemma SDG_Def_parent_Def:
"V \<in> Def\<^bsub>SDG\<^esub> n \<Longrightarrow> V \<in> Def (parent_node n)"
proof(induct rule:SDG_Def.induct)
case CFG_Def_SDG_Def thus ?case by simp
next
case (Formal_in_SDG_Def n m x p ins outs V)
from `valid_SDG_node n` `n = Formal_in (m, x)` obtain a Q r p' fs ins' outs'
where "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs" and "targetnode a = m"
and "(p',ins',outs') \<in> set procs" and "x < length ins'" by fastforce
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` have "get_proc (targetnode a) = p'"
by(rule get_proc_call)
with `get_proc m = p` `targetnode a = m` have [simp]:"p = p'" by simp
with `(p',ins',outs') \<in> set procs` `(p,ins,outs) \<in> set procs` unique_callers
have [simp]:"ins' = ins" "outs' = outs" by(auto dest:distinct_fst_isin_same_fst)
from `x < length ins'` `V = ins ! x` have "V \<in> set ins" by fastforce
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `(p,ins,outs) \<in> set procs`
have "V \<in> Def (targetnode a)" by(fastforce intro:ins_in_Def)
with `targetnode a = m` `valid_SDG_node n` `n = Formal_in (m, x)`
show ?case by simp
next
case (Actual_out_SDG_Def n m x V)
from `valid_SDG_node n` `n = Actual_out (m, x)` obtain a Q p f ins outs
where "valid_edge a" and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f" and "targetnode a = m"
and "(p,ins,outs) \<in> set procs" and "x < length outs" by fastforce
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` `(p,ins,outs) \<in> set procs`
have "length(ParamDefs (targetnode a)) = length outs"
by(rule ParamDefs_return_target_length)
with `x < length outs` `V = ParamDefs m ! x` `targetnode a = m`
have "V \<in> set (ParamDefs (targetnode a))" by(fastforce simp:set_conv_nth)
with `n = Actual_out (m, x)` `targetnode a = m` `valid_edge a`
show ?case by(fastforce intro:ParamDefs_in_Def)
qed
definition data_dependence :: "'node SDG_node \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ influences _ in _" [51,0,0])
where "n influences V in n' \<equiv> \<exists>as. (V \<in> Def\<^bsub>SDG\<^esub> n) \<and> (V \<in> Use\<^bsub>SDG\<^esub> n') \<and>
(parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n') \<and>
(\<forall>n''. valid_SDG_node n'' \<and> parent_node n'' \<in> set (sourcenodes (tl as))
\<longrightarrow> V \<notin> Def\<^bsub>SDG\<^esub> n'')"
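(* Informally: n influences V in n' iff V is defined at n, used at n', and some
   intraprocedural path from n to n' passes no intermediate node redefining V. *)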
subsection {* Control dependence *}
definition control_dependence :: "'node \<Rightarrow> 'node \<Rightarrow> bool"
("_ controls _" [51,0])
where "n controls n' \<equiv> \<exists>a a' as. n -a#as\<rightarrow>\<^sub>\<iota>* n' \<and> n' \<notin> set(sourcenodes (a#as)) \<and>
intra_kind(kind a) \<and> n' postdominates (targetnode a) \<and>
valid_edge a' \<and> intra_kind(kind a') \<and> sourcenode a' = n \<and>
\<not> n' postdominates (targetnode a')"
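(* Informally: n controls n' iff n' postdominates the target of one
   intraprocedural edge leaving n but not that of another, and the former edge
   starts an intraprocedural path reaching n' without visiting n' earlier;
   this is the standard notion of control dependence. *)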
lemma control_dependence_path:
assumes "n controls n'" obtains as where "n -as\<rightarrow>\<^sub>\<iota>* n'" and "as \<noteq> []"
using `n controls n'`
by(fastforce simp:control_dependence_def)
lemma Exit_does_not_control [dest]:
assumes "(_Exit_) controls n'" shows "False"
proof -
from `(_Exit_) controls n'` obtain a where "valid_edge a"
and "sourcenode a = (_Exit_)" by(auto simp:control_dependence_def)
thus ?thesis by(rule Exit_source)
qed
lemma Exit_not_control_dependent:
assumes "n controls n'" shows "n' \<noteq> (_Exit_)"
proof -
from `n controls n'` obtain a as where "n -a#as\<rightarrow>\<^sub>\<iota>* n'"
and "n' postdominates (targetnode a)"
by(auto simp:control_dependence_def)
from `n -a#as\<rightarrow>\<^sub>\<iota>* n'` have "valid_edge a"
by(fastforce elim:path.cases simp:intra_path_def)
hence "valid_node (targetnode a)" by simp
with `n' postdominates (targetnode a)` `n -a#as\<rightarrow>\<^sub>\<iota>* n'` show ?thesis
by(fastforce elim:Exit_no_postdominator)
qed
lemma which_node_intra_standard_control_dependence_source:
assumes "nx -as@a#as'\<rightarrow>\<^sub>\<iota>* n" and "sourcenode a = n'" and "sourcenode a' = n'"
and "n \<notin> set(sourcenodes (a#as'))" and "valid_edge a'" and "intra_kind(kind a')"
and "inner_node n" and "\<not> method_exit n" and "\<not> n postdominates (targetnode a')"
and last:"\<forall>ax ax'. ax \<in> set as' \<and> sourcenode ax = sourcenode ax' \<and>
valid_edge ax' \<and> intra_kind(kind ax') \<longrightarrow> n postdominates targetnode ax'"
shows "n' controls n"
proof -
from `nx -as@a#as'\<rightarrow>\<^sub>\<iota>* n` `sourcenode a = n'` have "n' -a#as'\<rightarrow>\<^sub>\<iota>* n"
by(fastforce dest:path_split_second simp:intra_path_def)
from `nx -as@a#as'\<rightarrow>\<^sub>\<iota>* n` have "valid_edge a"
by(fastforce intro:path_split simp:intra_path_def)
show ?thesis
proof(cases "n postdominates (targetnode a)")
case True
with `n' -a#as'\<rightarrow>\<^sub>\<iota>* n` `n \<notin> set(sourcenodes (a#as'))`
`valid_edge a'` `intra_kind(kind a')` `sourcenode a' = n'`
`\<not> n postdominates (targetnode a')` show ?thesis
by(fastforce simp:control_dependence_def intra_path_def)
next
case False
show ?thesis
proof(cases "as' = []")
case True
with `n' -a#as'\<rightarrow>\<^sub>\<iota>* n` have "targetnode a = n"
by(fastforce elim:path.cases simp:intra_path_def)
with `inner_node n` `\<not> method_exit n` have "n postdominates (targetnode a)"
by(fastforce dest:inner_is_valid intro:postdominate_refl)
with `\<not> n postdominates (targetnode a)` show ?thesis by simp
next
case False
with `nx -as@a#as'\<rightarrow>\<^sub>\<iota>* n` have "targetnode a -as'\<rightarrow>\<^sub>\<iota>* n"
by(fastforce intro:path_split simp:intra_path_def)
with `\<not> n postdominates (targetnode a)` `valid_edge a` `inner_node n`
`targetnode a -as'\<rightarrow>\<^sub>\<iota>* n`
obtain asx pex where "targetnode a -asx\<rightarrow>\<^sub>\<iota>* pex" and "method_exit pex"
and "n \<notin> set (sourcenodes asx)"
by(fastforce dest:inner_is_valid simp:postdominate_def)
show ?thesis
proof(cases "\<exists>asx'. asx = as'@asx'")
case True
then obtain asx' where [simp]:"asx = as'@asx'" by blast
from `targetnode a -asx\<rightarrow>\<^sub>\<iota>* pex` `targetnode a -as'\<rightarrow>\<^sub>\<iota>* n`
`as' \<noteq> []` `method_exit pex` `\<not> method_exit n`
obtain a'' as'' where "asx' = a''#as'' \<and> sourcenode a'' = n"
by(cases asx')(auto dest:path_split path_det simp:intra_path_def)
hence "n \<in> set(sourcenodes asx)" by(simp add:sourcenodes_def)
with `n \<notin> set (sourcenodes asx)` have False by simp
thus ?thesis by simp
next
case False
hence "\<forall>asx'. asx \<noteq> as'@asx'" by simp
then obtain j asx' where "asx = (take j as')@asx'"
and "j < length as'" and "\<forall>k > j. \<forall>asx''. asx \<noteq> (take k as')@asx''"
by(auto elim:path_split_general)
from `asx = (take j as')@asx'` `j < length as'`
have "\<exists>as'1 as'2. asx = as'1@asx' \<and>
as' = as'1@as'2 \<and> as'2 \<noteq> [] \<and> as'1 = take j as'"
by simp(rule_tac x= "drop j as'" in exI,simp)
then obtain as'1 as'' where "asx = as'1@asx'"
and "as'1 = take j as'"
and "as' = as'1@as''" and "as'' \<noteq> []" by blast
from `as' = as'1@as''` `as'' \<noteq> []` obtain a1 as'2
where "as' = as'1@a1#as'2" and "as'' = a1#as'2"
by(cases as'') auto
have "asx' \<noteq> []"
proof(cases "asx' = []")
case True
with `asx = as'1@asx'` `as' = as'1@as''` `as'' = a1#as'2`
have "as' = asx@a1#as'2" by simp
with `n' -a#as'\<rightarrow>\<^sub>\<iota>* n` have "n' -(a#asx)@a1#as'2\<rightarrow>\<^sub>\<iota>* n" by simp
hence "n' -(a#asx)@a1#as'2\<rightarrow>* n"
and "\<forall>ax \<in> set((a#asx)@a1#as'2). intra_kind(kind ax)"
by(simp_all add:intra_path_def)
from `n' -(a#asx)@a1#as'2\<rightarrow>* n`
have "n' -a#asx\<rightarrow>* sourcenode a1" and "valid_edge a1"
by -(erule path_split)+
from `\<forall>ax \<in> set((a#asx)@a1#as'2). intra_kind(kind ax)`
have "\<forall>ax \<in> set(a#asx). intra_kind(kind ax)" by simp
with `n' -a#asx\<rightarrow>* sourcenode a1` have "n' -a#asx\<rightarrow>\<^sub>\<iota>* sourcenode a1"
by(simp add:intra_path_def)
hence "targetnode a -asx\<rightarrow>\<^sub>\<iota>* sourcenode a1"
by(fastforce intro:path_split_Cons simp:intra_path_def)
with `targetnode a -asx\<rightarrow>\<^sub>\<iota>* pex` have "pex = sourcenode a1"
by(fastforce intro:path_det simp:intra_path_def)
from `\<forall>ax \<in> set((a#asx)@a1#as'2). intra_kind(kind ax)`
have "intra_kind (kind a1)" by simp
from `method_exit pex` have False
proof(rule method_exit_cases)
assume "pex = (_Exit_)"
with `pex = sourcenode a1` have "sourcenode a1 = (_Exit_)" by simp
with `valid_edge a1` show False by(rule Exit_source)
next
fix a Q f p assume "pex = sourcenode a" and "valid_edge a"
and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` `pex = sourcenode a`
`pex = sourcenode a1` `valid_edge a1` `intra_kind (kind a1)`
show False by(fastforce dest:return_edges_only simp:intra_kind_def)
qed
thus ?thesis by simp
qed simp
with `asx = as'1@asx'` obtain a2 asx'1
where "asx = as'1@a2#asx'1"
and "asx' = a2#asx'1" by(cases asx') auto
from `n' -a#as'\<rightarrow>\<^sub>\<iota>* n` `as' = as'1@a1#as'2`
have "n' -(a#as'1)@a1#as'2\<rightarrow>\<^sub>\<iota>* n" by simp
hence "n' -(a#as'1)@a1#as'2\<rightarrow>* n"
and "\<forall>ax \<in> set((a#as'1)@a1#as'2). intra_kind(kind ax)"
by(simp_all add: intra_path_def)
from `n' -(a#as'1)@a1#as'2\<rightarrow>* n` have "n' -a#as'1\<rightarrow>* sourcenode a1"
and "valid_edge a1" by -(erule path_split)+
from `\<forall>ax \<in> set((a#as'1)@a1#as'2). intra_kind(kind ax)`
have "\<forall>ax \<in> set(a#as'1). intra_kind(kind ax)" by simp
with `n' -a#as'1\<rightarrow>* sourcenode a1` have "n' -a#as'1\<rightarrow>\<^sub>\<iota>* sourcenode a1"
by(simp add:intra_path_def)
hence "targetnode a -as'1\<rightarrow>\<^sub>\<iota>* sourcenode a1"
by(fastforce intro:path_split_Cons simp:intra_path_def)
from `targetnode a -asx\<rightarrow>\<^sub>\<iota>* pex` `asx = as'1@a2#asx'1`
have "targetnode a -as'1@a2#asx'1\<rightarrow>* pex" by(simp add:intra_path_def)
hence "targetnode a -as'1\<rightarrow>* sourcenode a2" and "valid_edge a2"
and "targetnode a2 -asx'1\<rightarrow>* pex" by(auto intro:path_split)
from `targetnode a2 -asx'1\<rightarrow>* pex` `asx = as'1@a2#asx'1`
`targetnode a -asx\<rightarrow>\<^sub>\<iota>* pex`
have "targetnode a2 -asx'1\<rightarrow>\<^sub>\<iota>* pex" by(simp add:intra_path_def)
from `targetnode a -as'1\<rightarrow>* sourcenode a2`
`targetnode a -as'1\<rightarrow>\<^sub>\<iota>* sourcenode a1`
have "sourcenode a1 = sourcenode a2"
by(fastforce intro:path_det simp:intra_path_def)
from `asx = as'1@a2#asx'1` `n \<notin> set (sourcenodes asx)`
have "n \<notin> set (sourcenodes asx'1)" by(simp add:sourcenodes_def)
with `targetnode a2 -asx'1\<rightarrow>\<^sub>\<iota>* pex` `method_exit pex`
`asx = as'1@a2#asx'1`
have "\<not> n postdominates targetnode a2" by(fastforce simp:postdominate_def)
from `asx = as'1@a2#asx'1` `targetnode a -asx\<rightarrow>\<^sub>\<iota>* pex`
have "intra_kind (kind a2)" by(simp add:intra_path_def)
from `as' = as'1@a1#as'2` have "a1 \<in> set as'" by simp
with `sourcenode a1 = sourcenode a2` last `valid_edge a2`
`intra_kind (kind a2)`
have "n postdominates targetnode a2" by blast
with `\<not> n postdominates targetnode a2` have False by simp
thus ?thesis by simp
qed
qed
qed
qed
subsection {* SDG without summary edges *}
inductive cdep_edge :: "'node SDG_node \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ \<longrightarrow>\<^bsub>cd\<^esub> _" [51,0] 80)
and ddep_edge :: "'node SDG_node \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ -_\<rightarrow>\<^bsub>dd\<^esub> _" [51,0,0] 80)
and call_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ -_\<rightarrow>\<^bsub>call\<^esub> _" [51,0,0] 80)
and return_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ -_\<rightarrow>\<^bsub>ret\<^esub> _" [51,0,0] 80)
and param_in_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ -_:_\<rightarrow>\<^bsub>in\<^esub> _" [51,0,0,0] 80)
and param_out_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ -_:_\<rightarrow>\<^bsub>out\<^esub> _" [51,0,0,0] 80)
and SDG_edge :: "'node SDG_node \<Rightarrow> 'var option \<Rightarrow>
('pname \<times> bool) option \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
where
(* Syntax *)
"n \<longrightarrow>\<^bsub>cd\<^esub> n' == SDG_edge n None None n'"
| "n -V\<rightarrow>\<^bsub>dd\<^esub> n' == SDG_edge n (Some V) None n'"
| "n -p\<rightarrow>\<^bsub>call\<^esub> n' == SDG_edge n None (Some(p,True)) n'"
| "n -p\<rightarrow>\<^bsub>ret\<^esub> n' == SDG_edge n None (Some(p,False)) n'"
| "n -p:V\<rightarrow>\<^bsub>in\<^esub> n' == SDG_edge n (Some V) (Some(p,True)) n'"
| "n -p:V\<rightarrow>\<^bsub>out\<^esub> n' == SDG_edge n (Some V) (Some(p,False)) n'"
(* Rules *)
| SDG_cdep_edge:
"\<lbrakk>n = CFG_node m; n' = CFG_node m'; m controls m'\<rbrakk> \<Longrightarrow> n \<longrightarrow>\<^bsub>cd\<^esub> n'"
| SDG_proc_entry_exit_cdep:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; n = CFG_node (targetnode a);
a' \<in> get_return_edges a; n' = CFG_node (sourcenode a')\<rbrakk> \<Longrightarrow> n \<longrightarrow>\<^bsub>cd\<^esub> n'"
| SDG_parent_cdep_edge:
"\<lbrakk>valid_SDG_node n'; m = parent_node n'; n = CFG_node m; n \<noteq> n'\<rbrakk>
\<Longrightarrow> n \<longrightarrow>\<^bsub>cd\<^esub> n'"
| SDG_ddep_edge:"n influences V in n' \<Longrightarrow> n -V\<rightarrow>\<^bsub>dd\<^esub> n'"
| SDG_call_edge:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; n = CFG_node (sourcenode a);
n' = CFG_node (targetnode a)\<rbrakk> \<Longrightarrow> n -p\<rightarrow>\<^bsub>call\<^esub> n'"
| SDG_return_edge:
"\<lbrakk>valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f; n = CFG_node (sourcenode a);
n' = CFG_node (targetnode a)\<rbrakk> \<Longrightarrow> n -p\<rightarrow>\<^bsub>ret\<^esub> n'"
| SDG_param_in_edge:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; (p,ins,outs) \<in> set procs; V = ins!x;
x < length ins; n = Actual_in (sourcenode a,x); n' = Formal_in (targetnode a,x)\<rbrakk>
\<Longrightarrow> n -p:V\<rightarrow>\<^bsub>in\<^esub> n'"
| SDG_param_out_edge:
"\<lbrakk>valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f; (p,ins,outs) \<in> set procs; V = outs!x;
x < length outs; n = Formal_out (sourcenode a,x);
n' = Actual_out (targetnode a,x)\<rbrakk>
\<Longrightarrow> n -p:V\<rightarrow>\<^bsub>out\<^esub> n'"
lemma cdep_edge_cases:
"\<lbrakk>n \<longrightarrow>\<^bsub>cd\<^esub> n'; (parent_node n) controls (parent_node n') \<Longrightarrow> P;
\<And>a Q r p fs a'. \<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; a' \<in> get_return_edges a;
parent_node n = targetnode a; parent_node n' = sourcenode a'\<rbrakk> \<Longrightarrow> P;
\<And>m. \<lbrakk>n = CFG_node m; m = parent_node n'; n \<noteq> n'\<rbrakk> \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
by -(erule SDG_edge.cases,auto)
lemma SDG_edge_valid_SDG_node:
assumes "SDG_edge n Vopt popt n'"
shows "valid_SDG_node n" and "valid_SDG_node n'"
using `SDG_edge n Vopt popt n'`
proof(induct rule:SDG_edge.induct)
case (SDG_cdep_edge n m n' m')
thus "valid_SDG_node n" "valid_SDG_node n'"
by(fastforce elim:control_dependence_path elim:path_valid_node
simp:intra_path_def)+
next
case (SDG_proc_entry_exit_cdep a Q r p f n a' n') case 1
from `valid_edge a` `n = CFG_node (targetnode a)` show ?case by simp
next
case (SDG_proc_entry_exit_cdep a Q r p f n a' n') case 2
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
with `n' = CFG_node (sourcenode a')` show ?case by simp
next
case (SDG_ddep_edge n V n')
thus "valid_SDG_node n" "valid_SDG_node n'"
by(auto intro:in_Use_valid_SDG_node in_Def_valid_SDG_node
simp:data_dependence_def)
qed(fastforce intro:valid_SDG_CFG_node)+
lemma valid_SDG_node_cases:
assumes "valid_SDG_node n"
shows "n = CFG_node (parent_node n) \<or> CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
proof(cases n)
case (CFG_node m) thus ?thesis by simp
next
case (Formal_in z)
from `n = Formal_in z` obtain m x where "z = (m,x)" by(cases z) auto
with `valid_SDG_node n` `n = Formal_in z` have "CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
by -(rule SDG_parent_cdep_edge,auto)
thus ?thesis by fastforce
next
case (Formal_out z)
from `n = Formal_out z` obtain m x where "z = (m,x)" by(cases z) auto
with `valid_SDG_node n` `n = Formal_out z` have "CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
by -(rule SDG_parent_cdep_edge,auto)
thus ?thesis by fastforce
next
case (Actual_in z)
from `n = Actual_in z` obtain m x where "z = (m,x)" by(cases z) auto
with `valid_SDG_node n` `n = Actual_in z` have "CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
by -(rule SDG_parent_cdep_edge,auto)
thus ?thesis by fastforce
next
case (Actual_out z)
from `n = Actual_out z` obtain m x where "z = (m,x)" by(cases z) auto
with `valid_SDG_node n` `n = Actual_out z` have "CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
by -(rule SDG_parent_cdep_edge,auto)
thus ?thesis by fastforce
qed
lemma SDG_cdep_edge_CFG_node: "n \<longrightarrow>\<^bsub>cd\<^esub> n' \<Longrightarrow> \<exists>m. n = CFG_node m"
by(induct n Vopt\<equiv>"None::'var option" popt\<equiv>"None::('pname \<times> bool) option" n'
rule:SDG_edge.induct) auto
lemma SDG_call_edge_CFG_node: "n -p\<rightarrow>\<^bsub>call\<^esub> n' \<Longrightarrow> \<exists>m. n = CFG_node m"
by(induct n Vopt\<equiv>"None::'var option" popt\<equiv>"Some(p,True)" n'
rule:SDG_edge.induct) auto
lemma SDG_return_edge_CFG_node: "n -p\<rightarrow>\<^bsub>ret\<^esub> n' \<Longrightarrow> \<exists>m. n = CFG_node m"
by(induct n Vopt\<equiv>"None::'var option" popt\<equiv>"Some(p,False)" n'
rule:SDG_edge.induct) auto
lemma SDG_call_or_param_in_edge_unique_CFG_call_edge:
"SDG_edge n Vopt (Some(p,True)) n'
\<Longrightarrow> \<exists>!a. valid_edge a \<and> sourcenode a = parent_node n \<and>
targetnode a = parent_node n' \<and> (\<exists>Q r fs. kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
proof(induct n Vopt "Some(p,True)" n' rule:SDG_edge.induct)
case (SDG_call_edge a Q r fs n n')
{ fix a'
assume "valid_edge a'" and "sourcenode a' = parent_node n"
and "targetnode a' = parent_node n'"
from `sourcenode a' = parent_node n` `n = CFG_node (sourcenode a)`
have "sourcenode a' = sourcenode a" by fastforce
moreover from `targetnode a' = parent_node n'` `n' = CFG_node (targetnode a)`
have "targetnode a' = targetnode a" by fastforce
ultimately have "a' = a" using `valid_edge a'` `valid_edge a`
by(fastforce intro:edge_det) }
with `valid_edge a` `n = CFG_node (sourcenode a)` `n' = CFG_node (targetnode a)`
`kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` show ?case by(fastforce intro!:ex1I[of _ a])
next
case (SDG_param_in_edge a Q r fs ins outs V x n n')
{ fix a'
assume "valid_edge a'" and "sourcenode a' = parent_node n"
and "targetnode a' = parent_node n'"
from `sourcenode a' = parent_node n` `n = Actual_in (sourcenode a,x)`
have "sourcenode a' = sourcenode a" by fastforce
moreover from `targetnode a' = parent_node n'` `n' = Formal_in (targetnode a,x)`
have "targetnode a' = targetnode a" by fastforce
ultimately have "a' = a" using `valid_edge a'` `valid_edge a`
by(fastforce intro:edge_det) }
with `valid_edge a` `n = Actual_in (sourcenode a,x)`
`n' = Formal_in (targetnode a,x)` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs`
show ?case by(fastforce intro!:ex1I[of _ a])
qed simp_all
lemma SDG_return_or_param_out_edge_unique_CFG_return_edge:
"SDG_edge n Vopt (Some(p,False)) n'
\<Longrightarrow> \<exists>!a. valid_edge a \<and> sourcenode a = parent_node n \<and>
targetnode a = parent_node n' \<and> (\<exists>Q f. kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f)"
proof(induct n Vopt "Some(p,False)" n' rule:SDG_edge.induct)
case (SDG_return_edge a Q f n n')
{ fix a'
assume "valid_edge a'" and "sourcenode a' = parent_node n"
and "targetnode a' = parent_node n'"
from `sourcenode a' = parent_node n` `n = CFG_node (sourcenode a)`
have "sourcenode a' = sourcenode a" by fastforce
moreover from `targetnode a' = parent_node n'` `n' = CFG_node (targetnode a)`
have "targetnode a' = targetnode a" by fastforce
ultimately have "a' = a" using `valid_edge a'` `valid_edge a`
by(fastforce intro:edge_det) }
with `valid_edge a` `n = CFG_node (sourcenode a)` `n' = CFG_node (targetnode a)`
`kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` show ?case by(fastforce intro!:ex1I[of _ a])
next
case (SDG_param_out_edge a Q f ins outs V x n n')
{ fix a'
assume "valid_edge a'" and "sourcenode a' = parent_node n"
and "targetnode a' = parent_node n'"
from `sourcenode a' = parent_node n` `n = Formal_out (sourcenode a,x)`
have "sourcenode a' = sourcenode a" by fastforce
moreover from `targetnode a' = parent_node n'` `n' = Actual_out (targetnode a,x)`
have "targetnode a' = targetnode a" by fastforce
ultimately have "a' = a" using `valid_edge a'` `valid_edge a`
by(fastforce intro:edge_det) }
with `valid_edge a` `n = Formal_out (sourcenode a,x)`
`n' = Actual_out (targetnode a,x)` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f`
show ?case by(fastforce intro!:ex1I[of _ a])
qed simp_all
lemma Exit_no_SDG_edge_source:
"SDG_edge (CFG_node (_Exit_)) Vopt popt n' \<Longrightarrow> False"
proof(induct "CFG_node (_Exit_)" Vopt popt n' rule:SDG_edge.induct)
case (SDG_cdep_edge m n' m')
hence "(_Exit_) controls m'" by simp
thus ?case by fastforce
next
case (SDG_proc_entry_exit_cdep a Q r p fs a' n')
from `CFG_node (_Exit_) = CFG_node (targetnode a)`
have "targetnode a = (_Exit_)" by simp
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "get_proc (targetnode a) = p"
by(rule get_proc_call)
with `targetnode a = (_Exit_)` have "p = Main"
by(auto simp:get_proc_Exit)
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have False
by(fastforce intro:Main_no_call_target)
thus ?thesis by simp
next
case (SDG_parent_cdep_edge n' m)
from `CFG_node (_Exit_) = CFG_node m`
have [simp]:"m = (_Exit_)" by simp
with `valid_SDG_node n'` `m = parent_node n'` `CFG_node (_Exit_) \<noteq> n'`
have False by -(drule valid_SDG_node_parent_Exit,simp+)
thus ?thesis by simp
next
case (SDG_ddep_edge V n')
hence "(CFG_node (_Exit_)) influences V in n'" by simp
with Exit_empty show ?case
by(fastforce dest:path_Exit_source SDG_Def_parent_Def
simp:data_dependence_def intra_path_def)
next
case (SDG_call_edge a Q r p fs n')
from `CFG_node (_Exit_) = CFG_node (sourcenode a)`
have "sourcenode a = (_Exit_)" by simp
with `valid_edge a` show ?case by(rule Exit_source)
next
case (SDG_return_edge a Q p f n')
from `CFG_node (_Exit_) = CFG_node (sourcenode a)`
have "sourcenode a = (_Exit_)" by simp
with `valid_edge a` show ?case by(rule Exit_source)
qed simp_all
subsection {* Intraprocedural paths in the SDG *}
inductive intra_SDG_path ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ i-_\<rightarrow>\<^sub>d* _" [51,0,0] 80)
where iSp_Nil:
"valid_SDG_node n \<Longrightarrow> n i-[]\<rightarrow>\<^sub>d* n"
| iSp_Append_cdep:
"\<lbrakk>n i-ns\<rightarrow>\<^sub>d* n''; n'' \<longrightarrow>\<^bsub>cd\<^esub> n'\<rbrakk> \<Longrightarrow> n i-ns@[n'']\<rightarrow>\<^sub>d* n'"
| iSp_Append_ddep:
"\<lbrakk>n i-ns\<rightarrow>\<^sub>d* n''; n'' -V\<rightarrow>\<^bsub>dd\<^esub> n'; n'' \<noteq> n'\<rbrakk> \<Longrightarrow> n i-ns@[n'']\<rightarrow>\<^sub>d* n'"
lemma intra_SDG_path_Append:
"\<lbrakk>n'' i-ns'\<rightarrow>\<^sub>d* n'; n i-ns\<rightarrow>\<^sub>d* n''\<rbrakk> \<Longrightarrow> n i-ns@ns'\<rightarrow>\<^sub>d* n'"
by(induct rule:intra_SDG_path.induct,
auto intro:intra_SDG_path.intros simp:append_assoc[THEN sym] simp del:append_assoc)
lemma intra_SDG_path_valid_SDG_node:
assumes "n i-ns\<rightarrow>\<^sub>d* n'" shows "valid_SDG_node n" and "valid_SDG_node n'"
using `n i-ns\<rightarrow>\<^sub>d* n'`
by(induct rule:intra_SDG_path.induct,
auto intro:SDG_edge_valid_SDG_node valid_SDG_CFG_node)
lemma intra_SDG_path_intra_CFG_path:
assumes "n i-ns\<rightarrow>\<^sub>d* n'"
obtains as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(atomize_elim)
from `n i-ns\<rightarrow>\<^sub>d* n'`
show "\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(induct rule:intra_SDG_path.induct)
case (iSp_Nil n)
from `valid_SDG_node n` have "valid_node (parent_node n)"
by(rule valid_SDG_CFG_node)
hence "parent_node n -[]\<rightarrow>* parent_node n" by(rule empty_path)
thus ?case by(auto simp:intra_path_def)
next
case (iSp_Append_cdep n ns n'' n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' \<longrightarrow>\<^bsub>cd\<^esub> n'` show ?case
proof(rule cdep_edge_cases)
assume "parent_node n'' controls parent_node n'"
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" and "as' \<noteq> []"
by(erule control_dependence_path)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?thesis by blast
next
fix a Q r p fs a'
assume "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "a' \<in> get_return_edges a"
and "parent_node n'' = targetnode a" and "parent_node n' = sourcenode a'"
then obtain a'' where "valid_edge a''" and "sourcenode a'' = targetnode a"
and "targetnode a'' = sourcenode a'" and "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(auto dest:intra_proc_additional_edge)
hence "targetnode a -[a'']\<rightarrow>\<^sub>\<iota>* sourcenode a'"
by(fastforce dest:path_edge simp:intra_path_def intra_kind_def)
with `parent_node n'' = targetnode a` `parent_node n' = sourcenode a'`
have "\<exists>as'. parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n' \<and> as' \<noteq> []" by fastforce
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" and "as' \<noteq> []"
by blast
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?thesis by blast
next
fix m assume "n'' = CFG_node m" and "m = parent_node n'"
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''` show ?thesis by fastforce
qed
next
case (iSp_Append_ddep n ns n'' V n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' -V\<rightarrow>\<^bsub>dd\<^esub> n'` have "n'' influences V in n'"
by(fastforce elim:SDG_edge.cases)
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(auto simp:data_dependence_def)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?case by blast
qed
qed

subsection {* Control dependence paths in the SDG *}
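
text {* Control dependence paths restrict intraprocedural SDG paths to
  control dependence edges only; hence every control dependence path is in
  particular an intraprocedural SDG path. *}
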
inductive cdep_SDG_path ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ cd-_\<rightarrow>\<^sub>d* _" [51,0,0] 80)
where cdSp_Nil:
"valid_SDG_node n \<Longrightarrow> n cd-[]\<rightarrow>\<^sub>d* n"
| cdSp_Append_cdep:
"\<lbrakk>n cd-ns\<rightarrow>\<^sub>d* n''; n'' \<longrightarrow>\<^bsub>cd\<^esub> n'\<rbrakk> \<Longrightarrow> n cd-ns@[n'']\<rightarrow>\<^sub>d* n'"

lemma cdep_SDG_path_intra_SDG_path:
"n cd-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> n i-ns\<rightarrow>\<^sub>d* n'"
by(induct rule:cdep_SDG_path.induct,auto intro:intra_SDG_path.intros)
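
text {* Every inner node that is reachable from the entry node via an
  intraprocedural CFG path, and that is not a method exit, is the target of
  a nonempty control dependence path starting at the entry node; the parent
  nodes of all nodes on this dependence path occur as source nodes of the
  given CFG path. *}
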
lemma Entry_cdep_SDG_path:
assumes "(_Entry_) -as\<rightarrow>\<^sub>\<iota>* n'" and "inner_node n'" and "\<not> method_exit n'"
obtains ns where "CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node n'"
and "ns \<noteq> []" and "\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes as)"
proof(atomize_elim)
from `(_Entry_) -as\<rightarrow>\<^sub>\<iota>* n'` `inner_node n'` `\<not> method_exit n'`
show "\<exists>ns. CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node n' \<and> ns \<noteq> [] \<and>
(\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes as))"
proof(induct as arbitrary:n' rule:length_induct)
fix as n'
assume IH:"\<forall>as'. length as' < length as \<longrightarrow>
(\<forall>n''. (_Entry_) -as'\<rightarrow>\<^sub>\<iota>* n'' \<longrightarrow> inner_node n'' \<longrightarrow> \<not> method_exit n'' \<longrightarrow>
(\<exists>ns. CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node n'' \<and> ns \<noteq> [] \<and>
(\<forall>nx\<in>set ns. parent_node nx \<in> set (sourcenodes as'))))"
and "(_Entry_) -as\<rightarrow>\<^sub>\<iota>* n'" and "inner_node n'" and "\<not> method_exit n'"
thus "\<exists>ns. CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node n' \<and> ns \<noteq> [] \<and>
(\<forall>n''\<in>set ns. parent_node n'' \<in> set (sourcenodes as))"
proof -
have "\<exists>ax asx zs. (_Entry_) -ax#asx\<rightarrow>\<^sub>\<iota>* n' \<and> n' \<notin> set (sourcenodes (ax#asx)) \<and>
as = (ax#asx)@zs"
proof(cases "n' \<in> set (sourcenodes as)")
case True
hence "\<exists>n'' \<in> set(sourcenodes as). n' = n''" by simp
then obtain ns' ns'' where "sourcenodes as = ns'@n'#ns''"
and "\<forall>n'' \<in> set ns'. n' \<noteq> n''"
by(fastforce elim!:split_list_first_propE)
from `sourcenodes as = ns'@n'#ns''` obtain xs ys ax
where "sourcenodes xs = ns'" and "as = xs@ax#ys"
and "sourcenode ax = n'"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from `\<forall>n'' \<in> set ns'. n' \<noteq> n''` `sourcenodes xs = ns'`
have "n' \<notin> set(sourcenodes xs)" by fastforce
from `(_Entry_) -as\<rightarrow>\<^sub>\<iota>* n'` `as = xs@ax#ys` have "(_Entry_) -xs@ax#ys\<rightarrow>\<^sub>\<iota>* n'"
by simp
with `sourcenode ax = n'` have "(_Entry_) -xs\<rightarrow>\<^sub>\<iota>* n'"
by(fastforce dest:path_split simp:intra_path_def)
with `inner_node n'` have "xs \<noteq> []"
by(fastforce elim:path.cases simp:intra_path_def)
with `n' \<notin> set(sourcenodes xs)` `(_Entry_) -xs\<rightarrow>\<^sub>\<iota>* n'` `as = xs@ax#ys`
show ?thesis by(cases xs) auto
next
case False
with `(_Entry_) -as\<rightarrow>\<^sub>\<iota>* n'` `inner_node n'`
show ?thesis by(cases as)(auto elim:path.cases simp:intra_path_def)
qed
then obtain ax asx zs where "(_Entry_) -ax#asx\<rightarrow>\<^sub>\<iota>* n'"
and "n' \<notin> set (sourcenodes (ax#asx))" and "as = (ax#asx)@zs" by blast
show ?thesis
proof(cases "\<forall>a' a''. a' \<in> set asx \<and> sourcenode a' = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<longrightarrow> n' postdominates targetnode a''")
case True
have "(_Exit_) -[]\<rightarrow>\<^sub>\<iota>* (_Exit_)"
by(fastforce intro:empty_path simp:intra_path_def)
hence "\<not> n' postdominates (_Exit_)"
by(fastforce simp:postdominate_def sourcenodes_def method_exit_def)
from `(_Entry_) -ax#asx\<rightarrow>\<^sub>\<iota>* n'` have "(_Entry_) -[]@ax#asx\<rightarrow>\<^sub>\<iota>* n'" by simp
from `(_Entry_) -ax#asx\<rightarrow>\<^sub>\<iota>* n'` have [simp]:"sourcenode ax = (_Entry_)"
and "valid_edge ax"
by(auto intro:path_split_Cons simp:intra_path_def)
from Entry_Exit_edge obtain a' where "sourcenode a' = (_Entry_)"
and "targetnode a' = (_Exit_)" and "valid_edge a'"
and "intra_kind(kind a')" by(auto simp:intra_kind_def)
with `(_Entry_) -[]@ax#asx\<rightarrow>\<^sub>\<iota>* n'` `\<not> n' postdominates (_Exit_)`
`valid_edge ax` True `sourcenode ax = (_Entry_)`
`n' \<notin> set (sourcenodes (ax#asx))` `inner_node n'` `\<not> method_exit n'`
have "sourcenode ax controls n'"
by -(erule which_node_intra_standard_control_dependence_source
[of _ _ _ _ _ _ a'],auto)
hence "CFG_node (_Entry_) \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'"
by(fastforce intro:SDG_cdep_edge)
hence "CFG_node (_Entry_) cd-[]@[CFG_node (_Entry_)]\<rightarrow>\<^sub>d* CFG_node n'"
by(fastforce intro:cdSp_Append_cdep cdSp_Nil)
moreover
from `as = (ax#asx)@zs` have "(_Entry_) \<in> set(sourcenodes as)"
by(simp add:sourcenodes_def)
ultimately show ?thesis by fastforce
next
case False
hence "\<exists>a' \<in> set asx. \<exists>a''. sourcenode a' = sourcenode a'' \<and> valid_edge a'' \<and>
intra_kind(kind a'') \<and> \<not> n' postdominates targetnode a''"
by fastforce
then obtain ax' asx' asx'' where "asx = asx'@ax'#asx'' \<and>
(\<exists>a''. sourcenode ax' = sourcenode a'' \<and> valid_edge a'' \<and>
intra_kind(kind a'') \<and> \<not> n' postdominates targetnode a'') \<and>
(\<forall>z \<in> set asx''. \<not> (\<exists>a''. sourcenode z = sourcenode a'' \<and> valid_edge a'' \<and>
intra_kind(kind a'') \<and> \<not> n' postdominates targetnode a''))"
by(blast elim!:split_list_last_propE)
then obtain ai where "asx = asx'@ax'#asx''"
and "sourcenode ax' = sourcenode ai"
and "valid_edge ai" and "intra_kind(kind ai)"
and "\<not> n' postdominates targetnode ai"
and "\<forall>z \<in> set asx''. \<not> (\<exists>a''. sourcenode z = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<and> \<not> n' postdominates targetnode a'')"
by blast
from `(_Entry_) -ax#asx\<rightarrow>\<^sub>\<iota>* n'` `asx = asx'@ax'#asx''`
have "(_Entry_) -(ax#asx')@ax'#asx''\<rightarrow>\<^sub>\<iota>* n'" by simp
from `n' \<notin> set (sourcenodes (ax#asx))` `asx = asx'@ax'#asx''`
have "n' \<notin> set (sourcenodes (ax'#asx''))"
by(auto simp:sourcenodes_def)
with `inner_node n'` `\<not> n' postdominates targetnode ai`
`n' \<notin> set (sourcenodes (ax'#asx''))` `sourcenode ax' = sourcenode ai`
`\<forall>z \<in> set asx''. \<not> (\<exists>a''. sourcenode z = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<and> \<not> n' postdominates targetnode a'')`
`valid_edge ai` `intra_kind(kind ai)` `\<not> method_exit n'`
`(_Entry_) -(ax#asx')@ax'#asx''\<rightarrow>\<^sub>\<iota>* n'`
have "sourcenode ax' controls n'"
by(fastforce intro!:which_node_intra_standard_control_dependence_source)
hence "CFG_node (sourcenode ax') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'"
by(fastforce intro:SDG_cdep_edge)
from `(_Entry_) -(ax#asx')@ax'#asx''\<rightarrow>\<^sub>\<iota>* n'`
have "(_Entry_) -ax#asx'\<rightarrow>\<^sub>\<iota>* sourcenode ax'" and "valid_edge ax'"
by(auto intro:path_split simp:intra_path_def simp del:append_Cons)
from `asx = asx'@ax'#asx''` `as = (ax#asx)@zs`
have "length (ax#asx') < length as" by simp
from `valid_edge ax'` have "valid_node (sourcenode ax')" by simp
hence "inner_node (sourcenode ax')"
proof(cases "sourcenode ax'" rule:valid_node_cases)
case Entry
with `(_Entry_) -ax#asx'\<rightarrow>\<^sub>\<iota>* sourcenode ax'`
have "(_Entry_) -ax#asx'\<rightarrow>* (_Entry_)" by(simp add:intra_path_def)
hence False by(fastforce dest:path_Entry_target)
thus ?thesis by simp
next
case Exit
with `valid_edge ax'` have False by(rule Exit_source)
thus ?thesis by simp
qed simp
from `asx = asx'@ax'#asx''` `(_Entry_) -ax#asx\<rightarrow>\<^sub>\<iota>* n'`
have "intra_kind (kind ax')" by(simp add:intra_path_def)
have "\<not> method_exit (sourcenode ax')"
proof
assume "method_exit (sourcenode ax')"
thus False
proof(rule method_exit_cases)
assume "sourcenode ax' = (_Exit_)"
with `valid_edge ax'` show False by(rule Exit_source)
next
fix x Q f p assume "sourcenode ax' = sourcenode x"
and "valid_edge x" and "kind x = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
from `valid_edge x` `kind x = Q\<hookleftarrow>\<^bsub>p\<^esub>f` `sourcenode ax' = sourcenode x`
`valid_edge ax'` `intra_kind (kind ax')` show False
by(fastforce dest:return_edges_only simp:intra_kind_def)
qed
qed
with IH `length (ax#asx') < length as` `(_Entry_) -ax#asx'\<rightarrow>\<^sub>\<iota>* sourcenode ax'`
`inner_node (sourcenode ax')`
obtain ns where "CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode ax')"
and "ns \<noteq> []"
and "\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes (ax#asx'))"
by blast
from `CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode ax')`
`CFG_node (sourcenode ax') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'`
have "CFG_node (_Entry_) cd-ns@[CFG_node (sourcenode ax')]\<rightarrow>\<^sub>d* CFG_node n'"
by(fastforce intro:cdSp_Append_cdep)
from `as = (ax#asx)@zs` `asx = asx'@ax'#asx''`
have "sourcenode ax' \<in> set(sourcenodes as)" by(simp add:sourcenodes_def)
with `\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes (ax#asx'))`
`as = (ax#asx)@zs` `asx = asx'@ax'#asx''`
have "\<forall>n'' \<in> set (ns@[CFG_node (sourcenode ax')]).
parent_node n'' \<in> set(sourcenodes as)"
by(fastforce simp:sourcenodes_def)
with `CFG_node (_Entry_) cd-ns@[CFG_node (sourcenode ax')]\<rightarrow>\<^sub>d* CFG_node n'`
show ?thesis by fastforce
qed
qed
qed
qed
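
text {* A corresponding result holds within a procedure: if the target node
  of a call edge reaches a different node, other than the exit node, via an
  intraprocedural CFG path, then the respective SDG nodes are connected by a
  nonempty control dependence path that again runs along the source nodes of
  the CFG path. *}
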
lemma in_proc_cdep_SDG_path:
assumes "n -as\<rightarrow>\<^sub>\<iota>* n'" and "n \<noteq> n'" and "n' \<noteq> (_Exit_)" and "valid_edge a"
and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "targetnode a = n"
obtains ns where "CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node n'"
and "ns \<noteq> []" and "\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes as)"
proof(atomize_elim)
show "\<exists>ns. CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node n' \<and>
ns \<noteq> [] \<and> (\<forall>n''\<in>set ns. parent_node n'' \<in> set (sourcenodes as))"
proof(cases "\<forall>ax. valid_edge ax \<and> sourcenode ax = n' \<longrightarrow>
ax \<notin> get_return_edges a")
case True
from `n -as\<rightarrow>\<^sub>\<iota>* n'` `n \<noteq> n'` `n' \<noteq> (_Exit_)`
`\<forall>ax. valid_edge ax \<and> sourcenode ax = n' \<longrightarrow> ax \<notin> get_return_edges a`
show "\<exists>ns. CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node n' \<and> ns \<noteq> [] \<and>
(\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes as))"
proof(induct as arbitrary:n' rule:length_induct)
fix as n'
assume IH:"\<forall>as'. length as' < length as \<longrightarrow>
(\<forall>n''. n -as'\<rightarrow>\<^sub>\<iota>* n'' \<longrightarrow> n \<noteq> n'' \<longrightarrow> n'' \<noteq> (_Exit_) \<longrightarrow>
(\<forall>ax. valid_edge ax \<and> sourcenode ax = n'' \<longrightarrow> ax \<notin> get_return_edges a) \<longrightarrow>
(\<exists>ns. CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node n'' \<and> ns \<noteq> [] \<and>
(\<forall>n''\<in>set ns. parent_node n'' \<in> set (sourcenodes as'))))"
and "n -as\<rightarrow>\<^sub>\<iota>* n'" and "n \<noteq> n'" and "n' \<noteq> (_Exit_)"
and "\<forall>ax. valid_edge ax \<and> sourcenode ax = n' \<longrightarrow> ax \<notin> get_return_edges a"
show "\<exists>ns. CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node n' \<and> ns \<noteq> [] \<and>
(\<forall>n''\<in>set ns. parent_node n'' \<in> set (sourcenodes as))"
proof(cases "method_exit n'")
case True
thus ?thesis
proof(rule method_exit_cases)
assume "n' = (_Exit_)"
with `n' \<noteq> (_Exit_)` have False by simp
thus ?thesis by simp
next
fix a' Q' f' p'
assume "n' = sourcenode a'" and "valid_edge a'" and "kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'"
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "get_proc(targetnode a) = p"
by(rule get_proc_call)
from `n -as\<rightarrow>\<^sub>\<iota>* n'` have "get_proc n = get_proc n'"
by(rule intra_path_get_procs)
with `get_proc(targetnode a) = p` `targetnode a = n`
have "get_proc (targetnode a) = get_proc n'" by simp
from `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'`
have "get_proc (sourcenode a') = p'" by(rule get_proc_return)
with `n' = sourcenode a'` `get_proc (targetnode a) = get_proc n'`
`get_proc (targetnode a) = p` have "p = p'" by simp
with `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'`
obtain ax where "valid_edge ax" and "\<exists>Q r fs. kind ax = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs"
and "a' \<in> get_return_edges ax" by(auto dest:return_needs_call)
hence "CFG_node (targetnode ax) \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (sourcenode a')"
by(fastforce intro:SDG_proc_entry_exit_cdep)
with `valid_edge ax`
have "CFG_node (targetnode ax) cd-[]@[CFG_node (targetnode ax)]\<rightarrow>\<^sub>d*
CFG_node (sourcenode a')"
by(fastforce intro:cdep_SDG_path.intros)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `valid_edge ax`
`\<exists>Q r fs. kind ax = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "targetnode a = targetnode ax"
by(fastforce intro:same_proc_call_unique_target)
from `n -as\<rightarrow>\<^sub>\<iota>* n'` `n \<noteq> n'`
have "as \<noteq> []" by(fastforce elim:path.cases simp:intra_path_def)
with `n -as\<rightarrow>\<^sub>\<iota>* n'` have "hd (sourcenodes as) = n"
by(fastforce intro:path_sourcenode simp:intra_path_def)
moreover
from `as \<noteq> []` have "hd (sourcenodes as) \<in> set (sourcenodes as)"
by(fastforce intro:hd_in_set simp:sourcenodes_def)
ultimately have "n \<in> set (sourcenodes as)" by simp
with `n' = sourcenode a'` `targetnode a = targetnode ax`
`targetnode a = n`
`CFG_node (targetnode ax) cd-[]@[CFG_node (targetnode ax)]\<rightarrow>\<^sub>d*
CFG_node (sourcenode a')`
show ?thesis by fastforce
qed
next
case False
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` obtain a'
where "a' \<in> get_return_edges a"
by(fastforce dest:get_return_edge_call)
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'"
by(fastforce dest!:call_return_edges)
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `a' \<in> get_return_edges a` obtain a''
where "valid_edge a''" and "sourcenode a'' = targetnode a"
and "targetnode a'' = sourcenode a'" and "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by -(drule intra_proc_additional_edge,auto)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
have "\<exists>ax asx zs. n -ax#asx\<rightarrow>\<^sub>\<iota>* n' \<and> n' \<notin> set (sourcenodes (ax#asx)) \<and>
as = (ax#asx)@zs"
proof(cases "n' \<in> set (sourcenodes as)")
case True
hence "\<exists>n'' \<in> set(sourcenodes as). n' = n''" by simp
then obtain ns' ns'' where "sourcenodes as = ns'@n'#ns''"
and "\<forall>n'' \<in> set ns'. n' \<noteq> n''"
by(fastforce elim!:split_list_first_propE)
from `sourcenodes as = ns'@n'#ns''` obtain xs ys ax
where "sourcenodes xs = ns'" and "as = xs@ax#ys"
and "sourcenode ax = n'"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from `\<forall>n'' \<in> set ns'. n' \<noteq> n''` `sourcenodes xs = ns'`
have "n' \<notin> set(sourcenodes xs)" by fastforce
from `n -as\<rightarrow>\<^sub>\<iota>* n'` `as = xs@ax#ys` have "n -xs@ax#ys\<rightarrow>\<^sub>\<iota>* n'" by simp
with `sourcenode ax = n'` have "n -xs\<rightarrow>\<^sub>\<iota>* n'"
by(fastforce dest:path_split simp:intra_path_def)
with `n \<noteq> n'` have "xs \<noteq> []" by(fastforce simp:intra_path_def)
with `n' \<notin> set(sourcenodes xs)` `n -xs\<rightarrow>\<^sub>\<iota>* n'` `as = xs@ax#ys` show ?thesis
by(cases xs) auto
next
case False
with `n -as\<rightarrow>\<^sub>\<iota>* n'` `n \<noteq> n'`
show ?thesis by(cases as)(auto simp:intra_path_def)
qed
then obtain ax asx zs where "n -ax#asx\<rightarrow>\<^sub>\<iota>* n'"
and "n' \<notin> set (sourcenodes (ax#asx))" and "as = (ax#asx)@zs" by blast
from `n -ax#asx\<rightarrow>\<^sub>\<iota>* n'` `n' \<noteq> (_Exit_)` have "inner_node n'"
by(fastforce intro:path_valid_node simp:inner_node_def intra_path_def)
from `valid_edge a` `targetnode a = n` have "valid_node n" by fastforce
show ?thesis
proof(cases "\<forall>a' a''. a' \<in> set asx \<and> sourcenode a' = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<longrightarrow>
n' postdominates targetnode a''")
case True
from `targetnode a = n` `sourcenode a'' = targetnode a`
`kind a'' = (\<lambda>cf. False)\<^sub>\<surd>`
have "sourcenode a'' = n" and "intra_kind(kind a'')"
by(auto simp:intra_kind_def)
{ fix as' assume "targetnode a'' -as'\<rightarrow>\<^sub>\<iota>* n'"
from `valid_edge a'` `targetnode a'' = sourcenode a'`
`a' \<in> get_return_edges a`
`\<forall>ax. valid_edge ax \<and> sourcenode ax = n' \<longrightarrow> ax \<notin> get_return_edges a`
have "targetnode a'' \<noteq> n'" by fastforce
with `targetnode a'' -as'\<rightarrow>\<^sub>\<iota>* n'` obtain ax' where "valid_edge ax'"
and "targetnode a'' = sourcenode ax'" and "intra_kind(kind ax')"
by(clarsimp simp:intra_path_def)(erule path.cases,fastforce+)
from `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` `valid_edge ax'`
`targetnode a'' = sourcenode a'` `targetnode a'' = sourcenode ax'`
`intra_kind(kind ax')`
have False by(fastforce dest:return_edges_only simp:intra_kind_def) }
hence "\<not> n' postdominates targetnode a''"
by(fastforce elim:postdominate_implies_inner_path)
from `n -ax#asx\<rightarrow>\<^sub>\<iota>* n'` have "sourcenode ax = n"
by(auto intro:path_split_Cons simp:intra_path_def)
from `n -ax#asx\<rightarrow>\<^sub>\<iota>* n'` have "n -[]@ax#asx\<rightarrow>\<^sub>\<iota>* n'" by simp
from this `sourcenode a'' = n` `sourcenode ax = n` True
`n' \<notin> set (sourcenodes (ax#asx))` `valid_edge a''` `intra_kind(kind a'')`
`inner_node n'` `\<not> method_exit n'` `\<not> n' postdominates targetnode a''`
have "n controls n'"
by(fastforce intro!:which_node_intra_standard_control_dependence_source)
hence "CFG_node n \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'"
by(fastforce intro:SDG_cdep_edge)
with `valid_node n` have "CFG_node n cd-[]@[CFG_node n]\<rightarrow>\<^sub>d* CFG_node n'"
by(fastforce intro:cdSp_Append_cdep cdSp_Nil)
moreover
from `as = (ax#asx)@zs` `sourcenode ax = n` have "n \<in> set(sourcenodes as)"
by(simp add:sourcenodes_def)
ultimately show ?thesis by fastforce
next
case False
hence "\<exists>a' \<in> set asx. \<exists>a''. sourcenode a' = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<and>
\<not> n' postdominates targetnode a''"
by fastforce
then obtain ax' asx' asx'' where "asx = asx'@ax'#asx'' \<and>
(\<exists>a''. sourcenode ax' = sourcenode a'' \<and> valid_edge a'' \<and>
intra_kind(kind a'') \<and> \<not> n' postdominates targetnode a'') \<and>
(\<forall>z \<in> set asx''. \<not> (\<exists>a''. sourcenode z = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<and>
\<not> n' postdominates targetnode a''))"
by(blast elim!:split_list_last_propE)
then obtain ai where "asx = asx'@ax'#asx''"
and "sourcenode ax' = sourcenode ai"
and "valid_edge ai" and "intra_kind(kind ai)"
and "\<not> n' postdominates targetnode ai"
and "\<forall>z \<in> set asx''. \<not> (\<exists>a''. sourcenode z = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<and>
\<not> n' postdominates targetnode a'')"
by blast
from `asx = asx'@ax'#asx''` `n -ax#asx\<rightarrow>\<^sub>\<iota>* n'`
have "n -(ax#asx')@ax'#asx''\<rightarrow>\<^sub>\<iota>* n'" by simp
from `n' \<notin> set (sourcenodes (ax#asx))` `asx = asx'@ax'#asx''`
have "n' \<notin> set (sourcenodes (ax'#asx''))"
by(auto simp:sourcenodes_def)
with `inner_node n'` `\<not> n' postdominates targetnode ai`
`n -(ax#asx')@ax'#asx''\<rightarrow>\<^sub>\<iota>* n'` `sourcenode ax' = sourcenode ai`
`\<forall>z \<in> set asx''. \<not> (\<exists>a''. sourcenode z = sourcenode a'' \<and>
valid_edge a'' \<and> intra_kind(kind a'') \<and>
\<not> n' postdominates targetnode a'')`
`valid_edge ai` `intra_kind(kind ai)` `\<not> method_exit n'`
have "sourcenode ax' controls n'"
by(fastforce intro!:which_node_intra_standard_control_dependence_source)
hence "CFG_node (sourcenode ax') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'"
by(fastforce intro:SDG_cdep_edge)
from `n -(ax#asx')@ax'#asx''\<rightarrow>\<^sub>\<iota>* n'`
have "n -ax#asx'\<rightarrow>\<^sub>\<iota>* sourcenode ax'" and "valid_edge ax'"
by(auto intro:path_split simp:intra_path_def simp del:append_Cons)
from `asx = asx'@ax'#asx''` `as = (ax#asx)@zs`
have "length (ax#asx') < length as" by simp
from `as = (ax#asx)@zs` `asx = asx'@ax'#asx''`
have "sourcenode ax' \<in> set(sourcenodes as)" by(simp add:sourcenodes_def)
show ?thesis
proof(cases "n = sourcenode ax'")
case True
with `CFG_node (sourcenode ax') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'` `valid_edge ax'`
have "CFG_node n cd-[]@[CFG_node n]\<rightarrow>\<^sub>d* CFG_node n'"
by(fastforce intro:cdSp_Append_cdep cdSp_Nil)
with `sourcenode ax' \<in> set(sourcenodes as)` True show ?thesis by fastforce
next
case False
from `valid_edge ax'` have "sourcenode ax' \<noteq> (_Exit_)"
by -(rule ccontr,fastforce elim!:Exit_source)
from `n -ax#asx'\<rightarrow>\<^sub>\<iota>* sourcenode ax'` have "n = sourcenode ax"
by(fastforce intro:path_split_Cons simp:intra_path_def)
show ?thesis
proof(cases "\<forall>ax. valid_edge ax \<and> sourcenode ax = sourcenode ax' \<longrightarrow>
ax \<notin> get_return_edges a")
case True
from `asx = asx'@ax'#asx''` `n -ax#asx\<rightarrow>\<^sub>\<iota>* n'`
have "intra_kind (kind ax')" by(simp add:intra_path_def)
have "\<not> method_exit (sourcenode ax')"
proof
assume "method_exit (sourcenode ax')"
thus False
proof(rule method_exit_cases)
assume "sourcenode ax' = (_Exit_)"
with `valid_edge ax'` show False by(rule Exit_source)
next
fix x Q f p assume "sourcenode ax' = sourcenode x"
and "valid_edge x" and "kind x = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
from `valid_edge x` `kind x = Q\<hookleftarrow>\<^bsub>p\<^esub>f` `sourcenode ax' = sourcenode x`
`valid_edge ax'` `intra_kind (kind ax')` show False
by(fastforce dest:return_edges_only simp:intra_kind_def)
qed
qed
with IH `length (ax#asx') < length as` `n -ax#asx'\<rightarrow>\<^sub>\<iota>* sourcenode ax'`
`n \<noteq> sourcenode ax'` `sourcenode ax' \<noteq> (_Exit_)` True
obtain ns where "CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode ax')"
and "ns \<noteq> []"
and "\<forall>n''\<in>set ns. parent_node n'' \<in> set (sourcenodes (ax#asx'))"
by blast
from `CFG_node n cd-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode ax')`
`CFG_node (sourcenode ax') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'`
have "CFG_node n cd-ns@[CFG_node (sourcenode ax')]\<rightarrow>\<^sub>d* CFG_node n'"
by(rule cdSp_Append_cdep)
moreover
from `\<forall>n''\<in>set ns. parent_node n'' \<in> set (sourcenodes (ax#asx'))`
`asx = asx'@ax'#asx''` `as = (ax#asx)@zs`
`sourcenode ax' \<in> set(sourcenodes as)`
have "\<forall>n''\<in>set (ns@[CFG_node (sourcenode ax')]).
parent_node n'' \<in> set (sourcenodes as)"
by(fastforce simp:sourcenodes_def)
ultimately show ?thesis by fastforce
next
case False
then obtain ai' where "valid_edge ai'"
and "sourcenode ai' = sourcenode ax'"
and "ai' \<in> get_return_edges a" by blast
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `targetnode a = n`
have "CFG_node n \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (sourcenode ax')"
by(fastforce intro!:SDG_proc_entry_exit_cdep[of _ _ _ _ _ _ ai'])
with `valid_node n`
have "CFG_node n cd-[]@[CFG_node n]\<rightarrow>\<^sub>d* CFG_node (sourcenode ax')"
by(fastforce intro:cdSp_Append_cdep cdSp_Nil)
with `CFG_node (sourcenode ax') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'`
have "CFG_node n cd-[CFG_node n]@[CFG_node (sourcenode ax')]\<rightarrow>\<^sub>d*
CFG_node n'"
by(fastforce intro:cdSp_Append_cdep)
moreover
from `sourcenode ax' \<in> set(sourcenodes as)` `n = sourcenode ax`
`as = (ax#asx)@zs`
have "\<forall>n''\<in>set ([CFG_node n]@[CFG_node (sourcenode ax')]).
parent_node n'' \<in> set (sourcenodes as)"
by(fastforce simp:sourcenodes_def)
ultimately show ?thesis by fastforce
qed
qed
qed
qed
qed
next
case False
then obtain a' where "valid_edge a'" and "sourcenode a' = n'"
and "a' \<in> get_return_edges a" by auto
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `targetnode a = n`
have "CFG_node n \<longrightarrow>\<^bsub>cd\<^esub> CFG_node n'" by(fastforce intro:SDG_proc_entry_exit_cdep)
with `valid_edge a` `targetnode a = n`[THEN sym]
have "CFG_node n cd-[]@[CFG_node n]\<rightarrow>\<^sub>d* CFG_node n'"
by(fastforce intro:cdep_SDG_path.intros)
from `n -as\<rightarrow>\<^sub>\<iota>* n'` `n \<noteq> n'` have "as \<noteq> []"
by(fastforce elim:path.cases simp:intra_path_def)
with `n -as\<rightarrow>\<^sub>\<iota>* n'` have "hd (sourcenodes as) = n"
by(fastforce intro:path_sourcenode simp:intra_path_def)
with `as \<noteq> []` have "n \<in> set (sourcenodes as)"
by(fastforce intro:hd_in_set simp:sourcenodes_def)
with `CFG_node n cd-[]@[CFG_node n]\<rightarrow>\<^sub>d* CFG_node n'`
show ?thesis by auto
qed
qed

subsection {* Paths consisting of calls and control dependences *}
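
text {* To descend into called procedures, control dependence edges are
  combined with call edges; as before, the path list records the source
  nodes of the edges traversed. *}
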
inductive call_cdep_SDG_path ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ cc-_\<rightarrow>\<^sub>d* _" [51,0,0] 80)
where ccSp_Nil:
"valid_SDG_node n \<Longrightarrow> n cc-[]\<rightarrow>\<^sub>d* n"
| ccSp_Append_cdep:
"\<lbrakk>n cc-ns\<rightarrow>\<^sub>d* n''; n'' \<longrightarrow>\<^bsub>cd\<^esub> n'\<rbrakk> \<Longrightarrow> n cc-ns@[n'']\<rightarrow>\<^sub>d* n'"
| ccSp_Append_call:
"\<lbrakk>n cc-ns\<rightarrow>\<^sub>d* n''; n'' -p\<rightarrow>\<^bsub>call\<^esub> n'\<rbrakk> \<Longrightarrow> n cc-ns@[n'']\<rightarrow>\<^sub>d* n'"
lemma cc_SDG_path_Append:
"\<lbrakk>n'' cc-ns'\<rightarrow>\<^sub>d* n'; n cc-ns\<rightarrow>\<^sub>d* n''\<rbrakk> \<Longrightarrow> n cc-ns@ns'\<rightarrow>\<^sub>d* n'"
by(induct rule:call_cdep_SDG_path.induct,
auto intro:call_cdep_SDG_path.intros simp:append_assoc[THEN sym]
simp del:append_assoc)

lemma cdep_SDG_path_cc_SDG_path:
"n cd-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> n cc-ns\<rightarrow>\<^sub>d* n'"
by(induct rule:cdep_SDG_path.induct,auto intro:call_cdep_SDG_path.intros)

lemma Entry_cc_SDG_path_to_inner_node:
assumes "valid_SDG_node n" and "parent_node n \<noteq> (_Exit_)"
obtains ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* n"
proof(atomize_elim)
obtain m where "m = parent_node n" by simp
from `valid_SDG_node n` have "valid_node (parent_node n)"
by(rule valid_SDG_CFG_node)
thus "\<exists>ns. CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* n"
proof(cases "parent_node n" rule:valid_node_cases)
case Entry
with `valid_SDG_node n` have "n = CFG_node (_Entry_)"
by(rule valid_SDG_node_parent_Entry)
with `valid_SDG_node n` show ?thesis by(fastforce intro:ccSp_Nil)
next
case Exit
with `parent_node n \<noteq> (_Exit_)` have False by simp
thus ?thesis by simp
next
case inner
with `m = parent_node n` obtain asx where "(_Entry_) -asx\<rightarrow>\<^sub>\<surd>* m"
by(fastforce dest:Entry_path inner_is_valid)
then obtain as where "(_Entry_) -as\<rightarrow>\<^sub>\<surd>* m"
and "\<forall>a' \<in> set as. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by -(erule valid_Entry_path_ascending_path,fastforce)
from `inner_node (parent_node n)` `m = parent_node n`
have "inner_node m" by simp
with `(_Entry_) -as\<rightarrow>\<^sub>\<surd>* m` `m = parent_node n` `valid_SDG_node n`
`\<forall>a' \<in> set as. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
show ?thesis
proof(induct as arbitrary:m n rule:length_induct)
fix as m n
assume IH:"\<forall>as'. length as' < length as \<longrightarrow>
(\<forall>m'. (_Entry_) -as'\<rightarrow>\<^sub>\<surd>* m' \<longrightarrow>
(\<forall>n'. m' = parent_node n' \<longrightarrow> valid_SDG_node n' \<longrightarrow>
(\<forall>a' \<in> set as'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)) \<longrightarrow>
inner_node m' \<longrightarrow> (\<exists>ns. CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* n')))"
and "(_Entry_) -as\<rightarrow>\<^sub>\<surd>* m"
and "m = parent_node n" and "valid_SDG_node n" and "inner_node m"
and "\<forall>a' \<in> set as. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
show "\<exists>ns. CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* n"
proof(cases "\<forall>a' \<in> set as. intra_kind(kind a')")
case True
with `(_Entry_) -as\<rightarrow>\<^sub>\<surd>* m` have "(_Entry_) -as\<rightarrow>\<^sub>\<iota>* m"
by(fastforce simp:intra_path_def vp_def)
have "\<not> method_exit m"
proof
assume "method_exit m"
thus False
proof(rule method_exit_cases)
assume "m = (_Exit_)"
with `inner_node m` show False by(simp add:inner_node_def)
next
fix a Q f p assume "m = sourcenode a" and "valid_edge a"
and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
from `(_Entry_) -as\<rightarrow>\<^sub>\<iota>* m` have "get_proc m = Main"
by(fastforce dest:intra_path_get_procs simp:get_proc_Entry)
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f`
have "get_proc (sourcenode a) = p" by(rule get_proc_return)
with `get_proc m = Main` `m = sourcenode a` have "p = Main" by simp
with `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` show False
by(fastforce intro:Main_no_return_source)
qed
qed
with `inner_node m` `(_Entry_) -as\<rightarrow>\<^sub>\<iota>* m`
obtain ns where "CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node m"
and "ns \<noteq> []" and "\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes as)"
by -(erule Entry_cdep_SDG_path)
then obtain n' where "n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m"
and "parent_node n' \<in> set(sourcenodes as)"
by -(erule cdep_SDG_path.cases,auto)
from `parent_node n' \<in> set(sourcenodes as)` obtain ms ms'
where "sourcenodes as = ms@(parent_node n')#ms'"
by(fastforce dest:split_list simp:sourcenodes_def)
then obtain as' a as'' where "ms = sourcenodes as'"
and "ms' = sourcenodes as''" and "as = as'@a#as''"
and "parent_node n' = sourcenode a"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
with `(_Entry_) -as\<rightarrow>\<^sub>\<iota>* m` have "(_Entry_) -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(fastforce intro:path_split simp:intra_path_def)
from `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` have "valid_SDG_node n'"
by(rule SDG_edge_valid_SDG_node)
hence n'_cases:
"n' = CFG_node (parent_node n') \<or> CFG_node (parent_node n') \<longrightarrow>\<^bsub>cd\<^esub> n'"
by(rule valid_SDG_node_cases)
show ?thesis
proof(cases "as' = []")
case True
with `(_Entry_) -as'\<rightarrow>\<^sub>\<iota>* parent_node n'` have "parent_node n' = (_Entry_)"
by(fastforce simp:intra_path_def)
from n'_cases have "\<exists>ns. CFG_node (_Entry_) cd-ns\<rightarrow>\<^sub>d* CFG_node m"
proof
assume "n' = CFG_node (parent_node n')"
with `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` `parent_node n' = (_Entry_)`
have "CFG_node (_Entry_) cd-[]@[CFG_node (_Entry_)]\<rightarrow>\<^sub>d* CFG_node m"
by -(rule cdSp_Append_cdep,rule cdSp_Nil,auto)
thus ?thesis by fastforce
next
assume "CFG_node (parent_node n') \<longrightarrow>\<^bsub>cd\<^esub> n'"
with `parent_node n' = (_Entry_)`
have "CFG_node (_Entry_) cd-[]@[CFG_node (_Entry_)]\<rightarrow>\<^sub>d* n'"
by -(rule cdSp_Append_cdep,rule cdSp_Nil,auto)
with `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m`
have "CFG_node (_Entry_) cd-[CFG_node (_Entry_)]@[n']\<rightarrow>\<^sub>d* CFG_node m"
by(fastforce intro:cdSp_Append_cdep)
thus ?thesis by fastforce
qed
then obtain ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node m"
by(fastforce intro:cdep_SDG_path_cc_SDG_path)
show ?thesis
proof(cases "n = CFG_node m")
case True
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node m`
show ?thesis by fastforce
next
case False
with `inner_node m` `valid_SDG_node n` `m = parent_node n`
have "CFG_node m \<longrightarrow>\<^bsub>cd\<^esub> n"
by(fastforce intro:SDG_parent_cdep_edge inner_is_valid)
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node m`
have "CFG_node (_Entry_) cc-ns@[CFG_node m]\<rightarrow>\<^sub>d* n"
by(fastforce intro:ccSp_Append_cdep)
thus ?thesis by fastforce
qed
next
case False
with `as = as'@a#as''` have "length as' < length as" by simp
from `(_Entry_) -as'\<rightarrow>\<^sub>\<iota>* parent_node n'` have "valid_node (parent_node n')"
by(fastforce intro:path_valid_node simp:intra_path_def)
hence "inner_node (parent_node n')"
proof(cases "parent_node n'" rule:valid_node_cases)
case Entry
with `(_Entry_) -as'\<rightarrow>\<^sub>\<iota>* (parent_node n')`
have "(_Entry_) -as'\<rightarrow>* (_Entry_)" by(fastforce simp:intra_path_def)
with False have False by fastforce
thus ?thesis by simp
next
case Exit
with `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` have "n' = CFG_node (_Exit_)"
by -(rule valid_SDG_node_parent_Exit,erule SDG_edge_valid_SDG_node,simp)
with `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` Exit have False
by simp(erule Exit_no_SDG_edge_source)
thus ?thesis by simp
next
case inner
thus ?thesis by simp
qed
from `valid_node (parent_node n')`
have "valid_SDG_node (CFG_node (parent_node n'))" by simp
from `(_Entry_) -as'\<rightarrow>\<^sub>\<iota>* (parent_node n')`
have "(_Entry_) -as'\<rightarrow>\<^sub>\<surd>* (parent_node n')"
by(rule intra_path_vp)
from `\<forall>a' \<in> set as. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
`as = as'@a#as''`
have "\<forall>a' \<in> set as'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by auto
with IH `length as' < length as` `(_Entry_) -as'\<rightarrow>\<^sub>\<surd>* (parent_node n')`
`valid_SDG_node (CFG_node (parent_node n'))` `inner_node (parent_node n')`
obtain ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (parent_node n')"
apply(erule_tac x="as'" in allE) apply clarsimp
apply(erule_tac x="(parent_node n')" in allE) apply clarsimp
apply(erule_tac x="CFG_node (parent_node n')" in allE) by clarsimp
from n'_cases have "\<exists>ns. CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* n'"
proof
assume "n' = CFG_node (parent_node n')"
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (parent_node n')`
show ?thesis by fastforce
next
assume "CFG_node (parent_node n') \<longrightarrow>\<^bsub>cd\<^esub> n'"
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (parent_node n')`
have "CFG_node (_Entry_) cc-ns@[CFG_node (parent_node n')]\<rightarrow>\<^sub>d* n'"
by(fastforce intro:ccSp_Append_cdep)
thus ?thesis by fastforce
qed
then obtain ns' where "CFG_node (_Entry_) cc-ns'\<rightarrow>\<^sub>d* n'" by blast
with `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m`
have "CFG_node (_Entry_) cc-ns'@[n']\<rightarrow>\<^sub>d* CFG_node m"
by(fastforce intro:ccSp_Append_cdep)
show ?thesis
proof(cases "n = CFG_node m")
case True
with `CFG_node (_Entry_) cc-ns'@[n']\<rightarrow>\<^sub>d* CFG_node m`
show ?thesis by fastforce
next
case False
with `inner_node m` `valid_SDG_node n` `m = parent_node n`
have "CFG_node m \<longrightarrow>\<^bsub>cd\<^esub> n"
by(fastforce intro:SDG_parent_cdep_edge inner_is_valid)
with `CFG_node (_Entry_) cc-ns'@[n']\<rightarrow>\<^sub>d* CFG_node m`
have "CFG_node (_Entry_) cc-(ns'@[n'])@[CFG_node m]\<rightarrow>\<^sub>d* n"
by(fastforce intro:ccSp_Append_cdep)
thus ?thesis by fastforce
qed
qed
next
case False
hence "\<exists>a' \<in> set as. \<not> intra_kind (kind a')" by fastforce
then obtain a as' as'' where "as = as'@a#as''" and "\<not> intra_kind (kind a)"
and "\<forall>a' \<in> set as''. intra_kind (kind a')"
by(fastforce elim!:split_list_last_propE)
from `\<forall>a' \<in> set as. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
`as = as'@a#as''` `\<not> intra_kind (kind a)`
obtain Q r p fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs"
and "\<forall>a' \<in> set as'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by auto
from `as = as'@a#as''` have "length as' < length as" by fastforce
from `(_Entry_) -as\<rightarrow>\<^sub>\<surd>* m` `as = as'@a#as''`
have "(_Entry_) -as'\<rightarrow>\<^sub>\<surd>* sourcenode a" and "valid_edge a"
and "targetnode a -as''\<rightarrow>\<^sub>\<surd>* m"
by(auto intro:vp_split)
hence "valid_SDG_node (CFG_node (sourcenode a))" by simp
have "\<exists>ns'. CFG_node (_Entry_) cc-ns'\<rightarrow>\<^sub>d* CFG_node m"
proof(cases "targetnode a = m")
case True
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs`
have "CFG_node (sourcenode a) -p\<rightarrow>\<^bsub>call\<^esub> CFG_node m"
by(fastforce intro:SDG_call_edge)
have "\<exists>ns. CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode a)"
proof(cases "as' = []")
case True
with `(_Entry_) -as'\<rightarrow>\<^sub>\<surd>* sourcenode a` have "(_Entry_) = sourcenode a"
by(fastforce simp:vp_def)
with `CFG_node (sourcenode a) -p\<rightarrow>\<^bsub>call\<^esub> CFG_node m`
have "CFG_node (_Entry_) cc-[]\<rightarrow>\<^sub>d* CFG_node (sourcenode a)"
by(fastforce intro:ccSp_Nil SDG_edge_valid_SDG_node)
thus ?thesis by fastforce
next
case False
from `valid_edge a` have "valid_node (sourcenode a)" by simp
hence "inner_node (sourcenode a)"
proof(cases "sourcenode a" rule:valid_node_cases)
case Entry
with `(_Entry_) -as'\<rightarrow>\<^sub>\<surd>* sourcenode a`
have "(_Entry_) -as'\<rightarrow>* (_Entry_)" by(fastforce simp:vp_def)
with False have False by fastforce
thus ?thesis by simp
next
case Exit
with `valid_edge a` have False by -(erule Exit_source)
thus ?thesis by simp
next
case inner
thus ?thesis by simp
qed
with IH `length as' < length as` `(_Entry_) -as'\<rightarrow>\<^sub>\<surd>* sourcenode a`
`valid_SDG_node (CFG_node (sourcenode a))`
`\<forall>a' \<in> set as'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
obtain ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode a)"
apply(erule_tac x="as'" in allE) apply clarsimp
apply(erule_tac x="sourcenode a" in allE) apply clarsimp
apply(erule_tac x="CFG_node (sourcenode a)" in allE) by clarsimp
thus ?thesis by fastforce
qed
then obtain ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (sourcenode a)"
by blast
with `CFG_node (sourcenode a) -p\<rightarrow>\<^bsub>call\<^esub> CFG_node m`
show ?thesis by(fastforce intro:ccSp_Append_call)
next
case False
from `targetnode a -as''\<rightarrow>\<^sub>\<surd>* m` `\<forall>a' \<in> set as''. intra_kind (kind a')`
have "targetnode a -as''\<rightarrow>\<^sub>\<iota>* m" by(fastforce simp:vp_def intra_path_def)
hence "get_proc (targetnode a) = get_proc m" by(rule intra_path_get_procs)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "get_proc (targetnode a) = p"
by(rule get_proc_call)
from `inner_node m` `valid_edge a` `targetnode a -as''\<rightarrow>\<^sub>\<iota>* m`
`kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `targetnode a \<noteq> m`
obtain ns where "CFG_node (targetnode a) cd-ns\<rightarrow>\<^sub>d* CFG_node m"
and "ns \<noteq> []"
and "\<forall>n'' \<in> set ns. parent_node n'' \<in> set(sourcenodes as'')"
by(fastforce elim!:in_proc_cdep_SDG_path)
then obtain n' where "n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m"
and "parent_node n' \<in> set(sourcenodes as'')"
by -(erule cdep_SDG_path.cases,auto)
from `(parent_node n') \<in> set(sourcenodes as'')` obtain ms ms'
where "sourcenodes as'' = ms@(parent_node n')#ms'"
by(fastforce dest:split_list simp:sourcenodes_def)
then obtain xs a' ys where "ms = sourcenodes xs"
and "ms' = sourcenodes ys" and "as'' = xs@a'#ys"
and "parent_node n' = sourcenode a'"
by(fastforce elim:map_append_append_maps simp:sourcenodes_def)
from `(_Entry_) -as\<rightarrow>\<^sub>\<surd>* m` `as = as'@a#as''` `as'' = xs@a'#ys`
have "(_Entry_) -(as'@a#xs)@a'#ys\<rightarrow>\<^sub>\<surd>* m" by simp
hence "(_Entry_) -as'@a#xs\<rightarrow>\<^sub>\<surd>* sourcenode a'"
and "valid_edge a'" by(auto intro:vp_split)
from `as = as'@a#as''` `as'' = xs@a'#ys`
have "length (as'@a#xs) < length as" by simp
from `valid_edge a'` have "valid_node (sourcenode a')" by simp
hence "inner_node (sourcenode a')"
proof(cases "sourcenode a'" rule:valid_node_cases)
case Entry
with `(_Entry_) -as'@a#xs\<rightarrow>\<^sub>\<surd>* sourcenode a'`
have "(_Entry_) -as'@a#xs\<rightarrow>* (_Entry_)" by(fastforce simp:vp_def)
hence False by fastforce
thus ?thesis by simp
next
case Exit
with `valid_edge a'` have False by -(erule Exit_source)
thus ?thesis by simp
next
case inner
thus ?thesis by simp
qed
from `valid_edge a'` have "valid_SDG_node (CFG_node (sourcenode a'))"
by simp
from `\<forall>a' \<in> set as. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
`as = as'@a#as''` `as'' = xs@a'#ys`
have "\<forall>a' \<in> set (as'@a#xs).
intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by auto
with IH `length (as'@a#xs) < length as`
`(_Entry_) -as'@a#xs\<rightarrow>\<^sub>\<surd>* sourcenode a'`
`valid_SDG_node (CFG_node (sourcenode a'))`
`inner_node (sourcenode a')` `parent_node n' = sourcenode a'`
obtain ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (parent_node n')"
apply(erule_tac x="as'@a#xs" in allE) apply clarsimp
apply(erule_tac x="sourcenode a'" in allE) apply clarsimp
apply(erule_tac x="CFG_node (sourcenode a')" in allE) by clarsimp
from `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` have "valid_SDG_node n'"
by(rule SDG_edge_valid_SDG_node)
hence "n' = CFG_node (parent_node n') \<or> CFG_node (parent_node n') \<longrightarrow>\<^bsub>cd\<^esub> n'"
by(rule valid_SDG_node_cases)
thus ?thesis
proof
assume "n' = CFG_node (parent_node n')"
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (parent_node n')`
`n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` show ?thesis
by(fastforce intro:ccSp_Append_cdep)
next
assume "CFG_node (parent_node n') \<longrightarrow>\<^bsub>cd\<^esub> n'"
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node (parent_node n')`
have "CFG_node (_Entry_) cc-ns@[CFG_node (parent_node n')]\<rightarrow>\<^sub>d* n'"
by(fastforce intro:ccSp_Append_cdep)
with `n' \<longrightarrow>\<^bsub>cd\<^esub> CFG_node m` show ?thesis
by(fastforce intro:ccSp_Append_cdep)
qed
qed
then obtain ns where "CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node m" by blast
show ?thesis
proof(cases "n = CFG_node m")
case True
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node m` show ?thesis by fastforce
next
case False
with `inner_node m` `valid_SDG_node n` `m = parent_node n`
have "CFG_node m \<longrightarrow>\<^bsub>cd\<^esub> n"
by(fastforce intro:SDG_parent_cdep_edge inner_is_valid)
with `CFG_node (_Entry_) cc-ns\<rightarrow>\<^sub>d* CFG_node m` show ?thesis
by(fastforce dest:ccSp_Append_cdep)
qed
qed
qed
qed
qed

subsection {* Same level paths in the SDG *}
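
text {* A matched path is a well-bracketed SDG path: whenever it enters a
  procedure via a call or parameter-in edge, it leaves it again via the
  corresponding return or parameter-out edge. Matched paths are the SDG
  analogue of same-level CFG paths. *}
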
inductive matched :: "'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
where matched_Nil:
"valid_SDG_node n \<Longrightarrow> matched n [] n"
| matched_Append_intra_SDG_path:
"\<lbrakk>matched n ns n''; n'' i-ns'\<rightarrow>\<^sub>d* n'\<rbrakk> \<Longrightarrow> matched n (ns@ns') n'"
| matched_bracket_call:
"\<lbrakk>matched n\<^sub>0 ns n\<^sub>1; n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2; matched n\<^sub>2 ns' n\<^sub>3;
(n\<^sub>3 -p\<rightarrow>\<^bsub>ret\<^esub> n\<^sub>4 \<or> n\<^sub>3 -p:V\<rightarrow>\<^bsub>out\<^esub> n\<^sub>4); valid_edge a; a' \<in> get_return_edges a;
sourcenode a = parent_node n\<^sub>1; targetnode a = parent_node n\<^sub>2;
sourcenode a' = parent_node n\<^sub>3; targetnode a' = parent_node n\<^sub>4\<rbrakk>
\<Longrightarrow> matched n\<^sub>0 (ns@n\<^sub>1#ns'@[n\<^sub>3]) n\<^sub>4"
| matched_bracket_param:
"\<lbrakk>matched n\<^sub>0 ns n\<^sub>1; n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2; matched n\<^sub>2 ns' n\<^sub>3;
n\<^sub>3 -p:V'\<rightarrow>\<^bsub>out\<^esub> n\<^sub>4; valid_edge a; a' \<in> get_return_edges a;
sourcenode a = parent_node n\<^sub>1; targetnode a = parent_node n\<^sub>2;
sourcenode a' = parent_node n\<^sub>3; targetnode a' = parent_node n\<^sub>4\<rbrakk>
\<Longrightarrow> matched n\<^sub>0 (ns@n\<^sub>1#ns'@[n\<^sub>3]) n\<^sub>4"
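
text {* Note that the two bracket rules tie the SDG edges entering and
  leaving the callee to a concrete pair of matching CFG call and return
  edges, so a matched path can never take a return that does not belong to
  the call it descended through. *}
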
lemma matched_Append:
"\<lbrakk>matched n'' ns' n'; matched n ns n''\<rbrakk> \<Longrightarrow> matched n (ns@ns') n'"
by(induct rule:matched.induct,
auto intro:matched.intros simp:append_assoc[THEN sym] simp del:append_assoc)

lemma intra_SDG_path_matched:
assumes "n i-ns\<rightarrow>\<^sub>d* n'" shows "matched n ns n'"
proof -
from `n i-ns\<rightarrow>\<^sub>d* n'` have "valid_SDG_node n"
by(rule intra_SDG_path_valid_SDG_node)
hence "matched n [] n" by(rule matched_Nil)
with `n i-ns\<rightarrow>\<^sub>d* n'` have "matched n ([]@ns) n'"
by -(rule matched_Append_intra_SDG_path)
thus ?thesis by simp
qed

lemma intra_proc_matched:
assumes "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "a' \<in> get_return_edges a"
shows "matched (CFG_node (targetnode a)) [CFG_node (targetnode a)]
(CFG_node (sourcenode a'))"
proof -
from assms have "CFG_node (targetnode a) \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (sourcenode a')"
by(fastforce intro:SDG_proc_entry_exit_cdep)
with `valid_edge a`
have "CFG_node (targetnode a) i-[]@[CFG_node (targetnode a)]\<rightarrow>\<^sub>d*
CFG_node (sourcenode a')"
by(fastforce intro:intra_SDG_path.intros)
with `valid_edge a`
have "matched (CFG_node (targetnode a)) ([]@[CFG_node (targetnode a)])
(CFG_node (sourcenode a'))"
by(fastforce intro:matched.intros)
thus ?thesis by simp
qed
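
text {* Matched paths project to CFG paths between the parent nodes: first
  to intraprocedural paths, where every bracket is short-circuited by the
  additional intraprocedural edge linking a call node with its return node,
  and then, more precisely, to same-level paths. *}
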
lemma matched_intra_CFG_path:
assumes "matched n ns n'"
obtains as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(atomize_elim)
from `matched n ns n'` show "\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(induct rule:matched.induct)
case matched_Nil thus ?case
by(fastforce dest:empty_path valid_SDG_CFG_node simp:intra_path_def)
next
case (matched_Append_intra_SDG_path n ns n'' ns' n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''` obtain as
where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' i-ns'\<rightarrow>\<^sub>d* n'` obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(fastforce elim:intra_SDG_path_intra_CFG_path)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(rule intra_path_Append)
thus ?case by fastforce
next
case (matched_bracket_call n\<^sub>0 ns n\<^sub>1 p n\<^sub>2 ns' n\<^sub>3 n\<^sub>4 V a a')
from `valid_edge a` `a' \<in> get_return_edges a` `sourcenode a = parent_node n\<^sub>1`
`targetnode a' = parent_node n\<^sub>4`
obtain a'' where "valid_edge a''" and "sourcenode a'' = parent_node n\<^sub>1"
and "targetnode a'' = parent_node n\<^sub>4" and "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(fastforce dest:call_return_node_edge)
hence "parent_node n\<^sub>1 -[a'']\<rightarrow>* parent_node n\<^sub>4" by(fastforce dest:path_edge)
moreover
from `kind a'' = (\<lambda>cf. False)\<^sub>\<surd>` have "\<forall>a \<in> set [a'']. intra_kind(kind a)"
by(fastforce simp:intra_kind_def)
ultimately have "parent_node n\<^sub>1 -[a'']\<rightarrow>\<^sub>\<iota>* parent_node n\<^sub>4"
by(auto simp:intra_path_def)
with `\<exists>as. parent_node n\<^sub>0 -as\<rightarrow>\<^sub>\<iota>* parent_node n\<^sub>1` show ?case
by(fastforce intro:intra_path_Append)
next
case (matched_bracket_param n\<^sub>0 ns n\<^sub>1 p V n\<^sub>2 ns' n\<^sub>3 V' n\<^sub>4 a a')
from `valid_edge a` `a' \<in> get_return_edges a` `sourcenode a = parent_node n\<^sub>1`
`targetnode a' = parent_node n\<^sub>4`
obtain a'' where "valid_edge a''" and "sourcenode a'' = parent_node n\<^sub>1"
and "targetnode a'' = parent_node n\<^sub>4" and "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(fastforce dest:call_return_node_edge)
hence "parent_node n\<^sub>1 -[a'']\<rightarrow>* parent_node n\<^sub>4" by(fastforce dest:path_edge)
moreover
from `kind a'' = (\<lambda>cf. False)\<^sub>\<surd>` have "\<forall>a \<in> set [a'']. intra_kind(kind a)"
by(fastforce simp:intra_kind_def)
ultimately have "parent_node n\<^sub>1 -[a'']\<rightarrow>\<^sub>\<iota>* parent_node n\<^sub>4"
by(auto simp:intra_path_def)
with `\<exists>as. parent_node n\<^sub>0 -as\<rightarrow>\<^sub>\<iota>* parent_node n\<^sub>1` show ?case
by(fastforce intro:intra_path_Append)
qed
qed

lemma matched_same_level_CFG_path:
assumes "matched n ns n'"
obtains as where "parent_node n -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n'"
proof(atomize_elim)
from `matched n ns n'`
show "\<exists>as. parent_node n -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n'"
proof(induct rule:matched.induct)
case matched_Nil thus ?case
by(fastforce dest:empty_path valid_SDG_CFG_node simp:slp_def same_level_path_def)
next
case (matched_Append_intra_SDG_path n ns n'' ns' n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n''" by blast
from `n'' i-ns'\<rightarrow>\<^sub>d* n'` obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(erule intra_SDG_path_intra_CFG_path)
from `parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'`
have "parent_node n'' -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n'" by(rule intra_path_slp)
with `parent_node n -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n'"
by(rule slp_Append)
thus ?case by fastforce
next
case (matched_bracket_call n\<^sub>0 ns n\<^sub>1 p n\<^sub>2 ns' n\<^sub>3 n\<^sub>4 V a a')
from `valid_edge a` `a' \<in> get_return_edges a`
obtain Q r p' fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs"
by(fastforce dest!:only_call_get_return_edges)
from `\<exists>as. parent_node n\<^sub>0 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>1`
obtain as where "parent_node n\<^sub>0 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>1" by blast
from `\<exists>as. parent_node n\<^sub>2 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3`
obtain as' where "parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3" by blast
from `valid_edge a` `a' \<in> get_return_edges a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'" by(fastforce dest!:call_return_edges)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3` have "same_level_path as'"
by(simp add:slp_def)
hence "same_level_path_aux ([]@[a]) as'"
by(fastforce intro:same_level_path_aux_callstack_Append simp:same_level_path_def)
from `same_level_path as'` have "upd_cs ([]@[a]) as' = ([]@[a])"
by(fastforce intro:same_level_path_upd_cs_callstack_Append
simp:same_level_path_def)
with `same_level_path_aux ([]@[a]) as'` `a' \<in> get_return_edges a`
`kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'`
have "same_level_path (a#as'@[a'])"
by(fastforce intro:same_level_path_aux_Append upd_cs_Append
simp:same_level_path_def)
from `valid_edge a'` `sourcenode a' = parent_node n\<^sub>3`
`targetnode a' = parent_node n\<^sub>4`
have "parent_node n\<^sub>3 -[a']\<rightarrow>* parent_node n\<^sub>4" by(fastforce dest:path_edge)
with `parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3`
have "parent_node n\<^sub>2 -as'@[a']\<rightarrow>* parent_node n\<^sub>4"
by(fastforce intro:path_Append simp:slp_def)
with `valid_edge a` `sourcenode a = parent_node n\<^sub>1`
`targetnode a = parent_node n\<^sub>2`
have "parent_node n\<^sub>1 -a#as'@[a']\<rightarrow>* parent_node n\<^sub>4" by -(rule Cons_path)
with `same_level_path (a#as'@[a'])`
have "parent_node n\<^sub>1 -a#as'@[a']\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>4" by(simp add:slp_def)
with `parent_node n\<^sub>0 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>1`
have "parent_node n\<^sub>0 -as@a#as'@[a']\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>4" by(rule slp_Append)
with `sourcenode a = parent_node n\<^sub>1` `sourcenode a' = parent_node n\<^sub>3`
show ?case by fastforce
next
case (matched_bracket_param n\<^sub>0 ns n\<^sub>1 p V n\<^sub>2 ns' n\<^sub>3 V' n\<^sub>4 a a')
from `valid_edge a` `a' \<in> get_return_edges a`
obtain Q r p' fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs"
by(fastforce dest!:only_call_get_return_edges)
from `\<exists>as. parent_node n\<^sub>0 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>1`
obtain as where "parent_node n\<^sub>0 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>1" by blast
from `\<exists>as. parent_node n\<^sub>2 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3`
obtain as' where "parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3" by blast
from `valid_edge a` `a' \<in> get_return_edges a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'" by(fastforce dest!:call_return_edges)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3` have "same_level_path as'"
by(simp add:slp_def)
hence "same_level_path_aux ([]@[a]) as'"
by(fastforce intro:same_level_path_aux_callstack_Append simp:same_level_path_def)
from `same_level_path as'` have "upd_cs ([]@[a]) as' = ([]@[a])"
by(fastforce intro:same_level_path_upd_cs_callstack_Append
simp:same_level_path_def)
with `same_level_path_aux ([]@[a]) as'` `a' \<in> get_return_edges a`
`kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `kind a' = Q'\<hookleftarrow>\<^bsub>p'\<^esub>f'`
have "same_level_path (a#as'@[a'])"
by(fastforce intro:same_level_path_aux_Append upd_cs_Append
simp:same_level_path_def)
from `valid_edge a'` `sourcenode a' = parent_node n\<^sub>3`
`targetnode a' = parent_node n\<^sub>4`
have "parent_node n\<^sub>3 -[a']\<rightarrow>* parent_node n\<^sub>4" by(fastforce dest:path_edge)
with `parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3`
have "parent_node n\<^sub>2 -as'@[a']\<rightarrow>* parent_node n\<^sub>4"
by(fastforce intro:path_Append simp:slp_def)
with `valid_edge a` `sourcenode a = parent_node n\<^sub>1`
`targetnode a = parent_node n\<^sub>2`
have "parent_node n\<^sub>1 -a#as'@[a']\<rightarrow>* parent_node n\<^sub>4" by -(rule Cons_path)
with `same_level_path (a#as'@[a'])`
have "parent_node n\<^sub>1 -a#as'@[a']\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>4" by(simp add:slp_def)
with `parent_node n\<^sub>0 -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>1`
have "parent_node n\<^sub>0 -as@a#as'@[a']\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>4" by(rule slp_Append)
with `sourcenode a = parent_node n\<^sub>1` `sourcenode a' = parent_node n\<^sub>3`
show ?case by fastforce
qed
qed
subsection {* Realizable paths in the SDG *}
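text {* Intuitively, a realizable path is a matched path that may additionally
  contain unmatched call and parameter-in edges, i.e. calls that have been
  entered but not yet left. Accordingly, it corresponds to a valid, though not
  necessarily same-level, path in the CFG (see realizable_valid_CFG_path
  below). *}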
inductive realizable ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
where realizable_matched:"matched n ns n' \<Longrightarrow> realizable n ns n'"
| realizable_call:
"\<lbrakk>realizable n\<^sub>0 ns n\<^sub>1; n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2 \<or> n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2; matched n\<^sub>2 ns' n\<^sub>3\<rbrakk>
\<Longrightarrow> realizable n\<^sub>0 (ns@n\<^sub>1#ns') n\<^sub>3"
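text {* Appending a matched path to a realizable path again yields a
  realizable path: *}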
lemma realizable_Append_matched:
"\<lbrakk>realizable n ns n''; matched n'' ns' n'\<rbrakk> \<Longrightarrow> realizable n (ns@ns') n'"
proof(induct rule:realizable.induct)
case (realizable_matched n ns n'')
from `matched n'' ns' n'` `matched n ns n''` have "matched n (ns@ns') n'"
by(rule matched_Append)
thus ?case by(rule realizable.realizable_matched)
next
case (realizable_call n\<^sub>0 ns n\<^sub>1 p n\<^sub>2 V ns'' n\<^sub>3)
from `matched n\<^sub>3 ns' n'` `matched n\<^sub>2 ns'' n\<^sub>3` have "matched n\<^sub>2 (ns''@ns') n'"
by(rule matched_Append)
with `realizable n\<^sub>0 ns n\<^sub>1` `n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2 \<or> n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2`
have "realizable n\<^sub>0 (ns@n\<^sub>1#(ns''@ns')) n'"
by(rule realizable.realizable_call)
thus ?case by simp
qed
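text {* Every realizable SDG path can be simulated by a valid CFG path
  between the corresponding parent nodes: *}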
lemma realizable_valid_CFG_path:
assumes "realizable n ns n'"
obtains as where "parent_node n -as\<rightarrow>\<^sub>\<surd>* parent_node n'"
proof(atomize_elim)
from `realizable n ns n'`
show "\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<surd>* parent_node n'"
proof(induct rule:realizable.induct)
case (realizable_matched n ns n')
from `matched n ns n'` obtain as where "parent_node n -as\<rightarrow>\<^bsub>sl\<^esub>* parent_node n'"
by(erule matched_same_level_CFG_path)
thus ?case by(fastforce intro:slp_vp)
next
case (realizable_call n\<^sub>0 ns n\<^sub>1 p n\<^sub>2 V ns' n\<^sub>3)
from `\<exists>as. parent_node n\<^sub>0 -as\<rightarrow>\<^sub>\<surd>* parent_node n\<^sub>1`
obtain as where "parent_node n\<^sub>0 -as\<rightarrow>\<^sub>\<surd>* parent_node n\<^sub>1" by blast
from `matched n\<^sub>2 ns' n\<^sub>3` obtain as' where "parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3"
by(erule matched_same_level_CFG_path)
from `n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2 \<or> n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2`
obtain a Q r fs where "valid_edge a"
and "sourcenode a = parent_node n\<^sub>1" and "targetnode a = parent_node n\<^sub>2"
and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" by(fastforce elim:SDG_edge.cases)+
hence "parent_node n\<^sub>1 -[a]\<rightarrow>* parent_node n\<^sub>2"
by(fastforce dest:path_edge)
from `parent_node n\<^sub>0 -as\<rightarrow>\<^sub>\<surd>* parent_node n\<^sub>1`
have "parent_node n\<^sub>0 -as\<rightarrow>* parent_node n\<^sub>1" and "valid_path as"
by(simp_all add:vp_def)
with `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "valid_path (as@[a])"
by(fastforce elim:valid_path_aux_Append simp:valid_path_def)
moreover
from `parent_node n\<^sub>0 -as\<rightarrow>* parent_node n\<^sub>1` `parent_node n\<^sub>1 -[a]\<rightarrow>* parent_node n\<^sub>2`
have "parent_node n\<^sub>0 -as@[a]\<rightarrow>* parent_node n\<^sub>2" by(rule path_Append)
ultimately have "parent_node n\<^sub>0 -as@[a]\<rightarrow>\<^sub>\<surd>* parent_node n\<^sub>2" by(simp add:vp_def)
with `parent_node n\<^sub>2 -as'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n\<^sub>3`
have "parent_node n\<^sub>0 -(as@[a])@as'\<rightarrow>\<^sub>\<surd>* parent_node n\<^sub>3" by -(rule vp_slp_Append)
with `sourcenode a = parent_node n\<^sub>1` show ?case by fastforce
qed
qed
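text {* Paths consisting only of call and control dependence edges are
  realizable: *}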
lemma cdep_SDG_path_realizable:
"n cc-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> realizable n ns n'"
proof(induct rule:call_cdep_SDG_path.induct)
case (ccSp_Nil n)
from `valid_SDG_node n` show ?case
by(fastforce intro:realizable_matched matched_Nil)
next
case (ccSp_Append_cdep n ns n'' n')
from `n'' \<longrightarrow>\<^bsub>cd\<^esub> n'` have "valid_SDG_node n''" by(rule SDG_edge_valid_SDG_node)
hence "matched n'' [] n''" by(rule matched_Nil)
from `n'' \<longrightarrow>\<^bsub>cd\<^esub> n'` `valid_SDG_node n''`
have "n'' i-[]@[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce intro:iSp_Append_cdep iSp_Nil)
with `matched n'' [] n''` have "matched n'' ([]@[n'']) n'"
by(fastforce intro:matched_Append_intra_SDG_path)
with `realizable n ns n''` show ?case
by(fastforce intro:realizable_Append_matched)
next
case (ccSp_Append_call n ns n'' p n')
from `n'' -p\<rightarrow>\<^bsub>call\<^esub> n'` have "valid_SDG_node n'" by(rule SDG_edge_valid_SDG_node)
hence "matched n' [] n'" by(rule matched_Nil)
with `realizable n ns n''` `n'' -p\<rightarrow>\<^bsub>call\<^esub> n'`
show ?case by(fastforce intro:realizable_call)
qed
subsection {* SDG with summary edges *}
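text {* The SDG with summary edges extends the SDG edges by two additional
  kinds of edges: a summary edge from the source node of a call edge to the
  target node of a matching return edge, and a summary edge from an actual-in
  node to an actual-out node whenever a matched path connects the
  corresponding formal-in and formal-out nodes. The additional boolean
  parameter of sum_SDG_edge is True exactly for these summary edges; for
  False, the relation coincides with SDG_edge, as proved below. *}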
inductive sum_cdep_edge :: "'node SDG_node \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s\<longrightarrow>\<^bsub>cd\<^esub> _" [51,0] 80)
and sum_ddep_edge :: "'node SDG_node \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s-_\<rightarrow>\<^bsub>dd\<^esub> _" [51,0,0] 80)
and sum_call_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s-_\<rightarrow>\<^bsub>call\<^esub> _" [51,0,0] 80)
and sum_return_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s-_\<rightarrow>\<^bsub>ret\<^esub> _" [51,0,0] 80)
and sum_param_in_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s-_:_\<rightarrow>\<^bsub>in\<^esub> _" [51,0,0,0] 80)
and sum_param_out_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'var \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s-_:_\<rightarrow>\<^bsub>out\<^esub> _" [51,0,0,0] 80)
and sum_summary_edge :: "'node SDG_node \<Rightarrow> 'pname \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ s-_\<rightarrow>\<^bsub>sum\<^esub> _" [51,0] 80)
and sum_SDG_edge :: "'node SDG_node \<Rightarrow> 'var option \<Rightarrow>
('pname \<times> bool) option \<Rightarrow> bool \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
where
(* Syntax *)
"n s\<longrightarrow>\<^bsub>cd\<^esub> n' == sum_SDG_edge n None None False n'"
| "n s-V\<rightarrow>\<^bsub>dd\<^esub> n' == sum_SDG_edge n (Some V) None False n'"
| "n s-p\<rightarrow>\<^bsub>call\<^esub> n' == sum_SDG_edge n None (Some(p,True)) False n'"
| "n s-p\<rightarrow>\<^bsub>ret\<^esub> n' == sum_SDG_edge n None (Some(p,False)) False n'"
| "n s-p:V\<rightarrow>\<^bsub>in\<^esub> n' == sum_SDG_edge n (Some V) (Some(p,True)) False n'"
| "n s-p:V\<rightarrow>\<^bsub>out\<^esub> n' == sum_SDG_edge n (Some V) (Some(p,False)) False n'"
| "n s-p\<rightarrow>\<^bsub>sum\<^esub> n' == sum_SDG_edge n None (Some(p,True)) True n'"
(* Rules *)
| sum_SDG_cdep_edge:
"\<lbrakk>n = CFG_node m; n' = CFG_node m'; m controls m'\<rbrakk> \<Longrightarrow> n s\<longrightarrow>\<^bsub>cd\<^esub> n'"
| sum_SDG_proc_entry_exit_cdep:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; n = CFG_node (targetnode a);
a' \<in> get_return_edges a; n' = CFG_node (sourcenode a')\<rbrakk> \<Longrightarrow> n s\<longrightarrow>\<^bsub>cd\<^esub> n'"
| sum_SDG_parent_cdep_edge:
"\<lbrakk>valid_SDG_node n'; m = parent_node n'; n = CFG_node m; n \<noteq> n'\<rbrakk>
\<Longrightarrow> n s\<longrightarrow>\<^bsub>cd\<^esub> n'"
| sum_SDG_ddep_edge:"n influences V in n' \<Longrightarrow> n s-V\<rightarrow>\<^bsub>dd\<^esub> n'"
| sum_SDG_call_edge:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; n = CFG_node (sourcenode a);
n' = CFG_node (targetnode a)\<rbrakk> \<Longrightarrow> n s-p\<rightarrow>\<^bsub>call\<^esub> n'"
| sum_SDG_return_edge:
"\<lbrakk>valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>fs; n = CFG_node (sourcenode a);
n' = CFG_node (targetnode a)\<rbrakk> \<Longrightarrow> n s-p\<rightarrow>\<^bsub>ret\<^esub> n'"
| sum_SDG_param_in_edge:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; (p,ins,outs) \<in> set procs; V = ins!x;
x < length ins; n = Actual_in (sourcenode a,x); n' = Formal_in (targetnode a,x)\<rbrakk>
\<Longrightarrow> n s-p:V\<rightarrow>\<^bsub>in\<^esub> n'"
| sum_SDG_param_out_edge:
"\<lbrakk>valid_edge a; kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f; (p,ins,outs) \<in> set procs; V = outs!x;
x < length outs; n = Formal_out (sourcenode a,x);
n' = Actual_out (targetnode a,x)\<rbrakk>
\<Longrightarrow> n s-p:V\<rightarrow>\<^bsub>out\<^esub> n'"
| sum_SDG_call_summary_edge:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; a' \<in> get_return_edges a;
n = CFG_node (sourcenode a); n' = CFG_node (targetnode a')\<rbrakk>
\<Longrightarrow> n s-p\<rightarrow>\<^bsub>sum\<^esub> n'"
| sum_SDG_param_summary_edge:
"\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; a' \<in> get_return_edges a;
matched (Formal_in (targetnode a,x)) ns (Formal_out (sourcenode a',x'));
n = Actual_in (sourcenode a,x); n' = Actual_out (targetnode a',x');
(p,ins,outs) \<in> set procs; x < length ins; x' < length outs\<rbrakk>
\<Longrightarrow> n s-p\<rightarrow>\<^bsub>sum\<^esub> n'"
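text {* Case distinction on the two rules that introduce summary edges: *}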
lemma sum_edge_cases:
"\<lbrakk>n s-p\<rightarrow>\<^bsub>sum\<^esub> n';
\<And>a Q r fs a'. \<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; a' \<in> get_return_edges a;
n = CFG_node (sourcenode a); n' = CFG_node (targetnode a')\<rbrakk> \<Longrightarrow> P;
\<And>a Q p r fs a' ns x x' ins outs.
\<lbrakk>valid_edge a; kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs; a' \<in> get_return_edges a;
matched (Formal_in (targetnode a,x)) ns (Formal_out (sourcenode a',x'));
n = Actual_in (sourcenode a,x); n' = Actual_out (targetnode a',x');
(p,ins,outs) \<in> set procs; x < length ins; x' < length outs\<rbrakk> \<Longrightarrow> P\<rbrakk>
\<Longrightarrow> P"
by -(erule sum_SDG_edge.cases,auto)
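text {* With the summary flag set to False, sum-SDG edges and SDG edges
  coincide: *}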
lemma SDG_edge_sum_SDG_edge:
"SDG_edge n Vopt popt n' \<Longrightarrow> sum_SDG_edge n Vopt popt False n'"
by(induct rule:SDG_edge.induct,auto intro:sum_SDG_edge.intros)
lemma sum_SDG_edge_SDG_edge:
"sum_SDG_edge n Vopt popt False n' \<Longrightarrow> SDG_edge n Vopt popt n'"
by(induct n Vopt popt x\<equiv>"False" n' rule:sum_SDG_edge.induct,
auto intro:SDG_edge.intros)
lemma sum_SDG_edge_valid_SDG_node:
assumes "sum_SDG_edge n Vopt popt b n'"
shows "valid_SDG_node n" and "valid_SDG_node n'"
proof -
have "valid_SDG_node n \<and> valid_SDG_node n'"
proof(cases b)
case True
with `sum_SDG_edge n Vopt popt b n'` show ?thesis
proof(induct rule:sum_SDG_edge.induct)
case (sum_SDG_call_summary_edge a Q r p f a' n n')
from `valid_edge a` `n = CFG_node (sourcenode a)`
have "valid_SDG_node n" by fastforce
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
with `n' = CFG_node (targetnode a')` have "valid_SDG_node n'" by fastforce
with `valid_SDG_node n` show ?case by simp
next
case (sum_SDG_param_summary_edge a Q r p fs a' x ns x' n n' ins outs)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `n = Actual_in (sourcenode a,x)`
`(p,ins,outs) \<in> set procs` `x < length ins`
have "valid_SDG_node n" by fastforce
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `a' \<in> get_return_edges a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
with `valid_edge a'` `n' = Actual_out (targetnode a',x')`
`(p,ins,outs) \<in> set procs` `x' < length outs`
have "valid_SDG_node n'" by fastforce
with `valid_SDG_node n` show ?case by simp
qed simp_all
next
case False
with `sum_SDG_edge n Vopt popt b n'` have "SDG_edge n Vopt popt n'"
by(fastforce intro:sum_SDG_edge_SDG_edge)
thus ?thesis by(fastforce intro:SDG_edge_valid_SDG_node)
qed
thus "valid_SDG_node n" and "valid_SDG_node n'" by simp_all
qed
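text {* The exit node is neither the source nor the target of any sum-SDG
  edge: *}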
lemma Exit_no_sum_SDG_edge_source:
assumes "sum_SDG_edge (CFG_node (_Exit_)) Vopt popt b n'" shows "False"
proof(cases b)
case True
with `sum_SDG_edge (CFG_node (_Exit_)) Vopt popt b n'` show ?thesis
proof(induct "CFG_node (_Exit_)" Vopt popt b n' rule:sum_SDG_edge.induct)
case (sum_SDG_call_summary_edge a Q r p f a' n')
from `CFG_node (_Exit_) = CFG_node (sourcenode a)`
have "sourcenode a = (_Exit_)" by simp
with `valid_edge a` show ?case by(rule Exit_source)
next
case (sum_SDG_param_summary_edge a Q r p f a' x ns x' n' ins outs)
thus ?case by simp
qed simp_all
next
case False
with `sum_SDG_edge (CFG_node (_Exit_)) Vopt popt b n'`
have "SDG_edge (CFG_node (_Exit_)) Vopt popt n'"
by(fastforce intro:sum_SDG_edge_SDG_edge)
thus ?thesis by(fastforce intro:Exit_no_SDG_edge_source)
qed
lemma Exit_no_sum_SDG_edge_target:
"sum_SDG_edge n Vopt popt b (CFG_node (_Exit_)) \<Longrightarrow> False"
proof(induct "CFG_node (_Exit_)" rule:sum_SDG_edge.induct)
case (sum_SDG_cdep_edge n m m')
from `m controls m'` `CFG_node (_Exit_) = CFG_node m'`
have "m controls (_Exit_)" by simp
hence False by(fastforce dest:Exit_not_control_dependent)
thus ?case by simp
next
case (sum_SDG_proc_entry_exit_cdep a Q r p f n a')
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
moreover
from `CFG_node (_Exit_) = CFG_node (sourcenode a')`
have "sourcenode a' = (_Exit_)" by simp
ultimately have False by(rule Exit_source)
thus ?case by simp
next
case (sum_SDG_ddep_edge n V) thus ?case
by(fastforce elim:SDG_Use.cases simp:data_dependence_def)
next
case (sum_SDG_call_edge a Q r p fs n)
from `CFG_node (_Exit_) = CFG_node (targetnode a)`
have "targetnode a = (_Exit_)" by simp
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have "get_proc (_Exit_) = p"
by(fastforce intro:get_proc_call)
hence "p = Main" by(simp add:get_proc_Exit)
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` have False
by(fastforce intro:Main_no_call_target)
thus ?case by simp
next
case (sum_SDG_return_edge a Q p f n)
from `CFG_node (_Exit_) = CFG_node (targetnode a)`
have "targetnode a = (_Exit_)" by simp
with `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` have False by(rule Exit_no_return_target)
thus ?case by simp
next
case (sum_SDG_call_summary_edge a Q r p fs a' n)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `a' \<in> get_return_edges a`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
from `CFG_node (_Exit_) = CFG_node (targetnode a')`
have "targetnode a' = (_Exit_)" by simp
with `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` have False by(rule Exit_no_return_target)
thus ?case by simp
qed simp+
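text {* Every summary edge for procedure p can be unfolded into a matched
  path whose last node belongs to p: *}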
lemma sum_SDG_summary_edge_matched:
assumes "n s-p\<rightarrow>\<^bsub>sum\<^esub> n'"
obtains ns where "matched n ns n'" and "n \<in> set ns"
and "get_proc (parent_node(last ns)) = p"
proof(atomize_elim)
from `n s-p\<rightarrow>\<^bsub>sum\<^esub> n'`
show "\<exists>ns. matched n ns n' \<and> n \<in> set ns \<and> get_proc (parent_node(last ns)) = p"
proof(induct n "None::'var option" "Some(p,True)" "True" n'
rule:sum_SDG_edge.induct)
case (sum_SDG_call_summary_edge a Q r fs a' n n')
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `n = CFG_node (sourcenode a)`
have "n -p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a)" by(fastforce intro:SDG_call_edge)
hence "valid_SDG_node n" by(rule SDG_edge_valid_SDG_node)
hence "matched n [] n" by(rule matched_Nil)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `a' \<in> get_return_edges a`
have matched:"matched (CFG_node (targetnode a)) [CFG_node (targetnode a)]
(CFG_node (sourcenode a'))" by(rule intra_proc_matched)
from `valid_edge a` `a' \<in> get_return_edges a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
with `valid_edge a'` have "get_proc (sourcenode a') = p" by(rule get_proc_return)
from `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` `n' = CFG_node (targetnode a')`
have "CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>ret\<^esub> n'" by(fastforce intro:SDG_return_edge)
from `matched n [] n` `n -p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a)` matched
`CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>ret\<^esub> n'` `a' \<in> get_return_edges a`
`n = CFG_node (sourcenode a)` `n' = CFG_node (targetnode a')` `valid_edge a`
have "matched n ([]@n#[CFG_node (targetnode a)]@[CFG_node (sourcenode a')]) n'"
by(fastforce intro:matched_bracket_call)
with `get_proc (sourcenode a') = p` show ?case by auto
next
case (sum_SDG_param_summary_edge a Q r fs a' x ns x' n n' ins outs)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `(p,ins,outs) \<in> set procs`
`x < length ins` `n = Actual_in (sourcenode a,x)`
have "n -p:ins!x\<rightarrow>\<^bsub>in\<^esub> Formal_in (targetnode a,x)"
by(fastforce intro:SDG_param_in_edge)
hence "valid_SDG_node n" by(rule SDG_edge_valid_SDG_node)
hence "matched n [] n" by(rule matched_Nil)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `a' \<in> get_return_edges a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
with `valid_edge a'` have "get_proc (sourcenode a') = p" by(rule get_proc_return)
from `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` `(p,ins,outs) \<in> set procs`
`x' < length outs` `n' = Actual_out (targetnode a',x')`
have "Formal_out (sourcenode a',x') -p:outs!x'\<rightarrow>\<^bsub>out\<^esub> n'"
by(fastforce intro:SDG_param_out_edge)
from `matched n [] n` `n -p:ins!x\<rightarrow>\<^bsub>in\<^esub> Formal_in (targetnode a,x)`
`matched (Formal_in (targetnode a,x)) ns (Formal_out (sourcenode a',x'))`
`Formal_out (sourcenode a',x') -p:outs!x'\<rightarrow>\<^bsub>out\<^esub> n'`
`a' \<in> get_return_edges a` `n = Actual_in (sourcenode a,x)`
`n' = Actual_out (targetnode a',x')` `valid_edge a`
have "matched n ([]@n#ns@[Formal_out (sourcenode a',x')]) n'"
by(fastforce intro:matched_bracket_param)
with `get_proc (sourcenode a') = p` show ?case by auto
qed simp_all
qed
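text {* Every return edge determines a corresponding call edge, and thus also
  a summary edge from the source of the call to the target of the return: *}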
lemma return_edge_determines_call_and_sum_edge:
assumes "valid_edge a" and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
obtains a' Q' r' fs' where "a \<in> get_return_edges a'" and "valid_edge a'"
and "kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'"
and "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)"
proof(atomize_elim)
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f`
have "CFG_node (sourcenode a) s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (targetnode a)"
by(fastforce intro:sum_SDG_return_edge)
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f`
obtain a' Q' r' fs' where "valid_edge a'" and "kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'"
and "a \<in> get_return_edges a'" by(blast dest:return_needs_call)
hence "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a')"
by(fastforce intro:sum_SDG_call_edge)
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'` `valid_edge a` `a \<in> get_return_edges a'`
have "CFG_node (targetnode a') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (sourcenode a)"
by(fastforce intro!:SDG_proc_entry_exit_cdep)
hence "valid_SDG_node (CFG_node (targetnode a'))"
by(rule SDG_edge_valid_SDG_node)
with `CFG_node (targetnode a') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (sourcenode a)`
have "CFG_node (targetnode a') i-[]@[CFG_node (targetnode a')]\<rightarrow>\<^sub>d*
CFG_node (sourcenode a)"
by(fastforce intro:iSp_Append_cdep iSp_Nil)
from `valid_SDG_node (CFG_node (targetnode a'))`
have "matched (CFG_node (targetnode a')) [] (CFG_node (targetnode a'))"
by(rule matched_Nil)
with `CFG_node (targetnode a') i-[]@[CFG_node (targetnode a')]\<rightarrow>\<^sub>d*
CFG_node (sourcenode a)`
have "matched (CFG_node (targetnode a')) ([]@[CFG_node (targetnode a')])
(CFG_node (sourcenode a))"
by(fastforce intro:matched_Append_intra_SDG_path)
with `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'` `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f`
`a \<in> get_return_edges a'`
have "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)"
by(fastforce intro!:sum_SDG_call_summary_edge)
with `a \<in> get_return_edges a'` `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
show "\<exists>a' Q' r' fs'. a \<in> get_return_edges a' \<and> valid_edge a' \<and>
kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs' \<and> CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)"
by fastforce
qed
subsection {* Paths consisting of intraprocedural and summary edges in the SDG *}
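text {* An is-SDG path consists of control dependence, data dependence and
  summary edges only. Since summary edges bridge entire procedure calls, such
  a path can be simulated by an intraprocedural CFG path, as the projection
  lemmas below show. *}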
inductive intra_sum_SDG_path ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ is-_\<rightarrow>\<^sub>d* _" [51,0,0] 80)
where isSp_Nil:
"valid_SDG_node n \<Longrightarrow> n is-[]\<rightarrow>\<^sub>d* n"
| isSp_Append_cdep:
"\<lbrakk>n is-ns\<rightarrow>\<^sub>d* n''; n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'\<rbrakk> \<Longrightarrow> n is-ns@[n'']\<rightarrow>\<^sub>d* n'"
| isSp_Append_ddep:
"\<lbrakk>n is-ns\<rightarrow>\<^sub>d* n''; n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'; n'' \<noteq> n'\<rbrakk> \<Longrightarrow> n is-ns@[n'']\<rightarrow>\<^sub>d* n'"
| isSp_Append_sum:
"\<lbrakk>n is-ns\<rightarrow>\<^sub>d* n''; n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'\<rbrakk> \<Longrightarrow> n is-ns@[n'']\<rightarrow>\<^sub>d* n'"
lemma is_SDG_path_Append:
"\<lbrakk>n'' is-ns'\<rightarrow>\<^sub>d* n'; n is-ns\<rightarrow>\<^sub>d* n''\<rbrakk> \<Longrightarrow> n is-ns@ns'\<rightarrow>\<^sub>d* n'"
by(induct rule:intra_sum_SDG_path.induct,
auto intro:intra_sum_SDG_path.intros simp:append_assoc[THEN sym]
simp del:append_assoc)
lemma is_SDG_path_valid_SDG_node:
assumes "n is-ns\<rightarrow>\<^sub>d* n'" shows "valid_SDG_node n" and "valid_SDG_node n'"
using `n is-ns\<rightarrow>\<^sub>d* n'`
by(induct rule:intra_sum_SDG_path.induct,
auto intro:sum_SDG_edge_valid_SDG_node valid_SDG_CFG_node)
lemma intra_SDG_path_is_SDG_path:
"n i-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> n is-ns\<rightarrow>\<^sub>d* n'"
by(induct rule:intra_SDG_path.induct,
auto intro:intra_sum_SDG_path.intros SDG_edge_sum_SDG_edge)
lemma is_SDG_path_hd:"\<lbrakk>n is-ns\<rightarrow>\<^sub>d* n'; ns \<noteq> []\<rbrakk> \<Longrightarrow> hd ns = n"
apply(induct rule:intra_sum_SDG_path.induct) apply clarsimp
by(case_tac ns,auto elim:intra_sum_SDG_path.cases)+
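text {* A reverse induction principle for intra-sum SDG paths that consumes
  the path node by node from the front: *}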
lemma intra_sum_SDG_path_rev_induct [consumes 1, case_names "isSp_Nil"
"isSp_Cons_cdep" "isSp_Cons_ddep" "isSp_Cons_sum"]:
assumes "n is-ns\<rightarrow>\<^sub>d* n'"
and refl:"\<And>n. valid_SDG_node n \<Longrightarrow> P n [] n"
and step_cdep:"\<And>n ns n' n''. \<lbrakk>n s\<longrightarrow>\<^bsub>cd\<^esub> n''; n'' is-ns\<rightarrow>\<^sub>d* n'; P n'' ns n'\<rbrakk>
\<Longrightarrow> P n (n#ns) n'"
and step_ddep:"\<And>n ns n' V n''. \<lbrakk>n s-V\<rightarrow>\<^bsub>dd\<^esub> n''; n \<noteq> n''; n'' is-ns\<rightarrow>\<^sub>d* n';
P n'' ns n'\<rbrakk> \<Longrightarrow> P n (n#ns) n'"
and step_sum:"\<And>n ns n' p n''. \<lbrakk>n s-p\<rightarrow>\<^bsub>sum\<^esub> n''; n'' is-ns\<rightarrow>\<^sub>d* n'; P n'' ns n'\<rbrakk>
\<Longrightarrow> P n (n#ns) n'"
shows "P n ns n'"
using `n is-ns\<rightarrow>\<^sub>d* n'`
proof(induct ns arbitrary:n)
case Nil thus ?case by(fastforce elim:intra_sum_SDG_path.cases intro:refl)
next
case (Cons nx nsx)
note IH = `\<And>n. n is-nsx\<rightarrow>\<^sub>d* n' \<Longrightarrow> P n nsx n'`
from `n is-nx#nsx\<rightarrow>\<^sub>d* n'` have [simp]:"n = nx"
by(fastforce dest:is_SDG_path_hd)
from `n is-nx#nsx\<rightarrow>\<^sub>d* n'` have "((\<exists>n''. n s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<and> n'' is-nsx\<rightarrow>\<^sub>d* n') \<or>
(\<exists>n'' V. n s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> n \<noteq> n'' \<and> n'' is-nsx\<rightarrow>\<^sub>d* n')) \<or>
(\<exists>n'' p. n s-p\<rightarrow>\<^bsub>sum\<^esub> n'' \<and> n'' is-nsx\<rightarrow>\<^sub>d* n')"
proof(induct nsx arbitrary:n' rule:rev_induct)
case Nil
from `n is-[nx]\<rightarrow>\<^sub>d* n'` have "n is-[]\<rightarrow>\<^sub>d* nx"
and disj:"nx s\<longrightarrow>\<^bsub>cd\<^esub> n' \<or> (\<exists>V. nx s-V\<rightarrow>\<^bsub>dd\<^esub> n' \<and> nx \<noteq> n') \<or> (\<exists>p. nx s-p\<rightarrow>\<^bsub>sum\<^esub> n')"
by(induct n ns\<equiv>"[nx]" n' rule:intra_sum_SDG_path.induct,auto)
from `n is-[]\<rightarrow>\<^sub>d* nx` have [simp]:"n = nx"
by(fastforce elim:intra_sum_SDG_path.cases)
from disj have "valid_SDG_node n'" by(fastforce intro:sum_SDG_edge_valid_SDG_node)
hence "n' is-[]\<rightarrow>\<^sub>d* n'" by(rule isSp_Nil)
with disj show ?case by fastforce
next
case (snoc x xs)
note `\<And>n'. n is-nx # xs\<rightarrow>\<^sub>d* n' \<Longrightarrow>
((\<exists>n''. n s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n') \<or>
(\<exists>n'' V. n s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> n \<noteq> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')) \<or>
(\<exists>n'' p. n s-p\<rightarrow>\<^bsub>sum\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')`
with `n is-nx#xs@[x]\<rightarrow>\<^sub>d* n'` show ?case
proof(induct n "nx#xs@[x]" n' rule:intra_sum_SDG_path.induct)
case (isSp_Append_cdep m ms m'' n')
note IH = `\<And>n'. m is-nx # xs\<rightarrow>\<^sub>d* n' \<Longrightarrow>
((\<exists>n''. m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n') \<or>
(\<exists>n'' V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')) \<or>
(\<exists>n'' p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')`
from `ms @ [m''] = nx#xs@[x]` have [simp]:"ms = nx#xs"
and [simp]:"m'' = x" by simp_all
from `m is-ms\<rightarrow>\<^sub>d* m''` have "m is-nx#xs\<rightarrow>\<^sub>d* m''" by simp
from IH[OF this] obtain n'' where "n'' is-xs\<rightarrow>\<^sub>d* m''"
and "(m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<or> (\<exists>V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'')) \<or> (\<exists>p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'')"
by fastforce
from `n'' is-xs\<rightarrow>\<^sub>d* m''` `m'' s\<longrightarrow>\<^bsub>cd\<^esub> n'`
have "n'' is-xs@[m'']\<rightarrow>\<^sub>d* n'" by(rule intra_sum_SDG_path.intros)
with `(m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<or> (\<exists>V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'')) \<or> (\<exists>p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'')`
show ?case by fastforce
next
case (isSp_Append_ddep m ms m'' V n')
note IH = `\<And>n'. m is-nx # xs\<rightarrow>\<^sub>d* n' \<Longrightarrow>
((\<exists>n''. m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n') \<or>
(\<exists>n'' V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')) \<or>
(\<exists>n'' p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')`
from `ms @ [m''] = nx#xs@[x]` have [simp]:"ms = nx#xs"
and [simp]:"m'' = x" by simp_all
from `m is-ms\<rightarrow>\<^sub>d* m''` have "m is-nx#xs\<rightarrow>\<^sub>d* m''" by simp
from IH[OF this] obtain n'' where "n'' is-xs\<rightarrow>\<^sub>d* m''"
and "(m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<or> (\<exists>V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'')) \<or> (\<exists>p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'')"
by fastforce
from `n'' is-xs\<rightarrow>\<^sub>d* m''` `m'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `m'' \<noteq> n'`
have "n'' is-xs@[m'']\<rightarrow>\<^sub>d* n'" by(rule intra_sum_SDG_path.intros)
with `(m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<or> (\<exists>V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'')) \<or> (\<exists>p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'')`
show ?case by fastforce
next
case (isSp_Append_sum m ms m'' p n')
note IH = `\<And>n'. m is-nx # xs\<rightarrow>\<^sub>d* n' \<Longrightarrow>
((\<exists>n''. m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n') \<or>
(\<exists>n'' V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')) \<or>
(\<exists>n'' p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'' \<and> n'' is-xs\<rightarrow>\<^sub>d* n')`
from `ms @ [m''] = nx#xs@[x]` have [simp]:"ms = nx#xs"
and [simp]:"m'' = x" by simp_all
from `m is-ms\<rightarrow>\<^sub>d* m''` have "m is-nx#xs\<rightarrow>\<^sub>d* m''" by simp
from IH[OF this] obtain n'' where "n'' is-xs\<rightarrow>\<^sub>d* m''"
and "(m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<or> (\<exists>V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'')) \<or> (\<exists>p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'')"
by fastforce
from `n'' is-xs\<rightarrow>\<^sub>d* m''` `m'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'`
have "n'' is-xs@[m'']\<rightarrow>\<^sub>d* n'" by(rule intra_sum_SDG_path.intros)
with `(m s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<or> (\<exists>V. m s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> m \<noteq> n'')) \<or> (\<exists>p. m s-p\<rightarrow>\<^bsub>sum\<^esub> n'')`
show ?case by fastforce
qed
qed
thus ?case apply -
proof(erule disjE)+
assume "\<exists>n''. n s\<longrightarrow>\<^bsub>cd\<^esub> n'' \<and> n'' is-nsx\<rightarrow>\<^sub>d* n'"
then obtain n'' where "n s\<longrightarrow>\<^bsub>cd\<^esub> n''" and "n'' is-nsx\<rightarrow>\<^sub>d* n'" by blast
from IH[OF `n'' is-nsx\<rightarrow>\<^sub>d* n'`] have "P n'' nsx n'" .
from step_cdep[OF `n s\<longrightarrow>\<^bsub>cd\<^esub> n''` `n'' is-nsx\<rightarrow>\<^sub>d* n'` this] show ?thesis by simp
next
assume "\<exists>n'' V. n s-V\<rightarrow>\<^bsub>dd\<^esub> n'' \<and> n \<noteq> n'' \<and> n'' is-nsx\<rightarrow>\<^sub>d* n'"
then obtain n'' V where "n s-V\<rightarrow>\<^bsub>dd\<^esub> n''" and "n \<noteq> n''" and "n'' is-nsx\<rightarrow>\<^sub>d* n'"
by blast
from IH[OF `n'' is-nsx\<rightarrow>\<^sub>d* n'`] have "P n'' nsx n'" .
from step_ddep[OF `n s-V\<rightarrow>\<^bsub>dd\<^esub> n''` `n \<noteq> n''` `n'' is-nsx\<rightarrow>\<^sub>d* n'` this]
show ?thesis by simp
next
assume "\<exists>n'' p. n s-p\<rightarrow>\<^bsub>sum\<^esub> n'' \<and> n'' is-nsx\<rightarrow>\<^sub>d* n'"
then obtain n'' p where "n s-p\<rightarrow>\<^bsub>sum\<^esub> n''" and "n'' is-nsx\<rightarrow>\<^sub>d* n'" by blast
from IH[OF `n'' is-nsx\<rightarrow>\<^sub>d* n'`] have "P n'' nsx n'" .
from step_sum[OF `n s-p\<rightarrow>\<^bsub>sum\<^esub> n''` `n'' is-nsx\<rightarrow>\<^sub>d* n'` this] show ?thesis by simp
qed
qed
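text {* Every intra-sum SDG path can be simulated by an intraprocedural CFG
  path: *}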
lemma is_SDG_path_CFG_path:
assumes "n is-ns\<rightarrow>\<^sub>d* n'"
obtains as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(atomize_elim)
from `n is-ns\<rightarrow>\<^sub>d* n'`
show "\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(induct rule:intra_sum_SDG_path.induct)
case (isSp_Nil n)
from `valid_SDG_node n` have "valid_node (parent_node n)"
by(rule valid_SDG_CFG_node)
hence "parent_node n -[]\<rightarrow>* parent_node n" by(rule empty_path)
thus ?case by(auto simp:intra_path_def)
next
case (isSp_Append_cdep n ns n'' n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "n'' \<longrightarrow>\<^bsub>cd\<^esub> n'" by(rule sum_SDG_edge_SDG_edge)
thus ?case
proof(rule cdep_edge_cases)
assume "parent_node n'' controls parent_node n'"
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" and "as' \<noteq> []"
by(erule control_dependence_path)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?thesis by blast
next
fix a Q r p fs a'
assume "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "a' \<in> get_return_edges a"
and "parent_node n'' = targetnode a" and "parent_node n' = sourcenode a'"
then obtain a'' where "valid_edge a''" and "sourcenode a'' = targetnode a"
and "targetnode a'' = sourcenode a'" and "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(auto dest:intra_proc_additional_edge)
hence "targetnode a -[a'']\<rightarrow>\<^sub>\<iota>* sourcenode a'"
by(fastforce dest:path_edge simp:intra_path_def intra_kind_def)
with `parent_node n'' = targetnode a` `parent_node n' = sourcenode a'`
have "\<exists>as'. parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n' \<and> as' \<noteq> []" by fastforce
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" and "as' \<noteq> []"
by blast
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?thesis by blast
next
fix m assume "n'' = CFG_node m" and "m = parent_node n'"
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''` show ?thesis by fastforce
qed
next
case (isSp_Append_ddep n ns n'' V n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` have "n'' influences V in n'"
by(fastforce elim:sum_SDG_edge.cases)
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(auto simp:data_dependence_def)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?case by blast
next
case (isSp_Append_sum n ns n'' p n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'` have "\<exists>as'. parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(rule sum_edge_cases)
fix a Q fs a'
assume "valid_edge a" and "a' \<in> get_return_edges a"
and "n'' = CFG_node (sourcenode a)" and "n' = CFG_node (targetnode a')"
from `valid_edge a` `a' \<in> get_return_edges a`
obtain a'' where "sourcenode a -[a'']\<rightarrow>\<^sub>\<iota>* targetnode a'"
apply - apply(drule call_return_node_edge)
apply(auto simp:intra_path_def) apply(drule path_edge)
by(auto simp:intra_kind_def)
with `n'' = CFG_node (sourcenode a)` `n' = CFG_node (targetnode a')`
show ?thesis by simp blast
next
fix a Q p fs a' ns x x' ins outs
assume "valid_edge a" and "a' \<in> get_return_edges a"
and "n'' = Actual_in (sourcenode a, x)"
and "n' = Actual_out (targetnode a', x')"
from `valid_edge a` `a' \<in> get_return_edges a`
obtain a'' where "sourcenode a -[a'']\<rightarrow>\<^sub>\<iota>* targetnode a'"
apply - apply(drule call_return_node_edge)
apply(auto simp:intra_path_def) apply(drule path_edge)
by(auto simp:intra_kind_def)
with `n'' = Actual_in (sourcenode a, x)` `n' = Actual_out (targetnode a', x')`
show ?thesis by simp blast
qed
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by blast
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?case by blast
qed
qed
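text {* A matched path can be contracted into an intra-sum SDG path by
  replacing every bracketed call/return (or parameter-in/parameter-out) pair
  with the corresponding summary edge: *}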
lemma matched_is_SDG_path:
assumes "matched n ns n'" obtains ns' where "n is-ns'\<rightarrow>\<^sub>d* n'"
proof(atomize_elim)
from `matched n ns n'` show "\<exists>ns'. n is-ns'\<rightarrow>\<^sub>d* n'"
proof(induct rule:matched.induct)
case matched_Nil thus ?case by(fastforce intro:isSp_Nil)
next
case matched_Append_intra_SDG_path thus ?case
by(fastforce intro:is_SDG_path_Append intra_SDG_path_is_SDG_path)
next
case (matched_bracket_call n\<^sub>0 ns n\<^sub>1 p n\<^sub>2 ns' n\<^sub>3 n\<^sub>4 V a a')
from `\<exists>ns'. n\<^sub>0 is-ns'\<rightarrow>\<^sub>d* n\<^sub>1` obtain nsx where "n\<^sub>0 is-nsx\<rightarrow>\<^sub>d* n\<^sub>1" by blast
from `n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2` `sourcenode a = parent_node n\<^sub>1` `targetnode a = parent_node n\<^sub>2`
have "n\<^sub>1 = CFG_node (sourcenode a)" and "n\<^sub>2 = CFG_node (targetnode a)"
by(auto elim:SDG_edge.cases)
from `valid_edge a` `a' \<in> get_return_edges a`
obtain Q r p' fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs"
by(fastforce dest!:only_call_get_return_edges)
with `n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2` `valid_edge a`
`n\<^sub>1 = CFG_node (sourcenode a)` `n\<^sub>2 = CFG_node (targetnode a)`
have [simp]:"p' = p" by -(erule SDG_edge.cases,(fastforce dest:edge_det)+)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `n\<^sub>3 -p\<rightarrow>\<^bsub>ret\<^esub> n\<^sub>4 \<or> n\<^sub>3 -p:V\<rightarrow>\<^bsub>out\<^esub> n\<^sub>4` show ?case
proof
assume "n\<^sub>3 -p\<rightarrow>\<^bsub>ret\<^esub> n\<^sub>4"
then obtain ax Q' f' where "valid_edge ax" and "kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'"
and "n\<^sub>3 = CFG_node (sourcenode ax)" and "n\<^sub>4 = CFG_node (targetnode ax)"
by(fastforce elim:SDG_edge.cases)
with `sourcenode a' = parent_node n\<^sub>3` `targetnode a' = parent_node n\<^sub>4`
`valid_edge a'` have [simp]:"ax = a'" by(fastforce dest:edge_det)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `valid_edge ax` `kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'`
`a' \<in> get_return_edges a` `matched n\<^sub>2 ns' n\<^sub>3`
`n\<^sub>1 = CFG_node (sourcenode a)` `n\<^sub>2 = CFG_node (targetnode a)`
`n\<^sub>3 = CFG_node (sourcenode ax)` `n\<^sub>4 = CFG_node (targetnode ax)`
have "n\<^sub>1 s-p\<rightarrow>\<^bsub>sum\<^esub> n\<^sub>4"
by(fastforce intro!:sum_SDG_call_summary_edge[of a _ _ _ _ ax])
with `n\<^sub>0 is-nsx\<rightarrow>\<^sub>d* n\<^sub>1` have "n\<^sub>0 is-nsx@[n\<^sub>1]\<rightarrow>\<^sub>d* n\<^sub>4" by(rule isSp_Append_sum)
thus ?case by blast
next
assume "n\<^sub>3 -p:V\<rightarrow>\<^bsub>out\<^esub> n\<^sub>4"
then obtain ax Q' f' x where "valid_edge ax" and "kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'"
and "n\<^sub>3 = Formal_out (sourcenode ax,x)"
and "n\<^sub>4 = Actual_out (targetnode ax,x)"
by(fastforce elim:SDG_edge.cases)
with `sourcenode a' = parent_node n\<^sub>3` `targetnode a' = parent_node n\<^sub>4`
`valid_edge a'` have [simp]:"ax = a'" by(fastforce dest:edge_det)
from `valid_edge ax` `kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` `n\<^sub>3 = Formal_out (sourcenode ax,x)`
`n\<^sub>4 = Actual_out (targetnode ax,x)`
have "CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (targetnode a')"
by(fastforce intro:SDG_return_edge)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `valid_edge a'`
`a' \<in> get_return_edges a` `n\<^sub>4 = Actual_out (targetnode ax,x)`
have "CFG_node (targetnode a) \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (sourcenode a')"
by(fastforce intro!:SDG_proc_entry_exit_cdep)
with `n\<^sub>2 = CFG_node (targetnode a)`
have "matched n\<^sub>2 ([]@([]@[n\<^sub>2])) (CFG_node (sourcenode a'))"
by(fastforce intro:matched.intros intra_SDG_path.intros
SDG_edge_valid_SDG_node)
with `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `valid_edge a'` `kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'`
`a' \<in> get_return_edges a` `n\<^sub>1 = CFG_node (sourcenode a)`
`n\<^sub>2 = CFG_node (targetnode a)` `n\<^sub>4 = Actual_out (targetnode ax,x)`
have "n\<^sub>1 s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a')"
by(fastforce intro!:sum_SDG_call_summary_edge[of a _ _ _ _ a'])
with `n\<^sub>0 is-nsx\<rightarrow>\<^sub>d* n\<^sub>1` have "n\<^sub>0 is-nsx@[n\<^sub>1]\<rightarrow>\<^sub>d* CFG_node (targetnode a')"
by(rule isSp_Append_sum)
from `n\<^sub>4 = Actual_out (targetnode ax,x)` `n\<^sub>3 -p:V\<rightarrow>\<^bsub>out\<^esub> n\<^sub>4`
have "CFG_node (targetnode a') s\<longrightarrow>\<^bsub>cd\<^esub> n\<^sub>4"
by(fastforce intro:sum_SDG_parent_cdep_edge SDG_edge_valid_SDG_node)
with `n\<^sub>0 is-nsx@[n\<^sub>1]\<rightarrow>\<^sub>d* CFG_node (targetnode a')`
have "n\<^sub>0 is-(nsx@[n\<^sub>1])@[CFG_node (targetnode a')]\<rightarrow>\<^sub>d* n\<^sub>4"
by(rule isSp_Append_cdep)
thus ?case by blast
qed
next
case (matched_bracket_param n\<^sub>0 ns n\<^sub>1 p V n\<^sub>2 ns' n\<^sub>3 V' n\<^sub>4 a a')
from `\<exists>ns'. n\<^sub>0 is-ns'\<rightarrow>\<^sub>d* n\<^sub>1` obtain nsx where "n\<^sub>0 is-nsx\<rightarrow>\<^sub>d* n\<^sub>1" by blast
from `n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2` `sourcenode a = parent_node n\<^sub>1`
`targetnode a = parent_node n\<^sub>2` obtain x ins outs
where "n\<^sub>1 = Actual_in (sourcenode a,x)" and "n\<^sub>2 = Formal_in (targetnode a,x)"
and "(p,ins,outs) \<in> set procs" and "V = ins!x" and "x < length ins"
by(fastforce elim:SDG_edge.cases)
from `valid_edge a` `a' \<in> get_return_edges a`
obtain Q r p' fs where "kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs"
by(fastforce dest!:only_call_get_return_edges)
with `n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2` `valid_edge a`
`n\<^sub>1 = Actual_in (sourcenode a,x)` `n\<^sub>2 = Formal_in (targetnode a,x)`
have [simp]:"p' = p" by -(erule SDG_edge.cases,(fastforce dest:edge_det)+)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `n\<^sub>3 -p:V'\<rightarrow>\<^bsub>out\<^esub> n\<^sub>4` obtain ax Q' f' x' ins' outs' where "valid_edge ax"
and "kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" and "n\<^sub>3 = Formal_out (sourcenode ax,x')"
and "n\<^sub>4 = Actual_out (targetnode ax,x')" and "(p,ins',outs') \<in> set procs"
and "V' = outs'!x'" and "x' < length outs'"
by(fastforce elim:SDG_edge.cases)
with `sourcenode a' = parent_node n\<^sub>3` `targetnode a' = parent_node n\<^sub>4`
`valid_edge a'` have [simp]:"ax = a'" by(fastforce dest:edge_det)
from unique_callers `(p,ins,outs) \<in> set procs` `(p,ins',outs') \<in> set procs`
have [simp]:"ins = ins'" "outs = outs'"
by(auto dest:distinct_fst_isin_same_fst)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p'\<^esub>fs` `valid_edge a'` `kind ax = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'`
`a' \<in> get_return_edges a` `matched n\<^sub>2 ns' n\<^sub>3` `n\<^sub>1 = Actual_in (sourcenode a,x)`
`n\<^sub>2 = Formal_in (targetnode a,x)` `n\<^sub>3 = Formal_out (sourcenode ax,x')`
`n\<^sub>4 = Actual_out (targetnode ax,x')` `(p,ins,outs) \<in> set procs`
`x < length ins` `x' < length outs'` `V = ins!x` `V' = outs'!x'`
have "n\<^sub>1 s-p\<rightarrow>\<^bsub>sum\<^esub> n\<^sub>4"
by(fastforce intro!:sum_SDG_param_summary_edge[of a _ _ _ _ a'])
with `n\<^sub>0 is-nsx\<rightarrow>\<^sub>d* n\<^sub>1` have "n\<^sub>0 is-nsx@[n\<^sub>1]\<rightarrow>\<^sub>d* n\<^sub>4" by(rule isSp_Append_sum)
thus ?case by blast
qed
qed
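text {* Conversely, every intra-sum SDG path can be expanded into a matched
  path that contains at least the nodes of the original path: *}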
lemma is_SDG_path_matched:
assumes "n is-ns\<rightarrow>\<^sub>d* n'" obtains ns' where "matched n ns' n'" and "set ns \<subseteq> set ns'"
proof(atomize_elim)
from `n is-ns\<rightarrow>\<^sub>d* n'` show "\<exists>ns'. matched n ns' n' \<and> set ns \<subseteq> set ns'"
proof(induct rule:intra_sum_SDG_path.induct)
case (isSp_Nil n)
from `valid_SDG_node n` have "matched n [] n" by(rule matched_Nil)
thus ?case by fastforce
next
case (isSp_Append_cdep n ns n'' n')
from `\<exists>ns'. matched n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "matched n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "n'' i-[]@[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce intro:intra_SDG_path.intros sum_SDG_edge_valid_SDG_node
sum_SDG_edge_SDG_edge)
with `matched n ns' n''` have "matched n (ns'@[n'']) n'"
by(fastforce intro!:matched_Append_intra_SDG_path)
with `set ns \<subseteq> set ns'` show ?case by fastforce
next
case (isSp_Append_ddep n ns n'' V n')
from `\<exists>ns'. matched n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "matched n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `n'' \<noteq> n'` have "n'' i-[]@[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce intro:intra_SDG_path.intros sum_SDG_edge_valid_SDG_node
sum_SDG_edge_SDG_edge)
with `matched n ns' n''` have "matched n (ns'@[n'']) n'"
by(fastforce intro!:matched_Append_intra_SDG_path)
with `set ns \<subseteq> set ns'` show ?case by fastforce
next
case (isSp_Append_sum n ns n'' p n')
from `\<exists>ns'. matched n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "matched n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'` obtain ns'' where "matched n'' ns'' n'" and "n'' \<in> set ns''"
by -(erule sum_SDG_summary_edge_matched)
with `matched n ns' n''` have "matched n (ns'@ns'') n'" by -(rule matched_Append)
with `set ns \<subseteq> set ns'` `n'' \<in> set ns''` show ?case by fastforce
qed
qed
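text {* The same projection to intraprocedural CFG paths, this time handling
  summary edges via their expansion into matched paths: *}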
lemma is_SDG_path_intra_CFG_path:
assumes "n is-ns\<rightarrow>\<^sub>d* n'"
obtains as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(atomize_elim)
from `n is-ns\<rightarrow>\<^sub>d* n'`
show "\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
proof(induct rule:intra_sum_SDG_path.induct)
case (isSp_Nil n)
from `valid_SDG_node n` have "parent_node n -[]\<rightarrow>* parent_node n"
by(fastforce intro:empty_path valid_SDG_CFG_node)
thus ?case by(auto simp:intra_path_def)
next
case (isSp_Append_cdep n ns n'' n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "n'' \<longrightarrow>\<^bsub>cd\<^esub> n'" by(rule sum_SDG_edge_SDG_edge)
thus ?case
proof(rule cdep_edge_cases)
assume "parent_node n'' controls parent_node n'"
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" and "as' \<noteq> []"
by(erule control_dependence_path)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?thesis by blast
next
fix a Q r p fs a'
assume "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" "a' \<in> get_return_edges a"
and "parent_node n'' = targetnode a" and "parent_node n' = sourcenode a'"
then obtain a'' where "valid_edge a''" and "sourcenode a'' = targetnode a"
and "targetnode a'' = sourcenode a'" and "kind a'' = (\<lambda>cf. False)\<^sub>\<surd>"
by(auto dest:intra_proc_additional_edge)
hence "targetnode a -[a'']\<rightarrow>\<^sub>\<iota>* sourcenode a'"
by(fastforce dest:path_edge simp:intra_path_def intra_kind_def)
with `parent_node n'' = targetnode a` `parent_node n' = sourcenode a'`
have "\<exists>as'. parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n' \<and> as' \<noteq> []" by fastforce
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'" and "as' \<noteq> []"
by blast
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?thesis by blast
next
fix m assume "n'' = CFG_node m" and "m = parent_node n'"
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''` show ?thesis by fastforce
qed
next
case (isSp_Append_ddep n ns n'' V n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` have "n'' influences V in n'"
by(fastforce elim:sum_SDG_edge.cases)
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(auto simp:data_dependence_def)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'" by -(rule intra_path_Append)
thus ?case by blast
next
case (isSp_Append_sum n ns n'' p n')
from `\<exists>as. parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''" by blast
from `n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'` obtain ns' where "matched n'' ns' n'"
by -(erule sum_SDG_summary_edge_matched)
then obtain as' where "parent_node n'' -as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(erule matched_intra_CFG_path)
with `parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''`
have "parent_node n -as@as'\<rightarrow>\<^sub>\<iota>* parent_node n'"
by(fastforce intro:path_Append simp:intra_path_def)
thus ?case by blast
qed
qed
text {* SDG paths without return edges *}
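text {* An ics-SDG path may additionally traverse call and parameter-in
  edges, but no return or parameter-out edges: a path may descend into called
  procedures, while completed calls are represented by summary edges. *}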
inductive intra_call_sum_SDG_path ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ ics-_\<rightarrow>\<^sub>d* _" [51,0,0] 80)
where icsSp_Nil:
"valid_SDG_node n \<Longrightarrow> n ics-[]\<rightarrow>\<^sub>d* n"
| icsSp_Append_cdep:
"\<lbrakk>n ics-ns\<rightarrow>\<^sub>d* n''; n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'\<rbrakk> \<Longrightarrow> n ics-ns@[n'']\<rightarrow>\<^sub>d* n'"
| icsSp_Append_ddep:
"\<lbrakk>n ics-ns\<rightarrow>\<^sub>d* n''; n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'; n'' \<noteq> n'\<rbrakk> \<Longrightarrow> n ics-ns@[n'']\<rightarrow>\<^sub>d* n'"
| icsSp_Append_sum:
"\<lbrakk>n ics-ns\<rightarrow>\<^sub>d* n''; n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'\<rbrakk> \<Longrightarrow> n ics-ns@[n'']\<rightarrow>\<^sub>d* n'"
| icsSp_Append_call:
"\<lbrakk>n ics-ns\<rightarrow>\<^sub>d* n''; n'' s-p\<rightarrow>\<^bsub>call\<^esub> n'\<rbrakk> \<Longrightarrow> n ics-ns@[n'']\<rightarrow>\<^sub>d* n'"
| icsSp_Append_param_in:
"\<lbrakk>n ics-ns\<rightarrow>\<^sub>d* n''; n'' s-p:V\<rightarrow>\<^bsub>in\<^esub> n'\<rbrakk> \<Longrightarrow> n ics-ns@[n'']\<rightarrow>\<^sub>d* n'"
lemma ics_SDG_path_valid_SDG_node:
assumes "n ics-ns\<rightarrow>\<^sub>d* n'" shows "valid_SDG_node n" and "valid_SDG_node n'"
using `n ics-ns\<rightarrow>\<^sub>d* n'`
by(induct rule:intra_call_sum_SDG_path.induct,
auto intro:sum_SDG_edge_valid_SDG_node valid_SDG_CFG_node)
lemma ics_SDG_path_Append:
"\<lbrakk>n'' ics-ns'\<rightarrow>\<^sub>d* n'; n ics-ns\<rightarrow>\<^sub>d* n''\<rbrakk> \<Longrightarrow> n ics-ns@ns'\<rightarrow>\<^sub>d* n'"
by(induct rule:intra_call_sum_SDG_path.induct,
auto intro:intra_call_sum_SDG_path.intros simp:append_assoc[THEN sym]
simp del:append_assoc)
lemma is_SDG_path_ics_SDG_path:
"n is-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> n ics-ns\<rightarrow>\<^sub>d* n'"
by(induct rule:intra_sum_SDG_path.induct,auto intro:intra_call_sum_SDG_path.intros)
lemma cc_SDG_path_ics_SDG_path:
"n cc-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> n ics-ns\<rightarrow>\<^sub>d* n'"
by(induct rule:call_cdep_SDG_path.induct,
auto intro:intra_call_sum_SDG_path.intros SDG_edge_sum_SDG_edge)
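text {* An ics-SDG path can be split at every node occurring in its node
  list: *}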
lemma ics_SDG_path_split:
assumes "n ics-ns\<rightarrow>\<^sub>d* n'" and "n'' \<in> set ns"
obtains ns' ns'' where "ns = ns'@ns''" and "n ics-ns'\<rightarrow>\<^sub>d* n''"
and "n'' ics-ns''\<rightarrow>\<^sub>d* n'"
proof(atomize_elim)
from `n ics-ns\<rightarrow>\<^sub>d* n'` `n'' \<in> set ns`
show "\<exists>ns' ns''. ns = ns'@ns'' \<and> n ics-ns'\<rightarrow>\<^sub>d* n'' \<and> n'' ics-ns''\<rightarrow>\<^sub>d* n'"
proof(induct rule:intra_call_sum_SDG_path.induct)
case icsSp_Nil thus ?case by simp
next
case (icsSp_Append_cdep n ns nx n')
note IH = `n'' \<in> set ns \<Longrightarrow>
\<exists>ns' ns''. ns = ns' @ ns'' \<and> n ics-ns'\<rightarrow>\<^sub>d* n'' \<and> n'' ics-ns''\<rightarrow>\<^sub>d* nx`
from `n'' \<in> set (ns@[nx])` have "n'' \<in> set ns \<or> n'' = nx" by fastforce
thus ?case
proof
assume "n'' \<in> set ns"
from IH[OF this] obtain ns' ns'' where "ns = ns' @ ns''"
and "n ics-ns'\<rightarrow>\<^sub>d* n''" and "n'' ics-ns''\<rightarrow>\<^sub>d* nx" by blast
from `n'' ics-ns''\<rightarrow>\<^sub>d* nx` `nx s\<longrightarrow>\<^bsub>cd\<^esub> n'`
have "n'' ics-ns''@[nx]\<rightarrow>\<^sub>d* n'"
by(rule intra_call_sum_SDG_path.icsSp_Append_cdep)
with `ns = ns'@ns''` `n ics-ns'\<rightarrow>\<^sub>d* n''` show ?thesis by fastforce
next
assume "n'' = nx"
from `nx s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "nx ics-[]\<rightarrow>\<^sub>d* nx"
by(fastforce intro:icsSp_Nil SDG_edge_valid_SDG_node sum_SDG_edge_SDG_edge)
with `nx s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "nx ics-[]@[nx]\<rightarrow>\<^sub>d* n'"
by -(rule intra_call_sum_SDG_path.icsSp_Append_cdep)
with `n ics-ns\<rightarrow>\<^sub>d* nx` `n'' = nx` show ?thesis by fastforce
qed
next
case (icsSp_Append_ddep n ns nx V n')
note IH = `n'' \<in> set ns \<Longrightarrow>
\<exists>ns' ns''. ns = ns' @ ns'' \<and> n ics-ns'\<rightarrow>\<^sub>d* n'' \<and> n'' ics-ns''\<rightarrow>\<^sub>d* nx`
from `n'' \<in> set (ns@[nx])` have "n'' \<in> set ns \<or> n'' = nx" by fastforce
thus ?case
proof
assume "n'' \<in> set ns"
from IH[OF this] obtain ns' ns'' where "ns = ns' @ ns''"
and "n ics-ns'\<rightarrow>\<^sub>d* n''" and "n'' ics-ns''\<rightarrow>\<^sub>d* nx" by blast
from `n'' ics-ns''\<rightarrow>\<^sub>d* nx` `nx s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `nx \<noteq> n'`
have "n'' ics-ns''@[nx]\<rightarrow>\<^sub>d* n'"
by(rule intra_call_sum_SDG_path.icsSp_Append_ddep)
with `ns = ns'@ns''` `n ics-ns'\<rightarrow>\<^sub>d* n''` show ?thesis by fastforce
next
assume "n'' = nx"
from `nx s-V\<rightarrow>\<^bsub>dd\<^esub> n'` have "nx ics-[]\<rightarrow>\<^sub>d* nx"
by(fastforce intro:icsSp_Nil SDG_edge_valid_SDG_node sum_SDG_edge_SDG_edge)
with `nx s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `nx \<noteq> n'` have "nx ics-[]@[nx]\<rightarrow>\<^sub>d* n'"
by -(rule intra_call_sum_SDG_path.icsSp_Append_ddep)
with `n ics-ns\<rightarrow>\<^sub>d* nx` `n'' = nx` show ?thesis by fastforce
qed
next
case (icsSp_Append_sum n ns nx p n')
note IH = `n'' \<in> set ns \<Longrightarrow>
\<exists>ns' ns''. ns = ns' @ ns'' \<and> n ics-ns'\<rightarrow>\<^sub>d* n'' \<and> n'' ics-ns''\<rightarrow>\<^sub>d* nx`
from `n'' \<in> set (ns@[nx])` have "n'' \<in> set ns \<or> n'' = nx" by fastforce
thus ?case
proof
assume "n'' \<in> set ns"
from IH[OF this] obtain ns' ns'' where "ns = ns' @ ns''"
and "n ics-ns'\<rightarrow>\<^sub>d* n''" and "n'' ics-ns''\<rightarrow>\<^sub>d* nx" by blast
from `n'' ics-ns''\<rightarrow>\<^sub>d* nx` `nx s-p\<rightarrow>\<^bsub>sum\<^esub> n'`
have "n'' ics-ns''@[nx]\<rightarrow>\<^sub>d* n'"
by(rule intra_call_sum_SDG_path.icsSp_Append_sum)
with `ns = ns'@ns''` `n ics-ns'\<rightarrow>\<^sub>d* n''` show ?thesis by fastforce
next
assume "n'' = nx"
from `nx s-p\<rightarrow>\<^bsub>sum\<^esub> n'` have "valid_SDG_node nx"
by(fastforce elim:sum_SDG_edge.cases)
hence "nx ics-[]\<rightarrow>\<^sub>d* nx" by(fastforce intro:icsSp_Nil)
with `nx s-p\<rightarrow>\<^bsub>sum\<^esub> n'` have "nx ics-[]@[nx]\<rightarrow>\<^sub>d* n'"
by -(rule intra_call_sum_SDG_path.icsSp_Append_sum)
with `n ics-ns\<rightarrow>\<^sub>d* nx` `n'' = nx` show ?thesis by fastforce
qed
next
case (icsSp_Append_call n ns nx p n')
note IH = `n'' \<in> set ns \<Longrightarrow>
\<exists>ns' ns''. ns = ns' @ ns'' \<and> n ics-ns'\<rightarrow>\<^sub>d* n'' \<and> n'' ics-ns''\<rightarrow>\<^sub>d* nx`
from `n'' \<in> set (ns@[nx])` have "n'' \<in> set ns \<or> n'' = nx" by fastforce
thus ?case
proof
assume "n'' \<in> set ns"
from IH[OF this] obtain ns' ns'' where "ns = ns' @ ns''"
and "n ics-ns'\<rightarrow>\<^sub>d* n''" and "n'' ics-ns''\<rightarrow>\<^sub>d* nx" by blast
from `n'' ics-ns''\<rightarrow>\<^sub>d* nx` `nx s-p\<rightarrow>\<^bsub>call\<^esub> n'`
have "n'' ics-ns''@[nx]\<rightarrow>\<^sub>d* n'"
by(rule intra_call_sum_SDG_path.icsSp_Append_call)
with `ns = ns'@ns''` `n ics-ns'\<rightarrow>\<^sub>d* n''` show ?thesis by fastforce
next
assume "n'' = nx"
from `nx s-p\<rightarrow>\<^bsub>call\<^esub> n'` have "nx ics-[]\<rightarrow>\<^sub>d* nx"
by(fastforce intro:icsSp_Nil SDG_edge_valid_SDG_node sum_SDG_edge_SDG_edge)
with `nx s-p\<rightarrow>\<^bsub>call\<^esub> n'` have "nx ics-[]@[nx]\<rightarrow>\<^sub>d* n'"
by -(rule intra_call_sum_SDG_path.icsSp_Append_call)
with `n ics-ns\<rightarrow>\<^sub>d* nx` `n'' = nx` show ?thesis by fastforce
qed
next
case (icsSp_Append_param_in n ns nx p V n')
note IH = `n'' \<in> set ns \<Longrightarrow>
\<exists>ns' ns''. ns = ns' @ ns'' \<and> n ics-ns'\<rightarrow>\<^sub>d* n'' \<and> n'' ics-ns''\<rightarrow>\<^sub>d* nx`
from `n'' \<in> set (ns@[nx])` have "n'' \<in> set ns \<or> n'' = nx" by fastforce
thus ?case
proof
assume "n'' \<in> set ns"
from IH[OF this] obtain ns' ns'' where "ns = ns' @ ns''"
and "n ics-ns'\<rightarrow>\<^sub>d* n''" and "n'' ics-ns''\<rightarrow>\<^sub>d* nx" by blast
from `n'' ics-ns''\<rightarrow>\<^sub>d* nx` `nx s-p:V\<rightarrow>\<^bsub>in\<^esub> n'`
have "n'' ics-ns''@[nx]\<rightarrow>\<^sub>d* n'"
by(rule intra_call_sum_SDG_path.icsSp_Append_param_in)
with `ns = ns'@ns''` `n ics-ns'\<rightarrow>\<^sub>d* n''` show ?thesis by fastforce
next
assume "n'' = nx"
from `nx s-p:V\<rightarrow>\<^bsub>in\<^esub> n'` have "nx ics-[]\<rightarrow>\<^sub>d* nx"
by(fastforce intro:icsSp_Nil SDG_edge_valid_SDG_node sum_SDG_edge_SDG_edge)
with `nx s-p:V\<rightarrow>\<^bsub>in\<^esub> n'` have "nx ics-[]@[nx]\<rightarrow>\<^sub>d* n'"
by -(rule intra_call_sum_SDG_path.icsSp_Append_param_in)
with `n ics-ns\<rightarrow>\<^sub>d* nx` `n'' = nx` show ?thesis by fastforce
qed
qed
qed
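(* ics-paths and realizable paths connect the same nodes: the next two lemmas
   translate in both directions, the second additionally preserving the
   visited nodes. *)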
lemma realizable_ics_SDG_path:
assumes "realizable n ns n'" obtains ns' where "n ics-ns'\<rightarrow>\<^sub>d* n'"
proof(atomize_elim)
from `realizable n ns n'` show "\<exists>ns'. n ics-ns'\<rightarrow>\<^sub>d* n'"
proof(induct rule:realizable.induct)
case (realizable_matched n ns n')
from `matched n ns n'` obtain ns' where "n is-ns'\<rightarrow>\<^sub>d* n'"
by(erule matched_is_SDG_path)
thus ?case by(fastforce intro:is_SDG_path_ics_SDG_path)
next
case (realizable_call n\<^sub>0 ns n\<^sub>1 p n\<^sub>2 V ns' n\<^sub>3)
from `\<exists>ns'. n\<^sub>0 ics-ns'\<rightarrow>\<^sub>d* n\<^sub>1` obtain nsx where "n\<^sub>0 ics-nsx\<rightarrow>\<^sub>d* n\<^sub>1" by blast
with `n\<^sub>1 -p\<rightarrow>\<^bsub>call\<^esub> n\<^sub>2 \<or> n\<^sub>1 -p:V\<rightarrow>\<^bsub>in\<^esub> n\<^sub>2` have "n\<^sub>0 ics-nsx@[n\<^sub>1]\<rightarrow>\<^sub>d* n\<^sub>2"
by(fastforce intro:SDG_edge_sum_SDG_edge icsSp_Append_call icsSp_Append_param_in)
from `matched n\<^sub>2 ns' n\<^sub>3` obtain nsx' where "n\<^sub>2 is-nsx'\<rightarrow>\<^sub>d* n\<^sub>3"
by(erule matched_is_SDG_path)
hence "n\<^sub>2 ics-nsx'\<rightarrow>\<^sub>d* n\<^sub>3" by(rule is_SDG_path_ics_SDG_path)
from `n\<^sub>2 ics-nsx'\<rightarrow>\<^sub>d* n\<^sub>3` `n\<^sub>0 ics-nsx@[n\<^sub>1]\<rightarrow>\<^sub>d* n\<^sub>2`
have "n\<^sub>0 ics-(nsx@[n\<^sub>1])@nsx'\<rightarrow>\<^sub>d* n\<^sub>3" by(rule ics_SDG_path_Append)
thus ?case by blast
qed
qed
lemma ics_SDG_path_realizable:
assumes "n ics-ns\<rightarrow>\<^sub>d* n'"
obtains ns' where "realizable n ns' n'" and "set ns \<subseteq> set ns'"
proof(atomize_elim)
from `n ics-ns\<rightarrow>\<^sub>d* n'` show "\<exists>ns'. realizable n ns' n' \<and> set ns \<subseteq> set ns'"
proof(induct rule:intra_call_sum_SDG_path.induct)
case (icsSp_Nil n)
hence "matched n [] n" by(rule matched_Nil)
thus ?case by(fastforce intro:realizable_matched)
next
case (icsSp_Append_cdep n ns n'' n')
from `\<exists>ns'. realizable n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "realizable n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "valid_SDG_node n''" by(rule sum_SDG_edge_valid_SDG_node)
hence "n'' i-[]\<rightarrow>\<^sub>d* n''" by(rule iSp_Nil)
with `n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "n'' i-[]@[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce elim:iSp_Append_cdep sum_SDG_edge_SDG_edge)
hence "matched n'' [n''] n'" by(fastforce intro:intra_SDG_path_matched)
with `realizable n ns' n''` have "realizable n (ns'@[n'']) n'"
by(rule realizable_Append_matched)
with `set ns \<subseteq> set ns'` show ?case by fastforce
next
case (icsSp_Append_ddep n ns n'' V n')
from `\<exists>ns'. realizable n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "realizable n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` have "valid_SDG_node n''"
by(rule sum_SDG_edge_valid_SDG_node)
hence "n'' i-[]\<rightarrow>\<^sub>d* n''" by(rule iSp_Nil)
with `n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `n'' \<noteq> n'` have "n'' i-[]@[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce elim:iSp_Append_ddep sum_SDG_edge_SDG_edge)
hence "matched n'' [n''] n'" by(fastforce intro:intra_SDG_path_matched)
with `realizable n ns' n''` have "realizable n (ns'@[n'']) n'"
by(fastforce intro:realizable_Append_matched)
with `set ns \<subseteq> set ns'` show ?case by fastforce
next
case (icsSp_Append_sum n ns n'' p n')
from `\<exists>ns'. realizable n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "realizable n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'` show ?case
proof(rule sum_edge_cases)
fix a Q r fs a'
assume "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "a' \<in> get_return_edges a"
and "n'' = CFG_node (sourcenode a)" and "n' = CFG_node (targetnode a')"
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `a' \<in> get_return_edges a`
have match':"matched (CFG_node (targetnode a)) [CFG_node (targetnode a)]
(CFG_node (sourcenode a'))"
by(rule intra_proc_matched)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `n'' = CFG_node (sourcenode a)`
have "n'' -p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a)"
by(fastforce intro:SDG_call_edge)
hence "matched n'' [] n''"
by(fastforce intro:matched_Nil SDG_edge_valid_SDG_node)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `a' \<in> get_return_edges a`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
from `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` `n' = CFG_node (targetnode a')`
have "CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>ret\<^esub> n'"
by(fastforce intro:SDG_return_edge)
from `matched n'' [] n''` `n'' -p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a)`
match' `CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>ret\<^esub> n'` `valid_edge a`
`a' \<in> get_return_edges a` `n' = CFG_node (targetnode a')`
`n'' = CFG_node (sourcenode a)`
have "matched n'' ([]@n''#[CFG_node (targetnode a)]@[CFG_node (sourcenode a')])
n'"
by(fastforce intro:matched_bracket_call)
with `realizable n ns' n''`
have "realizable n
(ns'@(n''#[CFG_node (targetnode a),CFG_node (sourcenode a')])) n'"
by(fastforce intro:realizable_Append_matched)
with `set ns \<subseteq> set ns'` show ?thesis by fastforce
next
fix a Q r p fs a' ns'' x x' ins outs
assume "valid_edge a" and "kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs" and "a' \<in> get_return_edges a"
and match':"matched (Formal_in (targetnode a,x)) ns''
(Formal_out (sourcenode a',x'))"
and "n'' = Actual_in (sourcenode a,x)"
and "n' = Actual_out (targetnode a',x')" and "(p,ins,outs) \<in> set procs"
and "x < length ins" and "x' < length outs"
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `n'' = Actual_in (sourcenode a,x)`
`(p,ins,outs) \<in> set procs` `x < length ins`
have "n'' -p:ins!x\<rightarrow>\<^bsub>in\<^esub> Formal_in (targetnode a,x)"
by(fastforce intro!:SDG_param_in_edge)
hence "matched n'' [] n''"
by(fastforce intro:matched_Nil SDG_edge_valid_SDG_node)
from `valid_edge a` `a' \<in> get_return_edges a` have "valid_edge a'"
by(rule get_return_edges_valid)
from `valid_edge a` `kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs` `a' \<in> get_return_edges a`
obtain Q' f' where "kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'" by(fastforce dest!:call_return_edges)
from `valid_edge a'` `kind a' = Q'\<hookleftarrow>\<^bsub>p\<^esub>f'` `n' = Actual_out (targetnode a',x')`
`(p,ins,outs) \<in> set procs` `x' < length outs`
have "Formal_out (sourcenode a',x') -p:outs!x'\<rightarrow>\<^bsub>out\<^esub> n'"
by(fastforce intro:SDG_param_out_edge)
from `matched n'' [] n''` `n'' -p:ins!x\<rightarrow>\<^bsub>in\<^esub> Formal_in (targetnode a,x)`
match' `Formal_out (sourcenode a',x') -p:outs!x'\<rightarrow>\<^bsub>out\<^esub> n'` `valid_edge a`
`a' \<in> get_return_edges a` `n' = Actual_out (targetnode a',x')`
`n'' = Actual_in (sourcenode a,x)`
have "matched n'' ([]@n''#ns''@[Formal_out (sourcenode a',x')]) n'"
by(fastforce intro:matched_bracket_param)
with `realizable n ns' n''`
have "realizable n (ns'@(n''#ns''@[Formal_out (sourcenode a',x')])) n'"
by(fastforce intro:realizable_Append_matched)
with `set ns \<subseteq> set ns'` show ?thesis by fastforce
qed
next
case (icsSp_Append_call n ns n'' p n')
from `\<exists>ns'. realizable n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "realizable n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s-p\<rightarrow>\<^bsub>call\<^esub> n'` have "valid_SDG_node n'"
by(rule sum_SDG_edge_valid_SDG_node)
hence "matched n' [] n'" by(rule matched_Nil)
with `realizable n ns' n''` `n'' s-p\<rightarrow>\<^bsub>call\<^esub> n'`
have "realizable n (ns'@n''#[]) n'"
by(fastforce intro:realizable_call sum_SDG_edge_SDG_edge)
with `set ns \<subseteq> set ns'` show ?case by fastforce
next
case (icsSp_Append_param_in n ns n'' p V n')
from `\<exists>ns'. realizable n ns' n'' \<and> set ns \<subseteq> set ns'`
obtain ns' where "realizable n ns' n''" and "set ns \<subseteq> set ns'" by blast
from `n'' s-p:V\<rightarrow>\<^bsub>in\<^esub> n'` have "valid_SDG_node n'"
by(rule sum_SDG_edge_valid_SDG_node)
hence "matched n' [] n'" by(rule matched_Nil)
with `realizable n ns' n''` `n'' s-p:V\<rightarrow>\<^bsub>in\<^esub> n'`
have "realizable n (ns'@n''#[]) n'"
by(fastforce intro:realizable_call sum_SDG_edge_SDG_edge)
with `set ns \<subseteq> set ns'` show ?case by fastforce
qed
qed
lemma realizable_Append_ics_SDG_path:
assumes "realizable n ns n''" and "n'' ics-ns'\<rightarrow>\<^sub>d* n'"
obtains ns'' where "realizable n (ns@ns'') n'"
proof(atomize_elim)
from `n'' ics-ns'\<rightarrow>\<^sub>d* n'` `realizable n ns n''`
show "\<exists>ns''. realizable n (ns@ns'') n'"
proof(induct rule:intra_call_sum_SDG_path.induct)
case (icsSp_Nil n'') thus ?case by(rule_tac x="[]" in exI) fastforce
next
case (icsSp_Append_cdep n'' ns' nx n')
then obtain ns'' where "realizable n (ns@ns'') nx" by fastforce
from `nx s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "valid_SDG_node nx" by(rule sum_SDG_edge_valid_SDG_node)
hence "matched nx [] nx" by(rule matched_Nil)
from `nx s\<longrightarrow>\<^bsub>cd\<^esub> n'` `valid_SDG_node nx`
have "nx i-[]@[nx]\<rightarrow>\<^sub>d* n'"
by(fastforce intro:iSp_Append_cdep iSp_Nil sum_SDG_edge_SDG_edge)
with `matched nx [] nx` have "matched nx ([]@[nx]) n'"
by(fastforce intro:matched_Append_intra_SDG_path)
with `realizable n (ns@ns'') nx` have "realizable n ((ns@ns'')@[nx]) n'"
by(fastforce intro:realizable_Append_matched)
thus ?case by fastforce
next
case (icsSp_Append_ddep n'' ns' nx V n')
then obtain ns'' where "realizable n (ns@ns'') nx" by fastforce
from `nx s-V\<rightarrow>\<^bsub>dd\<^esub> n'` have "valid_SDG_node nx" by(rule sum_SDG_edge_valid_SDG_node)
hence "matched nx [] nx" by(rule matched_Nil)
from `nx s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `nx \<noteq> n'` `valid_SDG_node nx`
have "nx i-[]@[nx]\<rightarrow>\<^sub>d* n'"
by(fastforce intro:iSp_Append_ddep iSp_Nil sum_SDG_edge_SDG_edge)
with `matched nx [] nx` have "matched nx ([]@[nx]) n'"
by(fastforce intro:matched_Append_intra_SDG_path)
with `realizable n (ns@ns'') nx` have "realizable n ((ns@ns'')@[nx]) n'"
by(fastforce intro:realizable_Append_matched)
thus ?case by fastforce
next
case (icsSp_Append_sum n'' ns' nx p n')
then obtain ns'' where "realizable n (ns@ns'') nx" by fastforce
from `nx s-p\<rightarrow>\<^bsub>sum\<^esub> n'` obtain nsx where "matched nx nsx n'"
by -(erule sum_SDG_summary_edge_matched)
with `realizable n (ns@ns'') nx` have "realizable n ((ns@ns'')@nsx) n'"
by(rule realizable_Append_matched)
thus ?case by fastforce
next
case (icsSp_Append_call n'' ns' nx p n')
then obtain ns'' where "realizable n (ns@ns'') nx" by fastforce
from `nx s-p\<rightarrow>\<^bsub>call\<^esub> n'` have "valid_SDG_node n'" by(rule sum_SDG_edge_valid_SDG_node)
hence "matched n' [] n'" by(rule matched_Nil)
with `realizable n (ns@ns'') nx` `nx s-p\<rightarrow>\<^bsub>call\<^esub> n'`
have "realizable n ((ns@ns'')@[nx]) n'"
by(fastforce intro:realizable_call sum_SDG_edge_SDG_edge)
thus ?case by fastforce
next
case (icsSp_Append_param_in n'' ns' nx p V n')
then obtain ns'' where "realizable n (ns@ns'') nx" by fastforce
from `nx s-p:V\<rightarrow>\<^bsub>in\<^esub> n'` have "valid_SDG_node n'"
by(rule sum_SDG_edge_valid_SDG_node)
hence "matched n' [] n'" by(rule matched_Nil)
with `realizable n (ns@ns'') nx` `nx s-p:V\<rightarrow>\<^bsub>in\<^esub> n'`
have "realizable n ((ns@ns'')@[nx]) n'"
by(fastforce intro:realizable_call sum_SDG_edge_SDG_edge)
thus ?case by fastforce
qed
qed
subsection {* SDG paths without call edges *}
inductive intra_return_sum_SDG_path ::
"'node SDG_node \<Rightarrow> 'node SDG_node list \<Rightarrow> 'node SDG_node \<Rightarrow> bool"
("_ irs-_\<rightarrow>\<^sub>d* _" [51,0,0] 80)
where irsSp_Nil:
"valid_SDG_node n \<Longrightarrow> n irs-[]\<rightarrow>\<^sub>d* n"
| irsSp_Cons_cdep:
"\<lbrakk>n'' irs-ns\<rightarrow>\<^sub>d* n'; n s\<longrightarrow>\<^bsub>cd\<^esub> n''\<rbrakk> \<Longrightarrow> n irs-n#ns\<rightarrow>\<^sub>d* n'"
| irsSp_Cons_ddep:
"\<lbrakk>n'' irs-ns\<rightarrow>\<^sub>d* n'; n s-V\<rightarrow>\<^bsub>dd\<^esub> n''; n \<noteq> n''\<rbrakk> \<Longrightarrow> n irs-n#ns\<rightarrow>\<^sub>d* n'"
| irsSp_Cons_sum:
"\<lbrakk>n'' irs-ns\<rightarrow>\<^sub>d* n'; n s-p\<rightarrow>\<^bsub>sum\<^esub> n''\<rbrakk> \<Longrightarrow> n irs-n#ns\<rightarrow>\<^sub>d* n'"
| irsSp_Cons_return:
"\<lbrakk>n'' irs-ns\<rightarrow>\<^sub>d* n'; n s-p\<rightarrow>\<^bsub>ret\<^esub> n''\<rbrakk> \<Longrightarrow> n irs-n#ns\<rightarrow>\<^sub>d* n'"
| irsSp_Cons_param_out:
"\<lbrakk>n'' irs-ns\<rightarrow>\<^sub>d* n'; n s-p:V\<rightarrow>\<^bsub>out\<^esub> n''\<rbrakk> \<Longrightarrow> n irs-n#ns\<rightarrow>\<^sub>d* n'"
lemma irs_SDG_path_Append:
"\<lbrakk>n irs-ns\<rightarrow>\<^sub>d* n''; n'' irs-ns'\<rightarrow>\<^sub>d* n'\<rbrakk> \<Longrightarrow> n irs-ns@ns'\<rightarrow>\<^sub>d* n'"
by(induct rule:intra_return_sum_SDG_path.induct,
auto intro:intra_return_sum_SDG_path.intros)
lemma is_SDG_path_irs_SDG_path:
"n is-ns\<rightarrow>\<^sub>d* n' \<Longrightarrow> n irs-ns\<rightarrow>\<^sub>d* n'"
proof(induct rule:intra_sum_SDG_path.induct)
case (isSp_Nil n)
from `valid_SDG_node n` show ?case by(rule irsSp_Nil)
next
case (isSp_Append_cdep n ns n'' n')
from `n'' s\<longrightarrow>\<^bsub>cd\<^esub> n'` have "n'' irs-[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce intro:irsSp_Cons_cdep irsSp_Nil sum_SDG_edge_valid_SDG_node)
with `n irs-ns\<rightarrow>\<^sub>d* n''` show ?case by(rule irs_SDG_path_Append)
next
case (isSp_Append_ddep n ns n'' V n')
from `n'' s-V\<rightarrow>\<^bsub>dd\<^esub> n'` `n'' \<noteq> n'` have "n'' irs-[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce intro:irsSp_Cons_ddep irsSp_Nil sum_SDG_edge_valid_SDG_node)
with `n irs-ns\<rightarrow>\<^sub>d* n''` show ?case by(rule irs_SDG_path_Append)
next
case (isSp_Append_sum n ns n'' p n')
from `n'' s-p\<rightarrow>\<^bsub>sum\<^esub> n'` have "n'' irs-[n'']\<rightarrow>\<^sub>d* n'"
by(fastforce intro:irsSp_Cons_sum irsSp_Nil sum_SDG_edge_valid_SDG_node)
with `n irs-ns\<rightarrow>\<^sub>d* n''` show ?case by(rule irs_SDG_path_Append)
qed
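(* An irs-path either contains no return or parameter-out edge at all -- and
   then is already an is-path -- or it can be split at the last such edge,
   with the remaining suffix a pure is-path. *)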
lemma irs_SDG_path_split:
assumes "n irs-ns\<rightarrow>\<^sub>d* n'"
obtains "n is-ns\<rightarrow>\<^sub>d* n'"
| nsx nsx' nx nx' p where "ns = nsx@nx#nsx'" and "n irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')" and "nx' is-nsx'\<rightarrow>\<^sub>d* n'"
proof(atomize_elim)
from `n irs-ns\<rightarrow>\<^sub>d* n'` show "n is-ns\<rightarrow>\<^sub>d* n' \<or>
(\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n')"
proof(induct rule:intra_return_sum_SDG_path.induct)
case (irsSp_Nil n)
from `valid_SDG_node n` have "n is-[]\<rightarrow>\<^sub>d* n" by(rule isSp_Nil)
thus ?case by simp
next
case (irsSp_Cons_cdep n'' ns n' n)
from `n'' is-ns\<rightarrow>\<^sub>d* n' \<or>
(\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n')`
show ?case
proof
assume "n'' is-ns\<rightarrow>\<^sub>d* n'"
from `n s\<longrightarrow>\<^bsub>cd\<^esub> n''` have "n is-[]@[n]\<rightarrow>\<^sub>d* n''"
by(fastforce intro:isSp_Append_cdep isSp_Nil sum_SDG_edge_valid_SDG_node)
with `n'' is-ns\<rightarrow>\<^sub>d* n'` have "n is-[n]@ns\<rightarrow>\<^sub>d* n'"
by(fastforce intro:is_SDG_path_Append)
thus ?case by simp
next
assume "\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n'"
then obtain nsx nsx' nx nx' p where "ns = nsx@nx#nsx'" and "n'' irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')" and "nx' is-nsx'\<rightarrow>\<^sub>d* n'" by blast
from `n'' irs-nsx\<rightarrow>\<^sub>d* nx` `n s\<longrightarrow>\<^bsub>cd\<^esub> n''` have "n irs-n#nsx\<rightarrow>\<^sub>d* nx"
by(rule intra_return_sum_SDG_path.irsSp_Cons_cdep)
with `ns = nsx@nx#nsx'` `nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')`
`nx' is-nsx'\<rightarrow>\<^sub>d* n'`
show ?case by fastforce
qed
next
case (irsSp_Cons_ddep n'' ns n' n V)
from `n'' is-ns\<rightarrow>\<^sub>d* n' \<or>
(\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n')`
show ?case
proof
assume "n'' is-ns\<rightarrow>\<^sub>d* n'"
from `n s-V\<rightarrow>\<^bsub>dd\<^esub> n''` `n \<noteq> n''` have "n is-[]@[n]\<rightarrow>\<^sub>d* n''"
by(fastforce intro:isSp_Append_ddep isSp_Nil sum_SDG_edge_valid_SDG_node)
with `n'' is-ns\<rightarrow>\<^sub>d* n'` have "n is-[n]@ns\<rightarrow>\<^sub>d* n'"
by(fastforce intro:is_SDG_path_Append)
thus ?case by simp
next
assume "\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n'"
then obtain nsx nsx' nx nx' p where "ns = nsx@nx#nsx'" and "n'' irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')" and "nx' is-nsx'\<rightarrow>\<^sub>d* n'" by blast
from `n'' irs-nsx\<rightarrow>\<^sub>d* nx` `n s-V\<rightarrow>\<^bsub>dd\<^esub> n''` `n \<noteq> n''` have "n irs-n#nsx\<rightarrow>\<^sub>d* nx"
by(rule intra_return_sum_SDG_path.irsSp_Cons_ddep)
with `ns = nsx@nx#nsx'` `nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')`
`nx' is-nsx'\<rightarrow>\<^sub>d* n'`
show ?case by fastforce
qed
next
case (irsSp_Cons_sum n'' ns n' n p)
from `n'' is-ns\<rightarrow>\<^sub>d* n' \<or>
(\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n')`
show ?case
proof
assume "n'' is-ns\<rightarrow>\<^sub>d* n'"
from `n s-p\<rightarrow>\<^bsub>sum\<^esub> n''` have "n is-[]@[n]\<rightarrow>\<^sub>d* n''"
by(fastforce intro:isSp_Append_sum isSp_Nil sum_SDG_edge_valid_SDG_node)
with `n'' is-ns\<rightarrow>\<^sub>d* n'` have "n is-[n]@ns\<rightarrow>\<^sub>d* n'"
by(fastforce intro:is_SDG_path_Append)
thus ?case by simp
next
assume "\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n'"
then obtain nsx nsx' nx nx' p' where "ns = nsx@nx#nsx'" and "n'' irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p':V\<rightarrow>\<^bsub>out\<^esub> nx')"
and "nx' is-nsx'\<rightarrow>\<^sub>d* n'" by blast
from `n'' irs-nsx\<rightarrow>\<^sub>d* nx` `n s-p\<rightarrow>\<^bsub>sum\<^esub> n''` have "n irs-n#nsx\<rightarrow>\<^sub>d* nx"
by(rule intra_return_sum_SDG_path.irsSp_Cons_sum)
with `ns = nsx@nx#nsx'` `nx s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p':V\<rightarrow>\<^bsub>out\<^esub> nx')`
`nx' is-nsx'\<rightarrow>\<^sub>d* n'`
show ?case by fastforce
qed
next
case (irsSp_Cons_return n'' ns n' n p)
from `n'' is-ns\<rightarrow>\<^sub>d* n' \<or>
(\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n')`
show ?case
proof
assume "n'' is-ns\<rightarrow>\<^sub>d* n'"
from `n s-p\<rightarrow>\<^bsub>ret\<^esub> n''` have "valid_SDG_node n" by(rule sum_SDG_edge_valid_SDG_node)
hence "n irs-[]\<rightarrow>\<^sub>d* n" by(rule irsSp_Nil)
with `n s-p\<rightarrow>\<^bsub>ret\<^esub> n''` `n'' is-ns\<rightarrow>\<^sub>d* n'` show ?thesis by fastforce
next
assume "\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n'"
then obtain nsx nsx' nx nx' p' where "ns = nsx@nx#nsx'" and "n'' irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p':V\<rightarrow>\<^bsub>out\<^esub> nx')"
and "nx' is-nsx'\<rightarrow>\<^sub>d* n'" by blast
from `n'' irs-nsx\<rightarrow>\<^sub>d* nx` `n s-p\<rightarrow>\<^bsub>ret\<^esub> n''` have "n irs-n#nsx\<rightarrow>\<^sub>d* nx"
by(rule intra_return_sum_SDG_path.irsSp_Cons_return)
with `ns = nsx@nx#nsx'` `nx s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p':V\<rightarrow>\<^bsub>out\<^esub> nx')`
`nx' is-nsx'\<rightarrow>\<^sub>d* n'`
show ?thesis by fastforce
qed
next
case (irsSp_Cons_param_out n'' ns n' n p V)
from `n'' is-ns\<rightarrow>\<^sub>d* n' \<or>
(\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n')`
show ?case
proof
assume "n'' is-ns\<rightarrow>\<^sub>d* n'"
from `n s-p:V\<rightarrow>\<^bsub>out\<^esub> n''` have "valid_SDG_node n"
by(rule sum_SDG_edge_valid_SDG_node)
hence "n irs-[]\<rightarrow>\<^sub>d* n" by(rule irsSp_Nil)
with `n s-p:V\<rightarrow>\<^bsub>out\<^esub> n''` `n'' is-ns\<rightarrow>\<^sub>d* n'` show ?thesis by fastforce
next
assume "\<exists>nsx nx nsx' p nx'. ns = nsx@nx#nsx' \<and> n'' irs-nsx\<rightarrow>\<^sub>d* nx \<and>
(nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')) \<and> nx' is-nsx'\<rightarrow>\<^sub>d* n'"
then obtain nsx nsx' nx nx' p' where "ns = nsx@nx#nsx'" and "n'' irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p':V\<rightarrow>\<^bsub>out\<^esub> nx')"
and "nx' is-nsx'\<rightarrow>\<^sub>d* n'" by blast
from `n'' irs-nsx\<rightarrow>\<^sub>d* nx` `n s-p:V\<rightarrow>\<^bsub>out\<^esub> n''` have "n irs-n#nsx\<rightarrow>\<^sub>d* nx"
by(rule intra_return_sum_SDG_path.irsSp_Cons_param_out)
with `ns = nsx@nx#nsx'` `nx s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p':V\<rightarrow>\<^bsub>out\<^esub> nx')`
`nx' is-nsx'\<rightarrow>\<^sub>d* n'`
show ?thesis by fastforce
qed
qed
qed
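(* Key lemma: an irs-path into the source of a return or parameter-out edge
   yields a matched path to that edge's target which still visits n and whose
   start node carries a summary edge to the target's parent CFG node. *)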
lemma irs_SDG_path_matched:
assumes "n irs-ns\<rightarrow>\<^sub>d* n''" and "n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'"
obtains nx nsx where "matched nx nsx n'" and "n \<in> set nsx"
and "nx s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node n')"
proof(atomize_elim)
from assms
show "\<exists>nx nsx. matched nx nsx n' \<and> n \<in> set nsx \<and>
nx s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node n')"
proof(induct ns arbitrary:n'' n' p V rule:length_induct)
fix ns n'' n' p V
assume IH:"\<forall>ns'. length ns' < length ns \<longrightarrow>
(\<forall>n''. n irs-ns'\<rightarrow>\<^sub>d* n'' \<longrightarrow>
(\<forall>nx' p' V'. (n'' s-p'\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> n'' s-p':V'\<rightarrow>\<^bsub>out\<^esub> nx') \<longrightarrow>
(\<exists>nx nsx. matched nx nsx nx' \<and> n \<in> set nsx \<and>
nx s-p'\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node nx'))))"
and "n irs-ns\<rightarrow>\<^sub>d* n''" and "n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'"
from `n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'` have "valid_SDG_node n''"
by(fastforce intro:sum_SDG_edge_valid_SDG_node)
from `n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'`
have "n'' -p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' -p:V\<rightarrow>\<^bsub>out\<^esub> n'"
by(fastforce intro:sum_SDG_edge_SDG_edge SDG_edge_sum_SDG_edge)
from `n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'`
have "CFG_node (parent_node n'') s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')"
by(fastforce elim:sum_SDG_edge.cases intro:sum_SDG_return_edge)
then obtain a Q f where "valid_edge a" and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
and "parent_node n'' = sourcenode a" and "parent_node n' = targetnode a"
by(fastforce elim:sum_SDG_edge.cases)
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` obtain a' Q' r' fs'
where "a \<in> get_return_edges a'" and "valid_edge a'" and "kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'"
and "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)"
by(erule return_edge_determines_call_and_sum_edge)
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
have "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a')"
by(fastforce intro:sum_SDG_call_edge)
from `CFG_node (parent_node n'') s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')`
have "get_proc (parent_node n'') = p"
by(auto elim!:sum_SDG_edge.cases intro:get_proc_return)
from `n irs-ns\<rightarrow>\<^sub>d* n''`
show "\<exists>nx nsx. matched nx nsx n' \<and> n \<in> set nsx \<and>
nx s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node n')"
proof(rule irs_SDG_path_split)
assume "n is-ns\<rightarrow>\<^sub>d* n''"
hence "valid_SDG_node n" by(rule is_SDG_path_valid_SDG_node)
then obtain asx where "(_Entry_) -asx\<rightarrow>\<^sub>\<surd>* parent_node n"
by(fastforce dest:valid_SDG_CFG_node Entry_path)
then obtain asx' where "(_Entry_) -asx'\<rightarrow>\<^sub>\<surd>* parent_node n"
and "\<forall>a' \<in> set asx'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by -(erule valid_Entry_path_ascending_path)
from `n is-ns\<rightarrow>\<^sub>d* n''` obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''"
by(erule is_SDG_path_CFG_path)
hence "get_proc (parent_node n) = get_proc (parent_node n'')"
by(rule intra_path_get_procs)
from `valid_SDG_node n` have "valid_node (parent_node n)"
by(rule valid_SDG_CFG_node)
hence "valid_SDG_node (CFG_node (parent_node n))" by simp
have "\<exists>a as. valid_edge a \<and> (\<exists>Q p r fs. kind a = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs) \<and>
targetnode a -as\<rightarrow>\<^sub>\<iota>* parent_node n"
proof(cases "\<forall>a' \<in> set asx'. intra_kind(kind a')")
case True
with `(_Entry_) -asx'\<rightarrow>\<^sub>\<surd>* parent_node n`
have "(_Entry_) -asx'\<rightarrow>\<^sub>\<iota>* parent_node n"
by(fastforce simp:intra_path_def vp_def)
hence "get_proc (_Entry_) = get_proc (parent_node n)"
by(rule intra_path_get_procs)
with get_proc_Entry have "get_proc (parent_node n) = Main" by simp
from `get_proc (parent_node n) = get_proc (parent_node n'')`
`get_proc (parent_node n) = Main`
have "get_proc (parent_node n'') = Main" by simp
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` have "get_proc (sourcenode a) = p"
by(rule get_proc_return)
with `parent_node n'' = sourcenode a` `get_proc (parent_node n'') = Main`
have "p = Main" by simp
with `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` have "kind a = Q\<hookleftarrow>\<^bsub>Main\<^esub>f" by simp
with `valid_edge a` have False by(rule Main_no_return_source)
thus ?thesis by simp
next
assume "\<not> (\<forall>a'\<in>set asx'. intra_kind (kind a'))"
with `\<forall>a' \<in> set asx'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
have "\<exists>a' \<in> set asx'. \<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs"
by(fastforce simp:intra_kind_def)
then obtain as a' as' where "asx' = as@a'#as'"
and "\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs"
and "\<forall>a' \<in> set as'. \<not> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by(erule split_list_last_propE)
with `\<forall>a' \<in> set asx'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
have "\<forall>a'\<in>set as'. intra_kind (kind a')" by(auto simp:intra_kind_def)
from `(_Entry_) -asx'\<rightarrow>\<^sub>\<surd>* parent_node n` `asx' = as@a'#as'`
have "valid_edge a'" and "targetnode a' -as'\<rightarrow>* parent_node n"
by(auto dest:path_split simp:vp_def)
with `\<forall>a'\<in>set as'. intra_kind (kind a')` `\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs`
show ?thesis by(fastforce simp:intra_path_def)
qed
then obtain ax asx Qx rx fsx px where "valid_edge ax"
and "kind ax = Qx:rx\<hookrightarrow>\<^bsub>px\<^esub>fsx" and "targetnode ax -asx\<rightarrow>\<^sub>\<iota>* parent_node n"
by blast
from `valid_edge ax` `kind ax = Qx:rx\<hookrightarrow>\<^bsub>px\<^esub>fsx`
have "get_proc (targetnode ax) = px"
by(rule get_proc_call)
from `targetnode ax -asx\<rightarrow>\<^sub>\<iota>* parent_node n`
have "get_proc (targetnode ax) = get_proc (parent_node n)"
by(rule intra_path_get_procs)
with `get_proc (parent_node n) = get_proc (parent_node n'')`
`get_proc (targetnode ax) = px`
have "get_proc (parent_node n'') = px" by simp
with `get_proc (parent_node n'') = p` have [simp]:"px = p" by simp
from `valid_edge a'` `valid_edge ax` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
`kind ax = Qx:rx\<hookrightarrow>\<^bsub>px\<^esub>fsx`
have "targetnode a' = targetnode ax" by simp(rule same_proc_call_unique_target)
have "parent_node n \<noteq> (_Exit_)"
proof
assume "parent_node n = (_Exit_)"
from `n is-ns\<rightarrow>\<^sub>d* n''` obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n''"
by(erule is_SDG_path_CFG_path)
with `parent_node n = (_Exit_)`
have "(_Exit_) -as\<rightarrow>* parent_node n''" by(simp add:intra_path_def)
hence "parent_node n'' = (_Exit_)" by(fastforce dest:path_Exit_source)
from `get_proc (parent_node n'') = p` `parent_node n'' = (_Exit_)`
`parent_node n'' = sourcenode a` get_proc_Exit
have "p = Main" by simp
with `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` have "kind a = Q\<hookleftarrow>\<^bsub>Main\<^esub>f" by simp
with `valid_edge a` show False by(rule Main_no_return_source)
qed
have "\<exists>nsx. CFG_node (targetnode a') cd-nsx\<rightarrow>\<^sub>d* CFG_node (parent_node n)"
proof(cases "targetnode a' = parent_node n")
case True
with `valid_SDG_node (CFG_node (parent_node n))`
have "CFG_node (targetnode a') cd-[]\<rightarrow>\<^sub>d* CFG_node (parent_node n)"
by(fastforce intro:cdSp_Nil)
thus ?thesis by blast
next
case False
with `targetnode ax -asx\<rightarrow>\<^sub>\<iota>* parent_node n` `parent_node n \<noteq> (_Exit_)`
`valid_edge ax` `kind ax = Qx:rx\<hookrightarrow>\<^bsub>px\<^esub>fsx` `targetnode a' = targetnode ax`
obtain nsx
where "CFG_node (targetnode a') cd-nsx\<rightarrow>\<^sub>d* CFG_node (parent_node n)"
by(fastforce elim!:in_proc_cdep_SDG_path)
thus ?thesis by blast
qed
then obtain nsx
where "CFG_node (targetnode a') cd-nsx\<rightarrow>\<^sub>d* CFG_node (parent_node n)" by blast
hence "CFG_node (targetnode a') i-nsx\<rightarrow>\<^sub>d* CFG_node (parent_node n)"
by(rule cdep_SDG_path_intra_SDG_path)
show ?thesis
proof(cases ns)
case Nil
with `n is-ns\<rightarrow>\<^sub>d* n''` have "n = n''"
by(fastforce elim:intra_sum_SDG_path.cases)
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'` `a \<in> get_return_edges a'`
have "matched (CFG_node (targetnode a')) [CFG_node (targetnode a')]
(CFG_node (sourcenode a))" by(rule intra_proc_matched)
from `valid_SDG_node n''`
have "n'' = CFG_node (parent_node n'') \<or> CFG_node (parent_node n'') \<longrightarrow>\<^bsub>cd\<^esub> n''"
by(rule valid_SDG_node_cases)
hence "\<exists>nsx. CFG_node (parent_node n'') i-nsx\<rightarrow>\<^sub>d* n''"
proof
assume "n'' = CFG_node (parent_node n'')"
with `valid_SDG_node n''` have "CFG_node (parent_node n'') i-[]\<rightarrow>\<^sub>d* n''"
by(fastforce intro:iSp_Nil)
thus ?thesis by blast
next
assume "CFG_node (parent_node n'') \<longrightarrow>\<^bsub>cd\<^esub> n''"
from `valid_SDG_node n''` have "valid_node (parent_node n'')"
by(rule valid_SDG_CFG_node)
hence "valid_SDG_node (CFG_node (parent_node n''))" by simp
hence "CFG_node (parent_node n'') i-[]\<rightarrow>\<^sub>d* CFG_node (parent_node n'')"
by(rule iSp_Nil)
with `CFG_node (parent_node n'') \<longrightarrow>\<^bsub>cd\<^esub> n''`
have "CFG_node (parent_node n'') i-[]@[CFG_node (parent_node n'')]\<rightarrow>\<^sub>d* n''"
by(fastforce intro:iSp_Append_cdep sum_SDG_edge_SDG_edge)
thus ?thesis by blast
qed
with `parent_node n'' = sourcenode a`
obtain nsx where "CFG_node (sourcenode a) i-nsx\<rightarrow>\<^sub>d* n''" by fastforce
with `matched (CFG_node (targetnode a')) [CFG_node (targetnode a')]
(CFG_node (sourcenode a))`
have "matched (CFG_node (targetnode a')) ([CFG_node (targetnode a')]@nsx) n''"
by(fastforce intro:matched_Append intra_SDG_path_matched)
moreover
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
have "CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a')"
by(fastforce intro:SDG_call_edge)
moreover
from `valid_edge a'` have "valid_SDG_node (CFG_node (sourcenode a'))"
by simp
hence "matched (CFG_node (sourcenode a')) [] (CFG_node (sourcenode a'))"
by(rule matched_Nil)
ultimately have "matched (CFG_node (sourcenode a'))
([]@(CFG_node (sourcenode a'))#([CFG_node (targetnode a')]@nsx)@[n'']) n'"
using `n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'` `parent_node n' = targetnode a`
`parent_node n'' = sourcenode a` `valid_edge a'` `a \<in> get_return_edges a'`
by(fastforce intro:matched_bracket_call dest:sum_SDG_edge_SDG_edge)
with `n = n''` `CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)`
`parent_node n' = targetnode a`
show ?thesis by fastforce
next
case Cons
with `n is-ns\<rightarrow>\<^sub>d* n''` have "n \<in> set ns"
by(induct rule:intra_sum_SDG_path_rev_induct) auto
from `n is-ns\<rightarrow>\<^sub>d* n''` obtain ns' where "matched n ns' n''"
and "set ns \<subseteq> set ns'" by(erule is_SDG_path_matched)
with `n \<in> set ns` have "n \<in> set ns'" by fastforce
from `valid_SDG_node n`
have "n = CFG_node (parent_node n) \<or> CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
by(rule valid_SDG_node_cases)
hence "\<exists>nsx. CFG_node (parent_node n) i-nsx\<rightarrow>\<^sub>d* n"
proof
assume "n = CFG_node (parent_node n)"
with `valid_SDG_node n` have "CFG_node (parent_node n) i-[]\<rightarrow>\<^sub>d* n"
by(fastforce intro:iSp_Nil)
thus ?thesis by blast
next
assume "CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n"
from `valid_SDG_node (CFG_node (parent_node n))`
have "CFG_node (parent_node n) i-[]\<rightarrow>\<^sub>d* CFG_node (parent_node n)"
by(rule iSp_Nil)
with `CFG_node (parent_node n) \<longrightarrow>\<^bsub>cd\<^esub> n`
have "CFG_node (parent_node n) i-[]@[CFG_node (parent_node n)]\<rightarrow>\<^sub>d* n"
by(fastforce intro:iSp_Append_cdep sum_SDG_edge_SDG_edge)
thus ?thesis by blast
qed
then obtain nsx' where "CFG_node (parent_node n) i-nsx'\<rightarrow>\<^sub>d* n" by blast
with `CFG_node (targetnode a') i-nsx\<rightarrow>\<^sub>d* CFG_node (parent_node n)`
have "CFG_node (targetnode a') i-nsx@nsx'\<rightarrow>\<^sub>d* n"
by -(rule intra_SDG_path_Append)
hence "matched (CFG_node (targetnode a')) (nsx@nsx') n"
by(rule intra_SDG_path_matched)
with `matched n ns' n''`
have "matched (CFG_node (targetnode a')) ((nsx@nsx')@ns') n''"
by(rule matched_Append)
moreover
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
have "CFG_node (sourcenode a') -p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a')"
by(fastforce intro:SDG_call_edge)
moreover
from `valid_edge a'` have "valid_SDG_node (CFG_node (sourcenode a'))"
by simp
hence "matched (CFG_node (sourcenode a')) [] (CFG_node (sourcenode a'))"
by(rule matched_Nil)
ultimately have "matched (CFG_node (sourcenode a'))
([]@(CFG_node (sourcenode a'))#((nsx@nsx')@ns')@[n'']) n'"
using `n'' s-p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' s-p:V\<rightarrow>\<^bsub>out\<^esub> n'` `parent_node n' = targetnode a`
`parent_node n'' = sourcenode a` `valid_edge a'` `a \<in> get_return_edges a'`
by(fastforce intro:matched_bracket_call dest:sum_SDG_edge_SDG_edge)
with `CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)`
`parent_node n' = targetnode a` `n \<in> set ns'`
show ?thesis by fastforce
qed
next
fix ms ms' m m' px
assume "ns = ms@m#ms'" and "n irs-ms\<rightarrow>\<^sub>d* m"
and "m s-px\<rightarrow>\<^bsub>ret\<^esub> m' \<or> (\<exists>V. m s-px:V\<rightarrow>\<^bsub>out\<^esub> m')" and "m' is-ms'\<rightarrow>\<^sub>d* n''"
from `ns = ms@m#ms'` have "length ms < length ns" by simp
with IH `n irs-ms\<rightarrow>\<^sub>d* m` `m s-px\<rightarrow>\<^bsub>ret\<^esub> m' \<or> (\<exists>V. m s-px:V\<rightarrow>\<^bsub>out\<^esub> m')` obtain mx msx
where "matched mx msx m'" and "n \<in> set msx"
and "mx s-px\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node m')" by fastforce
from `m' is-ms'\<rightarrow>\<^sub>d* n''` obtain msx' where "matched m' msx' n''"
by -(erule is_SDG_path_matched)
with `matched mx msx m'` have "matched mx (msx@msx') n''"
by -(rule matched_Append)
from `m s-px\<rightarrow>\<^bsub>ret\<^esub> m' \<or> (\<exists>V. m s-px:V\<rightarrow>\<^bsub>out\<^esub> m')`
have "m -px\<rightarrow>\<^bsub>ret\<^esub> m' \<or> (\<exists>V. m -px:V\<rightarrow>\<^bsub>out\<^esub> m')"
by(auto intro:sum_SDG_edge_SDG_edge SDG_edge_sum_SDG_edge)
from `m s-px\<rightarrow>\<^bsub>ret\<^esub> m' \<or> (\<exists>V. m s-px:V\<rightarrow>\<^bsub>out\<^esub> m')`
have "CFG_node (parent_node m) s-px\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node m')"
by(fastforce elim:sum_SDG_edge.cases intro:sum_SDG_return_edge)
then obtain ax Qx fx where "valid_edge ax" and "kind ax = Qx\<hookleftarrow>\<^bsub>px\<^esub>fx"
and "parent_node m = sourcenode ax" and "parent_node m' = targetnode ax"
by(fastforce elim:sum_SDG_edge.cases)
from `valid_edge ax` `kind ax = Qx\<hookleftarrow>\<^bsub>px\<^esub>fx` obtain ax' Qx' rx' fsx'
where "ax \<in> get_return_edges ax'" and "valid_edge ax'"
and "kind ax' = Qx':rx'\<hookrightarrow>\<^bsub>px\<^esub>fsx'"
and "CFG_node (sourcenode ax') s-px\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode ax)"
by(erule return_edge_determines_call_and_sum_edge)
from `valid_edge ax'` `kind ax' = Qx':rx'\<hookrightarrow>\<^bsub>px\<^esub>fsx'`
have "CFG_node (sourcenode ax') s-px\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode ax')"
by(fastforce intro:sum_SDG_call_edge)
from `mx s-px\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node m')`
have "valid_SDG_node mx" by(rule sum_SDG_edge_valid_SDG_node)
have "\<exists>msx''. CFG_node (targetnode a') cd-msx''\<rightarrow>\<^sub>d* mx"
proof(cases "targetnode a' = parent_node mx")
case True
from `valid_SDG_node mx`
have "mx = CFG_node (parent_node mx) \<or> CFG_node (parent_node mx) \<longrightarrow>\<^bsub>cd\<^esub> mx"
by(rule valid_SDG_node_cases)
thus ?thesis
proof
assume "mx = CFG_node (parent_node mx)"
with `valid_SDG_node mx` True
have "CFG_node (targetnode a') cd-[]\<rightarrow>\<^sub>d* mx" by(fastforce intro:cdSp_Nil)
thus ?thesis by blast
next
assume "CFG_node (parent_node mx) \<longrightarrow>\<^bsub>cd\<^esub> mx"
with `valid_edge a'` True[THEN sym]
have "CFG_node (targetnode a') cd-[]@[CFG_node (targetnode a')]\<rightarrow>\<^sub>d* mx"
by(fastforce intro:cdep_SDG_path.intros)
thus ?thesis by blast
qed
next
case False
show ?thesis
proof(cases "\<forall>ai. valid_edge ai \<and> sourcenode ai = parent_node mx
\<longrightarrow> ai \<notin> get_return_edges a'")
case True
{ assume "parent_node mx = (_Exit_)"
with `mx s-px\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node m')`
obtain ai where "valid_edge ai" and "sourcenode ai = (_Exit_)"
by -(erule sum_SDG_edge.cases,auto)
hence False by(rule Exit_source) }
hence "parent_node mx \<noteq> (_Exit_)" by fastforce
from `valid_SDG_node mx` have "valid_node (parent_node mx)"
by(rule valid_SDG_CFG_node)
then obtain asx where "(_Entry_) -asx\<rightarrow>\<^sub>\<surd>* parent_node mx"
by(fastforce intro:Entry_path)
then obtain asx' where "(_Entry_) -asx'\<rightarrow>\<^sub>\<surd>* parent_node mx"
and "\<forall>a' \<in> set asx'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by -(erule valid_Entry_path_ascending_path)
from `mx s-px\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node m')`
obtain nsi where "matched mx nsi (CFG_node (parent_node m'))"
by -(erule sum_SDG_summary_edge_matched)
then obtain asi where "parent_node mx -asi\<rightarrow>\<^bsub>sl\<^esub>* parent_node m'"
by(fastforce elim:matched_same_level_CFG_path)
hence "get_proc (parent_node mx) = get_proc (parent_node m')"
by(rule slp_get_proc)
from `m' is-ms'\<rightarrow>\<^sub>d* n''` obtain nsi' where "matched m' nsi' n''"
by -(erule is_SDG_path_matched)
then obtain asi' where "parent_node m' -asi'\<rightarrow>\<^bsub>sl\<^esub>* parent_node n''"
by -(erule matched_same_level_CFG_path)
hence "get_proc (parent_node m') = get_proc (parent_node n'')"
by(rule slp_get_proc)
with `get_proc (parent_node mx) = get_proc (parent_node m')`
have "get_proc (parent_node mx) = get_proc (parent_node n'')" by simp
from `get_proc (parent_node n'') = p`
`get_proc (parent_node mx) = get_proc (parent_node n'')`
have "get_proc (parent_node mx) = p" by simp
have "\<exists>asx. targetnode a' -asx\<rightarrow>\<^sub>\<iota>* parent_node mx"
proof(cases "\<forall>a' \<in> set asx'. intra_kind(kind a')")
case True
with `(_Entry_) -asx'\<rightarrow>\<^sub>\<surd>* parent_node mx`
have "(_Entry_) -asx'\<rightarrow>\<^sub>\<iota>* parent_node mx"
by(simp add:vp_def intra_path_def)
hence "get_proc (_Entry_) = get_proc (parent_node mx)"
by(rule intra_path_get_procs)
with `get_proc (parent_node mx) = p` have "get_proc (_Entry_) = p"
by simp
with `CFG_node (parent_node n'') s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node n')`
have False
by -(erule sum_SDG_edge.cases,
auto intro:Main_no_return_source simp:get_proc_Entry)
thus ?thesis by simp
next
case False
hence "\<exists>a' \<in> set asx'. \<not> intra_kind (kind a')" by fastforce
then obtain ai as' as'' where "asx' = as'@ai#as''"
and "\<not> intra_kind (kind ai)" and "\<forall>a' \<in> set as''. intra_kind (kind a')"
by(fastforce elim!:split_list_last_propE)
from `asx' = as'@ai#as''` `\<not> intra_kind (kind ai)`
`\<forall>a' \<in> set asx'. intra_kind(kind a') \<or> (\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)`
obtain Qi ri pi fsi where "kind ai = Qi:ri\<hookrightarrow>\<^bsub>pi\<^esub>fsi"
and "\<forall>a' \<in> set as'. intra_kind(kind a') \<or>
(\<exists>Q r p fs. kind a' = Q:r\<hookrightarrow>\<^bsub>p\<^esub>fs)"
by auto
from `(_Entry_) -asx'\<rightarrow>\<^sub>\<surd>* parent_node mx` `asx' = as'@ai#as''`
`\<forall>a' \<in> set as''. intra_kind (kind a')`
have "valid_edge ai" and "targetnode ai -as''\<rightarrow>\<^sub>\<iota>* parent_node mx"
by(auto intro:path_split simp:vp_def intra_path_def)
hence "get_proc (targetnode ai) = get_proc (parent_node mx)"
by -(rule intra_path_get_procs)
with `get_proc (parent_node mx) = p` `valid_edge ai`
`kind ai = Qi:ri\<hookrightarrow>\<^bsub>pi\<^esub>fsi`
have [simp]:"pi = p" by(fastforce dest:get_proc_call)
from `valid_edge ai` `valid_edge a'`
`kind ai = Qi:ri\<hookrightarrow>\<^bsub>pi\<^esub>fsi` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
have "targetnode ai = targetnode a'"
by(fastforce intro:same_proc_call_unique_target)
with `targetnode ai -as''\<rightarrow>\<^sub>\<iota>* parent_node mx`
show ?thesis by fastforce
qed
then obtain asx where "targetnode a' -asx\<rightarrow>\<^sub>\<iota>* parent_node mx" by blast
from this `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
`parent_node mx \<noteq> (_Exit_)` `targetnode a' \<noteq> parent_node mx` True
obtain msi
where "CFG_node(targetnode a') cd-msi\<rightarrow>\<^sub>d* CFG_node(parent_node mx)"
by(fastforce elim!:in_proc_cdep_SDG_path)
from `valid_SDG_node mx`
have "mx = CFG_node (parent_node mx) \<or> CFG_node (parent_node mx) \<longrightarrow>\<^bsub>cd\<^esub> mx"
by(rule valid_SDG_node_cases)
thus ?thesis
proof
assume "mx = CFG_node (parent_node mx)"
with `CFG_node(targetnode a')cd-msi\<rightarrow>\<^sub>d* CFG_node(parent_node mx)`
show ?thesis by fastforce
next
assume "CFG_node (parent_node mx) \<longrightarrow>\<^bsub>cd\<^esub> mx"
with `CFG_node(targetnode a')cd-msi\<rightarrow>\<^sub>d* CFG_node(parent_node mx)`
have "CFG_node(targetnode a') cd-msi@[CFG_node(parent_node mx)]\<rightarrow>\<^sub>d* mx"
by(fastforce intro:cdSp_Append_cdep)
thus ?thesis by fastforce
qed
next
case False
then obtain ai where "valid_edge ai" and "sourcenode ai = parent_node mx"
and "ai \<in> get_return_edges a'" by blast
with `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
have "CFG_node (targetnode a') \<longrightarrow>\<^bsub>cd\<^esub> CFG_node (parent_node mx)"
by(auto intro:SDG_proc_entry_exit_cdep)
with `valid_edge a'`
have cd_path:"CFG_node (targetnode a') cd-[]@[CFG_node (targetnode a')]\<rightarrow>\<^sub>d*
CFG_node (parent_node mx)"
by(fastforce intro:cdSp_Append_cdep cdSp_Nil)
from `valid_SDG_node mx`
have "mx = CFG_node (parent_node mx) \<or> CFG_node (parent_node mx) \<longrightarrow>\<^bsub>cd\<^esub> mx"
by(rule valid_SDG_node_cases)
thus ?thesis
proof
assume "mx = CFG_node (parent_node mx)"
with cd_path show ?thesis by fastforce
next
assume "CFG_node (parent_node mx) \<longrightarrow>\<^bsub>cd\<^esub> mx"
with cd_path have "CFG_node (targetnode a')
cd-[CFG_node (targetnode a')]@[CFG_node (parent_node mx)]\<rightarrow>\<^sub>d* mx"
by(fastforce intro:cdSp_Append_cdep)
thus ?thesis by fastforce
qed
qed
qed
then obtain msx''
where "CFG_node (targetnode a') cd-msx''\<rightarrow>\<^sub>d* mx" by blast
hence "CFG_node (targetnode a') i-msx''\<rightarrow>\<^sub>d* mx"
by(rule cdep_SDG_path_intra_SDG_path)
with `valid_edge a'`
have "matched (CFG_node (targetnode a')) ([]@msx'') mx"
by(fastforce intro:matched_Append_intra_SDG_path matched_Nil)
with `matched mx (msx@msx') n''`
have "matched (CFG_node (targetnode a')) (msx''@(msx@msx')) n''"
by(fastforce intro:matched_Append)
with `valid_edge a'` `CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a')`
`n'' -p\<rightarrow>\<^bsub>ret\<^esub> n' \<or> n'' -p:V\<rightarrow>\<^bsub>out\<^esub> n'` `a \<in> get_return_edges a'`
`parent_node n'' = sourcenode a` `parent_node n' = targetnode a`
have "matched (CFG_node (sourcenode a'))
([]@CFG_node (sourcenode a')#(msx''@(msx@msx'))@[n'']) n'"
by(fastforce intro:matched_bracket_call matched_Nil sum_SDG_edge_SDG_edge)
with `n \<in> set msx` `CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)`
`parent_node n' = targetnode a`
show ?thesis by fastforce
qed
qed
qed
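(* Consequently, every non-trivial irs-path can be embedded into a realizable
   path starting at the Entry node that still visits the original start
   node. *)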
lemma irs_SDG_path_realizable:
assumes "n irs-ns\<rightarrow>\<^sub>d* n'" and "n \<noteq> n'"
obtains ns' where "realizable (CFG_node (_Entry_)) ns' n'" and "n \<in> set ns'"
proof(atomize_elim)
from `n irs-ns\<rightarrow>\<^sub>d* n'`
have "n = n' \<or> (\<exists>ns'. realizable (CFG_node (_Entry_)) ns' n' \<and> n \<in> set ns')"
proof(rule irs_SDG_path_split)
assume "n is-ns\<rightarrow>\<^sub>d* n'"
show ?thesis
proof(cases "ns = []")
case True
with `n is-ns\<rightarrow>\<^sub>d* n'` have "n = n'" by(fastforce elim:intra_sum_SDG_path.cases)
thus ?thesis by simp
next
case False
with `n is-ns\<rightarrow>\<^sub>d* n'` have "n \<in> set ns" by(fastforce dest:is_SDG_path_hd)
from `n is-ns\<rightarrow>\<^sub>d* n'` have "valid_SDG_node n" and "valid_SDG_node n'"
by(rule is_SDG_path_valid_SDG_node)+
hence "valid_node (parent_node n)" by -(rule valid_SDG_CFG_node)
from `n is-ns\<rightarrow>\<^sub>d* n'` obtain ns' where "matched n ns' n'" and "set ns \<subseteq> set ns'"
by(erule is_SDG_path_matched)
with `n \<in> set ns` have "n \<in> set ns'" by fastforce
from `valid_node (parent_node n)`
show ?thesis
proof(cases "parent_node n = (_Exit_)")
case True
with `valid_SDG_node n` have "n = CFG_node (_Exit_)"
by(rule valid_SDG_node_parent_Exit)
from `n is-ns\<rightarrow>\<^sub>d* n'` obtain as where "parent_node n -as\<rightarrow>\<^sub>\<iota>* parent_node n'"
by -(erule is_SDG_path_intra_CFG_path)
with `n = CFG_node (_Exit_)` have "parent_node n' = (_Exit_)"
by(fastforce dest:path_Exit_source simp:intra_path_def)
with `valid_SDG_node n'` have "n' = CFG_node (_Exit_)"
by(rule valid_SDG_node_parent_Exit)
with `n = CFG_node (_Exit_)` show ?thesis by simp
next
case False
with `valid_SDG_node n`
obtain nsx where "CFG_node (_Entry_) cc-nsx\<rightarrow>\<^sub>d* n"
by(erule Entry_cc_SDG_path_to_inner_node)
hence "realizable (CFG_node (_Entry_)) nsx n"
by(rule cdep_SDG_path_realizable)
with `matched n ns' n'`
have "realizable (CFG_node (_Entry_)) (nsx@ns') n'"
by -(rule realizable_Append_matched)
with `n \<in> set ns'` show ?thesis by fastforce
qed
qed
next
fix nsx nsx' nx nx' p
assume "ns = nsx@nx#nsx'" and "n irs-nsx\<rightarrow>\<^sub>d* nx"
and "nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')" and "nx' is-nsx'\<rightarrow>\<^sub>d* n'"
from `nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')`
have "CFG_node (parent_node nx) s-p\<rightarrow>\<^bsub>ret\<^esub> CFG_node (parent_node nx')"
by(fastforce elim:sum_SDG_edge.cases intro:sum_SDG_return_edge)
then obtain a Q f where "valid_edge a" and "kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f"
and "parent_node nx = sourcenode a" and "parent_node nx' = targetnode a"
by(fastforce elim:sum_SDG_edge.cases)
from `valid_edge a` `kind a = Q\<hookleftarrow>\<^bsub>p\<^esub>f` obtain a' Q' r' fs'
where "a \<in> get_return_edges a'" and "valid_edge a'" and "kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'"
and "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (targetnode a)"
by(erule return_edge_determines_call_and_sum_edge)
from `valid_edge a'` `kind a' = Q':r'\<hookrightarrow>\<^bsub>p\<^esub>fs'`
have "CFG_node (sourcenode a') s-p\<rightarrow>\<^bsub>call\<^esub> CFG_node (targetnode a')"
by(fastforce intro:sum_SDG_call_edge)
from `n irs-nsx\<rightarrow>\<^sub>d* nx` `nx s-p\<rightarrow>\<^bsub>ret\<^esub> nx' \<or> (\<exists>V. nx s-p:V\<rightarrow>\<^bsub>out\<^esub> nx')`
obtain m ms where "matched m ms nx'" and "n \<in> set ms"
and "m s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node nx')"
by(fastforce elim:irs_SDG_path_matched)
from `nx' is-nsx'\<rightarrow>\<^sub>d* n'` obtain ms' where "matched nx' ms' n'"
and "set nsx' \<subseteq> set ms'" by(erule is_SDG_path_matched)
with `matched m ms nx'` have "matched m (ms@ms') n'" by -(rule matched_Append)
from `m s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node nx')` have "valid_SDG_node m"
by(rule sum_SDG_edge_valid_SDG_node)
hence "valid_node (parent_node m)" by(rule valid_SDG_CFG_node)
thus ?thesis
proof(cases "parent_node m = (_Exit_)")
case True
from `m s-p\<rightarrow>\<^bsub>sum\<^esub> CFG_node (parent_node nx')` obtain a where "valid_edge a"
and "sourcenode a = parent_node m"
by(fastforce elim:sum_SDG_edge.cases)
with True have False by -(rule Exit_source,simp_all)
thus ?thesis by simp
next
case False
with `valid_SDG_node m`
obtain ms'' where "CFG_node (_Entry_) cc-ms''\<rightarrow>\<^sub>d* m"
by(erule Entry_cc_SDG_path_to_inner_node)
hence "realizable (CFG_node (_Entry_)) ms'' m"
by(rule cdep_SDG_path_realizable)
with `matched m (ms@ms') n'`
have "realizable (CFG_node (_Entry_)) (ms''@(ms@ms')) n'"
by -(rule realizable_Append_matched)
with `n \<in> set ms` show ?thesis by fastforce
qed
qed
with `n \<noteq> n'` show "\<exists>ns'. realizable (CFG_node (_Entry_)) ns' n' \<and> n \<in> set ns'"
by simp
qed
end
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/HRB-Slicing/StaticInter/SDG.thy"}
|
#!/usr/bin/python
import matplotlib
matplotlib.use('Agg') # Select a non-interactive backend (no X server needed) before importing pyplot
import matplotlib.pyplot as plt
import csv
# Per-system series parsed from the CSV: x = thread counts, y = runtimes in seconds.
ring_x = []
ring_y = []
q_x = []
q_y = []
# Select exactly one input file; the commented-out names are the other
# durable/volatile and pinned/unpinned variants of the same benchmark.
#fname = "vacation_durable_low_2048_unpinned"
#fname = "vacation_durable_low_2048_pinned"
#fname = "vacation_volatile_low_2048_unpinned"
fname = "vacation_volatile_low_2048_pinned"
with open(fname+'.csv','r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
for row in plots:
if row[0] == "ringstm":
ring_x.append(int(row[1]))
ring_y.append(float(row[2]))
if row[0] == "qstm":
q_x.append(int(row[1]))
q_y.append(float(row[2]))
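# Convert per-run times (seconds) into throughput; 671088 is taken to be the
# fixed number of transactions per benchmark run (an assumption from context).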
ring_thpt = [671088/i for i in ring_y]
q_thpt = [671088/i for i in q_y]
# Time plot
#plt.plot(ring_x, ring_y, label='RingSTM')
#plt.plot(q_x, q_y, label='QSTM')
# Throughput plot
plt.plot(ring_x, ring_thpt, label='RingSTM')
plt.plot(q_x, q_thpt, label='QSTM')
plt.xlabel('Threads')
#plt.ylabel('Seconds')
plt.ylabel('Throughput (txn/sec)')
#plt.title('Durable, low, 2x18a, Vacation, 2048, unpinned')
#plt.title('Durable, low, 2x18a, Vacation, 2048, pinned')
#plt.title('Non-durable, low, 2x18a, Vacation, 2048, unpinned')
plt.title('Non-durable, low, 2x18a, Vacation, 2048, pinned')
plt.xlim(0,82)
#plt.ylim(0,30)
plt.ylim(0,200000)
plt.xticks((1, 2, 4, 6, 8, 12, 16, 24, 32, 40, 48, 64, 72, 80))
plt.legend(loc='lower right')
#plt.show()
plt.savefig(fname+".png",dpi=300)
|
{"hexsha": "ae42372accb2d92bebe326b4544515015c39ad2b", "size": 1438, "ext": "py", "lang": "Python", "max_stars_repo_path": "ext/qstm/OF-QSTM/ptms/qstm/data/old/plot_pinning/plot-pinning.py", "max_stars_repo_name": "roghnin/Montage", "max_stars_repo_head_hexsha": "40dbd6bb5a506545f01931336bf37b24cdb72a64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ext/qstm/OF-QSTM/ptms/qstm/data/old/plot_pinning/plot-pinning.py", "max_issues_repo_name": "roghnin/Montage", "max_issues_repo_head_hexsha": "40dbd6bb5a506545f01931336bf37b24cdb72a64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-16T02:57:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-16T02:57:28.000Z", "max_forks_repo_path": "ext/qstm/OF-QSTM/ptms/qstm/data/old/plot_pinning/plot-pinning.py", "max_forks_repo_name": "roghnin/Montage", "max_forks_repo_head_hexsha": "40dbd6bb5a506545f01931336bf37b24cdb72a64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.7931034483, "max_line_length": 63, "alphanum_fraction": 0.6933240612, "include": true, "reason": "import numpy", "num_tokens": 493}
|
"""
file_formats.py defines file outputs from input xarray objects
"""
import copy
import numpy as np
import pandas as pd
def load_pvnames(filename):
"""
Given a file with one pv on each line, return a list of pvs.
"""
with open(filename, "r") as f:
lines = f.readlines()
return [l.rstrip("\n") for l in lines]  # rstrip, so a missing trailing newline does not eat a character
def csv_coll(xarray, pv, avg_period=None):
data = xarray[pv][0].values # numeric
times = xarray[pv].time.values # datetime64
# strip nans
d = []
t = []
for val, tim in zip(data, times):
if not np.isnan(val):
d.append(val)
t.append(tim)
data = d
times = t
def out_str(ts_array):
out = str(ts_array[0])
delims = "-- ::"
for i, d in enumerate(delims):
try: out += d + str(ts_array[i+1])
except IndexError: return out  # ran past the requested granularity
return out
periods = ("year", "month", "day", "hour", "minute", "second")
def stat_append(vals, methods, lists):
for m, lst in zip(methods, lists):
lst.append(m(vals))
def tim_spec(dt64, spec):
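# truncate a datetime64 to the requested granularity, e.g. spec=3 yields
# [year, month, day, hour], so equal lists mean "same averaging chunk"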
timestamp = pd.to_datetime(dt64)
return [getattr(timestamp, periods[i]) for i in range(spec+1)]
if avg_period is not None:
coll = [(pv, "avg", "min", "max")]
if avg_period in periods:
spec = periods.index(avg_period)
# split time into chunks to average
# replace data and times appropriately
avg = []
mins = []  # renamed from `min` to avoid shadowing the builtin
maxs = []  # renamed from `max` to avoid shadowing the builtin
t = []
curr_time = None
for val, tim in zip(data, times):
tim = tim_spec(tim, spec)
if curr_time is None:
curr_time = tim
curr_vals = [val]
elif tim == curr_time:
curr_vals.append(val)
else:
stat_append(curr_vals,
[np.mean, np.min, np.max],
[avg, mins, maxs])
t.append(out_str(curr_time))
# start the next chunk with the sample that closed this one;
# resetting curr_time to None here silently dropped that sample
curr_time = tim
curr_vals = [val]
if curr_time is not None and curr_vals:
stat_append(curr_vals,
[np.mean, np.min, np.max],
[avg, mins, maxs])
t.append(out_str(curr_time))
combined = [(str(t[n]), str(avg[n]), str(mins[n]), str(maxs[n])) for n in range(len(t))]
else:
print("bad avg_period, must be in {}".format(periods))
return coll
else:
coll = [(pv, "value")]
spec = len(periods) - 1
times = [tim_spec(t, spec) for t in times]
times = [out_str(t) for t in times]
combined = [(times[n], str(data[n])) for n in range(len(data))]
coll.extend(combined)
return coll
def build_csv(colls, filename=None):
colls = copy.copy(colls)
lens = [len(c) for c in colls]
max_len = max(lens)
for c in colls:
while len(c) < max_len:
#c.append(("",""))
c.append([""]*len(c[0]))
lines = []
for i in range(max_len):
lines.append([])
for coll in colls:
for i, row in enumerate(coll):
txt = ",".join(row)
if not lines[i]:
lines[i] = txt
else:
lines[i] = ",".join((lines[i],txt))
for i in range(len(lines)):
lines[i] += "\n"
if filename:
with open(filename, "w") as f:
f.writelines(lines)
else:
for line in lines:
print(line)
def csv(xarray, filename=None, sync_timestamps=False):
"""
Write xarray data into a csv file. Assumes the data came from an archiver
query, as defined in data.py. Format is something like:
pvname,desc,pvname,desc,pvname,desc
time,value,time,value,time,value
time,value,time,value,time,value
time,value,,,time,value
"""
skip_fields = ("field", "time")
colls = []
for pv in xarray:
if pv not in skip_fields:
coll = [(pv, xarray[pv].attrs.get("DESC", ""))]
data = xarray[pv][0].values
times = xarray[pv].time.values
data = [(str(times[n]), str(data[n])) for n in range(len(data)) if not np.isnan(data[n])]
coll.extend(data)
colls.append(coll)
lens = [len(c) for c in colls]
if lens:
max_len = max(lens)
else:
max_len = 0
for c in colls:
while len(c) < max_len:
c.append(("",""))
lines = []
for i in range(max_len):
lines.append([])
for coll in colls:
for i, row in enumerate(coll):
txt = ",".join(row)
if not lines[i]:
lines[i] = txt
else:
lines[i] = ",".join((lines[i],txt))
# terminate each row, as build_csv does, so writelines produces separate lines
for i in range(len(lines)):
lines[i] += "\n"
if filename:
with open(filename, "w") as f:
f.writelines(lines)
else:
for line in lines:
print(line)
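# Minimal usage sketch (assumes an xarray Dataset shaped like an archiver
# query from data.py, with per-PV "time" coordinates and a "DESC" attribute;
# "SOME:PV" is a placeholder name):
#
#   csv(ds, filename="raw.csv")
#   build_csv([csv_coll(ds, "SOME:PV", avg_period="hour")], filename="hourly.csv")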
|
{"hexsha": "53246c9f19ab5f8a902d83a81d71728450a94e70", "size": 4946, "ext": "py", "lang": "Python", "max_stars_repo_path": "archapp/file_formats.py", "max_stars_repo_name": "ZryletTC/archapp", "max_stars_repo_head_hexsha": "68299fa3e35c292cff33bba55a3a75e9ae568815", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "archapp/file_formats.py", "max_issues_repo_name": "ZryletTC/archapp", "max_issues_repo_head_hexsha": "68299fa3e35c292cff33bba55a3a75e9ae568815", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "archapp/file_formats.py", "max_forks_repo_name": "ZryletTC/archapp", "max_forks_repo_head_hexsha": "68299fa3e35c292cff33bba55a3a75e9ae568815", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5308641975, "max_line_length": 101, "alphanum_fraction": 0.5046502224, "include": true, "reason": "import numpy", "num_tokens": 1215}
|
# BSD 3-Clause License
#
# Copyright (c) 2020, IPASC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unittest.case import TestCase
from ipasc_tool import PAData
from ipasc_test.tests.utils import create_complete_device_metadata_dictionary, create_complete_acquisition_meta_data_dictionary
import numpy as np
class MetaDataTest(TestCase):
def setUp(self):
self.acquisition_metadata = create_complete_acquisition_meta_data_dictionary()
self.device_metadata = create_complete_device_metadata_dictionary()
self.pa_data = PAData(binary_time_series_data=np.random.random((4, 200)),
meta_data_acquisition=self.acquisition_metadata,
meta_data_device=self.device_metadata)
print("setUp")
def tearDown(self):
print("tearDown")
def test_get_general_information(self):
assert self.pa_data.get_device_uuid() is not None
assert self.pa_data.get_field_of_view() is not None
assert self.pa_data.get_number_of_detectors() is not None
assert self.pa_data.get_number_of_illuminators() is not None
def test_get_illuminator_position(self):
assert self.pa_data.get_illuminator_position() is not None
assert self.pa_data.get_illuminator_position(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_illuminator_position(0) is not None
def test_get_illuminator_orientation(self):
assert self.pa_data.get_illuminator_orientation() is not None
assert self.pa_data.get_illuminator_orientation(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_illuminator_orientation(0) is not None
def test_get_illuminator_size(self):
assert self.pa_data.get_illuminator_geometry() is not None
assert self.pa_data.get_illuminator_geometry(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_illuminator_geometry(0) is not None
def test_get_wavelength_range(self):
assert self.pa_data.get_wavelength_range() is not None
assert self.pa_data.get_wavelength_range(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_wavelength_range(0) is not None
def test_get_energy_profile(self):
assert self.pa_data.get_energy_profile() is not None
assert self.pa_data.get_energy_profile(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_energy_profile(0) is not None
def test_get_stability_profile(self):
assert self.pa_data.get_stability_profile() is not None
assert self.pa_data.get_stability_profile(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_stability_profile(0) is not None
def test_get_pulse_duration(self):
assert self.pa_data.get_pulse_width() is not None
assert self.pa_data.get_pulse_width(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_pulse_width(0) is not None
def test_get_beam_intensity_profile(self):
assert self.pa_data.get_beam_profile() is not None
assert self.pa_data.get_beam_profile(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_beam_profile(0) is not None
def test_get_beam_divergence_angles(self):
assert self.pa_data.get_beam_divergence() is not None
assert self.pa_data.get_beam_divergence(list(self.pa_data.get_illuminator_ids())[0]) is not None
assert self.pa_data.get_beam_divergence(0) is not None
def test_get_detector_position(self):
assert self.pa_data.get_detector_position() is not None
assert self.pa_data.get_detector_position(list(self.pa_data.get_detector_ids())[0]) is not None
assert self.pa_data.get_detector_position(0) is not None
def test_get_detector_orientation(self):
assert self.pa_data.get_detector_orientation() is not None
assert self.pa_data.get_detector_orientation(list(self.pa_data.get_detector_ids())[0]) is not None
assert self.pa_data.get_detector_orientation(0) is not None
def test_get_detector_size(self):
assert self.pa_data.get_detector_size() is not None
assert self.pa_data.get_detector_size(list(self.pa_data.get_detector_ids())[0]) is not None
assert self.pa_data.get_detector_size(0) is not None
def test_get_frequency_response(self):
assert self.pa_data.get_frequency_response() is not None
assert self.pa_data.get_frequency_response(list(self.pa_data.get_detector_ids())[0]) is not None
assert self.pa_data.get_frequency_response(0) is not None
def test_get_angular_response(self):
assert self.pa_data.get_angular_response() is not None
assert self.pa_data.get_angular_response(list(self.pa_data.get_detector_ids())[0]) is not None
assert self.pa_data.get_angular_response(0) is not None
def test_acquisition_metadata(self):
assert self.pa_data.get_encoding() is not None
assert self.pa_data.get_compression() is not None
assert self.pa_data.get_data_UUID() is not None
assert self.pa_data.get_data_type() is not None
assert self.pa_data.get_dimensionality() is not None
assert self.pa_data.get_sizes() is not None
assert self.pa_data.get_device_reference() is not None
assert self.pa_data.get_pulse_laser_energy() is not None
assert self.pa_data.get_time_stamps() is not None
assert self.pa_data.get_wavelengths() is not None
assert self.pa_data.get_time_gain_compensation() is not None
assert self.pa_data.get_overall_gain() is not None
assert self.pa_data.get_element_dependent_gain() is not None
assert self.pa_data.get_temperature() is not None
assert self.pa_data.get_coupling_agent() is not None
assert self.pa_data.get_scanning_method() is not None
assert self.pa_data.get_sampling_rate() is not None
assert self.pa_data.get_frequency_filter() is not None
|
{"hexsha": "d8e475cfb6204ac23195dc6b133c63838f07b973", "size": 7603, "ext": "py", "lang": "Python", "max_stars_repo_path": "ipasc_test/tests/test_pa_data_class.py", "max_stars_repo_name": "IPASC/IPASC_DataConversionTool", "max_stars_repo_head_hexsha": "41c6176ed579d8c7778a9831dcc28ca3f93df82e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-28T10:51:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-10T16:26:38.000Z", "max_issues_repo_path": "ipasc_test/tests/test_pa_data_class.py", "max_issues_repo_name": "IPASC/IPASC_DataConversionTool", "max_issues_repo_head_hexsha": "41c6176ed579d8c7778a9831dcc28ca3f93df82e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-03-13T10:46:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-02T12:51:39.000Z", "max_forks_repo_path": "ipasc_test/tests/test_pa_data_class.py", "max_forks_repo_name": "IPASC/IPASC_DataConversionTool", "max_forks_repo_head_hexsha": "41c6176ed579d8c7778a9831dcc28ca3f93df82e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-10T13:17:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T13:17:17.000Z", "avg_line_length": 52.7986111111, "max_line_length": 127, "alphanum_fraction": 0.7503616993, "include": true, "reason": "import numpy", "num_tokens": 1666}
|
import os, os.path
import pandas as pd
import numpy as np
import support_functions as sf
import data_structures as ds
# TEMP
import importlib
importlib.reload(sf)
importlib.reload(ds)
# set up some base paths
dir_py = os.path.dirname(os.path.realpath(__file__))
dir_proj = os.path.dirname(dir_py)
# key subdirectories for the project
dir_jl = sf.check_path(os.path.join(dir_proj, "julia"), False)
dir_out = sf.check_path(os.path.join(dir_proj, "out"), True)
dir_ref = sf.check_path(os.path.join(dir_proj, "ref"), False)
# attribute tables and readthedocs
dir_docs = sf.check_path(os.path.join(os.path.dirname(os.getcwd()), "docs", "source"), False)
dir_attribute_tables = sf.check_path(os.path.join(dir_docs, "csvs"), False)
# get model attributes
model_attributes = ds.ModelAttributes(dir_attribute_tables)
|
{"hexsha": "a89ff0c6a4301957f5098450951c35ea611589fc", "size": 808, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/.ipynb_checkpoints/setup_analysis-checkpoint.py", "max_stars_repo_name": "egobiernoytp/lac_decarbonization", "max_stars_repo_head_hexsha": "7b574c4c91a0b1341dfd97a203fc8477ba32a91d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/.ipynb_checkpoints/setup_analysis-checkpoint.py", "max_issues_repo_name": "egobiernoytp/lac_decarbonization", "max_issues_repo_head_hexsha": "7b574c4c91a0b1341dfd97a203fc8477ba32a91d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/.ipynb_checkpoints/setup_analysis-checkpoint.py", "max_forks_repo_name": "egobiernoytp/lac_decarbonization", "max_forks_repo_head_hexsha": "7b574c4c91a0b1341dfd97a203fc8477ba32a91d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4848484848, "max_line_length": 93, "alphanum_fraction": 0.7636138614, "include": true, "reason": "import numpy", "num_tokens": 197}
|
'''
This program detects ArUco AR tags from a camera stream.

Next steps: perform the transformation from the image coordinate
system to the global coordinate system using the extrinsic matrix.
'''
import cv2
import cv2.aruco as aruco
import numpy as np
from io import BytesIO
import time
import os
from picamera import PiCamera
from picamera.array import PiRGBArray
from imutils.video import VideoStream
import imutils
import yaml
### For the print statements, feel free to put them in #IFDEBUG flags
def main():
# establish global constants (hardcode in file or make optional param to fn call)
# KNOWN_WIDTH is the width of the AR tags to detect in URC competition
KNOWN_WIDTH = 20 # unit is cm
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
# allow the camera to warmup
time.sleep(2)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
# print("Captured Image")
# grab the raw NumPy array representing the image
image = np.array(frame.array)
# Our operations on the frame come here
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#TODO: change "cv2.aruco.DICT_5x5" to rover aruco lib
aruco_dict = aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_50)
parameters = aruco.DetectorParameters_create()
# print("Parameters:", parameters)
# Lists of ids and the corners belonging to each id
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray_img,
aruco_dict, parameters=parameters)
# print("Corners:", corners)
# print(type(id))
if(len(corners) > 0):
for i in range(len(corners)): # used to be id
# call function to calculate ar tag heading relative to rover
depth = get_depth(i, corners[i]) #used to be id[i]
heading = global_coord_trans(i, corners[i], depth) # id[i]
# this is where we would send heading over LCM channel
# note: global coord trans could happen somewhere
# outside of this script. It doesn't, but it could
print("Depth and Heading of", i, "are:", depth, "&", heading)
gray_img = aruco.drawDetectedMarkers(gray_img, corners)
#print(rejectedImgPoints)
# Display the resulting frame
cv2.imshow('frame',gray_img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
rawCapture.truncate(0)
# When everything done, release the capture
# cap.release()
cv2.destroyAllWindows()
return
def global_coord_trans(id, corners, depth):
#transform the corners into an image coordinate system
#representation, then transform into the global
#coordinate frame of the rover using yaml file with
#extrinsics
#use corners detected to find center pixel
center = np.array([(corners[0][0][1] + corners[0][3][1]) / 2,
(corners[0][1][0] + corners[0][2][0]) / 2])
with open("cam_intrinsic.yaml", 'r') as cam_matrix:
#cam_matrix = file("cam_intrinsic.yaml", 'r')
# extrinsics = file("cam_extrinsic.yaml", 'r')
try:
# extrn = yaml.safe_load(extrinsics)
intrn = yaml.safe_load(cam_matrix)
# rotation = extrn['rotation']
# translation = extrn['translation']
A = intrn['cam_matrix'] #this is given by cam calibration tool
A = np.array(A).reshape((3, 3)) #reshape data to 3x3 matrix
# inner_mat = np.matmul(np.dot(s, center), np.linalg.inv(A)) - translation #missing s for scaling pixel coords. tbh idk what it is
# global_coords = np.matmul(np.linalg.inv(rotation), 0) #change the 0 at the end
u = center[1]
v = center[0] # 0 and 1 may be mixed up
u0 = A[0,2]
v0 = A[1,2]
fx = A[0,0]
fy = A[1,1]
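# Back-project the pixel via the inverted pinhole model: projection is
# u = fx*x/z + u0 and v = fy*y/z + v0, so given depth z the camera-frame
# coordinates are x = (u - u0)*z/fx and y = (v - v0)*z/fy.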
x = (u - u0) * depth / fx
y = (v - v0) * depth / fy
z = depth
except yaml.YAMLError as exc:
print("Failed global transformation for", id, "ERROR:", exc)
raise  # (x, y, z) would be undefined below, so don't fall through
# (x, y, z) are 3D backprojected coordinates. Points relative to camera frame
# Another transformation is needed between camera frame and rover frame (using extrinsics)
return (x, y, z) # place heading back in
def get_depth(id, corners):
# print(corners)
# print(corners.shape)
# unit of width is pixels
width = abs(((corners[0][2][1] + corners[0][3][1]) / 2) - ((corners[0][0][1] + corners[0][1][1]) / 2))
# print(width)
with open("cam_intrinsic.yaml", 'r') as cam_matrix:
#cam_matrix = file("cam_intrinsic.yaml", 'r')
try:
intrn = yaml.safe_load(cam_matrix)
A = intrn['cam_matrix'] #this is given by cam calibration tool
A = np.array(A).reshape((3, 3)) #reshape data to 3x3 matrix
fx = A[0,0]
fy = A[1,1]
FOCAL_LENGTH = (fx + fy) / 2.0 #probably wrong
KNOWN_WIDTH = 20
except yaml.YAMLError as exc:
print("Failed focal length loading for", id, "ERROR:", exc)
raise  # FOCAL_LENGTH and KNOWN_WIDTH would be undefined below
depth = distance_to_camera(KNOWN_WIDTH, FOCAL_LENGTH, width) / 10
print("depth is:", depth, "\t inches")
return depth
def distance_to_camera(knownWidth, focalLength, perWidth):
# compute and return the distance from the maker to the camera
# this makes use of a triangle similarity
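# perceived width shrinks linearly with distance: perWidth = focalLength *
# knownWidth / distance, so solving for distance gives the expression below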
return (knownWidth * focalLength) / perWidth
if __name__ == '__main__':
main()
|
{"hexsha": "863f8d2495c2b64286fd2d58b0a68b325e995926", "size": 5797, "ext": "py", "lang": "Python", "max_stars_repo_path": "aruco_detector.py", "max_stars_repo_name": "umrover/perception-raspi-ar", "max_stars_repo_head_hexsha": "e1ff1da3c77bc2906ef21ceeeada04a141124a96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aruco_detector.py", "max_issues_repo_name": "umrover/perception-raspi-ar", "max_issues_repo_head_hexsha": "e1ff1da3c77bc2906ef21ceeeada04a141124a96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aruco_detector.py", "max_forks_repo_name": "umrover/perception-raspi-ar", "max_forks_repo_head_hexsha": "e1ff1da3c77bc2906ef21ceeeada04a141124a96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.921686747, "max_line_length": 142, "alphanum_fraction": 0.6180783164, "include": true, "reason": "import numpy", "num_tokens": 1488}
|
using NURBS
using Base.Test
@testset "B-Spline curve generator" begin
@testset "bspline" begin
b = [1. 2 4 3; 1 3 3 1; 0 0 0 0]
@test typeof(bspline(4,4,5,b)[1]) == Array{Float64,2}
@test typeof(bspline(4,4,5,b)[2]) == Array{Array{Int64,1},1}
@test isapprox(bspline(4,4,5,b)[1],[1.0 1.875 2.75 3.25 3.0; 1.0 2.125 2.5 2.125 1.0; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
end
@testset "bsplineu" begin
b = [0. 3 6 9; 0 10 3 6; 0 0 0 0]
@test typeof(bsplineu(4,3,5,b)[1]) == Array{Float64,2}
@test typeof(bsplineu(4,3,5,b)[2]) == Array{Array{Int64,1},1}
@test isapprox(bsplineu(4,3,5,b)[1],[1.5 3.0 4.5 6.0 7.5; 5.0 7.875 6.5 4.25 4.5; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
end
@testset "dbspline" begin
b = [1. 2 4 3; 1 3 3 1; 0 0 0 0]
@test typeof(dbspline(4,4,5,b)[1]) == Array{Float64,2}
@test typeof(dbspline(4,4,5,b)[2]) == Array{Float64,2}
@test typeof(dbspline(4,4,5,b)[3]) == Array{Float64,2}
@test typeof(dbspline(4,4,5,b)[4]) == Array{Array{Int64,1},1}
@test isapprox(dbspline(4,4,5,b)[1],[1.0 1.875 2.75 3.25 3.0; 1.0 2.125 2.5 2.125 1.0; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
@test isapprox(dbspline(4,4,5,b)[2],[3.0 3.75 3.0 0.75 -3.0; 6.0 3.0 0.0 -3.0 -6.0; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
@test isapprox(dbspline(4,4,5,b)[3],[6.0 0.0 -6.0 -12.0 -18.0; -12.0 -12.0 -12.0 -12.0 -12.0; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
end
@testset "dbsplineu" begin
b = [0. 3 6 9; 0 10 3 6; 0 0 0 0]
@test typeof(dbsplineu(4,3,5,b)[1]) == Array{Float64,2}
@test typeof(dbsplineu(4,3,5,b)[2]) == Array{Float64,2}
@test typeof(dbsplineu(4,3,5,b)[3]) == Array{Float64,2}
@test typeof(dbsplineu(4,3,5,b)[4]) == Array{Array{Int64,1},1}
@test isapprox(dbsplineu(4,3,5,b)[1],[1.5 3.0 4.5 6.0 7.5; 5.0 7.875 6.5 4.25 4.5; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
@test isapprox(dbsplineu(4,3,5,b)[2],[3.0 3.0 3.0 3.0 3.0; 10.0 1.5 -7.0 -2.0 3.0; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
@test isapprox(dbsplineu(4,3,5,b)[3],[0.0 0.0 0.0 0.0 0.0; -17.0 -17.0 10.0 10.0 10.0; 0.0 0.0 0.0 0.0 0.0],atol=1e-5)
end
@testset "nmatrix" begin
@test typeof(nmatrix(3)[1]) == Array{Float64,2}
@test typeof(nmatrix(3)[2]) == Float64
@test nmatrix(3)[1] == [1.0 -2.0 1.0; -2.0 2.0 0.0; 1.0 1.0 0.0]
@test nmatrix(3)[2] == 0.5
@test nmatrix(4)[1] == [-1.0 3.0 -3.0 1.0; 3.0 -6.0 3.0 0.0; -3.0 0.0 3.0 0.0; 1.0 4.0 1.0 0.0]
@test isapprox(nmatrix(4)[2],0.1666666666,atol=1e-10)
end
@testset "matpbspl" begin
b = [0. 3 6 9; 0 10 3 6; 0 0 0 0]
@test typeof(matpbspl(4,3,5,b)[1]) == Array{Float64,2}
@test typeof(matpbspl(4,3,5,b)[2]) == Array{Array{Int64,1},1}
@test isapprox(matpbspl(4,3,4,b)[1],[1.5 2.5 3.5 4.5 5.5 6.5 7.5 7.83333 6.83333 4.5 2.16667 1.16667 1.5 2.5 3.5; 5.0 7.38889 7.88889 6.5 4.72222 4.05556 4.5 5.0 4.5 3.0 1.88889 2.55556 5.0 7.38889 7.88889; 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0],atol=1e-5)
end
end
|
{"hexsha": "3812000893d353cf5c3e3b55d9f6028696b18642", "size": 3089, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/bspline.jl", "max_stars_repo_name": "eOnofri04/IN480", "max_stars_repo_head_hexsha": "3128bfd990925e8926065e075b309cb9bda1355c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-03-06T17:54:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T04:30:18.000Z", "max_issues_repo_path": "test/bspline.jl", "max_issues_repo_name": "TheChoice04/IN480", "max_issues_repo_head_hexsha": "3128bfd990925e8926065e075b309cb9bda1355c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2018-11-22T18:10:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-07T16:32:36.000Z", "max_forks_repo_path": "test/bspline.jl", "max_forks_repo_name": "TheChoice04/IN480", "max_forks_repo_head_hexsha": "3128bfd990925e8926065e075b309cb9bda1355c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-06T17:54:09.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-06T17:54:09.000Z", "avg_line_length": 54.1929824561, "max_line_length": 256, "alphanum_fraction": 0.5403043056, "num_tokens": 1733}
|
[STATEMENT]
lemma (in cat_parallel_2) cat_parallel_op[cat_op_intros]:
"cat_parallel_2 \<alpha> \<bb> \<aa> \<ff> \<gg>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cat_parallel_2 \<alpha> \<bb> \<aa> \<ff> \<gg>
[PROOF STEP]
by (intro cat_parallel_2I)
(auto intro!: cat_parallel_cs_intros cat_parallel_ineq[symmetric])
|
{"llama_tokens": 140, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_Parallel", "length": 1}
|
'''
Testing Dynamic Sednet preprocessors
'''
import veneer
import os
import json
import pandas as pd
import numpy as np
import sys
import string
from dsed import preprocessors
from .general import TestServer, write_junit_style_results, arg_or_default
from datetime import datetime
import traceback
veneer.general.PRINT_SCRIPTS=True
def assert_all_equal(exp,act):
if not (exp==act).all():
print("Expected: %s,\n Actual: %s"%(str(exp),str(act)))
assert (exp==act).all()
def assert_empty(results):
if len(results):
print('Differences:')
print('\n'.join([str(r) for r in results]))
assert len(results)==0
def _compare(expected,result,label):
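# Align result columns with the expected CSV (dropping any " (unit)" suffix
# from the expected headers), then compare column-by-column: floats within
# 1e-6, ints exactly, everything else as NaN-filled strings; on mismatch,
# dump expected/actual CSVs next to the project file for inspection.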
orig_cols = expected.columns
unitless_cols = [col.split(' (')[0] for col in orig_cols]
expected = expected.rename(columns=dict(zip(orig_cols,unitless_cols)))
results_reordered = result[expected.columns]
assert_all_equal(expected.columns,results_reordered.columns)
results=[]
for col in expected.columns:
if results_reordered.dtypes[col]==np.dtype('float64'):
exp = expected[col]
res = results_reordered[col]
if not (np.abs(exp-res)<1e-6).all():
results.append(col)
elif results_reordered.dtypes[col]==np.dtype('int64'):
exp = expected[col]
res = results_reordered[col]
if not (exp==res).all():
results.append(col)
else:
exp = expected[col].fillna('')
res = results_reordered[col].fillna('')
if not (exp==res).all():
results.append(col)
if len(results):
expected.to_csv(label+'_expected.csv')
results_reordered.to_csv(label+'_actual.csv')
assert_empty(results)
def preprocessor_test(context,project_file,preprocessor,preprocess_params,expected):
try:
print('Running %s for %s'%(preprocessor.__name__,project_file))
v = context.start_for_test(project_file)
result = preprocessor(v,**preprocess_params)
print('Preprocessor completed')
failed = False
if isinstance(expected,dict):
for key,expected_df in expected.items():
try:
_compare(expected_df,result[key],project_file.split('.')[0]+'_'+key)
except Exception:  # record the failure but keep comparing the remaining keys
failed = True
else:
_compare(expected,result,project_file.split('.')[0])
assert not failed
finally:
context.shutdown()
if __name__=='__main__':
test_fn = arg_or_default(1)
veneer_path = os.path.abspath(arg_or_default(2))
source_version = arg_or_default(3,'4.1.1')
tests = json.load(open(test_fn, 'r'))
wd = os.getcwd()
results = {}
context = TestServer(port=44444)
for test in tests:
preprocessor = 'run_%s'%test['preprocessor']
project_fn = test['project']
args = test['parameters']
expected_fns = test['expected_results']
if isinstance(expected_fns,str):
expected = pd.read_csv(expected_fns)
else:
expected = {key:pd.read_csv(fn) for key,fn in expected_fns.items()}
label = test.get('label','unlabelled')
label = "%s (%s)"%(preprocessor,label)
print("================= %s ================="%label)
param_subst = {
'pwd':os.getcwd().replace('\\','/')
}
for k,v in args.items():
if isinstance(v,str):
args[k] = string.Template(v).substitute(param_subst)
try:
start_t = datetime.now()
preprocessor_test(context,project_fn,getattr(preprocessors,preprocessor),args,expected)
print("SUCCESS: %s"%label)
success=True
msg=None
except Exception as e:
print('FAILED: %s with %s'%(label,str(e)))
print('\n'.join(traceback.format_tb(e.__traceback__)))
success=False
msg = str(e)
finally:
end_t = datetime.now()
elapsed = (end_t - start_t).total_seconds()
os.chdir(wd)
results[label]={
'success':success,
'elapsed':elapsed,
'message':msg
}
write_junit_style_results(results,'preprocessor_test_results.xml','preprocessor tests')
|
{"hexsha": "13ecc5277c851d5c27698037ae469efd2a68d95c", "size": 4357, "ext": "py", "lang": "Python", "max_stars_repo_path": "dsed/testing/preprocessors.py", "max_stars_repo_name": "flowmatters/dsed-py", "max_stars_repo_head_hexsha": "b967db2797320e63bc504e40023b7c7623a0b002", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dsed/testing/preprocessors.py", "max_issues_repo_name": "flowmatters/dsed-py", "max_issues_repo_head_hexsha": "b967db2797320e63bc504e40023b7c7623a0b002", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dsed/testing/preprocessors.py", "max_forks_repo_name": "flowmatters/dsed-py", "max_forks_repo_head_hexsha": "b967db2797320e63bc504e40023b7c7623a0b002", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7593984962, "max_line_length": 99, "alphanum_fraction": 0.5939866881, "include": true, "reason": "import numpy", "num_tokens": 953}
|
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def importarDados(insertOnes=True, filepath='/data/ex2data1.txt', names=['Prova 1', 'Prova 2', 'Aprovado']):
path = os.getcwd() + filepath
data = pd.read_csv(path, header=None, names=names)
# Load the dataset into an array, then take a quick look at the data
data.head()
# The first column, filled with 1's, represents theta0
if insertOnes:
data.insert(0, 'Ones', 1)
# convert from dataframes to arrays
cols = data.shape[1]
X = data.iloc[:, 0:cols - 1]
y = data.iloc[:, cols - 1:cols]
# convert from arrays to matrices
X = np.array(X.values)
y = np.array(y.values)
return data, X, y
def plot(data, filename = 'target/plot3.1.png'):
# generate the scatter plot for a preliminary look at the data
positivo = data[data['Aprovado'].isin([1])]
negativo = data[data['Aprovado'].isin([0])]
fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positivo['Prova 1'], positivo['Prova 2'], s=50, c='k', marker='+', label='Aprovado')
ax.scatter(negativo['Prova 1'], negativo['Prova 2'], s=50, c='y', marker='o', label='Nao Aprovado')
ax.legend()
ax.set_xlabel('Nota da Prova 1')
ax.set_ylabel('Nota da Prova 2')
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
plt.savefig(filename)
plt.show()
|
{"hexsha": "880395abd69b305762bbdb824c28a592c030674f", "size": 1485, "ext": "py", "lang": "Python", "max_stars_repo_path": "T2/plot_ex2data1.py", "max_stars_repo_name": "andersonmanhaes/ml_mestrado", "max_stars_repo_head_hexsha": "d737d80e07d9392895e4455e49a33b8700080cf1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "T2/plot_ex2data1.py", "max_issues_repo_name": "andersonmanhaes/ml_mestrado", "max_issues_repo_head_hexsha": "d737d80e07d9392895e4455e49a33b8700080cf1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "T2/plot_ex2data1.py", "max_forks_repo_name": "andersonmanhaes/ml_mestrado", "max_forks_repo_head_hexsha": "d737d80e07d9392895e4455e49a33b8700080cf1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5957446809, "max_line_length": 111, "alphanum_fraction": 0.6525252525, "include": true, "reason": "import numpy", "num_tokens": 446}
|
-- The composition of injective functions is injective
-- ===================================================
-- ----------------------------------------------------
-- Ex. 1. Prove that the composition of two injective
-- functions is an injective function.
-- ----------------------------------------------------
import tactic
open function
variables {X Y Z : Type}
variable {f : X → Y}
variable {g : Y → Z}
-- 1st proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
begin
intros x y h,
apply Hf,
apply Hg,
exact h,
end
-- 2nd proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
begin
intros x y h,
apply Hf,
exact Hg h,
end
-- 3rd proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
begin
intros x y h,
exact Hf (Hg h),
end
-- 4th proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
λ x y h, Hf (Hg h)
-- 5th proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
assume x y,
assume h1 : (g ∘ f) x = (g ∘ f) y,
have h2 : f x = f y, from Hg h1,
show x = y, from Hf h2
-- 6th proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
assume x y,
assume h1 : (g ∘ f) x = (g ∘ f) y,
show x = y, from Hf (Hg h1)
-- 7th proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
assume x y,
assume h1 : (g ∘ f) x = (g ∘ f) y,
Hf (Hg h1)
-- 8th proof
example
(Hf : injective f)
(Hg : injective g)
: injective (g ∘ f) :=
λ x y h1, Hf (Hg h1)
-- 9th proof
example
(Hg : injective g)
(Hf : injective f)
: injective (g ∘ f) :=
-- by library_search
injective.comp Hg Hf
-- 10th proof
example
(Hg : injective g)
(Hf : injective f)
: injective (g ∘ f) :=
-- by hint
by tauto
|
{"author": "jaalonso", "repo": "Logica_con_Lean", "sha": "beb6765c6ff3c05590a03f45722eda0c815a25cd", "save_path": "github-repos/lean/jaalonso-Logica_con_Lean", "path": "github-repos/lean/jaalonso-Logica_con_Lean/Logica_con_Lean-beb6765c6ff3c05590a03f45722eda0c815a25cd/src/5_Funciones/La_composicion_de_funciones_inyectivas_es_inyectiva.lean"}
|
import argparse
import os
# workaround to unpickle old model files
import sys
from pdb import set_trace as bp
import numpy as np
import torch
import gym
import my_pybullet_envs
import pybullet as p
import time
from a2c_ppo_acktr.envs import VecPyTorch, make_vec_envs
from a2c_ppo_acktr.utils import get_render_func, get_vec_normalize
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
homedir = os.path.expanduser("~")
ts = 1/240
# may need to refactor this into robot class
def planning(robot):
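# Replay the planned joint-space trajectory (global `Traj`, one row of seven
# arm joint targets per waypoint) using PyBullet position control, holding
# the finger targets fixed, then settle for 50 extra simulation steps.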
for ind in range(len(Traj) - 1):
tar_armq = Traj[ind,0:7]
# for ji, i in enumerate(robot.arm_dofs):
# p.resetJointState(robot.arm_id, i, tar_armq[ji])
# for ind in range(len(robot.fin_actdofs)):
# p.resetJointState(robot.arm_id, robot.fin_actdofs[ind], robot.init_fin_q[ind], 0.0)
# for ind in range(len(robot.fin_zerodofs)):
# p.resetJointState(robot.arm_id, robot.fin_zerodofs[ind], 0.0, 0.0)
#print(tar_armq)
p.setJointMotorControlArray(
bodyIndex=robot.arm_id,
jointIndices=robot.arm_dofs,
controlMode=p.POSITION_CONTROL,
targetPositions=list(tar_armq),
forces=[robot.maxForce * 3] * len(robot.arm_dofs))
p.setJointMotorControlArray(
bodyIndex=robot.arm_id,
jointIndices=robot.fin_actdofs,
controlMode=p.POSITION_CONTROL,
targetPositions=list(robot.tar_fin_q),
forces=[robot.maxForce] * len(robot.tar_fin_q))
p.setJointMotorControlArray(
bodyIndex=robot.arm_id,
jointIndices=robot.fin_zerodofs,
controlMode=p.POSITION_CONTROL,
targetPositions=[0.0] * len(robot.fin_zerodofs),
forces=[robot.maxForce / 4.0] * len(robot.fin_zerodofs))
p.stepSimulation()
# print(robot.tar_fin_q)
time.sleep(ts)
cps = p.getContactPoints(bodyA=robot.arm_id)
print(len(cps) == 0)  # True while the arm remains collision-free
for _ in range(50):
robot.tar_arm_q = tar_armq
p.stepSimulation()
#time.sleep(1. / 240.) # TODO: stay still for a while
sys.path.append('a2c_ppo_acktr')
parser = argparse.ArgumentParser(description='RL')
parser.add_argument(
'--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument(
'--log-interval',
type=int,
default=10,
help='log interval, one log per n updates (default: 10)')
parser.add_argument(
'--env-name',
default='ShadowHandDemoBulletEnv-v1',
help='environment to train on (default: PongNoFrameskip-v4)')
parser.add_argument(
'--load-dir',
# default='./trained_models_0114_box_l_4/ppo/', # TODO
default='./trained_models_0117_box_l_1/ppo/',
help='directory to save agent logs (default: ./trained_models/)')
parser.add_argument(
'--non-det',
type=int,
default=0,
help='whether to use a non-deterministic policy, 1 true 0 false')
parser.add_argument(
'--iter',
type=int,
default=-1,
help='which iter pi to test')
args = parser.parse_args()
# TODO
is_cuda = True
device = 'cuda' if is_cuda else 'cpu'
args.det = not args.non_det
#np.random.seed(123)
p.connect(p.GUI)
p.resetSimulation()
p.setTimeStep(ts)
p.setGravity(0, 0, -10)
# must use vector version of make_env as to use vec_normalize
env = make_vec_envs(
args.env_name,
args.seed + 1000,
1,
None,
None,
device=device,
allow_early_resets=False)
# dont know why there are so many wrappers in make_vec_envs...
env_core = env.venv.venv.envs[0]
table_id = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/tabletop.urdf'), [0.27, 0.1, 0.0], useFixedBase=1) # TODO
############################# HERE IS THE INPUT FROM VISION AND LANGUAGE MODULE
tx = np.random.uniform(low=0, high=0.25) # target object location
ty = np.random.uniform(low=-0.1, high=0.5)
destin_x = np.random.uniform(low=0, high=0.25) # destination location for target object
destin_y = np.random.uniform(low=-0.1, high=0.5)
destin_z = 0
#tx = 0.1
#ty = 0.0
#est_tx = tx
#est_ty = ty
est_tx = tx + np.random.uniform(low=-0.01, high=0.01)
est_ty = ty + np.random.uniform(low=-0.01, high=0.01)
OBJECTS = np.array([[est_tx,est_ty,0,0],[0.8, 0.8, 0, 0]])
# oid1 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/cylinder.urdf'), [a_tx, a_ty, 0.1], useFixedBase=0) # tar obj
oid1 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/box.urdf'), [tx, ty, 0.1], useFixedBase=0) # tar obj
# oid2 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/box.urdf'), [0.1, 0.2, 0.1], useFixedBase=0)
# oid3 = p.loadURDF(os.path.join(currentdir, 'my_pybullet_envs/assets/cylinder.urdf'), [0.2, -0.15, 0.1], useFixedBase=0)
env_core.assign_estimated_obj_pos(est_tx, est_ty)
p.changeDynamics(oid1, -1, lateralFriction=1.0)
p.changeDynamics(table_id, -1, lateralFriction=1.0)
# print(oid1)
# # Get a render function
# render_func = get_render_func(env)
#
# print(render_func)
from my_pybullet_envs.inmoov_shadow_grasp_env_v2 import ImaginaryArmObjSession
sess = ImaginaryArmObjSession()
Qreach = np.array(sess.get_most_comfortable_q(OBJECTS[0,0],OBJECTS[0,1]))
Qdestin = np.array(sess.get_most_comfortable_q(destin_x,destin_y)) #################################################### NEEDS TO HAVE Z and object orientation!!!
# send command to Openrave
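# (file-based handshake: PB_REACH.npz carries the objects and reach
# configuration into the shared container_data folder, and the OpenRave
# planner is expected to reply with OR_REACH.npy, polled for below)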
file_path = homedir+'/container_data/PB_REACH.npz'
np.savez(file_path,OBJECTS,Qreach)
# We need to use the same statistics for normalization as used in training
ori_env_name = 'InmoovHandGraspBulletEnv-v1'
if args.iter >= 0:
path = os.path.join(args.load_dir, ori_env_name + "_" + str(args.iter) + ".pt")
else:
path = os.path.join(args.load_dir, ori_env_name + ".pt")
if is_cuda:
actor_critic, ob_rms = torch.load(path)
else:
actor_critic, ob_rms = torch.load(path, map_location='cpu')
vec_norm = get_vec_normalize(env)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
recurrent_hidden_states = torch.zeros(1,
actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
# if render_func is not None:
# render_func('human')
# obs_old = env.reset()
# print(obs_old)
env.reset()
#
# if args.env_name.find('Bullet') > -1:
# import pybullet as p
#
# torsoId = -1
# for i in range(p.getNumBodies()):
# if (p.getBodyInfo(i)[0].decode() == "r_forearm_link"):
# torsoId = i
control_steps = 0
# TODO: change this to read the OpenRave file
###
#Get planned trajectory from Openrave:
file_path = homedir+'/container_data/OR_REACH.npy'
while not os.path.exists(file_path):
time.sleep(1)
if os.path.isfile(file_path):
Traj = np.load(file_path)
os.remove(file_path)
else:
raise ValueError("%s isn't a file!" % file_path)
###
print("Trajectory obtained from OpenRave!")
input("press enter")
planning(env_core.robot)
# print(robot.get_q_dq(robot.arm_dofs))
# print(robot.tar_arm_q)
# print(robot.tar_fin_q)
# input("press enter")
# env_core.robot.reset_with_certain_arm_q([-7.60999597e-01, 3.05809706e-02, -5.82112526e-01,
# -1.40855264e+00, -6.49374902e-01, -2.42410664e-01,
# 0.00000000e+00])
# env_core.robot.tar_arm_q = [-7.60999597e-01, 3.05809706e-02, -5.82112526e-01,
# -1.40855264e+00, -6.49374902e-01, -2.42410664e-01,
# 0.00000000e+00]
# p.stepSimulation()
# print("tar arm q after reset", robot.tar_arm_q)
# time.sleep(3)
obs = torch.Tensor([env_core.getExtendedObservation()])
if is_cuda:
obs = obs.cuda()
# print("tar arm q after getting obs using env_core", env_core.robot.tar_arm_q)
# print("tar arm q after getting obs", robot.tar_arm_q)
#print("init obs", obs)
# input("press enter")
# # print(obs)
#
# print("diff", obs - obs_old)
for i in range(200):
with torch.no_grad():
value, action, _, recurrent_hidden_states = actor_critic.act(
obs, recurrent_hidden_states, masks, deterministic=args.det)
# print(action)
# if i > 100:
# action[0, 1] = 0.5
obs, reward, done, _ = env.step(action)
control_steps += 1
# if control_steps >= 100: # done grasping
# for _ in range(1000):
# p.stepSimulation()
# time.sleep(ts)
masks.fill_(0.0 if done else 1.0)
Qmove_init = np.concatenate((env_core.robot.get_q_dq(env_core.robot.arm_dofs)[0],env_core.robot.get_q_dq(env_core.robot.arm_dofs)[1]))
file_path = homedir+'/container_data/PB_MOVE.npz'
np.savez(file_path,OBJECTS,Qmove_init,Qdestin)
file_path = homedir+'/container_data/OR_MOVE.npy'
while not os.path.exists(file_path):
time.sleep(1)
if os.path.isfile(file_path):
Traj = np.load(file_path)
os.remove(file_path)
else:
raise ValueError("%s isn't a file!" % file_path)
print("Trajectory obtained from OpenRave!")
input("press enter")
planning(env_core.robot)
#print(env_core.getExtendedObservation())
bp()
#input("press enter")
|
{"hexsha": "42a8fe503f6debcca49d29b8840f566bc4493caf", "size": 9137, "ext": "py", "lang": "Python", "max_stars_repo_path": "enjoy_test.py", "max_stars_repo_name": "jyf588/pytorch-rl-bullet", "max_stars_repo_head_hexsha": "3ac1835d01e658b2078126895ffa0eb11304abb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "enjoy_test.py", "max_issues_repo_name": "jyf588/pytorch-rl-bullet", "max_issues_repo_head_hexsha": "3ac1835d01e658b2078126895ffa0eb11304abb4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "enjoy_test.py", "max_forks_repo_name": "jyf588/pytorch-rl-bullet", "max_forks_repo_head_hexsha": "3ac1835d01e658b2078126895ffa0eb11304abb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3986254296, "max_line_length": 161, "alphanum_fraction": 0.6694757579, "include": true, "reason": "import numpy", "num_tokens": 2636}
|
[STATEMENT]
lemma (in simplification) rb_correct:
fixes Q :: "('a :: {linorder, infinite}, 'b :: linorder) fmla"
shows "rb Q \<le> rb_spec Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rb Q \<le> rb_spec Q
[PROOF STEP]
proof (induct Q rule: rb.induct[case_names Neg Disj Conj Exists Pred Bool Eq])
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<And>Q. rb Q \<le> rb_spec Q \<Longrightarrow> rb (Neg Q) \<le> rb_spec (Neg Q)
2. \<And>Q1 Q2. \<lbrakk>rb Q1 \<le> rb_spec Q1; \<And>x. rb Q2 \<le> rb_spec Q2\<rbrakk> \<Longrightarrow> rb (Disj Q1 Q2) \<le> rb_spec (Disj Q1 Q2)
3. \<And>Q1 Q2. \<lbrakk>rb Q1 \<le> rb_spec Q1; \<And>x. rb Q2 \<le> rb_spec Q2\<rbrakk> \<Longrightarrow> rb (Conj Q1 Q2) \<le> rb_spec (Conj Q1 Q2)
4. \<And>x Q. rb Q \<le> rb_spec Q \<Longrightarrow> rb (Exists x Q) \<le> rb_spec (Exists x Q)
5. \<And>v va. rb (Pred v va) \<le> rb_spec (Pred v va)
6. \<And>v. rb (Bool v) \<le> rb_spec (Bool v)
7. \<And>v va. rb (fmla.Eq v va) \<le> rb_spec (fmla.Eq v va)
[PROOF STEP]
case (Exists x Q)
[PROOF STATE]
proof (state)
this:
rb Q \<le> rb_spec Q
goal (7 subgoals):
1. \<And>Q. rb Q \<le> rb_spec Q \<Longrightarrow> rb (Neg Q) \<le> rb_spec (Neg Q)
2. \<And>Q1 Q2. \<lbrakk>rb Q1 \<le> rb_spec Q1; \<And>x. rb Q2 \<le> rb_spec Q2\<rbrakk> \<Longrightarrow> rb (Disj Q1 Q2) \<le> rb_spec (Disj Q1 Q2)
3. \<And>Q1 Q2. \<lbrakk>rb Q1 \<le> rb_spec Q1; \<And>x. rb Q2 \<le> rb_spec Q2\<rbrakk> \<Longrightarrow> rb (Conj Q1 Q2) \<le> rb_spec (Conj Q1 Q2)
4. \<And>x Q. rb Q \<le> rb_spec Q \<Longrightarrow> rb (Exists x Q) \<le> rb_spec (Exists x Q)
5. \<And>v va. rb (Pred v va) \<le> rb_spec (Pred v va)
6. \<And>v. rb (Bool v) \<le> rb_spec (Bool v)
7. \<And>v va. rb (fmla.Eq v va) \<le> rb_spec (fmla.Eq v va)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
rb Q \<le> rb_spec Q
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
rb Q \<le> rb_spec Q
goal (1 subgoal):
1. rb (Exists x Q) \<le> rb_spec (Exists x Q)
[PROOF STEP]
unfolding rb.simps rb_spec_def bind_rule_complete
[PROOF STATE]
proof (prove)
using this:
rb Q \<le> SPEC (\<lambda>Q'. rrb Q' \<and> simplified Q' \<and> Q \<triangleq> Q' \<and> fv Q' \<subseteq> fv Q)
goal (1 subgoal):
1. rb Q \<le> SPEC (\<lambda>xa. WHILE\<^sub>T\<^bsup>rb_INV x xa\<^esup> (\<lambda>\<Q>. fixbound \<Q> x \<noteq> {}) (\<lambda>\<Q>. RES (fixbound \<Q> x) \<bind> (\<lambda>Qfix. SPEC (cov x Qfix) \<bind> (\<lambda>G. RETURN (\<Q> - {Qfix} \<union> {simp (Conj Qfix (DISJ (qps G)))} \<union> (\<Union>y\<in>eqs x G. {cp (Qfix[x \<^bold>\<rightarrow> y])}) \<union> {cp (Qfix \<^bold>\<bottom> x)})))) (flat_Disj xa) \<le> SPEC (\<lambda>xa. RETURN (simp (DISJ (exists x ` xa))) \<le> SPEC (\<lambda>Q'. rrb Q' \<and> simplified Q' \<and> Exists x Q \<triangleq> Q' \<and> fv Q' \<subseteq> fv (Exists x Q))))
[PROOF STEP]
by (rule order_trans, refine_vcg WHILEIT_rule[where R="measure (\<lambda>\<Q>. card (fixbound \<Q> x))"])
(auto simp: rb_INV_rrb rrb_simp simplified_simp fixbound_fv equiv_trans[OF equiv_Exists_cong rb_INV_equiv]
cov_fixbound fixbound_empty_Gen card_gt_0_iff UNION_singleton_eq_range subset_eq
intro!: equiv_simp[THEN equiv_trans, THEN equiv_sym, OF equiv_sym]
dest!: fv_DISJ[THEN set_mp, rotated 1] fv_simp[THEN set_mp] elim!: bspec elim: rb_INV_fv simp del: cp.simps)
[PROOF STATE]
proof (state)
this:
rb (Exists x Q) \<le> rb_spec (Exists x Q)
goal (6 subgoals):
1. \<And>Q. rb Q \<le> rb_spec Q \<Longrightarrow> rb (Neg Q) \<le> rb_spec (Neg Q)
2. \<And>Q1 Q2. \<lbrakk>rb Q1 \<le> rb_spec Q1; \<And>x. rb Q2 \<le> rb_spec Q2\<rbrakk> \<Longrightarrow> rb (Disj Q1 Q2) \<le> rb_spec (Disj Q1 Q2)
3. \<And>Q1 Q2. \<lbrakk>rb Q1 \<le> rb_spec Q1; \<And>x. rb Q2 \<le> rb_spec Q2\<rbrakk> \<Longrightarrow> rb (Conj Q1 Q2) \<le> rb_spec (Conj Q1 Q2)
4. \<And>v va. rb (Pred v va) \<le> rb_spec (Pred v va)
5. \<And>v. rb (Bool v) \<le> rb_spec (Bool v)
6. \<And>v va. rb (fmla.Eq v va) \<le> rb_spec (fmla.Eq v va)
[PROOF STEP]
qed (auto simp: rb_spec_def bind_rule_complete rrb_simp simplified_simp subset_eq dest!: fv_simp[THEN set_mp]
elim!: order_trans intro!: equiv_simp[THEN equiv_trans, THEN equiv_sym, OF equiv_sym] simp del: cp.simps)
|
{"llama_tokens": 1978, "file": "Safe_Range_RC_Restrict_Bounds", "length": 7}
|
%kiconvolve 'Perform convolution or correlation on image data'
% This MatLab function was automatically generated by a converter (KhorosToMatLab) from the Khoros iconvolve.pane file
%
% Parameters:
% InputFile: i1 'Input image', required: 'input image'
% InputFile: i2 'Kernel ', required: 'kernel'
% Toggle: upcast 'Upcast data', default: 0: 'Upcast data to double or double complex for processing'
% Integer: wc 'Width ', default: 0: 'Width coordinate of kernel hotspot'
% Integer: hc 'Height ', default: 0: 'Height coordinate of kernel hotspot'
% OutputFile: o 'Output image', required: 'output image'
%
% Example: o = kiconvolve({i1, i2}, {'i1','';'i2','';'upcast',0;'wc',0;'hc',0;'o',''})
%
% Khoros helpfile follows below:
%
% PROGRAM
% iconvolve - Perform convolution or correlation on image data
%
% DESCRIPTION
% \fBiconvolve" is a simple interface to the more general \fBklinearop\fP
% routine in the DATAMANIP toolbox. \fBiconvolve" is set up to perform
% convolution or correlation on image data (WxHx?x?x?) using a WxH kernel,
% in zero-padded, linear mode (not circular).
%
% The kernel origin, center, or "hotspot" can be set using the -wc and -hc
% arguments; otherwise the KPDS_KERNEL_ORIGIN attribute for the kernel object
% is queried for that information.
%
%
%
% EXAMPLES
% iconvolve -i1 ball.xv -i2 laplacian -corr 0 -o edges
%
% "SEE ALSO"
% kfft(1), kconvolve(1)
%
% RESTRICTIONS
%
% REFERENCES
%
% COPYRIGHT
% Copyright (C) 1993 - 1997, Khoral Research, Inc. ("KRI") All rights reserved.
%
function varargout = kiconvolve(varargin)
if nargin ==0
Inputs={};arglist={'',''};
elseif nargin ==1
Inputs=varargin{1};arglist={'',''};
elseif nargin ==2
Inputs=varargin{1}; arglist=varargin{2};
else error('Usage: [out1,..] = kiconvolve(Inputs,arglist).');
end
if size(arglist,2)~=2
error('arglist must be of form {''ParameterTag1'',value1;''ParameterTag2'',value2}')
end
narglist={'i1', '__input';'i2', '__input';'upcast', 0;'wc', 0;'hc', 0;'o', '__output'};
maxval={0,0,0,1,1,0};
minval={0,0,0,1,1,0};
istoggle=[0,0,1,1,1,0];
was_set=istoggle * 0;
paramtype={'InputFile','InputFile','Toggle','Integer','Integer','OutputFile'};
% identify the input arrays and assign them to the arguments as stated by the user
if ~iscell(Inputs)
Inputs = {Inputs};
end
NumReqOutputs=1; nextinput=1; nextoutput=1;
for ii=1:size(arglist,1)
wasmatched=0;
for jj=1:size(narglist,1)
if strcmp(arglist{ii,1},narglist{jj,1}) % a given argument was matched to the possible arguments
wasmatched = 1;
was_set(jj) = 1;
if strcmp(narglist{jj,2}, '__input')
if (nextinput > length(Inputs))
error(['Input ' narglist{jj,1} ' has no corresponding input!']);
end
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
elseif strcmp(narglist{jj,2}, '__output')
if (nextoutput > nargout)
error(['Output nr. ' narglist{jj,1} ' is not present in the assignment list of outputs !']);
end
if (isempty(arglist{ii,2}))
narglist{jj,2} = 'OK_out';
else
narglist{jj,2} = arglist{ii,2};
end
nextoutput = nextoutput + 1;
if (minval{jj} == 0)
NumReqOutputs = NumReqOutputs - 1;
end
elseif isstr(arglist{ii,2})
narglist{jj,2} = arglist{ii,2};
else
if strcmp(paramtype{jj}, 'Integer') & (round(arglist{ii,2}) ~= arglist{ii,2})
error(['Argument ' arglist{ii,1} ' is of integer type but non-integer number ' arglist{ii,2} ' was supplied']);
end
if (minval{jj} ~= 0 | maxval{jj} ~= 0)
if (minval{jj} == 1 & maxval{jj} == 1 & arglist{ii,2} < 0)
error(['Argument ' arglist{ii,1} ' must be bigger or equal to zero!']);
elseif (minval{jj} == -1 & maxval{jj} == -1 & arglist{ii,2} > 0)
error(['Argument ' arglist{ii,1} ' must be smaller or equal to zero!']);
elseif (minval{jj} == 2 & maxval{jj} == 2 & arglist{ii,2} <= 0)
error(['Argument ' arglist{ii,1} ' must be bigger than zero!']);
elseif (minval{jj} == -2 & maxval{jj} == -2 & arglist{ii,2} >= 0)
error(['Argument ' arglist{ii,1} ' must be smaller than zero!']);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} < minval{jj})
error(['Argument ' arglist{ii,1} ' must be bigger than ' num2str(minval{jj})]);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} > maxval{jj})
error(['Argument ' arglist{ii,1} ' must be smaller than ' num2str(maxval{jj})]);
end
end
end
if ~strcmp(narglist{jj,2},'OK_out') & ~strcmp(narglist{jj,2},'OK_in')
narglist{jj,2} = arglist{ii,2};
end
end
end
if (wasmatched == 0 & ~strcmp(arglist{ii,1},''))
error(['Argument ' arglist{ii,1} ' is not a valid argument for this function']);
end
end
% match the remaining inputs/outputs to the unused arguments and test for missing required inputs
for jj=1:size(narglist,1)
if strcmp(paramtype{jj}, 'Toggle')
if (narglist{jj,2} ==0)
narglist{jj,1} = '';
end;
narglist{jj,2} = '';
end;
if ~strcmp(narglist{jj,2},'__input') && ~strcmp(narglist{jj,2},'__output') && istoggle(jj) && ~ was_set(jj)
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
if strcmp(narglist{jj,2}, '__input')
if (minval{jj} == 0) % meaning this input is required
if (nextinput > size(Inputs))
error(['Required input ' narglist{jj,1} ' has no corresponding input in the list!']);
else
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
end
else % this is an optional input
if (nextinput <= length(Inputs))
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end;
else
if strcmp(narglist{jj,2}, '__output')
if (minval{jj} == 0) % this is a required output
if (nextoutput > nargout && nargout > 1)
error(['Required output ' narglist{jj,1} ' is not stated in the assignment list!']);
else
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
NumReqOutputs = NumReqOutputs-1;
end
else % this is an optional output
if (nargout - nextoutput >= NumReqOutputs)
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end
end
end
end
if nargout
varargout = cell(1,nargout);
else
varargout = cell(1,1);
end
global KhorosRoot
if exist('KhorosRoot') && ~isempty(KhorosRoot)
w=['"' KhorosRoot];
else
if ispc
w='"C:\Program Files\dip\khorosBin\';
else
[s,w] = system('which cantata');
w=['"' w(1:end-8)];
end
end
[varargout{:}]=callKhoros([w 'kconvolve" -doff'],Inputs,narglist);
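% Hypothetical usage sketch (not from the original file): with no arglist,
% the two required inputs and the single output are matched automatically.
%   out = kiconvolve({img, kern});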
|
{"author": "aludnam", "repo": "MATLAB", "sha": "020b5cb02cc843e09a0ed689589382f18cce5e6d", "save_path": "github-repos/MATLAB/aludnam-MATLAB", "path": "github-repos/MATLAB/aludnam-MATLAB/MATLAB-020b5cb02cc843e09a0ed689589382f18cce5e6d/matlab_tools/Converted/kiconvolve.m"}
|
from collections.abc import MutableMapping
import numpy as np
import networkx as nx
import osmnx as ox
import pandas as pd
from gym import spaces
from gym.spaces import flatten, Dict
from epoxy.Rider import Rider
from epoxy.Driver import Driver
class StateGraph:
def __init__(self, number_drivers, ride_data, current_time, graph):
self.number_drivers = number_drivers
self.drivers = []
self.time = current_time
self.ride_data = ride_data
self.amount_of_riders = 0
self.riders = []
self.update_riders()
self.state = [self.drivers, self.riders, self.time]
self.graph = graph
self.missed = 0
self.actions = 0
self.dropped_off_riders = []
def set_drivers_random(self, nodes_list):
self.drivers = []
for i in range(self.number_drivers):
self.drivers.append(Driver(1, nodes_list, i))
return self.drivers
def set_time(self, time):
self.time = time
def update_riders(self):
updated_riders = []
for rider in self.riders:
flag = rider.update()
if flag:
updated_riders.append(rider)
else:
self.missed += rider.get_wait_time()
self.riders = updated_riders
        # `amount_of_riders` is a cursor into ride_data: advance it by the
        # number of records consumed here (this assumes ride_data is ordered
        # by pickup time, so due requests form a prefix of the remaining slice).
        counter = len(self.riders)
        new_riders = 0
        for value in self.ride_data[self.amount_of_riders:]:
            if int(value["pickup_time"]) <= self.time:
                self.riders.append(Rider(value["pickup_time"], counter, value["PULocationID"],
                                         value["DOLocationID"]))
                counter += 1
                new_riders += 1
        self.amount_of_riders += new_riders
def evaluate_state(self):
total = 0
for rider in self.riders:
total += rider.get_wait_time()
for driver in self.drivers:
total += driver.get_wait_time()
return -(total + self.missed)
    def remove_riders(self, riders):
        # Drop every rider that appears in the removal list; each survivor is
        # kept exactly once.
        self.riders = [rider for rider in self.riders if rider not in riders]
def perform_action(self, actions, time):
self.time = time
self.actions = actions
for i, action in enumerate(self.actions[0]):
if isinstance(action, bool) and action:
driver_obj = self.drivers[i]
pos = driver_obj.get_position()
rider_pos = self.any_riders_at_pos(pos)
driver_obj.use_action(action, self.graph, rider_pos)
elif isinstance(action, bool) and not action:
riders = self.drivers[i].use_action(action, self.graph)
self.dropped_off_riders.append(riders)
self.remove_riders(riders)
else:
self.drivers[i].use_action(action, self.graph)
self.update_riders()
return self
def any_riders_at_pos(self, position):
riders_list = []
for rider in self.riders:
if int(rider.start_position) == int(position):
riders_list.append(rider)
return riders_list
def state_in_arrays(self):
drivers_list = []
for driver in self.drivers:
drivers_list.append(driver.arrays())
riders_list = []
for rider in self.riders:
riders_list.append(rider.arrays())
return [drivers_list, riders_list]
def to_dict(self):
drivers_list = []
for driver in self.drivers:
drivers_list.append(driver.to_dict())
riders_list = []
for rider in self.riders:
riders_list.append(rider.to_dict())
output = {'driver': drivers_list, 'rider': riders_list}
# value = pd.json_normalize(output, sep='.').to_dict(orient='records')
return output
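# Illustrative shape of a ride_data record, inferred from the keys accessed
# in update_riders (values are hypothetical):
#   {"pickup_time": 3, "PULocationID": 42, "DOLocationID": 161}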
|
{"hexsha": "5c981bcc663828f71998f499b7d20b9cff00ca4c", "size": 3905, "ext": "py", "lang": "Python", "max_stars_repo_path": "epoxy/stategraph.py", "max_stars_repo_name": "WilliamOrringe/RideSharingMachineLearning", "max_stars_repo_head_hexsha": "5f0d2ac3cab5dbf8618cff202e201fe1c7016728", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "epoxy/stategraph.py", "max_issues_repo_name": "WilliamOrringe/RideSharingMachineLearning", "max_issues_repo_head_hexsha": "5f0d2ac3cab5dbf8618cff202e201fe1c7016728", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "epoxy/stategraph.py", "max_forks_repo_name": "WilliamOrringe/RideSharingMachineLearning", "max_forks_repo_head_hexsha": "5f0d2ac3cab5dbf8618cff202e201fe1c7016728", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9565217391, "max_line_length": 94, "alphanum_fraction": 0.5912932138, "include": true, "reason": "import numpy,import networkx", "num_tokens": 852}
|
#!/usr/bin/env python3
# Usage:
# PYTHONPATH=src ./encode.py <file|directory|glob> /path/to/output.npz
# PYTHONPATH=src ./train --dataset /path/to/output.npz
import argparse
import numpy as np
import sys
import tqdm
from ftfy import fix_text
import tflex_utils
parser = argparse.ArgumentParser(
description='Use FTFY to prepare a dataset for training.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infile', metavar='PATH', type=str, help='Input file, directory, or glob pattern (utf-8 text).')
parser.add_argument('--outfile', default="-", type=str, help='Output file path, or - for stdout')
def main():
args = parser.parse_args()
    out = sys.stdout if args.outfile == '-' else open(args.outfile, "w", encoding="utf-8")
for i, line in tflex_utils.for_each_line(args.infile, message='Fixing'):
fixed = fix_text(line)
out.write(fixed)
if i % 100 == 0:
out.flush()
if __name__ == '__main__':
main()
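# Illustrative ftfy behaviour (standard mojibake repair; example is not from
# this repo):
#   fix_text('schÃ¶n') == 'schön'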
|
{"hexsha": "81c24d91912b9c1e8f7733fb325a28d2dffdc44d", "size": 967, "ext": "py", "lang": "Python", "max_stars_repo_path": "prepare_dataset.py", "max_stars_repo_name": "scripples/scripp_gpt-2", "max_stars_repo_head_hexsha": "d0b5b31fce107440d48c5447cc04cce7bc5ef639", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 105, "max_stars_repo_stars_event_min_datetime": "2019-10-15T18:31:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T15:59:41.000Z", "max_issues_repo_path": "prepare_dataset.py", "max_issues_repo_name": "scripples/scripp_gpt-2", "max_issues_repo_head_hexsha": "d0b5b31fce107440d48c5447cc04cce7bc5ef639", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2019-11-15T02:25:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-16T17:42:37.000Z", "max_forks_repo_path": "prepare_dataset.py", "max_forks_repo_name": "scripples/scripp_gpt-2", "max_forks_repo_head_hexsha": "d0b5b31fce107440d48c5447cc04cce7bc5ef639", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 46, "max_forks_repo_forks_event_min_datetime": "2019-08-22T20:12:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T19:09:48.000Z", "avg_line_length": 30.21875, "max_line_length": 116, "alphanum_fraction": 0.6990692865, "include": true, "reason": "import numpy", "num_tokens": 249}
|
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pylab as pl
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.image as mpimg
# Load the blue channel of the PNG; the PIL handle opened first is
# immediately superseded by the mpimg read and kept only for experimentation.
img = Image.open('309.14.png')  # [:,:,2]  # .convert('L')
img = mpimg.imread('309.14.png')[:, :, 2]
z = np.asarray(img) * 100
zz = z  # optionally crop, e.g. z[280:, 450:650]
mydata = zz[::1, ::1]
x, y = np.mgrid[:mydata.shape[0], :mydata.shape[1]]
#%%
#fig = pl.figure(facecolor='w')
#ax1 = fig.add_subplot(1,2,1)
#im = ax1.imshow(mydata,interpolation='nearest',cmap=pl.cm.jet)
#ax1.set_title('2D')
#
##%%
#
#ax2 = fig.add_subplot(1,2,2,projection='3d')
#ax2.plot_surface(x,y,mydata,cmap=pl.cm.jet,rstride=1,cstride=1,linewidth=0.,antialiased=False)
#ax2.set_title('3D')
#ax2.set_zlim3d(0,100)
#pl.show()
#fig = plt.figure()
#ax = fig.gca(projection='3d')
#surf = ax.plot_surface(x,y,zz, cmap=cm.cool,)
#fig.colorbar(surf, shrink=0.5, aspect=5)
#plt.show()
# 10-sample sliding average down column 200 (window rows j-4 .. j+5).
gd = np.array([])
j = 4
for i in range(len(z[:, 0]) - 10):
    summe = sum(z[j - 4:j + 6, 200]) / 10
    gd = np.append(gd, summe)
    j += 1
print(len(gd))
fig2 = plt.figure()
print(len(z[4:474,200]))
plt.plot(z[4:474,200])
plt.plot(gd, 'r')
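# The loop above is a 10-sample moving average of column 200; an
# approximately equivalent vectorised form (one extra trailing window) is:
#   gd = np.convolve(z[:, 200], np.ones(10) / 10, mode='valid')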
|
{"hexsha": "73a024a33aa07b09937839eb67041cd4ae25d4fd", "size": 1212, "ext": "py", "lang": "Python", "max_stars_repo_path": "2 - data2graph/zzz_testfile.py", "max_stars_repo_name": "Tocha4/HSM-Solubility", "max_stars_repo_head_hexsha": "8a83c1270d739f0c7fbb7decf6202e90e6ebc083", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2 - data2graph/zzz_testfile.py", "max_issues_repo_name": "Tocha4/HSM-Solubility", "max_issues_repo_head_hexsha": "8a83c1270d739f0c7fbb7decf6202e90e6ebc083", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2 - data2graph/zzz_testfile.py", "max_forks_repo_name": "Tocha4/HSM-Solubility", "max_forks_repo_head_hexsha": "8a83c1270d739f0c7fbb7decf6202e90e6ebc083", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.25, "max_line_length": 96, "alphanum_fraction": 0.6402640264, "include": true, "reason": "import numpy", "num_tokens": 420}
|
[STATEMENT]
lemma drop_Cons_Suc:
"\<And>xs. drop n xs = y#ys \<Longrightarrow> drop (Suc n) xs = ys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>xs. drop n xs = y # ys \<Longrightarrow> drop (Suc n) xs = ys
[PROOF STEP]
proof(induct n)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>xs. drop 0 xs = y # ys \<Longrightarrow> drop (Suc 0) xs = ys
2. \<And>n xs. \<lbrakk>\<And>xs. drop n xs = y # ys \<Longrightarrow> drop (Suc n) xs = ys; drop (Suc n) xs = y # ys\<rbrakk> \<Longrightarrow> drop (Suc (Suc n)) xs = ys
[PROOF STEP]
case (Suc n)
[PROOF STATE]
proof (state)
this:
drop n ?xs = y # ys \<Longrightarrow> drop (Suc n) ?xs = ys
drop (Suc n) xs = y # ys
goal (2 subgoals):
1. \<And>xs. drop 0 xs = y # ys \<Longrightarrow> drop (Suc 0) xs = ys
2. \<And>n xs. \<lbrakk>\<And>xs. drop n xs = y # ys \<Longrightarrow> drop (Suc n) xs = ys; drop (Suc n) xs = y # ys\<rbrakk> \<Longrightarrow> drop (Suc (Suc n)) xs = ys
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
drop n ?xs = y # ys \<Longrightarrow> drop (Suc n) ?xs = ys
drop (Suc n) xs = y # ys
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
drop n ?xs = y # ys \<Longrightarrow> drop (Suc n) ?xs = ys
drop (Suc n) xs = y # ys
goal (1 subgoal):
1. drop (Suc (Suc n)) xs = ys
[PROOF STEP]
by(simp add: drop_Suc)
[PROOF STATE]
proof (state)
this:
drop (Suc (Suc n)) xs = ys
goal (1 subgoal):
1. \<And>xs. drop 0 xs = y # ys \<Longrightarrow> drop (Suc 0) xs = ys
[PROOF STEP]
qed simp
|
{"llama_tokens": 669, "file": "Jinja_Compiler_TypeComp", "length": 6}
|
[STATEMENT]
lemma rewrite_negated_primitives_normalized_preserves_unrelated_helper:
assumes wf_disc_sel: "wf_disc_sel (disc, sel) C"
and disc: "\<forall>a. \<not> disc2 (C a)"
and disc_p: "(\<forall>a. \<not> disc2 (Prot a)) \<or> \<not> has_disc_negated disc False m" (*either we do not disc on protocol or there is no negated port*)
shows "normalized_nnf_match m \<Longrightarrow>
normalized_n_primitive (disc2, sel2) f m \<Longrightarrow>
a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)) \<Longrightarrow>
normalized_n_primitive (disc2, sel2) f a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
have helper_a_normalized: "a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)) \<Longrightarrow>
normalized_n_primitive (disc, sel) f x \<Longrightarrow>
(\<forall>s \<in> set spts. normalized_n_primitive (disc, sel) f s) \<Longrightarrow>
normalized_n_primitive (disc, sel) f rst \<Longrightarrow>
normalized_n_primitive (disc, sel) f a"
for a x spts rst f disc and sel::"'a common_primitive \<Rightarrow> 'b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f a
[PROOF STEP]
apply(subgoal_tac "\<exists> s r. a = MatchAnd x (MatchAnd s r) \<and> s \<in> set spts \<and> r \<in> set (normalize_match rst)")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst; \<exists>s r. a = MatchAnd x (MatchAnd s r) \<and> s \<in> set spts \<and> r \<in> set (normalize_match rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f a
2. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst\<rbrakk> \<Longrightarrow> \<exists>s r. a = MatchAnd x (MatchAnd s r) \<and> s \<in> set spts \<and> r \<in> set (normalize_match rst)
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst\<rbrakk> \<Longrightarrow> \<exists>s r. a = MatchAnd x (MatchAnd s r) \<and> s \<in> set spts \<and> r \<in> set (normalize_match rst)
2. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst; \<exists>s r. a = MatchAnd x (MatchAnd s r) \<and> s \<in> set spts \<and> r \<in> set (normalize_match rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f a
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst; \<exists>s r. a = MatchAnd x (MatchAnd s r) \<and> s \<in> set spts \<and> r \<in> set (normalize_match rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f a
[PROOF STEP]
apply(elim exE conjE, rename_tac s r)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>s r. \<lbrakk>a \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst; a = MatchAnd x (MatchAnd s r); s \<in> set spts; r \<in> set (normalize_match rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f a
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>s r. \<lbrakk>MatchAnd x (MatchAnd s r) \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst; a = MatchAnd x (MatchAnd s r); s \<in> set spts; r \<in> set (normalize_match rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f r
[PROOF STEP]
using normalize_match_preserves_normalized_n_primitive
[PROOF STATE]
proof (prove)
using this:
normalized_n_primitive ?disc_sel ?f ?rst \<Longrightarrow> \<forall>m\<in>set (normalize_match ?rst). normalized_n_primitive ?disc_sel ?f m
goal (1 subgoal):
1. \<And>s r. \<lbrakk>MatchAnd x (MatchAnd s r) \<in> MatchAnd x ` (\<Union>x\<in>set spts. MatchAnd x ` set (normalize_match rst)); normalized_n_primitive (disc, sel) f x; \<forall>s\<in>set spts. normalized_n_primitive (disc, sel) f s; normalized_n_primitive (disc, sel) f rst; a = MatchAnd x (MatchAnd s r); s \<in> set spts; r \<in> set (normalize_match rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc, sel) f r
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<lbrakk>?a \<in> MatchAnd ?x ` (\<Union>x\<in>set ?spts. MatchAnd x ` set (normalize_match ?rst)); normalized_n_primitive (?disc, ?sel) ?f ?x; \<forall>s\<in>set ?spts. normalized_n_primitive (?disc, ?sel) ?f s; normalized_n_primitive (?disc, ?sel) ?f ?rst\<rbrakk> \<Longrightarrow> normalized_n_primitive (?disc, ?sel) ?f ?a
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
show "normalized_nnf_match m \<Longrightarrow>
normalized_n_primitive (disc2, sel2) f m \<Longrightarrow>
a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)) \<Longrightarrow>
normalized_n_primitive (disc2, sel2) f a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(case_tac "\<not> has_disc_negated disc False m")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)); \<not> has_disc_negated disc False m\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
2. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)); \<not> \<not> has_disc_negated disc False m\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)); \<not> has_disc_negated disc False m\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
using rewrite_negated_primitives_normalized_no_modification[OF wf_disc_sel]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<not> has_disc_negated disc False ?m; normalized_nnf_match ?m; ?a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one ?m))\<rbrakk> \<Longrightarrow> ?a = ?m
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)); \<not> has_disc_negated disc False m\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
by blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m)); \<not> \<not> has_disc_negated disc False m\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(simp add: rewrite_negated_primitives_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (case primitive_extractor (disc, sel) m of (spts, rst) \<Rightarrow> if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(case_tac "primitive_extractor (disc, sel) m", rename_tac spts rst)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (case primitive_extractor (disc, sel) m of (spts, rst) \<Rightarrow> if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(subgoal_tac "normalized_n_primitive (disc2, sel2) f rst")
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
2. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
prefer 2
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
2. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
subgoal for spts rst
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
apply(drule primitive_extractor_correct(5)[OF _ wf_disc_sel, where P="f"])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst)\<rbrakk> \<Longrightarrow> primitive_extractor (disc, sel) m = (?as, ?ms)
2. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); \<forall>disc2 sel2. normalized_n_primitive (disc2, sel2) f m \<longrightarrow> normalized_n_primitive (disc2, sel2) f ?ms\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); \<forall>disc2 sel2. normalized_n_primitive (disc2, sel2) f m \<longrightarrow> normalized_n_primitive (disc2, sel2) f rst\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
by(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(insert disc_p, simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst. \<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a)\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(drule(1) primitive_extractor_correct(8)[OF _ wf_disc_sel])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (if getNeg spts = [] then m else MatchAnd (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))) (MatchAnd (andfold_MatchExp (map (Match \<circ> C) (getPos spts))) rst))); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); (\<not> has_disc_negated disc False m) = (getNeg spts = [])\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; \<exists>x\<in>set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts)))). a \<in> MatchAnd x ` (\<Union>x\<in>set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos spts)))). MatchAnd x ` set (normalize_match rst)); has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(elim bexE)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts)))); a \<in> MatchAnd x ` (\<Union>x\<in>set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos spts)))). MatchAnd x ` set (normalize_match rst))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
[PROOF STEP]
apply(erule helper_a_normalized)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f x
2. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> \<forall>s\<in>set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos spts)))). normalized_n_primitive (disc2, sel2) f s
3. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
subgoal for spts
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f x_
[PROOF STEP]
apply(rule_tac pts="(getNeg spts)" in negated_normalized_folded_ports_normalized_n_primitive[where C=C])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> \<forall>a. \<not> disc2 (C a)
2. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> (\<forall>a. \<not> disc2 (Prot a)) \<or> getNeg spts = []
3. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))
[PROOF STEP]
using disc
[PROOF STATE]
proof (prove)
using this:
\<forall>a. \<not> disc2 (C a)
goal (3 subgoals):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> \<forall>a. \<not> disc2 (C a)
2. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> (\<forall>a. \<not> disc2 (Prot a)) \<or> getNeg spts = []
3. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))
[PROOF STEP]
apply(simp; fail)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> (\<forall>a. \<not> disc2 (Prot a)) \<or> getNeg spts = []
2. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))
[PROOF STEP]
using disc_p primitive_extractor_correct(8)[OF _ wf_disc_sel]
[PROOF STATE]
proof (prove)
using this:
(\<forall>a. \<not> disc2 (Prot a)) \<or> \<not> has_disc_negated disc False m
\<lbrakk>normalized_nnf_match ?m; primitive_extractor (disc, sel) ?m = (?as, ?ms)\<rbrakk> \<Longrightarrow> (\<not> has_disc_negated disc False ?m) = (getNeg ?as = [])
goal (2 subgoals):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> (\<forall>a. \<not> disc2 (Prot a)) \<or> getNeg spts = []
2. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))
[PROOF STEP]
apply blast
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))
[PROOF STEP]
by simp
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> \<forall>s\<in>set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos spts)))). normalized_n_primitive (disc2, sel2) f s
2. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
subgoal for x
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (x, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg x \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg x))))\<rbrakk> \<Longrightarrow> \<forall>s\<in>set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos x)))). normalized_n_primitive (disc2, sel2) f s
[PROOF STEP]
apply(intro ballI)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>s. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (x, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg x \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg x)))); s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos x))))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f s
[PROOF STEP]
apply(rule andfold_MatchExp_normalized_normalized_n_primitive_single[where C=C])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>s. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (x, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg x \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg x)))); s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos x))))\<rbrakk> \<Longrightarrow> \<forall>a. \<not> disc2 (C a)
2. \<And>s. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (x, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg x \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg x)))); s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos x))))\<rbrakk> \<Longrightarrow> s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (?xs1 s))))
[PROOF STEP]
using disc disc_p
[PROOF STATE]
proof (prove)
using this:
\<forall>a. \<not> disc2 (C a)
(\<forall>a. \<not> disc2 (Prot a)) \<or> \<not> has_disc_negated disc False m
goal (2 subgoals):
1. \<And>s. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (x, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg x \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg x)))); s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos x))))\<rbrakk> \<Longrightarrow> \<forall>a. \<not> disc2 (C a)
2. \<And>s. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (x, rst_); normalized_n_primitive (disc2, sel2) f rst_; \<forall>a. \<not> disc2 (Prot a); getNeg x \<noteq> []; x_ \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg x)))); s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (getPos x))))\<rbrakk> \<Longrightarrow> s \<in> set (normalize_match (andfold_MatchExp (map (Match \<circ> C) (?xs1 s))))
[PROOF STEP]
by(simp)+
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>spts rst x. \<lbrakk>normalized_n_primitive (disc2, sel2) f m; has_disc_negated disc False m; primitive_extractor (disc, sel) m = (spts, rst); normalized_n_primitive (disc2, sel2) f rst; \<forall>a. \<not> disc2 (Prot a); getNeg spts \<noteq> []; x \<in> set (normalize_match (andfold_MatchExp (map (l4_ports_negate_one C) (getNeg spts))))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f rst
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<lbrakk>normalized_nnf_match m; normalized_n_primitive (disc2, sel2) f m; a \<in> set (normalize_match (rewrite_negated_primitives (disc, sel) C l4_ports_negate_one m))\<rbrakk> \<Longrightarrow> normalized_n_primitive (disc2, sel2) f a
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 12678, "file": "Iptables_Semantics_Primitive_Matchers_Ports_Normalize", "length": 42}
|
using Wavelets
using Test
using LinearAlgebra
using DelimitedFiles
# modified from Base.Test
function vecnorm_eq(va, vb, Eps, astr="a", bstr="b")
if length(va) != length(vb)
#error("lengths of ", astr, " and ", bstr, " do not match: ",
# "\n ", astr, " (length $(length(va))) = ", va,
# "\n ", bstr, " (length $(length(vb))) = ", vb)
return false
end
diff = norm(va - vb)
if !isnan(Eps) && diff > Eps
#sdiff = string("|", astr, " - ", bstr, "| <= ", Eps)
#error("assertion failed: ", sdiff,
# "\n difference = ", diff, " > ", Eps)
return false
end
return true
end
macro vecnorm_eq_eps(a, b, c)
:(vecnorm_eq($(esc(a)), $(esc(b)), $(esc(c)), $(string(a)), $(string(b))))
end
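# Hypothetical usage of the helper macro (names are placeholders):
#   @vecnorm_eq_eps computed_coeffs reference_coeffs 1e-8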
@testset "Util" begin include("util.jl") end
@testset "Transforms" begin include("transforms.jl") end
@testset "Threshold" begin include("threshold.jl") end
@testset "Plot" begin include("plot.jl") end
|
{"hexsha": "3b9f7e2f0f955f48406bfe0e82ee75fc1f5221be", "size": 988, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "jonschumacher/Wavelets.jl", "max_stars_repo_head_hexsha": "1a09593ec8b51713c4784a13fabe070351e0d95c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 145, "max_stars_repo_stars_event_min_datetime": "2015-03-24T17:22:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T02:52:56.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "jonschumacher/Wavelets.jl", "max_issues_repo_head_hexsha": "1a09593ec8b51713c4784a13fabe070351e0d95c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2015-01-11T20:27:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T18:39:37.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "jonschumacher/Wavelets.jl", "max_forks_repo_head_hexsha": "1a09593ec8b51713c4784a13fabe070351e0d95c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 39, "max_forks_repo_forks_event_min_datetime": "2015-04-25T19:17:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T00:22:00.000Z", "avg_line_length": 30.875, "max_line_length": 78, "alphanum_fraction": 0.5617408907, "num_tokens": 304}
|
[STATEMENT]
lemma AbstrLevels_A9_A93:
assumes "sA9 \<in> AbstrLevel i"
shows "sA93 \<notin> AbstrLevel i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sA93 \<notin> AbstrLevel i
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sA93 \<notin> AbstrLevel i
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
sA9 \<in> AbstrLevel i
goal (1 subgoal):
1. sA93 \<notin> AbstrLevel i
[PROOF STEP]
by (induct i, simp add: AbstrLevel0, simp add: AbstrLevel1, simp add: AbstrLevel2, simp add: AbstrLevel3)
|
{"llama_tokens": 245, "file": "ComponentDependencies_DataDependenciesCaseStudy", "length": 3}
|
import baostock as bs
import pandas as pd
import numpy as np
from IPython import embed
class Data_Reader():
"""
reading the data from the file
"""
def __init__(self, file="stock.csv"):
self.file = file
self.code_list = []
self.data = None
    def read_data(self, file=None):
        # Default to the path given at construction time.
        file = file or self.file
        data_list = np.array(pd.read_csv(file, encoding="gbk")).tolist()
        for i in range(len(data_list)):
            self.code_list.append(data_list[i][1])
        for i in range(len(self.code_list)):
            # Normalise codes such as 'SH600000' to baostock's 'sh.600000'.
            self.code_list[i] = str.lower(self.code_list[i][:2]) + '.' + self.code_list[i][2:]
# print(self.code_list)
def download_data(self, code):
"""
:param code: type:str ex:sh.600000
:return: type:list
"""
rs_result = bs.query_history_k_data_plus(code, "date,close", start_date='2019-01-01', end_date='2021-12-2',
frequency="d", adjustflag="3")
df_result = rs_result.get_data()
data_list = np.array(df_result).tolist()
return data_list
    def addoutput(self):
        # 709 rows: the number of trading days download_data returns for the
        # hard-coded 2019-01-01 .. 2021-12-02 range.
        date = [i + 1 for i in range(709)]
        self.data = pd.DataFrame({"dates": date})
for i in range(len(self.code_list)):
stocks = self.download_data(self.code_list[i])
st = [stock[1] for stock in stocks]
self.data["stock" + str(i+1)] = st
print("stock", i+1, "added")
def forward(self):
self.read_data()
bs.login()
self.addoutput()
self.data.to_csv("data.csv", index=False, sep=",")
bs.logout()
return self.data
def update(self):
raise NotImplementedError
if __name__ == "__main__":
reader = Data_Reader()
data = reader.forward()
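# Illustrative stock.csv layout, inferred from read_data (column 1 holds
# codes such as 'SH600000', which get normalised to baostock's 'sh.600000';
# values are hypothetical):
#   index, code
#   0, SH600000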
|
{"hexsha": "0433118843ef461942d5527c10009de8c62d673d", "size": 1803, "ext": "py", "lang": "Python", "max_stars_repo_path": "loaddata.py", "max_stars_repo_name": "leafy-lee/Time_Series-stock_prediction", "max_stars_repo_head_hexsha": "9b2bcab2c9da5a5ad4898e551dfdfd7cad241c0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "loaddata.py", "max_issues_repo_name": "leafy-lee/Time_Series-stock_prediction", "max_issues_repo_head_hexsha": "9b2bcab2c9da5a5ad4898e551dfdfd7cad241c0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "loaddata.py", "max_forks_repo_name": "leafy-lee/Time_Series-stock_prediction", "max_forks_repo_head_hexsha": "9b2bcab2c9da5a5ad4898e551dfdfd7cad241c0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5573770492, "max_line_length": 115, "alphanum_fraction": 0.5662784248, "include": true, "reason": "import numpy", "num_tokens": 434}
|
function optimo = LocalSearch(Problem,pos,w)
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB Platform
% for Evolutionary Multi-Objective Optimization [Educational Forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------
    MaxIter = 5;
    Tol     = 1e-3;
    step    = 1;
    k       = 1;
    err     = 10;   % avoid shadowing the built-in error function
    while err > Tol && k < MaxIter
        grad(1,:) = FiniteDifference(pos,w,Problem);
        offspringdec = pos.dec - step*grad(1,:);
        offspringdec = min(max(offspringdec,Problem.lower),Problem.upper);
        offspring    = Problem.Evaluation(offspringdec);
        grad(2,:) = FiniteDifference(offspring,w,Problem);
        % Barzilai-Borwein style step size from successive iterates/gradients
        step = abs((offspring.dec-pos.dec)*(grad(2,:)-grad(1,:))')/norm(grad(2,:)-grad(1,:))^2;
        err  = norm(offspring.dec-pos.dec);
        pos  = offspring;
        k    = k + 1;
    end
optimo = pos;
end
function df = FiniteDifference(X,W,Problem)
if any(X.con>0)
df = Problem.CalConGrad(X.dec)';
df = sum(df,2);
else
df = Problem.CalObjGrad(X.dec)';
df = df*W';
end
end
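% Note: despite its name, FiniteDifference returns analytic gradients via the
% problem's CalObjGrad/CalConGrad callbacks, weighted by w for objectives or
% summed across constraints when any constraint is violated.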
|
{"author": "BIMK", "repo": "PlatEMO", "sha": "c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5", "save_path": "github-repos/MATLAB/BIMK-PlatEMO", "path": "github-repos/MATLAB/BIMK-PlatEMO/PlatEMO-c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5/PlatEMO/Algorithms/Multi-objective optimization/GPSO-M/LocalSearch.m"}
|
#coding:utf-8
import numpy as np
import tensorflow as tf
from Model import Model
# tf.batch_matmul was folded into tf.matmul around TF 0.12/1.0; note that
# this is a plain lexicographic string comparison on the version.
if tf.__version__ > '0.12.1':
    matmul_func = tf.matmul
else:
    matmul_func = tf.batch_matmul
class TransR(Model):
r'''
TransR first projects entities from entity space to corresponding relation space
and then builds translations between projected entities.
'''
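    # Score function (Lin et al., 2015): f_r(h, t) = ||h M_r + r - t||, where
    # M_r is the relation-specific projection matrix. Here _calc returns the
    # element-wise |h + r - t| and the L1 sum is taken in loss_def/predict_def.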
def _transfer(self, transfer_matrix, embeddings):
return matmul_func(embeddings, transfer_matrix)
def _calc(self, h, t, r):
h = tf.nn.l2_normalize(h, -1)
t = tf.nn.l2_normalize(t, -1)
r = tf.nn.l2_normalize(r, -1)
return abs(h + r - t)
def embedding_def(self):
#Obtaining the initial configuration of the model
config = self.get_config()
#Defining required parameters of the model, including embeddings of entities and relations, and mapping matrices
self.ent_embeddings = tf.get_variable(name = "ent_embeddings", shape = [config.entTotal, config.ent_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.rel_embeddings = tf.get_variable(name = "rel_embeddings", shape = [config.relTotal, config.rel_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.transfer_matrix = tf.get_variable(name = "transfer_matrix", shape = [config.relTotal, config.ent_size * config.rel_size], initializer = tf.contrib.layers.xavier_initializer(uniform = False))
self.parameter_lists = {"ent_embeddings":self.ent_embeddings, \
"rel_embeddings":self.rel_embeddings, \
"transfer_matrix":self.transfer_matrix}
def loss_def(self):
#Obtaining the initial configuration of the model
config = self.get_config()
#To get positive triples and negative triples for training
#The shapes of pos_h, pos_t, pos_r are (batch_size, 1)
#The shapes of neg_h, neg_t, neg_r are (batch_size, negative_ent + negative_rel)
pos_h, pos_t, pos_r = self.get_positive_instance(in_batch = True)
neg_h, neg_t, neg_r = self.get_negative_instance(in_batch = True)
#Embedding entities and relations of triples, e.g. pos_h_e, pos_t_e and pos_r_e are embeddings for positive triples
pos_h_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_h)
pos_t_e = tf.nn.embedding_lookup(self.ent_embeddings, pos_t)
pos_r_e = tf.nn.embedding_lookup(self.rel_embeddings, pos_r)
neg_h_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_h)
neg_t_e = tf.nn.embedding_lookup(self.ent_embeddings, neg_t)
neg_r_e = tf.nn.embedding_lookup(self.rel_embeddings, neg_r)
#Getting the required mapping matrices
pos_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, pos_r), [-1, config.ent_size, config.rel_size])
#Calculating score functions for all positive triples and negative triples
p_h = self._transfer(pos_matrix, pos_h_e)
p_t = self._transfer(pos_matrix, pos_t_e)
p_r = pos_r_e
if config.negative_rel == 0:
n_h = self._transfer(pos_matrix, neg_h_e)
n_t = self._transfer(pos_matrix, neg_t_e)
n_r = neg_r_e
else:
neg_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, neg_r), [-1, config.ent_size, config.rel_size])
n_h = self._transfer(neg_matrix, neg_h_e)
n_t = self._transfer(neg_matrix, neg_t_e)
n_r = neg_r_e
#The shape of _p_score is (batch_size, 1, hidden_size)
#The shape of _n_score is (batch_size, negative_ent + negative_rel, hidden_size)
_p_score = self._calc(p_h, p_t, p_r)
_n_score = self._calc(n_h, n_t, n_r)
#The shape of p_score is (batch_size, 1, 1)
#The shape of n_score is (batch_size, negative_ent + negative_rel, 1)
p_score = tf.reduce_sum(_p_score, -1, keep_dims = True)
n_score = tf.reduce_sum(_n_score, -1, keep_dims = True)
#Calculating loss to get what the framework will optimize
self.loss = tf.reduce_mean(tf.maximum(p_score - n_score + config.margin, 0))
def predict_def(self):
config = self.get_config()
predict_h, predict_t, predict_r = self.get_predict_instance()
predict_h_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, predict_h), [1, -1, config.ent_size])
predict_t_e = tf.reshape(tf.nn.embedding_lookup(self.ent_embeddings, predict_t), [1, -1, config.ent_size])
predict_r_e = tf.reshape(tf.nn.embedding_lookup(self.rel_embeddings, predict_r), [1, -1, config.rel_size])
predict_matrix = tf.reshape(tf.nn.embedding_lookup(self.transfer_matrix, predict_r[0]), [1, config.ent_size, config.rel_size])
h_e = tf.reshape(self._transfer(predict_matrix, predict_h_e), [-1, config.rel_size])
t_e = tf.reshape(self._transfer(predict_matrix, predict_t_e), [-1, config.rel_size])
r_e = predict_r_e
self.predict = tf.reduce_sum(self._calc(h_e, t_e, r_e), -1, keep_dims = True)
|
{"hexsha": "f167e236e1ceef19976412532ef1e4896add907e", "size": 4628, "ext": "py", "lang": "Python", "max_stars_repo_path": "TransR.py", "max_stars_repo_name": "jaytx/MyRep", "max_stars_repo_head_hexsha": "6333eb7196ecf808810d439297895e7b75a99729", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TransR.py", "max_issues_repo_name": "jaytx/MyRep", "max_issues_repo_head_hexsha": "6333eb7196ecf808810d439297895e7b75a99729", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TransR.py", "max_forks_repo_name": "jaytx/MyRep", "max_forks_repo_head_hexsha": "6333eb7196ecf808810d439297895e7b75a99729", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.5909090909, "max_line_length": 197, "alphanum_fraction": 0.7547536733, "include": true, "reason": "import numpy", "num_tokens": 1242}
|
module SodShockTube
using NLsolve: nlsolve
using PartialFunctions
using Documenter
export solve, ShockTubeProblem
sound_speed(γ, p, ρ) = √(γ * p / ρ)
# Implicit relation whose root is the post-shock pressure p4. NLsolve treats
# the unknown as a length-1 vector, so the residual is returned as one too.
function shock_tube_fn(p1, p5, ρ1, ρ5, γ, p4)
    z = (p4[1] / p5 - 1)
    c1 = sound_speed(γ, p1, ρ1)
    c5 = sound_speed(γ, p5, ρ5)
    gm1 = γ - 1
    gp1 = γ + 1
    g2 = 2 * γ
    fact = gm1 / g2 * (c5 / c1) * z / √(1 + gp1 / g2 * z)
    fact = (1 - fact)^(g2 / gm1)
    return [p1 * fact - p4[1]]
end
function calculate_regions(pℓ, uℓ, ρℓ, pr, ur, ρr, γ=1.4)
if pℓ < pr
ρ1, p1, u1 = ρr, pr, ur
ρ5, p5, u5 = ρℓ, pℓ, uℓ
else
ρ1, p1, u1 = ρℓ, pℓ, uℓ
ρ5, p5, u5 = ρr, pr, ur
end
    p4 = nlsolve(shock_tube_fn$(p1, p5, ρ1, ρ5, γ), [p1]).zero[1]
z = (p4 / p5 - 1)
c5 = sound_speed(γ, p5, ρ5)
gm1 = γ - 1
gp1 = γ + 1
gmfac1 = 0.5 * gm1 / γ
gmfac2 = 0.5 * gp1 / γ
fact = sqrt(1 + gmfac2 * z)
u4 = c5 * z / (γ * fact)
ρ4 = ρ5 * (1 + gmfac2 * z) / (1 + gmfac1 * z)
w = c5 * fact
p3 = p4
u3 = u4
ρ3 = ρ1 * (p3 / p1)^(1 / γ)
region1 = (p1, ρ1, u1)
region3 = (p3, ρ3, u3)
region4 = (p4, ρ4, u4)
region5 = (p5, ρ5, u5)
return region1, region3, region4, region5, w
end
function calc_positions(pℓ, pr, region1, region3, w, xi, t, γ)
p1, ρ1, u1 = region1
p3, ρ3, u3 = region3
c1 = sound_speed(γ, p1, ρ1)
c3 = sound_speed(γ, p3, ρ3)
if pℓ > pr
xsh = xi + w * t
xcd = xi + u3 * t
xft = xi + (u3 - c3) * t
xhd = xi - c1 * t
else
xsh = xi - w * t
xcd = xi - u3 * t
xft = xi - (u3 - c3) * t
xhd = xi + c1 * t
end
return xhd, xft, xcd, xsh
end
function region_states(pℓ, pr, region1, region3, region4, region5)
if pℓ > pr
return Dict(
"Region 1" => region1,
"Region 2" => "RAREFACTION",
"Region 3" => region3,
"Region 4" => region4,
"Region 5" => region5,
)
else
return Dict(
"Region 1" => region5,
"Region 2" => region4,
"Region 3" => region2,
"Region 4" => "RAREFACTION",
"Region 5" => region1,
)
end
end
function create_arrays(
pℓ, pr, xℓ, xr, positions, state1, state3, state4, state5, x_arr, γ, t, xi
)
xhd, xft, xcd, xsh = positions
p1, ρ1, u1 = state1
p3, ρ3, u3 = state3
p4, ρ4, u4 = state4
p5, ρ5, u5 = state5
npts = length(x_arr)
ρ = zeros(npts)
p = zeros(npts)
u = zeros(npts)
c1 = sound_speed(γ, p1, ρ1)
gm1 = γ - 1
gp1 = γ + 1
if pℓ > pr
for (i, x) in enumerate(x_arr)
if x < xhd
ρ[i], p[i], u[i] = ρ1, p1, u1
elseif x < xft
u2 = 2 / gp1 * (c1 + (x - xi) / t)
fact = 1 - 0.5 * gm1 * u2 / c1
ρ2 = ρ1 * fact^(2 / gm1)
p2 = p1 * fact^(2 * γ / gm1)
ρ[i], p[i], u[i] = ρ2, p2, u2
elseif x < xcd
ρ[i], p[i], u[i] = ρ3, p3, u3
elseif x < xsh
ρ[i], p[i], u[i] = ρ4, p4, u4
else
ρ[i], p[i], u[i] = ρ5, p5, u5
end
end
else
for (i, x) in enumerate(x_arr)
if x < xhd
ρ[i], p[i], u[i] = ρ5, p5, u5
elseif x < xft
ρ[i], p[i], u[i] = ρ4, p4, u4
elseif x < xcd
ρ[i], p[i], u[i] = ρ3, p3, u3
elseif x < xsh
u2 = -2 / gp1 * (c1 + (xi - x) / t)
fact = 1 + 0.5 * (γ - 1) * u2 / c1
ρ2 = ρ1 * fact^(2 / gm1)
p2 = p1 * fact^(2 * γ / gm1)
ρ[i], p[i], u[i] = ρ2, p2, u2
else
ρ[i], p[i], u[i] = ρ1, p1, u1
end
end
end
return x_arr, p, ρ, u
end
function solve(left_state, right_state, geometry, t, γ, x_arr)
pℓ, ρℓ, uℓ = left_state.p, left_state.ρ, left_state.u
pr, ρr, ur = right_state.p, right_state.ρ, right_state.u
xℓ, xr, xi = geometry
if xℓ ≥ xr
error("xℓ has to be less than xr")
end
if xi ≤ xℓ || xi ≥ xr
error("xi has to be in between xℓ and xr")
end
region1, region3, region4, region5, w = calculate_regions(pℓ, uℓ, ρℓ, pr, ur, ρr, γ)
regions = region_states(pℓ, pr, region1, region3, region4, region5)
x_positions = calc_positions(pℓ, pr, region1, region3, w, xi, t, γ)
pos_descriptions = (
"Head of rarefaction", "Foot of rarefaction", "Contact Discontinuity", "Shock"
)
positions = Dict(desc => pos for (desc, pos) in zip(pos_descriptions, x_positions))
x, p, ρ, u = create_arrays(
pℓ, pr, xℓ, xr, x_positions, region1, region3, region4, region5, x_arr, γ, t, xi
)
energy = @. p / (γ - 1) + 0.5 * u^2
values = (x=x, ρ=ρ, p=p, u=u, e=energy)
return positions, regions, values
end
"""
ShockTubeProblem
Contains the parameters of a shock tube problem
# Fields
`geometry::Tuple{Float64, Float64, Float64}` Contains the locations of the (left edge, right edge, initial shock location)
`left_state` Completely specified thermodynamic state of the left side of the discontinuity (NamedTuple of p, ρ, u)
`right_state` Completely specified thermodynamic state of the right side of the discontinuity (NamedTuple of p, ρ, u)
`t::Float64` The time at which the shock tube problem will be solved
`γ::Float64` The heat capacity ratio of the gas in the shock tube
"""
Base.@kwdef struct ShockTubeProblem
geometry::Tuple{Float64,Float64,Float64}
left_state
right_state
t::Float64
γ::Float64
end
"""
solve(s::ShockTubeProblem, x_arr)
Solve the given shock tube problem at the provided x locations.
# Returns
positions: A `Dictionary` which maps descriptive names of the regions to x coordinates
regions: A `Dictionary` which maps regions ("Region 1", "Region 2", etc) to thermodynamic states (ρ, p, u) in the shock tube solution
values: A `NamedTuple` (;x, ρ, p, u, e) containing the x coordinates, the density, pressure, velocity, and stagnation energy, respectively
# Example
```jldoctest;setup = :(using SodShockTube)
julia> problem = ShockTubeProblem(
geometry = (0.0, 1.0, 0.5),
left_state = (ρ = 1.0, u = 0.0, p = 1.0),
right_state = (ρ = 0.125, u = 0.0, p = 0.1),
t = 0.1,
γ = 1.4
);
julia> xs = LinRange(0.0, 1.0, 500);
julia> positions, regions, values = solve(problem, xs);
julia> positions
Dict{String, Float64} with 4 entries:
"Shock" => 0.850431
"Foot of rarefaction" => 0.485945
"Head of rarefaction" => 0.263357
"Contact Discontinuity" => 0.685491
julia> regions
Dict{String, Any} with 5 entries:
"Region 5" => (0.1, 0.125, 0.0)
"Region 1" => (1.0, 1.0, 0.0)
"Region 4" => (0.30313, 0.265574, 0.927453)
"Region 3" => (0.30313, 0.426319, 0.927453)
"Region 2" => "RAREFACTION"
"""
function solve(s::ShockTubeProblem, x_arr)
return solve(s.left_state, s.right_state, s.geometry, s.t, s.γ, x_arr)
end
end
|
{"hexsha": "5c40f19e7e791a8fc4f68c5b0b60abc8ca90214f", "size": 7069, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SodShockTube.jl", "max_stars_repo_name": "archermarx/SodShockTube", "max_stars_repo_head_hexsha": "7b8ef18c9ca05ca78b51d8a4ca17769c0125e32c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/SodShockTube.jl", "max_issues_repo_name": "archermarx/SodShockTube", "max_issues_repo_head_hexsha": "7b8ef18c9ca05ca78b51d8a4ca17769c0125e32c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-14T22:22:04.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-14T22:22:05.000Z", "max_forks_repo_path": "src/SodShockTube.jl", "max_forks_repo_name": "archermarx/SodShockTube", "max_forks_repo_head_hexsha": "7b8ef18c9ca05ca78b51d8a4ca17769c0125e32c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2934362934, "max_line_length": 133, "alphanum_fraction": 0.5327486207, "num_tokens": 2737}
|
\chapter{Agents of Change}
\label{chap:agent}
According to Wikipedia today, survivorship bias is the ``logical error of concentrating on the people or things that made it past some selection process and overlooking those that did not, typically because of their lack of visibility. This can lead to false conclusions in several different ways. It is a form of selection bias'' \cite{wikipedia:survivorshipbias}. In other words, just because something happened ``against all odds'' doesn't make it inevitable or a good idea. The problem is that those who tried something and failed often don't survive to tell the story, and we end up hearing just from the unlikely survivors.
A great example of this is college drop-out entrepreneurs. I know the feeling of wanting to drop out because one has more pressing passions and projects, but I believe strongly that in most cases dropping out doesn't help people become successful, and others agree \cite{Zimmer:2013aa}. Any success that I've had is despite having dropped out of college and not because of it. I spent a lot of my time advising students to finish their degrees. :-)
In \autoref{sec:HAL} I wrote about our efforts to push computer science away from statistical correlations and towards causal relationships, asking whether something we did actually caused the change or whether it merely occurred at the same time. This link between interventions and outcomes is also the key to the theory of change described in \autoref{chap:theory}. Causality is very difficult to establish without a randomized controlled study, but parts of my life have been quite random and I've tried some of my interventions over and over in different ways, so I will try to sort out the generalizable causal interventions from the irrelevant ones. But as they say, ``Your mileage may vary.''
\section{Happiness}
In this dissertation, I have written at length about the goals of a system and the individuals in the system. The core aim of my thesis is to help shift the paradigm so that the goals of individuals and systems shift. My hope is that this will cause our systems to become more resilient, robust and sustainable.
The Declaration of Independence of the United States declares that ``[A]ll men are created equal, that they are endowed by their Creator with certain unalienable Rights, that among these are Life, Liberty and the pursuit of Happiness.''
In \emph{The Art of Happiness}, the Dalai Lama says, ``I believe that the very purpose of our life is to seek happiness.'' The Dalai Lama argues that happiness is determined more by the state of our minds than by our external conditions once our basic survival conditions have been met \cite{lama2009art}.
Abraham Maslow, in his 1943 paper ``A Theory of Human Motivation,'' argues that there are stages of growth in human needs. He used the terms ``physiological,'' ``safety,'' ``belonging and love,'' ``esteem,'' ``self-actualization,'' and ``self-transcendence'' to describe the layers in his ``hierarchy'' of needs pyramid \cite{maslow_theory_1943}. (See \autoref{fig:maslowpyramid}.) The lower layers of Maslow's hierarchy are fairly straightforward, but as one ascends to the higher layers such as social belonging, esteem, self-actualization and self-transcendence, the extrinsic versus intrinsic nature of the happiness or need is unclear. Self-transcendence was added by Maslow in his later years as he critically explored the dimension of needs beyond self-actualization \cite{maslow1991critique}.
\begin{figure}[t]
\centering
\includegraphics[width=.5\textwidth]{pictures/MaslowsHierarchyOfNeeds}
\caption[Maslow's hierarchy of needs]{Maslow's hierarchy of needs, represented as a pyramid with the more basic needs at the bottom. Source: FireflySixtySeven via Wikipedia. \ccbysa}
\label{fig:maslowpyramid}
\end{figure}
For example, esteem is mostly associated with extrinsic validation such as respect and recognition. However, self-esteem can be quite internal or intrinsic. Self-actualization includes art and athletics, which also have extrinsic and intrinsic motivations. Self-transcendence becomes less ego-oriented, but also can have motivations such as the need to help other humans, or intrinsic motivations such as becoming one with nature. It is clear that the higher levels of Maslow's hierarchy are less zero-sum and less competitive: the extrinsic versus intrinsic distinction changes the nature of the relationship between the individual and the community/society, as well as one's need to adhere to the social systems required for validation.
The Dalai Lama and the contemplative tradition focus more on intrinsic motivations. While most contemplative traditions aren't necessarily anti-social, a diminished need for external validation for happiness lets one be less concerned with the opinions of others, providing more freedom and time to become self-aware and achieve happiness through the pursuit of one's personal passions or interests. This is an approach that The Venerable Tenzin Priyadarshi and I teach in my Principles of Awareness class described in \autoref{section:awareness}.
The Dalai Lama in \emph{The Art of Happiness} discusses the difference between pleasure and happiness and argues that many people feel ``happy'' when they get more money, get a new car, or progress along an externally provided measurable path. He defines these feelings as pleasures and suggests that the happiness that he describes is more like the happiness of having a happy family. Increasing the size of the family doesn't make one happier. The Buddhist notion of happiness is quite different from happiness as understood by economists, who describe it as an increase in utility --- an economic measure \cite{marshall1961principles}. Later, utility was quantified by Paul Samuelson as ``revealed preference'' \cite{samuelson1948consumption}, and apparently the more utility the better. The Buddhists would feel more aligned with the adage ``more than enough is too much'' than with the notion of the utility function.
\marginpar{I would suggest the word ``flourishing'' as a way to define happiness or goodness without a need to include progress or growth. A vibrant rainforest is a great example of a flourishing system.}However, many argue that progress is essential and without extrinsic motivations and economically rational humans, we would not have progress. For example, Matt Ridley argues in \emph{The Rational Optimist} that humans have an innate tendency or desire to trade. He argues that markets enable this exchange and that this exchange allows specialization --- I can help build the Internet that you use and you can design the motor for my car and use the Internet. He says that this market-based exchange of goods, services, ideas and products allows progress, and enables ideas to ``have sex'' --- computers and telecommunications coming together to turn into the Internet \cite{ridley_rational_2010}.
While the idea is exciting and helps describe how much of innovation works, he's also describing the system that, in my view, causes income inequality, exploitation of the environment, and the deployment of technologies of convenience at the cost of health. Indeed, he doesn't question whether progress is in fact ``good.''
I would suggest the word ``flourishing'' as a way to define happiness or goodness without a need to include progress or growth. A vibrant rainforest is a great example of a flourishing system. It doesn't need to grow in total size. There is diversity, there is growth and death together. There are many systems that are interconnected and the system is highly robust. While there is some controversy about methods, there is a scientific approach to measuring ecosystem robustness \cite{mumby2014ecological}. There is evolution and ``progress'' but it is slow and more like a slow adaptive search than the geometric growth or even exponential growth of human civilization.
This notion of flourishing is described in \autoref{sec:cultureflourish} and is what I believe that we must strive for in order to achieve sustainable and long-term resilience in our systems.
In \emph{The Human Use of Human Beings}, Norbert Wiener questions our idea that progress is necessarily good.
\begin{quotation}Those who uphold the idea of progress as an ethical principle regard this unlimited and quasi-spontaneous process of change as a Good Thing, and as the basis on which they guarantee to future generations a Heaven on Earth. It is possible to believe in progress as a fact without believing in progress as an ethical principle; but in the catechism of many Americans, the one goes with the other.\end{quotation}
As I wrote in the introduction, ``eudaimonia'' and productive self-actualization, described by Aristotle in the \emph{Nicomachean Ethics} \cite{rowe2002nicomachean}, are useful concepts that include the notions of progress towards ethics and flourishing.
\section{Interest-Driven Learning}
As I was saying, I dropped out of college twice and was even kicked out of kindergarten for running away too many times. I was never very good at education, but I liked to learn. One of my principles (see \autoref{principles}) is ``learning over education.'' I believe that education is what other people do to you and learning is what you do for yourself.
It wasn't that my schools were particularly bad or that I wasn't provided an opportunity. My sister got straight A's and went to Harvard and Stanford and got two Ph.D.s. I just had a personality that made it difficult for me to learn in a structured way about things that I didn't find useful or interesting. I also had a difficult time studying abstractions in books. I much preferred learning through doing things and talking to people.
It was very lucky for me that I was surrounded by scientists, nature, and then online computer networks and the Internet as I was entering high school. I was able to kludge together an understanding of the world's conversations by pursuing a wide variety of interests including working in a pet shop, being a disk jockey in a nightclub, working as an associate to the producer in a Hollywood film, working in a material science lab writing software for the control system, running an events company, running a computer peripherals mail order company, being a professional scuba instructor, being a record distributor, a columnist in a newspaper, a software distributor, an apparel distributor and many other things.
While I believe that I am unusually poor at structured learning, unusually motivated by my passions and interests, and unusually interested in almost everything, I do believe that interest-driven learning is generalizable.
In 1973, Ivan Illich wrote \emph{Tools for Conviviality} and argued that there are two pivotal moments in the history of scientific and societal progress. The first was in 1913 when Western medicine improved to the point where trained doctors could increase their patients' odds past 50/50. The second was when we focused more on keeping people alive than on worrying about the quality of the patient's life or their agency. Illich writes, ``I choose the term `conviviality' to designate the opposite of industrial productivity. I intend it to mean autonomous and creative intercourse among persons, and the intercourse of persons with their environment; and this in contrast with the conditioned response of persons to the demands made upon them by others, and by a man-made environment'' \cite{Illich:1973aa}.
Illich blames professional elites and economic development for negatively impacting human flourishing in modern times by institutionalizing specialization and taking control of the tools of society away from the average citizen. He believes we must ``give people tools that guarantee their right to work with independent efficiency.''
This ties to his argument that modern education focuses on institutionalization and that, as he argues in 1971's \emph{Deschooling Society}, these institutions are reducing flourishing. He argues that the educational ``funnels'' must be reversed and that we must create ``learning webs'' using advanced technology \cite{Illich:1971aa}.
The Montessori Method \cite{montessori2013montessori} of child-centered education has been used for over 100 years \cite{montessoriintro}. While the Montessori Method is much more flexible and child-guided than traditional educational systems, it still provides a teacher who guides the child by observing and responding to the child's behavior and tendencies. Unschooling, which was coined in the 1970s by John Holt \cite{unschooling}, advocates an even more radical child-directed learning approach that focuses on freeing the child from any form of formal education, depending instead on our natural ability to learn, and a belief that we will learn what we need to learn in the course of doing what we are passionate about.
In my own experience, the only practical thing that I learned how to do in my secondary formal education was touch typing. Otherwise, everything I learned, I learned out of class, except perhaps social skills, which I could have learned just as easily through group activities rather than by sitting in a classroom. As I consider the future of schooling for my one-year-old daughter, I am thinking deeply about different educational options.
Jean Piaget in the 1930s said that cognitive development in children occurs as children interact with the world around them \cite{piaget1952origins}. Seymour Papert worked with Piaget at the University of Geneva from 1958 to 1963 \cite{SeymourP62:online} and was one of Piaget's protégés. Papert was a founding member of the MIT Media Lab and developed a theory of learning called Constructionism --- student-centered, project-based learning-through-doing --- that is at the core of the Media Lab, as I described in \autoref{antidisciplinaryapproach}. Papert inspired others at the Media Lab, including Mitchel Resnick, who argues for cultivating creative learning through ``projects, passion, peers and play'' \cite{resnick_lifelong_2018}. Resnick developed the Scratch programming language to empower children to ``code to learn'' instead of ``learning to code.'' Neil Gershenfeld is a former Media Lab professor, the director of the Center for Bits and Atoms, the inventor of the Fab Lab (short for fabrication laboratory), and the creator of the Fab Academy, a learning network. He is trying to create a network of Fab Labs to bring learning-through-making to the rest of the world. Gershenfeld says that Fab also means fabulous --- a kind of flourishing that Illich would have approved of \cite{gershenfeld2008fab}.
I believe that creativity and passion will become even more important in the future and that jobs will become even more differentiated. I also believe that learning how to follow your personal passions, rather than depending on institutions to provide motivation, will be increasingly important as jobs change and institutions go through the current industrial transformation.
Passion can come from a variety of sources. In \emph{The Wealth of Networks}, Yochai Benkler explains that the motivations for many of the online communities to produce is not financial \cite{benkler2006wealth}. In \emph{Not Just for the Money} economist Bruno Frey argues that offering higher pay may make people less committed to their work and may reduce performance \cite{frey1997not}. The social context and our desire to collaborate is a key element in developing passions.
As I write in \autoref{intro:communityvalues}, Nowak and Highfield describe the evolution of cooperation and mechanisms for cooperation. They argue that evolution is not only competition but also cooperation, and that cooperation is the master architect of complexity. In ``Spontaneous giving and calculated greed,'' researchers argue that people are intuitively cooperative and thus need to ``calculate'' to overcome their cooperative impulse and become greedy \cite{rand2012spontaneous}. In ``Cooperating with the future'' \cite{hauser2014cooperating}, the argument is that a large, altruistic majority would vote to cooperate with a longer view of the future in a democratic setting.
\section{Competition and Greed}
I also have competitive and greedy feelings sometimes, but they are fundamentally overpowered by my passion for the missions of my projects and my desire to collaborate and cooperate, which is supported by the studies above.
The following is an exchange on television between Phil Donahue and economist Milton Friedman from 1979 \cite{noauthor_notable_2015}:
\begin{quote}
Phil Donahue: When you see around the globe the maldistribution of wealth, the desperate plight of millions of people in underdeveloped countries, when you see so few haves and so many have-nots, when you see the greed and the concentration of power, did you ever have a moment of doubt about capitalism and whether greed’s a good idea to run on?
\marginpar{Milton Friedman: Well, first of all, tell me, is there some society you know that doesn’t run on greed? You think Russia doesn’t run on greed? You think China doesn’t run on greed? What is greed?}Milton Friedman: Well, first of all, tell me, is there some society you know that doesn’t run on greed? You think Russia doesn’t run on greed? You think China doesn’t run on greed? What is greed? Of course none of us are greedy. It’s only the other fellow who’s greedy. The world runs on individuals pursuing their separate interests. The great achievements of civilization have not come from government bureaus. Einstein didn’t construct his theory under order from a bureaucrat. Henry Ford didn’t revolutionize the automobile industry that way. In the only cases in which the masses have escaped from the kind of grinding poverty you’re talking about, the only cases in recorded history are where they have had capitalism and largely free trade. If you want to know where the masses are worst off, it’s exactly in the kinds of societies that depart from that. So that the record of history is absolutely crystal clear that there is no alternative way, so far discovered, of improving the lot of the ordinary people that can hold a candle to the productive activities that are unleashed by a free enterprise system.
Donahue: But it seems to reward not virtue as much as ability to manipulate the system.
Friedman: And what does reward virtue? . . . I think you’re taking a lot of things for granted. Just tell me where in the world you find these angels who are going to organize society for us.
\end{quote}
I think this exchange captures the essence of the capitalist ``greed is good'' philosophy that has caused the reductionist, single-minded pursuit of personal wealth that has undermined robustness, resilience, and flourishing, and has crowded out many of the intrinsic and more positive extrinsic motivators in our society.
There is a place for competition and there is a role for self-interest, but these elements should be part of a complex system of values and drivers, and tend to address the lower elements of Maslow's hierarchy.
And one of the problems with Maslow's hierarchy is that it assumes we are individuals first and foremost. When it comes to how we run our academic system, why do we demand academics prove themselves as individuals rather than as participants in a group? The tenure process and even the doctoral process focus on the individual even though their work will almost inevitably occur within and because of a social web. Some fields, such as high-energy experimental physics, have begun to be more open to large collective projects, but for the most part, academics are judged as individuals. In fact, this doctoral process required me to justify why I didn't have two single-authored books and two single-authored papers.
The tenure process as I have observed it at MIT has the same problem and pushes junior faculty to worry constantly about seeking external validation of their individual work.
I find that my staff, including my research staff, are fundamentally less competitive and more mission-oriented than the majority of the faculty at MIT. Yet their productivity and creativity exceed all expectations. My impression is that they are happier as well.
I often wonder whether we can have the creativity and the drive required to make brilliant contributions to society without the competition that drives many of the significant achievements.
My experience with the leaders and the community members of Creative Commons and Internet technical communities involved dealing with some level of ``drama'' --- competition, egos and greed --- but these people and incidents felt more like anomalies and ``problems'' than the normal behavior that Milton Friedman describes so well and is so institutionalized in traditional corporate environments and in some elements of academia.
Startups have a fair share of greed, but many of the successful companies also manage to have healthy, cooperative, mission-oriented, and socially sensitive cultures. This could be a shift in the demographic, as young people are more concerned about the systems and less driven by the greed of their neo-liberal economic parents. The 2016 Cone Communications Millennial Employee Engagement Study showed that 64 percent of Millennials in the US ``won’t take a job if a company doesn’t have strong corporate social responsibility (CSR) values'' \cite{conestudy}.
\section{Disobedience}
\marginpar{Martin Luther King Jr. wrote, ``One has not only a legal but a moral responsibility to obey just laws. Conversely, one has a moral responsibility to disobey unjust laws'' \cite{king2012letter}.}Enjoying cooperation and flourishing does not mean that we must be obedient. For systems to evolve, they require variation, mutations and diversity. Disobedience is necessary to question the status quo in science, society, law or the arts. Timothy Leary used to tell me, ``Question authority and think for yourself.'' Martin Luther King Jr. wrote, ``One has not only a legal but a moral responsibility to obey just laws. Conversely, one has a moral responsibility to disobey unjust laws'' \cite{king2012letter}.
A healthy democracy, a healthy academic institution, a healthy community must be disobedience-robust. In other words, people should be allowed and encouraged to speak up against those in power without fear of retribution, and the questioning should make the system more robust, not fragile. This requires a great deal of trust between the individuals and the institution, and a strong constitution on the part of the institution to turn disobedience into positive energy.
I believe that the celebration of socially responsible disobedience is essential. You don't win a Nobel Prize for doing as you're instructed; you win a Nobel Prize by questioning authority and overthrowing previous theories. Max Planck cynically wrote, ``A new scientific truth does not triumph by convincing its opponents and making them see the light, but rather because its opponents eventually die, and a new generation grows up that is familiar with it'' \cite{planck2014scientific}, which accurately summarizes the problem with our current academic system.
\begin{figure}[t]
\centering
\includegraphics[width=1\textwidth]{pictures/disobedience}
\caption{A graphic from the award ceremony for the MIT Media Lab Disobedience Award in 2017.}
\label{fig:disobedience}
\end{figure}
With the support of entrepreneur and philanthropist Reid Hoffman, I have been awarding a \$250,000 prize for disobedience from the Media Lab since last year. (Artwork from the prize is shown in \autoref{fig:disobedience}.) As we say on the website \cite{disobedience2018}:
\begin{quote}
This award will go to a person or group engaged in what we believe is an extraordinary example of disobedience for the benefit of society: work that impacts society in positive ways, and is consistent with a set of key principles, including nonviolence, creativity, courage, and responsibility for one’s actions. We invite nominations for work across disciplines (scientific research, civil rights, freedom of speech, human rights, and the freedom to innovate, for example).
\end{quote}
Last year, we gave the award to Dr. Mona Hanna-Attisha and Professor Marc Edwards. ``Both are scientists who became activists, using rigorous research to investigate the concerns of citizens in Flint, Michigan to unravel a mystery that many in positions of power would have preferred to keep under wraps'' \cite{disobedience2017}.
We believe that this is a small symbolic gesture, but it sends a signal to our community as well as to the rest of the world that we should support and celebrate positive disobedience.
\section{Civility and Governance}
Although Illich used conviviality to mean ``autonomous and creative intercourse among persons, and the intercourse of persons with their environment,'' it traditionally means friendliness. While one can actually be disobedient in a friendly way (many of my students are), it is easy to be disobedient and disruptive in an unfriendly and uncivil way.
\marginpar{I think that the ultimate role of a leader in an open non-hierarchical system is to tend to its robustness and its resilience --- to focus on its flourishing.}Whether we are talking about trolls on mailing lists as I described in \autoref{sec:emergentdemo} or world leaders taunting the public or other world leaders, the enforcement of civility or conviviality in the more traditional sense appears to be harder in decentralized bottom-up organizations.
In \autoref{intro:design} I wrote about an experiment in the feminist movement that tried to reject the idea of leaders but ended up in an informal and less accountable form of leadership, as described by Jo Freeman in ``The Tyranny of Structurelessness'' \cite{freeman1972tyranny}. Clearly having no structure is not the answer.
One study of guilds in the online game \emph{World of Warcraft} showed that guilds developed roles that focused on managing both the well-being of the players as well as the productivity and success of these guilds \cite{williams2014structural}. For many years, I ran a rather large \emph{World of Warcraft} guild, managing the diverse group of players who were paying money to collaborate with each other; \emph{World of Warcraft} charges a monthly subscription fee. Managing this community was surprisingly similar to my role as the director of the Media Lab where the primary motivation for participating was not for the money or a very obvious progression path.
Most free and open source projects have similar dynamics --- Wikipedia, Bitcoin, Linux, etc. There is often, but not always, a core group of people who are ultimately in charge. However, most disputes are settled at the local level and through consensus.
Consensus means that everyone reaches an agreement to move forward through discussion. I learned from being on the ICANN board for three years that consensus does not require that everyone ultimately gets what they want, but that you have enough discussion so all voices are heard and the people who object to the majority eventually are convinced or get so tired that they agree to go along. Since everyone was in the room when the decision was made, they can't complain later. At ICANN we would have hours of ``open mic'' where the community would voice objections and grievances, but because we heard them out, we could reach a consensus to move forward.
Consensus doesn't always work. When you can't reach consensus, you vote. Voting is never the first choice.
The role of a leader in an open non-hierarchical system is usually to manage the process, sometimes to make tie-breaking decisions, and sometimes to deal quietly behind the scenes with problems that have privacy issues that prevent a public discussion or need a speedy response that a large community cannot provide.
However, I think that the ultimate role of a leader in an open non-hierarchical system is to tend to its robustness and its resilience --- to focus on its flourishing. Often it feels like gardening --- watering the plants, sometimes pruning, sometimes moving seedlings, but mostly just making sure that all of the organisms are able to be the best versions of themselves, and creating enough connections and diversity so that the garden is able to fend off the pests and bad weather by itself.
While trolls can always cause trouble, as we can see from the polarization in society today, I believe that the best method for dealing with bad culture is good culture. Attacking \emph{Clostridium difficile} with antibiotics doesn't work well, but a fecal transplant from a healthy person worked 87 percent of the time in a recent study \cite{jiang2017randomised}. The best way to fight the pathogen is to introduce a diverse and healthy culture, not try to eliminate it. Bombing terrorists has a similar effect to the antibiotics: it kills the healthy culture and makes the negative culture stronger because it ends up with more space, resources and renewed purpose. The old adage ``Don't feed the trolls'' has a similar point. Getting angry and focused on the trolls will deplete your energy, and is often exactly what the trolls are trying to achieve, giving them more energy and maybe even attracting more.
In ``Why Civil Resistance Works,'' Maria J. Stephan and Erica Chenoweth study the strategic effectiveness of violent and nonviolent conflicts between 1900 and 2006. They show that nonviolent campaigns succeeded 53 percent of the time compared to 26 percent of violent resistance campaigns \cite{chenoweth2011civil}. While the success of a nonviolent campaign is still only a coin-toss, it's statistically more likely to succeed in the conflicts that they studied than violent resistance. The notion of ``satyagraha'' or ``truth force,'' espoused by Gandhi \cite{majmudar2012gandhi} is, in my view, the most effective form of non-violent action.
While most non-violent action is typically employed by communities fighting against the establishment, the basic tenets can work in undermining and disabling negative individuals or sub-communities. In the long run, ``taking the higher road'' is the most sustainable way to build a trusting and robust community with a strong positive culture.
\section{Self-Awareness and Humility}
Whether we are talking about trolls or participant design, the key is to focus on doing a better job yourself instead of trying to tell others what to do or how to do it. I believe that whether we are talking about an individual or an institution, striving for strong core values and excellence, and being open, transparent, and accessible allows others to copy the patterns that work for them in their context. It's important to design organizations for transparency because it's difficult to transform closed organizations into transparent ones \cite{Ito2011Designings}.
Communities are defined by their differences: differences in diversity, size, resources, mission, history, and technical landscapes. Good values can transfer across communities, and adjacent communities can adopt sensibilities and ideas, translating them into local values. We have seen that courage --- from Gandhi's image on the cover of \emph{Life Magazine}, to the Tunisian protesters on social media, to the Parkland students --- can transmit across communities very quickly.
I believe that the most humble, and the most effective, approach to changing the world is to make yourself and your own community better and more flourishing, and to share ideas and connections as freely as possible.
For this, communities and individuals need to become self-aware and reflective so that they --- we --- are able to deprogram the conditioning of decades of institutional education, institutionalized social inequity, and ``greed is good'' justifications for exploitative capitalism. Only thus can we continue to strive to make ourselves better versions of ourselves.
(How ``better'' is defined will be covered in future work.)
|
{"hexsha": "563df5bcc1a91c0bf845a472e9853f5821845451", "size": 32105, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapters/Chapter05.tex", "max_stars_repo_name": "lasernite/phd-dissertation", "max_stars_repo_head_hexsha": "7b3d83a75b520c297c28e22580c2f00a0bfdda5e", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapters/Chapter05.tex", "max_issues_repo_name": "lasernite/phd-dissertation", "max_issues_repo_head_hexsha": "7b3d83a75b520c297c28e22580c2f00a0bfdda5e", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapters/Chapter05.tex", "max_forks_repo_name": "lasernite/phd-dissertation", "max_forks_repo_head_hexsha": "7b3d83a75b520c297c28e22580c2f00a0bfdda5e", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 189.9704142012, "max_line_length": 1323, "alphanum_fraction": 0.8077869491, "num_tokens": 6557}
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.metrics import accuracy_score
import matplotlib as mpl
import matplotlib.colors
import matplotlib.pyplot as plt
if __name__ == "__main__":
data = pd.read_csv('bipartition.txt', sep='\t', header=None)
x, y = data[[0, 1]], data[2]
# Classifier configurations: (kernel, C) for linear, (kernel, C, gamma) for RBF
clf_param = (('linear', 0.1), ('linear', 0.5), ('linear', 1), ('linear', 2),
('rbf', 1, 0.1), ('rbf', 1, 1), ('rbf', 1, 10), ('rbf', 1, 100),
('rbf', 5, 0.1), ('rbf', 5, 1), ('rbf', 5, 10), ('rbf', 5, 100))
x1_min, x2_min = np.min(x, axis=0)
x1_max, x2_max = np.max(x, axis=0)
x1, x2 = np.mgrid[x1_min:x1_max:200j, x2_min:x2_max:200j]
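# Note: np.mgrid with an imaginary step (200j) yields 200 evenly spaced
# samples per axis, endpoints included.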
grid_test = np.stack((x1.flat, x2.flat), axis=1)
cm_light = mpl.colors.ListedColormap(['#77E0A0', '#FFA0A0'])
cm_dark = mpl.colors.ListedColormap(['g', 'r'])
mpl.rcParams['font.sans-serif'] = ['SimHei']
mpl.rcParams['axes.unicode_minus'] = False
plt.figure(figsize=(13, 9), facecolor='w')
for i, param in enumerate(clf_param):
clf = svm.SVC(C=param[1], kernel=param[0])
if param[0] == 'rbf':
clf.gamma = param[2]
title = r'Gaussian kernel, C=%.1f, $\gamma$=%.1f' % (param[1], param[2])
else:
title = 'Linear kernel, C=%.1f' % param[1]
clf.fit(x, y)
y_hat = clf.predict(x)
print('Accuracy:', accuracy_score(y, y_hat))
# Plotting
print(title)
print('Number of support vectors:', clf.n_support_)
print('Dual coefficients of the support vectors:', clf.dual_coef_)
print('Support vector indices:', clf.support_)
plt.subplot(3, 4, i+1)
grid_hat = clf.predict(grid_test) # predicted class for each grid point
grid_hat = grid_hat.reshape(x1.shape) # reshape to match the grid
plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light, alpha=0.8)
plt.scatter(x[0], x[1], c=y, edgecolors='k', s=40, cmap=cm_dark) # plot the samples
plt.scatter(x.loc[clf.support_, 0], x.loc[clf.support_, 1], edgecolors='k', facecolors='none', s=100, marker='o') # highlight the support vectors
z = clf.decision_function(grid_test)
# print('z =\n', z)
print('clf.decision_function(x) = ', clf.decision_function(x))
print('clf.predict(x) = ', clf.predict(x))
z = z.reshape(x1.shape)
plt.contour(x1, x2, z, colors=list('kbrbk'), linestyles=['--', '--', '-', '--', '--'],
linewidths=[1, 0.5, 1.5, 0.5, 1], levels=[-1, -0.5, 0, 0.5, 1])
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.title(title, fontsize=12)
plt.suptitle('SVM classification with different parameters', fontsize=16)
plt.tight_layout(pad=1.4)
plt.subplots_adjust(top=0.92)
plt.show()
|
{"hexsha": "da9897c98d7f56d87f49f44b792f03c634a6c695", "size": 2668, "ext": "py", "lang": "Python", "max_stars_repo_path": "DMProject/15.package/15.3.SVM_draw.py", "max_stars_repo_name": "gongjunhuang/Spider", "max_stars_repo_head_hexsha": "c683137dafac9c7f4afd359baf9d0717d1a127e2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-02-26T15:45:17.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-26T15:45:17.000Z", "max_issues_repo_path": "DMProject/15.package/15.3.SVM_draw.py", "max_issues_repo_name": "gongjunhuang/Spider", "max_issues_repo_head_hexsha": "c683137dafac9c7f4afd359baf9d0717d1a127e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DMProject/15.package/15.3.SVM_draw.py", "max_forks_repo_name": "gongjunhuang/Spider", "max_forks_repo_head_hexsha": "c683137dafac9c7f4afd359baf9d0717d1a127e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2352941176, "max_line_length": 130, "alphanum_fraction": 0.5610944528, "include": true, "reason": "import numpy", "num_tokens": 949}
|
################################################################################
# MIT License
#
# Copyright (c) 2021 Hajime Nakagami<nakagami@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
const SQL_TYPE_TEXT = 452
const SQL_TYPE_VARYING = 448
const SQL_TYPE_SHORT = 500
const SQL_TYPE_LONG = 496
const SQL_TYPE_FLOAT = 482
const SQL_TYPE_DOUBLE = 480
const SQL_TYPE_D_FLOAT = 530
const SQL_TYPE_TIMESTAMP = 510
const SQL_TYPE_BLOB = 520
const SQL_TYPE_ARRAY = 540
const SQL_TYPE_QUAD = 550
const SQL_TYPE_TIME = 560
const SQL_TYPE_DATE = 570
const SQL_TYPE_INT64 = 580
const SQL_TYPE_INT128 = 32752
const SQL_TYPE_TIMESTAMP_TZ = 32754
const SQL_TYPE_TIME_TZ = 32756
const SQL_TYPE_DEC_FIXED = 32758
const SQL_TYPE_DEC64 = 32760
const SQL_TYPE_DEC128 = 32762
const SQL_TYPE_BOOLEAN = 32764
const SQL_TYPE_NULL = 32766
xsqlvar_type_length = Dict(
SQL_TYPE_TEXT=>-1,
SQL_TYPE_VARYING=>-1,
SQL_TYPE_SHORT=>4,
SQL_TYPE_LONG=>4,
SQL_TYPE_FLOAT=>4,
SQL_TYPE_TIME=>4,
SQL_TYPE_DATE=>4,
SQL_TYPE_DOUBLE=>8,
SQL_TYPE_TIMESTAMP=>8,
SQL_TYPE_BLOB=>8,
SQL_TYPE_ARRAY=>8,
SQL_TYPE_QUAD=>8,
SQL_TYPE_INT64=>8,
SQL_TYPE_INT128=>16,
SQL_TYPE_TIMESTAMP_TZ=>12,
SQL_TYPE_TIME_TZ=>8,
SQL_TYPE_DEC64=>8,
SQL_TYPE_DEC128=>16,
SQL_TYPE_DEC_FIXED=>16,
SQL_TYPE_BOOLEAN=>1,
)
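# A value of -1 presumably marks variable-length types: TEXT takes its byte
# length from sqllen (see io_length below) and VARYING carries a length
# prefix on the wire.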
mutable struct XSQLVAR
sqltype::Int
sqlscale::Int
sqlsubtype::Int
sqllen::Int
null_ok::Bool
fieldname::String
relname::String
ownname::String
aliasname::String
end
function io_length(x::XSQLVAR)::Int
x.sqltype == SQL_TYPE_TEXT ? x.sqllen : xsqlvar_type_length[x.sqltype]
end
function has_precision_scale(x::XSQLVAR)::Bool
(x.sqltype == SQL_TYPE_SHORT ||
x.sqltype == SQL_TYPE_LONG ||
x.sqltype == SQL_TYPE_QUAD ||
x.sqltype == SQL_TYPE_INT64 ||
x.sqltype == SQL_TYPE_INT128 ||
x.sqltype == SQL_TYPE_DEC64 ||
x.sqltype == SQL_TYPE_DEC128 ||
x.sqltype == SQL_TYPE_DEC_FIXED) && x.sqlscale != 0
end
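# Firebird stores DATE as a day count since 1858-11-17 (the modified Julian
# date epoch); adding 678882 shifts it onto a day number that the standard
# civil-calendar arithmetic below converts to (year, month, day).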
function _parse_date(raw_value::Vector{UInt8})::Tuple{Int, Int, Int}
nday = bytes_to_buint32(raw_value) + 678882
century = div(4 * nday - 1, 146097)
nday = 4 * nday - 1 - 146097 * century
day = div(nday, 4)
nday = div(4 * day + 3, 1461)
day = 4 * day + 3 - 1461 * nday
day = div(day + 4, 4)
month = div(5 * day - 3, 153)
day = 5 * day - 3 - 153 * month
day = div(day + 5, 5)
year = 100 * century + nday
if month < 10
month += 3
else
month -= 9
year += 1
end
(year, month, day)
end
function _parse_time(raw_value::Vector{UInt8})
n = bytes_to_buint32(raw_value)
s = div(n, 10000)
m = div(s, 60)
h = div(m, 60)
m = m % 60
s = s % 60
(h, m, s, (n % 10000) * 100000)
end
function parse_date(raw_value::Vector{UInt8})::Date
year, month, day = _parse_date(raw_value)
Date(year, month, day)
end
function parse_time(raw_value::Vector{UInt8})::Time
h, m, s, n = _parse_time(raw_value)
Time(h, m, s, div(n, 1000000))
end
function parse_timestamp(raw_value::Vector{UInt8})::DateTime
year, month, day = _parse_date(raw_value[1:4])
h, m, s, n = _parse_time(raw_value[5:8])
DateTime(year, month, day, h, m, s, div(n, 1000000))
end
function parse_time_tz(raw_value::Vector{UInt8})::ZonedDateTime
h, m, s, n = _parse_time(raw_value[1:4])
timezone = TimeZones.TimeZone(get_timezone_name_by_id_dict()[bytes_to_buint16(raw_value[5:6])])
offset = TimeZones.TimeZone(get_timezone_name_by_id_dict()[bytes_to_buint16(raw_value[7:8])])
zdt = ZonedDateTime(0, 1, 1, h, m, s, div(n, 1000000), timezone)
astimezone(zdt, offset)
end
function parse_timestamp_tz(raw_value::Vector{UInt8})::ZonedDateTime
year, month, day = _parse_date(raw_value[1:4])
h, m, s, n = _parse_time(raw_value[5:8])
timezone = TimeZones.TimeZone(get_timezone_name_by_id_dict()[bytes_to_buint16(raw_value[9:10])])
offset = TimeZones.TimeZone(get_timezone_name_by_id_dict()[bytes_to_buint16(raw_value[11:12])])
zdt = ZonedDateTime(year, month, day, h, m, s, div(n, 1000000), timezone)
astimezone(zdt, offset)
end
function value(x::XSQLVAR, raw_value::Vector{UInt8})
if x.sqltype == SQL_TYPE_TEXT
if x.sqlsubtype == 1
raw_value
else
String(raw_value)
end
elseif x.sqltype == SQL_TYPE_VARYING
if x.sqlsubtype == 1
raw_value
else
String(raw_value)
end
elseif x.sqltype == SQL_TYPE_SHORT
i16::Int16 = bytes_to_bint16(raw_value)
if x.sqlscale == 0
i16
else
if i16 < 0
Decimal(1, i16*-1, x.sqlscale)
else
Decimal(0, i16, x.sqlscale)
end
end
elseif x.sqltype == SQL_TYPE_LONG
i32::Int32 = bytes_to_bint32(raw_value)
if x.sqlscale == 0
i32
else
if i32 < 0
Decimal(1, i32*-1, x.sqlscale)
else
Decimal(0, i32, x.sqlscale)
end
end
elseif x.sqltype == SQL_TYPE_FLOAT
reinterpret(Float32, raw_value)[1]
elseif x.sqltype == SQL_TYPE_DOUBLE
reinterpret(Float64, raw_value)[1]
elseif x.sqltype == SQL_TYPE_TIMESTAMP
parse_timestamp(raw_value)
elseif x.sqltype == SQL_TYPE_BLOB
raw_value
elseif x.sqltype == SQL_TYPE_TIME
parse_time(raw_value)
elseif x.sqltype == SQL_TYPE_DATE
parse_date(raw_value)
elseif x.sqltype == SQL_TYPE_INT64
i64::Int64 = bytes_to_bint64(raw_value)
if x.sqlscale == 0
i64
else
if i64 < 0
Decimal(1, i64*-1, x.sqlscale)
else
Decimal(0, i64, x.sqlscale)
end
end
elseif x.sqltype == SQL_TYPE_INT128
i128::Int128 = bytes_to_bint128(raw_value)
if x.sqlscale == 0
i128
else
if i128 < 0
Decimal(1, i128*-1, x.sqlscale)
else
Decimal(0, i128, x.sqlscale)
end
end
elseif x.sqltype == SQL_TYPE_TIMESTAMP_TZ
parse_timestamp_tz(raw_value)
elseif x.sqltype == SQL_TYPE_TIME_TZ
parse_time_tz(raw_value)
elseif x.sqltype == SQL_TYPE_DEC_FIXED
decimal_fiexed_to_decimal(raw_value)
elseif x.sqltype == SQL_TYPE_DEC64
decimal64_to_decimal(raw_value)
elseif x.sqltype == SQL_TYPE_DEC128
decimal128_to_decimal(raw_value)
elseif x.sqltype == SQL_TYPE_BOOLEAN
raw_value[1] != 0
elseif x.sqltype == SQL_TYPE_NULL
missing
end
end
function juliatype(x::XSQLVAR)
if x.sqltype == SQL_TYPE_TEXT
if x.sqlsubtype == 1
T = Vector{UInt8}
else
T = String
end
elseif x.sqltype == SQL_TYPE_VARYING
if x.sqlsubtype == 1
T = Vector{UInt8}
else
T = String
end
elseif x.sqltype == SQL_TYPE_SHORT
if x.sqlscale != 0
T = Decimal
else
T = Int16
end
elseif x.sqltype == SQL_TYPE_LONG
if x.sqlscale != 0
T = Decimal
else
T = Int32
end
elseif x.sqltype == SQL_TYPE_FLOAT
T = Float32
elseif x.sqltype == SQL_TYPE_DOUBLE
T = Float64
elseif x.sqltype == SQL_TYPE_TIMESTAMP
T = DateTime
elseif x.sqltype == SQL_TYPE_BLOB
if x.sqlsubtype == 1 # TEXT
T = String
else
T = Vector{UInt8}
end
elseif x.sqltype == SQL_TYPE_TIME
T = Time
elseif x.sqltype == SQL_TYPE_DATE
T = Date
elseif x.sqltype == SQL_TYPE_INT64
if x.sqlscale != 0
T = Decimal
else
T = Int64
end
elseif x.sqltype == SQL_TYPE_INT128
if x.sqlscale != 0
T = Decimal
else
T = Int128
end
elseif x.sqltype == SQL_TYPE_TIMESTAMP_TZ
T = ZonedDateTime
elseif x.sqltype == SQL_TYPE_TIME_TZ
T = ZonedDateTime
elseif x.sqltype == SQL_TYPE_DEC_FIXED
T = Decimal
elseif x.sqltype == SQL_TYPE_DEC64
T = Decimal
elseif x.sqltype == SQL_TYPE_DEC128
T = Decimal
elseif x.sqltype == SQL_TYPE_BOOLEAN
T = Bool
elseif x.sqltype == SQL_TYPE_NULL
T = Missing
end
x.null_ok ? Union{Missing, T} : T
end
|
{"hexsha": "06543d794fa80e1e2732b279b7029335ca46cee7", "size": 9550, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/xsqlvar.jl", "max_stars_repo_name": "nakagami/Firebird.jl", "max_stars_repo_head_hexsha": "5fb5902c892645c58a60e4fd8a42e32d930045c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-11T09:51:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T10:54:20.000Z", "max_issues_repo_path": "src/xsqlvar.jl", "max_issues_repo_name": "nakagami/Firebird.jl", "max_issues_repo_head_hexsha": "5fb5902c892645c58a60e4fd8a42e32d930045c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-04T10:14:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-04T14:01:38.000Z", "max_forks_repo_path": "src/xsqlvar.jl", "max_forks_repo_name": "nakagami/Firebird.jl", "max_forks_repo_head_hexsha": "5fb5902c892645c58a60e4fd8a42e32d930045c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6583850932, "max_line_length": 100, "alphanum_fraction": 0.6207329843, "num_tokens": 2696}
|
import riptable as rt
import random as rand
import pandas as pd
import unittest
functions_str = [
'count',
'sum',
'mean',
'median',
'min',
'max',
# 'prod',
'var',
# 'quantile',
'cumsum',
'cumprod',
# 'cummax',
# 'cummin'
'first',
'last',
# 'mode'
]
import numpy as np
type_list = [
# np.bool, ## not a numeric type
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
# np.float16, ## not supported
np.float32,
np.float64,
# np.complex64, ## not supported
# np.complex128 ## not supported
]
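# NaN-tolerant elementwise comparison: unlike `==`, two NaNs in the same
# position are treated as equal, which matters for aggregations that emit
# missing values.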
def safe_equal(ary1, ary2):
assert len(ary1) == len(ary2)
def isNaN(num):
return num != num
for a, b in zip(ary1, ary2):
if not (a == b or (isNaN(a) and isNaN(b))):
return False
return True
class GroupbyFunctions_Test(unittest.TestCase):
def groupby_func(self, df, fn):
return getattr(df, functions_str[fn])
def test_single_col_groupby_tests(self):
Values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Keys = ['a', 'b', 'c', 'a', 'b', 'c', 'd', 'e', 'f']
for type_ in type_list:
data = {'Vs': rt.FastArray(Values, dtype=type_), 'Ks': Keys}
pd_data = pd.DataFrame(data)
sfw_data = rt.Dataset(data)
key = 'Ks'
val = 'Vs'
pd_gb = pd_data.groupby(key)
sfw_gb = sfw_data.groupby(key)
for name in functions_str:
pd_func = getattr(pd_gb, name)
sfw_func = getattr(sfw_gb, name)
pd_out = pd_func()
sfw_out = sfw_func()
pd_col = pd_out[val]._values
if name == 'count':
sfw_col = sfw_out['Count']
else:
sfw_col = sfw_out[val]
is_integer_subtype = np.issubdtype(type_, np.integer)
is_median = name == 'median'
# Integer median results differ between pandas and riptable, so skip them.
if not safe_equal(pd_col, sfw_col) and not (is_integer_subtype and is_median):
print('data_type_t = ', type_)
print('function =', name)
print('pandas output =', pd_col)
print('sfw output =', sfw_col)
# TODO move as error message following assert
self.assertTrue(False)
# TODO pytest parameterize type_list
def test_multi_col_groupby_tests(self, numb_keys_and_values=5, numb_rows=20):
col_val_names = ['alpha', 'beta', 'gamma', 'sigma', 'zeta']
col_key_names = ['lions', 'tigers', 'bears', 'oh', 'my']
MAX_LENGTH = min(len(col_val_names), len(col_key_names))
assert numb_keys_and_values <= MAX_LENGTH
for type_ in type_list:
vals = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
keys = 'a b c d e f g'.split(' ')
vs = []
ks = []
for i in range(0, numb_keys_and_values):
vs.append(
[vals[rand.randint(0, len(vals) - 1)] for i in range(0, numb_rows)]
)
ks.append(
[keys[rand.randint(0, len(keys) - 1)] for i in range(0, numb_rows)]
)
data = {}
for i in range(0, numb_keys_and_values):
data[col_val_names[i]] = rt.FastArray(vs[i], dtype=type_)
data[col_key_names[i]] = rt.FastArray(ks[i])  # use the generated keys, not the values
pd_data = pd.DataFrame(data)
sfw_data = rt.Dataset(data)
key = col_key_names[0:numb_keys_and_values]
val = col_val_names[0:numb_keys_and_values]
pd_gb = pd_data.groupby(key)
sfw_gb = sfw_data.groupby(key)
for name in functions_str:
pd_out = getattr(pd_gb, name)()
sfw_out = getattr(sfw_gb, name)()
if name == 'count':
# only compare one column for count
pd_col = pd_out['alpha']
sfw_col = sfw_out.Count
if not safe_equal(pd_col, sfw_col):
print('function =', name)
print('pandas output =', pd_col)
print('sfw output =', sfw_col)
self.assertTrue(False)
else:
for val in col_val_names:
# extract array from pandas series
pd_col = pd_out[val]._values
sfw_col = sfw_out[val]
is_integer_subtype = np.issubdtype(type_, np.integer)
is_median = name == 'median'
# Integer median results differ between pandas and riptable, so skip them.
if not safe_equal(pd_col, sfw_col) and not (is_integer_subtype and is_median):
print('function =', name)
print('pandas output =', pd_col)
print('sfw output =', sfw_col)
self.assertTrue(False)
if __name__ == "__main__":
tester = unittest.main()
|
{"hexsha": "db9aaa06d51c68bb9b96c92d31068a8b2114720e", "size": 5439, "ext": "py", "lang": "Python", "max_stars_repo_path": "riptable/tests/test_groupby_functions.py", "max_stars_repo_name": "972d5defe3218bd62b741e6a2f11f5b3/riptable", "max_stars_repo_head_hexsha": "bb928c11752e831ec701f91964979b31db53826a", "max_stars_repo_licenses": ["BSD-2-Clause-Patent"], "max_stars_count": 307, "max_stars_repo_stars_event_min_datetime": "2020-08-27T20:25:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T15:51:19.000Z", "max_issues_repo_path": "riptable/tests/test_groupby_functions.py", "max_issues_repo_name": "972d5defe3218bd62b741e6a2f11f5b3/riptable", "max_issues_repo_head_hexsha": "bb928c11752e831ec701f91964979b31db53826a", "max_issues_repo_licenses": ["BSD-2-Clause-Patent"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2020-08-17T19:07:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T11:53:55.000Z", "max_forks_repo_path": "riptable/tests/test_groupby_functions.py", "max_forks_repo_name": "972d5defe3218bd62b741e6a2f11f5b3/riptable", "max_forks_repo_head_hexsha": "bb928c11752e831ec701f91964979b31db53826a", "max_forks_repo_licenses": ["BSD-2-Clause-Patent"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-08-28T00:22:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T20:22:28.000Z", "avg_line_length": 30.5561797753, "max_line_length": 88, "alphanum_fraction": 0.4703070417, "include": true, "reason": "import numpy", "num_tokens": 1309}
|
"""
Copyright (C) 2018 University of Massachusetts Amherst.
This file is part of "coref_tools"
http://github.com/nmonath/coref_tools
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
class BoxModel(object):
"""Implements a bounding box in n-dims."""
def __init__(self, mns, mxs):
"""Set my mins and maxes."""
self.mns = mns
self.mxs = mxs
def hallucinate_merge(self, other):
"""Return the merger of me and other."""
mins = np.min(np.array([self.mns, other.mns]), axis=0)
maxes = np.max(np.array([self.mxs, other.mxs]), axis=0)
return BoxModel(mins, maxes)
def e_score(self, box):
"""Pass in a BoxModel and return its negative log volumne."""
return -np.sum(np.log(np.abs(box.mns - box.mxs)))
def my_e_score(self):
"""Return my score."""
return self.e_score(self)
def update(self, other):
"""Update myself with another box."""
b = self.hallucinate_merge(other)
self.mns = b.mns
self.mxs = b.mxs
def new(self, point):
"""Create a new box around point."""
return BoxModel(point, point)
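# Minimal usage sketch (an added, assumption-laden example, not part of the
# original module): mins/maxes are numpy arrays, as in the methods above.
if __name__ == '__main__':
    b1 = BoxModel(np.array([0.0, 0.0]), np.array([1.0, 2.0]))
    b2 = BoxModel(np.array([0.5, -1.0]), np.array([2.0, 1.0]))
    merged = b1.hallucinate_merge(b2)
    print(merged.mns, merged.mxs)   # [0. -1.] [2. 2.]
    print(merged.my_e_score())      # -(log 2 + log 3): negative log volume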
|
{"hexsha": "b677314368e6b14cf555665295d1495a339300d9", "size": 1645, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/geo/models/BoxModel.py", "max_stars_repo_name": "nmonath/coref_tools", "max_stars_repo_head_hexsha": "542659170897ad05f7612639cb918886859ae9d6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/python/geo/models/BoxModel.py", "max_issues_repo_name": "nmonath/coref_tools", "max_issues_repo_head_hexsha": "542659170897ad05f7612639cb918886859ae9d6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/geo/models/BoxModel.py", "max_forks_repo_name": "nmonath/coref_tools", "max_forks_repo_head_hexsha": "542659170897ad05f7612639cb918886859ae9d6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2708333333, "max_line_length": 72, "alphanum_fraction": 0.6656534954, "include": true, "reason": "import numpy", "num_tokens": 402}
|
using Plots
using Random
using DifferentialEquations
using DynamicalSystems
"""make training and testing data from the dynamical system"""
function make_data(system::DynamicalSystem;
                   train_time = 100, nspin=500, test_time = 15, n_test = 10, Δt=0.01)
options = (alg = Vern9(), abstol = 1e-12,reltol=1e-12)
D = length(system.u0)
train_data = trajectory(system, train_time, system.u0.*rand(D).+rand(D);
Δt=Δt,
Ttr=nspin, diffeq=options)
test_data = Matrix{Float64}[]
for i in 1:n_test
data = trajectory(system, test_time, system.u0.*rand(D).+rand(D);
Δt=Δt,
Ttr=nspin, diffeq=options)
push!(test_data, collect(Matrix(data)'))
end
return collect(Matrix(train_data)'), test_data
end
"""Plotting function"""
function plot_prediction(time, upred, utrue)
p = []
D = size(upred)[1]
for i in 1:D
if i == 1
px = plot(time, upred[1, :], label="RC")
plot!(px, time, utrue[1, :], label="Truth")
else
px = plot(time, upred[i, :])
plot!(px, time, utrue[i, :], legend=false)
end
ylabel!(px, "X$i")
if i!=D xticks!(px, Int[]) end
push!(p, px)
end
xlabel!(p[end], "Time")
return plot(p..., layout=(D, 1), link=:x)
end
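# Hedged usage sketch (assumes the predefined Lorenz system exposed by
# DynamicalSystems.jl as `Systems.lorenz()`):
#   lorenz = Systems.lorenz()
#   train, tests = make_data(lorenz; train_time=50, test_time=5, n_test=2)
#   # `train` is a D×T matrix; each entry of `tests` is a D×T matrix
#   plot_prediction(1:size(tests[1], 2), tests[1], tests[1])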
|
{"hexsha": "6cd84641b29e7baff1a80cd08cf0659e689a25ee", "size": 1404, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/utilities.jl", "max_stars_repo_name": "japlatt/BasicReservoirComputing", "max_stars_repo_head_hexsha": "f92a6f143689b9d252d25b750aef9d91aa669509", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-12T03:54:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-14T07:48:21.000Z", "max_issues_repo_path": "examples/utilities.jl", "max_issues_repo_name": "japlatt/BasicReservoirComputing", "max_issues_repo_head_hexsha": "f92a6f143689b9d252d25b750aef9d91aa669509", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/utilities.jl", "max_forks_repo_name": "japlatt/BasicReservoirComputing", "max_forks_repo_head_hexsha": "f92a6f143689b9d252d25b750aef9d91aa669509", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-30T03:53:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T03:53:23.000Z", "avg_line_length": 32.6511627907, "max_line_length": 85, "alphanum_fraction": 0.5441595442, "num_tokens": 402}
|
import numpy as np
import matplotlib.pyplot as plt
TOL = np.finfo(float).resolution
# Set up our constants
Lx = 10 * 0.01 # [m]
Ly = 7.5 * 0.01 # [m]
rho = 7860 # [kg/m^3]
c_p = 490 # [J/kg.K]
k = 54 # [W/m.K]
alpha = k / (c_p * rho) # [m^2/s]
dx = 2.5 * 0.01 # [m]
dy = 2.5 * 0.01 # [m]
dt = 100 # [s]
sigma_x = alpha * dt / dx**2
sigma_y = alpha * dt / dy**2
# Create our arrays
nx = int((Lx+TOL) // dx) + 1
ny = int((Ly+TOL) // dy) + 1 # NOTE: Tolerance for floating point error
# Dividing small floating point numbers sometimes doesn't give the expected
# answer. For example, 0.075 / 0.025 = 2.9999999999999996
# Without the tolerance, this gives ny = 3 rather than the expected 4.
# Configure initial conditions
current_temperature = (50+273.15) * np.ones(ny*nx)
# Configure boundary conditions
bottom_edge = np.array([110, 100, 90, 80, 70]) + 273.15
top_edge = np.array([0, 10, 20, 30, 40]) + 273.15
left_edge = np.array([65, 25]) + 273.15
right_edge = np.array([60, 50]) + 273.15
current_temperature[:nx] = bottom_edge
current_temperature[nx*(ny-1):] = top_edge
current_temperature[nx:3*nx:nx] = left_edge
current_temperature[2*nx-1:4*nx-1:nx] = right_edge
recording = []
implicit_matrix = np.zeros((nx*ny, nx*ny))
for i in range(ny):
for j in range(nx):
s = i*nx + j
# Skip updating boundary nodes
if i == 0 or i == ny-1:
implicit_matrix[s, s] = 1
continue
if j == 0 or j == nx-1:
implicit_matrix[s, s] = 1
continue
implicit_matrix[s, s] = (1 + 2*sigma_x + 2*sigma_y)
implicit_matrix[s, s-1] = -sigma_x
implicit_matrix[s, s+1] = -sigma_x
implicit_matrix[s, s-nx] = -sigma_y
implicit_matrix[s, s+nx] = -sigma_y
recording.append(current_temperature)
for _ in range(100):
next_temperature = np.linalg.solve(implicit_matrix, current_temperature)
current_temperature = next_temperature.copy()
recording.append(current_temperature)
# plt.plot([t[12] for t in recording])
# plt.show()
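# Hedged sanity check (an addition, assuming the run above completed): with an
# implicit scheme the field should approach steady state, so successive
# solutions stop changing.
steady_delta = np.max(np.abs(recording[-1] - recording[-2]))
print(f"max change on final step: {steady_delta:.3e} K")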
|
{"hexsha": "2cbe17fddf4a0e2ac41431008d7746d6ceb26754", "size": 2063, "ext": "py", "lang": "Python", "max_stars_repo_path": "Week 8 and 9/basic.py", "max_stars_repo_name": "Schalk-Laubscher/2020-Tutorials", "max_stars_repo_head_hexsha": "d720994d80d255da7958bd0e4d3fa4ca69aae9d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Week 8 and 9/basic.py", "max_issues_repo_name": "Schalk-Laubscher/2020-Tutorials", "max_issues_repo_head_hexsha": "d720994d80d255da7958bd0e4d3fa4ca69aae9d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Week 8 and 9/basic.py", "max_forks_repo_name": "Schalk-Laubscher/2020-Tutorials", "max_forks_repo_head_hexsha": "d720994d80d255da7958bd0e4d3fa4ca69aae9d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1139240506, "max_line_length": 76, "alphanum_fraction": 0.6253029569, "include": true, "reason": "import numpy", "num_tokens": 695}
|
"""
Package: SQLdf
sqldf(query::String)::DataFrame
Execute R sqldf and return a julia DataFrame.
Columns in the DataFrame must have a type other than Any. In order to work with dates expressions like
\"""select strftime("%Y", datetime_column, "unixepoch") as year from T\""" may be used.
# Arguments
`query`: SQL query handled by R sqldf
# Returns
Julia DataFrame with the results from R sqldf
# Examples
```julia-repl
julia> T = DataFrame(C1 = [1,2], C2 = ["a","b"])
julia> query = \"""
select *
from T
where C2 = "a"
\""";
julia> sqldf(query)
1×2 DataFrame
Row │ C1 C2
│ Int64 String
─────┼───────────────
1 │ 1 a
julia> @sqldf query
1×2 DataFrame
Row │ C1 C2
│ Int64 String
─────┼───────────────
1 │ 1 a
```
"""
function sqldf(query::String)::DataFrame
# Normalize Query
query = replace(query,"\n"=>" ")
nq = split(query," ")
nq = nq[nq .!= ""]
# Extract tables in query
tables = String[]
nt = false
for w in nq
        # a word immediately after FROM/JOIN is taken as a table name; `push!`
        # returns the vector (never 0), so the comparison also resets `nt`
        nt = nt ? push!(tables,w)==0 : lowercase(w) in ["from","join"]
end
# Prepare R
for t in tables
Main.eval(Main.Meta.parse("@rput "*t))
end
# Retrieve and Return
rquery = "c7e881694dfe63f2 = sqldf('"*query*"')"
rquery = replace(rquery,"\"" => "\\\"")
eval(Meta.parse("R\""*rquery*"\""))
R"colnames(c7e881694dfe63f2) <- make.unique(colnames(c7e881694dfe63f2),sep='_')"
res = rcopy(R"c7e881694dfe63f2")
# Clean Up
R"rm(c7e881694dfe63f2)"
for t in tables
eval(Meta.parse("R"*"\"rm("*t*")\""))
end
R"gc()"
return res
end
macro sqldf(query)
return :( sqldf($(esc(query))) )
end
|
{"hexsha": "1264be388180ebe36d6802fbb4b6269649dbaf30", "size": 1765, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/sqldf.jl", "max_stars_repo_name": "viraltux/SQLDF.jl", "max_stars_repo_head_hexsha": "627f233f38cd915961aa487ea880bdc28e698787", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-07-08T19:14:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T23:10:14.000Z", "max_issues_repo_path": "src/sqldf.jl", "max_issues_repo_name": "viraltux/SQLDF.jl", "max_issues_repo_head_hexsha": "627f233f38cd915961aa487ea880bdc28e698787", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-07T12:04:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-11T10:32:41.000Z", "max_forks_repo_path": "src/sqldf.jl", "max_forks_repo_name": "viraltux/SQLdf.jl", "max_forks_repo_head_hexsha": "627f233f38cd915961aa487ea880bdc28e698787", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0119047619, "max_line_length": 102, "alphanum_fraction": 0.5467422096, "num_tokens": 564}
|
import os
import cv2
import numpy as np
import skimage.transform
import torch
from lib.windows import normalize_data
def pad_if_needed(img, min_height, min_width):
input_height, input_width = img.shape[:2]
new_shape = list(img.shape)
new_shape[0] = max(input_height, min_height)
new_shape[1] = max(input_width, min_width)
row_from, col_from = 0, 0
if input_height < min_height:
row_from = (min_height - input_height) // 2
if input_width < min_width:
col_from = (min_width - input_width) // 2
out = np.zeros(new_shape, dtype=img.dtype)
out[row_from:row_from+input_height, col_from:col_from+input_width] = img
return out
def center_crop(img, crop_height, crop_width, centre=None):
"""Either crops by the center of the image, or around a supplied point.
Does not pad; if the supplied centre is towards the egde of the image, the padded
area is shifted so crops start at 0 and only go up to the max row/col
Returns both the new crop, and the top-left coords as a row,col tuple"""
input_height, input_width = img.shape[:2]
if centre is None:
row_from = (input_height - crop_height)//2
col_from = (input_width - crop_width)//2
else:
row_centre, col_centre = centre
row_from = max(row_centre - (crop_height//2), 0)
if (row_from + crop_height) > input_height:
row_from -= (row_from + crop_height - input_height)
col_from = max(col_centre - (crop_width//2), 0)
if (col_from + crop_width) > input_width:
col_from -= (col_from + crop_width - input_width)
return img[row_from:row_from+crop_height, col_from:col_from+crop_width], (row_from, col_from)
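# Hedged usage sketch for the two helpers above:
#   img = np.zeros((100, 120))
#   crop, (r0, c0) = center_crop(img, 64, 64)    # crop.shape == (64, 64)
#   padded = pad_if_needed(img, 128, 128)        # padded.shape == (128, 128)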
def get_original_npy_path_from_exported_npz_path(npz_path, peter_dir):
date, study, file, _end = os.path.basename(npz_path).split('__')
peter_path = os.path.join(peter_dir, date, study, file)
return peter_path
def get_normalized_channel_stack(t1, t2, t1w, t2w, pd, data_stack_format=None):
t1_pre = normalize_data(t1, window_centre=1300.0, window_width=1300.0)
t1_post = normalize_data(t1, window_centre=500.0, window_width=1000.0)
t2 = normalize_data(t2, window_centre=60.0, window_width=120.0)
t1w = t1w - t1w.min()
t1w /= t1w.max()
t2w = t2w - t2w.min()
t2w /= t2w.max()
pd = pd - pd.min()
pd /= pd.max()
t1_pre = (t1_pre*255).astype(np.uint8)
t1_post = (t1_post*255).astype(np.uint8)
t2 = (t2*255).astype(np.uint8)
t1w = (t1w*255).astype(np.uint8)
t2w = (t2w*255).astype(np.uint8)
pd = (pd*255).astype(np.uint8)
if data_stack_format is None:
t1_t2 = None
elif data_stack_format == 'all':
t1_t2 = np.dstack((t1w, t2w, pd, t1_pre, t1_post, t2))
elif data_stack_format == 't1':
t1_t2 = np.dstack((t1_pre, t1_post))
elif data_stack_format == 't2':
t1_t2 = np.expand_dims(t2, axis=-1)
else:
raise ValueError()
return t1_pre, t1_post, t2, t1w, t2w, pd, t1_t2
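# Hedged shape sketch: given 2-d float inputs of equal shape (H, W),
#   *_, stacked = get_normalized_channel_stack(t1, t2, t1w, t2w, pd_map, 'all')
#   stacked.shape  # -> (H, W, 6), dtype uint8
# (`pd_map` stands in for the proton-density image passed as `pd` above.)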
def prep_normalized_stack_for_inference(t1_t2, fov, as_tensor, tensor_device=None):
t1_t2_crop, _top_left = center_crop(pad_if_needed(t1_t2, min_height=fov, min_width=fov), crop_height=fov, crop_width=fov)
t1_t2_double = skimage.transform.rescale(t1_t2_crop, 2, order=3, multichannel=True)
t1_t2_in = t1_t2_double.transpose((2, 0, 1))
img_batch = np.expand_dims(t1_t2_in, 0).astype(np.float32)
if as_tensor:
img_batch = torch.from_numpy(img_batch).float().to(tensor_device)
return img_batch
def tta(model, x):
flips = [[-1], [-2], [-2, -1]]
pred_batch = model(x)
for f in flips:
xf = torch.flip(x, f)
p_b = model(xf)
p_b = torch.flip(p_b, f)
pred_batch += p_b
    pred_batch = pred_batch / (len(flips) + 1)  # average the original pass and the three flipped passes
return pred_batch
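# Hedged usage sketch: `tta` works with any torch module, e.g.
#   model = torch.nn.Identity()
#   x = torch.randn(1, 3, 8, 8)
#   tta(model, x)  # mean of the original pass and the three flipped passes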
def paths_to_ridge_polygons(xs_epi, ys_epi, xs_end, ys_end, fov):
mask_lvcav = np.zeros((fov, fov), dtype=np.uint8)
mask_lvwall = np.zeros_like(mask_lvcav)
points_end = np.array([list(zip(xs_end, ys_end))])
points_epi = np.array([list(zip(xs_epi, ys_epi))])
color = np.uint8(np.ones(3) * 1).tolist()
cv2.fillPoly(mask_lvcav, points_end, color)
cv2.fillPoly(mask_lvwall, points_epi, color)
return mask_lvcav, mask_lvwall
|
{"hexsha": "274aa172a8e8adc19cf3fa52126e4456e95f64cc", "size": 4268, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/inference.py", "max_stars_repo_name": "jphdotam/T1T2", "max_stars_repo_head_hexsha": "b5003f5cf3aaddc4f43a7b7b4a77f52cef956c27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/inference.py", "max_issues_repo_name": "jphdotam/T1T2", "max_issues_repo_head_hexsha": "b5003f5cf3aaddc4f43a7b7b4a77f52cef956c27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/inference.py", "max_forks_repo_name": "jphdotam/T1T2", "max_forks_repo_head_hexsha": "b5003f5cf3aaddc4f43a7b7b4a77f52cef956c27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7931034483, "max_line_length": 125, "alphanum_fraction": 0.6747891284, "include": true, "reason": "import numpy", "num_tokens": 1306}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 6 14:57:46 2021
@author: iseabrook1
"""
#This script contains the functions to calculate node importance
#and related analyses as presented in Seabrook et al.,
#Community aware evaluation of node importance
#Isobel Seabrook, ucabeas@ucl.ac.uk
#MIT License. Please reference below publication if used for research purposes.
#Reference: Seabrook et al., Community aware evaluation of node importance
#
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import seaborn as sns
import networkx.algorithms.community as nxcom
from itertools import count
def multi_edge_to_uni(G):
"""This function takes in a graph which has multiple edges between two nodes,
and sums them to a single edge.
Parameters:
G: networkx graph with potentially multi-edges
Returns:
G_uni: graph with multi-edge weights summed to give single total weight.
"""
G_uni = nx.Graph()
for u,v,data in G.edges(data=True):
w = data['total_value']
if G_uni.has_edge(u,v):
G_uni[u][v]['total_value'] += w
else:
G_uni.add_edge(u, v, total_value=w)
return(G_uni)
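# Hedged sketch: parallel edges collapse into one summed 'total_value'.
#   M = nx.MultiGraph()
#   M.add_edge('a', 'b', total_value=1.0)
#   M.add_edge('a', 'b', total_value=2.0)
#   multi_edge_to_uni(M)['a']['b']['total_value']  # -> 3.0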
def set_node_community(G, communities):
'''Add community to node attributes'''
for c, v_c in enumerate(communities):
for v in v_c:
# Add 1 to save 0 for external edges
G.nodes[v]['community'] = c + 1
def mi_generator_symm_tests(data, node, test_type, weight=True, google_matrix=False):
""" Function to generate the values of m_{a-c}, eigenvector centrality, pagerank, degree and
community for an individual node
Parameters:
data: pandas edgelist dataframe for network snapshot - columns seller id, buyer id, total_value
and trade date time.
        node: node identifier (a seller id or buyer id)
Returns:
tuple of values of calculated metrics. Where nodes are not found in the giant component, no value is returned.
"""
#print(node)
M = nx.from_pandas_edgelist(data, source="seller id", target = "buyer id",
edge_attr = ['total_value'],
create_using=nx.MultiGraph())
if node in M.nodes():
G = nx.Graph()#DiGraph if directed
G.add_nodes_from(M)
for u,v,data1 in M.edges(data=True):
            w = data1['total_value'] if 'total_value' in data1 else 1.0
if G.has_edge(u,v):
G[u][v]['weight'] += w
else:
G.add_edge(u, v, weight=w)
if google_matrix==True:
A = nx.google_matrix(G)
else:
A = nx.to_numpy_matrix(G.to_undirected())
#S = A.sum(axis=0)#
S = pd.Series([val for (node, val) in G.degree(weight='weight')], index=[node for (node, val) in G.degree(weight='weight')])
#print(S)
eigenvalues, eigvecs = np.linalg.eigh(A)
idx = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[idx]
eigvecs = eigvecs[:,idx]
#retrieve all of the eigenvector components for node
#print(eigenvalues)
eigvecs_df = pd.DataFrame(eigvecs, index=G.nodes, columns = eigenvalues)#.abs()
#print(eigvecs_df)
max_eigvec = eigvecs_df.iloc[:,0]
grad_A = max_eigvec.loc[node]*max_eigvec
m_i_a = (2/S.loc[node])*(grad_A.sum())
if weight == True:
max_eigvec = eigvecs_df[[x for x in eigenvalues if x>0]].abs().max(axis=1)
max_eigvec_eigval = eigvecs_df[[x for x in eigenvalues if x>0]].abs().idxmax(axis=1)
weighted_max_eigvec = max_eigvec.multiply(max_eigvec_eigval)
lead_eig = eigenvalues[0]
max_eigvec = weighted_max_eigvec/lead_eig
else:
max_eigvec = eigvecs_df[[x for x in eigenvalues if x>0]].abs().max(axis=1)
grad_A = max_eigvec.loc[node]*max_eigvec
m_i_b = (2/S.loc[node])*(grad_A.sum())
eigvecs_sum = eigvecs_df.sum(axis=1)
grad_A = eigvecs_sum.loc[node]*eigvecs_sum
m_i_c = (2/S.loc[node])*(grad_A.sum())
eigvals_pos = [x for x in eigenvalues if x > 0]
eigvecs_sum = eigvecs_df.loc[:,eigvals_pos].sum(axis=1)
grad_A = eigvecs_sum.loc[node]*eigvecs_sum
m_i_d = (2/S.loc[node])*(grad_A.sum())
#eig_cent
if node in G.nodes():
try:
                eig_cent = nx.eigenvector_centrality(G, weight='weight')[node]
except:
try:
                    eig_cent = nx.eigenvector_centrality_numpy(G, weight='weight', max_iter=1000)[node]
except:
eig_cent=0
else:
eig_cent = 0
#pagerank
if node in G.nodes():
try:
                pagerank = nx.pagerank(G, weight='weight')[node]
except:
pagerank=0
else:
pagerank = 0
#degree
if node in G.nodes():
try:
deg = G.degree()[node]
except:
deg=0
else:
deg = 0
#community
communities = sorted(nxcom.greedy_modularity_communities(G.to_undirected()), key=len, reverse=True)
set_node_community(G, communities)
if node in G.nodes():
try:
comm = nx.get_node_attributes(G, 'community')[node]
except:
comm=0
else:
comm = 0
return((m_i_a, m_i_b, m_i_c, m_i_d, eig_cent, pagerank, deg, comm))
else:
return(["no value" for i in range(8)])
def da_le_pairs_test(g_df, test_type='all', weight=True):
""" Function to generate dataframe of dS m_i pairs.
Parameters:
g_df: pandas edgelist dataframe for network snapshot - columns seller id, buyer id, total_value and trade date time.
Returns: dataframe, columns:
trade_date_time: timestamp
m_{a-d}: values of structural importance calculated for each node
eig_cent: eigenvector centrality of each node
pagerank: pagerank of each node
degree: degree of each node
community: greedy modularity community label for each node
        delta_S_act: subsequently observed dS value.
S_init: initial strength
S_fin: final strength
delta_S_rel: relative change in Strength (S_fin-S_init/S_init)
variable: edge tuple
log_delta_S_rel: natural log of delta_S_rel
"""
G = nx.from_pandas_edgelist(g_df, source="seller id", target = "buyer id",edge_attr = ['total_value'],create_using=nx.MultiGraph())
monthly_values_list = [[] for i in range(10)]
whole_graph_uni = multi_edge_to_uni(G)
for node in whole_graph_uni.nodes():
monthly_values_list[0].append(node)
strength_series = pd.Series(g_df[(g_df["buyer id"]==node) | (g_df["seller id"]==node)].sort_values(by="trade date time").groupby("trade date time").total_value.sum())
monthly_values_list[1].append(strength_series)
x = g_df.groupby(g_df["trade date time"], axis=0).apply(lambda x: mi_generator_symm_tests(x, node, test_type, weight)).apply(pd.Series)
monthly_values_list[2].append(x.iloc[:,0])
monthly_values_list[3].append(x.iloc[:,1])
monthly_values_list[4].append(x.iloc[:,2])
monthly_values_list[5].append(x.iloc[:,3])
monthly_values_list[6].append(x.iloc[:,4])
monthly_values_list[7].append(x.iloc[:,5])
monthly_values_list[8].append(x.iloc[:,6])
monthly_values_list[9].append(x.iloc[:,7])
# create a dataframe with columns for the value of m_i for a given node for a given time, including both relative and absolute change and timestamp
A= pd.DataFrame.from_records(monthly_values_list[1] , index=monthly_values_list[0])
A = A.apply(lambda series: series.loc[:series.last_valid_index()].ffill(), axis=1)
A_T=A.T
A_T_shift = A.shift(-1,axis=1).T
A_T.index.name = "trade date time"
A_T_shift.index.name = "trade date time"
m_a = pd.DataFrame.from_records(monthly_values_list[2] , index=monthly_values_list[0])
m_b = pd.DataFrame.from_records(monthly_values_list[3] , index=monthly_values_list[0])
m_c = pd.DataFrame.from_records(monthly_values_list[4] , index=monthly_values_list[0])
m_d = pd.DataFrame.from_records(monthly_values_list[5] , index=monthly_values_list[0])
eig_cent = pd.DataFrame.from_records(monthly_values_list[6] , index=monthly_values_list[0])
pagerank = pd.DataFrame.from_records(monthly_values_list[7] , index=monthly_values_list[0])
degree = pd.DataFrame.from_records(monthly_values_list[8] , index=monthly_values_list[0])
community = pd.DataFrame.from_records(monthly_values_list[9] , index=monthly_values_list[0])
ds_mi = pd.concat([A_T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]),\
A_T_shift.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]),\
m_a.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
m_b.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
m_c.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
m_d.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
eig_cent.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
pagerank.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
degree.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"]), \
community.T.reset_index().melt(id_vars="trade date time").set_index(["trade date time","variable"])],
join = 'inner', axis=1, sort=True)
ds_mi.reset_index(level=0, inplace=True)
ds_mi.columns = ["trade date time","S_init","S_fin", "m_a","m_b","m_c","m_d", "eig_cent", "pagerank", "degree", "community" ]
ds_mi["delta_S_act"] = ds_mi.S_fin - ds_mi.S_init
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.m_a]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.m_b]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.m_c]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.m_d]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.eig_cent]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.pagerank]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.degree]]
ds_mi =ds_mi[[type(x)!=str for x in ds_mi.community]]
ds_mi.reset_index(inplace=True)
ds_mi["delta_S_rel"] = ds_mi.delta_S_act/ds_mi["S_init"]
ds_mi = ds_mi[ds_mi.delta_S_rel!=np.inf]
ds_mi["log_delta_S_rel"] = np.log(1+ds_mi.delta_S_rel)
ds_mi=ds_mi[ds_mi.log_delta_S_rel!=-np.inf]
return(ds_mi)
def plot_violinplots_multimeasure(ds_mi, measure_columns = ["m_a", "m_b", "m_c", "m_d",'eig_cent','pagerank','degree', 'community']):
""" Function to generate boxplots for the distributions
of measure values for nodes which don't change in comparison
to those that do.
Parameters:
ds_mi: pandas dataframe with columns for each node including measure_columns, and:
trade_date_time: timestamp
change_bool: boolean indicator for whether or not node subsequently changes
Returns: p: p-value of t-test applied to differences in the means of the distributions of the measures
in measure_columns, for nodes that change vs. nodes that don't change
            plot: violin plots showing the distributions of each of the measures in measure_columns for nodes
that change vs. nodes that don't change.
"""
#scale each of the measures to a range of 0,1.
ds_mi[measure_columns] -= ds_mi[measure_columns].min() # equivalent to df = df - df.min()
ds_mi[measure_columns] /= ds_mi[measure_columns].max() # equivalent to df = df / df.max()
#melt dataframe
    ds_mi_melt = ds_mi[measure_columns + ['change_bool']].melt(id_vars=['change_bool'], var_name=["measure"])
ds_mi_melt["measure"] = ds_mi_melt.measure.str.replace("m_","")
ds_mi_melt["measure"] = ds_mi_melt.measure.str.replace("eig_cent","eig. cent.")
def ttest(ds):
g1 = ds[(ds['Change indicator']==0)]["value"].values
g2 = ds[(ds['Change indicator']==1)]["value"].values
t,p=stats.ttest_ind(g1,g2)
return(p)
ds_mi_melt.columns = ["Change indicator", "measure", "value"]
p = ds_mi_melt.groupby('measure').apply(ttest)
print(p)
fig, axs=plt.subplots(1,1, figsize=(12,5))
sns.violinplot(x="measure", y="value", hue='Change indicator', data=ds_mi_melt, ax=axs)
#axs.set_title(f'p-value = {p:.3e}')
axs.set_xlabel("Change label")
axs.set_ylabel("Measure value (Scaled)")
plt.show()
return(p)
def plot_network_ev_rankings(raw_data, ds_mi):
""" Function to produce a visualisation of a network, with nodes colored and numbered by the
ranking of the eigenvalue that localises to that node.
Parameters:
raw_data: pandas edgelist dataframe for network snapshot - columns seller id, buyer id, total_value and trade date time.
ds_mi: pandas dataframe with columns for each node including measure_columns, and:
trade_date_time: timestamp
change_bool: boolean indicator for whether or not node subsequently changes
Returns: plot of network with nodes coloured and numbered by the eigenvalue ranking.
"""
    raw_data = raw_data.sort_values(by=['buyer id', 'seller id', 'trade date time'])
raw_data['change_bool'] = raw_data.groupby(['buyer id', 'seller id']).total_value.diff(periods = -1).fillna(0)!=0
init_snapshot = raw_data[raw_data['trade date time']==min(raw_data['trade date time'])]
init_snapshot["tuple_id"]=[(u, v) for u, v in zip(init_snapshot['buyer id'], init_snapshot['seller id'])]
changing_nodes = pd.unique(raw_data[raw_data.change_bool==True][['buyer id', 'seller id']].values.ravel('K'))
g = nx.from_pandas_edgelist(init_snapshot, source="seller id", target = "buyer id",edge_attr = ['total_value'],create_using=nx.MultiGraph())
A = nx.to_numpy_matrix(g, weight='total_value')
#coloring the nodes according to eigenvalue localisation
eigenvalues, eigvecs = np.linalg.eigh(A)
idx = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[idx]
eigvecs = eigvecs[:,idx]
#retrieve all of the eigenvector components for node
eigvecs_df = pd.DataFrame(eigvecs, index=g.nodes, columns = eigenvalues)#.abs()
max_eigvec_eigval = eigvecs_df[[x for x in eigenvalues if x>0]].abs().idxmax(axis=1).to_frame()
def rank_unique(x, **kwargs):
        sx = sorted(set(x), **kwargs)  # sorts the unique values min to max, so the highest rank goes to the largest value
invsx = {s: i for i, s in enumerate(sx)}
return [1 + invsx[v] for v in x]
max_eigvec_eigval['ev_rank']=rank_unique(max_eigvec_eigval[0])
node_attr = dict(zip(max_eigvec_eigval.index, max_eigvec_eigval['ev_rank']))
weights = [np.sqrt(w.get('total_value'))/50 for u,v,w in g.edges(data=True)]
nx.set_node_attributes(g,node_attr,'node_attr')
# create number for each group to allow use of colormap
# get unique groups
groups = set(nx.get_node_attributes(g,'node_attr').values())
mapping = dict(zip(sorted(groups),count()))
nodes = pd.Series(list(g.nodes()))
ds_mi = ds_mi[ds_mi.variable.isin(nodes)]
changing_nodes = changing_nodes[pd.Series(changing_nodes).isin(nodes)]
colors_changing = [mapping[g.nodes(data=True)[n]['node_attr']] if n in ds_mi['variable'].values else 0 for n in changing_nodes]
colors_unchanging = [mapping[g.nodes(data=True)[n]['node_attr']] if n in ds_mi['variable'].values else 0 for n in nodes[~nodes.isin(changing_nodes)]]
# drawing nodes and edges separately so we can capture collection for colobar
plt.figure(figsize=(10,10))
pos = nx.spring_layout(g, k=0.15, iterations=20)
ec = nx.draw_networkx_edges(g, pos, width=weights)
v_max = max(max([int(i) for i in colors_unchanging]), max([int(i) for i in colors_changing]))
nc = nx.draw_networkx_nodes(g, pos, nodelist=changing_nodes, node_color=colors_changing, alpha=0.5,
node_size=100,cmap=plt.get_cmap('viridis'), vmin=0, vmax=v_max)
nc = nx.draw_networkx_nodes(g, pos, nodelist=nodes[~nodes.isin(changing_nodes)], node_color=colors_unchanging,alpha=0.5,
node_size=100, node_shape='s',cmap=plt.get_cmap('viridis'), vmin=0, vmax=v_max)
nx.draw_networkx_labels(g,pos, nx.get_node_attributes(g, 'node_attr'))
plt.colorbar(nc)
plt.axis('off')
plt.show()
if __name__ == "__main__":
    pass
|
{"hexsha": "c9d00ffacc48da846a8fabe79bd944b5b644f0c5", "size": 17424, "ext": "py", "lang": "Python", "max_stars_repo_path": "node_importance_functions.py", "max_stars_repo_name": "Iseabrook/structural_node_importance", "max_stars_repo_head_hexsha": "c3f0e05d7e9d7597273f951fd5ec111830250b96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-07T11:01:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T11:01:29.000Z", "max_issues_repo_path": "node_importance_functions.py", "max_issues_repo_name": "Iseabrook/structural_node_importance", "max_issues_repo_head_hexsha": "c3f0e05d7e9d7597273f951fd5ec111830250b96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "node_importance_functions.py", "max_forks_repo_name": "Iseabrook/structural_node_importance", "max_forks_repo_head_hexsha": "c3f0e05d7e9d7597273f951fd5ec111830250b96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.9604863222, "max_line_length": 175, "alphanum_fraction": 0.6330922865, "include": true, "reason": "import numpy,from scipy,import networkx", "num_tokens": 4397}
|
### Julia OpenStreetMap Package ###
### MIT License ###
### Copyright 2014 ###
type OSMattributes
oneway::Bool
oneway_override::Bool
oneway_reverse::Bool
visible::Bool
lanes::Int
name::UTF8String
class::UTF8String
detail::UTF8String
cycleway::UTF8String
sidewalk::UTF8String
bicycle::UTF8String
# XML elements
element::Symbol # :None, :Node, :Way, :Tag[, :Relation]
parent::Symbol # :Building, :Feature, :Highway
way_nodes::Vector{Int} # for buildings and highways
id::Int # Uninitialized
lat::Float64 # Uninitialized
lon::Float64 # Uninitialized
OSMattributes() = new(false,false,false,false,1,
"","","","","","",:None,:None,[])
end
type OSMdata
nodes::Dict{Int,LLA}
highways::Dict{Int,Highway}
buildings::Dict{Int,Building}
features::Dict{Int,Feature}
attr::OSMattributes
OSMdata() = new(Dict(),Dict(),Dict(),Dict(),OSMattributes())
end
function reset_attributes!(osm::OSMattributes)
osm.oneway = osm.oneway_override = osm.oneway_reverse = osm.visible = false
osm.lanes = 1
osm.name = osm.class = osm.detail = osm.cycleway = osm.sidewalk = osm.bicycle = ""
osm.element = osm.parent = :None
empty!(osm.way_nodes)
end
### PARSE XML ELEMENTS ###
function parse_node(attr::OSMattributes, attrs_in::Dict{@compat(AbstractString),@compat(AbstractString)})
attr.visible = true
attr.element = :Node
if haskey(attrs_in, "id")
attr.id = @compat( parse(Int,attrs_in["id"]) )
attr.lat = float(attrs_in["lat"])
attr.lon = float(attrs_in["lon"])
end
end
function parse_way(attr::OSMattributes, attrs_in::Dict{@compat(AbstractString),@compat(AbstractString)})
attr.visible = true
attr.element = :Way
if haskey(attrs_in, "id")
attr.id = @compat( parse(Int,attrs_in["id"]) )
end
end
function parse_nd(attr::OSMattributes, attrs_in::Dict{@compat(AbstractString),@compat(AbstractString)})
if haskey(attrs_in, "ref")
push!(attr.way_nodes, @compat( parse(Int64,attrs_in["ref"]) ) )
end
end
function parse_tag(attr::OSMattributes, attrs_in::Dict{@compat(AbstractString),@compat(AbstractString)})
if haskey(attrs_in, "k") && haskey(attrs_in, "v")
k, v = attrs_in["k"], attrs_in["v"]
if k == "name"
if isempty(attr.name)
attr.name = v # applicable to roads (highways), buildings, features
end
elseif attr.element == :Way
if k == "building"
parse_building(attr, v)
else
parse_highway(attr, k, v) # for other highway tags
end
elseif attr.element == :Node
if haskey(FEATURE_CLASSES, k)
parse_feature(attr, k, v)
end
end
else
# Nothing to be done here?
end
end
### PARSE OSM ENTITIES ###
function parse_highway(attr::OSMattributes, k::@compat(AbstractString), v::@compat(AbstractString))
if k == "highway"
attr.class = v
if v == "services" # Highways marked "services" are not traversable
attr.visible = false
return
end
if v == "motorway" || v == "motorway_link"
attr.oneway = true # motorways default to oneway
end
elseif k == "oneway"
if v == "-1"
attr.oneway = true
attr.oneway_reverse = true
elseif v == "false" || v == "no" || v == "0"
attr.oneway = false
attr.oneway_override = true
elseif v == "true" || v == "yes" || v == "1"
attr.oneway = true
end
elseif k == "junction" && v == "roundabout"
attr.oneway = true
elseif k == "cycleway"
attr.cycleway = v
elseif k == "sidewalk"
attr.sidewalk = v
elseif k == "bicycle"
attr.bicycle = v
elseif k == "lanes" && length(v)==1 && '1' <= v[1] <= '9'
attr.lanes = @compat parse(Int,v)
else
return
end
attr.parent = :Highway
end
function parse_building(attr::OSMattributes, v::@compat(AbstractString))
attr.parent = :Building
if isempty(attr.class)
attr.class = v
end
end
function parse_feature(attr::OSMattributes, k::@compat(AbstractString), v::@compat(AbstractString))
attr.parent = :Feature
attr.class = k
attr.detail = v
end
### LibExpat.XPStreamHandlers ###
function parseElement(handler::LibExpat.XPStreamHandler, name::@compat(AbstractString), attrs_in::Dict{@compat(AbstractString),@compat(AbstractString)})
attr = handler.data.attr::OSMattributes
if attr.visible
if name == "nd"
parse_nd(attr, attrs_in)
elseif name == "tag"
parse_tag(attr, attrs_in)
end
elseif !(haskey(attrs_in, "visible") && attrs_in["visible"] == "false")
if name == "node"
parse_node(attr, attrs_in)
elseif name == "way"
parse_way(attr, attrs_in)
end
end # no work done for "relations" yet
end
function collectValues(handler::LibExpat.XPStreamHandler, name::@compat(AbstractString))
# println(typeof(name))
osm = handler.data::OSMdata
attr = osm.attr::OSMattributes
if name == "node"
osm.nodes[attr.id] = LLA(attr.lat, attr.lon)
if attr.parent == :Feature
osm.features[attr.id] = Feature(attr.class, attr.detail, attr.name)
end
elseif name == "way"
if attr.parent == :Building
osm.buildings[attr.id] = Building(attr.class, attr.name, copy(attr.way_nodes))
elseif attr.parent == :Highway
if attr.oneway_reverse
reverse!(attr.way_nodes)
end
osm.highways[attr.id] = Highway(attr.class, attr.lanes,
(attr.oneway && !attr.oneway_override),
attr.sidewalk, attr.cycleway, attr.bicycle,
attr.name, copy(attr.way_nodes))
end
else # :Tag or :Nd (don't reset values!)
return
end
reset_attributes!(osm.attr)
end
### Parse the data from an openStreetMap XML file ###
function parseMapXML(filename::@compat(AbstractString))
# Parse the file
street_map = LightXML.parse_file(filename)
if LightXML.name(LightXML.root(street_map)) != "osm"
throw(ArgumentError("Not an OpenStreetMap datafile."))
end
return street_map
end
function getOSMData(filename::@compat(AbstractString); args...)
osm = OSMdata()
callbacks = LibExpat.XPCallbacks()
callbacks.start_element = parseElement
callbacks.end_element = collectValues
LibExpat.parsefile(filename, callbacks, data=osm; args...)
osm.nodes, osm.highways, osm.buildings, osm.features
end
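# Hedged usage sketch (assumes a local OSM XML extract named "map.osm"):
#   nodes, highways, buildings, features = getOSMData("map.osm")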
|
{"hexsha": "1b9b367dd5aee727e0d7c7d2c367394fe66e7a88", "size": 6852, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/parseMap.jl", "max_stars_repo_name": "UnofficialJuliaMirror/OpenStreetMap.jl-08b6f058-0539-51ec-9920-f66949f89f7a", "max_stars_repo_head_hexsha": "9102e36e4f8304ce2238f2d315589976c83d3d66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2015-05-23T12:04:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T10:32:57.000Z", "max_issues_repo_path": "src/parseMap.jl", "max_issues_repo_name": "UnofficialJuliaMirror/OpenStreetMap.jl-08b6f058-0539-51ec-9920-f66949f89f7a", "max_issues_repo_head_hexsha": "9102e36e4f8304ce2238f2d315589976c83d3d66", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2015-01-03T01:14:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T21:16:04.000Z", "max_forks_repo_path": "src/parseMap.jl", "max_forks_repo_name": "UnofficialJuliaMirror/OpenStreetMap.jl-08b6f058-0539-51ec-9920-f66949f89f7a", "max_forks_repo_head_hexsha": "9102e36e4f8304ce2238f2d315589976c83d3d66", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2015-04-25T08:04:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T09:05:21.000Z", "avg_line_length": 31.4311926606, "max_line_length": 152, "alphanum_fraction": 0.600992411, "num_tokens": 1766}
|
"""Printing module
"""
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
DATE_FORMAT = "%Y-%m-%d"
def user_to_name(argument):
"""Switch functionality
"""
switcher = {
"lopezobrador_": "A. López Obrador",
"RicardoAnayaC": "R. Anaya Cortés",
"luisederbez": "L. Dervez Bautista",
"RafaMorenoValle": "R. Moreno Valle",
"JCRomeroHicks": "J. Romero Hicks",
"eruviel_avila": "E. Ávila Villegas",
"MFBeltrones": "M. Beltrones Rivera",
"JoseAMeadeK": "J. Meade Kuribreña",
"aurelionuno": "A. Nuño Mayer",
"IvonneOP": "I. Ortega Pacheco",
"osoriochong": "M. Osorio Chong",
"LVidegaray": "L. Videgaray Caso",
"Silvano_A": "S. Aureoles Conejo",
"ManceraMiguelMX": "M. Mancera Espinosa",
"Mzavalagc": "M. Zavala Gómez",
"RiosPiterJaguar": "A. Ríos Piter",
"JaimeRdzNL": "J. Rodríguez 'El Bronco'",
"PedroFerriz": "P. Ferriz de Con"
}
return switcher.get(argument, "nothing")
def print_popularity(candidates_list, party=None):
"""Plots candidates popularity
"""
"""
candidates_list = {
"A":[("2017-10-01",10),("2017-10-02",12)],
"B":[("2017-10-01",8),("2017-10-02",15)]
}
"""
plt.clf()
plt.xlabel("Days")
plt.ylabel("Popularity")
    if party is not None:
plt.title("Last 11 days (" + party + ")")
else:
plt.title("Last 11 days")
plt.grid(True)
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1.0, len(candidates_list.items())))
for i, (candidate, items_list) in enumerate(candidates_list.items()):
y = []
dates = []
print(candidate)
for date, value in items_list:
dates.append(date)
y.append(value)
x = [dt.datetime.strptime(d, DATE_FORMAT).date() for d in dates]
print(x)
print(y)
to_sort = zip(x, y)
z = sorted(to_sort, key=lambda x: x[0])
x, y = zip(*z)
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter(DATE_FORMAT))
plt.gca().xaxis.set_major_locator(mdates.DayLocator())
plt.plot(x, y, label=user_to_name(candidate), color=colors[i])
plt.gcf().autofmt_xdate()
#plt.plot_date(x, y, fmt=DATE_FORMAT, xdate=True, label=str(candidate))
plt.legend()
plt.show()
def pretty_print(data):
"""Pretty print interface.
"""
import pprint
pretty_printer = pprint.PrettyPrinter(indent=4)
pretty_printer.pprint(data)
def main():
"""Main function
"""
#print_popularity(None)
if __name__ == '__main__':
main()
|
{"hexsha": "da5c83c406d625e56cf0d2566cd98a8163083523", "size": 2704, "ext": "py", "lang": "Python", "max_stars_repo_path": "PlotPopularity.py", "max_stars_repo_name": "fornesarturo/twitter-popularity", "max_stars_repo_head_hexsha": "c640e0291ee1483e433c76ee64a89bf53537af75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PlotPopularity.py", "max_issues_repo_name": "fornesarturo/twitter-popularity", "max_issues_repo_head_hexsha": "c640e0291ee1483e433c76ee64a89bf53537af75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-01T21:56:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-01T21:56:32.000Z", "max_forks_repo_path": "PlotPopularity.py", "max_forks_repo_name": "fornesarturo/twitter-popularity", "max_forks_repo_head_hexsha": "c640e0291ee1483e433c76ee64a89bf53537af75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4631578947, "max_line_length": 79, "alphanum_fraction": 0.5865384615, "include": true, "reason": "import numpy", "num_tokens": 796}
|
# Erode and dilate support 3x3 regions only (and higher-dimensional generalizations).
"""
```
imgd = dilate(img, [region])
```
perform a max-filter over nearest-neighbors. The
default is 8-connectivity in 2d, 27-connectivity in 3d, etc. You can specify the
list of dimensions that you want to include in the connectivity, e.g., `region =
[1,2]` would exclude the third dimension from filtering.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(5, 5); img[3, 3] = 1.; img
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
julia> dilate(img)
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0
julia> dilate(img, 1)
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
```
"""
dilate(img::AbstractArray, region=coords_spatial(img)) = dilate!(copy(img), region)
"""
```
imge = erode(img, [region])
```
perform a min-filter over nearest-neighbors. The
default is 8-connectivity in 2d, 27-connectivity in 3d, etc. You can specify the
list of dimensions that you want to include in the connectivity, e.g., `region =
[1,2]` would exclude the third dimension from filtering.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(5, 5); img[2:4, 2:4] .= 1.; img
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0
julia> erode(img)
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
julia> erode(img, 1)
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 1.0 1.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
```
"""
erode(img::AbstractArray, region=coords_spatial(img)) = erode!(copy(img), region)
dilate!(maxfilt, region=coords_spatial(maxfilt)) = extremefilt!(maxfilt, max, region)
erode!(minfilt, region=coords_spatial(minfilt)) = extremefilt!(minfilt, min, region)
function extremefilt!(A::AbstractArray, select::Function, region=coords_spatial(A))
inds = axes(A)
for d = 1:ndims(A)
if size(A, d) == 1 || !in(d, region)
continue
end
Rpre = CartesianIndices(inds[1:d-1])
Rpost = CartesianIndices(inds[d+1:end])
_extremefilt!(A, select, Rpre, inds[d], Rpost)
end
A
end
@noinline function _extremefilt!(A, select, Rpre, inds, Rpost)
# TODO: improve the cache efficiency
for Ipost in Rpost, Ipre in Rpre
# first element along dim
i1 = first(inds)
a2, a3 = A[Ipre, i1, Ipost], A[Ipre, i1+1, Ipost]
A[Ipre, i1, Ipost] = select(a2, a3)
# interior along dim
for i = i1+2:last(inds)
a1, a2 = a2, a3
a3 = A[Ipre, i, Ipost]
A[Ipre, i-1, Ipost] = select(select(a1, a2), a3)
end
# last element along dim
A[Ipre, last(inds), Ipost] = select(a2, a3)
end
A
end
"""
`imgo = opening(img, [region])` performs the `opening` morphology operation, equivalent to `dilate(erode(img))`.
`region` allows you to control the dimensions over which this operation is performed.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(5, 5); img[1, 1] = 1.; img[3:5, 3:5] .= 1.; img
5×5 Array{Float64,2}:
1.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0
0.0 0.0 1.0 1.0 1.0
0.0 0.0 1.0 1.0 1.0
julia> opening(img)
5×5 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0
0.0 0.0 1.0 1.0 1.0
0.0 0.0 1.0 1.0 1.0
```
"""
opening(img::AbstractArray, region=coords_spatial(img)) = opening!(copy(img), region)
opening!(img::AbstractArray, region=coords_spatial(img)) = dilate!(erode!(img, region),region)
"""
`imgc = closing(img, [region])` performs the `closing` morphology operation, equivalent to `erode(dilate(img))`.
`region` allows you to control the dimensions over which this operation is performed.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(7, 7); img[3:5, 3:5] .= 1.; img[4, 4] = 0.; img
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 0.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
julia> closing(img)
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
```
"""
closing(img::AbstractArray, region=coords_spatial(img)) = closing!(copy(img), region)
closing!(img::AbstractArray, region=coords_spatial(img)) = erode!(dilate!(img, region),region)
"""
`imgth = tophat(img, [region])` performs `top hat` of an image,
which is defined as the image minus its morphological opening.
`region` allows you to control the dimensions over which this operation is performed.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(5, 5); img[1, 1] = 1.; img[3:5, 3:5] .= 1.; img
5×5 Array{Float64,2}:
1.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0
0.0 0.0 1.0 1.0 1.0
0.0 0.0 1.0 1.0 1.0
julia> tophat(img)
5×5 Array{Float64,2}:
1.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0
```
"""
tophat(img::AbstractArray, region=coords_spatial(img)) = img - opening(img, region)
"""
`imgbh = bothat(img, [region])` performs `bottom hat` of an image,
which is defined as its morphological closing minus the original image.
`region` allows you to control the dimensions over which this operation is performed.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(7, 7); img[3:5, 3:5] .= 1.; img[4, 4] = 0.; img
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 0.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
julia> bothat(img)
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 1.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
```
"""
bothat(img::AbstractArray, region=coords_spatial(img)) = closing(img, region) - img
"""
`imgmg = morphogradient(img, [region])` returns morphological gradient of the image,
which is the difference between the dilation and the erosion of a given image.
`region` allows you to control the dimensions over which this operation is performed.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(7, 7); img[3:5, 3:5] .= 1.; img
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
julia> morphogradient(img)
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 1.0 1.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 0.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 1.0 1.0 0.0
0.0 1.0 1.0 1.0 1.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
```
"""
morphogradient(img::AbstractArray, region=coords_spatial(img)) = dilate(img, region) - erode(img, region)
"""
`imgml = morpholaplace(img, [region])` performs `Morphological Laplacian` of an image,
which is defined as the arithmetic difference between the internal and the external gradient.
`region` allows you to control the dimensions over which this operation is performed.
# Examples
```jldoctest; setup = :(using ImageMorphology), filter = r"Array{Float64,2}|Matrix{Float64}"
julia> img = zeros(7, 7); img[3:5, 3:5] .= 1.; img[4, 4] = 0.; img
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 1.0 0.0 1.0 0.0 0.0
0.0 0.0 1.0 1.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
julia> morpholaplace(img)
7×7 Array{Float64,2}:
0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 1.0 1.0 1.0 1.0 1.0 0.0
0.0 1.0 -1.0 -1.0 -1.0 1.0 0.0
0.0 1.0 -1.0 1.0 -1.0 1.0 0.0
0.0 1.0 -1.0 -1.0 -1.0 1.0 0.0
0.0 1.0 1.0 1.0 1.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0
```
"""
morpholaplace(img::AbstractArray, region=coords_spatial(img)) = dilate(img, region) + erode(img, region) - 2img
|
{"hexsha": "7e1da0757417f449c7ed8ca0275d552188108426", "size": 9392, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/dilation_and_erosion.jl", "max_stars_repo_name": "JiangXL/ImageMorphology.jl", "max_stars_repo_head_hexsha": "e668343ddbff2f750e451f6c73209ac3cd4443cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2018-09-10T23:42:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T13:41:49.000Z", "max_issues_repo_path": "src/dilation_and_erosion.jl", "max_issues_repo_name": "JiangXL/ImageMorphology.jl", "max_issues_repo_head_hexsha": "e668343ddbff2f750e451f6c73209ac3cd4443cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 46, "max_issues_repo_issues_event_min_datetime": "2017-08-13T15:53:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T06:39:00.000Z", "max_forks_repo_path": "src/dilation_and_erosion.jl", "max_forks_repo_name": "JiangXL/ImageMorphology.jl", "max_forks_repo_head_hexsha": "e668343ddbff2f750e451f6c73209ac3cd4443cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2017-08-14T03:33:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-27T20:09:44.000Z", "avg_line_length": 31.9455782313, "max_line_length": 112, "alphanum_fraction": 0.5854982964, "num_tokens": 5196}
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
C=======================================================================
SUBROUTINE MAKROW (LINK, XN, NUMCEN, NUMCOL,
& NEROW, IELROW, IXROW)
C=======================================================================
C --*** MAKROW *** (GEN3D) Order center elements in rows
C -- Written by Amy Gilkey - revised 04/26/88
C --
C --MAKROW orders the elements in all center blocks into rows. Only the
C --elements in the needed number of columns for each row are stored.
C --
C --Parameters:
C -- LINK - IN - the connectivity for the 2D elements, always 4 nodes
C -- XN - IN - the coordinates, needed to find node ordering
C -- NUMCEN - IN - the number of elements in the center blocks
C -- NUMCOL - IN - the number of columns in the center block
C -- NEROW - OUT - the number of element rows in the center blocks
C -- IELROW - IN/OUT - the element numbers of the center elements;
C -- returned as rows of elements
C -- IXROW - OUT - the IELROW index of the starting column for each row
C --
C --Common Variables:
C -- Uses IX1, IX2, IX3, IX4 of /CENPAR/
INCLUDE 'g3_cenpar.blk'
INTEGER LINK(4,*)
REAL XN(*)
INTEGER IELROW(*)
INTEGER IXROW(*)
C --Initialize the IELROW array
C -- IELROW(1..ICOL1-1) holds the ordered elements for previous rows
C -- IELROW(ICOL1..ICOL+NCOL-1) holds the ordered elements for this row
C -- IELROW(IFILL..NUMCEN) holds the elements yet to be ordered
IFILL = 1
ICOL1 = 1
10 CONTINUE
IF (IFILL .LE. NUMCEN) THEN
C --Pick leftmost unordered element for the next row
ICHK = IFILL
XMIN = XN(LINK(IX1,IELROW(IFILL)))
DO 20 I = IFILL+1, NUMCEN
IF (XN(LINK(IX1,IELROW(I))) .LT. XMIN) THEN
ICHK = I
XMIN = XN(LINK(IX1,IELROW(I)))
END IF
20 CONTINUE
NEROW = NEROW + 1
IC = IELROW(ICHK)
IELROW(ICHK) = IELROW(IFILL)
IFILL = IFILL + 1
IELROW(ICOL1) = IC
IXROW(NEROW) = ICOL1
NCOL = 1
c --Find elements to the left of the element until at leftmost column !#VAX
C --Find elements to the right of the element until at rightmost column
40 CONTINUE
IEL = IELROW(ICOL1+NCOL-1)
L2 = LINK(IX2,IEL)
L3 = LINK(IX3,IEL)
DO 50 ICHK = IFILL, NUMCEN
IC = IELROW(ICHK)
IF ((L2 .EQ. LINK(IX1,IC))
& .AND. (L3 .EQ. LINK(IX4,IC))) THEN
IELROW(ICHK) = IELROW(IFILL)
IFILL = IFILL + 1
IELROW(ICOL1+NCOL) = IC
NCOL = NCOL + 1
GOTO 40
END IF
50 CONTINUE
NCOL = MIN (NCOL, NUMCOL)
ICOL1 = ICOL1 + NCOL
GOTO 10
END IF
IXROW(NEROW+1) = ICOL1
RETURN
END
|
{"hexsha": "16cebdb02b030dbb07e6092e6a6b02ca2480338b", "size": 3139, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/seacas/applications/gen3d/g3_makrow.f", "max_stars_repo_name": "jschueller/seacas", "max_stars_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_stars_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_stars_count": 82, "max_stars_repo_stars_event_min_datetime": "2016-02-04T18:38:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:01:49.000Z", "max_issues_repo_path": "packages/seacas/applications/gen3d/g3_makrow.f", "max_issues_repo_name": "jschueller/seacas", "max_issues_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_issues_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2015-11-20T01:57:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:12:04.000Z", "max_forks_repo_path": "packages/seacas/applications/gen3d/g3_makrow.f", "max_forks_repo_name": "jschueller/seacas", "max_forks_repo_head_hexsha": "14c34ae08b757cba43a3a03ec0f129c8a168a9d3", "max_forks_repo_licenses": ["Python-2.0", "Zlib", "BSD-2-Clause", "MIT", "NetCDF", "BSL-1.0", "X11", "BSD-3-Clause"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-01-13T22:46:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:25:05.000Z", "avg_line_length": 32.0306122449, "max_line_length": 80, "alphanum_fraction": 0.5584581077, "num_tokens": 975}
|
# Determinant formula from Cavalieri's principle
```python
# setup SymPy
from sympy import *
init_printing()
Vector = Matrix
# setup plotting
#%matplotlib inline
%matplotlib notebook
import matplotlib.pyplot as mpl
from util.plot_helpers import plot_vec, plot_vecs, plot_line, plot_plane, autoscale_arrows
```
## Two dimensions
```python
a,b, c,d = symbols('a b c d')
# Consider the area of the parallelogram with sides:
u1 = Vector([a,b])
u2 = Vector([c,d])
# We can compute the area of the parallelogram by computing the determinant of
A = Matrix([[a,b],
[c,d]])
A.det()
```
### Cavalieri's principle
Cavalieri's principle says that shearing a parallelogram parallel to one of its sides leaves its area unchanged, so subtracting a multiple of one row from the other does not change the determinant. Mathematically, we have
$$
D(\vec{u}_1, \ \vec{u}_2)
=
D(\vec{u}_1 - \alpha \vec{u}_2, \ \vec{u}_2).
$$
```python
# choose alpha so A's top right entry will be zero
alpha = symbols('alpha')
alpha = b/d
A[0,:] = A[0,:] - alpha*A[1,:]
A
```
Computing the area is now the same as computing the product of the entries on the diagonal:
```python
A[0,0]*A[1,1]
```
```python
simplify( A[0,0]*A[1,1] )
```
**Intuition:** the coefficient $\alpha$ encodes something very important about the "alternating multi-linear" structure that determinants and cross products embody. In words, the choice of $\alpha$ and the resulting expression $a-\frac{bc}{d}$ corresponds to what happens to the first component of $\vec{u}_1$ when we make its second component zero.
(Yeah, I know, handwavy like crazy, but better than nothing.)
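A quick sanity check (added here, not part of the original notebook; it assumes only SymPy): the determinant is unchanged under the shear for *any* coefficient, not just the particular $\alpha = b/d$ chosen above.
```python
from sympy import symbols, Matrix, simplify
a, b, c, d, t = symbols('a b c d t')
M = Matrix([[a, b], [c, d]])
M_sheared = Matrix([[a - t*c, b - t*d], [c, d]])  # subtract t times row 2 from row 1
simplify(M.det() - M_sheared.det())  # -> 0 for every t
```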
## Three dimensions
```python
a,b,c, d,e,f, g,h,i = symbols('a b c d e f g h i')
# Consider the volume of the parallelepiped with sides:
u1 = Vector([a,b,c])
u2 = Vector([d,e,f])
u3 = Vector([g,h,i])
# We can compute the volume of the parallelepiped by computing the determinant of
A = Matrix([[a,b,c],
[d,e,f],
[g,h,i]])
A.det()
```
### Cavalieri's principle
This principle leads to the following property of the determinant
$$
D(\vec{u}_1, \ \vec{u}_2, \ \vec{u}_3)
=
D(\vec{u}_1, \ \vec{u}_2- k \vec{u}_3, \ \vec{u}_3)
=
D(\vec{u}_1 -\ell\vec{u}_2 - m\vec{u}_3 , \ \vec{u}_2-k \vec{u}_3, \ \vec{u}_3).
$$
In particular, we make the following choice for the coefficients
$$
D(\vec{u}_1, \vec{u}_2, \vec{u}_3)
=
D(\vec{u}_1 - \beta \vec{u}_3 - \gamma(\vec{u}_2 - \alpha\vec{u}_3), \ \vec{u}_2 - \alpha\vec{u}_3, \ \vec{u}_3).
$$
Choosing the right coefficients $\alpha$, $\beta$, and $\gamma$ can transform the matrix $A$ into a lower triangular form, which will make computing the determinant easier.
```python
alpha, beta, gamma = symbols('alpha beta gamma')
A
```
```python
# first get rid of f by subtracting third row from second row
alpha = f/i
A[1,:]= A[1,:] - alpha*A[2,:]
A
```
```python
# second get rid of c by subtracting third row from first row
beta = c/i
A[0,:]= A[0,:] - beta*A[2,:]
A
```
```python
# third get rid of b-ch/i by subtracting second row from first row
gamma = A[0,1]/A[1,1]
A[0,:]= A[0,:] - gamma*A[1,:]
A
```
Starting from the first row, the base length is given by the coefficient `A[0,0]`. The area of the parallelogram formed by the first two rows is `A[0,0]*A[1,1]` (we can ignore `A[1,0]`), and the overall volume is
```python
A[0,0]*A[1,1]*A[2,2]
```
```python
simplify( A[0,0]*A[1,1]*A[2,2] )
```
```python
# I still don't know how to motivate the recursive formula except to say it turns out that way...
# I tried to think about decomposing the problem into subparts,
# but I cannot motivate det(Ai + Aj + Ak) != det(Ai) + det(Aj) + det(Ak)
# where Ai is the same as A but with A[0,1] and A[0,2] set to zero.
```
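One possible way in (a sketch added here, not from the original notebook): the determinant *is* additive when you split a single row and keep the other rows fixed, which is the multilinearity the comment above is missing — summing the matrices `Ai + Aj + Ak` fails because it triples rows two and three. Expanding each piece along its single nonzero first-row entry then gives the familiar cofactor formula.
```python
from sympy import symbols, Matrix, simplify
a,b,c, d,e,f, g,h,i = symbols('a b c d e f g h i')
A  = Matrix([[a,b,c],[d,e,f],[g,h,i]])
# split only the first row; rows 2 and 3 are kept identical in each piece
A1 = Matrix([[a,0,0],[d,e,f],[g,h,i]])
A2 = Matrix([[0,b,0],[d,e,f],[g,h,i]])
A3 = Matrix([[0,0,c],[d,e,f],[g,h,i]])
simplify(A.det() - (A1.det() + A2.det() + A3.det()))  # -> 0
```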
```python
u1 = Vector([3,0,0])
u2 = Vector([2,2,0])
u3 = Vector([3,3,3])
plot_vecs(u1,u2,u3)
plot_vec(u1, at=u2, color='k')
plot_vec(u2, at=u1, color='b')
plot_vec(u1, at=u2+u3, color='k')
plot_vec(u2, at=u1+u3, color='b')
plot_vec(u1, at=u3, color='k')
plot_vec(u2, at=u3, color='b')
plot_vec(u3, at=u1, color='g')
plot_vec(u3, at=u2, color='g')
plot_vec(u3, at=u1+u2, color='g')
autoscale_arrows()
```
|
{"hexsha": "a9692634f7f8eb37499be0e652cc95f169b96713", "size": 132694, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "extra/Determinants.ipynb", "max_stars_repo_name": "ChidinmaKO/noBSLAnotebooks", "max_stars_repo_head_hexsha": "c0102473f1e6625fa5fb62768d4545059959fa26", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "extra/Determinants.ipynb", "max_issues_repo_name": "ChidinmaKO/noBSLAnotebooks", "max_issues_repo_head_hexsha": "c0102473f1e6625fa5fb62768d4545059959fa26", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extra/Determinants.ipynb", "max_forks_repo_name": "ChidinmaKO/noBSLAnotebooks", "max_forks_repo_head_hexsha": "c0102473f1e6625fa5fb62768d4545059959fa26", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 99.3218562874, "max_line_length": 65021, "alphanum_fraction": 0.7807587381, "converted": true, "num_tokens": 1407}
|
import numpy as np
from roadrunner import RoadRunner
from roadrunner.testing import TestModelFactory as tmf
from threading import Thread
from multiprocessing import Queue
import time
from platform import platform
import cpuinfo # pip install py-cpuinfo
import mpi4py # pip install mpi4py
NTHREADS = 16
NSIMS = 100000
NSTEPS = 11
if __name__ == '__main__':
# setup timing
start = time.time()
# get sbml to work with from one of our test modules
sbml = tmf.BatchImmigrationDeath03().str()
# create our roadrunner instance
r = RoadRunner(sbml)
# set up a stochastic simulation
r.setIntegrator('gillespie')
# set the seed for a reproducible example
gillespie_integrator = r.getIntegrator()
gillespie_integrator.seed = 1234
# the time it took in serial
serial_time = 64.92753291130066
# compute speedup
# speedup = serial_time / duration
#
# print(f'Took {duration} seconds to run', NSIMS, 'stochastic simulations with', NTHREADS, 'threads')
# print(f'Speed up is {speedup}')
# cpu_info = cpuinfo.get_cpu_info()
# print(f'Platform: {platform()}')
# print('python_version:', cpu_info['python_version'])
# print('Processor:', cpu_info['brand_raw'])
#
# '''
# Output:
# Took 38.20730757713318 seconds to run 100000 stochastic simulations with 16 thread
# Speed up is 1.6993485547293408
# Platform: Windows-10-10.0.22000-SP0
# python_version: 3.9.5.final.0 (64 bit)
# Processor: 11th Gen Intel(R) Core(TM) i9-11980HK @ 2.60GHz
# '''
|
{"hexsha": "7f123e257b5ac5ebbab9a9b6ada020a1fa13664b", "size": 1588, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/source/parallel/gillespie_simulations_mpi4py.py", "max_stars_repo_name": "sys-bio/roadrunner", "max_stars_repo_head_hexsha": "f0a757771ef0e337ddf7409284910e1627c3ad71", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2015-05-21T21:06:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T15:33:30.000Z", "max_issues_repo_path": "docs/source/parallel/gillespie_simulations_mpi4py.py", "max_issues_repo_name": "sys-bio/roadrunner", "max_issues_repo_head_hexsha": "f0a757771ef0e337ddf7409284910e1627c3ad71", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 689, "max_issues_repo_issues_event_min_datetime": "2015-01-27T21:45:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T23:47:28.000Z", "max_forks_repo_path": "docs/source/parallel/gillespie_simulations_mpi4py.py", "max_forks_repo_name": "sys-bio/roadrunner", "max_forks_repo_head_hexsha": "f0a757771ef0e337ddf7409284910e1627c3ad71", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2015-06-25T22:57:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-06T02:03:28.000Z", "avg_line_length": 18.9047619048, "max_line_length": 105, "alphanum_fraction": 0.6807304786, "include": true, "reason": "import numpy", "num_tokens": 445}
|
#############################################################
#
# Spam Reporting Functions
#
#############################################################
function post_users_report_spam(; options=Dict{AbstractString, AbstractString}())
r = post_oauth("https://api.twitter.com/1.1/users/report_spam.json", options)
#return to_USERS(r)
return r.status == 200 ? to_USERS(Requests.json(r)) : r
end
|
{"hexsha": "07928b44ca2348e3c3c1268e6a47d67c6cbbd896", "size": 410, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/spam.jl", "max_stars_repo_name": "JuliaPackageMirrors/Twitter.jl", "max_stars_repo_head_hexsha": "b4f2c07e1197d63ba7d91600986129a931ba11bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/spam.jl", "max_issues_repo_name": "JuliaPackageMirrors/Twitter.jl", "max_issues_repo_head_hexsha": "b4f2c07e1197d63ba7d91600986129a931ba11bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/spam.jl", "max_forks_repo_name": "JuliaPackageMirrors/Twitter.jl", "max_forks_repo_head_hexsha": "b4f2c07e1197d63ba7d91600986129a931ba11bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2857142857, "max_line_length": 81, "alphanum_fraction": 0.5, "num_tokens": 78}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Stephane Caron <stephane.caron@normalesup.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import openravepy
import os
import pickle
import time
from robot import RaveRobotModel
from rrt import RRT
from state import TorqueSampleState
from tunings import Tunings
def get_openrave_robot(display=False):
rave_env = openravepy.Environment()
fpath = 'model/pendulum.env.xml'
if os.path.isfile('./' + fpath):
rave_env.Load('./' + fpath)
elif os.path.isfile('../' + fpath):
rave_env.Load('../' + fpath)
else:
assert False, "Where is %s?" % fpath
if display:
rave_env.SetViewer('qtcoin')
viewer = rave_env.GetViewer()
cam_trans = numpy.array([
[0, 0, -1, 1.1], [1, 0, 0, 0],
[0, -1, 0, 0.3], [0, 0, 0, 1]])
viewer.SetCamera(cam_trans)
rave_robot = rave_env.GetRobots()[0]
rave_robot.GetEnv().GetPhysicsEngine().SetGravity([0, 0, -9.81])
rave_robot.SetTransform(numpy.array([
[0, 0, 1, 0], [0, 1, 0, 0],
[-1., 0, 0, 0.3], [0, 0, 0, 1]]))
return rave_robot
class TestBed(object):
def __init__(self, dump_file=None, tunings_dict=None, custom_tunings=None,
StatePlannerList=[], display=False):
self.tunings = Tunings(tunings_dict)
if custom_tunings:
self.tunings.update(custom_tunings)
numpy.random.seed(self.tunings.random_seed)
self.start_time = time.time()
self.pid = os.getpid()
self.dump_file = dump_file
self.plan_log = []
self.robot = RaveRobotModel(get_openrave_robot(display=display),
self.tunings)
if len(StatePlannerList) < 1:
StatePlannerList.append((RRT, TorqueSampleState))
self.Planners = self._curry_planners(StatePlannerList)
def _curry_planners(self, StatePlannerList):
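# Bind the robot, tunings, and start/goal states into each planner class
# so that planner instances can later be constructed with no arguments.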
def porky_curry(StateClass, PlannerClass):
tunings = self.tunings
State = StateClass.Factory(tunings)
init_state = State(q=[0., 0.], qd=[0., 0.])
goal_state = State(q=[-numpy.pi, 0.], qd=[0., 0.])
robot = self.robot
class CurriedRRT(PlannerClass):
def __init__(self):
kron = [robot, init_state, goal_state, tunings]
return super(CurriedRRT, self).__init__(*kron)
return CurriedRRT
CurriedPlanners = {}
for (State, Planner) in StatePlannerList:
pname = Planner.get_name()
CurriedPlanners[pname] = porky_curry(State, Planner)
return CurriedPlanners
def get_new_planner(self, pname):
PClass = self.Planners[pname]
return PClass()
def run(self, rand_poses, rand_velocities):
sims_per_case = self.tunings.sims_per_case
start_index = 0
if 'start_index' in self.tunings.__dict__.keys():
start_index = self.tunings.start_index
with open('../logs/process-%d.log' % self.pid, 'w') as fh:
def log_msg(msg):
dt = time.time() - self.start_time
msg = msg.replace("xxx", "%.2f" % dt)
fh.write("%s\n" % msg)
fh.flush()
log_msg("Test Case: %s\n" % str(self.tunings))
log_msg("Start:")
for rr in xrange(sims_per_case):
r = rr + start_index
run_dict = {}
log_msg(" | Run #%d" % r)
for pname, PClass in self.Planners.iteritems():
log_msg(" | | [xxx] %s: starting..." % pname)
pinst = PClass()
pinst.run(rand_poses[r], rand_velocities[r])
log_msg(" | | [xxx] %s: done." % pname)
run_dict[pname] = pinst.get_dict_repr()
self.plan_log.append(run_dict)
self.dump()
log_msg("All done.\n")
def dump(self):
assert self.dump_file is not None
tun_dic = self.tunings.__dict__.copy()
dump_dic = {'tunings': tun_dic, 'planners': self.plan_log}
with open(self.dump_file, 'w') as f:
pickle.dump(dump_dic, f)
sample_tunings = {
'spatial_prec': 1e-2,
'time_prec': 1e-2,
'max_iter': 1000,
'max_simu_duration': 20,
'modulo': 5,
'rrt_neigh_size': 1,
'nb_traj_samples': 1,
'max_traj_duration': 1.,
'torque_limits': [13., 5.],
'Vmax': 50,
'lqr_torque_weight': 10.,
}
|
{"hexsha": "a19a80a0d14f69fe4182b3ac3c6412da2290e4b1", "size": 5158, "ext": "py", "lang": "Python", "max_stars_repo_path": "rrtcmp/testbed.py", "max_stars_repo_name": "Tastalian/avp-rrt-rss-2013", "max_stars_repo_head_hexsha": "d3d9b50bb582c23a4ee83408b26bcede4d84469e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-08-21T15:25:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-24T02:52:47.000Z", "max_issues_repo_path": "rrtcmp/testbed.py", "max_issues_repo_name": "Tastalian/avp-rrt-rss-2013", "max_issues_repo_head_hexsha": "d3d9b50bb582c23a4ee83408b26bcede4d84469e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rrtcmp/testbed.py", "max_forks_repo_name": "Tastalian/avp-rrt-rss-2013", "max_forks_repo_head_hexsha": "d3d9b50bb582c23a4ee83408b26bcede4d84469e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3287671233, "max_line_length": 78, "alphanum_fraction": 0.576967817, "include": true, "reason": "import numpy", "num_tokens": 1373}
|
"""
Module for fitting the aABC algorithm with two-timescales generative models
"""
from simple_abc_only2Tau import Model, basic_abc, pmc_abc
from generative_models import *
from basic_functions import *
from distance_functions import *
from summary_stats import *
import numpy as np
from scipy import stats
def fit_withABC_2Tau(MyModel, data_ac, priorDist, inter_save_direc, inter_filename, datasave_path, filenameSave,\
epsilon_0, min_samples, steps, minAccRate, parallel = False, n_procs = None, disp = None,\
resume = None):
"""Fits data autocorrelation with a given two-timescales generative model and saves the results.
Parameters
-----------
MyModel : object
Model object containing the generative model and distance functions (check example scripts or tutorials).
data_ac : 1d array
autocorrelation of the data to be fitted (check example scripts or tutorials).
priorDist : list object
Prior distributions for aABC fitting (check example scripts or tutorials).
inter_save_direc : string
directory for saving intermediate results after running each step.
inter_filename : string
filename for saving intermediate results after running each step.
datasave_path : string
directory for saving final results.
filenameSave : string
filename for saving final results, number of steps and maximumTimeLag will be attached to it.
epsilon_0 : float
initial error threshold
min_samples : int
number of accepted samples in posterior distribution for each step of the aABC.
steps : int
maximum number of steps (iterations) for running the aABC algorithm.
minAccRate : float
minimum proportion of samples accepted in each step, between 0 and 1.
parallel : boolean, default False
if run parallel processing.
n_procs : int, optional, default None
number of cores used for parallel processing.
disp : float, default None
The value of dispersion parameter if computed with the grid search method.
resume : numpy record array, optional
A record array of a previous pmc sequence to continue the sequence on.
Returns
-------
abc_results : object
A record containing all aABC output from all steps, including 'theta accepted', 'epsilon'.
final_step : int
Last step of running the aABC algorithm.
"""
#Initialize model object
model = MyModel()
model.set_prior(priorDist)
# give the model our observed data
model.set_data(data_ac)
data = data_ac
np.warnings.filterwarnings('ignore')
# fit the model
abc_results = pmc_abc(model, data, inter_save_direc, inter_filename, epsilon_0 = epsilon_0,\
min_samples = min_samples, steps = steps, parallel = parallel, n_procs = n_procs,\
minAccRate = minAccRate, resume = resume)
# finding the final step and save the results
final_step = steps
for i in range(len(abc_results)):
if abc_results[i][-1] == None:
final_step = i
break
filenameSave = filenameSave + '_steps' + str(final_step)
np.save(datasave_path + filenameSave, abc_results)
print('END OF FITTING!!!')
print('***********************')
return abc_results, final_step
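# Example usage (a sketch, not part of the original module; the model class,
# priors and paths below are hypothetical placeholders):
#
# priors = [stats.uniform(0, 60), stats.uniform(0, 100), stats.uniform(0, 1)]
# abc_results, final_step = fit_withABC_2Tau(MyTwoTauModel, data_ac, priors,
#     './inter/', 'run1_inter', './results/', 'run1_fit',
#     epsilon_0=1.0, min_samples=100, steps=30, minAccRate=0.003)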
|
{"hexsha": "c3ec06e34b2992cb7c5f24115ba22f2114639740", "size": 3353, "ext": "py", "lang": "Python", "max_stars_repo_path": "abcTau/abc2Tau.py", "max_stars_repo_name": "roxana-zeraati/abcTau", "max_stars_repo_head_hexsha": "ce4352062ee7821c80ac1c660641f41fef023e14", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-06-29T14:36:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T18:18:10.000Z", "max_issues_repo_path": "abcTau/abc2Tau.py", "max_issues_repo_name": "roxana-zeraati/abcTau", "max_issues_repo_head_hexsha": "ce4352062ee7821c80ac1c660641f41fef023e14", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "abcTau/abc2Tau.py", "max_forks_repo_name": "roxana-zeraati/abcTau", "max_forks_repo_head_hexsha": "ce4352062ee7821c80ac1c660641f41fef023e14", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-06-03T13:53:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T18:18:01.000Z", "avg_line_length": 36.0537634409, "max_line_length": 113, "alphanum_fraction": 0.6737250224, "include": true, "reason": "import numpy,from scipy", "num_tokens": 725}
|
# -*- coding: utf-8 -*-
"""
@author: fornax
"""
import numpy as np
import pandas as pd
def get_npwd2881_features(df):
"""
Extracts AOOO commands from the data frame that are
correlated with the NPWD2881 power line. Those commands are
then used as training features for the final predictions.
:param df: a data frame containing all features, with the merged DMOP columns added.
:return: a data frame containing selected features used for training of
NPWD2881 power line.
"""
aooo_list = [
['AOOO_current_F03A1',3,True],
['AOOO_current_F04A0',0,True],
['AOOO_current_F05A0',3,False],
['AOOO_current_F06A0',0,False],
['AOOO_current_F100A',0,False],
['AOOO_current_F100C',0,False],
['AOOO_current_F20A1',2,True],
['AOOO_current_F20D1',2,True],
['AOOO_current_F22A0',2,True],
['AOOO_current_F32A0',2,True],
['AOOO_current_F62A0',3,False],
['AOOO_current_F63A0',3,False],
['AOOO_current_F64A0',3,False],
['AOOO_current_F65A0',3,False],
['AOOO_current_F66A0',3,False],
['AOOO_current_F67A0',3,False],
['AOOO_current_F68A0',3,False],
['AOOO_current_F77A0',3,False],
['AOOO_current_F02A1',3,False],
['AOOO_current_F01D0',3,True],
['AOOO_current_F01D1',3,True],
['AOOO_current_F02A0',3,False],
['AOOO_current_F03A0',3,False],
['AOOO_current_F32R0',3,False],
['AOOO_current_F33A0',3,False],
['AOOO_current_F34A0',4,False],
['AOOO_current_F15A0',3,True],
['AOOO_current_F100B',1,True],
['AOOO_current_F15B0',1,True],
['AOOO_current_F22A1',1,True],
['AOOO_current_F22R1',0,False],
['AOOO_current_F23A0',0,True],
['AOOO_current_F24A0',1,False],
['AOOO_current_F100D',0,False],
['ATTT_current_F321D_F321R',1,False],
]
n_cols = np.shape(aooo_list)[0]
aooo_data = np.zeros([df.mission_time.size,n_cols])
aooo_cols = []
it = 0
# iterate over all selected features
for l in aooo_list:
offset = l[1]
v = df[l[0]].values
if(l[2] == True): # offset the signal and pad it while translating
for k in range(1,offset+1):
v2 = np.append(df[l[0]].values[k:],[0]*k)
v = v + v2
else: # just offset the signal
v = np.append(df[l[0]].values[offset:],[0]*offset)
aooo_cols.append(l[0])
aooo_data[:,it] = v
it += 1
pd_aooo = pd.DataFrame(aooo_data,columns=aooo_cols)
return pd_aooo
def correct_dmop(df):
"""
Function removes unwanted subsystem/command combinations, and merges commands
within a single subsystem as indicated for merging by manual inspection (see README).
:param df: data frame with raw DMOP command columns
:return: input data frame with the indicated commands merged and unwanted columns removed
"""
trash, merge, atmb_vals = correction_list(df)
to_delete = [i for i in trash.values() for i in i]
# merging
to_merge = [i for i in merge.values() if len(i)>1]
to_merge = [i for i in to_merge for i in i]
for cols in to_merge:
name = '_'.join(cols[0].split('_')[:-1] + [i.split('_')[-1] for i in cols])
df[name] = 0
for col in cols:
df[name] += df[col]
to_delete.append(col)
# deleting
for col in to_delete:
if col in df.columns:
subsys = col.split('_')[0]
command = col.split('_')[-1]
col_related = filter(lambda x: subsys in x and command in x and len(x.split('_')) == 3, df.columns)
df.drop(col_related, axis=1, inplace=True)
# ATMB
df['ATMB_temp'] = atmb_vals
return df
def correction_list(pd_data):
"""
Function returns dictionaries of subsystem/command combinations that should
be removed from the data, or tuples of commands to merge together into
a single feature. Also, a temperature-based feature is extracted from
the ATMB subsystem.
:param pd_data: data frame with ATMB subsystem
:return: dictionaries
"""
trash = {}
merge = {}
# --------------------------------------------------
trash['ATTT'] = ['ATTT_current_260A',
'ATTT_current_F301A',
'ATTT_current_F301B',
'ATTT_current_F301E',
'ATTT_current_F301F',
'ATTT_current_F301I',
'ATTT_current_F301J',
'ATTT_current_F310A',
'ATTT_current_F310B',
'ATTT_current_F410B',
'ATTT_current_F420B']
merge['ATTT'] = [['ATTT_current_305C','ATTT_current_305O','ATTT_current_305P','ATTT_current_306C','ATTT_current_306P'],
['ATTT_current_309A','ATTT_current_309B','ATTT_current_309P','ATTT_current_309Q'],
['ATTT_current_F321A','ATTT_current_F321P'],
['ATTT_current_F321D','ATTT_current_F321R']
]
# --------------------------------------------------
trash['ASXX'] = []
merge['ASXX'] = [['ASXX_current_303A','ASXX_current_304A'],
['ASXX_current_307A','ASXX_current_308A'],
['ASXX_current_382C','ASXX_current_383C','ASXX_current_382S','ASXX_current_383S','ASXX_current_382R']]
# --------------------------------------------------
trash['AVVV'] = ['AVVV_current_01A0',
'AVVV_current_02A0',
'AVVV_current_03A0',
'AVVV_current_03B0',
'AVVV_current_05A0',
'AVVV_current_06A0',
'AVVV_current_07A0']
merge['AVVV'] = [[]]
# --------------------------------------------------
trash['AHHH'] = ['AHHH_current_C05A1',
'AHHH_current_C25A1',
'AHHH_current_C532E',
'AHHH_current_F04P3',
'AHHH_current_F095B',
'AHHH_current_F095C',
'AHHH_current_F11A2',
'AHHH_current_F20A1',
'AHHH_current_F23P1',
'AHHH_current_F50A2'
]
merge['AHHH'] = [['AHHH_current_F01A2','AHHH_current_F01P1','AHHH_current_F01R1'],
['AHHH_current_F01S0'],
['AHHH_current_F02A1','AHHH_current_F02P1'],
['AHHH_current_F03A2'],
['AHHH_current_F04A3'],
['AHHH_current_F05A2'],
['AHHH_current_F06A1','AHHH_current_F06P1','AHHH_current_F06R1'],
['AHHH_current_F06S0'],
['AHHH_current_F11A1'],
['AHHH_current_F13A1'],
['AHHH_current_F17A1','AHHH_current_F17B1','AHHH_current_F17C2'],
['AHHH_current_F19A1']]
# --------------------------------------------------
trash['AOOO'] = []
merge['AOOO'] = [[]]
# --------------------------------------------------
trash['AMMM'] = [
'AMMM_current_F01A0',
'AMMM_current_F01B0',
'AMMM_current_F01R0',
'AMMM_current_F71A0',
'AMMM_current_F71AF',
'AMMM_current_F73A0',
'AMMM_current_F21A0',
'AMMM_current_F22A0',
'AMMM_current_F06B0',
'AMMM_current_F06R0',
'AMMM_current_F13A0',
'AMMM_current_F14A0',
'AMMM_current_F26A0',
'AMMM_current_F32A0'
]
merge['AMMM'] = [
['AMMM_current_F04A0','AMMM_current_F40A0'],
['AMMM_current_F05A0','AMMM_current_F40C0'],
['AMMM_current_F19A0'],
['AMMM_current_F51A0','AMMM_current_F52A0','AMMM_current_F52D1','AMMM_current_F52D2','AMMM_current_F52D3','AMMM_current_F52D4'],
['AMMM_current_F10A0','AMMM_current_F11A0','AMMM_current_F12A0','AMMM_current_F18A0','AMMM_current_F20A0','AMMM_current_F23A0','AMMM_current_F24A0','AMMM_current_F40B0']
]
# --------------------------------------------------
trash['APSF'] = [
'APSF_current_12B1',
'APSF_current_12C1',
'APSF_current_12D1',
'APSF_current_12E1',
'APSF_current_12G1',
'APSF_current_82B1',
'APSF_current_83A1',
'APSF_current_83B1',
'APSF_current_88A1',
'APSF_current_29B1',
'APSF_current_15A2',
'APSF_current_16A2',
'APSF_current_22A1',
'APSF_current_01A2',
'APSF_current_02A1',
'APSF_current_03A3',
'APSF_current_13A3',
'APSF_current_14A2',
'APSF_current_23B1',
'APSF_current_28A1',
'APSF_current_30A1',
'APSF_current_30B2',
'APSF_current_30C2',
'APSF_current_31A1',
'APSF_current_31B1',
'APSF_current_32A1',
'APSF_current_33A1',
'APSF_current_35A1',
'APSF_current_37A1',
'APSF_current_38A1',
'APSF_current_40A1',
'APSF_current_82A1',
'APSF_current_89A1'
]
merge['APSF'] = [
['APSF_current_06A1','APSF_current_06A2','APSF_current_60B0'],
['APSF_current_50A2'],
['APSF_current_12H1'],
['APSF_current_28A1','APSF_current_60A0','APSF_current_60D0']]
# --------------------------------------------------
trash['ASSS'] = [
'ASSS_current_F57A0',
'ASSS_current_F58A0',
'ASSS_current_F59A0',
'ASSS_current_F60A0',
'ASSS_current_F63A0',
]
merge['ASSS'] = [
['ASSS_current_F01A0','ASSS_current_F01P0'],
['ASSS_current_F06A0','ASSS_current_F06P0'],
['ASSS_current_F62A0'],
['ASSS_current_F53A0','ASSS_current_F55A0','ASSS_current_F56A0']
]
# --------------------------------------------------
trash['AXXX'] = [
'AXXX_current_301A',
'AXXX_current_301B',
'AXXX_current_301C',
'AXXX_current_301E',
'AXXX_current_302E',
'AXXX_current_305A',
'AXXX_current_305B',
'AXXX_current_380A',
'AXXX_current_380B',
'AXXX_current_380C',
'AXXX_current_380R',
'AXXX_current_381A',
'AXXX_current_381B',
'AXXX_current_381C'
]
merge['AXXX'] = [[]]
# --------------------------------------------------
trash['AACF'] = [
'AACF_current_319O',
'AACF_current_325B',
'AACF_current_E90A',
'AACF_current_E90B',
'AACF_current_U07D',
'AACF_current_M13A',
'AACF_current_E92A',
'AACF_current_325E',
'AACF_current_325C',
'AACF_current_325D',
'AACF_current_M03A',
]
merge['AACF'] = [
['AACF_current_M21A','AACF_current_M22A','AACF_current_E70A',],
['AACF_current_M02A'],
['AACF_current_M06A'],
['AACF_current_M07A'],
['AACF_current_E03A'],
['AACF_current_E05A']
]
# --------------------------------------------------
# --------------------------------------------------
# This subsystem looks like a temperature indicator
# We delete all commands and process it into a single "temperature" feature
# --------------------------------------------------
trash['ATMB'] = ['ATMB_current_003K'
,'ATMB_current_022K'
,'ATMB_current_045K'
,'ATMB_current_057K'
,'ATMB_current_076K'
,'ATMB_current_091K'
,'ATMB_current_114K'
,'ATMB_current_152K'
,'ATMB_current_182K'
,'ATMB_current_228K']
merge['ATMB'] = [[]]
# Creating a single signal
atmb_cols = [i for i in pd_data.columns if i.startswith('ATMB_current_')]
atmb_vals = np.copy(pd_data[atmb_cols[0]])*0
temps = [3,22,45,57,76,91,114,152,182,228]
for i in range(1, np.size(atmb_cols)):
atmb_vals += pd_data[atmb_cols[i]]*temps[i-1]
# --------------------------------------------------
return trash, merge, atmb_vals
|
{"hexsha": "92d1b590f0b7d0ad65ca4bd2690493b3302d7583", "size": 11713, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing/dmop_analysis.py", "max_stars_repo_name": "fornaxco/Mars-Express-Challenge", "max_stars_repo_head_hexsha": "4e0dff9909df0d10e507083af59326b3342d67fe", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-08-14T02:40:47.000Z", "max_stars_repo_stars_event_max_datetime": "2016-09-28T05:55:23.000Z", "max_issues_repo_path": "preprocessing/dmop_analysis.py", "max_issues_repo_name": "fornaxco/Mars-Express-Challenge", "max_issues_repo_head_hexsha": "4e0dff9909df0d10e507083af59326b3342d67fe", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing/dmop_analysis.py", "max_forks_repo_name": "fornaxco/Mars-Express-Challenge", "max_forks_repo_head_hexsha": "4e0dff9909df0d10e507083af59326b3342d67fe", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-08-14T02:45:41.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-27T08:59:10.000Z", "avg_line_length": 35.0688622754, "max_line_length": 173, "alphanum_fraction": 0.5549389567, "include": true, "reason": "import numpy", "num_tokens": 3471}
|
include("viral_load_infectivity_testpos.jl")
const scen_names = ["(b) Status Quo","(c1) Fortnightly concurrent PCR","(c2) Fortnightly random PCR", "(d) 3 LFDs per week","(e) 2 LFDs per week","(f) Daily LFDs","(g) Daily LFDs + PCR","(h) 3 LFDs + PCR",
"(a) No testing"]
function scenario_1_setup(Ndays::Int) #2 LFDs + 1 concurrent PCR
#assume day0 is random for each person
test_day0 = rand(1:7)
LFD_test_days1 = collect((test_day0+3):7:Ndays)
LFD_test_days2 = collect(test_day0:7:Ndays)
PCR_test_days = collect(test_day0:7:Ndays)
PCR_delays = draw_PCR_delays(length(PCR_test_days))
PCR_result_days = PCR_test_days .+ PCR_delays
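#if the weekly cycle starts late, backfill the earlier LFD (test_day0+3-7 = test_day0-4 keeps the 7-day spacing)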
if test_day0 > 4
push!(LFD_test_days1,test_day0-4)
end
TestDays = vcat(LFD_test_days1,LFD_test_days2,PCR_test_days)
ResultDays = vcat(LFD_test_days1,LFD_test_days2,PCR_result_days)
TestTypes = vcat(ones(length(LFD_test_days1) + length(LFD_test_days2)),
zeros(length(PCR_test_days)))
itd = sortperm(ResultDays)
return TestDays[itd], TestTypes[itd], ResultDays[itd]
end
function scenario_2_setup(Ndays::Int) #2 LFDs + 1 concurrent fortnightly PCR
#assume day0 is random for each person
test_day0 = rand(1:14)
LFD_test_days1 = collect((test_day0+3):7:Ndays)
LFD_test_days2 = collect(test_day0:7:Ndays)
PCR_test_days = collect(test_day0:14:Ndays)
PCR_delays = draw_PCR_delays(length(PCR_test_days))
PCR_result_days = PCR_test_days .+ PCR_delays
if test_day0 > 4
push!(LFD_test_days1,test_day0-4)
if test_day0 > 7
push!(LFD_test_days1,test_day0-7)
if test_day0 > 11
push!(LFD_test_days1,test_day0-11)
end
end
end
TestDays = vcat(LFD_test_days1,LFD_test_days2,PCR_test_days)
ResultDays = vcat(LFD_test_days1,LFD_test_days2,PCR_result_days)
TestTypes = vcat(ones(length(LFD_test_days1) + length(LFD_test_days2)),
zeros(length(PCR_test_days)))
itd = sortperm(ResultDays)
return TestDays[itd], TestTypes[itd], ResultDays[itd]
end
function scenario_3_setup(Ndays::Int) #2 LFDs + 1 random fortnightly PCR
LFDtest_day0 = rand(1:7)
PCRtest_day0 = rand(1:14)
LFD_test_days1 = collect((LFDtest_day0+3):7:Ndays)
LFD_test_days2 = collect(LFDtest_day0:7:Ndays)
PCR_test_days = collect(PCRtest_day0:14:Ndays)
PCR_delays = draw_PCR_delays(length(PCR_test_days))
PCR_result_days = PCR_test_days .+ PCR_delays
if LFDtest_day0 > 4
push!(LFD_test_days1,LFDtest_day0-4)
end
TestDays = vcat(LFD_test_days1,LFD_test_days2,PCR_test_days)
ResultDays = vcat(LFD_test_days1,LFD_test_days2,PCR_result_days)
TestTypes = vcat(ones(length(LFD_test_days1) + length(LFD_test_days2)),
zeros(length(PCR_test_days)))
itd = sortperm(ResultDays)
return TestDays[itd], TestTypes[itd], ResultDays[itd]
end
function scenario_4_setup(Ndays::Int) #3 LFDs
test_day0 = rand(1:7)
LFD_test_days1 = collect((test_day0+4):7:Ndays)
LFD_test_days2 = collect((test_day0+2):7:Ndays)
LFD_test_days3 = collect(test_day0:7:Ndays)
if test_day0 > 3
push!(LFD_test_days2,test_day0-3)
if test_day0 > 5
push!(LFD_test_days1,test_day0-5)
end
end
TestDays = vcat(LFD_test_days1,LFD_test_days2,LFD_test_days3)
TestTypes = ones(length(TestDays))
itd = sortperm(TestDays)
return TestDays[itd], TestTypes[itd], TestDays[itd] #result days same as test days
end
function scenario_5_setup(Ndays::Int) #2 LFDs
test_day0 = rand(1:7)
LFD_test_days1 = collect((test_day0+3):7:Ndays)
LFD_test_days2 = collect(test_day0:7:Ndays)
if test_day0 > 4
push!(LFD_test_days1,test_day0-4)
end
TestDays = vcat(LFD_test_days1,LFD_test_days2)
TestTypes = ones(length(TestDays))
itd = sortperm(TestDays)
return TestDays[itd], TestTypes[itd], TestDays[itd] #result days same as test days
end
function scenario_6_setup(Ndays::Int) #7 LFDs
TestDays = collect(1:Ndays)
TestTypes = ones(length(TestDays))
return TestDays, TestTypes, TestDays #result days same as test days
end
function scenario_7_setup(Ndays::Int) #Daily LFDs + 1 concurrent PCR
#assume day0 is random for each person
test_day0 = rand(1:7)
LFD_test_days = collect(1:Ndays)
PCR_test_days = collect(test_day0:7:Ndays)
PCR_delays = draw_PCR_delays(length(PCR_test_days))
PCR_result_days = PCR_test_days .+ PCR_delays
TestDays = vcat(LFD_test_days,PCR_test_days)
ResultDays = vcat(LFD_test_days,PCR_result_days)
TestTypes = vcat(ones(length(LFD_test_days)), zeros(length(PCR_test_days)))
itd = sortperm(ResultDays)
return TestDays[itd], TestTypes[itd], ResultDays[itd]
end
function scenario_8_setup(Ndays::Int) #3 LFDs + 1 concurrent PCR
#assume day0 is random for each person
test_day0 = rand(1:7)
LFD_test_days1 = collect((test_day0+4):7:Ndays)
LFD_test_days2 = collect((test_day0+2):7:Ndays)
LFD_test_days3 = collect(test_day0:7:Ndays)
if test_day0 > 3
push!(LFD_test_days2,test_day0-3)
if test_day0 > 5
push!(LFD_test_days1,test_day0-5)
end
end
LFD_test_days = vcat(LFD_test_days1,LFD_test_days2,LFD_test_days3)
PCR_test_days = collect(test_day0:7:Ndays)
PCR_delays = draw_PCR_delays(length(PCR_test_days))
PCR_result_days = PCR_test_days .+ PCR_delays
TestDays = vcat(LFD_test_days,PCR_test_days)
ResultDays = vcat(LFD_test_days,PCR_result_days)
TestTypes = vcat(ones(length(LFD_test_days)), zeros(length(PCR_test_days)))
itd = sortperm(ResultDays)
return TestDays[itd], TestTypes[itd], ResultDays[itd]
end
"""
init_testing_random!(sim::Dict, testing_params::Dict, Conf_PCR::Bool)
Function to initialise test days, test types and test-positive probabilities
## Arguments:
`sim` = Dict generated by `init_VL_and_infectiousness(Ntot::Int, Pisol::Float64)`
`testing_params` = Dict containing testing options, must have:
"scenario" => `String` = One of the scenario names in `scen_names`
"comply_prob" => `Float64` = Chance of performing next LFD test (random)
"aon_compliance" => `Bool` = If true, LFD compliance is all-or-nothing per person
`Conf_PCR` = Whether each positive LFD is followed by a confirmatory PCR
## Modifies:
`sim` = test days, test types, result days and test-positive probabilities are added in place
## See also:
`init_VL_and_infectiousness(Ntot::Int, Pisol::Float64)`
"""
function init_testing_random!(sim::Dict, testing_params::Dict, Conf_PCR::Bool)
#add test days, test types and test positivity profiles to simulation Dict
#testing_params contains "scenario" (one of scen_names), "comply_prob" and "aon_compliance"
Ndays = length.(sim["VL_profiles"])
sim["will_isolate_with_test"] = ones(Bool,sim["Ntot"])
sim["testing_paused"] = zeros(Bool,sim["Ntot"])
sim["resume_testing"] = -ones(Int64,sim["Ntot"])
sim["test_days"] = Array{Array{Int64,1},1}(undef,sim["Ntot"])
sim["test_types"] = Array{Array{Int64,1},1}(undef,sim["Ntot"])
sim["test_result_days"] = Array{Array{Int64,1},1}(undef,sim["Ntot"])
sim["conf_PCR"] = Array{Array{Bool,1},1}(undef,sim["Ntot"])
TestOutput = []
if testing_params["scenario"] == scen_names[1]
TestOutput = scenario_1_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[2]
TestOutput = scenario_2_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[3]
TestOutput = scenario_3_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[4]
TestOutput = scenario_4_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[5]
TestOutput = scenario_5_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[6]
TestOutput = scenario_6_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[7]
TestOutput = scenario_7_setup.(Ndays)
elseif testing_params["scenario"] == scen_names[8]
TestOutput = scenario_8_setup.(Ndays)
end
for i in 1:sim["Ntot"]
sim["test_days"][i] = TestOutput[i][1]
sim["test_types"][i] = TestOutput[i][2]
sim["test_result_days"][i] = TestOutput[i][3]
sim["conf_PCR"][i] = zeros(Bool,length(sim["test_days"][i]))
if Conf_PCR
sim["conf_PCR"][i][sim["test_types"][i] .== 1] .= true
end
end
sim["test_pos_prob"] = get_test_probs.(sim["VL_profiles"], sim["test_days"],
sim["test_types"]; Do_LFD_test_prob = testing_params["comply_prob"],
all_or_nothing = testing_params["aon_compliance"])
#assuming random compliance
end
"""
run_testing_scenarios_impact(Ntot::Int, Pisol::Float64, LFD_comply::Float64, Conf_PCR::Bool)
## Arguments:
`Ntot` = Number of profiles to generate
`Pisol` = Probability of isolating at symptom onset
`LFD_comply` = probability of doing each LFD test
`Conf_PCR` = Do confirmatory PCR after positive LFD (if negative stop isolating early)
## Returns:
`sim_scens` = Array of Dicts, one for each scenario simulated
`sim_names` = Names of scenarios simulated
"""
function run_testing_scenarios_impact(Ntot::Int, Pisol::Float64, LFD_comply::Float64,
Conf_PCR::Bool; Day5release_bool::Bool = false,
LFD_AllorNone::Bool = false)
sim_baseline = init_VL_and_infectiousness(Ntot, Pisol)
Nscens = length(scen_names)
sim_scens = Array{Dict,1}(undef,Nscens)
for i in 1:(Nscens-1)
sim_scens[i] = copy(sim_baseline)
init_testing_random!(sim_scens[i], Dict("scenario"=>scen_names[i],
"comply_prob"=>LFD_comply, "aon_compliance"=>LFD_AllorNone), Conf_PCR)
sim_scens[i]["inf_profile_isolation"] = copy.(sim_scens[i]["infection_profiles"])
sim_scens[i]["isol_days"] = run_testing_scenario!.(sim_scens[i]["inf_profile_isolation"],
sim_scens[i]["infection_profiles"], sim_scens[i]["test_pos_prob"], sim_scens[i]["test_result_days"],
sim_scens[i]["symp_day"] .+ 1, sim_scens[i]["will_isolate"], sim_scens[i]["VL_profiles"],
sim_scens[i]["conf_PCR"]; Day5release=Day5release_bool)
end
sim_baseline["test_pos_prob"] = fill(zeros(0),sim_baseline["Ntot"])
sim_baseline["test_result_days"] = fill(zeros(Int64,0),sim_baseline["Ntot"])
Conf_PCR_h = fill(zeros(Bool,0),sim_baseline["Ntot"])
sim_baseline["inf_profile_isolation"] = copy.(sim_baseline["infection_profiles"])
sim_baseline["isol_days"] = run_testing_scenario!.(sim_baseline["inf_profile_isolation"],
sim_baseline["infection_profiles"], sim_baseline["test_pos_prob"],
sim_baseline["test_result_days"], sim_baseline["symp_day"] .+ 1,
sim_baseline["will_isolate"], sim_baseline["VL_profiles"],
Conf_PCR_h; Day5release=Day5release_bool)
sim_scens[Nscens] = copy(sim_baseline)
for i in 1:(Nscens)
sim_scens[i]["inf_days"] = zeros(Int64, Ntot)
for j in 1:Ntot
sim_scens[i]["inf_days"][j] = sum((sim_scens[i]["inf_profile_isolation"][j] .> 0))
end
end
return sim_scens, scen_names
end
function get_no_testing_scenario(Ntot::Int, Pisol::Float64;
Day5release_bool::Bool = false)
sim_baseline = init_VL_and_infectiousness(Ntot, Pisol)
sim_baseline["test_pos_prob"] = fill(zeros(0),sim_baseline["Ntot"])
sim_baseline["test_result_days"] = fill(zeros(Int64,0),sim_baseline["Ntot"])
Conf_PCR_h = fill(zeros(Bool,0),sim_baseline["Ntot"])
sim_baseline["inf_profile_isolation"] = copy.(sim_baseline["infection_profiles"])
sim_baseline["isol_days"] = run_testing_scenario!.(sim_baseline["inf_profile_isolation"],
sim_baseline["infection_profiles"], sim_baseline["test_pos_prob"],
sim_baseline["test_result_days"], sim_baseline["symp_day"] .+ 1,
sim_baseline["will_isolate"], sim_baseline["VL_profiles"],
Conf_PCR_h; Day5release=Day5release_bool)
return sim_baseline
end
function run_testing_scenarios_vs_baseline(sim_baseline::Dict, LFD_comply::Float64,
Conf_PCR::Bool; Day5release_bool::Bool = false,
LFD_AllorNone::Bool = false)
Ntot = sim_baseline["Ntot"]
Nscens = length(scen_names)
sim_scens = Array{Dict,1}(undef,Nscens)
for i in 1:(Nscens-1)
sim_scens[i] = copy(sim_baseline)
init_testing_random!(sim_scens[i], Dict("scenario"=>scen_names[i],
"comply_prob"=>LFD_comply, "aon_compliance"=>LFD_AllorNone), Conf_PCR)
sim_scens[i]["inf_profile_isolation"] = copy.(sim_scens[i]["infection_profiles"])
sim_scens[i]["isol_days"] = run_testing_scenario!.(sim_scens[i]["inf_profile_isolation"],
sim_scens[i]["infection_profiles"], sim_scens[i]["test_pos_prob"], sim_scens[i]["test_result_days"],
sim_scens[i]["symp_day"] .+ 1, sim_scens[i]["will_isolate"], sim_scens[i]["VL_profiles"],
sim_scens[i]["conf_PCR"]; Day5release=Day5release_bool)
end
sim_scens[Nscens] = copy(sim_baseline)
for i in 1:(Nscens)
sim_scens[i]["inf_days"] = zeros(Int64, Ntot)
for j in 1:Ntot
sim_scens[i]["inf_days"][j] = sum((sim_scens[i]["inf_profile_isolation"][j] .> 0))
end
end
return sim_scens, scen_names
end
|
{"hexsha": "f71d133f28a63efa6af4adc1807d0a90fea51182", "size": 13736, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/repeat_testing_scenarios.jl", "max_stars_repo_name": "CarlWhitfield/Viral_load_testing_COV19_model", "max_stars_repo_head_hexsha": "e12befa4016de7af69c75dcba7c4f80896b74dd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/repeat_testing_scenarios.jl", "max_issues_repo_name": "CarlWhitfield/Viral_load_testing_COV19_model", "max_issues_repo_head_hexsha": "e12befa4016de7af69c75dcba7c4f80896b74dd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/repeat_testing_scenarios.jl", "max_forks_repo_name": "CarlWhitfield/Viral_load_testing_COV19_model", "max_forks_repo_head_hexsha": "e12befa4016de7af69c75dcba7c4f80896b74dd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3312302839, "max_line_length": 205, "alphanum_fraction": 0.6795282469, "num_tokens": 4020}
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
from builtins import range
from future.utils import iteritems
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
from tensorrtserver.api import *
import os
np_dtype_string = np.dtype(object)
TEST_SYSTEM_SHARED_MEMORY = bool(int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))
TEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY', 0)))
class InferZeroTest(unittest.TestCase):
def _full_zero(self, dtype, shapes):
# 'shapes' is list of shapes, one for each input.
# For validation assume any shape can be used...
if tu.validate_for_tf_model(dtype, dtype, dtype, shapes[0], shapes[0], shapes[0]):
# model that supports batching
for bs in (1, 8):
iu.infer_zero(self, 'graphdef', bs, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_zero(self, 'savedmodel', bs, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that does not support batching
iu.infer_zero(self, 'graphdef_nobatch', 1, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
iu.infer_zero(self, 'savedmodel_nobatch', 1, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if tu.validate_for_c2_model(dtype, dtype, dtype, shapes[0], shapes[0], shapes[0]):
# model that supports batching
for bs in (1, 8):
iu.infer_zero(self, 'netdef', bs, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that does not support batching
iu.infer_zero(self, 'netdef_nobatch', 1, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
if tu.validate_for_onnx_model(dtype, dtype, dtype, shapes[0], shapes[0], shapes[0]):
# model that supports batching
for bs in (1, 8):
iu.infer_zero(self, 'onnx', bs, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that does not support batching
iu.infer_zero(self, 'onnx_nobatch', 1, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
for name in ["simple_zero", "sequence_zero", "fan_zero"]:
if tu.validate_for_ensemble_model(name, dtype, dtype, dtype,
shapes[0], shapes[0], shapes[0]):
# model that supports batching
for bs in (1, 8):
iu.infer_zero(self, name, bs, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
# model that does not support batching
iu.infer_zero(self, name + '_nobatch', 1, dtype, shapes, shapes,
use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY, use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
def test_ff1_sanity(self):
self._full_zero(np.float32, ([1,],))
def test_ff1(self):
self._full_zero(np.float32, ([0,],))
def test_ff3_sanity(self):
self._full_zero(np.float32, ([1,],[2,],[1,]))
def test_ff3_0(self):
self._full_zero(np.float32, ([0,],[0,],[0,]))
def test_ff3_1(self):
self._full_zero(np.float32, ([0,],[0,],[1,]))
def test_ff3_2(self):
self._full_zero(np.float32, ([0,],[1,],[0,]))
def test_ff3_3(self):
self._full_zero(np.float32, ([1,],[0,],[0,]))
def test_ff3_4(self):
self._full_zero(np.float32, ([1,],[0,],[1,]))
def test_hh1_sanity(self):
self._full_zero(np.float16, ([2, 2],))
def test_hh1_0(self):
self._full_zero(np.float16, ([1, 0],))
def test_hh1_1(self):
self._full_zero(np.float16, ([0, 1],))
def test_hh1_2(self):
self._full_zero(np.float16, ([0, 0],))
def test_hh3_sanity(self):
self._full_zero(np.float16, ([2, 2],[2, 2],[1, 1]))
def test_hh3_0(self):
self._full_zero(np.float16, ([0, 0],[0, 0],[0, 0]))
def test_hh3_1(self):
self._full_zero(np.float16, ([0, 1],[0, 1],[2,3]))
def test_hh3_2(self):
self._full_zero(np.float16, ([1, 0],[1, 3],[0, 1]))
def test_hh3_3(self):
self._full_zero(np.float16, ([1, 1],[3, 0],[0, 0]))
def test_hh3_4(self):
self._full_zero(np.float16, ([1, 1],[0, 6],[2, 2]))
def test_oo1_sanity(self):
self._full_zero(np_dtype_string, ([2,],))
def test_oo1(self):
self._full_zero(np_dtype_string, ([0,],))
def test_oo3_sanity(self):
self._full_zero(np_dtype_string, ([2, 2],[2, 2],[1, 1]))
def test_oo3_0(self):
self._full_zero(np_dtype_string, ([0, 0],[0, 0],[0, 0]))
def test_oo3_1(self):
self._full_zero(np_dtype_string, ([0, 1],[0, 1],[2,3]))
def test_oo3_2(self):
self._full_zero(np_dtype_string, ([1, 0],[1, 3],[0, 1]))
def test_oo3_3(self):
self._full_zero(np_dtype_string, ([1, 1],[3, 0],[0, 0]))
def test_oo3_4(self):
self._full_zero(np_dtype_string, ([1, 1],[0, 6],[2, 2]))
def test_bb1_sanity(self):
self._full_zero(np.bool, ([10,],))
def test_bb1_0(self):
self._full_zero(np.bool, ([0,],))
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "a772bb1b2ae5c062bc6190a79ab8f62738c58ac1", "size": 7463, "ext": "py", "lang": "Python", "max_stars_repo_path": "qa/L0_infer_zero/infer_zero_test.py", "max_stars_repo_name": "AliAzG/triton-inference-server", "max_stars_repo_head_hexsha": "fbce250035d049d13f32c362e2d76a5cb787da51", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qa/L0_infer_zero/infer_zero_test.py", "max_issues_repo_name": "AliAzG/triton-inference-server", "max_issues_repo_head_hexsha": "fbce250035d049d13f32c362e2d76a5cb787da51", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qa/L0_infer_zero/infer_zero_test.py", "max_forks_repo_name": "AliAzG/triton-inference-server", "max_forks_repo_head_hexsha": "fbce250035d049d13f32c362e2d76a5cb787da51", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-09T11:16:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-09T11:16:23.000Z", "avg_line_length": 47.8397435897, "max_line_length": 123, "alphanum_fraction": 0.6636741257, "include": true, "reason": "import numpy", "num_tokens": 1944}
|
import time
import json
import logging
import numpy as np
import os.path as osp
from pycoco.bleu.bleu import Bleu
from pycoco.meteor.meteor import Meteor
from pycoco.rouge.rouge import Rouge
from pycoco.cider.cider import Cider
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Timer:
def __init__(self):
self.start_time = 0
self.end_time = 0
self.total_time = 0
self.avg_time = 0
self.n_toc = 0
def tic(self):
self.n_toc = 0
self.start_time = time.time()
def toc(self):
self.end_time = time.time()
self.total_time = self.end_time - self.start_time
self.n_toc += 1.
self.avg_time = self.total_time / self.n_toc
return self.total_time
class Logger:
"""
When receiving a message, first print it on screen, then write it into log file.
If save_dir is None, it writes no log and only prints on screen.
"""
def __init__(self, save_dir):
if save_dir is not None:
self.logger = logging.getLogger()
logging.basicConfig(filename=osp.join(save_dir, 'experiment.log'), format='%(asctime)s | %(message)s')
logging.root.setLevel(level=logging.INFO)
else:
self.logger = None
def info(self, msg, to_file=True):
print msg
if self.logger is not None and to_file:
self.logger.info(msg)
def evaluate(gt_file, re_file, logger=None):
"""
This function is reformed from MSCOCO evaluating code.
The reference sentences are read from gt_file,
the generated sentences to be evaluated are read from res_file
"""
gts = json.load(open(gt_file, 'r'))
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
#(Meteor(), "METEOR"),
# (Rouge(), "ROUGE_L"),
(Cider(), "CIDEr")
]
metrics = []
res = json.load(open(re_file, 'r'))
res = {c['image_id']: [c['caption']] for c in res}
gts = {k: v for k, v in zip(gts['image_ids'], gts['captions']) if k in res}
for scorer, method in scorers:
if logger is not None:
logger.info('computing %s score...' % (scorer.method()))
score, scores = scorer.compute_score(gts, res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
if logger is not None:
logger.info("%s: %0.3f" % (m, sc))
metrics.extend(score)
else:
if logger is not None:
logger.info("%s: %0.3f" % (method, score))
metrics.append(score)
return metrics
def lm_caption_step(w_t,lm_state_t,caption_state_t,lm,caption_model,eta,manner):
word = Variable(torch.LongTensor(w_t.tolist()))
if lm.on_gpu:
word = word.cuda()
word_emb = lm.word_embedding_layer(word)
logit, lm_state_t_1 = lm.forward(word_emb, lm_state_t) # logit : (batch_size, vocab_size)
prob = F.softmax(logit) # (batch_size, vocab_size)
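# the next three lines turn the LM probabilities into a near-binary mask:
# sigmoid(1e7 * (prob - eta)) is ~1 where prob > eta and ~0 elsewhere,
# i.e. a differentiable approximation of thresholding at eta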
P = prob - eta
P *= 10000000
mask = F.sigmoid(P).data.cpu().numpy()
caption_state_t_1,w_t_1 = caption_model.ngram_single_step(caption_state_t, w_t,mask,manner)
return w_t_1,lm_state_t_1,caption_state_t_1
def lm_caption(lm,model,image_ids,vocab,loader,feature,max_step,manner):
w_0 = np.ones((len(image_ids),), dtype=np.int32) * 9488 # set start token for rnn language model
lm_state_0 = lm.init_state()
cap_state_0 = model.initial_state(feature)
eta_0 = 0.00005
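# eta doubles each step below (eta_0 * 2**step), so the language-model mask tightens as the caption grows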
cap = np.zeros((max_step, len(image_ids)), dtype=np.int32)
if manner == 'sample':
res = []
for step in range(max_step-1):
w_1, lm_state_1, cap_state_1 = lm_caption_step(w_0, lm_state_0, cap_state_0, lm, model, eta_0*(2**step), manner)
cap[step + 1, :] = w_1[:]
w_0 = w_1
lm_state_0 = lm_state_1
cap_state_0 = cap_state_1
for i in range(loader.batch_size):
index = np.where(cap[1:,i] == 0)[0]
if len(index) > 0:
s = ' '.join(vocab[w] for w in cap[1:index[0]+1, i])
else:
s = ' '.join(vocab[w] for w in cap[1:, i])
res.append({'image_id': image_ids[i], 'caption': s})
else:
cap, res = model.inference(vocab, image_ids, feature, manner='greedy', max_length=max_step)
return cap,res
def att_lm_caption_step(w_t,lm_state_t,patches,caption_state_t,lm,caption_model,eta,manner):
word = Variable(torch.LongTensor(w_t.tolist()))
if lm.on_gpu:
word = word.cuda()
word_emb = lm.word_embedding_layer(word)
logit, lm_state_t_1 = lm.forward(word_emb, lm_state_t) # logit : (batch_size, vocab_size)
prob = F.softmax(logit) # (batch_size, vocab_size)
P = prob - eta
P *= 10000000
mask = F.sigmoid(P).data.cpu().numpy()
caption_state_t_1,w_t_1 = caption_model.ngram_single_step(caption_state_t, w_t,patches,mask,manner)
return w_t_1,lm_state_t_1,caption_state_t_1
def att_lm_caption(lm,model,image_ids,vocab,loader,features,max_step,manner):
w_0 = np.ones((len(image_ids),), dtype=np.int32) * 9488 # set start token for rnn language model
lm_state_0 = lm.init_state()
eta_0 = 0.00005
cap = np.zeros((max_step, len(image_ids)), dtype=np.int32)
if manner == 'sample':
pathes, cap_state_0 = model.initial_state(features)
res = []
for step in range(max_step-1):
w_1, lm_state_1, cap_state_1 = att_lm_caption_step(w_0, lm_state_0,pathes, cap_state_0, lm, model, eta_0*(2**step), manner)
cap[step + 1, :] = w_1[:]
w_0 = w_1
lm_state_0 = lm_state_1
cap_state_0 = cap_state_1
for i in range(loader.batch_size):
index = np.where(cap[1:,i] == 0)[0]
if len(index) > 0:
s = ' '.join(vocab[w] for w in cap[1:index[0]+1, i])
else:
s = ' '.join(vocab[w] for w in cap[1:, i])
res.append({'image_id': image_ids[i], 'caption': s})
else:
cap, res = model.inference(vocab, image_ids, features, manner='greedy', max_length=max_step)
return cap,res
def lm2_caption_step(w_t,first_word,lm_state_t,caption_state_t,lm,caption_model,eta,manner,step):
word = Variable(torch.LongTensor(w_t.tolist()))
if lm.on_gpu:
word = word.cuda()
word_emb = lm.word_embedding_layer(word)
logit, lm_state_t_1 = lm.forward(word_emb, lm_state_t)  # logit: (batch_size, vocab_size)
prob = F.softmax(logit, dim=1)  # (batch_size, vocab_size)
P = (prob - eta) * 1e7  # steep-sigmoid word mask, as in lm_caption_step
mask = F.sigmoid(P).data.cpu().numpy()[:,:-1]  # drop the start token column
if step == 0:
caption_state_t_1,w_t_1 = caption_model.ngram_single_step(0,caption_state_t, first_word,mask,manner)
else:
caption_state_t_1, w_t_1 = caption_model.ngram_single_step(step, caption_state_t, w_t, mask, manner)
return w_t_1,lm_state_t_1,caption_state_t_1
def lm2_caption(lm,model,image_ids,vocab,loader,feature,max_step,manner):
w_0 = np.ones((len(image_ids),), dtype=np.int32) * 9488 # set start token for rnn language model
lm_state_0 = lm.init_state()
first_word, cap_state_0 = model.initial_state(feature)
eta_0 = 0.00005
cap = np.zeros((max_step, len(image_ids)), dtype=np.int32)
if manner == 'sample':
res = []
for step in range(max_step-1):
w_1, lm_state_1, cap_state_1 = lm2_caption_step(w_0,first_word, lm_state_0, cap_state_0, lm, model, eta_0*(2**step), manner,step)
cap[step + 1, :] = w_1[:]
w_0 = w_1
lm_state_0 = lm_state_1
cap_state_0 = cap_state_1
for i in range(loader.batch_size):
index = np.where(cap[1:,i] == 0)[0]
if len(index) > 0:
s = ' '.join(vocab[w] for w in cap[1:index[0]+1, i])
else:
s = ' '.join(vocab[w] for w in cap[1:, i])
res.append({'image_id': image_ids[i], 'caption': s})
else:
cap, res = model.inference(vocab, image_ids, feature, manner='greedy', max_length=max_step)
return cap[1:,:],res
def att2_lm_caption_step(w_t,first_word,lm_state_t,patches,caption_state_t,lm,caption_model,eta,manner,step):
word = Variable(torch.LongTensor(w_t.tolist()))
if lm.on_gpu:
word = word.cuda()
word_emb = lm.word_embedding_layer(word)
logit, lm_state_t_1 = lm.forward(word_emb, lm_state_t)  # logit: (batch_size, vocab_size)
prob = F.softmax(logit, dim=1)  # (batch_size, vocab_size)
P = (prob - eta) * 1e7  # steep-sigmoid word mask, as in lm_caption_step
mask = F.sigmoid(P).data.cpu().numpy()[:,:-1]  # drop the start token column
if step == 0:
caption_state_t_1,w_t_1 = caption_model.ngram_single_step(0,caption_state_t,first_word,patches,mask,manner)
else:
caption_state_t_1, w_t_1 = caption_model.ngram_single_step(step,caption_state_t, w_t, patches, mask, manner)
return w_t_1,lm_state_t_1,caption_state_t_1
def att2_lm_caption(lm,model,image_ids,vocab,loader,features,max_step,manner):
w_0 = np.ones((len(image_ids),), dtype=np.int32) * 9488 # set start token for rnn language model
lm_state_0 = lm.init_state()
eta_0 = 0.00005
cap = np.zeros((max_step, len(image_ids)), dtype=np.int32)
if manner == 'sample':
patches,first_word, cap_state_0 = model.initial_state(features)
res = []
for step in range(max_step-1):
w_1, lm_state_1, cap_state_1 = att2_lm_caption_step(w_0,first_word, lm_state_0,patches, cap_state_0, lm, model, eta_0*(2**step), manner,step)
cap[step + 1, :] = w_1[:]
w_0 = w_1
lm_state_0 = lm_state_1
cap_state_0 = cap_state_1
for i in range(loader.batch_size):
index = np.where(cap[1:,i] == 0)[0]
if len(index) > 0:
s = ' '.join(vocab[w] for w in cap[1:index[0]+1, i])
else:
s = ' '.join(vocab[w] for w in cap[1:, i])
res.append({'image_id': image_ids[i], 'caption': s})
else:
cap, res = model.inference(vocab, image_ids, features, manner='greedy', max_length=max_step)
return cap[1:,:],res
|
{"hexsha": "04fd9648e29ed4d0118365a88940cbd7744e923f", "size": 10235, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools.py", "max_stars_repo_name": "tgGuo15/PriorImageCaption", "max_stars_repo_head_hexsha": "4ee6017d642116145cc74c6f752685bd2d19b1cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2018-09-18T07:07:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-18T11:17:01.000Z", "max_issues_repo_path": "tools.py", "max_issues_repo_name": "gorgeousyouth/PriorImageCaption", "max_issues_repo_head_hexsha": "4ee6017d642116145cc74c6f752685bd2d19b1cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-10-05T06:41:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-12T18:47:52.000Z", "max_forks_repo_path": "tools.py", "max_forks_repo_name": "gorgeousyouth/PriorImageCaption", "max_forks_repo_head_hexsha": "4ee6017d642116145cc74c6f752685bd2d19b1cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-25T08:47:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-07T05:48:44.000Z", "avg_line_length": 41.1044176707, "max_line_length": 153, "alphanum_fraction": 0.6246213972, "include": true, "reason": "import numpy", "num_tokens": 2976}
|
library(ggplot2)
library(gridExtra)
load("sc1-multi-time.RData")
# extract if2 data
if2names <- c("if2.R0","if2.r","if2.sigma","if2.eta","if2.berr","if2.Iinit")
if2data <- estmat[,if2names]
colnames(if2data) <- c("R0","r","sigma","eta","berr","Iinit")
if2times <- estmat[,7]
# extract hmc data
hmcnames <- c("hmc.R0","hmc.r","hmc.sigma","hmc.eta","hmc.berr","hmc.Iinit")
hmcdata <- estmat[,hmcnames]
colnames(hmcdata) <- c("R0","r","sigma","eta","berr","Iinit")
hmctimes <- estmat[,14]
# average times
avif2time <- mean(if2times)
avhmctime <- mean(hmctimes)
avif2time
avhmctime
# sort results
if2sorted <- apply(if2data, 2, sort)
hmcsorted <- apply(hmcdata, 2, sort)
## take centre 95%
nTrials <- dim(estmat)[1]
cinds <- (0.025*nTrials+1):(0.975*nTrials)
if295 <- if2sorted[cinds,]
hmc95 <- hmcsorted[cinds,]
if2plotdata <- data.frame(if295)
hmcplotdata <- data.frame(hmc95)
linecolour <- "grey50"
lineweight <- 0.5
## if2 density plots
R0kernel <- qplot(if2plotdata$R0, geom = "density", xlab = expression(R[0]), ylab = "frequency") +
geom_vline(aes(xintercept=pars_true['R0']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(if2plotdata$R0)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
rkernel <- qplot(if2plotdata$r, geom = "density", xlab = "r", ylab = "") +
geom_vline(aes(xintercept=pars_true['r']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(if2plotdata$r)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
sigmakernel <- qplot(if2plotdata$sigma, geom = "density", xlab = expression(sigma), ylab = "") +
geom_vline(aes(xintercept=sigma), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(if2plotdata$sigma)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
infeckernel <- qplot(if2plotdata$Iinit, geom = "density", xlab = "Initial Infected", ylab = "frequency") +
geom_vline(aes(xintercept=i_infec), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(if2plotdata$Iinit)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
etakernel <- qplot(if2plotdata$eta, geom = "density", xlab = expression(eta), ylab = "") +
geom_vline(aes(xintercept=pars_true['eta']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(if2plotdata$eta)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
berrkernel <- qplot(if2plotdata$berr, geom = "density", xlab = expression(epsilon[proc]), ylab = "") +
geom_vline(aes(xintercept=pars_true['berr']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(if2plotdata$berr)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
# show grid
grid.arrange(R0kernel, rkernel, sigmakernel, infeckernel, etakernel, berrkernel, ncol = 3, nrow = 2)
## hmc density plots
R0kernel <- qplot(hmcplotdata$R0, geom = "density", xlab = expression(R[0]), ylab = "frequency") +
geom_vline(aes(xintercept=pars_true['R0']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(hmcplotdata$R0)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
rkernel <- qplot(hmcplotdata$r, geom = "density", xlab = "r", ylab = "") +
geom_vline(aes(xintercept=pars_true['r']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(hmcplotdata$r)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
sigmakernel <- qplot(hmcplotdata$sigma, geom = "density", xlab = expression(sigma), ylab = "") +
geom_vline(aes(xintercept=sigma), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(hmcplotdata$sigma)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
infeckernel <- qplot(hmcplotdata$Iinit, geom = "density", xlab = "Initial Infected", ylab = "frequency") +
geom_vline(aes(xintercept=i_infec), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(hmcplotdata$Iinit)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
etakernel <- qplot(hmcplotdata$eta, geom = "density", xlab = expression(eta), ylab = "") +
geom_vline(aes(xintercept=pars_true['eta']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(hmcplotdata$eta)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
berrkernel <- qplot(hmcplotdata$berr, geom = "density", xlab = expression(epsilon[proc]), ylab = "") +
geom_vline(aes(xintercept=pars_true['berr']), linetype="solid", size=lineweight, color=linecolour) +
geom_vline(aes(xintercept=median(hmcplotdata$berr)), linetype="dashed", size=lineweight, color=linecolour) +
theme_bw()
# show grid
grid.arrange(R0kernel, rkernel, sigmakernel, infeckernel, etakernel, berrkernel, ncol = 3, nrow = 2)
## combined plots
R0kernel <- qplot(hmcplotdata$R0, geom = "density", xlab = expression(R[0]), ylab = "frequency") +
geom_density(aes(x = if2plotdata$R0), linetype = "dashed") +
geom_vline(aes(xintercept=pars_true['R0']), linetype="solid", size=lineweight, color=linecolour) +
theme_bw()
rkernel <- qplot(hmcplotdata$r, geom = "density", xlab = "r", ylab = "") +
geom_density(aes(x = if2plotdata$r), linetype = "dashed") +
geom_vline(aes(xintercept=pars_true['r']), linetype="solid", size=lineweight, color=linecolour) +
theme_bw()
sigmakernel <- qplot(hmcplotdata$sigma, geom = "density", xlab = expression(sigma), ylab = "") +
geom_density(aes(x = if2plotdata$sigma), linetype = "dashed") +
geom_vline(aes(xintercept=sigma), linetype="solid", size=lineweight, color=linecolour) +
theme_bw()
infeckernel <- qplot(hmcplotdata$Iinit, geom = "density", xlab = "Initial Infected", ylab = "frequency") +
geom_density(aes(x = if2plotdata$Iinit), linetype = "dashed") +
geom_vline(aes(xintercept=i_infec), linetype="solid", size=lineweight, color=linecolour) +
theme_bw()
etakernel <- qplot(hmcplotdata$eta, geom = "density", xlab = expression(eta), ylab = "") +
geom_density(aes(x = if2plotdata$eta), linetype = "dashed") +
geom_vline(aes(xintercept=pars_true['eta']), linetype="solid", size=lineweight, color=linecolour) +
theme_bw()
berrkernel <- qplot(hmcplotdata$berr, geom = "density", xlab = expression(epsilon[proc]), ylab = "") +
geom_density(aes(x = if2plotdata$berr), linetype = "dashed") +
geom_vline(aes(xintercept=pars_true['berr']), linetype="solid", size=lineweight, color=linecolour) +
theme_bw()
grid.arrange(R0kernel, rkernel, sigmakernel, infeckernel, etakernel, berrkernel, ncol = 3, nrow = 2)
|
{"hexsha": "267880a16e45f6e0f2b8049f5fe4d6d0cb0efcd1", "size": 7046, "ext": "r", "lang": "R", "max_stars_repo_path": "code/stochastic-comparison/parfit-parallel/process-sc1-m.r", "max_stars_repo_name": "dbarrows/epidemic-forecasting", "max_stars_repo_head_hexsha": "a0865fa20c992dc4159e79bb332500e3ff2357ae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/stochastic-comparison/parfit-parallel/process-sc1-m.r", "max_issues_repo_name": "dbarrows/epidemic-forecasting", "max_issues_repo_head_hexsha": "a0865fa20c992dc4159e79bb332500e3ff2357ae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/stochastic-comparison/parfit-parallel/process-sc1-m.r", "max_forks_repo_name": "dbarrows/epidemic-forecasting", "max_forks_repo_head_hexsha": "a0865fa20c992dc4159e79bb332500e3ff2357ae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5949367089, "max_line_length": 114, "alphanum_fraction": 0.6881918819, "num_tokens": 2218}
|
SUBROUTINE SGBCO( ABD, LDA, N, ML, MU, IPVT, RCOND, Z )
C
C FACTORS A REAL BAND MATRIX BY GAUSSIAN ELIMINATION
C AND ESTIMATES THE CONDITION OF THE MATRIX.
C
C REVISION DATE: 8/1/82
C AUTHOR: MOLER, C. B., (U. OF NEW MEXICO)
C
C IF RCOND IS NOT NEEDED, SGBFA IS SLIGHTLY FASTER.
C            TO SOLVE A*X = B , FOLLOW SGBCO BY SGBSL.
C
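C            FOR EXAMPLE (AN ILLUSTRATIVE SKETCH ONLY; B HOLDS THE
C            RIGHT HAND SIDE AND Z IS SCRATCH SPACE):
C                CALL SGBCO(ABD,LDA,N,ML,MU,IPVT,RCOND,Z)
C                IF (1.0+RCOND .EQ. 1.0) STOP
C                CALL SGBSL(ABD,LDA,N,ML,MU,IPVT,B,0)
C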
C INPUT:
C
C ABD REAL(LDA, N)
C CONTAINS THE MATRIX IN BAND STORAGE. THE COLUMNS
C OF THE MATRIX ARE STORED IN THE COLUMNS OF ABD AND
C THE DIAGONALS OF THE MATRIX ARE STORED IN ROWS
C ML+1 THROUGH 2*ML+MU+1 OF ABD .
C SEE THE COMMENTS BELOW FOR DETAILS.
C
C LDA INTEGER
C THE LEADING DIMENSION OF THE ARRAY ABD .
C LDA MUST BE .GE. 2*ML + MU + 1 .
C
C N INTEGER
C THE ORDER OF THE ORIGINAL MATRIX.
C
C ML INTEGER
C NUMBER OF DIAGONALS BELOW THE MAIN DIAGONAL.
C 0 .LE. ML .LT. N .
C
C MU INTEGER
C NUMBER OF DIAGONALS ABOVE THE MAIN DIAGONAL.
C 0 .LE. MU .LT. N .
C MORE EFFICIENT IF ML .LE. MU .
C
C ON RETURN
C
C ABD AN UPPER TRIANGULAR MATRIX IN BAND STORAGE AND
C THE MULTIPLIERS WHICH WERE USED TO OBTAIN IT.
C THE FACTORIZATION CAN BE WRITTEN A = L*U WHERE
C L IS A PRODUCT OF PERMUTATION AND UNIT LOWER
C TRIANGULAR MATRICES AND U IS UPPER TRIANGULAR.
C
C IPVT INTEGER(N)
C AN INTEGER VECTOR OF PIVOT INDICES.
C
C RCOND REAL
C AN ESTIMATE OF THE RECIPROCAL CONDITION OF A .
C FOR THE SYSTEM A*X = B , RELATIVE PERTURBATIONS
C IN A AND B OF SIZE EPSILON MAY CAUSE
C RELATIVE PERTURBATIONS IN X OF SIZE EPSILON/RCOND .
C IF RCOND IS SO SMALL THAT THE LOGICAL EXPRESSION
C 1.0 + RCOND .EQ. 1.0
C IS TRUE, THEN A MAY BE SINGULAR TO WORKING
C PRECISION. IN PARTICULAR, RCOND IS ZERO IF
C EXACT SINGULARITY IS DETECTED OR THE ESTIMATE
C UNDERFLOWS.
C
C Z REAL(N)
C A WORK VECTOR WHOSE CONTENTS ARE USUALLY UNIMPORTANT.
C IF A IS CLOSE TO A SINGULAR MATRIX, THEN Z IS
C AN APPROXIMATE NULL VECTOR IN THE SENSE THAT
C NORM(A*Z) = RCOND*NORM(A)*NORM(Z) .
C
C BAND STORAGE
C
C IF A IS A BAND MATRIX, THE FOLLOWING PROGRAM SEGMENT
C WILL SET UP THE INPUT.
C
C ML = (BAND WIDTH BELOW THE DIAGONAL)
C MU = (BAND WIDTH ABOVE THE DIAGONAL)
C M = ML + MU + 1
C DO 20 J = 1, N
C I1 = MAX0(1, J-MU)
C I2 = MIN0(N, J+ML)
C DO 10 I = I1, I2
C K = I - J + M
C ABD(K,J) = A(I,J)
C 10 CONTINUE
C 20 CONTINUE
C
C THIS USES ROWS ML+1 THROUGH 2*ML+MU+1 OF ABD .
C IN ADDITION, THE FIRST ML ROWS IN ABD ARE USED FOR
C ELEMENTS GENERATED DURING THE TRIANGULARIZATION.
C THE TOTAL NUMBER OF ROWS NEEDED IN ABD IS 2*ML+MU+1 .
C THE ML+MU BY ML+MU UPPER LEFT TRIANGLE AND THE
C ML BY ML LOWER RIGHT TRIANGLE ARE NOT REFERENCED.
C
C EXAMPLE: IF THE ORIGINAL MATRIX IS
C
C 11 12 13 0 0 0
C 21 22 23 24 0 0
C 0 32 33 34 35 0
C 0 0 43 44 45 46
C 0 0 0 54 55 56
C 0 0 0 0 65 66
C
C THEN N = 6, ML = 1, MU = 2, LDA .GE. 5 AND ABD SHOULD CONTAIN
C
C * * * + + + , * = NOT USED
C * * 13 24 35 46 , + = USED FOR PIVOTING
C * 12 23 34 45 56
C 11 22 33 44 55 66
C 21 32 43 54 65 *
C
C
C ROUTINES CALLED: FROM LINPACK: SGBFA
C FROM BLAS: SAXPY, SDOT, SSCAL, SASUM
C FROM FORTRAN: ABS, AMAX1, MAX0, MIN0, SIGN
C
INTEGER LDA, N, ML, MU, IPVT(*)
REAL ABD(LDA,*), Z(*)
REAL RCOND
C
REAL SDOT, EK, T, WK, WKM
REAL ANORM, S, SASUM, SM, YNORM
INTEGER IS, INFO, J, JU, K, KB, KP1, L, LA, LM, LZ, M, MM
C
C
C ** COMPUTE 1-NORM OF A
ANORM = 0.0E0
L = ML + 1
IS = L + MU
DO 10 J = 1, N
ANORM = AMAX1(ANORM, SASUM(L,ABD(IS,J), 1))
IF (IS .GT. ML + 1) IS = IS - 1
IF (J .LE. MU) L = L + 1
IF (J .GE. N - ML) L = L - 1
10 CONTINUE
C ** FACTOR
CALL SGBFA(ABD, LDA, N, ML, MU, IPVT, INFO)
C
C RCOND = 1/(NORM(A)*(ESTIMATE OF NORM(INVERSE(A)))) .
C ESTIMATE = NORM(Z)/NORM(Y) WHERE A*Z = Y AND TRANS(A)*Y = E .
C TRANS(A) IS THE TRANSPOSE OF A . THE COMPONENTS OF E ARE
C CHOSEN TO CAUSE MAXIMUM LOCAL GROWTH IN THE ELEMENTS OF W WHERE
C TRANS(U)*W = E . THE VECTORS ARE FREQUENTLY RESCALED TO AVOID
C OVERFLOW.
C
C ** SOLVE TRANS(U)*W = E
EK = 1.0E0
DO 20 J = 1, N
Z(J) = 0.0E0
20 CONTINUE
C
M = ML + MU + 1
JU = 0
DO 100 K = 1, N
IF (Z(K) .NE. 0.0E0) EK = SIGN(EK, -Z(K))
IF (ABS(EK-Z(K)) .GT. ABS(ABD(M,K))) THEN
S = ABS(ABD(M,K))/ABS(EK-Z(K))
CALL SSCAL(N, S, Z, 1)
EK = S*EK
ENDIF
WK = EK - Z(K)
WKM = -EK - Z(K)
S = ABS(WK)
SM = ABS(WKM)
IF (ABD(M,K) .NE. 0.0E0) THEN
WK = WK /ABD(M,K)
WKM = WKM/ABD(M,K)
ELSE
WK = 1.0E0
WKM = 1.0E0
ENDIF
KP1 = K + 1
JU = MIN0(MAX0(JU, MU+IPVT(K)), N)
MM = M
IF (KP1 .LE. JU) THEN
DO 60 J = KP1, JU
MM = MM - 1
SM = SM + ABS(Z(J)+WKM*ABD(MM,J))
Z(J) = Z(J) + WK*ABD(MM,J)
S = S + ABS(Z(J))
60 CONTINUE
IF (S .LT. SM) THEN
T = WKM - WK
WK = WKM
MM = M
DO 70 J = KP1, JU
MM = MM - 1
Z(J) = Z(J) + T*ABD(MM,J)
70 CONTINUE
ENDIF
ENDIF
Z(K) = WK
100 CONTINUE
C
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
C
C ** SOLVE TRANS(L)*Y = W
DO 120 KB = 1, N
K = N + 1 - KB
LM = MIN0(ML, N-K)
IF (K .LT. N) Z(K) = Z(K) + SDOT(LM, ABD(M+1,K), 1, Z(K+1), 1)
IF (ABS(Z(K)) .GT. 1.0E0) THEN
S = 1.0E0 / ABS(Z(K))
CALL SSCAL(N, S, Z, 1)
ENDIF
L = IPVT(K)
T = Z(L)
Z(L) = Z(K)
Z(K) = T
120 CONTINUE
C
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
C
YNORM = 1.0E0
C ** SOLVE L*V = Y
DO 140 K = 1, N
L = IPVT(K)
T = Z(L)
Z(L) = Z(K)
Z(K) = T
LM = MIN0(ML, N-K)
IF (K .LT. N) CALL SAXPY(LM, T, ABD(M+1,K), 1, Z(K+1), 1)
IF (ABS(Z(K)) .GT. 1.0E0) THEN
S = 1.0E0 / ABS(Z(K))
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
ENDIF
140 CONTINUE
C
S = 1.0E0/SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
C ** SOLVE U*Z = W
DO 160 KB = 1, N
K = N + 1 - KB
IF (ABS(Z(K)) .GT. ABS(ABD(M,K))) THEN
S = ABS(ABD(M,K)) / ABS(Z(K))
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
ENDIF
IF (ABD(M,K) .NE. 0.0E0) Z(K) = Z(K)/ABD(M,K)
IF (ABD(M,K) .EQ. 0.0E0) Z(K) = 1.0E0
LM = MIN0(K, M) - 1
LA = M - LM
LZ = K - LM
T = -Z(K)
CALL SAXPY(LM, T, ABD(LA,K), 1, Z(LZ), 1)
160 CONTINUE
C ** MAKE ZNORM = 1.0
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
C
IF (ANORM .NE. 0.0E0) RCOND = YNORM/ANORM
IF (ANORM .EQ. 0.0E0) RCOND = 0.0E0
RETURN
END
SUBROUTINE SGBFA( ABD, LDA, N, ML, MU, IPVT, INFO )
C
C FACTORS A REAL BAND MATRIX BY ELIMINATION.
C
C REVISION DATE: 8/1/82
C AUTHOR: MOLER, C. B., (U. OF NEW MEXICO)
C
C          SGBFA IS USUALLY CALLED BY SGBCO, BUT IT CAN BE CALLED
C DIRECTLY WITH A SAVING IN TIME IF RCOND IS NOT NEEDED.
C
C INPUT: SAME AS 'SGBCO'
C
C ON RETURN:
C
C ABD,IPVT SAME AS 'SGBCO'
C
C INFO INTEGER
C = 0 NORMAL VALUE.
C = K IF U(K,K) .EQ. 0.0 . THIS IS NOT AN ERROR
C CONDITION FOR THIS SUBROUTINE, BUT IT DOES
C INDICATE THAT SGBSL WILL DIVIDE BY ZERO IF
C                     CALLED.  USE RCOND IN SGBCO FOR A RELIABLE
C INDICATION OF SINGULARITY.
C
C (SEE 'SGBCO' FOR DESCRIPTION OF BAND STORAGE MODE)
C
C ROUTINES CALLED: FROM BLAS: SAXPY, SSCAL, ISAMAX
C FROM FORTRAN: MAX0, MIN0
C
INTEGER LDA, N, ML, MU, IPVT(*), INFO
REAL ABD(LDA,*)
C
REAL T
INTEGER I,ISAMAX,I0,J,JU,JZ,J0,J1,K,KP1,L,LM,M,MM,NM1
C
C
M = ML + MU + 1
INFO = 0
C ** ZERO INITIAL FILL-IN COLUMNS
J0 = MU + 2
J1 = MIN0(N, M) - 1
DO 20 JZ = J0, J1
I0 = M + 1 - JZ
DO 10 I = I0, ML
ABD(I,JZ) = 0.0E0
10 CONTINUE
20 CONTINUE
JZ = J1
JU = 0
C
C ** GAUSSIAN ELIMINATION WITH PARTIAL PIVOTING
NM1 = N - 1
DO 120 K = 1, NM1
KP1 = K + 1
C ** ZERO NEXT FILL-IN COLUMN
JZ = JZ + 1
IF (JZ .LE. N) THEN
DO 40 I = 1, ML
ABD(I,JZ) = 0.0E0
40 CONTINUE
ENDIF
C ** FIND L = PIVOT INDEX
LM = MIN0(ML, N-K)
L = ISAMAX(LM+1, ABD(M,K), 1) + M - 1
IPVT(K) = L + K - M
C
IF (ABD(L,K) .EQ. 0.0E0) THEN
C ** ZERO PIVOT IMPLIES THIS COLUMN
C ** ALREADY TRIANGULARIZED
INFO = K
ELSE
C ** INTERCHANGE IF NECESSARY
IF (L .NE. M) THEN
T = ABD(L,K)
ABD(L,K) = ABD(M,K)
ABD(M,K) = T
ENDIF
C ** COMPUTE MULTIPLIERS
T = -1.0E0 / ABD(M,K)
CALL SSCAL(LM, T, ABD(M+1,K), 1)
C
C ** ROW ELIMINATION WITH COLUMN INDEXING
C
JU = MIN0(MAX0(JU, MU+IPVT(K)), N)
MM = M
DO 80 J = KP1, JU
L = L - 1
MM = MM - 1
T = ABD(L,J)
IF (L .NE. MM) THEN
ABD(L,J) = ABD(MM,J)
ABD(MM,J) = T
ENDIF
CALL SAXPY(LM, T, ABD(M+1,K), 1, ABD(MM+1,J), 1)
80 CONTINUE
C
ENDIF
C
120 CONTINUE
C
IPVT(N) = N
IF (ABD(M,N) .EQ. 0.0E0) INFO = N
RETURN
END
SUBROUTINE SGBSL( ABD, LDA, N, ML, MU, IPVT, B, JOB )
C
C SOLVES THE REAL BAND SYSTEM
C A * X = B OR TRANSPOSE(A) * X = B
C         USING THE FACTORS COMPUTED BY SGBCO OR SGBFA.
C
C REVISION DATE: 8/1/82
C AUTHOR: MOLER, C. B., (U. OF NEW MEXICO)
C
C INPUT:
C
C ABD REAL(LDA, N)
C                THE OUTPUT FROM SGBCO OR SGBFA.
C
C LDA INTEGER
C THE LEADING DIMENSION OF THE ARRAY ABD .
C
C N INTEGER
C THE ORDER OF THE ORIGINAL MATRIX.
C
C ML INTEGER
C NUMBER OF DIAGONALS BELOW THE MAIN DIAGONAL.
C
C MU INTEGER
C NUMBER OF DIAGONALS ABOVE THE MAIN DIAGONAL.
C
C IPVT INTEGER(N)
C                THE PIVOT VECTOR FROM SGBCO OR SGBFA.
C
C B REAL(N)
C THE RIGHT HAND SIDE VECTOR.
C
C JOB INTEGER
C = 0 TO SOLVE A*X = B ,
C = NONZERO TO SOLVE TRANS(A)*X = B , WHERE
C TRANS(A) IS THE TRANSPOSE.
C
C ON RETURN
C
C B THE SOLUTION VECTOR X .
C
C ERROR CONDITION
C
C A DIVISION BY ZERO WILL OCCUR IF THE INPUT FACTOR CONTAINS A
C ZERO ON THE DIAGONAL. TECHNICALLY, THIS INDICATES SINGULARITY,
C BUT IT IS OFTEN CAUSED BY IMPROPER ARGUMENTS OR IMPROPER
C SETTING OF LDA . IT WILL NOT OCCUR IF THE SUBROUTINES ARE
C        CALLED CORRECTLY AND IF SGBCO HAS SET RCOND .GT. 0.0
C OR SGBFA HAS SET INFO .EQ. 0 .
C
C TO COMPUTE INVERSE(A) * C WHERE C IS A MATRIX
C WITH P COLUMNS
C CALL SGBCO(ABD,LDA,N,ML,MU,IPVT,RCOND,Z)
C IF (RCOND IS TOO SMALL) GO TO ...
C DO 10 J = 1, P
C CALL SGBSL(ABD,LDA,N,ML,MU,IPVT,C(1,J),0)
C 10 CONTINUE
C
C ROUTINES CALLED: FROM BLAS: SAXPY, SDOT
C FROM FORTRAN: MIN0
C
INTEGER LDA, N, ML, MU, IPVT(*), JOB
REAL ABD(LDA,*), B(*)
C
REAL SDOT,T
INTEGER K,KB,L,LA,LB,LM,M,NM1
C
C
M = MU + ML + 1
NM1 = N - 1
IF (JOB .EQ. 0) THEN
C ** JOB = 0 , SOLVE A * X = B
C ** FIRST SOLVE L*Y = B
IF (ML .NE. 0) THEN
DO 20 K = 1, NM1
LM = MIN0(ML, N-K)
L = IPVT(K)
T = B(L)
IF (L .NE. K) THEN
B(L) = B(K)
B(K) = T
ENDIF
CALL SAXPY( LM, T, ABD(M+1,K), 1, B(K+1), 1 )
20 CONTINUE
ENDIF
C ** NOW SOLVE U*X = Y
DO 40 KB = 1, N
K = N + 1 - KB
B(K) = B(K) / ABD(M,K)
LM = MIN0(K, M) - 1
LA = M - LM
LB = K - LM
T = -B(K)
CALL SAXPY(LM, T, ABD(LA,K), 1, B(LB), 1)
40 CONTINUE
C
ELSE
C ** JOB = NONZERO, SOLVE TRANS(A) * X = B
C ** FIRST SOLVE TRANS(U)*Y = B
DO 60 K = 1, N
LM = MIN0(K, M) - 1
LA = M - LM
LB = K - LM
T = SDOT(LM, ABD(LA,K), 1, B(LB), 1)
B(K) = (B(K) - T)/ABD(M,K)
60 CONTINUE
C ** NOW SOLVE TRANS(L)*X = Y
IF (ML .NE. 0) THEN
DO 80 KB = 1, NM1
K = N - KB
LM = MIN0(ML, N-K)
B(K) = B(K) + SDOT(LM, ABD(M+1,K), 1, B(K+1), 1)
L = IPVT(K)
IF (L .NE. K) THEN
T = B(L)
B(L) = B(K)
B(K) = T
ENDIF
80 CONTINUE
ENDIF
C
ENDIF
C
RETURN
END
SUBROUTINE SGECO( A, LDA, N,IPVT, RCOND, Z )
C
C FACTORS A REAL MATRIX BY GAUSSIAN ELIMINATION
C AND ESTIMATES THE CONDITION OF THE MATRIX.
C
C REVISION DATE: 8/1/82
C AUTHOR: MOLER, C. B., (U. OF NEW MEXICO)
C
C IF RCOND IS NOT NEEDED, SGEFA IS SLIGHTLY FASTER.
C TO SOLVE A*X = B , FOLLOW SGECO BY SGESL.
C
C ON ENTRY
C
C A REAL(LDA, N)
C THE MATRIX TO BE FACTORED.
C
C LDA INTEGER
C THE LEADING DIMENSION OF THE ARRAY A .
C
C N INTEGER
C THE ORDER OF THE MATRIX A .
C
C ON RETURN
C
C A AN UPPER TRIANGULAR MATRIX AND THE MULTIPLIERS
C WHICH WERE USED TO OBTAIN IT.
C THE FACTORIZATION CAN BE WRITTEN A = L*U , WHERE
C L IS A PRODUCT OF PERMUTATION AND UNIT LOWER
C TRIANGULAR MATRICES AND U IS UPPER TRIANGULAR.
C
C IPVT INTEGER(N)
C AN INTEGER VECTOR OF PIVOT INDICES.
C
C RCOND REAL
C AN ESTIMATE OF THE RECIPROCAL CONDITION OF A .
C FOR THE SYSTEM A*X = B , RELATIVE PERTURBATIONS
C IN A AND B OF SIZE EPSILON MAY CAUSE
C RELATIVE PERTURBATIONS IN X OF SIZE EPSILON/RCOND .
C IF RCOND IS SO SMALL THAT THE LOGICAL EXPRESSION
C 1.0 + RCOND .EQ. 1.0
C IS TRUE, THEN A MAY BE SINGULAR TO WORKING
C PRECISION. IN PARTICULAR, RCOND IS ZERO IF
C EXACT SINGULARITY IS DETECTED OR THE ESTIMATE
C UNDERFLOWS.
C
C Z REAL(N)
C A WORK VECTOR WHOSE CONTENTS ARE USUALLY UNIMPORTANT.
C IF A IS CLOSE TO A SINGULAR MATRIX, THEN Z IS
C AN APPROXIMATE NULL VECTOR IN THE SENSE THAT
C NORM(A*Z) = RCOND*NORM(A)*NORM(Z) .
C
C ROUTINES CALLED: FROM LINPACK: SGEFA
C FROM BLAS: SAXPY, SDOT, SSCAL, SASUM
C FROM FORTRAN: ABS, AMAX1, SIGN
C
INTEGER LDA, N, IPVT(*)
REAL A(LDA,*), Z(*)
REAL RCOND
C
REAL SDOT,EK,T,WK,WKM
REAL ANORM,S,SASUM,SM,YNORM
INTEGER INFO,J,K,KB,KP1,L
C
C
C ** COMPUTE 1-NORM OF A
ANORM = 0.0E0
DO 10 J = 1, N
ANORM = AMAX1( ANORM, SASUM(N,A(1,J),1) )
10 CONTINUE
C ** FACTOR
CALL SGEFA(A,LDA,N,IPVT,INFO)
C
C RCOND = 1/(NORM(A)*(ESTIMATE OF NORM(INVERSE(A)))) .
C ESTIMATE = NORM(Z)/NORM(Y) WHERE A*Z = Y AND TRANS(A)*Y = E .
C TRANS(A) IS THE TRANSPOSE OF A . THE COMPONENTS OF E ARE
C CHOSEN TO CAUSE MAXIMUM LOCAL GROWTH IN THE ELEMENTS OF W WHERE
C TRANS(U)*W = E . THE VECTORS ARE FREQUENTLY RESCALED TO AVOID
C OVERFLOW.
C
C ** SOLVE TRANS(U)*W = E
EK = 1.0E0
DO 20 J = 1, N
Z(J) = 0.0E0
20 CONTINUE
C
DO 100 K = 1, N
IF (Z(K) .NE. 0.0E0) EK = SIGN(EK, -Z(K))
IF (ABS(EK-Z(K)) .GT. ABS(A(K,K))) THEN
S = ABS(A(K,K)) / ABS(EK-Z(K))
CALL SSCAL(N, S, Z, 1)
EK = S*EK
ENDIF
WK = EK - Z(K)
WKM = -EK - Z(K)
S = ABS(WK)
SM = ABS(WKM)
IF (A(K,K) .NE. 0.0E0) THEN
WK = WK / A(K,K)
WKM = WKM / A(K,K)
ELSE
WK = 1.0E0
WKM = 1.0E0
ENDIF
KP1 = K + 1
IF (KP1 .LE. N) THEN
DO 60 J = KP1, N
SM = SM + ABS(Z(J)+WKM*A(K,J))
Z(J) = Z(J) + WK*A(K,J)
S = S + ABS(Z(J))
60 CONTINUE
IF (S .LT. SM) THEN
T = WKM - WK
WK = WKM
DO 70 J = KP1, N
Z(J) = Z(J) + T*A(K,J)
70 CONTINUE
ENDIF
ENDIF
Z(K) = WK
100 CONTINUE
C
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
C ** SOLVE TRANS(L)*Y = W
DO 120 KB = 1, N
K = N + 1 - KB
IF (K .LT. N) Z(K) = Z(K) + SDOT(N-K, A(K+1,K), 1, Z(K+1), 1)
IF (ABS(Z(K)) .GT. 1.0E0) THEN
S = 1.0E0/ABS(Z(K))
CALL SSCAL(N, S, Z, 1)
ENDIF
L = IPVT(K)
T = Z(L)
Z(L) = Z(K)
Z(K) = T
120 CONTINUE
C
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
C ** SOLVE L*V = Y
YNORM = 1.0E0
DO 140 K = 1, N
L = IPVT(K)
T = Z(L)
Z(L) = Z(K)
Z(K) = T
IF (K .LT. N) CALL SAXPY(N-K, T, A(K+1,K), 1, Z(K+1), 1)
IF (ABS(Z(K)) .GT. 1.0E0) THEN
S = 1.0E0/ABS(Z(K))
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
ENDIF
140 CONTINUE
C
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
C ** SOLVE U*Z = V
YNORM = S*YNORM
DO 160 KB = 1, N
K = N + 1 - KB
IF (ABS(Z(K)) .GT. ABS(A(K,K))) THEN
S = ABS(A(K,K))/ABS(Z(K))
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
ENDIF
IF (A(K,K) .NE. 0.0E0) Z(K) = Z(K)/A(K,K)
IF (A(K,K) .EQ. 0.0E0) Z(K) = 1.0E0
T = -Z(K)
CALL SAXPY(K-1, T, A(1,K), 1, Z(1), 1)
160 CONTINUE
C ** MAKE ZNORM = 1.0
S = 1.0E0 / SASUM(N, Z, 1)
CALL SSCAL(N, S, Z, 1)
YNORM = S*YNORM
C
IF (ANORM .NE. 0.0E0) RCOND = YNORM/ANORM
IF (ANORM .EQ. 0.0E0) RCOND = 0.0E0
RETURN
END
SUBROUTINE SGEFA( A, LDA, N, IPVT, INFO )
C
C FACTORS A REAL MATRIX BY GAUSSIAN ELIMINATION.
C
C REVISION DATE: 8/1/82
C AUTHOR: MOLER, C. B., (U. OF NEW MEXICO)
C
C SGEFA IS USUALLY CALLED BY SGECO, BUT IT CAN BE CALLED
C DIRECTLY WITH A SAVING IN TIME IF RCOND IS NOT NEEDED.
C (TIME FOR SGECO) = (1 + 9/N)*(TIME FOR SGEFA) .
C
C INPUT: SAME AS 'SGECO'
C
C ON RETURN:
C
C A,IPVT SAME AS 'SGECO'
C
C INFO INTEGER
C = 0 NORMAL VALUE.
C = K IF U(K,K) .EQ. 0.0 . THIS IS NOT AN ERROR
C CONDITION FOR THIS SUBROUTINE, BUT IT DOES
C INDICATE THAT SGESL OR SGEDI WILL DIVIDE BY ZERO
C IF CALLED. USE RCOND IN SGECO FOR A RELIABLE
C INDICATION OF SINGULARITY.
C
C ROUTINES CALLED: FROM BLAS: SAXPY, SSCAL, ISAMAX
C
INTEGER LDA, N, IPVT(*), INFO
REAL A(LDA,*)
C
REAL T
INTEGER ISAMAX,J,K,KP1,L,NM1
C
C
C ** GAUSSIAN ELIMINATION WITH PARTIAL PIVOTING
INFO = 0
NM1 = N - 1
DO 60 K = 1, NM1
KP1 = K + 1
C ** FIND L = PIVOT INDEX
L = ISAMAX( N-K+1, A(K,K), 1) + K-1
IPVT(K) = L
C
IF (A(L,K) .EQ. 0.0E0) THEN
C ** ZERO PIVOT IMPLIES THIS COLUMN
C ** ALREADY TRIANGULARIZED
INFO = K
ELSE
C ** INTERCHANGE IF NECESSARY
IF (L .NE. K) THEN
T = A(L,K)
A(L,K) = A(K,K)
A(K,K) = T
ENDIF
C ** COMPUTE MULTIPLIERS
T = -1.0E0 / A(K,K)
CALL SSCAL( N-K, T, A(K+1,K), 1 )
C
C ** ROW ELIMINATION WITH COLUMN INDEXING
DO 30 J = KP1, N
T = A(L,J)
IF (L .NE. K) THEN
A(L,J) = A(K,J)
A(K,J) = T
ENDIF
CALL SAXPY( N-K, T, A(K+1,K), 1, A(K+1,J), 1 )
30 CONTINUE
C
ENDIF
C
60 CONTINUE
C
IPVT(N) = N
IF (A(N,N) .EQ. 0.0E0) INFO = N
RETURN
END
SUBROUTINE SGESL( A, LDA, N,IPVT, B, JOB )
C
C SOLVES THE REAL SYSTEM
C A * X = B OR TRANS(A) * X = B
C USING THE FACTORS COMPUTED BY SGECO OR SGEFA.
C
C REVISION DATE: 8/1/82
C AUTHOR: MOLER, C. B., (U. OF NEW MEXICO)
C
C ON ENTRY
C
C A REAL(LDA, N)
C THE OUTPUT FROM SGECO OR SGEFA.
C
C LDA INTEGER
C THE LEADING DIMENSION OF THE ARRAY A .
C
C N INTEGER
C THE ORDER OF THE MATRIX A .
C
C IPVT INTEGER(N)
C THE PIVOT VECTOR FROM SGECO OR SGEFA.
C
C B REAL(N)
C THE RIGHT HAND SIDE VECTOR.
C
C JOB INTEGER
C = 0 TO SOLVE A*X = B ,
C = NONZERO TO SOLVE TRANS(A)*X = B WHERE
C TRANS(A) IS THE TRANSPOSE.
C
C ON RETURN
C
C B THE SOLUTION VECTOR X .
C
C ERROR CONDITION
C
C A DIVISION BY ZERO WILL OCCUR IF THE INPUT FACTOR CONTAINS A
C ZERO ON THE DIAGONAL. TECHNICALLY, THIS INDICATES SINGULARITY,
C BUT IT IS OFTEN CAUSED BY IMPROPER ARGUMENTS OR IMPROPER
C SETTING OF LDA . IT WILL NOT OCCUR IF THE SUBROUTINES ARE
C CALLED CORRECTLY AND IF SGECO HAS SET RCOND .GT. 0.0
C OR SGEFA HAS SET INFO .EQ. 0 .
C
C TO COMPUTE INVERSE(A) * C WHERE C IS A MATRIX
C WITH P COLUMNS
C CALL SGECO(A,LDA,N,IPVT,RCOND,Z)
C IF (RCOND IS TOO SMALL) GO TO ...
C DO 10 J = 1, P
C CALL SGESL(A,LDA,N,IPVT,C(1,J),0)
C 10 CONTINUE
C
C
C ROUTINES CALLED: FROM BLAS: SAXPY, SDOT
C
INTEGER LDA, N, IPVT(*), JOB
REAL A(LDA,*), B(*)
C
REAL SDOT,T
INTEGER K,KB,L,NM1
C
C
NM1 = N - 1
IF (JOB .EQ. 0) THEN
C ** JOB = 0 , SOLVE A * X = B
C ** FIRST SOLVE L*Y = B
DO 20 K = 1, NM1
L = IPVT(K)
T = B(L)
IF (L .NE. K) THEN
B(L) = B(K)
B(K) = T
ENDIF
CALL SAXPY( N-K, T, A(K+1,K), 1, B(K+1), 1 )
20 CONTINUE
C ** NOW SOLVE U*X = Y
DO 40 KB = 1, N
K = N + 1 - KB
B(K) = B(K) / A(K,K)
T = -B(K)
CALL SAXPY( K-1, T, A(1,K), 1, B(1), 1 )
40 CONTINUE
C
ELSE
C ** JOB = NONZERO, SOLVE TRANS(A) * X = B
C ** FIRST SOLVE TRANS(U)*Y = B
DO 60 K = 1, N
T = SDOT( K-1, A(1,K), 1, B(1), 1 )
B(K) = (B(K) - T) / A(K,K)
60 CONTINUE
C ** NOW SOLVE TRANS(L)*X = Y
DO 80 KB = 1, NM1
K = N - KB
B(K) = B(K) + SDOT( N-K, A(K+1,K), 1, B(K+1), 1 )
L = IPVT(K)
IF (L .NE. K) THEN
T = B(L)
B(L) = B(K)
B(K) = T
ENDIF
80 CONTINUE
C
ENDIF
C
RETURN
END
REAL FUNCTION SASUM( N, SX, INCX )
C
C --INPUT-- N NUMBER OF ELEMENTS IN VECTOR TO BE SUMMED
C SX SING-PREC ARRAY, LENGTH 1+(N-1)*INCX, CONTAINING VECTOR
C INCX SPACING OF VECTOR ELEMENTS IN 'SX'
C
C --OUTPUT-- SASUM SUM FROM 0 TO N-1 OF ABS(SX(1+I*INCX))
C
REAL SX(*)
C
C
SASUM = 0.0
IF( N.LE.0 ) RETURN
IF( INCX.NE.1 ) THEN
C ** NON-UNIT INCREMENTS
DO 10 I = 1, 1+(N-1)*INCX, INCX
SASUM = SASUM + ABS(SX(I))
10 CONTINUE
ELSE
C ** UNIT INCREMENTS
M = MOD(N,6)
IF( M.NE.0 ) THEN
C ** CLEAN-UP LOOP SO REMAINING VECTOR
C ** LENGTH IS A MULTIPLE OF 6.
DO 30 I = 1, M
SASUM = SASUM + ABS(SX(I))
30 CONTINUE
ENDIF
C ** UNROLL LOOP FOR SPEED
DO 50 I = M+1, N, 6
SASUM = SASUM + ABS(SX(I)) + ABS(SX(I+1)) + ABS(SX(I+2))
$ + ABS(SX(I+3)) + ABS(SX(I+4)) + ABS(SX(I+5))
50 CONTINUE
ENDIF
C
RETURN
END
SUBROUTINE SAXPY( N, SA, SX, INCX, SY, INCY )
C
C Y = A*X + Y (X, Y = VECTORS, A = SCALAR)
C
C --INPUT--
C N NUMBER OF ELEMENTS IN INPUT VECTORS 'X' AND 'Y'
C SA SINGLE PRECISION SCALAR MULTIPLIER 'A'
C SX SING-PREC ARRAY CONTAINING VECTOR 'X'
C INCX SPACING OF ELEMENTS OF VECTOR 'X' IN 'SX'
C SY SING-PREC ARRAY CONTAINING VECTOR 'Y'
C INCY SPACING OF ELEMENTS OF VECTOR 'Y' IN 'SY'
C
C --OUTPUT--
C SY FOR I = 0 TO N-1, OVERWRITE SY(LY+I*INCY) WITH
C SA*SX(LX+I*INCX) + SY(LY+I*INCY),
C WHERE LX = 1 IF INCX .GE. 0,
C = (-INCX)*N IF INCX .LT. 0
C AND LY IS DEFINED IN A SIMILAR WAY USING INCY.
C
REAL SX(*), SY(*), SA
C
C
IF( N.LE.0 .OR. SA.EQ.0.0 ) RETURN
C
IF ( INCX.EQ.INCY .AND. INCX.GT.1 ) THEN
C
DO 10 I = 1, 1+(N-1)*INCX, INCX
SY(I) = SY(I) + SA * SX(I)
10 CONTINUE
C
ELSE IF ( INCX.EQ.INCY .AND. INCX.EQ.1 ) THEN
C
C ** EQUAL, UNIT INCREMENTS
M = MOD(N,4)
IF( M .NE. 0 ) THEN
C ** CLEAN-UP LOOP SO REMAINING VECTOR LENGTH
C ** IS A MULTIPLE OF 4.
DO 20 I = 1, M
SY(I) = SY(I) + SA * SX(I)
20 CONTINUE
ENDIF
C ** UNROLL LOOP FOR SPEED
DO 30 I = M+1, N, 4
SY(I) = SY(I) + SA * SX(I)
SY(I+1) = SY(I+1) + SA * SX(I+1)
SY(I+2) = SY(I+2) + SA * SX(I+2)
SY(I+3) = SY(I+3) + SA * SX(I+3)
30 CONTINUE
C
ELSE
C ** NONEQUAL OR NONPOSITIVE INCREMENTS.
IX = 1
IY = 1
IF( INCX.LT.0 ) IX = 1 + (N-1)*(-INCX)
IF( INCY.LT.0 ) IY = 1 + (N-1)*(-INCY)
DO 40 I = 1, N
SY(IY) = SY(IY) + SA*SX(IX)
IX = IX + INCX
IY = IY + INCY
40 CONTINUE
C
ENDIF
C
RETURN
END
REAL FUNCTION SDOT( N, SX, INCX, SY, INCY )
C
C S.P. DOT PRODUCT OF VECTORS 'X' AND 'Y'
C
C --INPUT--
C N NUMBER OF ELEMENTS IN INPUT VECTORS 'X' AND 'Y'
C SX SING-PREC ARRAY CONTAINING VECTOR 'X'
C INCX SPACING OF ELEMENTS OF VECTOR 'X' IN 'SX'
C SY SING-PREC ARRAY CONTAINING VECTOR 'Y'
C INCY SPACING OF ELEMENTS OF VECTOR 'Y' IN 'SY'
C
C --OUTPUT--
C SDOT SUM FOR I = 0 TO N-1 OF SX(LX+I*INCX) * SY(LY+I*INCY),
C WHERE LX = 1 IF INCX .GE. 0,
C = (-INCX)*N IF INCX .LT. 0,
C AND LY IS DEFINED IN A SIMILAR WAY USING INCY.
C
REAL SX(*), SY(*)
C
C
SDOT = 0.0
IF( N.LE.0 ) RETURN
C
IF ( INCX.EQ.INCY .AND. INCX.GT.1 ) THEN
C
DO 10 I = 1, 1+(N-1)*INCX, INCX
SDOT = SDOT + SX(I) * SY(I)
10 CONTINUE
C
ELSE IF ( INCX.EQ.INCY .AND. INCX.EQ.1 ) THEN
C
C ** EQUAL, UNIT INCREMENTS
M = MOD(N,5)
IF( M .NE. 0 ) THEN
C ** CLEAN-UP LOOP SO REMAINING VECTOR LENGTH
C                       ** IS A MULTIPLE OF 5.
DO 20 I = 1, M
SDOT = SDOT + SX(I) * SY(I)
20 CONTINUE
ENDIF
C ** UNROLL LOOP FOR SPEED
DO 30 I = M+1, N, 5
SDOT = SDOT + SX(I)*SY(I) + SX(I+1)*SY(I+1)
$ + SX(I+2)*SY(I+2) + SX(I+3)*SY(I+3)
$ + SX(I+4)*SY(I+4)
30 CONTINUE
C
ELSE
C ** NONEQUAL OR NONPOSITIVE INCREMENTS.
IX = 1
IY = 1
IF( INCX.LT.0 ) IX = 1 + (N-1)*(-INCX)
IF( INCY.LT.0 ) IY = 1 + (N-1)*(-INCY)
DO 40 I = 1, N
SDOT = SDOT + SX(IX) * SY(IY)
IX = IX + INCX
IY = IY + INCY
40 CONTINUE
C
ENDIF
C
RETURN
END
LOGICAL FUNCTION WRTBADQ ( quiet, VARNAM )
C
C WRITE NAMES OF ERRONEOUS VARIABLES AND RETURN 'TRUE'
C
C INPUT : VARNAM = NAME OF ERRONEOUS VARIABLE TO BE WRITTEN
C ( CHARACTER, ANY LENGTH )
C ----------------------------------------------------------------------
CHARACTER*(*) VARNAM
INTEGER MAXMSG, NUMMSG
LOGICAL quiet
SAVE NUMMSG, MAXMSG
DATA NUMMSG / 0 /, MAXMSG / 50 /
C
C
WRTBADQ = .TRUE.
NUMMSG = NUMMSG + 1
WRITE ( *, '(3A)' ) ' **** INPUT VARIABLE ', VARNAM,
$ ' IN ERROR ****'
IF ( NUMMSG.EQ.MAXMSG .AND. .NOT. quiet )
$ CALL ERRMSG( 'TOO MANY INPUT ERRORS. ABORTING...$', .TRUE. )
RETURN
END
LOGICAL FUNCTION WRTDIMQ ( quiet, DIMNAM, MINVAL )
C
C WRITE NAME OF TOO-SMALL SYMBOLIC DIMENSION AND
C THE VALUE IT SHOULD BE INCREASED TO; RETURN 'TRUE'
C
C INPUT : DIMNAM = NAME OF SYMBOLIC DIMENSION WHICH IS TOO SMALL
C ( CHARACTER, ANY LENGTH )
C MINVAL = VALUE TO WHICH THAT DIMENSION SHOULD BE
C INCREASED (AT LEAST)
C ----------------------------------------------------------------------
CHARACTER*(*) DIMNAM
INTEGER MINVAL
LOGICAL quiet
C
C
WRITE ( *, '(3A,I7)' ) ' **** SYMBOLIC DIMENSION ',
$ DIMNAM, ' SHOULD BE INCREASED TO AT LEAST ', MINVAL
WRTDIMQ = .TRUE.
RETURN
END
|
{"hexsha": "730f4a62ed10d657e9d443e81b03b65f8c23cf88", "size": 32866, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ubuntu20/projects/libRadtran-2.0.4/libsrc_f/spsmisc.f", "max_stars_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_stars_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ubuntu20/projects/libRadtran-2.0.4/libsrc_f/spsmisc.f", "max_issues_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_issues_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ubuntu20/projects/libRadtran-2.0.4/libsrc_f/spsmisc.f", "max_forks_repo_name": "AmberCrafter/docker-compose_libRadtran", "max_forks_repo_head_hexsha": "0182f991db6a13e0cacb3bf9f43809e6850593e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2117758784, "max_line_length": 72, "alphanum_fraction": 0.4378384957, "num_tokens": 11444}
|
"""
Class and functions for read data.
This module includes the :class:`ReadData` class and a few utility functions for
working with BAM reads of type :class:`pysam.AlignedSegment`.
Classes
* :class:`ReadData`
Functions
* :func:`bamread_get_oq`
* :func:`bamread_get_quals`
"""
import numpy as np
from . import compare_reads
class ReadData():
"""
A class that represents some minimal information from a sequencing read.
Since this won't hold as much data as a BAM read, if you want
to recalibrate a read you should turn it into a ReadData object,
then update the original read with the new information; otherwise
you'll lose annotations. This class also manages read group information.
You should instantiate the object from one of the class methods instead
of directly instantiating it if possible. You should never assign
directly to any of the class attributes; treat them as read-only.
Instance variables should be fine to manipulate.
Class Attributes
* :attr:`rg_to_pu` - Dict of read group id's -> platform unit
* :attr:`rg_to_int` - Dict of read group id's -> int
* :attr:`numrgs` - int number of read groups
Instance Attributes
* :attr:`seq` - Numpy array of characters representing the sequence
* :attr:`qual` - Numpy array of int representing the quality score
* :attr:`skips` - Numpy array of bools representing sites to skip
* :attr:`name` - string representing the name of the read
* :attr:`rg` - int representing the read group the read belongs to
* :attr:`second` - bool representing whether the read is 2nd in pair
* :attr:`errors` - Numpy array of bools representing whether the base is an error
Class Methods
* :meth:`from_bamread` - instantiate a ReadData object from a BAM read
* :meth:`from_fastq` - instantiate a ReadData object from a fastq read
* :meth:`load_rgs_from_bamfile` - load read groups into the class from a bam file object
Instance Methods
* :meth:`str_qual` - Get the quality score as a list of chars
* :meth:`canonical_name` - Return the name with a '/1' or '/2' suffix
* :meth:`get_rg_int` - Get the read group index using the rg_to_int dictionary
* :meth:`get_pu` - Get the PU from the read group and rg_to_pu dictionary.
* :meth:`not_skipped_errors` - Return a logical and of ~:attr:`skips` and :attr:`errors`
Note that from https://docs.python.org/3/library/stdtypes.html#dict , iteration
order is guaranteed to be in insertion order. Thus we are OK saving the rg as an
int on the fly so long as we don't remove any read groups from the rg_to_pu dictionary.
So don't do that! In fact, we should probably remove __del__() and pop() from the dicts...
"""
rg_to_pu = dict()
"""Dict mapping RG ids to the PU tag for the read group."""
rg_to_int = dict()
"""Dict mapping RG ids to integer indices."""
numrgs = 0
"""Number of readgroups encountered so far."""
def __init__(self, seq, qual, skips, name, rg, second, errors):
self.seq = seq
""":class:`numpy.ndarray` of characters representing the sequence"""
self.qual = qual
""":class:`numpy.ndarray` of int representing the quality score"""
self.skips = skips
""":class:`numpy.ndarray` of bools representing sites to skip"""
self.name = name
"""string representing the name of the read"""
self.rg = rg
"""int representing the read group the read belongs to"""
if rg not in ReadData.rg_to_pu:
#if it hasn't been preloaded,
#we create a new PU identical to the rg
#and load it
self.__class__.rg_to_pu[rg] = rg
self.__class__.rg_to_int[rg] = ReadData.numrgs
self.__class__.numrgs = ReadData.numrgs + 1
self.second = second
"""bool representing whether the read is 2nd in pair"""
self.errors = errors
""":class:`numpy.ndarray` of bools representing whether the base is an error"""
@classmethod
def from_bamread(cls, bamread, use_oq = False):
"""
ReadData factory that instantiates a ReadData object from a pysam AlignedSegment.
Skips and errors are initialized to be empty bool arrays. If you need to use
these attributes, make sure you set them the way you need to for your use case.
If :code:`use_oq` is :code:`True`, use the OQ tag for base quality.
The default is to use regular quality scores.
If the read group hasn't been loaded in the dictionary, it will be registered
with a PU equal to the value of :code:`rg`. To load the dictionary, call
:meth:`load_rgs_from_bamfile` first. If there is no RG tag for the read it will
be given a generic RG of None and lumped in with all other reads that have no RG
tag.
This will reverse-complement the sequence and reverse the qualities if the read
aligns on the reverse strand.
:param bamread: read to get data from
:type bamread: :class:`pysam.AlignedSegment`
:param bool use_oq: use the OQ tag for quality scores
:return: a read object
:rtype: :class:`ReadData`
"""
seq = np.array(list(bamread.query_sequence), dtype = np.unicode)
seqlen = len(seq)
qual = bamread_get_quals(bamread, use_oq)
if bamread.is_reverse:
seq = compare_reads.Dinucleotide.veccomplement(np.flip(seq),'N')
qual = np.flip(qual)
rg = bamread.get_tag('RG') if bamread.has_tag('RG') else None
return cls(
seq = seq,
qual = qual,
skips = np.zeros(seqlen, dtype = np.bool),
name = bamread.query_name,
rg = rg,
second = bamread.is_read2,
errors = np.zeros(seqlen, dtype = np.bool),
)
@classmethod
def from_fastq(cls, fastqread, rg = None, second = None, namedelimiter = '_'):
"""
ReadData factory that instantiates a ReadData object from a pysam FastqProxy.
Skips and errors are initialized to be empty bool arrays. If you need to use them,
make sure you set them the way you need to for your use case. The read name will
be set to the first field of the delimited read name, minus any trailing :code:`/1` or :code:`/2`
if they exist.
If :code:`rg` is None (the default), we will attempt to infer the read group id from the
read name. To infer rg, the read group must be
in its own field with the field beginning with :code:`RG:`, such as :code:`RG:example`,
with fields delimited by :code:`namedelimiter`. When multiple fields are found that
begin with :code:`RG:`, the last is chosen to be the true ID. If inference fails,
the read group will remain None.
If the read group (either inferred or provided explicitly) hasn't been
loaded in the dictionary, it will be registered with a PU equal to the value of :code:`rg`.
If second is None (the default) we will attempt to infer if the read
is second in pair based on the name.
To infer second, the first field of the read name must end with :code:`/2`.
If the last 2 characters of the first field are not :code:`/2`, second will be inferred to be false.
:param fastqread: The fastq read to get data from
:type fastqread: :class:`pysam.FastqProxy`
:param str rg: the read group the read belongs to
:param bool second: whether the read is 2nd in pair
:param str namedelimiter: the delimiter for parsing the read name
:return: a read object
:rtype: :class:`ReadData`
"""
seq = np.array(list(fastqread.sequence), dtype = np.unicode)
seqlen = len(seq)
splitname = fastqread.name.split(sep = namedelimiter)
if rg is None:
possible_rgs = [f.split(':')[-1] for f in splitname if f[0:3] == 'RG:']
if possible_rgs:
rg = possible_rgs[-1]
if second is None:
second = (splitname[0][-2:] == '/2')
if splitname[0].endswith(('/1','/2')):
splitname[0] = splitname[0][:-2]
return cls(
seq = seq,
qual = np.array(fastqread.get_quality_array(), dtype = np.int),
skips = np.zeros(seqlen, dtype = np.bool),
name = splitname[0],
rg = rg,
second = second,
errors = np.zeros(seqlen, dtype = np.bool),
)
@classmethod
def load_rgs_from_bamfile(cls, bamfileobj):
"""
Load read group IDs and PUs from the header of the
pysam bamfileobj.
Recalibration is done on a PU basis, so when building
a model using reads in a BAM, the PU for each read is
obtained by looking up the PU associated with its read
group. The dictionary that controls these lookups is a
class variable. To store the RG data in an int form, an
rg-to-int dict is also loaded. This should be equal to
:code:`dict(zip(rg_to_pu, range(len(rg_to_pu))))`
:param bamfileobj: The opened bam file
:type bamfileobj: :class:`pysam.AlignmentFile`
"""
for rg in bamfileobj.header.as_dict()['RG']:
cls.rg_to_pu[rg['ID']] = rg['PU']
cls.rg_to_int[rg['ID']] = cls.numrgs
cls.numrgs = cls.numrgs + 1
def str_qual(self, offset = 33):
"""
Get the quality of this read as a list of characters.
offset (default 33) will be added to the ASCII value
of each character.
:param int offset: Offset to add
:return: read quality
:rtype: list(chr)
"""
return list((self.qual + offset).astype(np.uint32).view('U1'))
def canonical_name(self):
"""
The name with an added suffix based on whether the read
is firstinpair or not.
If the read has its second flag set to True, :code:`/2` is added
to the end. Otherwise, :code:`/1` is added.
:return: read name with a suffix
:rtype: str
"""
suffix = ("/2" if self.second else "/1")
return self.name + suffix
def get_rg_int(self):
"""
Return the RG as an int suitable for indexing rather
than as the actual string RG ID.
:return: read group index
:rtype: int
"""
return self.__class__.rg_to_int[self.rg]
def get_pu(self):
"""
Return the PU from the rg_to_pu dict.
:return: The Read Group's PU tag
:rtype: str
"""
return self.__class__.rg_to_pu[self.rg]
def not_skipped_errors(self):
"""
Return a logical and of not :attr:`skips` and :attr:`errors`
:return: array of valid errors
:rtype: :class:`numpy.ndarray` (bool)
"""
return np.logical_and(self.errors, ~self.skips)
def get_rg_errors(self):
"""
Return an array of rg values that were errors and all valid rg values.
The errors are always a subset of the valid sites.
For example, a read with 4 non skipped sites and 1 error
will return :code:`([rg], [rg, rg, rg, rg])`
:return: errors and valid rg values
:rtype: tuple(:class:`numpy.ndarray` (int) , :class:`numpy.ndarray` (int))
"""
rg = self.get_rg_int()
rg = np.broadcast_to(rg, len(self))
return rg[self.not_skipped_errors()], rg[~self.skips]
def get_q_errors(self):
"""
Return an array of q values that were errors and of all valid q values.
The errors are always a subset of the valid values.
:return: erroneous and valid q values
:rtype: tuple(:class:`numpy.ndarray` (int) , :class:`numpy.ndarray` (int))
"""
qe = self.qual[self.not_skipped_errors()]
qv = self.qual[~self.skips]
return qe, qv
def get_cycle_array(self):
"""
Return an array of cycle values.
This is :code:`range(len(seq))` if second is false,
otherwise it is :code:`-1..-len(seq)` inclusive.
This way we can hold cycle values as an array of
size 2 * seqlen and store negative values at the end
of the array.
:return: cycle values
:rtype: :class:`numpy.ndarray` (int)
"""
cycle = np.arange(len(self))
if self.second:
cycle = np.negative(cycle + 1)
return cycle
def get_cycle_errors(self):
"""
Return an array of cycle values that were errors and of all valid cycle values.
The errors are always a subset of the valid values.
:return: erroneous and valid cycle values
:rtype: tuple(:class:`numpy.ndarray` (int) , :class:`numpy.ndarray` (int))
"""
cycle = self.get_cycle_array()
ce = cycle[self.not_skipped_errors()]
cv = cycle[~self.skips]
return ce, cv
def get_dinucleotide_array(self, minscore = 6):
"""
Return an array of dinucleotide values.
The character to int map is stored in :class:`kbbq.compare_reads.Dinucleotide`.
:return: dinucleotide values
:rtype: :class:`numpy.ndarray` (int)
"""
dinuc = np.char.add(self.seq[:-1], self.seq[1:])
dinuccov = np.zeros(len(self), dtype = np.int)
dinuccov[0] = -1
is_n = (self.seq[1:] == 'N')
follows_n = (self.seq[:-1] == 'N')
invalid = np.logical_or(self.qual[1:] < minscore, np.logical_or(is_n, follows_n))
dinuccov[1:][invalid] = -1
dinuccov[1:][~invalid] = compare_reads.Dinucleotide.vecget(dinuc[~invalid])
return dinuccov
def get_dinuc_errors(self, minscore = 6):
"""
Return an array of dinucleotide values that were errors and of all valid dinucleotide values.
The errors are always a subset of the valid values.
:return: erroneous and valid dinucleotide values
:rtype: tuple(:class:`numpy.ndarray` (int) , :class:`numpy.ndarray` (int))
"""
dinuc = self.get_dinucleotide_array(minscore)
dvalid = np.logical_and(dinuc != -1, ~self.skips)
dvalid_and_error = np.logical_and(dvalid, self.errors)
de = dinuc[dvalid_and_error]
dv = dinuc[dvalid]
return de, dv
def __len__(self):
"""
Return sequence length.
:return: sequence length
:rtype: int
"""
return len(self.seq)
def bamread_get_oq(read, offset = 33):
"""
Get the OQ of the given bamread as an array of int.
offset (default 33) will be subtracted from the ASCII value
of each character.
:param read: Read to get quals from
:type read: :class:`pysam.AlignedSegment`
:param int offset: Offset to subtract
:return: quality scores
:rtype: :class:`numpy.ndarray` of int
"""
oq = np.array(list(read.get_tag('OQ')), dtype = np.unicode)
quals = np.array(oq.view(np.uint32) - offset, dtype = np.uint32)
return quals
def bamread_get_quals(read, use_oq = False):
"""
Return the qualities of a pysam bam read as an array.
If use_oq = True, use the OQ tag for base quality.
The default is to use regular quality scores.
:param read: Read to get quals from
:type read: :class:`pysam.AlignedSegment`
:param bool use_oq: Use OQ tag for quality scores
:return: quality scores
:rtype: :py:class:`numpy.ndarray` of int
"""
if use_oq:
return bamread_get_oq(read)
else:
return np.array(read.query_qualities, dtype = np.int)
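# --- Illustrative usage sketch (not part of the original module); the arrays
# below are hypothetical toy values, so no BAM or FASTQ file is needed:
# toy = ReadData(seq=np.array(list('ACGTN'), dtype=np.unicode),
#                qual=np.array([30, 30, 2, 30, 30], dtype=np.int),
#                skips=np.zeros(5, dtype=np.bool), name='toy_read', rg=None,
#                second=False, errors=np.zeros(5, dtype=np.bool))
# toy.canonical_name()          # -> 'toy_read/1'
# toy.str_qual()                # -> qualities as printable characters
# toy.get_dinucleotide_array()  # -> -1 marks invalid/low-quality sites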
|
{"hexsha": "463c83683ee9cd06156fb97970414cc4d41b25ac", "size": 15817, "ext": "py", "lang": "Python", "max_stars_repo_path": "kbbq/read.py", "max_stars_repo_name": "adamjorr/kbbq-py", "max_stars_repo_head_hexsha": "a1b6049458ec03d305c4f4148aad325a3867d627", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kbbq/read.py", "max_issues_repo_name": "adamjorr/kbbq-py", "max_issues_repo_head_hexsha": "a1b6049458ec03d305c4f4148aad325a3867d627", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-11-09T20:21:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-02T00:07:35.000Z", "max_forks_repo_path": "kbbq/read.py", "max_forks_repo_name": "adamjorr/kbbq-py", "max_forks_repo_head_hexsha": "a1b6049458ec03d305c4f4148aad325a3867d627", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-01T01:53:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-01T01:53:33.000Z", "avg_line_length": 38.0216346154, "max_line_length": 108, "alphanum_fraction": 0.6212935449, "include": true, "reason": "import numpy", "num_tokens": 3977}
|
%!TEX root = ../main.tex
\subsection{Anti-Derivative}
\objective{Distinguish and find anti-derivatives and integrals of functions}
Suppose we are given a formula and are told it is the derivative of what we want.
This isn't as abstract as it sounds: velocity is the derivative of position, and (at least in
many cars) it is easier to record velocity than it is position. If the velocity
function is an algebraic equation, we simply need to apply the Power Rule
in reverse and we will have the anti-derivative of the equation.
The Power Rule states that the derivative of $x^n$ is $n\cdot x^{n-1}$. In other words,
``take the exponent out front, and lower the exponent by one''. If we want to
run this backwards, then in order to arrive at $x^n$, we must have
begun at $x^{n+1}$. However, if we take the derivative of $x^{n+1}$, we must
multiply by $n+1$. To cancel that, we should multiply by $\frac{1}{n+1}$.
\begin{derivation}{Backwards Power Rule}\index{Power Rule!Backwards}
The anti-derivative of $x^n$ is $\frac{1}{n+1} x^{n+1} + C$, where $C$ is an unknown
constant.
\end{derivation}
What is $C$? Consider whether or not $x^2 + 1$ is an anti-derivative of $2x$.
Is $x^2-1$? Is $x^2+\pi$? Because constants differentiate to 0, a constant could be
part of our anti-derivative equation, and we cannot know what it is without more information.
What information? Well, if we know the initial conditions (the value when $x=0$), then we can
solve for $C$ and know precisely which anti-derivative equation we want.
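For instance, suppose a car's velocity is $v(t) = 3t^2$ and we know its position at $t=0$ is 5.
The Backwards Power Rule gives the anti-derivative $3 \cdot \frac{1}{3} t^{3} + C = t^3 + C$,
and the initial condition forces $0^3 + C = 5$, so $C = 5$ and the position function is exactly $t^3 + 5$.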
\subsection{Integral}\index{integral!definite}
The preceding definition of anti-derivatives is very helpful algebraically, but what about
graphically? If a given graph is the derivative of what we seek, can we interpret the
graph to give us numerical information?
Consider the graph of a car with constant velocity:
\begin{figure}[h]
\begin{centering}
\begin{tikzpicture}[scale=0.6]
\draw[help lines] (0,0) grid (8,6);
\draw (-0.5,0) -- (8.5,0);
\draw (0,-0.5) -- (0,6.5);
\draw[thick,->] (0,4) node[anchor=east] {40 mph}-- (8.5,4);
\draw (8,0) node[anchor=north] {0.8 hr};
\draw [fill=gray!80,opacity =0.4] (0,0) rectangle (8,4);
\end{tikzpicture}
\caption{A car's velocity in 10's of mph, over tenths of an hour}
\end{centering}
\end{figure}
If we want position or distance, we have known a formula for a long time:
distance = rate $\cdot$ time. The $y$-value is the rate. The $x$-value is the
time. As hard as it may be to conceive of, distance is the \emph{area} under the
graph. In this case, 40 mph times 0.8 hours is 32 miles. Notice that this number does
not depend upon the initial position: the car has travelled 32 positive miles, regardless
of where it began.
\begin{equation}
\int_0^{0.8} 40dx = \left.40x \right|_0^{0.8} = 40(.8) - 40(0) = 32
\end{equation}
What about more complicated velocity? Let us begin with constant acceleration:
\begin{figure}[h]
\begin{centering}
\begin{tikzpicture}[scale=0.7]
\draw[help lines] (0,-2) grid (6,6);
\draw (-0.5,0) -- (6.5,0);
\draw (0,-2.5) -- (0,6.5);
\draw [thick,->] (0,-2) -- (6.5,4.5);
\draw [fill=red!80,opacity=0.4] (0,-2) -- (2,0) -- (0,0);
\draw [fill=gray!80,opacity=0.4] (2,0) -- (6,0) -- (6,4);
\end{tikzpicture}
\caption{A car beginning at -20 mph but steadily accelerating to 40 mph by 0.6 hours later}
\end{centering}
\end{figure}
The object begins with a negative velocity, so we must count that area as negative
distance. The red triangle has area $-2$ and the gray triangle has area $8$, and
$-2 + 8 = 6$, so the object has gained six units of positive displacement over the interval.
In some problems, we can simply count the squares below the graph and find the
definite integral. In most cases, the graph will be curved and we will need to find an
anti-derivative equation and subtract its evaluation at the left endpoint from that at the right.
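To see this algebraically, the velocity line above runs from $-20$ mph at $t=0$ to
$40$ mph at $t=0.6$ hours, i.e. $v(t) = -20 + 100t$, so
\begin{equation}
\int_0^{0.6} (-20 + 100t)\,dt = \left. -20t + 50t^2 \right|_0^{0.6} = -12 + 18 = 6 ,
\end{equation}
six miles of net displacement, matching the net area counted from the graph.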
Finally, notice that we can integrate functions we cannot differentiate, at times. It would
be impossible for a physical object to have a velocity graph like the \texttt{int()} function,
but it can still be meaningful to find the area under the graph.
\begin{example}{Bandwidth Rates}
\exProblem
Suppose a wifi hotspot's charges start at 2.50 per hour when you begin, and the rate goes
up 0.50 every 1/5 hour (12 minutes) after that. The rate is not incremented
continuously, but jumps at the start of each interval. Illustrate the cost of using the service for 0.9 hours
as a definite integral.
\exSolution
\begin{tikzpicture}[xscale=4,yscale=1]
\draw (-0.05,0) -- (1,0) ;
\draw (0,-.2) -- (0,5.0);
\draw[thick] (0,2.50) -- (.2,2.5) -- (.2,3) -- (.4,3) -- (.4,3.5) -- (.6,3.5) -- (.6,4) -- (.8,4) -- (.8,4.5) -- (1,4.5);
\draw [fill=gray!80,opacity=0.4] (0,0) rectangle (.2,2.5);
\draw [fill=gray!80,opacity=0.4] (.2,0) rectangle (.4,3);
\draw [fill=gray!80,opacity=0.4] (.4,0) rectangle (.6,3.5);
\draw [fill=gray!80,opacity=0.4] (.6,0) rectangle (.8,4);
\draw [fill=gray!80,opacity=0.4] (.8,0) rectangle (.9,4.5);
\end{tikzpicture}
As the rectangles illustrate, $(0.2)(2.5) + (0.2)(3.0) + (0.2)(3.5) + (0.2)(4.0) + (0.1)(4.5) = 3.05$.
\end{example}
|
{"hexsha": "c7fe20a3723f4e1006092e58d76d9e657e4287fc", "size": 5056, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ch05/0505.tex", "max_stars_repo_name": "aquatiki/AnalysisTextbook", "max_stars_repo_head_hexsha": "011c16427ada1b1e3df8e66c02566a5d5ac8abcf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-10-08T15:05:17.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-07T12:32:53.000Z", "max_issues_repo_path": "ch05/0505.tex", "max_issues_repo_name": "aquatiki/AnalysisTextbook", "max_issues_repo_head_hexsha": "011c16427ada1b1e3df8e66c02566a5d5ac8abcf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ch05/0505.tex", "max_forks_repo_name": "aquatiki/AnalysisTextbook", "max_forks_repo_head_hexsha": "011c16427ada1b1e3df8e66c02566a5d5ac8abcf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9652173913, "max_line_length": 122, "alphanum_fraction": 0.6946202532, "num_tokens": 1706}
|
import numpy as np
class Naive_Bayes(object):
    def __init__(self, type="Gaussian", prior=None):
        # `type` selects the variant ("gaussian" or "multinomial");
        # `prior` optionally fixes the class log-priors, otherwise they
        # are estimated from the class frequencies in `fit`.
        self.type = type
        self.prior = prior if prior is not None else []
    def fit(self, X, y):
        if (self.type).lower() == "multinomial":
            count_sample = X.shape[0]
            # group the training samples by class label
            separated = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
            if len(self.prior) == 0:
                self.class_log_prior_ = [np.log(len(i) / count_sample) for i in separated]
            else:
                self.class_log_prior_ = self.prior
            # per-class feature counts with Laplace (add-one) smoothing
            count = np.array([np.array(i).sum(axis=0) for i in separated]) + 1.0
            self.feature_log_prob_ = np.log(count / count.sum(axis=1)[np.newaxis].T)
            return self
        if (self.type).lower() == "gaussian":
            separated = [[x for x, t in zip(X, y) if t == c] for c in np.unique(y)]
            # store per-class, per-feature (mean, std) summaries
            self.model = np.array([np.c_[np.mean(i, axis=0), np.std(i, axis=0)]
                                   for i in separated])
            return self
    def _prob(self, x, mean, std):
        # despite the name, this returns the *log* of the Gaussian
        # density at x, to match predict_log_proba
        if (self.type).lower() == "gaussian":
            exponent = np.exp(- ((x - mean)**2 / (2 * std**2)))
            return np.log(exponent / (np.sqrt(2 * np.pi) * std))
    def predict_log_proba(self, X):
        if (self.type).lower() == "multinomial":
            return [(self.feature_log_prob_ * x).sum(axis=1) + self.class_log_prior_
                    for x in X]
        if (self.type).lower() == "gaussian":
            # note: the Gaussian branch sums per-feature log-densities
            # and does not add class priors
            return [[sum(self._prob(i, *s) for s, i in zip(summaries, x))
                     for summaries in self.model] for x in X]
    def predict(self, X):
        # choose the class with the highest log-likelihood
        return np.argmax(self.predict_log_proba(X), axis=1)
    def score(self, X, y):
        return sum(self.predict(X) == y) / len(y)
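# A quick smoke test on tiny made-up data (the values below are
# illustrative only, not from any real dataset):
if __name__ == "__main__":
    X = np.array([[1.0, 2.0], [1.1, 1.9], [5.0, 6.0], [5.2, 5.8]])
    y = np.array([0, 0, 1, 1])
    nb = Naive_Bayes(type="gaussian").fit(X, y)
    print(nb.predict(X))   # -> [0 0 1 1]
    print(nb.score(X, y))  # -> 1.0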
|
{"hexsha": "cc4fbf4c1d6b3a978ca7b691b620efad067ea8e2", "size": 1509, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/naive_bayes.py", "max_stars_repo_name": "arkilpatel/Raw-ML-Classifiers", "max_stars_repo_head_hexsha": "9e6122cc9cf53ee6048e2269aa2b1fad19499a5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-04-09T11:19:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T09:45:44.000Z", "max_issues_repo_path": "src/naive_bayes.py", "max_issues_repo_name": "arkilpatel/Raw-ML-Classifiers", "max_issues_repo_head_hexsha": "9e6122cc9cf53ee6048e2269aa2b1fad19499a5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/naive_bayes.py", "max_forks_repo_name": "arkilpatel/Raw-ML-Classifiers", "max_forks_repo_head_hexsha": "9e6122cc9cf53ee6048e2269aa2b1fad19499a5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9285714286, "max_line_length": 78, "alphanum_fraction": 0.6235917826, "include": true, "reason": "import numpy", "num_tokens": 480}
|
function ackley(x, a=20, b=0.2, c=2π)
d = length(x)
return -a*exp(-b*sqrt(sum(x.^2)/d)) -
exp(sum(cos.(c*xi) for xi in x)/d) + a +
exp(1)
end
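# Sanity check: the Ackley function has its global minimum at the origin,
# so ackley(zeros(2)) should evaluate to approximately 0.0 (up to
# floating-point round-off).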
|
{"hexsha": "1413d5d2a81933adb8c585de6ea83191d3563d6c", "size": 166, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/problems/Ackley.jl", "max_stars_repo_name": "xh4/MOEA", "max_stars_repo_head_hexsha": "0953e9b4aa8aa1a0ceabc30b481eb954e1920621", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/problems/Ackley.jl", "max_issues_repo_name": "xh4/MOEA", "max_issues_repo_head_hexsha": "0953e9b4aa8aa1a0ceabc30b481eb954e1920621", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/problems/Ackley.jl", "max_forks_repo_name": "xh4/MOEA", "max_forks_repo_head_hexsha": "0953e9b4aa8aa1a0ceabc30b481eb954e1920621", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7142857143, "max_line_length": 48, "alphanum_fraction": 0.4939759036, "num_tokens": 70}
|
import unittest
import neuralnetsim
import numpy as np
class TestExponentialSchedule(unittest.TestCase):
def test_initial_t(self):
cooler = neuralnetsim.ExponentialCoolingSchedule(1.0, 1.0, 0)
self.assertAlmostEqual(1.0, cooler.step())
def test_step(self):
cooler = neuralnetsim.ExponentialCoolingSchedule(1.0, 0.9, 0)
cooler.step()
self.assertAlmostEqual(cooler.step(), 0.90)
self.assertAlmostEqual(cooler.step(), 0.81)
    def test_final_t(self):
cooler = neuralnetsim.ExponentialCoolingSchedule(1.0, 0.9, 0)
for i in range(1000):
cooler.step()
self.assertAlmostEqual(cooler.t, 0.0)
def test_start(self):
cooler = neuralnetsim.ExponentialCoolingSchedule(1.0, 0.9, 50)
for i in range(51):
cooler.step()
self.assertAlmostEqual(cooler.t, 1.0)
self.assertAlmostEqual(cooler.step(), 0.90)
self.assertAlmostEqual(cooler.step(), 0.81)
def test_stop(self):
cooler = neuralnetsim.ExponentialCoolingSchedule(1.0, 0.9, 0, 50)
for i in range(50):
cooler.step()
test_t = cooler.t
for i in range(50):
self.assertAlmostEqual(cooler.step(), test_t)
class TestAdaptiveSchedule(unittest.TestCase):
def test_initial_t(self):
cooler = neuralnetsim.AdaptiveCoolingSchedule(1.0, 2.0, 10, 0.1, 5,
0, 50, 0.0)
self.assertAlmostEqual(1.0, cooler.step(0.0))
def test_step(self):
cooler = neuralnetsim.AdaptiveCoolingSchedule(1.0, 2.0, 2, 0.1, 2,
0, 50, 0.0)
self.assertAlmostEqual(cooler.step(0.0), 1.0)
self.assertAlmostEqual(cooler.step(0.0), 1.0)
self.assertAlmostEqual(cooler.thistory[0], 1.0)
self.assertAlmostEqual(cooler.thistory[1], 1.0)
self.assertAlmostEqual(cooler.ehistory[0], 0.0)
self.assertAlmostEqual(cooler.ehistory[1], 0.0)
self.assertAlmostEqual(cooler.thistory[0], 1.0)
cooler.step(1.0)
self.assertAlmostEqual(cooler.ehistory[0], 0.0)
self.assertAlmostEqual(cooler.ehistory[1], 1.0)
t = cooler.step(1.0)
self.assertAlmostEqual(cooler.thistory[1], t)
    def test_final_t(self):
cooler = neuralnetsim.AdaptiveCoolingSchedule(1.0, 2.0, 10, 1.0, 10,
tmin=0.01)
rng = np.random.RandomState(15135)
energies = rng.normal(size=100)
for e in energies:
self.assertGreaterEqual(cooler.step(e), 0.01)
def test_stop(self):
cooler = neuralnetsim.AdaptiveCoolingSchedule(1.0, 2.0, 10, 1.0, 10, 0,
stop=25)
rng = np.random.RandomState(15135)
energies = rng.normal(size=100)
for i in range(25):
cooler.step(energies[i])
test_t = cooler.t
for i in range(75):
self.assertAlmostEqual(cooler.step(energies[i]), test_t)
|
{"hexsha": "a1909ece320a6095e640e18a4d03d243ef2a3e91", "size": 3088, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_cooling.py", "max_stars_repo_name": "Nathaniel-Rodriguez/neuralnetsim", "max_stars_repo_head_hexsha": "c353af92fb3f44539370220963b07bdfd9822149", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_cooling.py", "max_issues_repo_name": "Nathaniel-Rodriguez/neuralnetsim", "max_issues_repo_head_hexsha": "c353af92fb3f44539370220963b07bdfd9822149", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_cooling.py", "max_forks_repo_name": "Nathaniel-Rodriguez/neuralnetsim", "max_forks_repo_head_hexsha": "c353af92fb3f44539370220963b07bdfd9822149", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6, "max_line_length": 79, "alphanum_fraction": 0.5952072539, "include": true, "reason": "import numpy", "num_tokens": 817}
|
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import os
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
from lightweight_mmm import lightweight_mmm
from lightweight_mmm import utils
class UtilsTest(parameterized.TestCase):
def test_save_model_file_is_correctly_saved(self):
media = jnp.ones((20, 2), dtype=jnp.float32)
extra_features = jnp.arange(20).reshape((20, 1))
costs = jnp.arange(1, 3)
target = jnp.arange(1, 21)
mmm_object = lightweight_mmm.LightweightMMM()
mmm_object.fit(
media=media,
extra_features=extra_features,
total_costs=costs,
target=target,
number_warmup=10,
number_samples=100,
number_chains=1)
file_path = os.path.join(self.create_tempdir().full_path, "model.pkl")
utils.save_model(media_mix_model=mmm_object,
file_path=file_path)
self.assertTrue(os.path.exists(file_path))
def test_load_model_with_all_attributes(self):
media = jnp.ones((20, 2), dtype=jnp.float32)
extra_features = jnp.arange(20).reshape((20, 1))
costs = jnp.arange(1, 3)
target = jnp.arange(1, 21)
mmm_object = lightweight_mmm.LightweightMMM()
mmm_object.fit(
media=media,
extra_features=extra_features,
total_costs=costs,
target=target,
number_warmup=10,
number_samples=100,
number_chains=1)
file_path = os.path.join(self.create_tempdir().full_path, "model.pkl")
utils.save_model(media_mix_model=mmm_object,
file_path=file_path)
loaded_mmm = utils.load_model(file_path)
self.assertEqual(mmm_object, loaded_mmm)
@parameterized.named_parameters([
dict(
testcase_name="shape_100_3_3",
data_size=100,
n_media_channels=3,
n_extra_features=3),
dict(
testcase_name="shape_200_8_1",
data_size=200,
n_media_channels=8,
n_extra_features=1),
dict(
testcase_name="shape_300_2_2",
data_size=300,
n_media_channels=2,
n_extra_features=2),
dict(
testcase_name="shape_400_4_10",
data_size=400,
n_media_channels=4,
n_extra_features=10)
])
def test_simulate_dummy_data_produces_correct_shape(self,
data_size,
n_media_channels,
n_extra_features):
media_data, extra_features, target, costs = utils.simulate_dummy_data(
data_size=data_size,
n_media_channels=n_media_channels,
n_extra_features=n_extra_features)
self.assertEqual(media_data.shape, (data_size, n_media_channels))
self.assertEqual(extra_features.shape, (data_size, n_extra_features))
self.assertEqual(target.shape, (data_size,))
self.assertLen(costs, n_media_channels)
@parameterized.named_parameters([
dict(
testcase_name="shape_0_3_3",
data_size=0,
n_media_channels=3,
n_extra_features=3),
dict(
testcase_name="shape_200_-1_1",
data_size=200,
n_media_channels=-1,
n_extra_features=1),
dict(
testcase_name="shape_300_2_-2",
data_size=300,
n_media_channels=2,
n_extra_features=-2),
dict(
testcase_name="shape_-400_-4_-10",
data_size=-400,
n_media_channels=-4,
n_extra_features=-10)
])
def test_simulate_dummy_data_with_zero_or_neg_parameter_raises_value_error(
self, data_size, n_media_channels, n_extra_features):
with self.assertRaises(ValueError):
utils.simulate_dummy_data(
data_size=data_size,
n_media_channels=n_media_channels,
n_extra_features=n_extra_features)
def test_simulate_geo_data_has_right_shape(self):
data_size = 100
geos = 3
media_data, _, target, _ = utils.simulate_dummy_data(
data_size, 2, 2, geos=geos)
self.assertEqual(target.shape, (data_size, geos))
self.assertEqual(media_data.shape, (data_size, 2, geos))
def test_halfnormal_mean_and_scale(self):
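    # For a half-normal distribution with scale s, the mean is
    # s * sqrt(2 / pi), so the two conversion helpers should invert
    # one another; that is what this test checks.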
mean = 1.
scale = utils.get_halfnormal_scale_from_mean(mean)
new_mean = utils.get_halfnormal_mean_from_scale(scale)
self.assertEqual(scale, mean * np.sqrt(np.pi) / np.sqrt(2))
self.assertEqual(mean, new_mean)
def test_beta_params_match(self):
a, b = 2., 3.
# Expected mean is 2 / 5.
mu = a / (a + b)
sigma = np.sqrt(a * b / ((a + b) ** 2 * (a + b + 1)))
ahat, bhat = utils.get_beta_params_from_mu_sigma(mu, sigma)
self.assertAlmostEqual(ahat / (ahat + bhat), 2 / 5)
def test_prior_posterior_distance_discrete(self):
p = jnp.array([0] * 2 + [1] * 3)
q = jnp.array([0] * 3 + [1] * 2 + [2] * 1)
ks = utils.distance_pior_posterior(p, q, method="KS", discrete=True)
js = utils.distance_pior_posterior(p, q, method="JS", discrete=True)
hell = utils.distance_pior_posterior(
p, q, method="Hellinger", discrete=True)
mindist = utils.distance_pior_posterior(p, q, method="min", discrete=True)
print(ks, js, hell, mindist)
self.assertAlmostEqual(ks, 1 / 6)
self.assertAlmostEqual(js, 0.283, 3)
self.assertAlmostEqual(hell, 0.325, 3)
self.assertAlmostEqual(mindist, 0.267, 3)
def test_prior_posterior_distance_continuous(self):
p = jnp.array([0] * 2 + [.5] * 3 + [1] * 2)
q = jnp.array([0] * 2 + [.5] * 4 + [1] * 2 + [1.5] * 3)
ks = utils.distance_pior_posterior(p, q, method="KS", discrete=False)
js = utils.distance_pior_posterior(p, q, method="JS", discrete=False)
hell = utils.distance_pior_posterior(
p, q, method="Hellinger", discrete=False)
mindist = utils.distance_pior_posterior(p, q, method="min", discrete=False)
print(ks, js, hell, mindist)
self.assertAlmostEqual(ks, 0.2727, 4)
self.assertAlmostEqual(js, 0.034, 3)
self.assertAlmostEqual(hell, 0.034, 3)
self.assertAlmostEqual(mindist, 0.041, 3)
if __name__ == "__main__":
absltest.main()
|
{"hexsha": "54316926f763fb133581e4550870a1861130c9b3", "size": 6745, "ext": "py", "lang": "Python", "max_stars_repo_path": "lightweight_mmm/utils_test.py", "max_stars_repo_name": "juanitorduz/lightweight_mmm", "max_stars_repo_head_hexsha": "17a4c7fcd860902084ab04f6e58afefba6de5f22", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lightweight_mmm/utils_test.py", "max_issues_repo_name": "juanitorduz/lightweight_mmm", "max_issues_repo_head_hexsha": "17a4c7fcd860902084ab04f6e58afefba6de5f22", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lightweight_mmm/utils_test.py", "max_forks_repo_name": "juanitorduz/lightweight_mmm", "max_forks_repo_head_hexsha": "17a4c7fcd860902084ab04f6e58afefba6de5f22", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7680412371, "max_line_length": 79, "alphanum_fraction": 0.6547071905, "include": true, "reason": "import numpy,import jax", "num_tokens": 1739}
|
#ifndef SUPERGENIUS_PRODUCTION_IMPL_HPP
#define SUPERGENIUS_PRODUCTION_IMPL_HPP
#include "verification/production.hpp"
#include <memory>
#include <boost/asio/basic_waitable_timer.hpp>
#include <outcome/outcome.hpp>
#include "application/app_state_manager.hpp"
#include "authorship/proposer.hpp"
#include "blockchain/block_tree.hpp"
#include "clock/timer.hpp"
#include "base/logger.hpp"
#include "verification/production/production_gossiper.hpp"
#include "verification/production/production_lottery.hpp"
#include "verification/production/epoch_storage.hpp"
#include "verification/production/impl/block_executor.hpp"
#include "crypto/hasher.hpp"
#include "crypto/sr25519_types.hpp"
#include "primitives/production_configuration.hpp"
#include "primitives/common.hpp"
#include "storage/trie/trie_storage.hpp"
namespace sgns::verification {
enum class ProductionState {
    WAIT_BLOCK,     // Node has just started and is waiting for a new block
                    // to sync missing blocks
CATCHING_UP, // Node received first block announce and started fetching
// blocks between announced one and the latest finalized one
NEED_SLOT_TIME, // Missing blocks were received, now slot time should be
// calculated
SYNCHRONIZED // All missing blocks were received and applied, slot time was
// calculated, current peer can start block production
};
inline const auto kTimestampId =
primitives::InherentIdentifier::fromString("timstap0").value();
inline const auto kProdSlotId =
primitives::InherentIdentifier::fromString("prodslot").value();
class ProductionImpl : public Production, public std::enable_shared_from_this<ProductionImpl> {
public:
    /**
     * Create an instance of the Production implementation.
     * The node configuration must contain the 'genesis' option.
* @param lottery - implementation of Production Lottery
* @param proposer - block proposer
* @param block_tree - tree of the blocks
* @param gossiper of this verification
* @param keypair - SR25519 keypair of this node
* @param authority_index of this node
* @param clock to measure time
* @param hasher to take hashes
* @param timer to be used by the implementation; the recommended one is
* sgns::clock::BasicWaitableTimer
* @param event_bus to deliver events over
*/
ProductionImpl(std::shared_ptr<application::AppStateManager> app_state_manager,
std::shared_ptr<ProductionLottery> lottery,
std::shared_ptr<BlockExecutor> block_executor,
std::shared_ptr<storage::trie::TrieStorage> trie_db,
std::shared_ptr<EpochStorage> epoch_storage,
std::shared_ptr<primitives::ProductionConfiguration> configuration,
std::shared_ptr<authorship::Proposer> proposer,
std::shared_ptr<blockchain::BlockTree> block_tree,
std::shared_ptr<ProductionGossiper> gossiper,
crypto::SR25519Keypair keypair,
std::shared_ptr<clock::SystemClock> clock,
std::shared_ptr<crypto::Hasher> hasher,
std::unique_ptr<clock::Timer> timer,
std::shared_ptr<authority::AuthorityUpdateObserver>
authority_update_observer);
~ProductionImpl() override = default;
bool start();
void setExecutionStrategy(ExecutionStrategy strategy) override {
execution_strategy_ = strategy;
}
void runEpoch(Epoch epoch,
ProductionTimePoint starting_slot_finish_time) override;
void onBlockAnnounce(const network::BlockAnnounce &announce) override;
ProductionMeta getProductionMeta() const;
private:
/**
* Run the next Production slot
*/
void runSlot();
/**
* Finish the current Production slot
*/
void finishSlot();
/**
* Gather the block and broadcast it
* @param output that we are the leader of this slot
*/
void processSlotLeadership(const crypto::VRFOutput &output);
/**
* Finish the Production epoch
*/
void finishEpoch();
ProductionLottery::SlotsLeadership getEpochLeadership(const Epoch &epoch) const;
outcome::result<primitives::PreRuntime> productionPreDigest(
const crypto::VRFOutput &output,
primitives::AuthorityIndex authority_index) const;
primitives::Seal sealBlock(const primitives::Block &block) const;
/**
* To be called if we are far behind other nodes to skip some slots and
* finally synchronize with the network
*/
void synchronizeSlots(const primitives::BlockHeader &new_header);
private:
std::shared_ptr<application::AppStateManager> app_state_manager_;
std::shared_ptr<ProductionLottery> lottery_;
std::shared_ptr<BlockExecutor> block_executor_;
std::shared_ptr<storage::trie::TrieStorage> trie_storage_;
std::shared_ptr<EpochStorage> epoch_storage_;
std::shared_ptr<primitives::ProductionConfiguration> genesis_configuration_;
std::shared_ptr<authorship::Proposer> proposer_;
std::shared_ptr<blockchain::BlockTree> block_tree_;
std::shared_ptr<ProductionGossiper> gossiper_;
crypto::SR25519Keypair keypair_;
std::shared_ptr<clock::SystemClock> clock_;
std::shared_ptr<crypto::Hasher> hasher_;
std::unique_ptr<clock::Timer> timer_;
std::shared_ptr<authority::AuthorityUpdateObserver>
authority_update_observer_;
ProductionState current_state_{ProductionState::WAIT_BLOCK};
Epoch current_epoch_;
/// Estimates of the first block production slot time. Input for the median
/// algorithm
std::vector<ProductionTimePoint> first_slot_times_{};
/// Number of blocks we need to use in median algorithm to get the slot time
const uint32_t kSlotTail = 30;
ProductionSlotNumber current_slot_{};
ProductionLottery::SlotsLeadership slots_leadership_;
ProductionTimePoint next_slot_finish_time_;
boost::optional<ExecutionStrategy> execution_strategy_;
base::Logger log_;
};
} // namespace sgns::verification
#endif // SUPERGENIUS_PRODUCTION_IMPL_HPP
|
{"hexsha": "ccd963524048789c4892a78028bfd57a1ec83ea4", "size": 6128, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/verification/production/impl/production_impl.hpp", "max_stars_repo_name": "GeniusVentures/SuperGenius", "max_stars_repo_head_hexsha": "ae43304f4a2475498ef56c971296175acb88d0ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-07-10T21:25:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T21:25:03.000Z", "max_issues_repo_path": "src/verification/production/impl/production_impl.hpp", "max_issues_repo_name": "GeniusVentures/SuperGenius", "max_issues_repo_head_hexsha": "ae43304f4a2475498ef56c971296175acb88d0ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/verification/production/impl/production_impl.hpp", "max_forks_repo_name": "GeniusVentures/SuperGenius", "max_forks_repo_head_hexsha": "ae43304f4a2475498ef56c971296175acb88d0ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6946107784, "max_line_length": 97, "alphanum_fraction": 0.7181788512, "num_tokens": 1347}
|
import pyBigWig
import logging
import pandas as pd
import numpy as np
def prepare_BPNet_output_files(tasks, output_dir, chroms, chrom_sizes,
model_tag, exponentiate_counts, other_tags=[]):
""" prepare output bigWig files for writing bpnet predictions
        a. Construct appropriate filenames
b. Add headers to each bigWig file
Args:
tasks (collections.OrderedDict): nested python dictionary
of tasks. The predictions of each task will be
written to a separate bigWig
output_dir (str): destination directory where the output
files will be created
chroms (list): list of chromosomes for which the bigWigs
will contain predictions
chrom_sizes (str): the path to the chromosome sizes file.
The chrom size is used in constructing the header of
the bigWig file
model_tag (str): the unique tag of the model that is
generating the predictions
exponentiate_counts (boolean): True if counts predictions
are to be exponentiated before writing to the bigWigs.
This will determine if the counts bigWigs have the
'exponentiated' tag in the filename
other_tags (list): list of additional tags to be added as
suffix to the filenames
Returns:
tuple: (list of profile bigWig file objects,
list of counts bigWig file objects)
"""
# the lists of file objects
profile_fileobjs = []
counts_fileobjs = []
    # join the extra tags once, outside the loop; re-joining an already
    # joined string would splice '_' between its individual characters
    tags_suffix = '_'.join(other_tags)
    # one profile and one counts bigWig for each task
    for task in tasks:
        if len(tags_suffix) > 0:
            profile_fname = "{}/{}_{}_{}.bw".format(
                output_dir, model_tag, tags_suffix,
                task)
            counts_fname = "{}/{}_{}_{}_counts.bw".format(
                output_dir, model_tag, tags_suffix,
                task)
        else:
            profile_fname = "{}/{}_{}.bw".format(
                output_dir, model_tag, task)
            counts_fname = "{}/{}_{}_counts.bw".format(
                output_dir, model_tag, task)
# add 'exponentiated' tag in the counts filename
if exponentiate_counts:
counts_fname = counts_fname.replace(
'_counts.bw', '_exponentiated_counts.bw')
logging.info("Profile bigWig - {}".format(profile_fname))
logging.info("Counts bigWig - {}".format(counts_fname))
# open the bigWig files and add to the list of file objects
profile_fileobjs.append(pyBigWig.open(profile_fname, 'w'))
counts_fileobjs.append(pyBigWig.open(counts_fname, 'w'))
# read the chrom sizes into a dataframe
# (for constructing the bigWig header)
chrom_sizes_df = pd.read_csv(chrom_sizes, sep = '\t', header=None,
names = ['chrom', 'size'])
chrom_sizes_df = chrom_sizes_df.set_index('chrom')
# construct header for the bigWig file
header = []
# sort chromosomes, to be consistent with how pandas sorts
# chromosomes ... for e.g. chrom21 is < chrom8
chroms.sort()
for chrom in chroms:
size = chrom_sizes_df.at[chrom, 'size']
header.append((chrom, int(size)))
logging.debug("bigWig HEADER - {}".format(header))
# add the header to all the bigWig files
for file_obj in profile_fileobjs:
file_obj.addHeader(header, maxZooms=0)
for file_obj in counts_fileobjs:
file_obj.addHeader(header, maxZooms=0)
# return tuple of lists of profile and counts file objects
return (profile_fileobjs, counts_fileobjs)
def write_BPNet_predictions(profile_predictions, counts_predictions,
profile_fileobjs, counts_fileobjs,
coordinates, tasks, exponentiate_counts,
output_window_size):
""" write one batch of BPNet predictions to bigWig files
Args:
profile_predictions (np.ndarray): 3 dimensional numpy
array of size (batch_size, output_len,
num_tasks*num_strands)
counts_predictions (np.ndarray): 2 dimensional numpy
array of size (batch_size, num_tasks*num_strands)
profile_fileobjs (list): list of file objects that have
            been opened to write profile predictions
counts_fileobjs (list): list of file objects that have
            been opened to write counts predictions
coordinates (list): list of (chrom, start, end) for each
prediction
tasks (collections.OrderedDict): nested python dictionary
of tasks
exponentiate_counts (boolean): True if counts predictions
are to be exponentiated before writing to the bigWigs
output_window_size (int): size of the central window of
the output
"""
# see the 'Adding entries to a bigWig file' section here
# https://github.com/deeptools/pyBigWig, then scroll down to the
# section on Numpy
# to optimize the write time and avoid multiple writes to the
# bigWig files, for each output track we will write the entire
# batch of values in one go. To do that we have to pre allocate
# arrays to hold the chrom strings, the start & end coordinates
# and the values
# the combined length of the profile outputs for the whole batch
profile_array_len = profile_predictions.shape[0] * output_window_size
# pre allocate unicode numpy array with a dummy string of length 5
# 'chr1' is length 4, 'chr20' is length 5
profile_chroms = np.repeat(
np.array(["CCCCC"] * profile_array_len)[..., np.newaxis],
len(profile_fileobjs), axis=1)
# array to hold the start coordinates
profile_starts = np.zeros((profile_array_len, len(profile_fileobjs)),
dtype=np.int64)
# array to hold the end coordinates
profile_ends = np.zeros((profile_array_len, len(profile_fileobjs)),
dtype=np.int64)
# array to hold the values
profile_vals = np.zeros((profile_array_len, len(profile_fileobjs)),
dtype=np.float64)
# the combined length of the counts outputs for the whole batch
counts_array_len = counts_predictions.shape[0]
# pre allocate unicode numpy array with a dummy string of length 5
# 'chr1' is length 4, 'chr20' is length 5
counts_chroms = np.repeat(
np.array(["CCCCC"] * counts_array_len)[..., np.newaxis],
len(counts_fileobjs), axis=1)
# array to hold the start coordinates
counts_starts = np.zeros((counts_array_len, len(counts_fileobjs)),
dtype=np.int64)
# array to hold the end coordinates
counts_ends = np.zeros((counts_array_len, len(counts_fileobjs)),
dtype=np.int64)
# array to hold the values
counts_vals = np.zeros((counts_array_len, len(counts_fileobjs)),
dtype=np.float64)
# populate the preallocated array
for i in range(len(coordinates)):
(chrom, start, end) = coordinates[i]
# profile_predictions has the predicted profiles
# profile_predictions.shape =
# (batchsize, output_len, num_tasks*num_strands)
# counts_predictions has the predicted log(sum(counts))
# counts_predictions.shape = (batchsize, num_tasks*num_strands)
# mid section in chrom coordinates based on output_window_size
start = start + (end - start) // 2 - output_window_size // 2
end = start + output_window_size
# specify the coordinates for the profiles track,
# the arrays are of length output_window_size
profile_starts[i*output_window_size:(i+1)*output_window_size, :] = \
np.expand_dims(np.arange(start, end, dtype=np.int64), axis=1)
profile_ends[i*output_window_size:(i+1)*output_window_size, :] = \
np.expand_dims(np.arange(start+1, end+1, dtype=np.int64), axis=1)
profile_chroms[i*output_window_size:(i+1)*output_window_size, :] = \
np.expand_dims(np.array([chrom] * output_window_size), axis=1)
# now the values
# length of the output
output_len = profile_predictions[i].shape[0]
# start and end indices of the mid section
# of the predictions corresponding to output_window_size
s_idx = output_len // 2 - output_window_size // 2
e_idx = s_idx + output_window_size
# get the values and populate profile_vals
for j in range(len(profile_fileobjs)):
profile_vals[i*output_window_size:(i+1)*output_window_size, j] = \
profile_predictions[i, s_idx:e_idx, j]
# specify the coordinates for the counts track,
# the arrays are of length 1
counts_starts[i, :] = start
counts_ends[i, :] = end
counts_chroms[i, :] = chrom
# now the values
# get the values and populate counts_vals
for j in range(len(counts_fileobjs)):
val = counts_predictions[i, j]
if exponentiate_counts:
val = np.exp(val)
counts_vals[i, j] = val
# now write the values to the bigWig files
try:
# add entries to profile bigWigs
for j in range(len(profile_fileobjs)):
profile_fileobjs[j].addEntries(profile_chroms[:, j].tolist(),
profile_starts[:, j].tolist(),
ends=profile_ends[:, j].tolist(),
values=profile_vals[:, j].tolist())
# add entries to counts bigWigs
for j in range(len(counts_fileobjs)):
counts_fileobjs[j].addEntries(counts_chroms[:, j].tolist(),
counts_starts[:, j].tolist(),
ends=counts_ends[:, j].tolist(),
values=counts_vals[:, j].tolist())
    except Exception as e:
        logging.error("Skipping the following coordinates due to an error "
                      "({}): {}".format(e, coordinates))
|
{"hexsha": "558b9dff96c4ac121dd3609541409ce103561f6c", "size": 10705, "ext": "py", "lang": "Python", "max_stars_repo_path": "basepairmodels/cli/bigwigutils.py", "max_stars_repo_name": "erankotler/basepairmodels", "max_stars_repo_head_hexsha": "d848a787617bc6a698b887c55660d5dbee8d0074", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-04-30T16:46:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T19:33:26.000Z", "max_issues_repo_path": "basepairmodels/cli/bigwigutils.py", "max_issues_repo_name": "erankotler/basepairmodels", "max_issues_repo_head_hexsha": "d848a787617bc6a698b887c55660d5dbee8d0074", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-09-23T22:36:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:02:01.000Z", "max_forks_repo_path": "basepairmodels/cli/bigwigutils.py", "max_forks_repo_name": "erankotler/basepairmodels", "max_forks_repo_head_hexsha": "d848a787617bc6a698b887c55660d5dbee8d0074", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-04-16T01:00:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T11:46:51.000Z", "avg_line_length": 41.9803921569, "max_line_length": 87, "alphanum_fraction": 0.5952358711, "include": true, "reason": "import numpy", "num_tokens": 2296}
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
import os
from astropy.io import fits
import sys
from sklearn.mixture import GMM
from pandas import DataFrame
import legacyanalysis.decals_sim_priors as priors
# Globals
xyrange=dict(x_star=[-0.5,2.2],\
y_star=[-0.3,2.],\
x_elg=[-0.5,2.2],\
y_elg=[-0.3,2.],\
x_lrg= [0, 2.5],\
y_lrg= [-2, 6])
####
def getbic(X, ncomp=[3]):
'''Simple function to compute the Bayesian information criterion.'''
bic = [GMM(n_components=nc, covariance_type="full").fit(X).bic(X) for nc in ncomp]
#for ii in range(len(ncomp)):
# print(ncomp[ii], bic[ii])
return bic
def qa_plot_MoG(Xall,ncomp=2, src='STAR',nsamp=10000,outdir='.',extra=False,append=''):
'''Build a color-color plot. Show the data on the left-hand panel and random draws from
the MoGs on the right-hand panel.'''
if src == 'STAR':
mog_file = 'legacypipe/data/star_colors_mog.fits'
xrange = xyrange['x_%s' % src.lower()]
yrange = xyrange['y_%s' % src.lower()]
xlab='r - z'
ylab='g - r'
elif src == 'ELG':
mog_file = 'legacypipe/data/elg_colors_mog.fits'
xrange = xyrange['x_%s' % src.lower()]
yrange = xyrange['y_%s' % src.lower()]
xlab='r - z'
ylab='g - r'
elif src == 'LRG':
mog_file = 'legacypipe/data/lrg_colors_mog.fits'
xrange = xyrange['x_%s' % src.lower()]
yrange = xyrange['y_%s' % src.lower()]
xlab='r - z'
ylab='r - w1'
else: raise ValueError('src=%s not supported' % src)
# Build MoG
if ncomp is None:
mog = priors._GaussianMixtureModel.load(mog_file)
else:
from sklearn.mixture import GMM
mog = GMM(n_components=ncomp, covariance_type="full").fit(Xall)
samp = mog.sample(n_samples=nsamp)
#if extra:
# # Higher accuracy sampling, but more time consuming and negligible improvment
# samp= mog.sample_full_pdf(nsamp)
fig, ax = plt.subplots(1, 3, sharey=True,figsize=(12, 4))
ax[0].plot(Xall[:,0],Xall[:,1], 'o', c='b', markersize=3)
priors.add_MoG_curves(ax[1], mog.means_, mog.covars_, mog.weights_)
ax[2].plot(samp[:,0], samp[:,1], 'o', c='b', markersize=3)
# Add ts box
if src != 'STAR':
ts= priors.TSBox(src=src)
for i in [0,2]:
ts.add_ts_box(ax[i], xlim=xrange,ylim=yrange)
for i,title in zip(range(3),['Data','Gaussian Mixture','%d Draws' % nsamp]):
xlab1=ax[i].set_xlabel(xlab)
ax[i].set_xlim(xrange)
ax[i].set_ylim(yrange)
ti=ax[i].set_title(title)
ylab1=ax[0].set_ylabel(ylab)
fig.subplots_adjust(wspace=0) #, hspace=0.1)
for i in range(2):
priors.rm_last_ticklabel(ax[i])
name= os.path.join(outdir,'qa-mog-sample-%s%s.png' % (src,append))
print('Writing {}'.format(name))
plt.savefig(name, bbox_extra_artists=[xlab1,ylab1,ti], bbox_inches='tight',dpi=150)
plt.close()
def qa_plot_BIC(Xall,src='STAR',append=''):
'''Number componentes from Bayesian Information Criterion'''
ncomp = np.arange(1, 6)
bic = getbic(Xall, ncomp)
fig, ax = plt.subplots(1, 1, figsize=(8,5))
ax.plot(ncomp, bic, marker='s', ls='-')
ax.set_xlim((0, 10))
ax.set_xlabel('Number of Gaussian Components')
ax.set_ylabel('Bayesian Information Criterion')
if src == 'STAR':
plt.legend(labels=['%s g-r, r-z colors' % src])
elif src == 'ELG':
plt.legend(labels=['%s g-r, r-z colors' % src])
elif src == 'LRG':
plt.legend(labels=['%s r-w1, r-z colors' % src])
    else: raise ValueError('src=%s not supported' % src)
plt.tight_layout()
name='qa-mog-bic-%s%s.png' % (src,append)
print('Writing {}'.format(name))
plt.savefig(name)
plt.close()
def create_joinplot(df,xkey,ykey,xlab,ylab,xlim,ylim,color,src='ELG'):
import seaborn as sns
g = sns.JointGrid(x=xkey, y=ykey, data=df, xlim=xlim, ylim=ylim)
g = g.plot_joint(plt.scatter, color=color, edgecolor="white")
g = g.plot_marginals(sns.distplot, kde=False, color=color)
g = g.set_axis_labels(xlab,ylab)
def f_cut(junk1,junk2):
return 0
g = g.annotate(f_cut, template="Cut to r50 > {val:d}",\
stat="", loc="upper right", fontsize=12)
name='qa-priors-%s-%s-%s.png' % (xkey,ykey,src)
print('Writing {}'.format(name))
g = g.savefig(name)
def qa_plot_Priors(d=None,src='ELG'):
'''d -- dictionary of morphology params'''
import seaborn as sns
assert(d is not None)
# JoinGrid needs pandas DataFrame
df= DataFrame(d)
if src == 'ELG':
grrange = (-0.2, 2.0)
rzrange = (-0.4, 2.5)
    else: raise ValueError('src=%s not supported' % src)
col = sns.color_palette()
# Put each in sep plot window
color=col[0]
for xkey,ykey,xlab,ylab,xlim,ylim in zip(\
['rz','r50','ba'],['gr','n','n'],\
['r-z','Half-light radius (arcsec)','Axis ratio (b/a)'], ['g-r','Sersic n','Sersic n'],\
[rzrange,(0,1.5),(0,1)], [grrange,(0,4),(0,4)]):
create_joinplot(df,xkey,ykey,xlab,ylab,xlim,ylim,color,src=src)
if __name__ == "__main__":
# Stars
Xall= priors.star_data()
qa_plot_BIC(Xall,src='STAR')
qa_plot_MoG(Xall,ncomp=6, src='STAR')
# Save Model
mog = GMM(n_components=6, covariance_type="full").fit(Xall)
star_mogfile= 'legacypipe/data/star_colors_mog.fits'
if os.path.exists(star_mogfile):
        print('STAR MoG exists, not overwriting: %s' % star_mogfile)
else:
print('Writing {}'.format(star_mogfile))
# with 6 comp, 6th is noise, 1-5 are physical
priors._GaussianMixtureModel.save(mog, star_mogfile,index=np.arange(5))
qa_plot_MoG(Xall,ncomp=None, src='STAR',append='saved')
# ELGs
# FDR data
Xall,cuts= priors.elg_data_for_FDR()
priors.plot_FDR(Xall,cuts,src='ELG')
b= cuts['any_elg']
qa_plot_BIC(Xall[b,:], src='ELG',append='_FDR')
qa_plot_MoG(Xall[b,:],ncomp=6, src='ELG',append='_FDR') #,extra=True)
# Fit template spectra data
Xall,cuts, morph= priors.elg_data()
qa_plot_BIC(Xall, src='ELG',append='_synth')
qa_plot_MoG(Xall,ncomp=3, src='ELG',append='_synth') #,extra=True)
b= cuts['has_morph']
qa_plot_BIC(Xall[b,:], src='ELG',append='_synth+morph')
qa_plot_MoG(Xall[b,:],ncomp=4, src='ELG',append='_synth+morph') #,extra=True)
# only have priors for morph cut
qa_plot_Priors(d=morph,src='ELG')
# Save 3 component synth MoG
mog = GMM(n_components=3, covariance_type="full").fit(Xall)
elg_mogfile='legacypipe/data/elg_colors_mog.fits'
if os.path.exists(elg_mogfile):
        print('ELG MoG exists, not overwriting: %s' % elg_mogfile)
else:
print('Writing {}'.format(elg_mogfile))
priors._GaussianMixtureModel.save(mog, elg_mogfile)
qa_plot_MoG(Xall,ncomp=None, src='ELG',append='saved')
# LRGs
    Xall,cuts= priors.lrg_data_for_FDR()
priors.plot_FDR(Xall,cuts,src='LRG')
b= cuts['lrg']
qa_plot_BIC(Xall[b,:], src='LRG')
qa_plot_MoG(Xall[b,:], ncomp=2,src='LRG') #,extra=True)
# Save 2 comp model
b= cuts['lrg']
mog = GMM(n_components=2, covariance_type="full").fit(Xall[b,:])
lrg_mogfile= 'legacypipe/data/lrg_colors_mog.fits'
if os.path.exists(lrg_mogfile):
        print('LRG MoG exists, not overwriting: %s' % lrg_mogfile)
else:
print('Writing {}'.format(lrg_mogfile))
priors._GaussianMixtureModel.save(mog, lrg_mogfile)
qa_plot_MoG(Xall,ncomp=None, src='LRG',append='saved')
print('done')
|
{"hexsha": "a2c2f68637edc13a6048e074ee6bed6ea8213224", "size": 7697, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/obiwan/decals_sim_priors_plots.py", "max_stars_repo_name": "manera/legacypipe", "max_stars_repo_head_hexsha": "64dfe164fe1def50f5ad53784edd9a63321b0d45", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2015-08-25T00:25:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T06:35:54.000Z", "max_issues_repo_path": "py/obiwan/decals_sim_priors_plots.py", "max_issues_repo_name": "manera/legacypipe", "max_issues_repo_head_hexsha": "64dfe164fe1def50f5ad53784edd9a63321b0d45", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 644, "max_issues_repo_issues_event_min_datetime": "2015-07-08T16:26:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T19:09:10.000Z", "max_forks_repo_path": "py/obiwan/decals_sim_priors_plots.py", "max_forks_repo_name": "manera/legacypipe", "max_forks_repo_head_hexsha": "64dfe164fe1def50f5ad53784edd9a63321b0d45", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2015-08-24T18:27:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-04T03:10:42.000Z", "avg_line_length": 38.103960396, "max_line_length": 100, "alphanum_fraction": 0.6189424451, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 2476}
|
import networkx as nx
import json
from schematic.utils.curie_utils import extract_name_from_uri_or_curie
from schematic.utils.validate_utils import validate_class_schema
from schematic.utils.validate_rules_utils import validate_schema_rules
def load_schema_into_networkx(schema):
G = nx.MultiDiGraph()
for record in schema["@graph"]:
# TODO: clean up obsolete code
# if record["@type"] == "rdfs:Class":
# creation of nodes
# adding nodes to the graph
node = {}
for (k, value) in record.items():
if ":" in k:
key = k.split(":")[1]
node[key] = value
elif "@" in k:
key = k[1:]
node[key] = value
else:
node[k] = value
# creation of edges
# adding edges to the graph
if "rdfs:subClassOf" in record:
parents = record["rdfs:subClassOf"]
if type(parents) == list:
for _parent in parents:
n1 = extract_name_from_uri_or_curie(_parent["@id"])
n2 = record["rdfs:label"]
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="parentOf")
elif type(parents) == dict:
n1 = extract_name_from_uri_or_curie(parents["@id"])
n2 = record["rdfs:label"]
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="parentOf")
# TODO: refactor: abstract adding relationship method
if "sms:requiresDependency" in record:
dependencies = record["sms:requiresDependency"]
if type(dependencies) == list:
for _dep in dependencies:
n1 = record["rdfs:label"]
n2 = extract_name_from_uri_or_curie(_dep["@id"])
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="requiresDependency")
if "sms:requiresComponent" in record:
components = record["sms:requiresComponent"]
if type(components) == list:
for _comp in components:
n1 = record["rdfs:label"]
n2 = extract_name_from_uri_or_curie(_comp["@id"])
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="requiresComponent")
if "schema:rangeIncludes" in record:
range_nodes = record["schema:rangeIncludes"]
if type(range_nodes) == list:
for _range_node in range_nodes:
n1 = record["rdfs:label"]
n2 = extract_name_from_uri_or_curie(_range_node["@id"])
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="rangeValue")
elif type(range_nodes) == dict:
n1 = record["rdfs:label"]
n2 = extract_name_from_uri_or_curie(range_nodes["@id"])
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="rangeValue")
if "schema:domainIncludes" in record:
domain_nodes = record["schema:domainIncludes"]
if type(domain_nodes) == list:
for _domain_node in domain_nodes:
n1 = extract_name_from_uri_or_curie(_domain_node["@id"])
n2 = record["rdfs:label"]
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="domainValue")
elif type(domain_nodes) == dict:
n1 = extract_name_from_uri_or_curie(domain_nodes["@id"])
n2 = record["rdfs:label"]
# do not allow self-loops
if n1 != n2:
G.add_edge(n1, n2, key="domainValue")
# check schema generator (JSON validation schema gen)
if (
"requiresChildAsValue" in node
and node["requiresChildAsValue"]["@id"] == "sms:True"
):
node["requiresChildAsValue"] = True
if "required" in node:
if "sms:true" == record["sms:required"]:
node["required"] = True
else:
node["required"] = False
# not sure if this is required?
if "sms:validationRules" in record:
node["validationRules"] = record["sms:validationRules"]
if node["validationRules"]:
validate_vr = validate_schema_rules(
record["sms:validationRules"],
record["rdfs:label"],
input_filetype = 'json_schema')
else:
node["validationRules"] = []
node["uri"] = record["@id"]
node["description"] = record["rdfs:comment"]
G.add_node(record["rdfs:label"], **node)
# print(node)
# print(G.nodes())
return G
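# Example (with a hypothetical file name): build the graph from a JSON-LD
# schema document loaded from disk.
#   with open("schema.jsonld") as f:
#       G = load_schema_into_networkx(json.load(f))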
def node_attrs_cleanup(class_add_mod: dict) -> dict:
# clean map that will be inputted into the node/graph
node = {}
for (k, value) in class_add_mod.items():
if ":" in k:
key = k.split(":")[1]
node[key] = value
elif "@" in k:
key = k[1:]
node[key] = value
else:
node[k] = value
return node
def relationship_edges(
schema_graph_nx: nx.MultiDiGraph, class_add_mod: dict, **kwargs
) -> nx.MultiDiGraph:
"""
Notes:
=====
# pass the below dictionary as the third argument (kwargs) to relationship_edges().
# "in" indicates that the relationship has an in-edges behaviour.
# "out" indicates that the relationship has an out-edges behaviour.
rel_dict = {
"rdfs:subClassOf": {
"parentOf": "in"
},
"schema:domainIncludes": {
"domainValue": "in"
},
"sms:requiresDependency": {
"requiresDependency": "out"
},
"sms:requiresComponent": {
"requiresComponent": "out"
},
"schema:rangeIncludes": {
"rangeValue": "out"
}
}
"""
for rel, rel_lab_node_type in kwargs.items():
for rel_label, node_type in rel_lab_node_type.items():
if rel in class_add_mod:
parents = class_add_mod[rel]
if type(parents) == list:
for _parent in parents:
if node_type == "in":
n1 = extract_name_from_uri_or_curie(_parent["@id"])
n2 = class_add_mod["rdfs:label"]
if node_type == "out":
n1 = class_add_mod["rdfs:label"]
n2 = extract_name_from_uri_or_curie(_parent["@id"])
# do not allow self-loops
if n1 != n2:
schema_graph_nx.add_edge(n1, n2, key=rel_label)
elif type(parents) == dict:
if node_type == "in":
n1 = extract_name_from_uri_or_curie(parents["@id"])
n2 = class_add_mod["rdfs:label"]
if node_type == "out":
n1 = class_add_mod["rdfs:label"]
n2 = extract_name_from_uri_or_curie(parents["@id"])
# do not allow self-loops
if n1 != n2:
schema_graph_nx.add_edge(n1, n2, key=rel_label)
return schema_graph_nx
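# Example call that wires up all five relationship types at once, using the
# rel_dict shown in the docstring above:
#   schema_graph_nx = relationship_edges(schema_graph_nx, class_add_mod, **rel_dict)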
def class_to_node(class_to_convert: dict) -> nx.Graph:
G = nx.Graph()
node = {} # node to be added the above graph and returned
for (k, v) in class_to_convert.items():
if ":" in k: # if ":" is present in key
key = k.split(":")[1]
node[key] = v
elif "@" in k: # if "@" is present in key
key = k[1:]
node[key] = v
else:
node[k] = v
if "required" in node:
if class_to_convert["sms:required"] == "sms:true":
node["required"] = True
else:
node["required"] = False
if "sms:validationRules" in class_to_convert:
node["validationRules"] = class_to_convert["sms:validationRules"]
else:
node["validationRules"] = []
node["uri"] = class_to_convert["@id"] # add separate "uri" key
node["description"] = class_to_convert[
"rdfs:comment"
] # separately store "comment" as "description"
G.add_node(class_to_convert["rdfs:label"], **node)
return G
def replace_node_in_schema(schema: nx.MultiDiGraph, class_add_mod: dict) -> None:
# part of the code that replaces the modified class in the original JSON-LD schema (not in the data/ folder though)
for i, schema_class in enumerate(schema["@graph"]):
if schema_class["rdfs:label"] == class_add_mod["rdfs:label"]:
validate_class_schema(
class_add_mod
) # validate that the class to be modified follows the structure for any generic class (node)
schema["@graph"][i] = class_add_mod
break
def export_schema(schema, file_path):
with open(file_path, "w") as f:
json.dump(schema, f, sort_keys=True, indent=4, ensure_ascii=False)
|
{"hexsha": "91334c48fccdbd1f5c7ca17f938cfd96b63dc97a", "size": 9478, "ext": "py", "lang": "Python", "max_stars_repo_path": "schematic/utils/schema_utils.py", "max_stars_repo_name": "linglp/schematic", "max_stars_repo_head_hexsha": "fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "schematic/utils/schema_utils.py", "max_issues_repo_name": "linglp/schematic", "max_issues_repo_head_hexsha": "fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "schematic/utils/schema_utils.py", "max_forks_repo_name": "linglp/schematic", "max_forks_repo_head_hexsha": "fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1755725191, "max_line_length": 119, "alphanum_fraction": 0.5188858409, "include": true, "reason": "import networkx", "num_tokens": 2147}
|
import Adapt
using Oceananigans: short_show
using Oceananigans.Utils: user_function_arguments
using Oceananigans.Operators: assumed_field_location, index_and_interp_dependencies
using Oceananigans.Fields: show_location
using Oceananigans.Utils: tupleit
"""
    ContinuousForcing{X, Y, Z, P, F, D, I, ℑ}
A callable object that implements a "continuous form" forcing function
on a field at the location `X, Y, Z` with optional parameters.
"""
struct ContinuousForcing{X, Y, Z, P, F, D, I, ℑ}
func :: F
parameters :: P
field_dependencies :: D
field_dependencies_indices :: I
field_dependencies_interp :: ℑ
# Non-public "temporary" constructor that stores func, parameters, and field_dependencies
# for later regularization
function ContinuousForcing(func, parameters, field_dependencies)
field_dependencies = tupleit(field_dependencies)
return new{Nothing, Nothing, Nothing,
typeof(parameters),
typeof(func),
typeof(field_dependencies),
Nothing,
Nothing}(func, parameters, field_dependencies, nothing, nothing)
end
# Non-public "final" constructor.
function ContinuousForcing{X, Y, Z}(func, parameters=nothing, field_dependencies=(),
field_dependencies_indices=(), field_dependencies_interp=()) where {X, Y, Z}
return new{X, Y, Z,
typeof(parameters),
typeof(func),
typeof(field_dependencies),
typeof(field_dependencies_indices),
typeof(field_dependencies_interp)}(func, parameters, field_dependencies,
field_dependencies_indices, field_dependencies_interp)
end
end
"""
ContinuousForcing(func; parameters=nothing, field_dependencies=())
Construct a "continuous form" forcing with optional `parameters` and optional
`field_dependencies` on other fields in a model.
If neither `parameters` nor `field_dependencies` are provided, then `func` must be
callable with the signature
`func(x, y, z, t)`
where `x, y, z` are the east-west, north-south, and vertical spatial coordinates, and `t` is time.
If `field_dependencies` are provided, the signature of `func` must include them.
For example, if `field_dependencies=(:u, :S)` (and `parameters` are _not_ provided), then
`func` must be callable with the signature
`func(x, y, z, t, u, S)`
where `u` is assumed to be the `u`-velocity component, and `S` is a tracer. Note that any field
which does not have the name `u`, `v`, or `w` is assumed to be a tracer and must be present
in `model.tracers`.
If `parameters` are provided, then the _last_ argument to `func` must be `parameters`.
For example, if `func` has no `field_dependencies` but does depend on `parameters`, then
it must be callable with the signature
`func(x, y, z, t, parameters)`
With `field_dependencies=(:u, :v, :w, :c)` and `parameters`, then `func` must be
callable with the signature
`func(x, y, z, t, u, v, w, c, parameters)`
"""
ContinuousForcing(func; parameters=nothing, field_dependencies=()) =
ContinuousForcing(func, parameters, field_dependencies)
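# A minimal sketch (the names `drag`, `μ`, and `u_forcing` are illustrative):
# a linear drag on `u` whose strength is passed via `parameters`, so the
# function is called as func(x, y, z, t, u, μ) per the rules above.
#
#     drag(x, y, z, t, u, μ) = - μ * u
#     u_forcing = ContinuousForcing(drag, parameters=0.1, field_dependencies=:u)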
"""
regularize_forcing(forcing::ContinuousForcing, field, field_name, model_field_names)
Regularize `forcing::ContinuousForcing` by determining the indices of `forcing.field_dependencies`
in `model_field_names`, and associated interpolation functions so `forcing` can be used during
time-stepping `IncompressibleModel`.
"""
function regularize_forcing(forcing::ContinuousForcing, field, field_name, model_field_names)
X, Y, Z = location(field)
indices, interps = index_and_interp_dependencies(X, Y, Z,
forcing.field_dependencies,
model_field_names)
return ContinuousForcing{X, Y, Z}(forcing.func, forcing.parameters, forcing.field_dependencies,
indices, interps)
end
#####
##### Functions for calling ContinuousForcing in a time-stepping kernel
#####
@inline function (forcing::ContinuousForcing{X, Y, Z, F})(i, j, k, grid, clock, model_fields) where {X, Y, Z, F}
args = user_function_arguments(i, j, k, grid, model_fields, forcing.parameters, forcing)
return @inbounds forcing.func(xnode(X, i, grid), ynode(Y, j, grid), znode(Z, k, grid), clock.time, args...)
end
"""Show the innards of a `ContinuousForcing` in the REPL."""
Base.show(io::IO, forcing::ContinuousForcing{X, Y, Z, P}) where {X, Y, Z, P} =
print(io, "ContinuousForcing{$P} at ", show_location(X, Y, Z), '\n',
"├── func: $(short_show(forcing.func))", '\n',
"├── parameters: $(forcing.parameters)", '\n',
"└── field dependencies: $(forcing.field_dependencies)")
"""Show the innards of an "non-regularized" `ContinuousForcing` in the REPL."""
Base.show(io::IO, forcing::ContinuousForcing{Nothing, Nothing, Nothing, P}) where P =
print(io, "ContinuousForcing{$P}", '\n',
"├── func: $(short_show(forcing.func))", '\n',
"├── parameters: $(forcing.parameters)", '\n',
"└── field dependencies: $(forcing.field_dependencies)")
Adapt.adapt_structure(to, forcing::ContinuousForcing{X, Y, Z}) where {X, Y, Z} =
ContinuousForcing{X, Y, Z}(Adapt.adapt(to, forcing.func),
Adapt.adapt(to, forcing.parameters),
nothing,
Adapt.adapt(to, forcing.field_dependencies_indices),
Adapt.adapt(to, forcing.field_dependencies_interp))
|
{"hexsha": "a36c561ec08a7b4b58bbd72d49333a9b2ef09d51", "size": 5743, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Forcings/continuous_forcing.jl", "max_stars_repo_name": "charleskawczynski/Oceananigans.jl", "max_stars_repo_head_hexsha": "c34e6cd2166bbaa057186ffa795d348c1802485f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-27T18:27:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-27T18:27:43.000Z", "max_issues_repo_path": "src/Forcings/continuous_forcing.jl", "max_issues_repo_name": "charleskawczynski/Oceananigans.jl", "max_issues_repo_head_hexsha": "c34e6cd2166bbaa057186ffa795d348c1802485f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Forcings/continuous_forcing.jl", "max_forks_repo_name": "charleskawczynski/Oceananigans.jl", "max_forks_repo_head_hexsha": "c34e6cd2166bbaa057186ffa795d348c1802485f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2279411765, "max_line_length": 116, "alphanum_fraction": 0.6508793314, "num_tokens": 1318}
|
import unittest
import h5py
import numpy as np
from bald.tests import BaldTestCase
def _fattrs(f):
f.attrs['bald__'] = 'http://binary_array_ld.net/experimental'
f.attrs['bald__type'] = 'bald__Container'
return f
def _create_parent_child(f, pshape, cshape):
dsetp = f.create_dataset("parent_dataset", pshape, dtype='i')
dsetc = f.create_dataset("child_dataset", cshape, dtype='i')
dsetp.attrs['bald__type'] = 'bald__Dataset'
dsetp.attrs['bald__reference'] = dsetc.ref
dsetc.attrs['bald__type'] = 'bald__Dataset'
return f
class TestArrayReference(BaldTestCase):
def test_match_array_reference(self):
with self.temp_filename('.hdf') as tfile:
f = h5py.File(tfile, "w")
f = _fattrs(f)
f = _create_parent_child(f, (11, 17), (11, 17))
f.close()
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "b182b70db70459c66012624e881d0079d4a45bc9", "size": 890, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/bald/tests/test_array_reference.py", "max_stars_repo_name": "marqh/bald", "max_stars_repo_head_hexsha": "5fc06ef5270e5f1d60ad73f90ac8781cf91c4b0b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-11-09T12:24:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T18:55:23.000Z", "max_issues_repo_path": "lib/bald/tests/test_array_reference.py", "max_issues_repo_name": "marqh/bald", "max_issues_repo_head_hexsha": "5fc06ef5270e5f1d60ad73f90ac8781cf91c4b0b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 53, "max_issues_repo_issues_event_min_datetime": "2016-05-06T21:34:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-04T23:25:24.000Z", "max_forks_repo_path": "lib/bald/tests/test_array_reference.py", "max_forks_repo_name": "marqh/bald", "max_forks_repo_head_hexsha": "5fc06ef5270e5f1d60ad73f90ac8781cf91c4b0b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2016-05-05T06:21:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-09T20:18:22.000Z", "avg_line_length": 26.9696969697, "max_line_length": 65, "alphanum_fraction": 0.6595505618, "include": true, "reason": "import numpy", "num_tokens": 260}
|
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import, division
from numba.core import sigutils, types
from .compiler import (
compile_kernel,
JitDPPYKernel,
compile_dppy_func_template,
compile_dppy_func,
get_ordered_arg_access_types,
)
def kernel(signature=None, access_types=None, debug=False):
"""JIT compile a python function conforming using the DPPY backend.
A kernel is equvalent to an OpenCL kernel function, and has the
same restrictions as definined by SPIR_KERNEL calling convention.
"""
if signature is None:
return autojit(debug=False, access_types=access_types)
elif not sigutils.is_signature(signature):
func = signature
return autojit(debug=False, access_types=access_types)(func)
else:
return _kernel_jit(signature, debug, access_types)
def autojit(debug=False, access_types=None):
def _kernel_autojit(pyfunc):
ordered_arg_access_types = get_ordered_arg_access_types(pyfunc, access_types)
return JitDPPYKernel(pyfunc, ordered_arg_access_types)
return _kernel_autojit
def _kernel_jit(signature, debug, access_types):
argtypes, restype = sigutils.normalize_signature(signature)
if restype is not None and restype != types.void:
msg = "DPPY kernel must have void return type but got {restype}"
raise TypeError(msg.format(restype=restype))
def _wrapped(pyfunc):
ordered_arg_access_types = get_ordered_arg_access_types(pyfunc, access_types)
return compile_kernel(None, pyfunc, argtypes, ordered_arg_access_types, debug)
return _wrapped
def func(signature=None):
if signature is None:
return _func_autojit
elif not sigutils.is_signature(signature):
func = signature
return _func_autojit(func)
else:
return _func_jit(signature)
def _func_jit(signature):
argtypes, restype = sigutils.normalize_signature(signature)
def _wrapped(pyfunc):
return compile_dppy_func(pyfunc, restype, argtypes)
return _wrapped
def _func_autojit(pyfunc):
return compile_dppy_func_template(pyfunc)
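if __name__ == "__main__":
    # Illustrative usage sketch (added example, not part of the original
    # module): applies the `kernel` decorator defined above. Assumes dpctl
    # and numba_dppy are installed and that a SYCL/OpenCL device matching
    # the selector below exists; the selector string and sizes are
    # placeholder choices, not authoritative values.
    import numpy as np
    import dpctl
    import numba_dppy as dppy
    @kernel
    def vec_add(a, b, c):
        i = dppy.get_global_id(0)
        c[i] = a[i] + b[i]
    a = np.arange(1024, dtype=np.float32)
    b = np.ones(1024, dtype=np.float32)
    c = np.zeros(1024, dtype=np.float32)
    with dpctl.device_context("opencl:gpu"):
        vec_add[1024, dppy.DEFAULT_LOCAL_SIZE](a, b, c)
    print(c[:4])  # expected: [1. 2. 3. 4.]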
|
{"hexsha": "ed121f5ea967d0166debba217f93a6293229fd16", "size": 2705, "ext": "py", "lang": "Python", "max_stars_repo_path": "numba_dppy/decorators.py", "max_stars_repo_name": "Rubtsowa/numba-dppy", "max_stars_repo_head_hexsha": "20f9825b144913ebe1f7635c785b334f3743c4cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "numba_dppy/decorators.py", "max_issues_repo_name": "Rubtsowa/numba-dppy", "max_issues_repo_head_hexsha": "20f9825b144913ebe1f7635c785b334f3743c4cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-08T09:23:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-08T09:23:15.000Z", "max_forks_repo_path": "numba_dppy/decorators.py", "max_forks_repo_name": "Rubtsowa/numba-dppy", "max_forks_repo_head_hexsha": "20f9825b144913ebe1f7635c785b334f3743c4cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-09T13:51:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-09T13:51:03.000Z", "avg_line_length": 32.5903614458, "max_line_length": 86, "alphanum_fraction": 0.7449168207, "include": true, "reason": "from numba", "num_tokens": 608}
|
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
import torch
torch.backends.cudnn.benchmark = True
import random
from pathlib import Path
import hydra
import numpy as np
import torch.utils.data
from dm_env import specs
import dmc
import utils
from logger import Logger
from replay_buffer import ReplayBufferStorage, make_replay_loader
from video import TrainVideoRecorder, VideoRecorder
def _worker_init_fn(worker_id):
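    # derive a distinct seed per DataLoader worker from numpy's global RNG
    # state plus the worker id, so workers don't sample identical streams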
seed = np.random.get_state()[1][0] + worker_id
np.random.seed(seed)
random.seed(seed)
def _make_agent(obs_spec, action_spec, cfg):
cfg.obs_shape = obs_spec.shape
cfg.action_shape = action_spec.shape
return hydra.utils.instantiate(cfg)
class Workspace:
def __init__(self, cfg):
self.work_dir = Path.cwd()
print(f'workspace: {self.work_dir}')
self.cfg = cfg
utils.set_seed_everywhere(cfg.seed)
self.device = torch.device(cfg.device)
self.setup()
self.agent = _make_agent(self.train_env.observation_spec(),
self.train_env.action_spec(),
self.cfg.agent)
self.timer = utils.Timer()
self._global_step = 0
self._global_episode = 0
def setup(self):
# create logger
self.logger = Logger(self.work_dir, use_tb=self.cfg.use_tb)
# create envs
self.train_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
self.cfg.action_repeat, self.cfg.seed, self.cfg.get('xml_path', None))
self.eval_env = dmc.make(self.cfg.task_name, self.cfg.frame_stack,
self.cfg.action_repeat, self.cfg.seed, self.cfg.get('xml_path', None))
# create replay buffer
data_specs = (self.train_env.observation_spec(),
self.train_env.action_spec(),
specs.Array((1,), np.float32, 'reward'),
specs.Array((1,), np.float32, 'discount'))
self.replay_storage = ReplayBufferStorage(data_specs,
self.work_dir / 'buffer')
self.replay_loader = make_replay_loader(
self.work_dir / 'buffer', self.cfg.replay_buffer_size,
self.cfg.batch_size, self.cfg.replay_buffer_num_workers,
self.cfg.save_snapshot, self.cfg.nstep, self.cfg.discount)
self._replay_iter = None
self.video_recorder = VideoRecorder(
self.work_dir if self.cfg.save_video else None)
self.train_video_recorder = TrainVideoRecorder(
self.work_dir if self.cfg.save_train_video else None)
@property
def global_step(self):
return self._global_step
@property
def global_episode(self):
return self._global_episode
@property
def global_frame(self):
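        # environment frames = agent steps * action_repeat; the config's
        # frame-based budgets are converted to steps via this relation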
return self.global_step * self.cfg.action_repeat
@property
def replay_iter(self):
if self._replay_iter is None:
self._replay_iter = iter(self.replay_loader)
return self._replay_iter
def eval(self):
step, episode, total_reward = 0, 0, 0
eval_until_episode = utils.Until(self.cfg.num_eval_episodes)
while eval_until_episode(episode):
time_step = self.eval_env.reset()
self.video_recorder.init(self.eval_env, enabled=(episode == 0))
while not time_step.last():
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=True)
time_step = self.eval_env.step(action)
self.video_recorder.record(self.eval_env)
total_reward += time_step.reward
step += 1
episode += 1
self.video_recorder.save(f'{self.global_frame}_{episode}.mp4')
with self.logger.log_and_dump_ctx(self.global_frame, ty='eval') as log:
log('episode_reward', total_reward / episode)
log('episode_length', step * self.cfg.action_repeat / episode)
log('episode', self.global_episode)
log('step', self.global_step)
def train(self):
# predicates
train_until_step = utils.Until(self.cfg.num_train_frames,
self.cfg.action_repeat)
seed_until_step = utils.Until(self.cfg.num_seed_frames,
self.cfg.action_repeat)
eval_every_step = utils.Every(self.cfg.eval_every_frames,
self.cfg.action_repeat)
        episode_step, episode_reward = 0, 0
        # note: _global_step/_global_episode are initialized in __init__ and
        # restored by load_snapshot, so they must not be reset here
time_step = self.train_env.reset()
self.replay_storage.add(time_step)
self.train_video_recorder.init(time_step.observation)
metrics = None
while train_until_step(self.global_step):
if time_step.last():
self._global_episode += 1
self.train_video_recorder.save(f'{self.global_frame}.mp4')
# wait until all the metrics schema is populated
if metrics is not None:
# log stats
elapsed_time, total_time = self.timer.reset()
episode_frame = episode_step * self.cfg.action_repeat
with self.logger.log_and_dump_ctx(self.global_frame,
ty='train') as log:
log('fps', episode_frame / elapsed_time)
log('total_time', total_time)
log('episode_reward', episode_reward)
log('episode_length', episode_frame)
log('episode', self.global_episode)
log('buffer_size', len(self.replay_storage))
log('step', self.global_step)
# reset env
time_step = self.train_env.reset()
self.replay_storage.add(time_step)
self.train_video_recorder.init(time_step.observation)
# try to save snapshot
if self.cfg.save_snapshot:
self.save_snapshot()
episode_step = 0
episode_reward = 0
# try to evaluate
if eval_every_step(self.global_step):
self.logger.log('eval_total_time', self.timer.total_time(),
self.global_frame)
self.eval()
# sample action
with torch.no_grad(), utils.eval_mode(self.agent):
action = self.agent.act(time_step.observation,
self.global_step,
eval_mode=False)
# try to update the agent
if not seed_until_step(self.global_step):
metrics = self.agent.update(self.replay_iter, self.global_step)
self.logger.log_metrics(metrics, self.global_frame, ty='train')
# take env step
time_step = self.train_env.step(action)
episode_reward += time_step.reward
self.replay_storage.add(time_step)
self.train_video_recorder.record(time_step.observation)
episode_step += 1
self._global_step += 1
def save_snapshot(self):
snapshot = self.work_dir / 'snapshot.pt'
keys_to_save = ['agent', 'timer', '_global_step', '_global_episode']
payload = {k: self.__dict__[k] for k in keys_to_save}
with snapshot.open('wb') as f:
torch.save(payload, f)
def load_snapshot(self):
snapshot = self.work_dir / 'snapshot.pt'
with snapshot.open('rb') as f:
payload = torch.load(f)
for k, v in payload.items():
self.__dict__[k] = v
@hydra.main(config_path='cfgs', config_name='config')
def main(cfg):
root_dir = Path.cwd()
workspace = Workspace(cfg)
snapshot = root_dir / 'snapshot.pt'
if snapshot.exists():
print(f'resuming: {snapshot}')
workspace.load_snapshot()
workspace.train()
if __name__ == '__main__':
main()
|
{"hexsha": "60b90413f5e6d06ab6617d569f9d78ffff5073c4", "size": 8373, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "medric49/sharingan", "max_stars_repo_head_hexsha": "f6b85118016d45456fc1467c6706731562c0f0d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "medric49/sharingan", "max_issues_repo_head_hexsha": "f6b85118016d45456fc1467c6706731562c0f0d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "medric49/sharingan", "max_forks_repo_head_hexsha": "f6b85118016d45456fc1467c6706731562c0f0d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0486725664, "max_line_length": 104, "alphanum_fraction": 0.5825868864, "include": true, "reason": "import numpy", "num_tokens": 1657}
|
!
! Module for parsing command line args
!
module args
use kinds, only : r_dp
use err, only : err_msg
implicit none
private
public :: args_parse, &
args_usage
contains
subroutine args_parse(i, j, k, niter, &
north, south, east, west, top, bottom, &
ierr, output)
implicit none
integer, intent(out) :: i
integer, intent(out) :: j
integer, intent(out) :: k
integer, intent(out) :: niter
real(kind=r_dp), intent(out) :: north
real(kind=r_dp), intent(out) :: south
real(kind=r_dp), intent(out) :: east
real(kind=r_dp), intent(out) :: west
real(kind=r_dp), intent(out) :: top
real(kind=r_dp), intent(out) :: bottom
integer, intent(out) :: ierr
character(len=*), optional, intent(out) :: output
integer :: n ! Number of args
      integer :: x, y ! x: argument iterator; y: count of required args parsed
integer :: iunit ! Namelist file unit
character(len=1024) :: key ! Argument key
character(len=1024) :: val ! Argument value
character(len=1024) :: nlfile ! Namelist file name
namelist /domain/ i, j, k
namelist /jacobi/ niter, north, south, east, west, &
top, bottom
ierr = 0
nlfile = ''
      if (present(output)) output = ''
y = 0
n = command_argument_count()
do x=1,n,2
call get_command_argument(x, key)
call get_command_argument(x+1, val)
select case(key)
case('-imax')
read(val, '(I8)') i
y = y + 1
case('-jmax')
read(val, '(I8)') j
y = y + 1
case('-kmax')
read(val, '(I8)') k
y = y + 1
case('-niter')
read(val, '(I8)') niter
y = y + 1
case('-east')
read(val, '(F8.0)') east
y = y + 1
case('-west')
read(val, '(F8.0)') west
y = y + 1
case('-north')
read(val, '(F8.0)') north
y = y + 1
case('-south')
read(val, '(F8.0)') south
y = y + 1
case('-top')
read(val, '(F8.0)') top
y = y + 1
case('-bottom')
read(val, '(F8.0)') bottom
y = y + 1
case('-f')
nlfile = val
         case('-o')
            if (present(output)) output = val
case default
call err_msg('Unrecognized option: ' &
// trim(key))
end select
end do
if (len(trim(nlfile)) > 0) then
iunit = 7
open(unit=iunit, file=nlfile, action='read')
read(iunit, nml=domain)
read(iunit, nml=jacobi)
close(iunit)
y = 10
end if
if ( y /= 10) then
call err_msg('Not enough command line arguments.')
call args_usage
ierr = 1
end if
end subroutine args_parse
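   ! Example namelist file accepted via the '-f' option (illustrative
   ! values only, not recommendations):
   !   &domain
   !    i = 64, j = 64, k = 64
   !   /
   !   &jacobi
   !    niter = 1000,
   !    north = 1.0, south = 0.0, east = 0.0, west = 0.0,
   !    top = 0.0, bottom = 0.0
   !   /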
subroutine args_usage()
implicit none
character(len=1024) :: prog_name
call get_command_argument(0, prog_name)
write(0,*) ''
write(0,*) 'usage: ', trim(prog_name), &
' -imax X -jmax Y -kmax Z \ '
write(0,*) ' -north A -south B -east C -west D -top E ', &
'-bottom F -niter N \'
write(0,*) ' [-f namelist] [-o output]'
write(0,*) ''
write(0,*) ' -imax X Size of domain in the i direction.'
write(0,*) ' -jmax Y Size of domain in the j direction.'
write(0,*) ' -kmax Z Size of domain in the k direction.'
write(0,*) ' -north A Initial condition on the north face.'
write(0,*) ' -south B Initial condition on the south face.'
write(0,*) ' -east C Initial condition on the east face.'
write(0,*) ' -west D Initial condition on the west face.'
write(0,*) ' -top E Initial condition on the top face.'
write(0,*) ' -bottom F Initial condition on the bottom face.'
write(0,*) ' -niter N Number of iterations to make.'
write(0,*) ' -f namelist Namelist file.'
write(0,*) ' -o output HDF5 to write iterations to.'
end subroutine args_usage
end module args
|
{"hexsha": "5b01027e4debb12d8c5719078d8b696f21858487", "size": 5394, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "OpenMP/Lab/jacobi/args.f90", "max_stars_repo_name": "rctraining/USGS-Advanced-HPC-Workshop", "max_stars_repo_head_hexsha": "4b206f6478fef8655eeffee025a43bb4414e7d83", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OpenMP/Lab/jacobi/args.f90", "max_issues_repo_name": "rctraining/USGS-Advanced-HPC-Workshop", "max_issues_repo_head_hexsha": "4b206f6478fef8655eeffee025a43bb4414e7d83", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OpenMP/Lab/jacobi/args.f90", "max_forks_repo_name": "rctraining/USGS-Advanced-HPC-Workshop", "max_forks_repo_head_hexsha": "4b206f6478fef8655eeffee025a43bb4414e7d83", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2, "max_line_length": 73, "alphanum_fraction": 0.3867259918, "num_tokens": 1218}
|
# encoding=utf-8
"""
imageai_prediction.py: predicting the class of an image with ImageAI library
@author: Manish Bhobe
My experiments with Python, Data Science, Machine Learning and Deep Learning
"""
from imageai.Prediction import ImagePrediction
import matplotlib.pyplot as plt
from PIL import Image
import random
import numpy as np
import tensorflow as tf
import os
import sys
import warnings
import time
warnings.filterwarnings('ignore')
seed = 123
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
# replace this path with full path of folder where you downloaded pretrained weights
# from https://github.com/OlafenwaMoses/ImageAI/releases/tag/1.0/
IMAGEAI_MODELS_HOME = "C:\\Dev\\Code\\Python\\pydata-book-master\\learning-ml\\dl-tensorflow\\pretrained_models"
IMAGEAI_MODEL = 'resnet50_weights_tf_dim_ordering_tf_kernels.h5'
assert os.path.exists(
IMAGEAI_MODELS_HOME), "%s - pretrained models dir does not exist!" % IMAGEAI_MODELS_HOME
MODEL_PATH = os.path.join(IMAGEAI_MODELS_HOME, IMAGEAI_MODEL)
assert os.path.exists(MODEL_PATH), "%s - unable to find pre-trained model weights here!" % MODEL_PATH
image_file_names = [
"car-1.jpg",
"cat-5.jpg",
"bird-3.jpg",
"dog-2.jpg",
"bird-2.jpg"
]
# instantiate the predictor & load weights
# we are using ResNet50
predictor = ImagePrediction()
predictor.setModelTypeAsResNet()
predictor.setModelPath(MODEL_PATH)
predictor.loadModel()
def display_predictions(predictor, image_path, pred_count=10, fig_size=(8, 6)):
assert os.path.exists(image_path)
assert fig_size is not None
assert pred_count > 0
img = Image.open(image_path)
plt.figure(figsize=fig_size)
plt.imshow(img)
plt.title(image_path)
# display predictions
predictions, probabilities = predictor.predictImage(image_path, result_count=pred_count)
print('Predictions: %s' % image_path)
    for pred, prob in zip(predictions, probabilities):
        print(' - %s: prob: %.3f' % (pred, prob))
    # render the figure; without this the image windows never appear when
    # the script runs non-interactively
    plt.show()
for image_file_name in image_file_names:
test_image = os.path.join(os.getcwd(), image_file_name)
display_predictions(predictor, test_image)
|
{"hexsha": "b669673dd98be2c4b464f660262af02c357f7e7e", "size": 2206, "ext": "py", "lang": "Python", "max_stars_repo_path": "imageai_prediction.py", "max_stars_repo_name": "mjbhobe/dl-articles-medium", "max_stars_repo_head_hexsha": "7fc3ee699117dd802e0c9715324de8c1a5898c9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-26T13:03:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T10:19:50.000Z", "max_issues_repo_path": "imageai_prediction.py", "max_issues_repo_name": "mjbhobe/dl-articles-medium", "max_issues_repo_head_hexsha": "7fc3ee699117dd802e0c9715324de8c1a5898c9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "imageai_prediction.py", "max_forks_repo_name": "mjbhobe/dl-articles-medium", "max_forks_repo_head_hexsha": "7fc3ee699117dd802e0c9715324de8c1a5898c9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-09-10T07:16:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-14T17:31:15.000Z", "avg_line_length": 30.6388888889, "max_line_length": 113, "alphanum_fraction": 0.7316409791, "include": true, "reason": "import numpy", "num_tokens": 532}
|
program dyn_blob
use m_dyn, only: dyn_get
use m_dyn, only: dyn_put
use m_dyn, only: dyn_vect
use m_dyn, only: dyn_clean
use m_const, only: radius_earth
implicit none
character(len=*), parameter :: fname = 'bkg.eta.nc4'
character(len=*), parameter :: pname = 'fsens.eta.nc4'
character(len=*), parameter :: bname = 'blob.eta.nc4'
integer, parameter :: dyntype=5
integer nymd, nhms, ier, im, jm, km, freq, nstep
integer ii, nymdw, nhmsw
integer, parameter :: npnts=4
real blocs(2,npnts)
logical :: advect=.false.
real,parameter:: adrate = 0.75
real,parameter:: lenfcst = 24
real,parameter:: corrlen = 800. ! to be read from file
real,parameter:: corrlength = corrlen*1000/radius_earth
  integer, parameter :: zlevout = 1 ! -1 won't do anything
type(dyn_vect) :: wind
type(dyn_vect) :: pert
integer :: ntimes = 12
integer :: lu=10
real,allocatable :: covloc(:,:,:,:)
real,allocatable :: adcovloc(:,:,:,:)
real,allocatable :: ploc(:,:,:)
! read in perturbation fields
call dyn_get ( trim(pname), nymd, nhms, pert, ier, timidx=1, freq=freq, nstep=nstep, vectype=dyntype )
im=pert%grid%im
jm=pert%grid%jm
km=pert%grid%km
if (zlevout>0) then
open (lu,file='tracer.grd',form='unformatted',access='sequential',convert='little_endian')
call wrtout_ (lu,pert%q(:,:,zlevout,1))
close(lu)
call readin_(lu,pert%q(:,:,zlevout,1))
endif
pert%u = 0.0
pert%v = 0.0
pert%ps = 0.0
pert%delp = 0.0
pert%q = 0.0
pert%pt = 0.0
call set_blobs_
call make_blobs_
call dyn_put ( trim(bname), nymd, nhms, 0, pert, ier, freq=freq, nstep=nstep, vectype=dyntype )
! read in wind fields
call dyn_get ( trim(fname), nymdw, nhmsw, wind, ier, timidx=1, freq=freq, nstep=nstep, vectype=dyntype )
if (advect) then
call advect_blobs_
endif
if (zlevout>0) then
open (lu,file='wind.grd',form='unformatted',access='sequential',convert='little_endian')
call wrtout_ (lu,wind%u(:,:,zlevout))
call wrtout_ (lu,wind%v(:,:,zlevout))
close(lu)
endif
call clean_blobs_
if(zlevout<=0) then
call dyn_put ( trim(pname), nymd, nhms, 0, pert, ier, freq=freq, nstep=nstep, vectype=dyntype )
endif
contains
subroutine wrtout_(lu,fld)
integer, intent(in) :: lu
real, intent(in) :: fld(:,:)
real(4),allocatable:: fld4(:,:)
integer myim,myjm,ndim
myim=size(fld,1)
myjm=size(fld,2)
ndim = myim*myjm
allocate(fld4(myim,myjm))
fld4=fld
call lon_shift(fld4,myim,myjm)
print *, 'wrtout: sum field ', myim, myjm, sum(fld)/ndim, sum(fld4)/ndim
write(lu) fld4
deallocate(fld4)
end subroutine wrtout_
  subroutine readin_(lu,fld)
  integer, intent(in) :: lu
  real, intent(inout) :: fld(:,:)  ! receives the field read from file
  real(4),allocatable:: fld4(:,:)
  integer myim,myjm,ndim
  myim=size(fld,1)
  myjm=size(fld,2)
  ndim = myim*myjm
  allocate(fld4(myim,myjm))
  open (lu,file='tracer.grd',form='unformatted',access='sequential',convert='little_endian')
  read(lu) fld4
  close(lu)
  call lon_shift(fld4,myim,myjm)
  fld = fld4  ! copy back; with intent(in) the data read was silently dropped
  print *, 'readin: sum field ', myim, myjm, sum(fld4)/ndim
  deallocate(fld4)
  end subroutine readin_
subroutine set_blobs_
  blocs(1,1)=10 ! 1,: = lats, 2,: = lons
blocs(2,1)= 0
! blocs(1,2)=90
blocs(1,2)=30
blocs(2,2)=180
blocs(1,3)=45
blocs(2,3)=50
! blocs(1,4)=-90
blocs(1,4)=-30
blocs(2,4)=-60
allocate(ploc(1,1,3))
allocate(covloc(im,jm,3,npnts))
if(advect) allocate(adcovloc(im,jm,3,npnts))
end subroutine set_blobs_
subroutine clean_blobs_
if(advect) deallocate(adcovloc)
deallocate(covloc)
deallocate(ploc)
end subroutine clean_blobs_
subroutine make_blobs_
integer nn,mm,ii,jj,iii,jjj
real pi,cs,sn,dist
mm=km
do nn=1,npnts
call globeloc ( ploc, blocs(1,nn:nn), blocs(2,nn:nn) )
call globeloc ( covloc(:,:,:,nn), pert%grid%lat, pert%grid%lon )
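     ! ploc and covloc hold unit-sphere Cartesian coordinates (from
     ! globeloc), so dist below is the 3-D chordal distance scaled by the
     ! normalized correlation length.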
do jj=1,jm
do ii=1,im
dist = sqrt( (covloc(ii,jj,1,nn)-ploc(1,1,1))**2 + &
(covloc(ii,jj,2,nn)-ploc(1,1,2))**2 + &
(covloc(ii,jj,3,nn)-ploc(1,1,3))**2 )/corrlength
if (dist<10*corrlength) pert%pt(ii,jj,:mm) = gc(dist)
enddo
enddo
mm=mm-1
enddo
end subroutine make_blobs_
subroutine advect_blobs_
integer nn,ii,jj,kk,iii,jjj
real fcstlen,dt,pi,cs,sn,dist
end subroutine advect_blobs_
real function gc(r)
! Gaspari-Cohn taper function.
! r should be positive, and normalized so taper = 0 at r = 1
! very close to exp(-(r/c)**2), where c = 0.388
implicit none
real a1,a2,a3,a4,a5,a6,a7,a8,a9
parameter(a1 = -8.0)
parameter(a2 = 8.0)
parameter(a3 = 5.0)
parameter(a4 = 20.0/3.0)
parameter(a5 = 1.0)
parameter(a6 = 8.0/3.0)
parameter(a7 = -10.0)
parameter(a8 = 4.0)
parameter(a9 = -1.0/3.0)
real, intent(in) :: r
if(r < a5)then
if(r > 0.5)then
gc = ( ( ( ( a6*r -a2 )*r +a3 )*r +a4 )*r +a7)*r + a8 + a9/r
else
gc = ( ( ( a1*r +a2)*r +a3 )*r -a4)*r*r + a5
end if
else
gc = 0.0
end if
end function gc
subroutine globeloc (aloc,lat,lon)
implicit none
real,intent(inout) :: aloc(:,:,:)
real,intent(in) :: lat(:),lon(:)
real pi
integer i,j
real,allocatable:: clat(:),clon(:),slat(:),slon(:)
if(size(aloc,3)<3) then
print *, 'globeloc error: check 2nd dim of aloc ', size(aloc,3)
endif
pi=4.0*atan(1.0)
allocate(clat(size(lat)),clon(size(lon)))
allocate(slat(size(lat)),slon(size(lon)))
clat=cos(lat*pi/180.)
slat=sin(lat*pi/180.)
clon=cos(lon*pi/180.)
slon=sin(lon*pi/180.)
do j=1,size(aloc,2)
do i=1,size(aloc,1)
aloc(i,j,1) = clat(j)*clon(i)
aloc(i,j,2) = clat(j)*slon(i)
aloc(i,j,3) = slat(j)
enddo
enddo
deallocate(slat,slon)
deallocate(clat,clon)
end subroutine globeloc
subroutine globeadloc (fcstlen,aloc,lat,lon, u,v)
implicit none
real,intent(in) :: fcstlen
real,intent(inout) :: aloc(:,:,:)
real,intent(in) :: lat(:),lon(:)
real,intent(in) :: u(:,:),v(:,:)
real pi,pi2,halfpi
real adlat,adlon
real adtime
integer i,j
real,allocatable:: clat(:),clon(:),slat(:),slon(:)
if(size(aloc,3)<3) then
print *, 'globeloc error: check 2nd dim of aloc ', size(aloc,3)
endif
pi=4.0*atan(1.0)
pi2=2.0*pi
halfpi=0.5*pi
adtime=adrate*fcstlen*3600./radius_earth
allocate(clat(size(lat)),clon(size(lon)))
allocate(slat(size(lat)),slon(size(lon)))
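  ! Note: despite their names, clat/clon below hold angles in radians (not
  ! cosines/sines); slat/slon are allocated for symmetry with globeloc but
  ! are never used in this routine.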
clat=lat*pi/180.
slat=lat*pi/180.
clon=lon*pi/180.
slon=lon*pi/180.
do j=1,size(aloc,2)
do i=1,size(aloc,1)
adlon = clon(i) - u(i,j) * cos(clat(j)) * adtime
adlat = clat(j) - v(i,j) * adtime
if(adlat > halfpi) then
adlat = pi - adlat
adlon = adlon + pi
else if(adlat < -halfpi) then
adlat = -pi - adlat
adlon = adlon + pi
end if
if(adlon > pi2) then
adlon = mod(adlon,pi2)
else if(adlon < 0.0) then
adlon = mod(adlon,pi2) + pi2
end if
aloc(i,j,1) = cos(adlat)*cos(adlon)
aloc(i,j,2) = cos(adlat)*sin(adlon)
aloc(i,j,3) = sin(adlat)
enddo
enddo
deallocate(slat,slon)
deallocate(clat,clon)
end subroutine globeadloc
subroutine lon_shift(field,im,jm)
Implicit NONE
integer, intent(in) :: im
integer, intent(in) :: jm
real(4), intent(inout) :: field(im,jm)
integer i, j
real(4) tmp
do j = 1, jm
do i = 1, im/2
tmp = field(i,j)
field(i,j) = field(i+im/2,j)
field(i+im/2,j) = tmp
enddo
enddo
end subroutine lon_shift
end program dyn_blob
|
{"hexsha": "849f11b806bf7768604404d3d0f3513cb07956f2", "size": 7348, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "GMAO_hermes/dyn_blob.f90", "max_stars_repo_name": "GEOS-ESM/GMAO_Shared", "max_stars_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23", "max_stars_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-01T17:36:53.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-01T17:36:53.000Z", "max_issues_repo_path": "GMAO_hermes/dyn_blob.f90", "max_issues_repo_name": "GEOS-ESM/GMAO_Shared", "max_issues_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23", "max_issues_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_issues_count": 105, "max_issues_repo_issues_event_min_datetime": "2019-07-08T19:27:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T02:12:16.000Z", "max_forks_repo_path": "GMAO_hermes/dyn_blob.f90", "max_forks_repo_name": "GEOS-ESM/GMAO_Shared", "max_forks_repo_head_hexsha": "022af23abbc7883891006b57379be96d9a50df23", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-07-05T18:00:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T16:26:29.000Z", "avg_line_length": 24.7407407407, "max_line_length": 105, "alphanum_fraction": 0.6336418073, "num_tokens": 2804}
|
From Coq Require Import ZArith Psatz Bool String List FMaps.
From Coq Require Import FunctionalExtensionality.
From CDF Require Import Sequences IMP.
From CDF Require AbstrInterp.
Local Open Scope string_scope.
Local Open Scope Z_scope.
(** * 5. Static analysis by abstract interpretation, improved version *)
(** ** 5.5. Improved interface of abstract domains *)
(** We enrich the interface of abstract domains for values with
operations for inverse analysis of conditionals and for widening. *)
Module Type VALUE_ABSTRACTION.
(** We reuse all the declarations of the simplified interface. *)
Include AbstrInterp.VALUE_ABSTRACTION.
(** [meet] computes a lower bound of its two arguments. *)
Parameter meet: t -> t -> t.
Axiom meet_1: forall n N1 N2, In n N1 -> In n N2 -> In n (meet N1 N2).
(** [isIn] tests whether a concrete value belongs to an abstract value. *)
Parameter isIn: Z -> t -> bool.
Axiom isIn_1: forall n N, In n N -> isIn n N = true.
(** Abstract operators for inverse analysis of comparisons. *)
(** Consider a test [a1 = a2] that evaluates to true at run-time.
Let [N1] be an abstraction of [a1] and [N2] be an abstraction of [a2].
[eq_inv N1 N2] produces a pair of abstract values [N1', N2'].
[N1'] is a possibly more precise abstraction for [a1]
taking into account the fact that [a1 = a2].
[N2'] is a possibly more precise abstraction for [a2]
taking into account the fact that [a1 = a2]. *)
Parameter eq_inv: t -> t -> t * t.
Axiom eq_inv_1:
forall n1 n2 N1 N2,
In n1 N1 -> In n2 N2 -> n1 = n2 ->
In n1 (fst (eq_inv N1 N2)) /\ In n2 (snd (eq_inv N1 N2)).
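(** For instance, the interval domain of section 5.8 below implements
    [eq_inv N1 N2] by returning [meet N1 N2] for both components. *)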
(** [ne_inv], [le_inv] and [gt_inv] are similar but apply to the other
three basic comparisons: "different", "less than or equal", and
"greater than". *)
Parameter ne_inv: t -> t -> t * t.
Axiom ne_inv_1:
forall n1 n2 N1 N2,
In n1 N1 -> In n2 N2 -> n1 <> n2 ->
In n1 (fst (ne_inv N1 N2)) /\ In n2 (snd (ne_inv N1 N2)).
Parameter le_inv: t -> t -> t * t.
Axiom le_inv_1:
forall n1 n2 N1 N2,
In n1 N1 -> In n2 N2 -> n1 <= n2 ->
In n1 (fst (le_inv N1 N2)) /\ In n2 (snd (le_inv N1 N2)).
Parameter gt_inv: t -> t -> t * t.
Axiom gt_inv_1:
forall n1 n2 N1 N2,
In n1 N1 -> In n2 N2 -> n1 > n2 ->
In n1 (fst (gt_inv N1 N2)) /\ In n2 (snd (gt_inv N1 N2)).
(** [widen N1 N2] computes an upper bound of its first argument, chosen
so as to guarantee and accelerate the convergence of fixed point
iteration. *)
Parameter widen: t -> t -> t.
Axiom widen_1: forall N1 N2, le N1 (widen N1 N2).
(** To guarantee convergence, we provide a measure with nonnegative
integer values that decreases strictly along the fixed point
iteration with widening. *)
Parameter measure: t -> nat.
Axiom measure_top: measure top = 0%nat.
Axiom widen_2: forall N1 N2, (measure (widen N1 N2) <= measure N1)%nat.
Axiom widen_3:
forall N1 N2, ble N2 N1 = false -> (measure (widen N1 N2) < measure N1)%nat.
End VALUE_ABSTRACTION.
(** We enrich the interface of abstract stores with a widening operation. *)
Module Type STORE_ABSTRACTION.
Declare Module V: VALUE_ABSTRACTION.
Parameter t: Type.
Parameter get: ident -> t -> V.t.
Definition In (s: store) (S: t) : Prop := forall x, V.In (s x) (get x S).
Parameter set: ident -> V.t -> t -> t.
Axiom set_1: forall x n N s S, V.In n N -> In s S -> In (update x n s) (set x N S).
Definition le (S1 S2: t) : Prop :=
forall s, In s S1 -> In s S2.
Parameter ble: t -> t -> bool.
Axiom ble_1: forall S1 S2, ble S1 S2 = true -> le S1 S2.
Parameter bot: t.
Axiom bot_1: forall s, ~(In s bot).
Parameter top: t.
Parameter top_1: forall s, In s top.
Parameter join: t -> t -> t.
Axiom join_1: forall s S1 S2, In s S1 -> In s (join S1 S2).
Axiom join_2: forall s S1 S2, In s S2 -> In s (join S1 S2).
(** This is the new widening operator. *)
Parameter widen: t -> t -> t.
Axiom widen_1: forall S1 S2, le S1 (widen S1 S2).
(** The order below corresponds to consecutive rounds of the fixed point
iteration with widening. We require it to be well founded,
so as to guarantee termination. *)
Definition widen_order (S S1: t) :=
exists S2, S = widen S1 S2 /\ ble S2 S1 = false.
Axiom widen_order_wf: well_founded widen_order.
End STORE_ABSTRACTION.
(** ** 5.6. The improved generic analyzer. *)
Module Analysis (ST: STORE_ABSTRACTION).
Module V := ST.V.
(** *** Computing post-fixed points with widening and narrowing. *)
Section FIXPOINT.
Variable F: ST.t -> ST.t.
Program Definition is_true (b: bool) : { b = true } + { b = false } :=
match b with true => left _ | false => right _ end.
Lemma iter_up_acc:
forall (S: ST.t) (acc: Acc ST.widen_order S) (S': ST.t),
ST.ble S' S = false ->
Acc ST.widen_order (ST.widen S S').
Proof.
intros. eapply Acc_inv; eauto. exists S'. auto.
Defined.
Fixpoint iter_up (S: ST.t) (acc: Acc ST.widen_order S) : ST.t :=
let S' := F S in
match is_true (ST.ble S' S) with
| left LE => S
| right NOTLE => iter_up (ST.widen S S') (iter_up_acc S acc S' NOTLE)
end.
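(** [iter_down] performs a bounded number of narrowing steps: it moves from
    the current post-fixed point [S] to [F S] only when [F S] is itself a
    post-fixed point, so the final result can only shrink, never lose
    soundness. *)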
Fixpoint iter_down (n: nat) (S: ST.t) : ST.t :=
match n with
| O => S
| S n => let S' := F S in
if ST.ble (F S') S' then iter_down n S' else S
end.
Definition niter_down := 3%nat.
Definition postfixpoint : ST.t :=
iter_down niter_down (iter_up ST.bot (ST.widen_order_wf ST.bot)).
Lemma iter_up_sound:
forall S acc, ST.le (F (iter_up S acc)) (iter_up S acc).
Proof.
induction S using (well_founded_induction ST.widen_order_wf).
intros acc. destruct acc. cbn. destruct (is_true (ST.ble (F S) S)).
- apply ST.ble_1; auto.
- apply H. exists (F S); auto.
Qed.
Lemma iter_down_sound:
forall n S, ST.le (F S) S -> ST.le (F (iter_down n S)) (iter_down n S).
Proof.
induction n; intros; cbn.
- auto.
- destruct (ST.ble (F (F S)) (F S)) eqn:BLE.
+ apply IHn. apply ST.ble_1; auto.
+ auto.
Qed.
Lemma postfixpoint_sound: ST.le (F postfixpoint) postfixpoint.
Proof.
apply iter_down_sound. apply iter_up_sound.
Qed.
End FIXPOINT.
(** *** Abstract interpretation of arithmetic expressions *)
(** Same definition as in the simplified version. *)
Fixpoint Aeval (a: aexp) (S: ST.t) : V.t :=
match a with
| CONST n => V.const n
| VAR x => ST.get x S
| PLUS a1 a2 => V.add (Aeval a1 S) (Aeval a2 S)
| MINUS a1 a2 => V.sub (Aeval a1 S) (Aeval a2 S)
end.
Lemma Aeval_sound:
forall s S a,
ST.In s S -> V.In (aeval a s) (Aeval a S).
Proof.
induction a; cbn; intros.
- apply V.const_1.
- apply H.
- apply V.add_1; auto.
- apply V.sub_1; auto.
Qed.
(** *** Inverse analysis of arithmetic and Boolean expressions *)
(** Assuming that the concrete value of [a] belongs to the abstract
value [Res], what do we learn about the values of the variables
that occur in [a]? The facts that we learn are used to refine
the abstract values of these variables in the abstract store [S]. *)
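(** Example: if [a] is [PLUS (VAR "x") (CONST 1)] and [Res] constrains the
    sum to the interval [0, 10], the [PLUS] case refines the abstract value
    of ["x"] with [V.meet N1 (V.sub Res N2)], i.e. with [-1, 9] in the
    interval domain of section 5.8. *)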
Fixpoint assume_eval (a: aexp) (Res: V.t) (S: ST.t) : ST.t :=
match a with
| CONST n =>
if V.isIn n Res then S else ST.bot
| VAR x =>
ST.set x (V.meet Res (ST.get x S)) S
| PLUS a1 a2 =>
let N1 := Aeval a1 S in
let N2 := Aeval a2 S in
let Res1 := V.meet N1 (V.sub Res N2) in
let Res2 := V.meet N2 (V.sub Res N1) in
assume_eval a1 Res1 (assume_eval a2 Res2 S)
| MINUS a1 a2 =>
let N1 := Aeval a1 S in
let N2 := Aeval a2 S in
let Res1 := V.meet N1 (V.add Res N2) in
let Res2 := V.meet N2 (V.sub N1 Res) in
assume_eval a1 Res1 (assume_eval a2 Res2 S)
end.
Lemma assume_eval_sound:
forall a Res S s,
V.In (aeval a s) Res -> ST.In s S -> ST.In s (assume_eval a Res S).
Proof.
induction a; cbn; intros.
- (* CONST *)
rewrite V.isIn_1 by auto. auto.
- (* VAR *)
replace s with (update x (s x) s).
apply ST.set_1; auto.
apply V.meet_1; auto.
apply functional_extensionality; intros y.
unfold update; destruct (string_dec x y); congruence.
- (* PLUS *)
set (n1 := aeval a1 s) in *. set (n2 := aeval a2 s) in *.
set (N1 := Aeval a1 S). set (N2 := Aeval a2 S).
assert (V.In n1 N1) by (apply Aeval_sound; auto).
assert (V.In n2 N2) by (apply Aeval_sound; auto).
apply IHa1; fold n1.
apply V.meet_1. auto. replace n1 with ((n1 + n2) - n2) by lia. apply V.sub_1; auto.
apply IHa2; fold n2.
apply V.meet_1. auto. replace n2 with ((n1 + n2) - n1) by lia. apply V.sub_1; auto.
auto.
- (* MINUS *)
set (n1 := aeval a1 s) in *. set (n2 := aeval a2 s) in *.
set (N1 := Aeval a1 S). set (N2 := Aeval a2 S).
assert (V.In n1 N1) by (apply Aeval_sound; auto).
assert (V.In n2 N2) by (apply Aeval_sound; auto).
apply IHa1; fold n1.
apply V.meet_1. auto. replace n1 with ((n1 - n2) + n2) by lia. apply V.add_1; auto.
apply IHa2; fold n2.
apply V.meet_1. auto. replace n2 with (n1 - (n1 - n2)) by lia. apply V.sub_1; auto.
auto.
Qed.
(** Assuming that the Boolean expression [b] evaluates concretely to
the Boolean value [res], what do we learn about the values of the
variables that occur in [b]? The facts that we learn are used to
refine the abstract values of these variables in the abstract
store [S]. *)
Fixpoint assume_test (b: bexp) (res: bool) (S: ST.t): ST.t :=
match b with
| TRUE => if res then S else ST.bot
| FALSE => if res then ST.bot else S
| EQUAL a1 a2 =>
let (Res1, Res2) :=
if res
then V.eq_inv (Aeval a1 S) (Aeval a2 S)
else V.ne_inv (Aeval a1 S) (Aeval a2 S) in
assume_eval a1 Res1 (assume_eval a2 Res2 S)
| LESSEQUAL a1 a2 =>
let (Res1, Res2) :=
if res
then V.le_inv (Aeval a1 S) (Aeval a2 S)
else V.gt_inv (Aeval a1 S) (Aeval a2 S) in
assume_eval a1 Res1 (assume_eval a2 Res2 S)
| NOT b1 =>
assume_test b1 (negb res) S
| AND b1 b2 =>
if res
then assume_test b1 true (assume_test b2 true S)
else ST.join (assume_test b1 false S) (assume_test b2 false S)
end.
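(** Example: assuming [LESSEQUAL (VAR "x") (CONST 0)] evaluates to [true]
    goes through [le_inv], which in the interval domain caps the upper
    bound of ["x"] at 0. *)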
Lemma assume_test_sound:
forall b res S s,
beval b s = res -> ST.In s S -> ST.In s (assume_test b res S).
Proof.
induction b; cbn; intros.
- (* TRUE *)
subst res; auto.
- (* FALSE *)
subst res; auto.
- (* EQUAL *)
set (Res := if res
then V.eq_inv (Aeval a1 S) (Aeval a2 S)
else V.ne_inv (Aeval a1 S) (Aeval a2 S)).
assert (A: V.In (aeval a1 s) (fst Res) /\ V.In (aeval a2 s) (snd Res)).
{ unfold Res; destruct res;
[ apply V.eq_inv_1 | apply V.ne_inv_1 ]; auto using Aeval_sound.
- apply Z.eqb_eq; auto.
- apply Z.eqb_neq; auto.
}
destruct A as [A1 A2]. destruct Res as [Res1 Res2]. auto using assume_eval_sound.
- (* LESSEQUAL *)
set (Res := if res
then V.le_inv (Aeval a1 S) (Aeval a2 S)
else V.gt_inv (Aeval a1 S) (Aeval a2 S)).
assert (A: V.In (aeval a1 s) (fst Res) /\ V.In (aeval a2 s) (snd Res)).
{ unfold Res; destruct res;
[ apply V.le_inv_1 | apply V.gt_inv_1 ]; auto using Aeval_sound.
- apply Z.leb_le; auto.
- apply Z.leb_nle in H. lia.
}
destruct A as [A1 A2]. destruct Res as [Res1 Res2]. auto using assume_eval_sound.
- (* NOT *)
apply IHb; auto. rewrite <- H. rewrite negb_involutive; auto.
- (* AND *)
destruct res.
+ (* AND, true *)
destruct (andb_prop _ _ H).
auto.
+ (* AND, false *)
destruct (andb_false_elim _ _ H); [apply ST.join_1 | apply ST.join_2]; auto.
Qed.
(** *** Improved abstract interpretation of commands *)
(** We add calls to [assume_test] every time a Boolean condition
is known to be true or to be false. *)
Fixpoint Cexec (c: com) (S: ST.t) : ST.t :=
match c with
| SKIP => S
| ASSIGN x a => ST.set x (Aeval a S) S
| SEQ c1 c2 => Cexec c2 (Cexec c1 S)
| IFTHENELSE b c1 c2 =>
ST.join (Cexec c1 (assume_test b true S))
(Cexec c2 (assume_test b false S))
| WHILE b c =>
assume_test b false
(postfixpoint (fun X => ST.join S (Cexec c (assume_test b true X))))
end.
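(** Example: for [WHILE (LESSEQUAL (VAR "x") (CONST 99))
                    (ASSIGN "x" (PLUS (VAR "x") (CONST 1)))]
    with ["x"] initially in [0, 0] (interval domain of section 5.8), the
    upward iteration widens the invariant for ["x"] to [0, +infinity); the
    narrowing steps then recover [0, 100], and [assume_test b false] yields
    [100, 100] at loop exit. *)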
Theorem Cexec_sound:
forall c s s' S,
ST.In s S -> cexec s c s' -> ST.In s' (Cexec c S).
Proof.
Opaque niter_down.
induction c; intros s s' S PRE EXEC; cbn.
- (* SKIP *)
inversion EXEC; subst. auto.
- (* ASSIGN *)
inversion EXEC; subst. apply ST.set_1; auto. apply Aeval_sound; auto.
- (* SEQ *)
inversion EXEC; subst. eauto.
- (* IFTHENELSE *)
inversion EXEC; subst. destruct (beval b s) eqn:B.
apply ST.join_1. eapply IHc1; eauto. apply assume_test_sound; auto.
apply ST.join_2. eapply IHc2; eauto. apply assume_test_sound; auto.
- (* WHILE *)
set (F := fun X => ST.join S (Cexec c (assume_test b true X))).
set (X := postfixpoint F).
assert (L : ST.le (F X) X) by (apply postfixpoint_sound).
assert (REC: forall s1 c1 s2,
cexec s1 c1 s2 ->
c1 = WHILE b c ->
ST.In s1 X ->
ST.In s2 (assume_test b false X)).
{ induction 1; intro EQ; inversion EQ; subst; intros.
- (* WHILE done *)
apply assume_test_sound; auto.
- (* WHILE loop *)
apply IHcexec2; auto. apply L. unfold F. apply ST.join_2.
eapply IHc; eauto. apply assume_test_sound; auto.
}
eapply REC; eauto. apply L. unfold F. apply ST.join_1. auto.
Qed.
End Analysis.
(** ** 5.7. An improved abstract domain for stores *)
(** We start from the abstract domain in module [AbstrInterp], section 5.3,
and add the widening operator and its properties. *)
Module IdentMap := AbstrInterp.IdentMap.
Module IMFact := AbstrInterp.IMFact.
Module IMProp := AbstrInterp.IMProp.
Module StoreAbstr (VA: VALUE_ABSTRACTION) <: STORE_ABSTRACTION.
Module V := VA.
Inductive abstr_state : Type :=
| Bot
| Top_except (m: IdentMap.t V.t).
Definition t := abstr_state.
Definition get (x: ident) (S: t) : V.t :=
match S with
| Bot => V.bot
| Top_except m =>
match IdentMap.find x m with
| None => V.top
| Some v => v
end
end.
Definition In (s: store) (S: t) : Prop :=
forall x, V.In (s x) (get x S).
Definition set (x: ident) (N: V.t) (S: t): t :=
if V.ble N V.bot then Bot else
match S with
| Bot => Bot
| Top_except m => Top_except (IdentMap.add x N m)
end.
Lemma set_1:
forall x n N s S,
V.In n N -> In s S -> In (update x n s) (set x N S).
Proof.
unfold In, get, set; intros.
destruct (V.ble N V.bot) eqn:BLE; [ | destruct S ].
- apply V.ble_1 in BLE. apply BLE in H. elim (V.bot_1 n); auto.
- elim (V.bot_1 (s "")). auto.
- rewrite IMFact.add_o. change IdentMap.E.eq_dec with string_dec. unfold update.
destruct (string_dec x x0); auto.
Qed.
(** The order between abstract states. *)
Definition le (S1 S2: t) : Prop :=
forall s, In s S1 -> In s S2.
Definition ble (S1 S2: t) : bool :=
match S1, S2 with
| Bot, _ => true
| _, Bot => false
| Top_except m1, Top_except m2 =>
IMProp.for_all (fun x v => V.ble (get x S1) v) m2
end.
Lemma ble_1: forall S1 S2, ble S1 S2 = true -> le S1 S2.
Proof.
unfold ble, le; intros.
destruct S1 as [ | m1].
- elim (V.bot_1 (s "")). apply H0.
- destruct S2 as [ | m2]. discriminate.
red; cbn; intros. destruct (IdentMap.find x m2) as [N2|] eqn:F2.
+ apply IdentMap.find_2 in F2. eapply IMProp.for_all_iff in H; eauto.
apply V.ble_1 in H. apply H. apply H0.
hnf. intros; subst x0. hnf; intros. subst x0. auto.
+ apply V.top_1.
Qed.
Lemma ble_false: forall s1 s2,
s2 <> Bot -> ble s1 s2 = false -> exists x, V.ble (get x s1) (get x s2) = false.
Proof.
unfold ble; intros.
destruct s1 as [ | m1]. discriminate. destruct s2 as [ | m2]. congruence.
- set (p := fun (x: IdentMap.key) v => V.ble (get x (Top_except m1)) v) in *.
set (m' := IMProp.filter (fun x v => negb (p x v)) m2).
destruct (IdentMap.elements m') as [ | [x1 v1] l1] eqn:ELT.
+ assert (IMProp.for_all p m2 = true).
{ apply IMProp.for_all_iff.
repeat (hnf; intros). congruence.
intros. destruct (p k e) eqn:P; auto.
assert (M: IdentMap.MapsTo k e m').
{ apply IMProp.filter_iff.
repeat (hnf; intros). congruence.
rewrite P; auto. }
apply IdentMap.elements_1 in M. rewrite ELT in M. inversion M.
}
congruence.
+ assert (M: IdentMap.MapsTo x1 v1 m').
{ apply IdentMap.elements_2. rewrite ELT. constructor. hnf; auto. }
apply IMProp.filter_iff in M. destruct M as [M N]. apply negb_true_iff in N.
exists x1. unfold get at 2. erewrite IdentMap.find_1 by eauto. exact N.
repeat (hnf; intros). congruence.
Qed.
(** The lattice operations. *)
Definition bot: t := Bot.
Lemma bot_1: forall s, ~(In s bot).
Proof.
unfold In; cbn. intros s IN. apply (V.bot_1 (s "")). apply IN.
Qed.
Definition top: t := Top_except (IdentMap.empty V.t).
Lemma top_1: forall s, In s top.
Proof.
unfold In, top, get; cbn. intros. apply V.top_1.
Qed.
Definition join_aux (ov1 ov2: option V.t) : option V.t :=
match ov1, ov2 with
| Some v1, Some v2 => Some (V.join v1 v2)
| _, _ => None
end.
Definition join (S1 S2: t) : t :=
match S1, S2 with
| Bot, _ => S2
| _, Bot => S1
| Top_except m1, Top_except m2 =>
Top_except (IdentMap.map2 join_aux m1 m2)
end.
Lemma join_1:
forall s S1 S2, In s S1 -> In s (join S1 S2).
Proof.
unfold join; intros.
destruct S1 as [ | m1]. elim (bot_1 s); auto.
destruct S2 as [ | m2]. auto.
- red; unfold get; intros. rewrite IMFact.map2_1bis; auto.
unfold join_aux. specialize (H x). unfold get in H.
destruct (IdentMap.find x m1).
+ destruct (IdentMap.find x m2).
* apply V.join_1; auto.
* apply V.top_1.
+ apply V.top_1.
Qed.
Lemma join_2:
forall s S1 S2, In s S2 -> In s (join S1 S2).
Proof.
unfold join; intros.
destruct S1 as [ | m1]. auto.
destruct S2 as [ | m2]. elim (bot_1 s); auto.
- red; unfold get; intros. rewrite IMFact.map2_1bis; auto.
unfold join_aux. specialize (H x). unfold get in H.
destruct (IdentMap.find x m1).
+ destruct (IdentMap.find x m2).
* apply V.join_2; auto.
* apply V.top_1.
+ apply V.top_1.
Qed.
(** The widening operator. We apply pointwise the [V.widen] widening
provided by the value domain, with default cases for variables
not described in the map, which are implicitly set to [V.top]. *)
Definition widen_aux (ov1 ov2: option V.t) : option V.t :=
match ov1, ov2 with
| Some v1, Some v2 => Some (V.widen v1 v2)
| None, _ => None
| _, None => None
end.
Definition widen (s1 s2: t) : t :=
match s1, s2 with
| Bot, _ => s2
| _, Bot => s1
| Top_except m1, Top_except m2 => Top_except (IdentMap.map2 widen_aux m1 m2)
end.
Lemma widen_1: forall s1 s2, le s1 (widen s1 s2).
Proof.
unfold le, widen; intros.
destruct s1 as [ | m1]. elim (bot_1 _ H).
destruct s2 as [ | m2]. auto.
red; unfold get; intros. specialize (H x); cbn in H.
rewrite IMFact.map2_1bis; auto. unfold widen_aux.
destruct (IdentMap.find x m1); destruct (IdentMap.find x m2);
auto using V.top_1.
apply V.widen_1; auto.
Qed.
(** Constructing a well-founded order that guarantees termination is difficult.
We start by defining a measure with nonnegative integer values
for a finite map [IdentMap.t V.t]. This measure is the sum
of the measures of the abstract values in the codomain of this
finite map. *)
Definition measure_map (m: IdentMap.t V.t) : nat :=
IdentMap.fold (fun x v n => (V.measure v + n)%nat) m 0%nat.
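(** For instance, with the interval domain of section 5.8, [measure_map]
    essentially counts the finite bounds ([Fin]) over all variables of the
    map, so a widening step that turns a finite bound into [Inf] strictly
    decreases it. *)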
Remark measure_map_empty:
forall m, IdentMap.Empty m -> measure_map m = 0%nat.
Proof.
intros. apply IMProp.fold_Empty; auto.
Qed.
Remark measure_map_add:
forall m x v m', ~IdentMap.In x m -> IMProp.Add x v m m' ->
measure_map m' = (V.measure v + measure_map m)%nat.
Proof.
intros. unfold measure_map; eapply IMProp.fold_Add with (f := fun x v n => (V.measure v + n)%nat); eauto.
repeat (hnf; intros). congruence.
hnf; intros. lia.
Qed.
Remark measure_map_remove:
forall m x,
measure_map m = (V.measure (get x (Top_except m)) + measure_map (IdentMap.remove x m))%nat.
Proof.
intros. unfold get. destruct (IdentMap.find x m) as [v|] eqn:F.
- apply measure_map_add with x.
apply IMFact.not_find_in_iff. rewrite IMFact.remove_eq_o; auto.
red; intros. rewrite IMFact.add_o, IMFact.remove_o.
destruct (AbstrInterp.IdentMap.E.eq_dec x y); congruence.
- rewrite V.measure_top. unfold measure_map. eapply IMProp.fold_Equal. auto.
repeat (hnf; intros). congruence.
hnf; intros; lia.
red; intros. rewrite IMFact.remove_o.
destruct (AbstrInterp.IdentMap.E.eq_dec x y); congruence.
Qed.
Lemma measure_map_le: forall m1 m2,
(forall x, V.measure (get x (Top_except m1)) <= V.measure (get x (Top_except m2)))%nat ->
(measure_map m1 <= measure_map m2)%nat.
Proof.
intros m0. pattern m0. unfold measure_map at 1; apply IMProp.fold_rec.
- intros m EMPTY m2 LE. lia.
- intros x v1 n m' m'' MAP NOTIN ADD REC m2 LE.
set (m2' := IdentMap.remove x m2).
assert (LE': forall x, (V.measure (get x (Top_except m')) <= V.measure (get x (Top_except m2')))%nat).
{ intros y. generalize (LE y). unfold get, m2'. rewrite ADD, IMFact.add_o, IMFact.remove_o.
destruct (AbstrInterp.IdentMap.E.eq_dec x y).
+ subst y. apply IMFact.not_find_in_iff in NOTIN. rewrite NOTIN. rewrite ! V.measure_top. lia.
+ auto. }
apply REC in LE'.
rewrite (measure_map_remove m2 x). fold m2'.
specialize (LE x). unfold get at 1 in LE. rewrite ADD, IMFact.add_eq_o in LE by auto.
lia.
Qed.
Lemma measure_map_lt: forall m1 m2,
(forall x, V.measure (get x (Top_except m1)) <= V.measure (get x (Top_except m2)))%nat ->
(exists x, V.measure (get x (Top_except m1)) < V.measure (get x (Top_except m2)))%nat ->
(measure_map m1 < measure_map m2)%nat.
Proof.
intros m1 m2 LE (x & LT).
rewrite (measure_map_remove m1 x), (measure_map_remove m2 x).
assert ((measure_map (IdentMap.remove x m1) <= measure_map (IdentMap.remove x m2))%nat).
{ apply measure_map_le.
intros y; unfold get. rewrite ! IMFact.remove_o.
destruct (AbstrInterp.IdentMap.E.eq_dec x y).
lia.
apply LE. }
lia.
Qed.
(** We then show that this measure strictly decreases during a widening
step that does not mention [Bot]. *)
Lemma measure_widen_lt: forall m1 m2,
ble (Top_except m2) (Top_except m1) = false ->
(measure_map (IdentMap.map2 widen_aux m1 m2) < measure_map m1)%nat.
Proof.
intros. apply ble_false in H. 2: congruence. destruct H as (x & BL).
apply measure_map_lt.
- intros y. unfold get. rewrite IMFact.map2_1bis by auto. unfold widen_aux.
destruct (IdentMap.find y m1) as [ v1 |].
destruct (IdentMap.find y m2) as [ v2 |].
apply V.widen_2.
rewrite V.measure_top; lia.
rewrite V.measure_top; lia.
- exists x. unfold get in *. rewrite IMFact.map2_1bis by auto. unfold widen_aux.
destruct (IdentMap.find x m1) as [ v1 |].
destruct (IdentMap.find x m2) as [ v2 |].
apply V.widen_3 in BL; auto.
apply V.widen_3 in BL; rewrite V.measure_top; lia.
assert (T: forall z, V.ble z V.top = true).
{ intros. apply V.ble_2. red; intros. apply V.top_1. }
rewrite T in BL. congruence.
Qed.
(** We conclude that the widening order is well founded. *)
Definition widen_order (S S1: t) := exists S2, S = widen S1 S2 /\ ble S2 S1 = false.
Lemma widen_order_wf: well_founded widen_order.
Proof.
assert (A: forall m, Acc widen_order (Top_except m)).
{ induction m using (well_founded_ind (well_founded_ltof _ measure_map)).
constructor. intros S (S2 & EQ & BLE). subst S.
destruct S2. discriminate. apply H. apply measure_widen_lt. auto. }
assert (B: Acc widen_order Bot).
{ constructor. intros S (S2 & EQ & BLE). subst S.
unfold ble in BLE. destruct S2. discriminate. apply A. }
red. destruct a; auto.
Defined.
End StoreAbstr.
(** ** 5.8. The abstract domain of intervals *)
(** We first define the type [zinf] of integers complemented with a
"plus infinity" value. *)
Inductive zinf : Type := Fin (h: Z) | Inf.
Coercion Fin : Z >-> zinf.
Module Zinf.
Definition In (n: Z) (N: zinf) : Prop :=
match N with Fin h => n <= h | Inf => True end.
Lemma In_mono: forall n1 n2 N, n1 <= n2 -> In n2 N -> In n1 N.
Proof.
unfold In; destruct N; intros. lia. auto.
Qed.
Definition le (N1 N2: zinf) : Prop :=
forall n, In n N1 -> In n N2.
Lemma le_Fin: forall n1 N2, le (Fin n1) N2 <-> In n1 N2.
Proof.
unfold le; cbn; intros; split; intros.
- apply H. lia.
- destruct N2; cbn in *; auto. lia.
Qed.
Lemma le_is_Inf: forall N h, (forall n, h <= n -> In n N) -> N = Inf.
Proof.
destruct N; cbn; intros; auto.
specialize (H (Z.max h0 (h + 1))). lia.
Qed.
Lemma le_Inf: forall N, le Inf N <-> N = Inf.
Proof.
unfold le; intros; split; intros.
- apply le_is_Inf with 0. intros; apply H; exact I.
- subst N; exact I.
Qed.
Definition ble (N1 N2: zinf) : bool :=
match N1, N2 with _, Inf => true | Inf, _ => false | Fin h1, Fin h2 => h1 <=? h2 end.
Lemma ble_1: forall N1 N2, ble N1 N2 = true -> le N1 N2.
Proof.
unfold ble, le, In; intros.
destruct N1, N2; auto.
apply Z.leb_le in H. lia.
discriminate.
Qed.
Lemma ble_2: forall N1 N2, le N1 N2 -> ble N1 N2 = true.
Proof.
unfold ble; intros. destruct N1.
- apply le_Fin in H. destruct N2; auto. apply Z.leb_le; auto.
- apply le_Inf in H. rewrite H. auto.
Qed.
Definition max (N1 N2: zinf) : zinf :=
match N1, N2 with Inf, _ => Inf | _, Inf => Inf | Fin h1, Fin h2 => Fin (Z.max h1 h2) end.
Lemma max_1: forall n N1 N2, In n N1 -> In n (max N1 N2).
Proof.
unfold In, max; intros. destruct N1; auto. destruct N2; auto. lia.
Qed.
Lemma max_2: forall n N1 N2, In n N2 -> In n (max N1 N2).
Proof.
unfold In, max; intros. destruct N1; auto. destruct N2; auto. lia.
Qed.
Definition min (N1 N2: zinf) : zinf :=
match N1, N2 with Inf, _ => N2 | _, Inf => N1 | Fin h1, Fin h2 => Fin (Z.min h1 h2) end.
Lemma min_1: forall n N1 N2, In n N1 -> In n N2 -> In n (min N1 N2).
Proof.
unfold In, min; intros. destruct N1; auto. destruct N2; auto. lia.
Qed.
Definition add (N1 N2: zinf) : zinf :=
match N1, N2 with Inf, _ => Inf | _, Inf => Inf | Fin h1, Fin h2 => Fin (h1 + h2) end.
Lemma add_1: forall n1 n2 N1 N2, In n1 N1 -> In n2 N2 -> In (n1 + n2) (add N1 N2).
Proof.
unfold In, add; intros. destruct N1; auto. destruct N2; auto. lia.
Qed.
Definition isIn (n: Z) (N: zinf) : bool :=
match N with Fin h => n <=? h | Inf => true end.
Lemma isIn_1:
forall n N, In n N -> isIn n N = true.
Proof.
unfold In, isIn; intros. destruct N; auto. apply Z.leb_le; auto.
Qed.
Definition pred (N: zinf) : zinf :=
match N with Inf => Inf | Fin n => Fin (n - 1) end.
Lemma pred_1: forall n N, In n N -> In (n - 1) (pred N).
Proof.
unfold pred, In; intros; destruct N; auto. lia.
Qed.
(** We define widening between two possibly infinite integers as follows:
if the integer increases strictly, we jump to infinity, otherwise
we keep the first integer. *)
Definition widen (N1 N2: zinf) : zinf :=
if ble N2 N1 then N1 else Inf.
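(** Example: [widen (Fin 5) (Fin 7) = Inf] (the bound grew, so we jump to
    infinity), whereas [widen (Fin 5) (Fin 3) = Fin 5] (the bound is
    stable, so we keep it). *)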
Lemma widen_1: forall N1 N2, le N1 (widen N1 N2).
Proof.
unfold widen; intros. destruct (ble N2 N1) eqn:LE.
red; auto.
red; unfold In; auto.
Qed.
Definition measure (N: zinf) : nat :=
match N with Inf => 0%nat | Fin _ => 1%nat end.
Lemma measure_1: forall N, (measure N <= 1)%nat.
Proof.
destruct N; cbn; lia.
Qed.
Lemma widen_2:
forall N1 N2, (measure (widen N1 N2) <= measure N1)%nat.
Proof.
intros. unfold widen. destruct (ble N2 N1) eqn:BLE.
- lia.
- destruct N1. cbn; lia. destruct N2; discriminate.
Qed.
Lemma widen_3:
forall N1 N2, ble N2 N1 = false -> (measure (widen N1 N2) < measure N1)%nat.
Proof.
destruct N1, N2; cbn; intros; auto; try discriminate.
unfold widen. cbn. rewrite H. cbn. lia.
Qed.
End Zinf.
(** An interval is encoded as a pair of two [zinf].
The second [zinf] is the upper bound.
The first [zinf] is the opposite of the lower bound.
This representation trick makes it possible to have only one
infinity [Inf], instead of a negative infinity for lower bounds
and a positive infinity for upper bounds. *)
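(** For instance, the concrete interval [-3, 5] is represented as
    [{| low := Fin 3; high := Fin 5 |}]: membership of [n] requires
    [n <= 5] and [-n <= 3], that is, [-3 <= n <= 5]. *)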
Module Intervals <: VALUE_ABSTRACTION.
(** The type of abstract values. *)
Record interval : Type := intv { low: zinf; high: zinf }.
Definition t := interval.
(** Membership: [n] must be below the upper bound, and the opposite of [n]
must be below the opposite of the lower bound. *)
Definition In (n: Z) (N: t) : Prop :=
Zinf.In n (high N) /\ Zinf.In (-n) (low N).
Definition le (N1 N2: t) : Prop :=
forall n, In n N1 -> In n N2.
(** Test whether an interval is empty. *)
Definition isempty (N: t) : bool :=
match N with
| {| low := Fin l; high := Fin h |} => h <? (-l)
| _ => false
end.
Lemma isempty_1: forall n N, isempty N = true -> In n N -> False.
Proof.
unfold isempty, In; intros. destruct N as [[l|] [h|]]; try discriminate.
apply Z.ltb_lt in H. cbn in H0. lia.
Qed.
Lemma isempty_2: forall N, isempty N = false -> exists n, In n N.
Proof.
unfold isempty, In; intros. destruct N as [[l|] [h|]]; cbn.
- apply Z.ltb_ge in H. exists h; lia.
- exists (- l); lia.
- exists h; lia.
- exists 0; auto.
Qed.
Lemma nonempty_le: forall N1 N2,
le N1 N2 -> isempty N1 = false -> (Zinf.le (high N1) (high N2) /\ Zinf.le (low N1) (low N2)).
Proof.
unfold le, In, isempty; intros.
destruct N1 as [[l1 |] [h1|]]; cbn in *; rewrite ? Zinf.le_Fin, ? Zinf.le_Inf.
- apply Z.ltb_ge in H0. split.
+ apply H; lia.
+ replace l1 with (- - l1) by lia. apply H. lia.
- split.
+ apply Zinf.le_is_Inf with (-l1). intros; apply H. intuition lia.
+ replace l1 with (- - l1) by lia. apply H. intuition lia.
- split.
+ apply H. intuition lia.
    + apply Zinf.le_is_Inf with (- h1).
intros. replace n with (- - n) by lia. apply H. intuition lia.
- split; apply Zinf.le_is_Inf with 0; intros.
+ apply H; auto.
+ replace n with (- - n) by lia. apply H. auto.
Qed.
(** [ble] is a Boolean-valued function that decides the [le] relation. *)
Definition ble (N1 N2: t) : bool :=
isempty N1 || (Zinf.ble (high N1) (high N2) && Zinf.ble (low N1) (low N2)).
Lemma ble_1: forall N1 N2, ble N1 N2 = true -> le N1 N2.
Proof.
unfold ble, le, In; intros.
destruct (isempty N1) eqn:E.
elim (isempty_1 _ _ E H0).
apply andb_prop in H. destruct H as [B1 B2].
apply Zinf.ble_1 in B1. apply Zinf.ble_1 in B2.
intuition.
Qed.
Lemma ble_2: forall N1 N2, le N1 N2 -> ble N1 N2 = true.
Proof.
unfold ble; intros. destruct (isempty N1) eqn:E; auto.
destruct (nonempty_le N1 N2) as [P Q]; auto.
apply andb_true_intro; split; apply Zinf.ble_2; auto.
Qed.
(** [const n] is the abstract value for the singleton set [{n}]. *)
Definition const (n: Z) : t := {| low := Fin (-n); high := Fin n |}.
Lemma const_1: forall n, In n (const n).
Proof.
unfold const, In, Zinf.In; intros; cbn. lia.
Qed.
(** [bot] represents the empty set. *)
Definition bot: t := {| low := Fin 0; high := Fin (-1) |}.
Lemma bot_1: forall n, ~(In n bot).
Proof.
unfold bot, In, Zinf.In; intros; cbn. lia.
Qed.
(** [top] represents the set of all integers. *)
Definition top: t := {| low := Inf; high := Inf |}.
Lemma top_1: forall n, In n top.
Proof.
intros. split; exact I.
Qed.
(** [join] computes an upper bound of its two arguments. *)
Definition join (N1 N2: t) : t :=
{| low := Zinf.max (low N1) (low N2);
high := Zinf.max (high N1) (high N2) |}.
Lemma join_1:
forall n N1 N2, In n N1 -> In n (join N1 N2).
Proof.
unfold In, join; intros; cbn. split; apply Zinf.max_1; tauto.
Qed.
Lemma join_2:
forall n N1 N2, In n N2 -> In n (join N1 N2).
Proof.
unfold In, join; intros; cbn. split; apply Zinf.max_2; tauto.
Qed.
(** The abstract operators for addition and subtraction. *)
Definition add (N1 N2: t) : t :=
if isempty N1 || isempty N2 then bot else
{| low := Zinf.add (low N1) (low N2);
high := Zinf.add (high N1) (high N2) |}.
Lemma add_1:
forall n1 n2 N1 N2, In n1 N1 -> In n2 N2 -> In (n1 + n2) (add N1 N2).
Proof.
unfold add; intros.
destruct (isempty N1) eqn:E1. elim (isempty_1 n1 N1); auto.
destruct (isempty N2) eqn:E2. elim (isempty_1 n2 N2); auto.
destruct H; destruct H0; split; cbn.
apply Zinf.add_1; auto.
replace (- (n1 + n2)) with ((-n1) + (-n2)) by lia. apply Zinf.add_1; auto.
Qed.
Definition opp (v: t) : t := {| low := high v; high := low v |}.
Lemma opp_1:
forall n v, In n v -> In (-n) (opp v).
Proof.
unfold In, opp; intros; cbn. replace (- - n) with n by lia. tauto.
Qed.
Definition sub (N1 N2: t) : t := add N1 (opp N2).
Lemma sub_1:
forall n1 n2 N1 N2, In n1 N1 -> In n2 N2 -> In (n1 - n2) (sub N1 N2).
Proof.
intros. apply add_1; auto. apply opp_1; auto.
Qed.
(** [meet] computes a lower bound for its two arguments. *)
Definition meet (N1 N2: t) : t :=
{| low := Zinf.min (low N1) (low N2);
high := Zinf.min (high N1) (high N2) |}.
Lemma meet_1:
forall n N1 N2, In n N1 -> In n N2 -> In n (meet N1 N2).
Proof.
unfold In, meet; intros; cbn. split; apply Zinf.min_1; tauto.
Qed.
(** [isIn] tests whether a concrete value belongs to an abstract value. *)
Definition isIn (n: Z) (v: t) : bool :=
Zinf.isIn n (high v) && Zinf.isIn (-n) (low v).
Lemma isIn_1:
forall n v, In n v -> isIn n v = true.
Proof.
unfold In, isIn; intros.
apply andb_true_intro; split; apply Zinf.isIn_1; tauto.
Qed.
(** Abstract operators for inverse analysis of comparisons. *)
Definition eq_inv (N1 N2: t) : t * t := (meet N1 N2, meet N1 N2).
Lemma eq_inv_1:
forall n1 n2 a1 a2,
In n1 a1 -> In n2 a2 -> n1 = n2 ->
In n1 (fst (eq_inv a1 a2)) /\ In n2 (snd (eq_inv a1 a2)).
Proof.
intros; cbn. subst n2. split; apply meet_1; auto.
Qed.
Definition ne_inv (N1 N2: t) : t * t := (N1, N2).
Lemma ne_inv_1:
forall n1 n2 a1 a2,
In n1 a1 -> In n2 a2 -> n1 <> n2 ->
In n1 (fst (ne_inv a1 a2)) /\ In n2 (snd (ne_inv a1 a2)).
Proof.
intros; cbn; auto.
Qed.
(** For the [<=] comparison, the upper bound of [N1] is at most that of [N2],
and the lower bound of [N2] is at least that of [N1]. *)
Definition le_inv (N1 N2: t) : t * t :=
( {| low := low N1; high := Zinf.min (high N1) (high N2) |},
{| low := Zinf.min (low N1) (low N2); high := high N2 |} ).
Lemma le_inv_1:
forall n1 n2 a1 a2,
In n1 a1 -> In n2 a2 -> n1 <= n2 ->
In n1 (fst (le_inv a1 a2)) /\ In n2 (snd (le_inv a1 a2)).
Proof.
unfold In, le_inv; intros; cbn.
intuition auto; apply Zinf.min_1; auto.
apply Zinf.In_mono with n2; auto.
apply Zinf.In_mono with (-n1); auto. lia.
Qed.
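(** Example (hypothetical output, assuming [Zinf.min] takes the pointwise
    minimum of finite bounds): from [n1 <= n2] with [n1] in [[0,10]] and
    [n2] in [[-5,5]], both sides are narrowed to [[0,5]]. *)
Compute (le_inv {| low := Fin 0; high := Fin 10 |} {| low := Fin 5; high := Fin 5 |}).
(* expected: ({| low := Fin 0; high := Fin 5 |}, {| low := Fin 0; high := Fin 5 |}) *)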
(** For the [>] comparison, the lower bound of [N1] is at least that of [N2]
    plus 1, and the upper bound of [N2] is at most that of [N1] minus 1. *)
Definition gt_inv (N1 N2: t) : t * t :=
( {| low := Zinf.min (low N1) (Zinf.pred (low N2)); high := high N1 |},
{| low := low N2; high := Zinf.min (high N2) (Zinf.pred (high N1)) |} ).
Lemma gt_inv_1:
forall n1 n2 a1 a2,
In n1 a1 -> In n2 a2 -> n1 > n2 ->
In n1 (fst (gt_inv a1 a2)) /\ In n2 (snd (gt_inv a1 a2)).
Proof.
unfold In, gt_inv; intros; cbn.
intuition auto; apply Zinf.min_1; auto.
apply Zinf.In_mono with ((-n2) - 1). lia. apply Zinf.pred_1; auto.
apply Zinf.In_mono with (n1 - 1). lia. apply Zinf.pred_1; auto.
Qed.
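(** Example (hypothetical output, assuming [Zinf.pred] decrements finite
    bounds): from [n1 > n2] with both in [[0,10]], we learn [n1] in [[1,10]]
    and [n2] in [[0,9]]. *)
Compute (gt_inv {| low := Fin 0; high := Fin 10 |} {| low := Fin 0; high := Fin 10 |}).
(* expected: ({| low := Fin (-1); high := Fin 10 |}, {| low := Fin 0; high := Fin 9 |}) *)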
(** The widening operator. *)
Definition widen (N1 N2: t) : t :=
if isempty N1 then N2 else
if isempty N2 then N1 else
{| low := Zinf.widen (low N1) (low N2); high := Zinf.widen (high N1) (high N2) |}.
Lemma widen_1: forall N1 N2, le N1 (widen N1 N2).
Proof.
unfold le, widen; intros.
destruct (isempty N1) eqn:E1. elim (isempty_1 n N1); auto.
destruct (isempty N2) eqn:E2. auto.
destruct H. split; apply Zinf.widen_1; auto.
Qed.
Definition measure (v: t) : nat :=
if isempty v then 3%nat else (Zinf.measure (low v) + Zinf.measure (high v))%nat.
Lemma measure_top: measure top = 0%nat.
Proof.
auto.
Qed.
Remark isempty_widen: forall N1 N2,
isempty N1 = false -> isempty N2 = false -> isempty (widen N1 N2) = false.
Proof.
intros. destruct (isempty (widen N1 N2)) eqn:E; auto.
destruct (isempty_2 _ H) as (n1 & IN1).
elim (isempty_1 n1 _ E). apply widen_1; auto.
Qed.
Lemma widen_2:
forall N1 N2, (measure (widen N1 N2) <= (measure N1))%nat.
Proof.
unfold measure; intros.
destruct (isempty N1) eqn:E1. unfold widen; rewrite E1.
generalize (Zinf.measure_1 (low N2)) (Zinf.measure_1 (high N2)); intros.
destruct (isempty N2); lia.
destruct (isempty N2) eqn:E2. unfold widen; rewrite E1, E2, E1. lia.
rewrite isempty_widen by auto.
unfold widen; rewrite E1, E2; cbn.
generalize (Zinf.widen_2 (low N1) (low N2)) (Zinf.widen_2 (high N1) (high N2)). lia.
Qed.
Lemma widen_3:
forall N1 N2, ble N2 N1 = false -> (measure (widen N1 N2) < measure N1)%nat.
Proof.
unfold ble, measure; intros.
destruct (isempty N2) eqn:E2. discriminate.
destruct (isempty N1) eqn:E1.
- unfold widen; rewrite E1, E2.
generalize (Zinf.measure_1 (low N2)) (Zinf.measure_1 (high N2)). lia.
- rewrite isempty_widen by auto.
unfold widen; rewrite E1, E2. cbn.
generalize (Zinf.widen_2 (low N1) (low N2)) (Zinf.widen_2 (high N1) (high N2)); intros.
destruct (Zinf.ble (high N2) (high N1)) eqn:LE.
+ cbn in H. apply Zinf.widen_3 in H. lia.
+ apply Zinf.widen_3 in LE. lia.
Qed.
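(** Example (hypothetical output, assuming [Zinf.widen] keeps a bound that
    did not grow and jumps to [Inf] otherwise): widening [[0,1]] with
    [[0,2]] discards the unstable upper bound. *)
Compute (widen {| low := Fin 0; high := Fin 1 |} {| low := Fin 0; high := Fin 2 |}).
(* expected: {| low := Fin 0; high := Inf |}, i.e. [0, +infinity) *)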
End Intervals.
(** ** 5.9. Application: an interval analysis for IMP *)
(** We instantiate the generic analyzer with the domain of intervals. *)
Module SIntervals := StoreAbstr(Intervals).
Module AIntervals := Analysis(SIntervals).
(** First program:
<<
x := 0; y := 100; z := x + y;
while x <= 10 do x := x + 1; y := y - 1 end
>>
*)
Definition prog1 :=
ASSIGN "x" (CONST 0) ;;
ASSIGN "y" (CONST 100) ;;
ASSIGN "z" (PLUS (VAR "x") (VAR "y")) ;;
WHILE (LESSEQUAL (VAR "x") (CONST 10))
(ASSIGN "x" (PLUS (VAR "x") (CONST 1)) ;;
ASSIGN "y" (MINUS (VAR "y") (CONST 1))).
Compute (let S := AIntervals.Cexec prog1 SIntervals.top in
(SIntervals.get "x" S, SIntervals.get "y" S, SIntervals.get "z" S)).
(** Analysis result:
<<
({| Intervals.low := -11; Intervals.high := 11 |},
{| Intervals.low := Inf; Intervals.high := 100 |},
{| Intervals.low := -100; Intervals.high := 100 |})
>>
In other words: [x] is in [[11,11]], [y] in [[-inf,100]], and [z] in [[100,100]].
*)
(** Second program:
<<
x := 0; y := 0;
while x <= 1000 do y := x; x := x + 1 end
>>
*)
Definition prog2 :=
ASSIGN "x" (CONST 0) ;;
ASSIGN "y" (CONST 0) ;;
WHILE (LESSEQUAL (VAR "x") (CONST 1000))
(ASSIGN "y" (VAR "x") ;;
ASSIGN "x" (PLUS (VAR "x") (CONST 1))).
Compute (let S := AIntervals.Cexec prog2 SIntervals.top in
(SIntervals.get "x" S, SIntervals.get "y" S)).
(** Analysis result:
<<
({| Intervals.low := -1001; Intervals.high := 1001 |},
{| Intervals.low := 0; Intervals.high := 1000 |})
>>
In other words: [x] is in [[1001,1001]], and [y] in [[0,1000]].
*)
|
{"author": "xavierleroy", "repo": "cdf-mech-sem", "sha": "f8dc6f7e2cb42f0861406b2fa113e2a7e825c5f3", "save_path": "github-repos/coq/xavierleroy-cdf-mech-sem", "path": "github-repos/coq/xavierleroy-cdf-mech-sem/cdf-mech-sem-f8dc6f7e2cb42f0861406b2fa113e2a7e825c5f3/AbstrInterp2.v"}
|
import discord
from discord.ext import commands
from matplotlib import animation
from sympy.plotting import *
from utility.math_parser import parse_eq
import numpy as np
class Graph(commands.Cog):
"""
Contains various graphing tools
"""
def __init__(self, bot):
self.bot = bot
self.graph_location = 'temp/graph.png'
self.gif_location = 'temp/rotation.gif'
@commands.command()
async def graph(self, ctx, eq: parse_eq):
"""Graphs simple equations"""
p1 = plot(eq, show=False)
await self.send_graph(ctx, p1, self.graph_location, False)
@commands.command()
async def graph3d(self, ctx, eq: parse_eq, show_gif=False):
"""Graphs equations in 3d"""
p1 = plot3d(eq, show=False)
await self.send_graph(ctx, p1, self.graph_location, show_gif)
@commands.command(name='pgraph')
async def p_graph(self, ctx, x: parse_eq, y: parse_eq):
"""
Graph parametric equations
Uses a single parameter
"""
p1 = plot_parametric(x, y, show=False)
await self.send_graph(ctx, p1, self.graph_location, False)
@commands.command(name='pgraph3dline')
async def p_graph_line(self, ctx, x: parse_eq, y: parse_eq, z: parse_eq, show_gif=False):
"""
Graph 3d parametric equations in line form
Uses a single parameter
"""
p1 = plot3d_parametric_line(x, y, z, show=False)
await self.send_graph(ctx, p1, self.graph_location, show_gif)
@commands.command(name='pgraph3dsurface')
async def p_graph_surface(self, ctx, x: parse_eq, y: parse_eq, z: parse_eq, show_gif=False):
"""
Graph 3d parametric equations in surface form
Uses two parameters
"""
p1 = plot3d_parametric_surface(x, y, z, show=False)
await self.send_graph(ctx, p1, self.graph_location, show_gif)
async def send_graph(self, ctx, p1, file_location, show_gif):
p1.save(file_location)
if not show_gif:
await ctx.send(file=discord.File(file_location))
else:
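# Render a rotating view of the 3D plot: step the camera azimuth through
# 0..330 degrees in 30-degree increments and save the frames as a GIF.
# Note: the 'imagemagick' writer assumes ImageMagick is installed on the host.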
backend = p1._backend
fig = backend.fig
ax = fig.gca(projection='3d')
rotation = animation.FuncAnimation(fig, lambda angle: ax.view_init(azim=angle), frames=np.arange(0, 360, 30), interval=500)
rotation.save(self.gif_location, dpi=80, writer='imagemagick')
await ctx.send(file=discord.File(self.gif_location))
|
{"hexsha": "6815032580c3e0a7eb7945b6d99191ad8c29f9e4", "size": 2447, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/cogs/graph.py", "max_stars_repo_name": "RedPandaMath/redpanda", "max_stars_repo_head_hexsha": "e5afedeb2f27d4b79b8079857fc220965fab896b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-28T23:01:29.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-28T23:01:29.000Z", "max_issues_repo_path": "src/cogs/graph.py", "max_issues_repo_name": "AoWangPhilly/redpanda", "max_issues_repo_head_hexsha": "e5afedeb2f27d4b79b8079857fc220965fab896b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cogs/graph.py", "max_forks_repo_name": "AoWangPhilly/redpanda", "max_forks_repo_head_hexsha": "e5afedeb2f27d4b79b8079857fc220965fab896b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7792207792, "max_line_length": 135, "alphanum_fraction": 0.6403759706, "include": true, "reason": "import numpy,from sympy", "num_tokens": 605}
|
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{changepage}% http://ctan.org/pkg/changepage
\usepackage{float}
\usepackage{fancyhdr}
\usepackage{lastpage}
\usepackage{graphicx}
\usepackage{ragged2e}
\usepackage{scrextend}
\pagestyle{fancy}
\renewcommand{\headrulewidth}{0pt}
\rhead{\includegraphics[height= 1cm]{ento1.png}\vspace{1em}}
\chead{\includegraphics[width = 5cm]{agritech1.png}}
\lhead{\includegraphics[width=3cm]{acet1.png}}
\fancyfoot{}
\setlength\headheight{40.8pt}
\RequirePackage[colorlinks=true, allcolors=blue]{hyperref}
\usepackage{titlesec}
\titleformat{\section}{\normalfont\fontsize{10}{15}\bfseries}{\thesection}{0em}{}
\titlespacing{\section}{}{1.24em}{0.24em}
\cfoot{Page \thepage \hspace{1pt} of \pageref{LastPage}}
\begin{document}
\begin{addmargin}[2.8in]{}
Willett, Filgueiras, \newline
Benda, Zhang, and Kenworthy \newline
----------------------------- \newline
Denis S. Willett\newline
Department of Entomology \newline
Cornell AgriTech \newline
Cornell University \newline
15 Castle Creek Drive \newline
Geneva, NY 14456 \newline
deniswillett@cornell.edu \newline
\end{addmargin}
\setlength{\parindent}{0cm}
October 24, 2019
\vspace{1.24em}
Scientific Reports
\vspace{1.24em}
Dear Dr. Geary,
\vspace{0.48em}
\setlength{\parindent}{1.24cm}
Thank you for your consideration of \textit{“Sting nematodes modify metabolomic profiles of host plants”} for publication in Scientific Reports. We have revised the manuscript in accordance with reviewer recommendations. A complete and detailed account of the revisions is included below. The reviewer comments were very helpful, much appreciated, and have resulted in a better manuscript. In addition to submitting a revised version of the manuscript incorporating the latest changes, we are also submitting production quality images.
\vspace{2em}
Much appreciated, \newline
\vspace{1em}
Denis S Willett
\vspace{0.48em}
Camila Cramer Filgueiras
\vspace{0.48em}
Nicole D Benda
\vspace{0.48em}
Jing Zhang
\vspace{0.48em}
Kevin E Kenworthy
\vspace{1.4em}
\hline
\setlength\parindent{0pt}
\subsection*{Reviewer 1 (Remarks to the Author)}
You spend a lot of time talking about relative tolerance of the bermudagrass lines to sting nematode. However, I was never very clear on how you are defining tolerance or how this was determined.
\begin{quote}
\textit{This is an excellent question. We have taken the opportunity to elaborate on this in the introduction. In short, a bermudagrass variety is considered tolerant if it showed no reduction in root length, or had greater root length than ‘Tifway’, despite \textit{B. longicaudatus} infection. Tolerance of these varieties had been established in previous work by Pang et al. 2011 (citations 23 and 24 in the manuscript). }
\end{quote}
On page 2 under Bermudagrass response to sting nematode feeding...I think you should use the term nematode population densities instead of "nematode densities" or "differences in nematode population."
\begin{quote}
\textit{Thanks for pointing this out. We have adjusted the language in that section in accordance with your suggestions.}
\end{quote}
Very well written and very interesting paper
\begin{quote}
\textit{Thank you for your interest and constructive feedback.}
\end{quote}
\subsection*{Reviewer \#2 (Technical Comments to the Author)}
This is a well researched study and the manuscript well structured and written. The methods used are scientifically sound, results appropriately interpreted and discussed. Except for a few minor comments below, I recommend publication of this manuscript.
\begin{quote}
\textit{Thank you for your feedback. We appreciate the interest and comments below.}
\end{quote}
Line 58. The authors stated that the canonical correspondence analysis explained 42\% of the observed inertia in the total metabolome. I suggest that the authors explain this because in Figure 2 this analysis explained 71.9\% of the total metabolome variation (CCA1: 45.3\% + CCA2: 26.6\%).
\begin{quote}
\textit{Thanks for your inquiry. Canonical correspondence analysis did explain 42\% of the observed inertia. Of that 42\% of explained inertia, axis 1 explained 45.3\% and axis 2 explained 26.6\%. Summing the explained variation of the other three axes with the first two would account for 100\% of the variation captured in the canonical ordination, or 42\% of the observed inertia (constrained $\chi^2$) in the total metabolome. This explanation has been elaborated upon in the text. }
\end{quote}
Lines 146 and 150. Change ‘conetainers’ to ‘containers and container’
\begin{quote}
\textit{Thanks for suggesting this. Conetainers should be Cone-tainers to refer to the specific type of container used to house the plants. We have clarified this in the Methods section adding both information about the company from which they can be procured and changing the spelling in lines 148 and 150 for clarification. }
\end{quote}
I suggest that instead of putting in the appendix, the package names used for their statistical analysis, the authors should include these names in the text to educate readers, especially for those not familiar with the ‘R software’
\begin{quote}
\textit{Thanks for your suggestion. This is a good idea; we have incorporated names of the software packages in the main body of the text.}
\end{quote}
\end{document}
|
{"hexsha": "f1f28cfc150fb27a5c49a85460832b4746c6168f", "size": 5405, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manuscript/sci-rep/reviewer-letter/reviewer-response.tex", "max_stars_repo_name": "acetworld/bermuda-grass-metabolomics", "max_stars_repo_head_hexsha": "38785b9cf0099ee496236c5fa80552aaee9ab105", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manuscript/sci-rep/reviewer-letter/reviewer-response.tex", "max_issues_repo_name": "acetworld/bermuda-grass-metabolomics", "max_issues_repo_head_hexsha": "38785b9cf0099ee496236c5fa80552aaee9ab105", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manuscript/sci-rep/reviewer-letter/reviewer-response.tex", "max_forks_repo_name": "acetworld/bermuda-grass-metabolomics", "max_forks_repo_head_hexsha": "38785b9cf0099ee496236c5fa80552aaee9ab105", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6390977444, "max_line_length": 541, "alphanum_fraction": 0.7753931545, "num_tokens": 1424}
|
!----------------------------------------------------------------
!*** Copyright Notice ***
!IMPACT-Z Copyright (c) 2016, The Regents of the University of California, through
!Lawrence Berkeley National Laboratory (subject to receipt of any required approvals
!from the U.S. Dept. of Energy). All rights reserved.
!If you have questions about your rights to use or distribute this software,
!please contact Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov.
!NOTICE. This Software was developed under funding from the U.S. Department of Energy
!and the U.S. Government consequently retains certain rights. As such, the U.S. Government
!has been granted for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable,
!worldwide license in the Software to reproduce, distribute copies to the public, prepare
!derivative works, and perform publicly and display publicly, and to permit other to do so.
!****************************
! Description: v.1.0
! Comments:
!----------------------------------------------------------------
program main
use AccSimulatorclass
implicit none
include 'mpif.h'
double precision :: time
integer ierr,my_rank
integer nargs,flag, ii, nparm
real(kind=8), dimension(100)::X
real(kind=8)::objval=1e30
character(len=1024) :: strings
integer parent
call MPI_INIT(ierr)
call MPI_COMM_GET_PARENT(parent, ierr) ! YL: this is needed if this function is spawned by a master process
call MPI_COMM_RANK(MPI_COMM_WORLD,my_rank,ierr)
if(my_rank==0)then
open(unit=3,file='matchquad.in',status='old')
read(3,*)nparm,ii
close(3)
nargs = iargc()
do ii=1,nparm
call getarg(ii,strings)
read(strings,*)X(ii)
enddo
call FTN(X, objval,nparm,0)
endif
call construct_AccSimulator(time)
call run_AccSimulator()
if(my_rank==0)then
call FTN(X, objval,nparm,1)
print*,"X:",X(1:nparm), "objective: ",objval
endif
if(parent/= MPI_COMM_NULL)then
call MPI_REDUCE(objval, MPI_BOTTOM, 1, MPI_double_precision, MPI_MIN, 0, parent,ierr)
call MPI_Comm_disconnect(parent,ierr)
endif
call destruct_AccSimulator(time)
end program main
subroutine FTN(X, objval,nparm,inout)
use AccSimulatorclass
implicit none
include 'mpif.h'
integer inout
integer nparm,ierr,nproc
real(kind=8), dimension(nparm), intent(in) :: X
real(kind=8), intent(out) :: objval
integer:: i
character*220 :: comst
character*220 :: strings1,strings2
character*250 :: comst2
character, dimension(30,nparm) :: bc
character :: xbc
integer, dimension(nparm) :: nline,nrow,nlinein,nrowin
integer :: j,j1,j2,iline,nlinemax,ii
integer :: nchars=200,code,j3,j4
integer :: j5,j6,j7,j8,j9,j50,j4b,j8b,j7b
character(30) :: tmpstr
character(30) :: tmpstr2
real*8,dimension(7) :: fvalue
real*8,dimension(10) :: objtmp1,objtmp0
!real*8,dimension(10) :: objtmp1,objtmp0
real*8 :: fvalueaa,gambet,emtx,emty
real(kind=8), dimension(nparm) :: XX,x00,xin
real*8 :: deltx1,deltx2,deltx3,delty1,delty2,delty3
integer :: nlaser,ipt
real*8 :: xtmp,xxtmp,x1,x2,x3,x4
real*8 :: delt1,delt2,delt3,delt4
integer :: ip,imod
real*8 :: objtmp01,objtmp03
integer :: npx,npy,firstflag=1
!read in control parameters and target values
open(unit=3,file='matchquad.in',status='old')
read(3,*)j1,j2
read(3,*)nlinein(1:nparm)
read(3,*)nrowin(1:nparm)
read(3,*)emtx,emty
read(3,*)objtmp0(1:4)
read(3,*)xtmp
read(3,*)xin(1:nparm)
close(3)
!initial design values of the parameters
x00(1:nparm) = xin(1:nparm)
nline(1:nparm) = nlinein(1:nparm)
nrow(1:nparm) = nrowin(1:nparm)
do i =1, nparm
XX(i) = x00(i)*(1.0d0+X(i))
enddo
if(inout==0)then
open(unit=1,file='ImpactZ0.in',status='old')
open(unit=2,file='ImpactZ.in',status='unknown')
! location of control parameters inside the input file Impactz.in
!
! quad 1 parameter
! nline(1) = 83
! nrow(1) = 5
! quad 2 parameters
! nline(2) = 85
! nrow(2) = 5
! quad 3 parameters
! nline(3) = 87
! nrow(3) = 5
! quad 4 parameters
! nline(4) = 89
! nrow(4) = 5
! quad 5 parameters
! nline(5) = 91
! nrow(5) = 5
! ...
!--
print*,"Xx: ",XX(1:nparm),nparm
print*,"nline: ",nline(1:nparm),nparm
print*,"nrow: ",nrow(1:nparm),nparm
!-----------------
! prepare Impact input using the control parameters
!
nlinemax = 1000000
iline = 0
ii = 0
do i = 1, nlinemax
READ(1, "(A)", ADVANCE="no", IOSTAT=code,end=111) comst
! change the process decomposition if needed here
if(comst(1:1)/='!' .and. firstflag==1)then
firstflag=0
read(comst,*)npx,npy
call MPI_COMM_SIZE(MPI_COMM_WORLD,nproc,ierr)
if(nproc/=npx*npy)then
write(*,*)'process grid product does not match MPI process count',nproc,npx,npy
npx=2**floor(log(sqrt(dble(nproc)))/log(2d0))
npy=nproc/npx
if(nproc/=npx*npy)then
npx=nproc
npy=1
endif
write(strings1,*)npx
write(strings2,*)npy
comst=trim(adjustl(strings1))//" "//trim(adjustl(strings2))
! write(*,*)comst
endif
endif
iline = iline + 1
do ip = 1, nparm
if(i.eq.nline(ip)) then
j1 = 1
j2 = 0
j3 = 0
j4 = 0
j7 = 0
j4b = 0
j7b = 0
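! The scan below splits the line into three zones around field number
! nrow(ip): j3 counts the characters before that field, j7 the characters
! of the field itself, and j4 the characters after it; the field is then
! replaced by the new parameter value XX(ip) when the line is rewritten.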
do j = 1, nchars
if(comst(j:j).ne." ") then
j2 = j2 + 1
!bc(j2,j1) = comst(j:j)
xbc = comst(j:j)
else
j1 = j1+1
j2 = 0
endif
if(j1.lt.nrow(ip)) then
j3 = j3 + 1
else if(j1.gt.nrow(ip)) then
j4 = j4 + 1
else
j7 = j7 + 1
endif
enddo
write(tmpstr,*)XX(ip)
j8 = len(tmpstr)
j8b = 0
j4b = 0
j5 = j3+j8+j4+j8b+j4b
do j = 1, j3
comst2(j:j) = comst(j:j)
enddo
do j = j3+1, j3+j8
j9 = j - j3
comst2(j:j) = tmpstr(j9:j9)
enddo
do j = j3+j8+1,j3+j8+j4
j6 = j-j8+j7
comst2(j:j) = comst(j6:j6)
enddo
write(2,"(A)")comst2(1:j5)
imod = 1
exit
else
imod = 0
endif
!print*,"ip: ",ip,imod
enddo
if(imod.eq.0)then
write(2,"(A)")trim(comst)
endif
!print*,"line: ",iline,imod
enddo
111 continue
close(1)
call flush(2)
close(2)
!112 format(A)
else
objtmp1 = 0.0d0
close(18)
close(24)
close(25)
close(3)
!calculate <xz>,<x'z>,<xpz>,<x'pz> from the final particle distribution.
open(unit=15,file='fort.18',status='old')
do i = 1, nlinemax
read(15,*,end=443)fvalue(1:6)
enddo
443 continue
close(15)
gambet = fvalue(3)*fvalue(5)
!objtmp0(1) is beta_x,objtmp01 is sig_x
objtmp01 = sqrt(objtmp0(1)*emtx/gambet)
objtmp03 = sqrt(objtmp0(3)*emty/gambet)
open(unit=14,file='fort.24',status='old')
do i = 1, nlinemax
read(14,*,end=444)fvalue(1:7)
enddo
444 continue
close(14)
objtmp1(1) = fvalue(3)
objtmp1(2) = fvalue(6)
open(unit=13,file='fort.25',status='old')
do i = 1, nlinemax
read(13,*,end=445)fvalue(1:7)
enddo
445 continue
close(13)
objtmp1(3) = fvalue(3)
objtmp1(4) = fvalue(6)
delt1 = (objtmp1(1)-objtmp01)/objtmp01
!alpha is dimensionless
delt2 = (objtmp1(2)-objtmp0(2))
delt3 = (objtmp1(3)-objtmp03)/objtmp03
delt4 = (objtmp1(4)-objtmp0(4))
objval = sqrt((delt1**2+delt2**2+delt3**2+delt4**2)/4)
open(unit=11,file='fort.3',status='unknown',position='append')
write(11,*)"obj: ",XX(1:nparm),objval
call flush(11)
close(11)
endif
end subroutine FTN
|
{"hexsha": "5612a24438e7ba09573d217b0a87f7cd534cfd85", "size": 10715, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "examples/IMPACT-Z/impact-z-driver/main.f90", "max_stars_repo_name": "gptune/GPTune", "max_stars_repo_head_hexsha": "7ed5c63275da2d9625880c8eae837b8eb2d2df81", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2020-09-09T19:58:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T03:16:48.000Z", "max_issues_repo_path": "examples/IMPACT-Z/impact-z-driver/main.f90", "max_issues_repo_name": "gptune/GPTune", "max_issues_repo_head_hexsha": "7ed5c63275da2d9625880c8eae837b8eb2d2df81", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-10-16T11:55:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T06:12:45.000Z", "max_forks_repo_path": "examples/IMPACT-Z/impact-z-driver/main.f90", "max_forks_repo_name": "gptune/GPTune", "max_forks_repo_head_hexsha": "7ed5c63275da2d9625880c8eae837b8eb2d2df81", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-06-14T00:35:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T18:46:59.000Z", "avg_line_length": 36.5699658703, "max_line_length": 113, "alphanum_fraction": 0.4323845077, "num_tokens": 2769}
|
import math
import numpy as np
from netCDF4 import Dataset
from pywrfplotParams import *
# constants used to calculate moist adiabatic lapse rate
# See formula 3.16 in Rogers&Yau
a = 2./7.
b = eps*L*L/(R*cp)
c = a*L/R
def gamma_s(T,p):
"""Calculates moist adiabatic lapse rate for T (Celsius) and p (Pa)
Note: We calculate dT/dp, not dT/dz
See formula 3.16 in Rogers&Yau for dT/dz, but this must be combined with
the dry adiabatic lapse rate (gamma = g/cp) and the
inverse of the hydrostatic equation (dz/dp = -RT/pg)"""
esat = es(T)
wsat = eps*esat/(p-esat) # Rogers&Yau 2.18
numer = a*(T+T_zero) + c*wsat
denom = p * (1 + b*wsat/((T+T_zero)**2))
return numer/denom # Rogers&Yau 3.16
def es(T):
"""Returns saturation vapor pressure (Pascal) at temperature T (Celsius)
Formula 2.17 in Rogers&Yau"""
return 611.2*np.exp(17.67*T/(T+243.5))
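# Quick sanity check (approximate reference values, for orientation only):
# es(0.) is 611.2 Pa by construction, and es(20.) evaluates to roughly
# 2.3 kPa, in line with standard saturation vapor pressure tables.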
def e(w,p):
"""Returns vapor pressure (Pa) at mixing ratio w (kg/kg) and pressure p (Pa)
Formula 2.18 in Rogers&Yau"""
return w*p/(w+eps)
def td(e):
"""Returns dew point temperature (C) at vapor pressure e (Pa)
Insert Td in 2.17 in Rogers&Yau and solve for Td"""
return 243.5 * np.log(e/611.2)/(17.67-np.log(e/611.2))
def interp(geopot, pres, p):
""" Returns the interpolated geopotential at p using the values in pres.
The geopotential for an element in pres must be given by the corresponding
element in geopot. The length of the geopot and pres arrays must be the same.
"""
if (len(geopot) != len(pres)):
raise Exception("Arrays geopot and pres must have same length")
k = len(pres)-1
while (k > 1 and pres[k-1] <= p):
k = k-1
if (pres[k] > p):
w = 0.0
elif (pres[k-1] < p):
w = 1.0
else:
w = (p-pres[k])/(pres[k-1]-pres[k])
return (1.0-w)*geopot[k] + w*geopot[k-1]
def openWRF(nest):
file_wrf = directory + 'wrfout_d0' + str(nest) + '_' + date + '.nc'
try:
return Dataset(file_wrf, 'r+')
except Exception:
print('Found no WRF-file for nest: ' + str(nest) + '. Looked for: ' + file_wrf)
return None
def openWPS(nest):
file_met = directory + 'met_em.d0' + str(nest) + '.' + date + '.nc'
try:
return Dataset(file_met, 'r+')
except Exception:
print('Found no WPS-file for nest: ' + str(nest) + '. Looked for: ' + file_met)
return None
def getDimensions(nc):
Nx = nc.getncattr('WEST-EAST_GRID_DIMENSION')-1
Ny = nc.getncattr('SOUTH-NORTH_GRID_DIMENSION')-1
Nz = nc.getncattr('BOTTOM-TOP_GRID_DIMENSION')-1
dx = nc.getncattr('DX')
dy = nc.getncattr('DY')
lons = nc.variables['XLONG'][0]
lats = nc.variables['XLAT'][0]
# find coordinates for focus point
x,y = getXY(lons[Ny//2,:],lats[:,Nx//2])
return Nx,Ny,Nz,lons,lats,dx,dy,x,y
def getXY(lon,lat):
x_nr = 0
y_nr = 0
while (lon[x_nr] < lon_focuspoint):
x_nr += 1
while (lat[y_nr] < lat_focuspoint):
y_nr += 1
print "x_nr: " + str(x_nr), " Lon: ", str(lon[x_nr])
print "y_nr: " + str(y_nr), " Lat: ", str(lat[y_nr])
return x_nr,y_nr
|
{"hexsha": "0ff2f2dcf3a959a3533911964d760675c218b287", "size": 3161, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotTools.py", "max_stars_repo_name": "vorcil/epicare", "max_stars_repo_head_hexsha": "3346663ce5c257caa125161c23535c16836502f2", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plotTools.py", "max_issues_repo_name": "vorcil/epicare", "max_issues_repo_head_hexsha": "3346663ce5c257caa125161c23535c16836502f2", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotTools.py", "max_forks_repo_name": "vorcil/epicare", "max_forks_repo_head_hexsha": "3346663ce5c257caa125161c23535c16836502f2", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.61, "max_line_length": 85, "alphanum_fraction": 0.6070863651, "include": true, "reason": "import numpy", "num_tokens": 1054}
|
import pytest
from polyfitter import Polyfitter
import numpy as np
from numpy.testing import assert_array_almost_equal
def test_get_morph():
"""Can we get the proper morphology type?"""
ID = 'OGLE-BLG-ECL-040474'
P=1.8995918
t0=7000.90650
path_to_ogle = 'http://ogledb.astrouw.edu.pl/~ogle/OCVS/data/I/'+ID[-2:]+'/'+ID+'.dat'
lc = np.loadtxt(path_to_ogle).T
time = lc[0]
mag = lc[1]
err = lc[2]
pf = Polyfitter(scale='mag')
t0new,phase,polyfit,messages = pf.get_polyfit(time,mag,err,P,t0)
assert_array_almost_equal( pf.c , np.array([0.42073795]) )
assert_array_almost_equal( pf.get_c( np.vstack((polyfit,polyfit)) ) , np.array([0.42073795,0.42073795]) )
|
{"hexsha": "072d138d3b7609cafff2303fc088bb7b4da0ce7d", "size": 716, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_polyfit.py", "max_stars_repo_name": "astrobatty/polyfitter", "max_stars_repo_head_hexsha": "4c68edfacd6ef9518d970e949d1aa7c0d96a2ce9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-20T02:47:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-20T02:47:52.000Z", "max_issues_repo_path": "tests/test_polyfit.py", "max_issues_repo_name": "astrobatty/polyfitter", "max_issues_repo_head_hexsha": "4c68edfacd6ef9518d970e949d1aa7c0d96a2ce9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_polyfit.py", "max_forks_repo_name": "astrobatty/polyfitter", "max_forks_repo_head_hexsha": "4c68edfacd6ef9518d970e949d1aa7c0d96a2ce9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-27T10:52:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-27T10:52:51.000Z", "avg_line_length": 26.5185185185, "max_line_length": 109, "alphanum_fraction": 0.6731843575, "include": true, "reason": "import numpy,from numpy", "num_tokens": 235}
|
[STATEMENT]
lemma range_to_fract_embed_poly: assumes "set (coeffs p) \<subseteq> range to_fract"
shows "p = map_poly to_fract (map_poly inv_embed p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
have "p = map_poly (to_fract o inv_embed) p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p = map_poly (to_fract \<circ> inv_embed) p
[PROOF STEP]
by (rule sym, rule map_poly_idI, insert assms, auto)
[PROOF STATE]
proof (state)
this:
p = map_poly (to_fract \<circ> inv_embed) p
goal (1 subgoal):
1. p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
p = map_poly (to_fract \<circ> inv_embed) p
goal (1 subgoal):
1. p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
have "\<dots> = map_poly to_fract (map_poly inv_embed p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_poly (to_fract \<circ> inv_embed) p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
by (subst map_poly_map_poly, auto)
[PROOF STATE]
proof (state)
this:
map_poly (to_fract \<circ> inv_embed) p = map_poly to_fract (map_poly inv_embed p)
goal (1 subgoal):
1. p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
p = map_poly to_fract (map_poly inv_embed p)
goal (1 subgoal):
1. p = map_poly to_fract (map_poly inv_embed p)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
p = map_poly to_fract (map_poly inv_embed p)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 765, "file": "Berlekamp_Zassenhaus_Unique_Factorization_Poly", "length": 10}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 3 20:22:12 2020
@author: ramonpuga
"""
# K-Means
# Import the working libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the dataset with pandas
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
# Elbow method to find the optimal number of clusters
from sklearn.cluster import KMeans
# Fit 10 K-Means models, compute the WCSS for each, and plot the curve
# Compute the WCSS (within-cluster sum of squares)
wcss = []
# 10 segments: range(1, 11); Python excludes the last value of the range (11)
for i in range(1, 11):
# To avoid the random initialization trap, use k-means++
# Cap the iterations in case the algorithm never converges; default max_iter = 300
# Number of random initializations n_init; default = 10
kmeans = KMeans(n_clusters = i, init = "k-means++", max_iter = 300, n_init = 10, random_state = 0)
kmeans.fit(X)
# The sum of squares is already computed by the algorithm in the inertia_ attribute
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("Métocod del codo")
plt.xlabel("Número de clusters")
plt.ylabel("WCSS(k)")
plt.show()
# The optimal number of clusters here is 5
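# Optional cross-check (not part of the original workflow): approximate the
# elbow as the point of maximum curvature of the WCSS curve, using the
# largest second difference. A rough heuristic, not a replacement for the plot.
second_diff = np.diff(wcss, n=2)
elbow_k = int(np.argmax(second_diff)) + 2  # second differences start at k = 2
print("Heuristic elbow at k =", elbow_k)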
# Apply k-means with the chosen k to segment the dataset
kmeans = KMeans(n_clusters = 5, init = "k-means++", max_iter = 300, n_init = 10, random_state = 0)
# Fit k-means and also predict which cluster each point belongs to
# Each row is assigned a cluster label from 0 to 4
y_kmeans = kmeans.fit_predict(X)
# Visualize the clusters
# Plot the point cloud, coloring each point by the cluster it belongs to
# Select the points with cluster == 0 to paint them one color, cluster == 1 another color, etc.
# From the matrix X take only the rows with cluster == 0; column 0 of the
# feature matrix X goes on the x axis and column 1 on the y axis
# Choose the marker size s = 100, the color, and a label to tell the clusters apart
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = "red", label = "Cluster 1")
# Repeat the line for each cluster
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = "blue", label = "Cluster 2")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = "green", label = "Cluster 3")
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = "cyan", label = "Cluster 4")
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = "magenta", label = "Cluster 5")
# Plot the centroids (geometric center of each cluster): all rows, column 0 and column 1
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = "yellow", label = "Centroids")
plt.title("Cluster de clientes")
plt.xlabel("Ingresos anuales (en miles de $)")
plt.ylabel("Puntuación de gasto (1-100)")
# Añadimos la leyenda de colores
plt.legend()
plt.show()
# We could name each cluster --> customers who spend a lot but earn little, etc.
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s = 100, c = "red", label = "Cautious")
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s = 100, c = "blue", label = "Standard")
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s = 100, c = "green", label = "Target")
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s = 100, c = "cyan", label = "Careless")
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s = 100, c = "magenta", label = "Conservative")
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = "yellow", label = "Centroids")
plt.title("Cluster de clientes")
plt.xlabel("Ingresos anuales (en miles de $)")
plt.ylabel("Puntuación de gasto (1-100)")
plt.legend()
plt.show()
# Although the plot is 2-dimensional, the algorithm is generic and supports as many features as needed
|
{"hexsha": "97045af5944e65ed6f419c3d9ac6b55f6da81341", "size": 4007, "ext": "py", "lang": "Python", "max_stars_repo_path": "datasets/Part 4 - Clustering/Section 24 - K-Means Clustering/my_kmeans.py", "max_stars_repo_name": "nylvam/machinelearning-az", "max_stars_repo_head_hexsha": "2ff139082b61ace5a94ef86517c84febee3b7ecb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datasets/Part 4 - Clustering/Section 24 - K-Means Clustering/my_kmeans.py", "max_issues_repo_name": "nylvam/machinelearning-az", "max_issues_repo_head_hexsha": "2ff139082b61ace5a94ef86517c84febee3b7ecb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datasets/Part 4 - Clustering/Section 24 - K-Means Clustering/my_kmeans.py", "max_forks_repo_name": "nylvam/machinelearning-az", "max_forks_repo_head_hexsha": "2ff139082b61ace5a94ef86517c84febee3b7ecb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2771084337, "max_line_length": 127, "alphanum_fraction": 0.6930371849, "include": true, "reason": "import numpy", "num_tokens": 1334}
|
#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
means_0 = [0.1, 0.5, 0.9]
means_1 = [0.9, 0.5, 0.1]
maxMean = max(max(means_0), max(means_1))
nbArms = len(means_0)
horizon = 1000
def reward(arm, t):
if t <= horizon/2:
return np.random.binomial(1, means_0[arm])
else:
return np.random.binomial(1, means_1[arm])
def simulateBandit(bandit, detect):
pulls = np.zeros(nbArms, dtype=int)
all_pulls = [ ]
rewards = np.zeros(nbArms)
all_rewards = [ [] for i in range(nbArms) ]
all_rewards_shared = [ ]
last_restart = 0
for t in range(horizon):
choice = bandit(pulls, rewards, t - last_restart)
pulls[choice] += 1
all_pulls.append(choice)
r = reward(choice, t)
rewards[choice] += r
all_rewards[choice].append(r)
all_rewards_shared.append(r)
# print(f"Time {t}, bandit chose {choice}, saw reward {r} (pulls = {pulls}, rewards = {rewards})")
if detect(pulls, rewards, all_rewards, choice, r, t, last_restart):
print(f" CDP algorithm {detect.__name__} chose to restart at time {t}, previous restart was {last_restart}")
last_restart = t
pulls[:] = 0
rewards[:] = 0
all_rewards = [ [] for i in range(nbArms) ]
# now compute the history of regret
maxMeans = np.array(([max(means_0)] * (horizon//2)) + ([max(means_1)] * (horizon//2)))
all_rewards_shared = np.array(all_rewards_shared)
return np.cumsum(maxMeans - all_rewards_shared)
def ucb(pulls, rewards, t, *args, **kwargs):
if np.any(pulls <= 0):
return np.random.choice(np.where(pulls <= 0)[0])
indexes = rewards/pulls + np.sqrt(np.log(t) / pulls)
return np.random.choice(np.where(np.isclose(indexes, np.max(indexes)))[0])
def dontdetect(pulls, rewards, all_rewards, arm, r, t, last_restart):
return False
proba_of_random_detection = 0.01
def random_detection(pulls, rewards, all_rewards, arm, r, t, last_restart):
if max(t, last_restart) <= horizon/2:
return False
else:
return np.random.binomial(1, proba_of_random_detection)
w = 80
threshold_b = 20
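# Sliding-window change-point detector in the spirit of M-UCB: keep the
# last w rewards of the arm just pulled, and flag a change when the sums
# of the two half-windows differ by at least the threshold b.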
def detect_chernoff(pulls, rewards, all_rewards, arm, r, t, last_restart):
data = all_rewards[arm]
if len(data) < w:
return False
last_data = data[-w:]
left_sum = np.sum(last_data[:w//2])
right_sum = np.sum(last_data[w//2:])
return abs(left_sum - right_sum) >= threshold_b
alpha = 0.01
gamma = alpha / nbArms
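# Forced exploration ("tracking"): whenever some arm has fewer than
# alpha * t pulls since the last restart, play one of those arms. This keeps
# every arm sampled at a linear rate, so a change-point on a currently
# suboptimal arm can still be detected.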
def ucb_ForcedExploration(pulls, rewards, t, *args, **kwargs):
if np.any(pulls <= 0):
return np.random.choice(np.where(pulls <= 0)[0])
assert np.all(pulls >= gamma*t), "Error: we really believe that n_i(t) - n_i(s) >= gamma (t - s) for any s <= t in sequence [last restart, current time] (for any current time)."
if np.any(pulls <= alpha*t):
return np.random.choice(np.where(pulls <= alpha*t)[0])
indexes = rewards/pulls + np.sqrt(np.log(t) / pulls)
return np.random.choice(np.where(np.isclose(indexes, np.max(indexes)))[0])
# repetitions = 1  # uncomment for a quick debug run
repetitions = 50
def main():
plt.figure()
print(f"\n\nSimulating UCB + no detection, for {repetitions} times...")
results = [simulateBandit(ucb, dontdetect) for _ in tqdm.trange(repetitions)]
# sns.distplot(results, label="UCB")
plt.plot(np.mean(results, axis=0), label="UCB")
print(f"\n\nSimulating UCB with forced exploration + no detection, for {repetitions} times...")
results = [simulateBandit(ucb_ForcedExploration, dontdetect) for _ in tqdm.trange(repetitions)]
# sns.distplot(results, label=fr"UCB + tracking $\alpha={alpha:.3g}$")
plt.plot(np.mean(results, axis=0), label=fr"UCB + tracking $\alpha={alpha:.3g}$")
print(f"\n\nSimulating UCB + random detection delay, for {repetitions} times...")
results = [simulateBandit(ucb, random_detection) for _ in tqdm.trange(repetitions)]
# sns.distplot(results, label=fr"UCB + random CPD ($P={proba_of_random_detection}$ to detect after CP)")
plt.plot(np.mean(results, axis=0), label=fr"UCB + random CPD ($P={proba_of_random_detection}$ to detect after CP)")
print(f"\n\nSimulating UCB with forced exploration + random detection delay, for {repetitions} times...")
results = [simulateBandit(ucb_ForcedExploration, random_detection) for _ in tqdm.trange(repetitions)]
# sns.distplot(results, label=fr"UCB + random CPD ($P={proba_of_random_detection}$ to detect after CP) + tracking $\alpha={alpha:.3g}$")
plt.plot(np.mean(results, axis=0), label=fr"UCB + random CPD ($P={proba_of_random_detection}$ to detect after CP) + tracking $\alpha={alpha:.3g}$")
print(f"\n\nSimulating M-UCB($w={w}$, $b={threshold_b}$), for {repetitions} times...")
results = [simulateBandit(ucb, detect_chernoff) for _ in tqdm.trange(repetitions)]
# sns.distplot(results, label=f"M-UCB($w={w}$, $b={threshold_b}$)")
plt.plot(np.mean(results, axis=0), label=f"M-UCB($w={w}$, $b={threshold_b}$)")
print(f"\n\nSimulating M-UCB($w={w}$, $b={threshold_b}$) with forced exploration, for {repetitions} times...")
results = [simulateBandit(ucb_ForcedExploration, detect_chernoff) for _ in tqdm.trange(repetitions)]
# sns.distplot(results, label=fr"M-UCB($w={w}$, $b={threshold_b}$) + tracking $\alpha={alpha:.3g}$")
plt.plot(np.mean(results, axis=0), label=fr"M-UCB($w={w}$, $b={threshold_b}$) + tracking $\alpha={alpha:.3g}$")
plt.title(f"Distribution of regret after $T={horizon}$ time steps")
plt.legend()
plt.show()
if __name__ == "__main__":
main()
|
{"hexsha": "e24de5f6ce0b0cba5714fc38a0757a3069909d02", "size": 5677, "ext": "py", "lang": "Python", "max_stars_repo_path": "2-Chapters/6-Chapter/nonstatbandits/simulateForcedExploration.py", "max_stars_repo_name": "Naereen/phd-thesis", "max_stars_repo_head_hexsha": "0fa93ca0d738771f4215bc4aeb66157f2026ba00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-11-18T12:22:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T19:29:48.000Z", "max_issues_repo_path": "2-Chapters/6-Chapter/nonstatbandits/simulateForcedExploration.py", "max_issues_repo_name": "Naereen/phd-thesis", "max_issues_repo_head_hexsha": "0fa93ca0d738771f4215bc4aeb66157f2026ba00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-11-18T09:19:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-08T14:13:08.000Z", "max_forks_repo_path": "2-Chapters/6-Chapter/nonstatbandits/simulateForcedExploration.py", "max_forks_repo_name": "Naereen/phd-thesis", "max_forks_repo_head_hexsha": "0fa93ca0d738771f4215bc4aeb66157f2026ba00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-28T20:56:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-13T11:11:57.000Z", "avg_line_length": 42.3656716418, "max_line_length": 181, "alphanum_fraction": 0.6589748106, "include": true, "reason": "import numpy", "num_tokens": 1679}
|
|