text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
function get_mnist_providers(batch_size::Int; data_name=:data, label_name=:softmax_label, flat=true)
    # Download MNIST into Pkg.dir("MXNet")/data/mnist on first use.
    filenames = mx.get_mnist_ubyte()

    # Training provider shuffles each epoch; evaluation provider is
    # deterministic so validation metrics are reproducible.
    train_provider = mx.MNISTProvider(
        image = filenames[:train_data],
        label = filenames[:train_label],
        data_name = data_name,
        label_name = label_name,
        batch_size = batch_size,
        shuffle = true,
        flat = flat,
        silent = true,
    )
    eval_provider = mx.MNISTProvider(
        image = filenames[:test_data],
        label = filenames[:test_label],
        data_name = data_name,
        label_name = label_name,
        batch_size = batch_size,
        shuffle = false,
        flat = flat,
        silent = true,
    )
    return (train_provider, eval_provider)
end
|
{"hexsha": "12160cf6f18e79594c1fcc23d820b86a020cf557", "size": 1667, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "julia/examples/mnist/mnist-data.jl", "max_stars_repo_name": "Vikas-kum/incubator-mxnet", "max_stars_repo_head_hexsha": "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 228, "max_stars_repo_stars_event_min_datetime": "2018-12-06T09:34:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T17:02:02.000Z", "max_issues_repo_path": "julia/examples/mnist/mnist-data.jl", "max_issues_repo_name": "Vikas-kum/incubator-mxnet", "max_issues_repo_head_hexsha": "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 187, "max_issues_repo_issues_event_min_datetime": "2018-03-16T23:44:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T21:19:54.000Z", "max_forks_repo_path": "julia/examples/mnist/mnist-data.jl", "max_forks_repo_name": "Vikas-kum/incubator-mxnet", "max_forks_repo_head_hexsha": "ba02bf2fe2da423caa59ddb3fd5e433b90b730bf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 51, "max_forks_repo_forks_event_min_datetime": "2019-07-12T05:10:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-28T16:19:06.000Z", "avg_line_length": 49.0294117647, "max_line_length": 100, "alphanum_fraction": 0.6838632274, "num_tokens": 352}
|
from random import *
import math
import argparse
import json
from PIL import Image, ImageDraw, ImageOps
from filters import *
from strokesort import *
import perlin
from util import *
# Runtime configuration. Each of these module-level flags can be overridden
# from the command line (see the __main__ block at the bottom of the file).
no_cv = False                    # True = skip OpenCV, use pure-PIL filters
export_path = "output/out.svg"   # where the generated SVG is written
draw_contours = True
draw_hatch = True
show_bitmap = False              # preview the strokes in a bitmap window
resolution = 1024
hatch_size = 16
contour_simplify = 1

try:
    import numpy as np
    import cv2
# Bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt and
# SystemExit. Only import failures should trigger NO_CV mode.
except ImportError:
    print("Cannot import numpy/openCV. Switching to NO_CV mode.")
    no_cv = True
def find_edges(IM):
    """Return a thresholded edge image for PIL image `IM`.

    Uses OpenCV Canny edges when available; otherwise falls back to the
    pure-PIL Sobel masks from `filters` (mutates IM in place in that path).
    """
    print("finding edges...")
    if no_cv:
        # Pure-PIL fallback: apply Sobel X/Y kernels via filters.appmask.
        # appmask(IM, [F_Blur])  # optional pre-blur, disabled upstream
        appmask(IM, [F_SobelX, F_SobelY])
    else:
        blurred = cv2.GaussianBlur(np.array(IM), (3, 3), 0)
        IM = Image.fromarray(cv2.Canny(blurred, 100, 200))
    # Binarise: pixels > 128 become 255, everything else 0.
    return IM.point(lambda p: p > 128 and 255)
def getdots(IM):
    """Run-length encode the white (255) pixels of each image row.

    Returns a list with one entry per row y in [0, h-2]; each entry is a
    list of (x_start, extent) pairs, where `extent` counts how many extra
    adjacent white pixels follow x_start. Note: column 0 and the last row
    are never scanned (preserved from the original scan bounds).
    """
    print("getting contour points...")
    px = IM.load()
    w, h = IM.size
    rows = []
    for y in range(h - 1):
        runs = []
        for x in range(1, w):
            if px[x, y] != 255:
                continue
            if runs and x - runs[-1][0] == runs[-1][-1] + 1:
                # Adjacent white pixel: grow the current run.
                runs[-1] = (runs[-1][0], runs[-1][-1] + 1)
            else:
                runs.append((x, 0))
        rows.append(runs)
    return rows
def connectdots(dots):
    """Link the per-row pixel runs from getdots() into vertical contours.

    A run at (x, y) is appended to the contour whose tip sits at the
    nearest x on row y-1 (within a horizontal tolerance of 3); otherwise it
    starts a new contour. Short stray contours (< 4 points) that stopped
    growing more than one row ago are pruned as they are passed.

    Args:
        dots: list of rows, each a list of (x, v) pairs (v unused here,
              always >= 0 as produced by getdots).

    Returns:
        List of contours, each a list of (x, y) points, top to bottom.
    """
    print("connecting contour points...")
    contours = []
    for y in range(len(dots)):
        for x, v in dots[y]:
            if v > -1:
                if y == 0:
                    contours.append([(x, y)])
                else:
                    # Find the nearest run on the previous row.
                    closest = -1
                    cdist = 100
                    for x0, v0 in dots[y - 1]:
                        if abs(x0 - x) < cdist:
                            cdist = abs(x0 - x)
                            closest = x0
                    if cdist > 3:
                        contours.append([(x, y)])
                    else:
                        found = 0
                        for i in range(len(contours)):
                            if contours[i][-1] == (closest, y - 1):
                                contours[i].append((x, y))
                                found = 1
                                break
                        if found == 0:
                            contours.append([(x, y)])
        # Bug fix: the original called contours.remove(c) while iterating
        # over `contours`, which skips the element following each removal
        # and leaves some stray contours unpruned. Rebuild the list instead.
        contours = [c for c in contours
                    if not (c[-1][1] < y - 1 and len(c) < 4)]
    return contours
def getcontours(IM, sc=2, noise=False):
    """Extract edge contours from image `IM` as polylines scaled by `sc`.

    Runs the row-wise contour scan twice — once on the image and once on a
    rotated copy (to catch vertical runs) — merges nearby endpoints,
    subsamples, and optionally jitters points with Perlin noise.
    """
    print("generating contours...")
    IM = find_edges(IM)
    IM1 = IM.copy()
    IM2 = IM.rotate(-90, expand=True).transpose(Image.FLIP_LEFT_RIGHT)
    contours1 = connectdots(getdots(IM1))
    contours2 = connectdots(getdots(IM2))
    # Map the rotated pass back into the original coordinate frame.
    contours2 = [[(b, a) for a, b in contour] for contour in contours2]
    contours = contours1 + contours2

    # Greedily join contours whose endpoints nearly touch.
    # NOTE(review): when i == j, a contour whose own endpoints are within
    # distance 8 is emptied by the j-assignment below; preserved as-is.
    for i in range(len(contours)):
        for j in range(len(contours)):
            if len(contours[i]) > 0 and len(contours[j]) > 0:
                if distsum(contours[j][0], contours[i][-1]) < 8:
                    contours[i] = contours[i] + contours[j]
                    contours[j] = []

    # Keep every 8th point, drop degenerate contours, then scale up.
    contours = [contour[::8] for contour in contours]
    contours = [contour for contour in contours if len(contour) > 1]
    contours = [[(x * sc, y * sc) for x, y in contour] for contour in contours]

    if noise:
        for i in range(len(contours)):
            contours[i] = [
                (int(x + 10 * perlin.noise(i * 0.5, j * 0.1, 1)),
                 int(y + 10 * perlin.noise(i * 0.5, j * 0.1, 2)))
                for j, (x, y) in enumerate(contours[i])
            ]
    return contours
def hatch(IM, sc=16, noise=False):
    """Generate tone-dependent hatching strokes for greyscale image `IM`.

    Each source pixel becomes an sc x sc cell; darker pixels receive more
    strokes: one horizontal line, then a crossed diagonal, then a second
    horizontal line for the darkest tones. Collinear segments whose
    endpoints coincide are stitched into longer polylines.
    """
    print("hatching...")
    px = IM.load()
    w, h = IM.size
    horiz = []  # horizontal strokes
    diag = []   # diagonal (cross-hatch) strokes
    for x0 in range(w):
        for y0 in range(h):
            x = x0 * sc
            y = y0 * sc
            tone = px[x0, y0]
            if tone > 144:
                continue  # light enough: leave the cell blank
            horiz.append([(x, y + sc / 4), (x + sc, y + sc / 4)])
            if tone <= 64:
                diag.append([(x + sc, y), (x, y + sc)])
            if tone <= 16:
                horiz.append([(x, y + sc / 2 + sc / 4),
                              (x + sc, y + sc / 2 + sc / 4)])

    groups = [horiz, diag]
    for k in range(len(groups)):
        # Stitch segments whose endpoints coincide into longer polylines.
        for i in range(len(groups[k])):
            for j in range(len(groups[k])):
                if groups[k][i] != [] and groups[k][j] != []:
                    if groups[k][i][-1] == groups[k][j][0]:
                        groups[k][i] = groups[k][i] + groups[k][j][1:]
                        groups[k][j] = []
        groups[k] = [seg for seg in groups[k] if len(seg) > 0]
    lines = groups[0] + groups[1]

    if noise:
        for i in range(len(lines)):
            lines[i] = [
                (int(x + sc * perlin.noise(i * 0.5, j * 0.1, 1)),
                 int(y + sc * perlin.noise(i * 0.5, j * 0.1, 2)) - j)
                for j, (x, y) in enumerate(lines[i])
            ]
    return lines
def sketch(
        path,
        resolution=1024,
        draw_hatch=True,
        hatch_size=16,
        draw_contours=True,
        contour_simplify=1,
        noise=False,
):
    """Convert the image at `path` into plotter strokes and write an SVG.

    Args:
        path: Image path; also tried relative to images/ with .jpg/.png/.tif.
        resolution: Target output width (pre-halving by makesvg).
        draw_hatch: Add tone hatching strokes.
        hatch_size: Cell size for hatching.
        draw_contours: Add edge-contour strokes.
        contour_simplify: Contour downsampling factor.
        noise: Perturb points with Perlin noise.

    Returns:
        List of strokes (each a list of (x, y) points). Also writes the SVG
        to the module-level `export_path`.

    Raises:
        IOError: If no candidate path could be opened as an image.
    """
    IM = None
    possible = [path, "images/" + path, "images/" + path + ".jpg",
                "images/" + path + ".png", "images/" + path + ".tif"]
    for p in possible:
        try:
            IM = Image.open(p)
            break
        # Bug fix: was a bare `except:`; only treat unreadable/missing
        # files as "try the next candidate".
        except (IOError, OSError):
            pass
    if IM is None:
        # Bug fix: previously fell through to IM.size and crashed with an
        # unhelpful AttributeError when no candidate could be opened.
        raise IOError("Could not open any of: {}".format(possible))
    w, h = IM.size
    IM = IM.convert("L")  # greyscale
    IM = ImageOps.autocontrast(IM, 10)
    lines = []
    if draw_contours:
        lines += getcontours(
            IM.resize((int(resolution / contour_simplify),
                       int(resolution / contour_simplify * h / w))),
            contour_simplify,
            noise=noise,
        )
    if draw_hatch:
        lines += hatch(
            IM.resize((int(resolution / hatch_size),
                       int(resolution / hatch_size * h / w))),
            hatch_size,
            noise=noise,
        )
    lines = sortlines(lines)
    if show_bitmap:
        # Bug fix: PIL requires integer dimensions; resolution * h / w is a
        # float under Python 3.
        disp = Image.new("RGB", (resolution, int(resolution * h / w)),
                         (255, 255, 255))
        draw = ImageDraw.Draw(disp)
        for l in lines:
            draw.line(l, (0, 0, 0), 5)
        disp.show()
    # Bug fix: use a context manager so the file is closed even if the
    # SVG generation or write fails.
    with open(export_path, 'w') as f:
        f.write(makesvg(lines))
    print(len(lines), "strokes.")
    print("done.")
    return lines
def makesvg(lines):
    """Render strokes as an SVG document string.

    Each stroke becomes a black polyline; all coordinates are halved.
    """
    print("generating svg file...")
    parts = ['<svg xmlns="http://www.w3.org/2000/svg" version="1.1">']
    for stroke in lines:
        coords = ",".join(str(c * 0.5)
                          for pt in stroke for c in (pt[0], pt[1]))
        parts.append('<polyline points="' + coords +
                     '" stroke="black" stroke-width="2" fill="none" />\n')
    parts.append('</svg>')
    return "".join(parts)
def lines_to_file(lines, filename):
    """Serialise the stroke list to `filename` as JSON."""
    with open(filename, "w") as out:
        out.write(json.dumps(lines))
def image_to_json(
        image,
        filename,
        resolution=1024,
        draw_hatch=True,
        hatch_size=16,
        draw_contours=True,
        contour_simplify=1,
        noise=False,
):
    """Vectorise `image` with sketch() and dump the strokes to `filename`.

    All keyword arguments are forwarded to sketch() unchanged; the result
    is written as JSON via lines_to_file().
    """
    strokes = sketch(
        image,
        resolution=resolution,
        draw_hatch=draw_hatch,
        hatch_size=hatch_size,
        draw_contours=draw_contours,
        contour_simplify=contour_simplify,
        noise=noise,
    )
    lines_to_file(strokes, filename)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Convert image to vectorized line drawing for plotters.')
    parser.add_argument('-i', '--input', dest='input_path',
                        default='lenna', action='store', nargs='?', type=str,
                        help='Input path')
    parser.add_argument('-o', '--output', dest='output_path',
                        default=export_path, action='store', nargs='?', type=str,
                        help='Output path.')
    parser.add_argument('-b', '--show_bitmap', dest='show_bitmap',
                        const=not show_bitmap, default=show_bitmap,
                        action='store_const',
                        help="Display bitmap preview.")
    parser.add_argument('-nc', '--no_contour', dest='no_contour',
                        const=draw_contours, default=not draw_contours,
                        action='store_const',
                        help="Don't draw contours.")
    parser.add_argument('-nh', '--no_hatch', dest='no_hatch',
                        const=draw_hatch, default=not draw_hatch,
                        action='store_const',
                        help='Disable hatching.')
    parser.add_argument('--no_cv', dest='no_cv',
                        const=not no_cv, default=no_cv,
                        action='store_const',
                        help="Don't use openCV.")
    parser.add_argument('--hatch_size', dest='hatch_size',
                        default=hatch_size, action='store', nargs='?', type=int,
                        help='Patch size of hatches. eg. 8, 16, 32')
    parser.add_argument('--contour_simplify', dest='contour_simplify',
                        default=contour_simplify, action='store', nargs='?', type=int,
                        help='Level of contour simplification. eg. 1, 2, 3')
    args = parser.parse_args()

    export_path = args.output_path
    draw_hatch = not args.no_hatch
    draw_contours = not args.no_contour
    hatch_size = args.hatch_size
    contour_simplify = args.contour_simplify
    show_bitmap = args.show_bitmap
    no_cv = args.no_cv

    # Bug fix: sketch() takes these as parameters that shadow the module
    # globals, so previously --hatch_size, --contour_simplify, -nc and -nh
    # were parsed but silently ignored. Pass the parsed values through.
    sketch(args.input_path,
           draw_hatch=draw_hatch,
           hatch_size=hatch_size,
           draw_contours=draw_contours,
           contour_simplify=contour_simplify)
|
{"hexsha": "7c4d92eaf61ced5382be41c570882d7b0c8e82d9", "size": 9115, "ext": "py", "lang": "Python", "max_stars_repo_path": "linedraw.py", "max_stars_repo_name": "evildmp/linedraw", "max_stars_repo_head_hexsha": "fd5d5c602714a746b7b7a86248b536a1d2e92329", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-10-21T16:04:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T11:18:00.000Z", "max_issues_repo_path": "linedraw.py", "max_issues_repo_name": "evildmp/linedraw", "max_issues_repo_head_hexsha": "fd5d5c602714a746b7b7a86248b536a1d2e92329", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "linedraw.py", "max_forks_repo_name": "evildmp/linedraw", "max_forks_repo_head_hexsha": "fd5d5c602714a746b7b7a86248b536a1d2e92329", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-11-26T17:54:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-11T21:17:14.000Z", "avg_line_length": 29.1214057508, "max_line_length": 140, "alphanum_fraction": 0.5297860669, "include": true, "reason": "import numpy", "num_tokens": 2482}
|
\section{\module{fl} ---
FORMS library interface for GUI applications}
\declaremodule{builtin}{fl}
\platform{IRIX}
\modulesynopsis{FORMS library interface for GUI applications.}
This module provides an interface to the FORMS Library\index{FORMS
Library} by Mark Overmars\index{Overmars, Mark}. The source for the
library can be retrieved by anonymous ftp from host
\samp{ftp.cs.ruu.nl}, directory \file{SGI/FORMS}. It was last tested
with version 2.0b.
Most functions are literal translations of their C equivalents,
dropping the initial \samp{fl_} from their name. Constants used by
the library are defined in module \refmodule[fl-constants]{FL}
described below.
The creation of objects is a little different in Python than in C:
instead of the `current form' maintained by the library to which new
FORMS objects are added, all functions that add a FORMS object to a
form are methods of the Python object representing the form.
Consequently, there are no Python equivalents for the C functions
\cfunction{fl_addto_form()} and \cfunction{fl_end_form()}, and the
equivalent of \cfunction{fl_bgn_form()} is called
\function{fl.make_form()}.
Watch out for the somewhat confusing terminology: FORMS uses the word
\dfn{object} for the buttons, sliders etc. that you can place in a form.
In Python, `object' means any value. The Python interface to FORMS
introduces two new Python object types: form objects (representing an
entire form) and FORMS objects (representing one button, slider etc.).
Hopefully this isn't too confusing.
There are no `free objects' in the Python interface to FORMS, nor is
there an easy way to add object classes written in Python. The FORMS
interface to GL event handling is available, though, so you can mix
FORMS with pure GL windows.
\strong{Please note:} importing \module{fl} implies a call to the GL
function \cfunction{foreground()} and to the FORMS routine
\cfunction{fl_init()}.
\subsection{Functions Defined in Module \module{fl}}
\nodename{FL Functions}
Module \module{fl} defines the following functions. For more
information about what they do, see the description of the equivalent
C function in the FORMS documentation:
\begin{funcdesc}{make_form}{type, width, height}
Create a form with given type, width and height. This returns a
\dfn{form} object, whose methods are described below.
\end{funcdesc}
\begin{funcdesc}{do_forms}{}
The standard FORMS main loop. Returns a Python object representing
the FORMS object needing interaction, or the special value
\constant{FL.EVENT}.
\end{funcdesc}
\begin{funcdesc}{check_forms}{}
Check for FORMS events. Returns what \function{do_forms()} above
returns, or \code{None} if there is no event that immediately needs
interaction.
\end{funcdesc}
\begin{funcdesc}{set_event_call_back}{function}
Set the event callback function.
\end{funcdesc}
\begin{funcdesc}{set_graphics_mode}{rgbmode, doublebuffering}
Set the graphics modes.
\end{funcdesc}
\begin{funcdesc}{get_rgbmode}{}
Return the current rgb mode. This is the value of the C global
variable \cdata{fl_rgbmode}.
\end{funcdesc}
\begin{funcdesc}{show_message}{str1, str2, str3}
Show a dialog box with a three-line message and an OK button.
\end{funcdesc}
\begin{funcdesc}{show_question}{str1, str2, str3}
Show a dialog box with a three-line message and YES and NO buttons.
It returns \code{1} if the user pressed YES, \code{0} if NO.
\end{funcdesc}
\begin{funcdesc}{show_choice}{str1, str2, str3, but1\optional{,
but2\optional{, but3}}}
Show a dialog box with a three-line message and up to three buttons.
It returns the number of the button clicked by the user
(\code{1}, \code{2} or \code{3}).
\end{funcdesc}
\begin{funcdesc}{show_input}{prompt, default}
Show a dialog box with a one-line prompt message and text field in
which the user can enter a string. The second argument is the default
input string. It returns the string value as edited by the user.
\end{funcdesc}
\begin{funcdesc}{show_file_selector}{message, directory, pattern, default}
Show a dialog box in which the user can select a file. It returns
the absolute filename selected by the user, or \code{None} if the user
presses Cancel.
\end{funcdesc}
\begin{funcdesc}{get_directory}{}
\funcline{get_pattern}{}
\funcline{get_filename}{}
These functions return the directory, pattern and filename (the tail
part only) selected by the user in the last
\function{show_file_selector()} call.
\end{funcdesc}
\begin{funcdesc}{qdevice}{dev}
\funcline{unqdevice}{dev}
\funcline{isqueued}{dev}
\funcline{qtest}{}
\funcline{qread}{}
%\funcline{blkqread}{?}
\funcline{qreset}{}
\funcline{qenter}{dev, val}
\funcline{get_mouse}{}
\funcline{tie}{button, valuator1, valuator2}
These functions are the FORMS interfaces to the corresponding GL
functions. Use these if you want to handle some GL events yourself
when using \function{fl.do_events()}. When a GL event is detected that
FORMS cannot handle, \function{fl.do_forms()} returns the special value
\constant{FL.EVENT} and you should call \function{fl.qread()} to read
the event from the queue. Don't use the equivalent GL functions!
\end{funcdesc}
\begin{funcdesc}{color}{}
\funcline{mapcolor}{}
\funcline{getmcolor}{}
See the description in the FORMS documentation of
\cfunction{fl_color()}, \cfunction{fl_mapcolor()} and
\cfunction{fl_getmcolor()}.
\end{funcdesc}
\subsection{Form Objects}
\label{form-objects}
Form objects (returned by \function{make_form()} above) have the
following methods. Each method corresponds to a C function whose
name is prefixed with \samp{fl_}; and whose first argument is a form
pointer; please refer to the official FORMS documentation for
descriptions.
All the \method{add_*()} methods return a Python object representing
the FORMS object. Methods of FORMS objects are described below. Most
kinds of FORMS object also have some methods specific to that kind;
these methods are listed here.
\begin{flushleft}
\begin{methoddesc}[form]{show_form}{placement, bordertype, name}
Show the form.
\end{methoddesc}
\begin{methoddesc}[form]{hide_form}{}
Hide the form.
\end{methoddesc}
\begin{methoddesc}[form]{redraw_form}{}
Redraw the form.
\end{methoddesc}
\begin{methoddesc}[form]{set_form_position}{x, y}
Set the form's position.
\end{methoddesc}
\begin{methoddesc}[form]{freeze_form}{}
Freeze the form.
\end{methoddesc}
\begin{methoddesc}[form]{unfreeze_form}{}
Unfreeze the form.
\end{methoddesc}
\begin{methoddesc}[form]{activate_form}{}
Activate the form.
\end{methoddesc}
\begin{methoddesc}[form]{deactivate_form}{}
Deactivate the form.
\end{methoddesc}
\begin{methoddesc}[form]{bgn_group}{}
Begin a new group of objects; return a group object.
\end{methoddesc}
\begin{methoddesc}[form]{end_group}{}
End the current group of objects.
\end{methoddesc}
\begin{methoddesc}[form]{find_first}{}
Find the first object in the form.
\end{methoddesc}
\begin{methoddesc}[form]{find_last}{}
Find the last object in the form.
\end{methoddesc}
%---
\begin{methoddesc}[form]{add_box}{type, x, y, w, h, name}
Add a box object to the form.
No extra methods.
\end{methoddesc}
\begin{methoddesc}[form]{add_text}{type, x, y, w, h, name}
Add a text object to the form.
No extra methods.
\end{methoddesc}
%\begin{methoddesc}[form]{add_bitmap}{type, x, y, w, h, name}
%Add a bitmap object to the form.
%\end{methoddesc}
\begin{methoddesc}[form]{add_clock}{type, x, y, w, h, name}
Add a clock object to the form. \\
Method:
\method{get_clock()}.
\end{methoddesc}
%---
\begin{methoddesc}[form]{add_button}{type, x, y, w, h, name}
Add a button object to the form. \\
Methods:
\method{get_button()},
\method{set_button()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_lightbutton}{type, x, y, w, h, name}
Add a lightbutton object to the form. \\
Methods:
\method{get_button()},
\method{set_button()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_roundbutton}{type, x, y, w, h, name}
Add a roundbutton object to the form. \\
Methods:
\method{get_button()},
\method{set_button()}.
\end{methoddesc}
%---
\begin{methoddesc}[form]{add_slider}{type, x, y, w, h, name}
Add a slider object to the form. \\
Methods:
\method{set_slider_value()},
\method{get_slider_value()},
\method{set_slider_bounds()},
\method{get_slider_bounds()},
\method{set_slider_return()},
\method{set_slider_size()},
\method{set_slider_precision()},
\method{set_slider_step()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_valslider}{type, x, y, w, h, name}
Add a valslider object to the form. \\
Methods:
\method{set_slider_value()},
\method{get_slider_value()},
\method{set_slider_bounds()},
\method{get_slider_bounds()},
\method{set_slider_return()},
\method{set_slider_size()},
\method{set_slider_precision()},
\method{set_slider_step()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_dial}{type, x, y, w, h, name}
Add a dial object to the form. \\
Methods:
\method{set_dial_value()},
\method{get_dial_value()},
\method{set_dial_bounds()},
\method{get_dial_bounds()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_positioner}{type, x, y, w, h, name}
Add a positioner object to the form. \\
Methods:
\method{set_positioner_xvalue()},
\method{set_positioner_yvalue()},
\method{set_positioner_xbounds()},
\method{set_positioner_ybounds()},
\method{get_positioner_xvalue()},
\method{get_positioner_yvalue()},
\method{get_positioner_xbounds()},
\method{get_positioner_ybounds()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_counter}{type, x, y, w, h, name}
Add a counter object to the form. \\
Methods:
\method{set_counter_value()},
\method{get_counter_value()},
\method{set_counter_bounds()},
\method{set_counter_step()},
\method{set_counter_precision()},
\method{set_counter_return()}.
\end{methoddesc}
%---
\begin{methoddesc}[form]{add_input}{type, x, y, w, h, name}
Add an input object to the form. \\
Methods:
\method{set_input()},
\method{get_input()},
\method{set_input_color()},
\method{set_input_return()}.
\end{methoddesc}
%---
\begin{methoddesc}[form]{add_menu}{type, x, y, w, h, name}
Add a menu object to the form. \\
Methods:
\method{set_menu()},
\method{get_menu()},
\method{addto_menu()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_choice}{type, x, y, w, h, name}
Add a choice object to the form. \\
Methods:
\method{set_choice()},
\method{get_choice()},
\method{clear_choice()},
\method{addto_choice()},
\method{replace_choice()},
\method{delete_choice()},
\method{get_choice_text()},
\method{set_choice_fontsize()},
\method{set_choice_fontstyle()}.
\end{methoddesc}
\begin{methoddesc}[form]{add_browser}{type, x, y, w, h, name}
Add a browser object to the form. \\
Methods:
\method{set_browser_topline()},
\method{clear_browser()},
\method{add_browser_line()},
\method{addto_browser()},
\method{insert_browser_line()},
\method{delete_browser_line()},
\method{replace_browser_line()},
\method{get_browser_line()},
\method{load_browser()},
\method{get_browser_maxline()},
\method{select_browser_line()},
\method{deselect_browser_line()},
\method{deselect_browser()},
\method{isselected_browser_line()},
\method{get_browser()},
\method{set_browser_fontsize()},
\method{set_browser_fontstyle()},
\method{set_browser_specialkey()}.
\end{methoddesc}
%---
\begin{methoddesc}[form]{add_timer}{type, x, y, w, h, name}
Add a timer object to the form. \\
Methods:
\method{set_timer()},
\method{get_timer()}.
\end{methoddesc}
\end{flushleft}
Form objects have the following data attributes; see the FORMS
documentation:
\begin{tableiii}{l|l|l}{member}{Name}{C Type}{Meaning}
\lineiii{window}{int (read-only)}{GL window id}
\lineiii{w}{float}{form width}
\lineiii{h}{float}{form height}
\lineiii{x}{float}{form x origin}
\lineiii{y}{float}{form y origin}
\lineiii{deactivated}{int}{nonzero if form is deactivated}
\lineiii{visible}{int}{nonzero if form is visible}
\lineiii{frozen}{int}{nonzero if form is frozen}
\lineiii{doublebuf}{int}{nonzero if double buffering on}
\end{tableiii}
\subsection{FORMS Objects}
\label{forms-objects}
Besides methods specific to particular kinds of FORMS objects, all
FORMS objects also have the following methods:
\begin{methoddesc}[FORMS object]{set_call_back}{function, argument}
Set the object's callback function and argument. When the object
needs interaction, the callback function will be called with two
arguments: the object, and the callback argument. (FORMS objects
without a callback function are returned by \function{fl.do_forms()}
or \function{fl.check_forms()} when they need interaction.) Call this
method without arguments to remove the callback function.
\end{methoddesc}
\begin{methoddesc}[FORMS object]{delete_object}{}
Delete the object.
\end{methoddesc}
\begin{methoddesc}[FORMS object]{show_object}{}
Show the object.
\end{methoddesc}
\begin{methoddesc}[FORMS object]{hide_object}{}
Hide the object.
\end{methoddesc}
\begin{methoddesc}[FORMS object]{redraw_object}{}
Redraw the object.
\end{methoddesc}
\begin{methoddesc}[FORMS object]{freeze_object}{}
Freeze the object.
\end{methoddesc}
\begin{methoddesc}[FORMS object]{unfreeze_object}{}
Unfreeze the object.
\end{methoddesc}
%\begin{methoddesc}[FORMS object]{handle_object}{} XXX
%\end{methoddesc}
%\begin{methoddesc}[FORMS object]{handle_object_direct}{} XXX
%\end{methoddesc}
FORMS objects have these data attributes; see the FORMS documentation:
\begin{tableiii}{l|l|l}{member}{Name}{C Type}{Meaning}
\lineiii{objclass}{int (read-only)}{object class}
\lineiii{type}{int (read-only)}{object type}
\lineiii{boxtype}{int}{box type}
\lineiii{x}{float}{x origin}
\lineiii{y}{float}{y origin}
\lineiii{w}{float}{width}
\lineiii{h}{float}{height}
\lineiii{col1}{int}{primary color}
\lineiii{col2}{int}{secondary color}
\lineiii{align}{int}{alignment}
\lineiii{lcol}{int}{label color}
\lineiii{lsize}{float}{label font size}
\lineiii{label}{string}{label string}
\lineiii{lstyle}{int}{label style}
\lineiii{pushed}{int (read-only)}{(see FORMS docs)}
\lineiii{focus}{int (read-only)}{(see FORMS docs)}
\lineiii{belowmouse}{int (read-only)}{(see FORMS docs)}
\lineiii{frozen}{int (read-only)}{(see FORMS docs)}
\lineiii{active}{int (read-only)}{(see FORMS docs)}
\lineiii{input}{int (read-only)}{(see FORMS docs)}
\lineiii{visible}{int (read-only)}{(see FORMS docs)}
\lineiii{radio}{int (read-only)}{(see FORMS docs)}
\lineiii{automatic}{int (read-only)}{(see FORMS docs)}
\end{tableiii}
\section{\module{FL} ---
Constants used with the \module{fl} module}
\declaremodule[fl-constants]{standard}{FL}
\platform{IRIX}
\modulesynopsis{Constants used with the \module{fl} module.}
This module defines symbolic constants needed to use the built-in
module \refmodule{fl} (see above); they are equivalent to those defined in
the C header file \code{<forms.h>} except that the name prefix
\samp{FL_} is omitted. Read the module source for a complete list of
the defined names. Suggested use:
\begin{verbatim}
import fl
from FL import *
\end{verbatim}
\section{\module{flp} ---
Functions for loading stored FORMS designs}
\declaremodule{standard}{flp}
\platform{IRIX}
\modulesynopsis{Functions for loading stored FORMS designs.}
This module defines functions that can read form definitions created
by the `form designer' (\program{fdesign}) program that comes with the
FORMS library (see module \refmodule{fl} above).
For now, see the file \file{flp.doc} in the Python library source
directory for a description.
XXX A complete description should be inserted here!
|
{"hexsha": "26133fde840b44102ce1d9729762ab3dd3049f02", "size": 15558, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Doc/lib/libfl.tex", "max_stars_repo_name": "marcosptf/cpython-2.0.1", "max_stars_repo_head_hexsha": "73c739a764e8b1dc84640e73b880bc66e1916bca", "max_stars_repo_licenses": ["PSF-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-26T21:53:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:47:20.000Z", "max_issues_repo_path": "Doc/lib/libfl.tex", "max_issues_repo_name": "marcosptf/cpython-2.0.1", "max_issues_repo_head_hexsha": "73c739a764e8b1dc84640e73b880bc66e1916bca", "max_issues_repo_licenses": ["PSF-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-11-18T15:48:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-03T21:20:50.000Z", "max_forks_repo_path": "Doc/lib/libfl.tex", "max_forks_repo_name": "marcosptf/cpython-2.0.1", "max_forks_repo_head_hexsha": "73c739a764e8b1dc84640e73b880bc66e1916bca", "max_forks_repo_licenses": ["PSF-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-07-16T08:14:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T01:55:17.000Z", "avg_line_length": 30.6863905325, "max_line_length": 74, "alphanum_fraction": 0.7463041522, "num_tokens": 4319}
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes used for hyperparameter optimisation.
Two main classes exist:
1) HyperparamOptManager used for optimisation on a single machine/GPU.
2) DistributedHyperparamOptManager for multiple GPUs on different machines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import libs.utils as utils
import numpy as np
import pandas as pd
Deque = collections.deque
class HyperparamOptManager:
"""Manages hyperparameter optimisation using random search for a single GPU.
Attributes:
param_ranges: Discrete hyperparameter range for random search.
results: Dataframe of validation results.
fixed_params: Fixed model parameters per experiment.
saved_params: Dataframe of parameters trained.
best_score: Minimum validation loss observed thus far.
optimal_name: Key to best configuration.
hyperparam_folder: Where to save optimisation outputs.
"""
def __init__(self, param_ranges, fixed_params, model_folder,
             override_w_fixed_params=True):
    """Instantiates model.

    Args:
      param_ranges: Discrete hyperparameter range for random search.
      fixed_params: Fixed model parameters per experiment.
      model_folder: Folder to store optimisation artifacts.
      override_w_fixed_params: Whether to override serialised fixed model
        parameters with new supplied values.
    """
    self.param_ranges = param_ranges
    self._max_tries = 1000  # cap on random draws before giving up
    self.results = pd.DataFrame()
    self.fixed_params = fixed_params
    self.saved_params = pd.DataFrame()
    self.best_score = np.Inf
    self.optimal_name = ""

    # Make sure the output folder exists before any results are written.
    self.hyperparam_folder = model_folder
    utils.create_folder_if_not_exist(self.hyperparam_folder)

    self._override_w_fixed_params = override_w_fixed_params
def load_results(self):
    """Loads results from previous hyperparameter optimisation.

    Returns:
      A boolean indicating if previous results can be loaded.
    """
    print("Loading results from", self.hyperparam_folder)
    results_file = os.path.join(self.hyperparam_folder, "results.csv")
    params_file = os.path.join(self.hyperparam_folder, "params.csv")

    # Both CSVs must exist for a valid restore.
    if not (os.path.exists(results_file) and os.path.exists(params_file)):
        return False

    self.results = pd.read_csv(results_file, index_col=0)
    self.saved_params = pd.read_csv(params_file, index_col=0)
    if self.results.empty:
        return False

    # Coerce losses to float, then recover the best score and its key.
    self.results.at["loss"] = self.results.loc["loss"].apply(float)
    self.best_score = self.results.loc["loss"].min()
    is_optimal = self.results.loc["loss"] == self.best_score
    self.optimal_name = self.results.T[is_optimal].index[0]
    return True
def _get_params_from_name(self, name):
    """Returns previously saved parameters given a key."""
    selected = dict(self.saved_params[name])
    if self._override_w_fixed_params:
        # Fixed params always win over whatever was serialised.
        selected.update(self.fixed_params)
    return selected
def get_best_params(self):
    """Returns the optimal hyperparameters thus far."""
    return self._get_params_from_name(self.optimal_name)
def clear(self):
    """Clears all previous results and saved parameters."""
    # Wipe the optimisation folder on disk and recreate it empty.
    shutil.rmtree(self.hyperparam_folder)
    os.makedirs(self.hyperparam_folder)
    self.results = pd.DataFrame()
    self.saved_params = pd.DataFrame()
def _check_params(self, params):
    """Checks that parameter map is properly defined.

    Raises:
      ValueError: If `params` contains unknown keys or is missing any of
        the searchable/fixed keys.
    """
    valid_fields = list(self.param_ranges.keys()) + list(self.fixed_params.keys())
    invalid_fields = [k for k in params if k not in valid_fields]
    missing_fields = [k for k in valid_fields if k not in params]

    if invalid_fields:
        raise ValueError(
            "Invalid Fields Found {} - Valid ones are {}".format(
                invalid_fields, valid_fields))
    if missing_fields:
        raise ValueError(
            "Missing Fields Found {} - Valid ones are {}".format(
                missing_fields, valid_fields))
def _get_name(self, params):
"""Returns a unique key for the supplied set of params."""
self._check_params(params)
fields = list(params.keys())
fields.sort()
return "_".join([str(params[k]) for k in fields])
def get_next_parameters(self, ranges_to_skip=None):
    """Randomly draw the next hyperparameter combination to evaluate.

    Args:
      ranges_to_skip: Optional collection of run names (as produced by
        ``_get_name``) that must not be returned again. Defaults to the
        names of all runs already recorded in ``self.results``.

    Returns:
      Dict mapping parameter name -> sampled value, fixed params included.

    Raises:
      ValueError: If ``param_ranges`` is not a dict, or no unseen
        combination is found within ``self._max_tries`` draws.
    """
    if ranges_to_skip is None:
        # Bug fix: run names are stored as *columns* of ``results`` (see
        # ``update_score`` / ``_get_worker_search_queue``); the index only
        # holds "loss"/"info", so using ``results.index`` here meant
        # already-completed runs were never skipped.
        ranges_to_skip = set(self.results.columns)

    if not isinstance(self.param_ranges, dict):
        raise ValueError("Only works for random search!")

    param_range_keys = sorted(self.param_ranges.keys())

    def _sample_combination():
        """Draws one random combination and merges in the fixed params."""
        parameters = {
            k: np.random.choice(self.param_ranges[k]) for k in param_range_keys
        }
        parameters.update(self.fixed_params)
        return parameters

    for _ in range(self._max_tries):
        parameters = _sample_combination()
        name = self._get_name(parameters)
        if name not in ranges_to_skip:
            return parameters

    raise ValueError("Exceeded max number of hyperparameter searches!!")
def update_score(self, parameters, loss, model, info=""):
    """Record the outcome of one optimisation run.

    Args:
      parameters: Hyperparameters used in the run.
      loss: Validation loss obtained (NaN is treated as +infinity).
      model: Model to serialise if it is the best so far; may be None.
      info: Any ancillary information to tag on to the result.

    Returns:
      Boolean flag indicating if the model is the best seen so far.
    """
    if np.isnan(loss):
        # Bug fix: ``np.Inf`` was removed in NumPy 2.0; ``np.inf`` is the
        # supported spelling in every NumPy version.
        loss = np.inf

    if not os.path.isdir(self.hyperparam_folder):
        os.makedirs(self.hyperparam_folder)

    name = self._get_name(parameters)

    # The very first recorded run is optimal by definition.
    is_optimal = self.results.empty or loss < self.best_score

    if is_optimal:
        # Try saving first, before updating the in-memory bookkeeping.
        if model is not None:
            print("Optimal model found, updating")
            model.save(self.hyperparam_folder)
        self.best_score = loss
        self.optimal_name = name

    self.results[name] = pd.Series({"loss": loss, "info": info})
    self.saved_params[name] = pd.Series(parameters)

    # Persist after every update so progress survives interruption.
    self.results.to_csv(os.path.join(self.hyperparam_folder, "results.csv"))
    self.saved_params.to_csv(os.path.join(self.hyperparam_folder, "params.csv"))

    return is_optimal
class DistributedHyperparamOptManager(HyperparamOptManager):
    """Manages distributed hyperparameter optimisation across many gpus."""

    def __init__(
        self,
        param_ranges,
        fixed_params,
        root_model_folder,
        worker_number,
        search_iterations=1000,
        num_iterations_per_worker=5,
        clear_serialised_params=False,
    ):
        """Instantiates optimisation manager.

        This hyperparameter optimisation pre-generates #search_iterations
        hyperparameter combinations and serialises them at the start. At
        runtime, each worker goes through their own set of parameter ranges.
        The pregeneration allows for multiple workers to run in parallel on
        different machines without resulting in parameter overlaps.

        Args:
          param_ranges: Discrete hyperparameter range for random search.
          fixed_params: Fixed model parameters per experiment.
          root_model_folder: Folder to store optimisation artifacts.
          worker_number: Worker index defining which set of hyperparameters to
            test.
          search_iterations: Maximum number of random search iterations.
          num_iterations_per_worker: How many iterations are handled per worker.
          clear_serialised_params: Whether to regenerate hyperparameter
            combinations.

        Raises:
          ValueError: If ``worker_number`` exceeds the number of workers or
            the number of search iterations.
        """
        max_workers = int(np.ceil(search_iterations / num_iterations_per_worker))

        # Sanity checks
        if worker_number > max_workers:
            # Bug fix: the original message formatted ``max_workers`` into the
            # "worker number" placeholder; report both values correctly.
            raise ValueError(
                "Worker number ({}) cannot be larger than the total number of workers ({})!".format(
                    worker_number, max_workers
                )
            )
        if worker_number > search_iterations:
            raise ValueError(
                "Worker number ({}) cannot be larger than the max search iterations ({})!".format(
                    worker_number, search_iterations
                )
            )

        print("*** Creating hyperparameter manager for worker {} ***".format(worker_number))

        hyperparam_folder = os.path.join(root_model_folder, str(worker_number))
        super().__init__(param_ranges, fixed_params, hyperparam_folder, override_w_fixed_params=True)

        serialised_ranges_folder = os.path.join(root_model_folder, "hyperparams")
        if clear_serialised_params:
            print("Regenerating hyperparameter list")
            if os.path.exists(serialised_ranges_folder):
                shutil.rmtree(serialised_ranges_folder)

        utils.create_folder_if_not_exist(serialised_ranges_folder)

        # Bug fix: this attribute was referenced by
        # ``load_serialised_hyperparam_df`` but never assigned, raising
        # AttributeError on every load attempt.
        self.serialised_ranges_folder = serialised_ranges_folder
        self.serialised_ranges_path = os.path.join(
            serialised_ranges_folder, "ranges_{}.csv".format(search_iterations)
        )
        self.hyperparam_folder = hyperparam_folder  # override
        self.worker_num = worker_number
        self.total_search_iterations = search_iterations
        self.num_iterations_per_worker = num_iterations_per_worker
        self.global_hyperparam_df = self.load_serialised_hyperparam_df()
        self.worker_search_queue = self._get_worker_search_queue()

    @property
    def optimisation_completed(self):
        """True once this worker's queue of combinations is exhausted."""
        return not self.worker_search_queue

    def get_next_parameters(self):
        """Returns next dictionary of hyperparameters to optimise."""
        param_name = self.worker_search_queue.pop()

        params = self.global_hyperparam_df.loc[param_name, :].to_dict()

        # Always override!
        for k in self.fixed_params:
            print("Overriding saved {}: {}".format(k, self.fixed_params[k]))
            params[k] = self.fixed_params[k]

        return params

    def load_serialised_hyperparam_df(self):
        """Loads serialised hyperparameter ranges from file.

        Returns:
          DataFrame containing hyperparameter combinations.
        """
        print(
            "Loading params for {} search iterations from {}".format(
                self.total_search_iterations, self.serialised_ranges_path
            )
        )
        # Bug fix: the original tested ``self.serialised_ranges_folder``,
        # which (a) was never assigned and (b) always exists after
        # ``create_folder_if_not_exist`` — the CSV itself must be checked.
        if os.path.exists(self.serialised_ranges_path):
            df = pd.read_csv(self.serialised_ranges_path, index_col=0)
        else:
            print("Unable to load - regenerating search ranges instead")
            df = self.update_serialised_hyperparam_df()
        return df

    def update_serialised_hyperparam_df(self):
        """Regenerates hyperparameter combinations and saves to file.

        Returns:
          DataFrame containing hyperparameter combinations.
        """
        search_df = self._generate_full_hyperparam_df()

        print(
            "Serialising params for {} search iterations to {}".format(
                self.total_search_iterations, self.serialised_ranges_path
            )
        )
        search_df.to_csv(self.serialised_ranges_path)

        return search_df

    def _generate_full_hyperparam_df(self):
        """Generates actual hyperparameter combinations.

        Returns:
          DataFrame containing hyperparameter combinations (index = run name).
        """
        np.random.seed(131)  # for reproducibility of hyperparam list

        name_list = []
        param_list = []
        for _ in range(self.total_search_iterations):
            # Skip names drawn so far so all combinations are distinct.
            params = super().get_next_parameters(name_list)
            name = self._get_name(params)
            name_list.append(name)
            param_list.append(params)

        return pd.DataFrame(param_list, index=name_list)

    def clear(self):  # reset when cleared
        """Clears results for hyperparameter manager and resets."""
        super().clear()
        self.worker_search_queue = self._get_worker_search_queue()

    def load_results(self):
        """Load results from file and queue parameter combinations to try.

        Returns:
          Boolean indicating if results were successfully loaded.
        """
        success = super().load_results()
        if success:
            self.worker_search_queue = self._get_worker_search_queue()
        return success

    def _get_worker_search_queue(self):
        """Generates the queue of param combinations for current worker.

        Returns:
          Queue of hyperparameter combinations outstanding (already-scored
          combinations are excluded).
        """
        global_df = self.assign_worker_numbers(self.global_hyperparam_df)
        worker_df = global_df[global_df["worker"] == self.worker_num]
        left_overs = [s for s in worker_df.index if s not in self.results.columns]
        return Deque(left_overs)

    def assign_worker_numbers(self, df):
        """Updates parameter combinations with the index of the worker used.

        Args:
          df: DataFrame of parameter combinations.

        Returns:
          Updated DataFrame with a 1-based ``worker`` column.
        """
        output = df.copy()

        batch_size = self.num_iterations_per_worker
        max_worker_num = int(np.ceil(self.total_search_iterations / batch_size))

        # Worker i+1 owns the i-th consecutive batch of combinations.
        worker_idx = np.concatenate(
            [np.tile(i + 1, batch_size) for i in range(max_worker_num)]
        )
        output["worker"] = worker_idx[: len(output)]

        return output
|
{"hexsha": "e18f5b71634d54735f79f8b2ad778acfebc719d2", "size": 15327, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/benchmarks/TFT/libs/hyperparam_opt.py", "max_stars_repo_name": "lpd6375/qlib", "max_stars_repo_head_hexsha": "3a911bc09ba5136cd7c61c2c8dcca8a63339e738", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-12T20:48:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-25T02:26:09.000Z", "max_issues_repo_path": "examples/benchmarks/TFT/libs/hyperparam_opt.py", "max_issues_repo_name": "lpd6375/qlib", "max_issues_repo_head_hexsha": "3a911bc09ba5136cd7c61c2c8dcca8a63339e738", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-10T03:57:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T03:57:50.000Z", "max_forks_repo_path": "examples/benchmarks/TFT/libs/hyperparam_opt.py", "max_forks_repo_name": "lpd6375/qlib", "max_forks_repo_head_hexsha": "3a911bc09ba5136cd7c61c2c8dcca8a63339e738", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-22T03:09:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T03:09:56.000Z", "avg_line_length": 35.5614849188, "max_line_length": 120, "alphanum_fraction": 0.6439616363, "include": true, "reason": "import numpy", "num_tokens": 2925}
|
"""
Integration test taking in csv of local attributions
and producing csv of global attributions
"""
import glob
import os
import numpy as np
import pytest
from gam import gam
def test_read_csv():
    """Reading the local attributions CSV populates data and feature labels."""
    explainer = gam.GAM(attributions_path="tests/test_attributes.csv")
    explainer._read_local()
    assert hasattr(explainer, "attributions")
    assert explainer.attributions.shape == (4, 3)
    assert hasattr(explainer, "feature_labels")
    assert explainer.feature_labels == ["a1", "a2", "a3"]
def test_normalize():
    """Tests normalization of attributions from csv."""
    g = gam.GAM(attributions_path="tests/test_attributes.csv")
    g._read_local()
    normalized_attributions = gam.GAM.normalize(g.attributions)
    assert normalized_attributions.shape == g.attributions.shape
    # Bug fix: ``np.any(np.where(arr < 0))`` inspects the *indices* of the
    # negative entries, so a negative value at index 0 went undetected.
    # Assert non-negativity of the values directly.
    assert np.all(normalized_attributions >= 0)
    # Each row should sum to 1 after normalisation.
    assert normalized_attributions.sum(axis=1)[0] == pytest.approx(1.0)
def test_get_subpopulation_sizes():
    """Cluster membership labels are converted into per-cluster counts."""
    memberships = [0, 1, 0, 0, 1, 2, 0]
    sizes = gam.GAM.get_subpopulation_sizes(memberships)
    assert sizes == [4, 2, 1]
def test_cluster():
    """Clustering normalized attributions yields two valid explanations."""
    g = gam.GAM(attributions_path="tests/test_attributes.csv")
    g._read_local()
    g.clustering_attributions = gam.GAM.normalize(g.attributions)
    g._cluster()

    assert len(g.explanations) == 2
    assert g.subpopulation_sizes[0] > 0
    assert g.subpopulation_sizes[1] > 0
    assert g.explanations[0][0][0] == g.feature_labels[0]
    # Each explanation's feature weights should form a distribution.
    for explanation in g.explanations:
        assert sum(weight for _, weight in explanation) == pytest.approx(1)
def test_plotting_top2():
    """Plotting the top-2 features per explanation writes an image file."""
    explanations = [
        [("height", 0.3), ("weight", 0.6), ("hair color", 0.1)],
        [("height", 0.05), ("weight", 0.05), ("hair color", 0.9)],
        [("height", 0.9), ("weight", 0.05), ("hair color", 0.05)],
    ] * 2
    g = gam.GAM(attributions_path="tests/test_attributes.csv", k=len(explanations))
    g.explanations = explanations
    base = "tests/image1"
    g.plot(num_features=2, output_path_base=base, display=False)
    produced = glob.glob(base + "*")
    assert len(produced) > 0
    # Clean up the generated image files.
    for path in produced:
        os.remove(path)
def test_plotting_top3():
    """Plotting the top-3 features per explanation writes an image file."""
    explanations = [
        [("height", 0.3), ("weight", 0.6), ("hair color", 0.1)],
        [("height", 0.05), ("weight", 0.05), ("hair color", 0.9)],
        [("height", 0.9), ("weight", 0.05), ("hair color", 0.05)],
    ] * 2
    g = gam.GAM(attributions_path="tests/test_attributes.csv", k=len(explanations))
    g.explanations = explanations
    base = "tests/image2"
    g.plot(num_features=3, output_path_base=base, display=False)
    produced = glob.glob(base + "*")
    assert len(produced) > 0
    # Clean up the generated image files.
    for path in produced:
        os.remove(path)
def test_plotting_2attributes():
    """Plotting works when there are only two explanations."""
    explanations = [
        [("height", 0.05), ("weight", 0.05), ("hair color", 0.9)],
        [("height", 0.9), ("weight", 0.05), ("hair color", 0.05)],
    ]
    g = gam.GAM(attributions_path="tests/test_attributes.csv", k=len(explanations))
    g.explanations = explanations
    base = "tests/image3"
    g.plot(num_features=2, output_path_base=base, display=False)
    produced = glob.glob(base + "*")
    assert len(produced) > 0
    # Clean up the generated image files.
    for path in produced:
        os.remove(path)
|
{"hexsha": "83c01de3b979b0fdef2413c4e6065a18f1be8f95", "size": 4054, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_gam.py", "max_stars_repo_name": "timwong101/project-gam", "max_stars_repo_head_hexsha": "6a0b87418091772517e2f3b2339e8998c43ffc54", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_gam.py", "max_issues_repo_name": "timwong101/project-gam", "max_issues_repo_head_hexsha": "6a0b87418091772517e2f3b2339e8998c43ffc54", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_gam.py", "max_forks_repo_name": "timwong101/project-gam", "max_forks_repo_head_hexsha": "6a0b87418091772517e2f3b2339e8998c43ffc54", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-18T02:30:19.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-18T02:30:19.000Z", "avg_line_length": 31.1846153846, "max_line_length": 83, "alphanum_fraction": 0.6364084854, "include": true, "reason": "import numpy", "num_tokens": 1162}
|
# -*- coding: utf-8 -*-
import gzip
from matplotlib import pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import sklearn.preprocessing
def extract_params(statefile):
    """Extract the alpha and beta values from the statefile.

    Args:
        statefile (str): Path to statefile produced by Mallet.

    Returns:
        tuple: (alpha tokens as a list of strings, beta as a float), parsed
        from the second and third header lines of the gzipped state file.
    """
    with gzip.open(statefile, 'r') as state:
        raw_lines = state.readlines()
    alpha_line, beta_line = (line.decode('utf8').strip() for line in raw_lines[1:3])
    alpha = alpha_line.split(":")[1].split(" ")
    beta = float(beta_line.split(":")[1])
    return (alpha, beta)
def state_to_df(statefile):
    """Transform a gzipped Mallet state file into a pandas DataFrame.

    The two comment lines that follow the header row are skipped.
    """
    return pd.read_csv(
        statefile, compression='gzip', sep=' ', skiprows=[1, 2])
def get_topic_keys(keyfile):
    """Turn tab-separated topic/key output into a labelled DataFrame."""
    column_names = {0: 'topic', 1: 'overallWeight', 2: 'topic_words'}
    return pd.read_csv(keyfile, sep='\t', header=None).rename(columns=column_names)
def aggregate_data(df, topic_col='topic', word_col='type'):
    """Count occurrences of each word within each topic.

    Args:
        df: State DataFrame with one row per token assignment.
        topic_col: Column holding the topic id.
        word_col: Column holding the word/type.

    Returns:
        DataFrame with columns ``[topic_col, word_col, 'count']``.
    """
    # Bug fix: dict-of-dicts aggregation ({col: {'count': fn}}) was removed in
    # pandas 1.0 and now raises SpecificationError; named aggregation produces
    # the same [topic, word, count] output on all supported versions.
    counts = df.groupby([topic_col, word_col]).agg(count=(word_col, 'count'))
    return counts.reset_index()
def pivot_and_smooth(df, smooth_value, rows='type', cols='topic'):
    """Pivot counts into a rows-by-cols matrix, smooth, and L1-normalise.

    Args:
        df: DataFrame with ``rows``, ``cols`` and ``count`` columns.
        smooth_value: Additive smoothing constant applied to every cell.
        rows: Column to use as the pivot index.
        cols: Column to use as the pivot columns.

    Returns:
        DataFrame in which every column sums to 1.
    """
    counts = df.pivot(index=rows, columns=cols, values='count').fillna(value=0)
    smoothed = counts + smooth_value
    return pd.DataFrame(
        sklearn.preprocessing.normalize(smoothed, norm='l1', axis=0))
def graph_matrix(matrix):
    """Draw a lower-triangle heatmap of the topic-topic correlation matrix.

    Note: Negative correlations, which would indicate that the words in one
    topic are missing from another topic, are not interesting for our purpose
    of measuring when topics overshare words. By centering at 0.5, we are
    focusing on positive overlap between topics.
    """
    fig, axis = plt.subplots(figsize=(30, 30))
    # Mask the upper triangle; the matrix is symmetric so it is redundant.
    upper_mask = np.zeros_like(matrix)
    upper_mask[np.triu_indices_from(upper_mask)] = True
    with sns.axes_style("white"):
        sns.heatmap(
            matrix,
            center=0.5,
            mask=upper_mask,
            cmap=sns.diverging_palette(220, 10, as_cmap=True),
            square=True,
            xticklabels=2,
            cbar_kws={"label": "Correlation (Pearson) between Topics"},
            ax=axis,
        )
def get_top_pairs(matrix, n_pairs):
    """Return the ``n_pairs`` most-correlated topic pairs.

    Args:
        matrix: Square DataFrame of topic-topic correlations.
        n_pairs: Number of top pairs to return.

    Returns:
        DataFrame with columns ``topic_1``, ``topic_2`` and ``correlation``,
        sorted by descending correlation (upper triangle only, so each pair
        appears once).
    """
    # Bug fix: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool``
    # is the supported replacement.
    upper = np.triu(np.ones(matrix.shape), k=1).astype(bool)
    top = matrix.where(upper).stack().sort_values(ascending=False)[:n_pairs]
    df = pd.DataFrame(top).reset_index()
    return df.rename(
        columns={'level_0': 'topic_1', 'level_1': 'topic_2', 0: 'correlation'})
def merge_frames(pairs, keys):
    """Attach topic key metadata to both topics of each top pair.

    Overlapping key columns get pandas' default ``_x``/``_y`` suffixes for
    ``topic_1`` and ``topic_2`` respectively.
    """
    with_first = pairs.merge(keys, left_on='topic_1', right_on='topic', how='left')
    return with_first.merge(keys, left_on='topic_2', right_on='topic', how='left')
|
{"hexsha": "eab5b21924a9b4873244c7701e1c5a31cfd9bab5", "size": 2928, "ext": "py", "lang": "Python", "max_stars_repo_path": "GoH/verify_model.py", "max_stars_repo_name": "jerielizabeth/GoH", "max_stars_repo_head_hexsha": "7c16eac86d76525170330924348cecccce3aa5cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-09T12:25:05.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-09T12:25:05.000Z", "max_issues_repo_path": "GoH/verify_model.py", "max_issues_repo_name": "jerielizabeth/GoH", "max_issues_repo_head_hexsha": "7c16eac86d76525170330924348cecccce3aa5cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GoH/verify_model.py", "max_forks_repo_name": "jerielizabeth/GoH", "max_forks_repo_head_hexsha": "7c16eac86d76525170330924348cecccce3aa5cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6551724138, "max_line_length": 149, "alphanum_fraction": 0.6181693989, "include": true, "reason": "import numpy", "num_tokens": 712}
|
from io import BytesIO
from django.shortcuts import render
from django.http import HttpResponse
import librosa
import soundfile as sf
from .models import File
from devices.models import DeviceContext
from projects.models import Project
import scipy.io.wavfile as sa
# Create your views here.
def list_files(request, proj_id, device_id):
    """Render the list of files recorded by a device, ordered by start time."""
    device = DeviceContext.objects.get(id=device_id)
    context = {
        'files': device.all_files.order_by('tstart'),
        'project': Project.objects.get(id=proj_id),
        'device': device,
    }
    return render(request, 'files_list.html', context)
def get_audio(request, proj_id, device_id, file_id):
    """Return a recording as a WAV HTTP response.

    Query params:
        offset: Optional start position (seconds) passed to the audio loader.
    """
    offset = 0
    if "offset" in request.GET:
        offset = request.GET["offset"]
    file_entry = File.objects.get(id=file_id)
    fname = file_entry.path
    buffer = BytesIO()
    # Bug fix: the original passed ``offset=float()`` (always 0.0), silently
    # ignoring the requested offset; also removed a stray debug print.
    y, _ = librosa.load(
        fname,
        sr=file_entry.sample_rate,
        offset=float(offset),
        mono=not file_entry.stereo,
    )
    sa.write(buffer, file_entry.sample_rate, y)
    # Bug fix: after ``write`` the buffer position sits at EOF, so handing
    # the stream object to HttpResponse yielded an empty body; pass the
    # accumulated bytes instead.
    response = HttpResponse(buffer.getvalue(), content_type="audio/wav")
    response["Accept-Ranges"] = "bytes"
    return response
|
{"hexsha": "191381c288ae90ddeef840a9b7b2778653e707d2", "size": 1131, "ext": "py", "lang": "Python", "max_stars_repo_path": "files/views.py", "max_stars_repo_name": "plaf2000/webspec", "max_stars_repo_head_hexsha": "487ccccff088ddbda0e5e475aaad167a01f4aab2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "files/views.py", "max_issues_repo_name": "plaf2000/webspec", "max_issues_repo_head_hexsha": "487ccccff088ddbda0e5e475aaad167a01f4aab2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "files/views.py", "max_forks_repo_name": "plaf2000/webspec", "max_forks_repo_head_hexsha": "487ccccff088ddbda0e5e475aaad167a01f4aab2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0638297872, "max_line_length": 98, "alphanum_fraction": 0.7029177719, "include": true, "reason": "import scipy", "num_tokens": 264}
|
import numpy as np
from .propagator import Propagator
from ..optics import Wavefront, make_agnostic_optical_element
from ..field import Field
@make_agnostic_optical_element()
class FraunhoferPropagator(Propagator):
    '''A monochromatic perfect lens propagator.

    This implements the propagation of a wavefront through a perfect lens. The wavefront
    is assumed to be exactly in the front focal plane of the lens and is propagated to the
    back focal plane. The implementation follows [1]_.

    .. [1] Goodman, J.W., 2005 Introduction to Fourier optics. Roberts and Company Publishers.

    Parameters
    ----------
    input_grid : Grid
        The grid on which the incoming wavefront is defined.
    output_grid : Grid
        The grid on which the outgoing wavefront is to be evaluated.
    wavelength : scalar
        The wavelength of the wavefront.
    focal_length : scalar
        The focal length of the lens system.
    '''
    def __init__(self, input_grid, output_grid, focal_length=1, wavelength=1):
        from ..fourier import make_fourier_transform

        # Spatial-frequency coordinates sampled in the back focal plane.
        self.uv_grid = output_grid.scaled(2 * np.pi / (focal_length * wavelength))
        self.fourier_transform = make_fourier_transform(input_grid, self.uv_grid)

        self.input_grid = input_grid
        self.output_grid = output_grid

        # Normalisation factor intrinsic to Fraunhofer propagation.
        self.norm_factor = 1 / (1j * (focal_length * wavelength))

    def forward(self, wavefront):
        '''Propagate a wavefront forward through the lens.

        Parameters
        ----------
        wavefront : Wavefront
            The incoming wavefront.

        Returns
        -------
        Wavefront
            The wavefront evaluated in the back focal plane.
        '''
        propagated = self.fourier_transform.forward(wavefront.electric_field)
        propagated *= self.norm_factor
        return Wavefront(Field(propagated, self.output_grid), wavefront.wavelength)

    def backward(self, wavefront):
        '''Propagate a wavefront backward through the lens.

        Parameters
        ----------
        wavefront : Wavefront
            The incoming wavefront.

        Returns
        -------
        Wavefront
            The wavefront evaluated in the front focal plane.
        '''
        propagated = self.fourier_transform.backward(wavefront.electric_field)
        propagated /= self.norm_factor
        return Wavefront(Field(propagated, self.input_grid), wavefront.wavelength)

    def get_transformation_matrix_forward(self, input_grid, wavelength=1):
        '''Create the forward linear transformation between the internal input grid and output grid.

        Parameters
        ----------
        input_grid : Grid
            The input grid on which the wavefront is defined.
            Currently this parameter is ignored and an internal grid is used.
        wavelength : scalar
            The wavelength of the wavefront.

        Returns
        -------
        ndarray
            The transformation matrix that describes the propagation.
        '''
        # Ignore input wavelength and just use the internal one.
        return self.fourier_transform.get_transformation_matrix_forward() * self.norm_factor

    def get_transformation_matrix_backward(self, input_grid, wavelength=1):
        '''Create the backward linear transformation between the internal input grid and output grid.

        Parameters
        ----------
        input_grid : Grid
            The input grid on which the wavefront is defined.
            Currently this parameter is ignored and an internal grid is used.
        wavelength : scalar
            The wavelength of the wavefront.

        Returns
        -------
        ndarray
            The transformation matrix that describes the propagation.
        '''
        # Ignore input wavelength and just use the internal one.
        return self.fourier_transform.get_transformation_matrix_backward() / self.norm_factor
|
{"hexsha": "e44356e27de64727928de326d4f1629606ffaae7", "size": 3440, "ext": "py", "lang": "Python", "max_stars_repo_path": "hcipy/propagation/fraunhofer.py", "max_stars_repo_name": "rahulbhadani/hcipy", "max_stars_repo_head_hexsha": "b52726cb9502b5225ddff9d7b1ff417f2350cda8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hcipy/propagation/fraunhofer.py", "max_issues_repo_name": "rahulbhadani/hcipy", "max_issues_repo_head_hexsha": "b52726cb9502b5225ddff9d7b1ff417f2350cda8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hcipy/propagation/fraunhofer.py", "max_forks_repo_name": "rahulbhadani/hcipy", "max_forks_repo_head_hexsha": "b52726cb9502b5225ddff9d7b1ff417f2350cda8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1495327103, "max_line_length": 95, "alphanum_fraction": 0.7476744186, "include": true, "reason": "import numpy", "num_tokens": 833}
|
#!/usr/bin/python3
# Name: Chenying Wang
# Email: chenying.wang@usc.edu
# USC ID: ****-****-**
# Date: Friday, March 20, 2020
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
import sys
COLOR = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
def main(input_csv, output_file):
    """Scatter-plot the first three feature columns of a CSV in 3D.

    Args:
        input_csv: CSV file whose first three columns are numeric features
            and whose last column is a string class label.
        output_file: Path the generated figure is saved to.
    """
    features = np.loadtxt(input_csv, delimiter=',', usecols=[0, 1, 2])
    label = np.loadtxt(input_csv, delimiter=',', usecols=-1, dtype='str')
    label_set = np.unique(label)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for i in range(np.size(label_set)):
        points = features[label == label_set[i], :]
        # Bug fix / generalisation: cycle through the palette so datasets
        # with more than len(COLOR) classes no longer raise IndexError.
        ax.scatter(points[:, 0], points[:, 1], points[:, 2],
                   label=label_set[i],
                   marker='x', alpha=0.8, color=COLOR[i % len(COLOR)])

    ax.legend()
    fig.savefig(output_file)
# Script entry point: expects two CLI arguments (<input_csv> <output_file>)
# and exits silently when either is missing.
if (__name__ == '__main__'):
    if (len(sys.argv) < 3):
        exit()
    main(sys.argv[1], sys.argv[2])
|
{"hexsha": "376989e14bdefdf7351ce3073de9614a97fb572c", "size": 1017, "ext": "py", "lang": "Python", "max_stars_repo_path": "ee569/hw4/plot/plot_3d.py", "max_stars_repo_name": "chenying-wang/usc-ee-coursework-public", "max_stars_repo_head_hexsha": "5bc94c2350bcebf1036fb058fe7dc4f7e31e1de1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-24T10:46:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-24T10:46:20.000Z", "max_issues_repo_path": "ee569/hw4/plot/plot_3d.py", "max_issues_repo_name": "chenying-wang/usc-ee-coursework-public", "max_issues_repo_head_hexsha": "5bc94c2350bcebf1036fb058fe7dc4f7e31e1de1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ee569/hw4/plot/plot_3d.py", "max_forks_repo_name": "chenying-wang/usc-ee-coursework-public", "max_forks_repo_head_hexsha": "5bc94c2350bcebf1036fb058fe7dc4f7e31e1de1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-25T09:18:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-25T09:18:45.000Z", "avg_line_length": 29.9117647059, "max_line_length": 79, "alphanum_fraction": 0.581120944, "include": true, "reason": "import numpy", "num_tokens": 290}
|
import sys
sys.path.append('..')
from neml import elasticity, interpolate
from neml.math import tensors, rotations
import unittest
from common import *
import numpy as np
import numpy.linalg as la
class CommonElasticity(object):
    """
    Checks that should hold for any elastic model.

    Mixed into concrete TestCases that define ``self.model`` and ``self.T``.
    """
    def test_C2S(self):
        # Compliance must be the matrix inverse of stiffness.
        stiffness = self.model.C(self.T)
        self.assertTrue(np.allclose(self.model.S(self.T), la.inv(stiffness)))

    def test_S2C(self):
        # Stiffness must be the matrix inverse of compliance.
        compliance = self.model.S(self.T)
        self.assertTrue(np.allclose(self.model.C(self.T), la.inv(compliance)))

    def test_tensor_C(self):
        # Tensor form of the stiffness must agree with the matrix form.
        self.assertEqual(self.model.C_tensor(self.T),
                         tensors.SymSymR4(self.model.C(self.T)))

    def test_tensor_S(self):
        # Tensor form of the compliance must agree with the matrix form.
        self.assertEqual(self.model.S_tensor(self.T),
                         tensors.SymSymR4(self.model.S(self.T)))
class TestIsotropicConstantModel(CommonElasticity, unittest.TestCase):
    """Isotropic model defined via shear and bulk moduli."""
    def setUp(self):
        self.mu = 29000.0
        self.K = 64000.0
        self.T = 325.0
        self.Q = rotations.Orientation(31.0, 59.0, 80.0, angle_type="degrees",
                                       convention="bunge")
        self.model = elasticity.IsotropicLinearElasticModel(
            self.mu, "shear", self.K, "bulk")

    def test_modulii(self):
        """Compliance entries match the standard isotropic relations."""
        S = self.model.S(self.T)
        E = 9 * self.K * self.mu / (3 * self.K + self.mu)
        nu = (3 * self.K - 2 * self.mu) / (2 * (3 * self.K + self.mu))
        for actual, expected in ((S[0, 0], 1 / E),
                                 (S[0, 1], -nu / E),
                                 (S[3, 3], (1 + nu) / E)):
            self.assertTrue(np.isclose(actual, expected))

    def test_rotated(self):
        """An isotropic stiffness tensor is unaffected by rotation."""
        self.assertEqual(self.model.C_tensor(self.T),
                         self.model.C_tensor(self.T, self.Q))
class TestEquivalentDefinitions(unittest.TestCase):
    """E/nu and G/K parameterisations must define the same model."""
    def setUp(self):
        self.E = 100000.0
        self.nu = 0.3
        # Standard isotropic conversions to shear and bulk moduli.
        self.mu = self.E / (2 * (1 + self.nu))
        self.K = self.E / (3 * (1 - 2.0 * self.nu))
        self.model_Ev = elasticity.IsotropicLinearElasticModel(
            self.E, "youngs", self.nu, "poissons")
        self.model_GK = elasticity.IsotropicLinearElasticModel(
            self.mu, "shear", self.K, "bulk")
        self.T = 300.0

    def test_equivalent_C(self):
        """Both parameterisations produce the same stiffness matrix."""
        self.assertTrue(np.allclose(self.model_Ev.C(self.T),
                                    self.model_GK.C(self.T)))

    def test_equivalent_S(self):
        """Both parameterisations produce the same compliance matrix."""
        self.assertTrue(np.allclose(self.model_Ev.S(self.T),
                                    self.model_GK.S(self.T)))

    def test_equivalent_modulii(self):
        """Both models report identical E, nu, G and K."""
        for expected, accessor in ((self.E, "E"), (self.nu, "nu"),
                                   (self.mu, "G"), (self.K, "K")):
            for model in (self.model_Ev, self.model_GK):
                self.assertTrue(
                    np.isclose(expected, getattr(model, accessor)(self.T)))
class TestCubicModel(CommonElasticity, unittest.TestCase):
    """Cubic model defined via E, nu and the shear modulus."""
    def setUp(self):
        self.mu = 29000.0
        self.E = 120000.0
        self.nu = 0.3
        self.T = 325.0
        self.Q = rotations.Orientation(31.0, 59.0, 80.0, angle_type="degrees",
                                       convention="bunge")
        # A 90-degree rotation maps the cubic axes onto themselves.
        self.Q_cube = rotations.Orientation(90.0, 0.0, 0.0, angle_type="degrees",
                                            convention="bunge")
        self.model = elasticity.CubicLinearElasticModel(
            self.E, self.nu, self.mu, "moduli")
        self.v1 = tensors.Vector(np.array([1.0, 0.0, 0]))
        self.v2 = tensors.Vector(np.array([0.0, 1.0, 0]))

    def test_definition(self):
        """Shear modulus is recovered along the cube axes and the stiffness
        tensor is invariant under a cube-symmetry rotation."""
        self.assertTrue(np.isclose(self.model.G(self.T), self.mu))
        self.assertTrue(np.isclose(
            self.model.G(self.T, self.Q_cube, self.v1, self.v2), self.mu))
        self.assertEqual(self.model.C_tensor(self.T),
                         self.model.C_tensor(self.T, self.Q_cube))
|
{"hexsha": "fee23822f0fc0a2b43e892beba29e1f991ae2b36", "size": 3766, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_elasticity.py", "max_stars_repo_name": "ajey091/neml", "max_stars_repo_head_hexsha": "23dd2cdb83057fdd17a37fa19f4592c54f821dbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-05-06T17:04:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T20:02:22.000Z", "max_issues_repo_path": "test/test_elasticity.py", "max_issues_repo_name": "ajey091/neml", "max_issues_repo_head_hexsha": "23dd2cdb83057fdd17a37fa19f4592c54f821dbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 66, "max_issues_repo_issues_event_min_datetime": "2018-10-26T01:32:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-01T03:02:18.000Z", "max_forks_repo_path": "test/test_elasticity.py", "max_forks_repo_name": "ajey091/neml", "max_forks_repo_head_hexsha": "23dd2cdb83057fdd17a37fa19f4592c54f821dbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-11-28T17:07:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T16:57:15.000Z", "avg_line_length": 30.868852459, "max_line_length": 79, "alphanum_fraction": 0.660913436, "include": true, "reason": "import numpy", "num_tokens": 1103}
|
from functools import partial
import mmcv
import numpy as np
from six.moves import map, zip
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert a batched image tensor back to a list of uint8 HWC arrays.

    Each image along dim 0 is moved to CPU, transposed from CHW to HWC,
    de-normalized with ``mmcv.imdenormalize`` and returned as a contiguous
    uint8 array.
    """
    mean_arr = np.array(mean, dtype=np.float32)
    std_arr = np.array(std, dtype=np.float32)
    restored = []
    for idx in range(tensor.size(0)):
        chw = tensor[idx, ...].cpu().numpy()
        hwc = chw.transpose(1, 2, 0)
        img = mmcv.imdenormalize(hwc, mean_arr, std_arr,
                                 to_bgr=to_rgb).astype(np.uint8)
        restored.append(np.ascontiguousarray(img))
    return restored
def multi_apply(func, *args, **kwargs):
    """Apply ``func`` element-wise over zipped ``args`` and transpose results.

    Keyword arguments are bound once via ``partial``; the return value is a
    tuple of lists, one list per output slot of ``func``.
    """
    bound = partial(func, **kwargs) if kwargs else func
    per_call_results = map(bound, *args)
    return tuple(map(list, zip(*per_call_results)))
def unmap(data, count, inds, fill=0):
    """Scatter a subset of items back into a tensor of the original size.

    Builds a tensor with ``count`` entries along dim 0 (matching ``data``'s
    trailing shape), filled with ``fill``, and writes ``data`` at positions
    ``inds``.
    """
    if data.dim() != 1:
        full_shape = (count, ) + data.size()[1:]
        ret = data.new_full(full_shape, fill)
        ret[inds, :] = data
    else:
        ret = data.new_full((count, ), fill)
        ret[inds] = data
    return ret
def vid_result2txt(results, savepath, id_maps=None):
    """Dump per-frame detection results to a plain-text file.

    Args:
        results: list of dicts, each with keys ``'result'`` (a list of
            per-frame detections, where each frame is a length-30 list of
            ``(n, 5)`` arrays ``[x1, y1, x2, y2, score]``) and ``'ids'``
            (frame identifiers, same length as ``'result'``).
        savepath: output file path; the file is overwritten.
        id_maps: optional dict remapping model class index -> output class
            id. Defaults to the 30-class VID mapping used previously.

    Each output line has the form ``frame_id class_id score x1 y1 x2 y2``.

    Raises:
        ValueError: if ``ids``/``result`` lengths disagree, or a frame does
            not contain one entry per class in ``id_maps``.
    """
    if id_maps is None:
        # Default mapping from model class index (0-29) to VID class id.
        id_maps = {0: 12, 1: 13, 2: 23, 3: 8, 4: 7, 5: 0, 6: 25, 7: 19,
                   8: 20, 9: 6, 10: 5, 11: 17, 12: 27, 13: 28, 14: 4,
                   15: 10, 16: 16, 17: 2, 18: 15, 19: 26, 20: 18, 21: 3,
                   22: 11, 23: 1, 24: 9, 25: 22, 26: 21, 27: 24, 28: 29,
                   29: 14}
    # 'w' (not 'w+'): we only write, and it still truncates existing files.
    with open(savepath, 'w') as outfile:
        for each_snip in results:
            result = each_snip['result']
            ids = each_snip['ids']
            # Explicit validation instead of assert, which vanishes under -O.
            if len(ids) != len(result):
                raise ValueError('ids and result must have equal length')
            for idx, each_frame in enumerate(result):
                if len(each_frame) != len(id_maps):
                    raise ValueError(
                        'expected {} classes per frame, got {}'.format(
                            len(id_maps), len(each_frame)))
                frame_id = ids[idx]
                for i, each_class in enumerate(each_frame):
                    if each_class.size == 0:
                        continue  # no detections for this class in the frame
                    clss = id_maps[i]  # class-index -> output class id
                    for box in each_class:
                        txt = '{} {} {} {} {} {} {}'.format(
                            frame_id, clss, box[-1], box[0], box[1],
                            box[2], box[3])
                        print(txt, file=outfile)
|
{"hexsha": "1921ec5beff20a368a32a2d7c953f59a696b9fa5", "size": 2496, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmdet/core/utils/misc.py", "max_stars_repo_name": "youshyee/Greatape-Detection", "max_stars_repo_head_hexsha": "333b63d8f76538659bcd2bc6022128830a7a435b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-22T16:47:16.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-22T16:47:16.000Z", "max_issues_repo_path": "mmdet/core/utils/misc.py", "max_issues_repo_name": "youshyee/Greatape-Detection", "max_issues_repo_head_hexsha": "333b63d8f76538659bcd2bc6022128830a7a435b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mmdet/core/utils/misc.py", "max_forks_repo_name": "youshyee/Greatape-Detection", "max_forks_repo_head_hexsha": "333b63d8f76538659bcd2bc6022128830a7a435b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0449438202, "max_line_length": 112, "alphanum_fraction": 0.4635416667, "include": true, "reason": "import numpy", "num_tokens": 703}
|
import requests
import os
from datetime import datetime
import json
from bs4 import BeautifulSoup as bs
import time
import random
import numpy
class bcolors:
    # ANSI escape sequences used to color terminal status messages.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'     # reset — terminates any of the codes above
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class MyIGBot:
def __init__(self, username, password, use_cookie = True):
self.username = username
self.password = password
self.use_cookie = use_cookie
self.path = os.getcwd()
if use_cookie == False or os.path.exists(self.path+f'//cookie_{self.username}.bot') == False:
link = 'https://www.instagram.com/accounts/login/'
login_url = 'https://www.instagram.com/accounts/login/ajax/'
time_now = int(datetime.now().timestamp())
response = requests.get(link)
csrf = response.cookies['csrftoken']
payload = {
'username': self.username,
'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{time_now}:{self.password}',
'queryParams': {},
'optIntoOneTap': 'false'
}
login_header = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://www.instagram.com/accounts/login/",
"x-csrftoken": csrf
}
login_response = requests.post(login_url, data=payload, headers=login_header)
json_data = json.loads(login_response.text)
cookies = login_response.cookies
cookie_jar = cookies.get_dict()
self.csrf_token = cookie_jar['csrftoken']
try:
if json_data["authenticated"]:
pass
else:
print(bcolors.FAIL+"[✗] Login Failed!"+bcolors.ENDC, login_response.text)
quit()
except KeyError:
try:
if json_data["two_factor_required"]:
self.ig_nrcb = cookie_jar['ig_nrcb']
self.ig_did = cookie_jar['ig_did']
self.mid = cookie_jar['mid']
otp = input(bcolors.OKBLUE+'[!] Two Factor Auth. Detected! Enter Code Here: '+bcolors.ENDC)
twofactor_url = 'https://www.instagram.com/accounts/login/ajax/two_factor/'
twofactor_payload = {
'username': self.username,
'verificationCode': otp,
'identifier': json_data["two_factor_info"]["two_factor_identifier"],
'queryParams': {}
}
twofactor_header = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded",
"cookie": 'ig_did='+self.ig_did+'; ig_nrcb='+self.ig_nrcb+'; csrftoken='+self.csrf_token+'; mid='+self.mid,
"origin": "https://www.instagram.com",
"referer": "https://www.instagram.com/accounts/login/two_factor?next=%2F",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"x-csrftoken": self.csrf_token,
"x-ig-app-id": "936619743392459",
"x-ig-www-claim": "0",
"x-instagram-ajax": "00c4537694a4",
"x-requested-with": "XMLHttpRequest"
}
login_response = requests.post(twofactor_url, data=twofactor_payload, headers=twofactor_header)
if login_response.ok:
pass
else:
print(bcolors.FAIL+"[✗] Login Failed!"+bcolors.ENDC)
quit()
except KeyError:
print(bcolors.FAIL+'[✗] Login Failed! Try Again After Few Minutes!'+bcolors.ENDC)
quit()
self.sessionid = login_response.headers['Set-Cookie'].split('sessionid=')[1].split(';')[0]
self.cookie = "sessionid=" + self.sessionid + "; csrftoken=" + self.csrf_token + ";"
create_cookie = open(self.path+f'//cookie_{self.username}.bot', 'w+', encoding='utf-8')
create_cookie.write(self.cookie)
create_cookie.close()
self.session = requests.session()
cookie_obj = requests.cookies.create_cookie(
name='sessionid', secure=True, value=self.sessionid)
self.session.cookies.set_cookie(cookie_obj)
elif os.path.exists(self.path+f'//cookie_{self.username}.bot'):
try:
read_cookie = open(self.path+f'//cookie_{self.username}.bot', 'r', encoding='utf-8')
self.cookie = read_cookie.read()
read_cookie.close()
homelink = 'https://www.instagram.com/op/'
self.session = requests.session()
self.sessionid = self.cookie.split('=')[1].split(';')[0]
self.csrf_token = self.cookie.split('=')[2].replace(';', '')
cookie_obj = requests.cookies.create_cookie(
name='sessionid', secure=True, value=self.sessionid)
self.session.cookies.set_cookie(cookie_obj)
login_response = self.session.get(homelink)
time.sleep(1)
soup = bs(login_response.text, 'html.parser')
soup.find("strong", {"class": "-cx-PRIVATE-NavBar__username -cx-PRIVATE-NavBar__username__"}).get_text()
except AttributeError:
print(bcolors.FAIL+"[✗] Login Failed! Cookie file is corupted!"+bcolors.ENDC)
os.remove(self.path+f'//cookie_{self.username}.bot')
print(bcolors.WARNING+"[-] Deleted Corupted Cookie File! Try Again!"+bcolors.ENDC)
quit()
if use_cookie == False or os.path.exists(self.path+f'//m.cookie_{self.username}.bot') == False:
link = 'https://www.instagram.com/accounts/login/'
login_url = 'https://i.instagram.com/api/v1/accounts/login/'
time_now = int(datetime.now().timestamp())
response = requests.get(link)
csrf = response.cookies['csrftoken']
payload = {'username': username,'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{time_now}:{self.password}',"_csrftoken":csrf,"adid":"","device_id":"android-accbc71ffccdc11a","google_tokens":"[]"}
login_header = {
'X-IG-App-Locale': 'en_US',
'X-IG-Device-Locale': 'en_US',
'X-IG-Mapped-Locale': 'en_US',
'X-Bloks-Is-Layout-RTL': 'false',
'X-Bloks-Is-Panorama-Enabled': 'false',
'X-IG-Connection-Type': 'WIFI',
'X-IG-Capabilities': '3brTvx8=',
'X-IG-App-ID': '567067343352427',
'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
'Accept-Language': 'en-US',
'Cookie': f'csrftoken={csrf};',
'Authorization': 'Bearer',
'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
'IG-U-SHBID': '15274',
'IG-U-RUR': 'RVA',
'Accept-Encoding': 'gzip, deflate',
'Host': 'i.instagram.com',
'X-FB-HTTP-Engine': 'Liger',
'X-FB-Client-IP': 'True',
'Connection': 'close'
}
login_response = requests.post(login_url, data=payload, headers=login_header)
json_data = json.loads(login_response.text)
cookies = login_response.cookies
cookie_jar = cookies.get_dict()
self.mcsrf_token = cookie_jar['csrftoken']
try:
if json_data["logged_in_user"]:
pass
else:
pass
except KeyError:
try:
if json_data["two_factor_required"]:
self.mmid = cookie_jar['mid']
otp = input(bcolors.OKBLUE+'[!] Two Factor Auth. Detected! Enter Code Here: '+bcolors.ENDC)
twofactor_url = 'https://i.instagram.com/api/v1/accounts/two_factor_login/'
twofactor_payload = {"verification_code":otp,"_csrftoken":self.mcsrf_token,"two_factor_identifier":json_data["two_factor_info"]["two_factor_identifier"],"username":self.username,"device_id":"android-accbc71ffccdc11a"}
twofactor_header = {
'X-IG-App-Locale': 'en_US',
'X-IG-Device-Locale': 'en_US',
'X-IG-Mapped-Locale': 'en_US',
'X-Bloks-Is-Layout-RTL': 'false',
'X-Bloks-Is-Panorama-Enabled': 'true',
'X-IG-Connection-Type': 'WIFI',
'X-IG-Capabilities': '3brTvx8=',
'X-IG-App-ID': '567067343352427',
'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
'Accept-Language': 'en-US',
'Cookie': f'mid={self.mmid}; csrftoken={self.mcsrf_token}',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept-Encoding': 'gzip, deflate',
'Host': 'i.instagram.com',
'X-FB-HTTP-Engine': 'Liger',
'X-FB-Client-IP': 'True',
'Connection': 'close'
}
login_response = requests.post(twofactor_url, data=twofactor_payload, headers=twofactor_header)
if login_response.ok:
pass
else:
print(bcolors.FAIL+"[✗] Login Failed!"+bcolors.ENDC)
quit()
except KeyError:
print(bcolors.FAIL+"[✗] Login Failed!"+bcolors.ENDC)
quit()
self.msessionid = login_response.headers['Set-Cookie'].split('sessionid=')[1].split(';')[0]
self.mcookie = "sessionid=" + self.msessionid + "; csrftoken=" + self.mcsrf_token + ";"
create_cookie = open(self.path+f'//m.cookie_{self.username}.bot', 'w+', encoding='utf-8')
create_cookie.write(self.mcookie)
create_cookie.close()
self.msession = requests.session()
cookie_obj = requests.cookies.create_cookie(
name='sessionid', secure=True, value=self.msessionid)
self.msession.cookies.set_cookie(cookie_obj)
elif os.path.exists(self.path+f'//m.cookie_{self.username}.bot'):
try:
read_cookie = open(self.path+f'//m.cookie_{self.username}.bot', 'r', encoding='utf-8')
self.mcookie = read_cookie.read()
read_cookie.close()
homelink = 'https://www.instagram.com/op/'
self.msession = requests.session()
self.msessionid = self.mcookie.split('=')[1].split(';')[0]
self.mcsrf_token = self.mcookie.split('=')[2].replace(';', '')
cookie_obj = requests.cookies.create_cookie(
name='sessionid', secure=True, value=self.msessionid)
self.msession.cookies.set_cookie(cookie_obj)
login_response = self.msession.get(homelink)
time.sleep(1)
soup = bs(login_response.text, 'html.parser')
soup.find("strong", {"class": "-cx-PRIVATE-NavBar__username -cx-PRIVATE-NavBar__username__"}).get_text()
except AttributeError:
print(bcolors.FAIL+"[✗] Login Failed! Cookie file is corupted!"+bcolors.ENDC)
os.remove(self.path+f'//cookie_{self.username}.bot')
print(bcolors.WARNING+"[-] Deleted Corupted Cookie File! Try Again!"+bcolors.ENDC)
quit()
    def already_liked(self, url):
        """Return True if the logged-in user has already liked the post at
        ``url`` (a ``/p/`` or ``/tv/`` permalink).

        Scrapes the post page and reads ``viewer_has_liked`` from the JSON
        embedded in the page's ``__additionalDataLoaded`` script tag.
        """
        resp = self.session.get(url)
        time.sleep(1)
        soup = bs(resp.text, 'html.parser')
        scripts = soup.find_all('script')
        # NOTE(review): assumes the data script is always the 16th <script>
        # tag — fragile against page-layout changes.
        data_script = str(scripts[15])
        time.sleep(1)
        try:
            # Regular post permalink (/p/<shortcode>/).
            shortcode = url.split('/p/')[1].replace('/', '')
            data_script = data_script.replace(
                f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
        except:
            # Fallback: IGTV permalink (/tv/<shortcode>/).
            shortcode = url.split('/tv/')[1].replace('/', '')
            data_script = data_script.replace(
                f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
        data_object = data_script.replace(");</script>", '')
        data_json = json.loads(data_object)
        liked = data_json["graphql"]["shortcode_media"]["viewer_has_liked"]
        return bool(liked)
    def like(self, url):
        """Like the post at ``url`` via the web likes endpoint.

        Skips posts already liked by the viewer. Returns a colored status
        string (success / already liked / failure); never raises.
        """
        try:
            if self.already_liked(url) == False:
                # Scrape the post page to recover the numeric media id.
                resp = self.session.get(url)
                time.sleep(1)
                soup = bs(resp.text, 'html.parser')
                scripts = soup.find_all('script')
                data_script = str(scripts[15])
                time.sleep(1)
                try:
                    shortcode = url.split('/p/')[1].replace('/', '')
                    data_script = data_script.replace(
                        f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
                except:
                    shortcode = url.split('/tv/')[1].replace('/', '')
                    data_script = data_script.replace(
                        f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
                data_object = data_script.replace(");</script>", '')
                data_json = json.loads(data_object)
                id_post = data_json["graphql"]["shortcode_media"]["id"]
                url_post = f"https://www.instagram.com/web/likes/{id_post}/like/"
                headers = {
                    "accept": "*/*",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "en-US,en;q=0.9",
                    "content-length": "0",
                    "content-type": "application/x-www-form-urlencoded",
                    "cookie": self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": url,
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest"
                }
                response = requests.request("POST", url_post, headers=headers)
                if response.status_code != 200:
                    return bcolors.FAIL+'[✗] Unable to Like Post! url: '+bcolors.ENDC+url
            else:
                return bcolors.OKCYAN+'[i] Post Already Liked! url: '+bcolors.ENDC+url
        except:
            # Any scrape/parse/network error is reported as a failure string.
            return bcolors.FAIL+'[✗] Unable to Like Post! url: '+bcolors.ENDC+url
        return bcolors.OKGREEN+"[✓] Liked Post Successfully! url: "+bcolors.ENDC+url
    def unlike(self, url):
        """Remove the viewer's like from the post at ``url``.

        Mirror image of :meth:`like`: only acts when the post is currently
        liked, and returns a colored status string; never raises.
        """
        try:
            if self.already_liked(url) == True:
                # Scrape the post page to recover the numeric media id.
                resp = self.session.get(url)
                time.sleep(1)
                soup = bs(resp.text, 'html.parser')
                scripts = soup.find_all('script')
                data_script = str(scripts[15])
                time.sleep(1)
                try:
                    shortcode = url.split('/p/')[1].replace('/', '')
                    data_script = data_script.replace(
                        f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
                except:
                    shortcode = url.split('/tv/')[1].replace('/', '')
                    data_script = data_script.replace(
                        f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
                data_object = data_script.replace(");</script>", '')
                data_json = json.loads(data_object)
                id_post = data_json["graphql"]["shortcode_media"]["id"]
                url_post = f"https://www.instagram.com/web/likes/{id_post}/unlike/"
                headers = {
                    "accept": "*/*",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "en-US,en;q=0.9",
                    "content-length": "0",
                    "content-type": "application/x-www-form-urlencoded",
                    "cookie": self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": url,
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest"
                }
                response = requests.request("POST", url_post, headers=headers)
                if response.status_code != 200:
                    return bcolors.FAIL+'[✗] Unable to Unlike Post! url: '+bcolors.ENDC+url
            else:
                return bcolors.OKCYAN+'[i] Post Already Unliked! url: '+bcolors.ENDC+url
        except:
            # Any scrape/parse/network error is reported as a failure string.
            return bcolors.FAIL+'[✗] Unable to Unlike Post! url: '+bcolors.ENDC+url
        return bcolors.OKGREEN+"[✓] Unliked Post Successfully! url: "+bcolors.ENDC+url
def like_recent(self, username):
resp = self.session.get('https://www.instagram.com/'+username+'/')
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
try:
shortcode = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']["edge_owner_to_timeline_media"]["edges"][0]["node"]["shortcode"]
self.like('https://www.instagram.com/p/'+shortcode+'/')
except IndexError:
return bcolors.FAIL+'[✗] No Post Found! username: '+bcolors.ENDC+username
except KeyError:
return bcolors.FAIL+'[✗] Invalid username! username: '+bcolors.ENDC+username
    def comment(self, url, comment_text):
        """Post ``comment_text`` on the post at ``url`` via the web comments
        endpoint. Returns a colored status string; never raises.
        """
        try:
            # Scrape the post page to recover the numeric media id.
            resp = self.session.get(url)
            time.sleep(1)
            soup = bs(resp.text, 'html.parser')
            scripts = soup.find_all('script')
            data_script = str(scripts[15])
            time.sleep(1)
            try:
                shortcode = url.split('/p/')[1].replace('/', '')
                data_script = data_script.replace(
                    f'''<script type="text/javascript">window.__additionalDataLoaded('/p/{shortcode}/',''', '')
            except:
                shortcode = url.split('/tv/')[1].replace('/', '')
                data_script = data_script.replace(
                    f'''<script type="text/javascript">window.__additionalDataLoaded('/tv/{shortcode}/',''', '')
            data_object = data_script.replace(");</script>", '')
            data_json = json.loads(data_object)
            id_post = data_json["graphql"]["shortcode_media"]["id"]
            url_post = f"https://www.instagram.com/web/comments/{id_post}/add/"
            headers = {
                "accept": "*/*",
                "accept-encoding": "gzip, deflate, br",
                "accept-language": "en-US,en;q=0.9",
                "content-length": "39",
                "content-type": "application/x-www-form-urlencoded",
                "cookie": self.cookie,
                "origin": "https://www.instagram.com",
                "referer": url,
                "sec-fetch-dest": "empty",
                "sec-fetch-mode": "cors",
                "sec-fetch-site": "same-origin",
                "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                "x-csrftoken": self.csrf_token,
                "x-ig-app-id": "936619743392459",
                "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                "x-instagram-ajax": "d3d3aea32e75",
                "x-requested-with": "XMLHttpRequest"
            }
            response = requests.request("POST", url_post, headers=headers, data=f"comment_text={comment_text}&replied_to_comment_id=".encode('utf-8'))
            if response.status_code != 200:
                return bcolors.FAIL+'[✗] Unable to Comment on The Post! url: '+bcolors.ENDC+url
        except:
            # Any scrape/parse/network error is reported as a failure string.
            return bcolors.FAIL+'[✗] Unable to Comment on The Post! url: '+bcolors.ENDC+url
        return bcolors.OKGREEN+"[✓] Commented on The Post Successfully! url: "+bcolors.ENDC+url
def comment_recent(self, username, comment_text):
resp = self.session.get('https://www.instagram.com/'+username+'/')
time.sleep(1)
soup = bs(resp.text, 'html.parser')
scripts = soup.find_all('script')
try:
data_script = str(scripts[4])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
except:
data_script = str(scripts[3])
time.sleep(1)
data_script = data_script.replace(
'''<script type="text/javascript">window._sharedData = ''', '')
data_object = data_script.replace(";</script>", '')
data_json = json.loads(data_object)
try:
shortcode = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']["edge_owner_to_timeline_media"]["edges"][0]["node"]["shortcode"]
self.comment('https://www.instagram.com/p/'+shortcode+'/', comment_text)
except IndexError:
return bcolors.FAIL+'[✗] No Post Found! username: '+bcolors.ENDC+username
except KeyError:
return bcolors.FAIL+'[✗] Invalid username! username: '+bcolors.ENDC+username
    def already_followed(self, username):
        """Return True if the viewer already follows ``username``.

        Reads ``followed_by_viewer`` from the ``window._sharedData`` JSON
        embedded in the profile page.
        """
        resp = self.session.get('https://www.instagram.com/'+username+'/')
        time.sleep(1)
        soup = bs(resp.text, 'html.parser')
        scripts = soup.find_all('script')
        # The shared-data <script> index varies (4 or 3) between page builds.
        try:
            data_script = str(scripts[4])
            time.sleep(1)
            data_script = data_script.replace(
                '''<script type="text/javascript">window._sharedData = ''', '')
            data_object = data_script.replace(";</script>", '')
            data_json = json.loads(data_object)
        except:
            data_script = str(scripts[3])
            time.sleep(1)
            data_script = data_script.replace(
                '''<script type="text/javascript">window._sharedData = ''', '')
            data_object = data_script.replace(";</script>", '')
            data_json = json.loads(data_object)
        followed = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['followed_by_viewer']
        return bool(followed)
    def follow(self, username):
        """Follow ``username`` via the web friendships endpoint.

        Skips users already followed. Returns a colored status string;
        raises nothing (KeyError during parsing maps to an error string).
        """
        try:
            if self.already_followed(username) == False:
                # Scrape the profile page to recover the numeric user id.
                resp = self.session.get('https://www.instagram.com/'+username+'/')
                time.sleep(1)
                soup = bs(resp.text, 'html.parser')
                scripts = soup.find_all('script')
                try:
                    data_script = str(scripts[4])
                    time.sleep(1)
                    data_script = data_script.replace(
                        '''<script type="text/javascript">window._sharedData = ''', '')
                    data_object = data_script.replace(";</script>", '')
                    data_json = json.loads(data_object)
                except:
                    data_script = str(scripts[3])
                    time.sleep(1)
                    data_script = data_script.replace(
                        '''<script type="text/javascript">window._sharedData = ''', '')
                    data_object = data_script.replace(";</script>", '')
                    data_json = json.loads(data_object)
                id_page = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
                url_page = f"https://www.instagram.com/web/friendships/{id_page}/follow/"
                headers = {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br',
                    'accept-language': 'en-US,en;q=0.9',
                    'content-length': '0',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": f"https://www.instagram.com/{username}/",
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest"
                }
                response = requests.request("POST", url_page, headers=headers)
                if response.status_code == 200:
                    return bcolors.OKGREEN+"[✓] Followed User Successfully! username: "+bcolors.ENDC+username
                else:
                    return bcolors.FAIL+'[✗] Unable to Follow User! username: '+bcolors.ENDC+username
            else:
                return bcolors.OKCYAN+'[i] User Already Followed! username: '+bcolors.ENDC+username
        except KeyError:
            return bcolors.FAIL+'[✗] Invalid username! username: '+bcolors.ENDC+username
    def unfollow(self, username):
        """Unfollow ``username`` via the web friendships endpoint.

        Mirror image of :meth:`follow`: only acts on users currently
        followed. Returns a colored status string.
        """
        try:
            if self.already_followed(username) == True:
                # Scrape the profile page to recover the numeric user id.
                resp = self.session.get('https://www.instagram.com/'+username+'/')
                time.sleep(1)
                soup = bs(resp.text, 'html.parser')
                scripts = soup.find_all('script')
                try:
                    data_script = str(scripts[4])
                    time.sleep(1)
                    data_script = data_script.replace(
                        '''<script type="text/javascript">window._sharedData = ''', '')
                    data_object = data_script.replace(";</script>", '')
                    data_json = json.loads(data_object)
                except:
                    data_script = str(scripts[3])
                    time.sleep(1)
                    data_script = data_script.replace(
                        '''<script type="text/javascript">window._sharedData = ''', '')
                    data_object = data_script.replace(";</script>", '')
                    data_json = json.loads(data_object)
                id_page = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
                url_page = f"https://www.instagram.com/web/friendships/{id_page}/unfollow/"
                headers = {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br',
                    'accept-language': 'en-US,en;q=0.9',
                    'content-length': '0',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": f"https://www.instagram.com/{username}/",
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest"
                }
                response = requests.request("POST", url_page, headers=headers)
                if response.status_code == 200:
                    return bcolors.OKGREEN+"[✓] Unfollowed User Successfully! username: "+bcolors.ENDC+username
                else:
                    return bcolors.FAIL+'[✗] Unable to Unfollow User! username: '+bcolors.ENDC+username
            else:
                return bcolors.OKCYAN+'[i] User Already Unfollowed! username: '+bcolors.ENDC+username
        except KeyError:
            return bcolors.FAIL+'[✗] Invalid username! username: '+bcolors.ENDC+username
    def story_view(self, username):
        """Mark every current story of ``username`` as seen by the viewer.

        Resolves the user's numeric id from the profile page, fetches the
        reel via the GraphQL query-hash endpoint, then POSTs one 'seen'
        event per story item. Returns a colored status string.
        """
        try:
            resp = self.session.get('https://www.instagram.com/'+username+'/')
            time.sleep(1)
            soup = bs(resp.text, 'html.parser')
            scripts = soup.find_all('script')
            # The shared-data <script> index varies (4 or 3) between builds.
            try:
                data_script = str(scripts[4])
                time.sleep(1)
                data_script = data_script.replace(
                    '''<script type="text/javascript">window._sharedData = ''', '')
                data_object = data_script.replace(";</script>", '')
                data_json = json.loads(data_object)
            except:
                try:
                    data_script = str(scripts[3])
                    time.sleep(1)
                    data_script = data_script.replace(
                        '''<script type="text/javascript">window._sharedData = ''', '')
                    data_object = data_script.replace(";</script>", '')
                    data_json = json.loads(data_object)
                except:
                    return bcolors.FAIL+'[✗] Invalid Username!'+bcolors.ENDC
            page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
            # GraphQL reel query (query_hash endpoint) for the user's stories.
            surl = f'https://www.instagram.com/graphql/query/?query_hash=c9c56db64beb4c9dea2d17740d0259d9&variables=%7B%22reel_ids%22%3A%5B%22{page_id}%22%5D%2C%22tag_names%22%3A%5B%5D%2C%22location_ids%22%3A%5B%5D%2C%22highlight_reel_ids%22%3A%5B%5D%2C%22precomposed_overlay%22%3Afalse%2C%22show_story_viewer_list%22%3Atrue%2C%22story_viewer_fetch_count%22%3A50%2C%22story_viewer_cursor%22%3A%22%22%2C%22stories_video_dash_manifest%22%3Afalse%7D'
            resp = self.session.get(surl)
            time.sleep(1)
            soup = bs(resp.text, 'html.parser')
            data_json = json.loads(str(soup))
            story_count = len(data_json["data"]["reels_media"][0]["items"])
            # Send one 'seen' POST per story item.
            for i in range(0, story_count):
                id_story = data_json["data"]["reels_media"][0]["items"][i]['id']
                taken_at_timestamp = data_json["data"]["reels_media"][0]["items"][i]['taken_at_timestamp']
                stories_page = f"https://www.instagram.com/stories/reel/seen"
                headers = {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br',
                    'accept-language': 'en-US,en;q=0.9',
                    'content-length': '127',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": f"https://www.instagram.com/stories/{username}/{id_story}/",
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest"
                }
                data = {
                    'reelMediaId': id_story,
                    'reelMediaOwnerId': page_id,
                    'reelId': page_id,
                    'reelMediaTakenAt': taken_at_timestamp,
                    'viewSeenAt': taken_at_timestamp
                }
                # NOTE(review): the response status is never checked here.
                requests.request("POST", stories_page, headers=headers, data=data)
        except IndexError:
            return bcolors.OKCYAN+'[i] No Story Found! username: '+bcolors.ENDC+username
        except KeyError:
            return bcolors.FAIL+'[✗] Invalid username! username: '+bcolors.ENDC+username
        return bcolors.OKGREEN+"[✓] Story View Sent! username: "+bcolors.ENDC+username
    def story_poll(self, username, poll_vote='random'):
        """Vote on a poll sticker in ``username``'s current stories.

        ``poll_vote`` is 0, 1, or the string ``'random'`` (default) for a
        random choice. Marks the stories as seen first, then uses the
        mobile (i.instagram.com) API with the mobile session's cookies.
        Returns a colored status string, or None when no poll is found.
        """
        # Viewing first mirrors normal client behavior before voting.
        self.story_view(username)
        resp = self.msession.get('https://www.instagram.com/'+username+'/')
        time.sleep(1)
        soup = bs(resp.text, 'html.parser')
        scripts = soup.find_all('script')
        # The shared-data <script> index varies (4 or 3) between builds.
        try:
            data_script = str(scripts[4])
            time.sleep(1)
            data_script = data_script.replace(
                '''<script type="text/javascript">window._sharedData = ''', '')
            data_object = data_script.replace(";</script>", '')
            data_json = json.loads(data_object)
        except:
            data_script = str(scripts[3])
            time.sleep(1)
            data_script = data_script.replace(
                '''<script type="text/javascript">window._sharedData = ''', '')
            data_object = data_script.replace(";</script>", '')
            data_json = json.loads(data_object)
        page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
        # Mobile API story feed for the user (URL-encoded capability blob).
        url=f'https://i.instagram.com/api/v1/feed/user/{page_id}/story/?supported_capabilities_new=%5B%7B%22name%22%3A%22SUPPORTED_SDK_VERSIONS%22%2C%22value%22%3A%2266.0%2C67.0%2C68.0%2C69.0%2C70.0%2C71.0%2C72.0%2C73.0%2C74.0%2C75.0%2C76.0%2C77.0%2C78.0%2C79.0%2C80.0%2C81.0%2C82.0%2C83.0%2C84.0%2C85.0%2C86.0%2C87.0%2C88.0%2C89.0%2C90.0%2C91.0%2C92.0%2C93.0%2C94.0%2C95.0%2C96.0%2C97.0%2C98.0%2C99.0%2C100.0%22%7D%2C%7B%22name%22%3A%22FACE_TRACKER_VERSION%22%2C%22value%22%3A%2214%22%7D%2C%7B%22name%22%3A%22COMPRESSION%22%2C%22value%22%3A%22ETC2_COMPRESSION%22%7D%5D'
        headers= {
            'X-IG-App-Locale': 'en_US',
            'X-IG-Device-Locale': 'en_US',
            'X-IG-Mapped-Locale': 'en_US',
            'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
            'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
            'X-Bloks-Is-Layout-RTL': 'false',
            'X-Bloks-Is-Panorama-Enabled': 'false',
            'X-IG-Connection-Type': 'WIFI',
            'X-IG-Capabilities': '3brTvx8=',
            'X-IG-App-ID': '567067343352427',
            'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
            'Accept-Language': 'en-US',
            'Cookie': self.mcookie,
            'Authorization': 'Bearer',
            'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
            'IG-U-SHBID': '15274',
            'IG-U-RUR': 'RVA',
            'Accept-Encoding': 'gzip, deflate',
            'Host': 'i.instagram.com',
            'X-FB-HTTP-Engine': 'Liger',
            'X-FB-Client-IP': 'True',
            'Connection': 'close'
        }
        response = requests.get(url, headers=headers)
        jsonResponse = response.json()
        no_of_stories = len(jsonResponse['reel']['items'])
        # Collect "<poll_id>:<story_media_id>:poll" for each poll sticker.
        intaractive_stories = []
        for story_no in range(0, no_of_stories):
            if "story_polls" in jsonResponse['reel']['items'][story_no]:
                intaractive_stories.append(str(jsonResponse['reel']['items'][story_no]['story_polls'][0]['poll_sticker']['poll_id'])+':'+jsonResponse['reel']['items'][story_no]['id']+':poll')
        headers = {
            'X-IG-App-Locale': 'en_US',
            'X-IG-Device-Locale': 'en_US',
            'X-IG-Mapped-Locale': 'en_US',
            'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
            'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
            'X-Bloks-Is-Layout-RTL': 'false',
            'X-Bloks-Is-Panorama-Enabled': 'false',
            'X-IG-Connection-Type': 'WIFI',
            'X-IG-Capabilities': '3brTvx8=',
            'X-IG-App-ID': '567067343352427',
            'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
            'Accept-Language': 'en-US',
            'Cookie': self.mcookie,
            'Authorization': 'Bearer',
            'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
            'IG-U-RUR': 'RVA',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Accept-Encoding': 'gzip, deflate',
            'Host': 'i.instagram.com',
            'X-FB-HTTP-Engine': 'Liger',
            'X-FB-Client-IP': 'True',
            'Connection': 'close',
            'Content-Length': '287'
        }
        if poll_vote == 'random':
            vote = str(random.randint(0,1))
        elif poll_vote > 1 or poll_vote < 0:
            return bcolors.WARNING+'[✗] Invalid Input! poll_vote can only be 0 or 1.'+bcolors.ENDC
        else:
            vote = str(poll_vote)
        for intaractive_story in intaractive_stories:
            id_sticker, id_story, type_story = intaractive_story.split(':')
            url=f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_poll_vote/'
            data={"delivery_class":"organic","_csrftoken":self.mcsrf_token,"vote":vote,"container_module":"reel_profile"}
            response = requests.post(url, headers=headers, data=data)
            # NOTE(review): returning inside the loop means only the FIRST
            # poll story is ever voted on — confirm this is intentional.
            if response.status_code == 200:
                return bcolors.OKGREEN+"[✓] Succeeded! username: "+bcolors.ENDC+username
            else:
                return bcolors.FAIL+'[✗] Failed! username: '+bcolors.ENDC+username
def story_quiz(self, username, quiz_answer='random'):
    """Answer the quiz stickers on `username`'s current stories.

    quiz_answer: an option index (int) to submit, or 'random' to pick a
    random option that is valid for every quiz found.  Returns a coloured
    status string after the first quiz sticker is submitted.
    """
    # Mark the stories as seen first (mirrors the app's behaviour).
    self.story_view(username)
    resp = self.msession.get('https://www.instagram.com/'+username+'/')
    time.sleep(1)
    soup = bs(resp.text, 'html.parser')
    scripts = soup.find_all('script')
    # window._sharedData lives in script tag 4 or 3 depending on the page
    # variant; fall back to index 3 if index 4 fails to parse.
    try:
        data_script = str(scripts[4])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    except:
        data_script = str(scripts[3])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
    # Private mobile-API story feed for the resolved numeric user id.
    url=f'https://i.instagram.com/api/v1/feed/user/{page_id}/story/?supported_capabilities_new=%5B%7B%22name%22%3A%22SUPPORTED_SDK_VERSIONS%22%2C%22value%22%3A%2266.0%2C67.0%2C68.0%2C69.0%2C70.0%2C71.0%2C72.0%2C73.0%2C74.0%2C75.0%2C76.0%2C77.0%2C78.0%2C79.0%2C80.0%2C81.0%2C82.0%2C83.0%2C84.0%2C85.0%2C86.0%2C87.0%2C88.0%2C89.0%2C90.0%2C91.0%2C92.0%2C93.0%2C94.0%2C95.0%2C96.0%2C97.0%2C98.0%2C99.0%2C100.0%22%7D%2C%7B%22name%22%3A%22FACE_TRACKER_VERSION%22%2C%22value%22%3A%2214%22%7D%2C%7B%22name%22%3A%22COMPRESSION%22%2C%22value%22%3A%22ETC2_COMPRESSION%22%7D%5D'
    # Headers imitating the official Android client (GET variant).
    headers= {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-SHBID': '15274',
        'IG-U-RUR': 'RVA',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close'
    }
    response = requests.get(url, headers=headers)
    jsonResponse = response.json()
    no_of_stories = len(jsonResponse['reel']['items'])
    intaractive_stories = []
    no_of_options = 4
    # Collect every quiz sticker as "quiz_id:story_id:quiz" and track the
    # smallest option count seen, so a random answer is valid everywhere.
    for story_no in range(0, no_of_stories):
        if "story_quizs" in jsonResponse['reel']['items'][story_no]:
            intaractive_stories.append(str(jsonResponse['reel']['items'][story_no]['story_quizs'][0]['quiz_sticker']['quiz_id'])+':'+jsonResponse['reel']['items'][story_no]['id']+':quiz')
            if no_of_options > len(jsonResponse['reel']['items'][story_no]['story_quizs'][0]['quiz_sticker']['tallies']):
                no_of_options = len(jsonResponse['reel']['items'][story_no]['story_quizs'][0]['quiz_sticker']['tallies'])
    # POST variant of the headers (adds Content-Type / Content-Length).
    headers = {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-RUR': 'RVA',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close',
        'Content-Length': '287'
    }
    if quiz_answer == 'random':
        answer = str(random.randint(0,no_of_options-1))
    elif no_of_options-1 < quiz_answer:
        return bcolors.WARNING+f'[✗] This Poll Only Has {no_of_options} Options !'+bcolors.ENDC
    else:
        answer = str(quiz_answer)
    for intaractive_story in intaractive_stories:
        id_sticker, id_story, type_story = intaractive_story.split(':')
        url=f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_quiz_answer/'
        data={"delivery_class":"organic","answer":answer,"_csrftoken":self.mcsrf_token,"container_module":"reel_profile"}
        response = requests.post(url, headers=headers, data=data)
        # NOTE(review): returns after the first quiz story — any remaining
        # quiz stickers are never answered; confirm this is intended.
        if response.status_code == 200:
            return bcolors.OKGREEN+"[✓] Succeeded! username: "+bcolors.ENDC+username
        else:
            return bcolors.FAIL+'[✗] Failed! username: '+bcolors.ENDC+username
def story_question(self, username, question_response='random'):
    """Reply to the question stickers on `username`'s current stories.

    question_response: a string to send, a list of strings to pick from at
    random, or 'random' for a built-in greeting.  Returns a coloured status
    string after the first question sticker is answered.
    """
    # Mark the stories as seen first (mirrors the app's behaviour).
    self.story_view(username)
    resp = self.msession.get('https://www.instagram.com/'+username+'/')
    time.sleep(1)
    soup = bs(resp.text, 'html.parser')
    scripts = soup.find_all('script')
    # window._sharedData lives in script tag 4 or 3 depending on the page
    # variant; fall back to index 3 if index 4 fails to parse.
    try:
        data_script = str(scripts[4])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    except:
        data_script = str(scripts[3])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
    # Private mobile-API story feed for the resolved numeric user id.
    url=f'https://i.instagram.com/api/v1/feed/user/{page_id}/story/?supported_capabilities_new=%5B%7B%22name%22%3A%22SUPPORTED_SDK_VERSIONS%22%2C%22value%22%3A%2266.0%2C67.0%2C68.0%2C69.0%2C70.0%2C71.0%2C72.0%2C73.0%2C74.0%2C75.0%2C76.0%2C77.0%2C78.0%2C79.0%2C80.0%2C81.0%2C82.0%2C83.0%2C84.0%2C85.0%2C86.0%2C87.0%2C88.0%2C89.0%2C90.0%2C91.0%2C92.0%2C93.0%2C94.0%2C95.0%2C96.0%2C97.0%2C98.0%2C99.0%2C100.0%22%7D%2C%7B%22name%22%3A%22FACE_TRACKER_VERSION%22%2C%22value%22%3A%2214%22%7D%2C%7B%22name%22%3A%22COMPRESSION%22%2C%22value%22%3A%22ETC2_COMPRESSION%22%7D%5D'
    # Headers imitating the official Android client (GET variant).
    headers= {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-SHBID': '15274',
        'IG-U-RUR': 'RVA',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close'
    }
    response = requests.get(url, headers=headers)
    jsonResponse = response.json()
    no_of_stories = len(jsonResponse['reel']['items'])
    intaractive_stories = []
    # Collect every question sticker as "question_id:story_id:question".
    for story_no in range(0, no_of_stories):
        if "story_questions" in jsonResponse['reel']['items'][story_no]:
            intaractive_stories.append(str(jsonResponse['reel']['items'][story_no]['story_questions'][0]['question_sticker']['question_id'])+':'+jsonResponse['reel']['items'][story_no]['id']+':question')
    # POST variant of the headers (adds Content-Type / Content-Length).
    headers = {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-RUR': 'RVA',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close',
        'Content-Length': '287'
    }
    # `response` first holds the reply text; after the POST below it is
    # reused for the HTTP response object (the data dict is built first,
    # so the shadowing is harmless — but easy to misread).
    if question_response == 'random':
        response = random.choice(['Hello', 'Hi', "What's up?", 'Nice Feed'])
    elif isinstance(question_response, list):
        response = random.choice(question_response)
    else:
        response = question_response
    for intaractive_story in intaractive_stories:
        id_sticker, id_story, type_story = intaractive_story.split(':')
        url=f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_question_response/'
        data={"delivery_class":"organic","_csrftoken":self.mcsrf_token,"response":response,"container_module":"reel_profile"}
        response = requests.post(url, headers=headers, data=data)
        # NOTE(review): returns after the first question story — any
        # remaining question stickers are never answered; confirm intended.
        if response.status_code == 200:
            return bcolors.OKGREEN+"[✓] Succeeded! username: "+bcolors.ENDC+username
        else:
            return bcolors.FAIL+'[✗] Failed! username: '+bcolors.ENDC+username
def story_slider(self, username, slider_value='random'):
    """Vote on the slider stickers on `username`'s current stories.

    slider_value: a number between 0 and 100 (submitted as value/100), or
    'random' for a random position.  Returns a coloured status string after
    the first slider sticker is voted on, or a warning string on invalid
    input.
    """
    # Mark the stories as seen first (mirrors the app's behaviour).
    self.story_view(username)
    resp = self.msession.get('https://www.instagram.com/'+username+'/')
    time.sleep(1)
    soup = bs(resp.text, 'html.parser')
    scripts = soup.find_all('script')
    # window._sharedData lives in script tag 4 or 3 depending on the page
    # variant; fall back to index 3 if index 4 fails to parse.
    try:
        data_script = str(scripts[4])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    except:
        data_script = str(scripts[3])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
    # Private mobile-API story feed for the resolved numeric user id.
    url = f'https://i.instagram.com/api/v1/feed/user/{page_id}/story/?supported_capabilities_new=%5B%7B%22name%22%3A%22SUPPORTED_SDK_VERSIONS%22%2C%22value%22%3A%2266.0%2C67.0%2C68.0%2C69.0%2C70.0%2C71.0%2C72.0%2C73.0%2C74.0%2C75.0%2C76.0%2C77.0%2C78.0%2C79.0%2C80.0%2C81.0%2C82.0%2C83.0%2C84.0%2C85.0%2C86.0%2C87.0%2C88.0%2C89.0%2C90.0%2C91.0%2C92.0%2C93.0%2C94.0%2C95.0%2C96.0%2C97.0%2C98.0%2C99.0%2C100.0%22%7D%2C%7B%22name%22%3A%22FACE_TRACKER_VERSION%22%2C%22value%22%3A%2214%22%7D%2C%7B%22name%22%3A%22COMPRESSION%22%2C%22value%22%3A%22ETC2_COMPRESSION%22%7D%5D'
    # Headers imitating the official Android client (GET variant).
    headers = {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-SHBID': '15274',
        'IG-U-RUR': 'RVA',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close'
    }
    response = requests.get(url, headers=headers)
    jsonResponse = response.json()
    no_of_stories = len(jsonResponse['reel']['items'])
    intaractive_stories = []
    # Collect every slider sticker as "slider_id:story_id:slider".
    for story_no in range(0, no_of_stories):
        if "story_sliders" in jsonResponse['reel']['items'][story_no]:
            intaractive_stories.append(str(jsonResponse['reel']['items'][story_no]['story_sliders'][0]['slider_sticker']['slider_id'])+':'+jsonResponse['reel']['items'][story_no]['id']+':slider')
    # POST variant of the headers (adds Content-Type / Content-Length).
    headers = {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-RUR': 'RVA',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close',
        'Content-Length': '287'
    }
    if slider_value == 'random':
        vote = random.randint(1,99)/100
    # BUG FIX: original condition used `or`, which is true for every
    # number, so the invalid-input branch below was unreachable.
    elif slider_value >= 0 and slider_value <= 100:
        vote = slider_value/100
    else:
        return bcolors.WARNING+'[✗] Invalid Input! slider_value can only be between 0 and 100.'+bcolors.ENDC
    for intaractive_story in intaractive_stories:
        id_sticker, id_story, type_story = intaractive_story.split(':')
        url = f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_slider_vote/'
        data = {"delivery_class":"organic","_csrftoken":self.mcsrf_token,"vote":vote,"container_module":"reel_profile"}
        response = requests.post(url, headers=headers, data=data)
        # NOTE(review): returns after the first slider story — any
        # remaining slider stickers are never voted on; confirm intended.
        if response.status_code == 200:
            return bcolors.OKGREEN+"[✓] Succeeded! username: "+bcolors.ENDC+username
        else:
            return bcolors.FAIL+'[✗] Failed! username: '+bcolors.ENDC+username
def intaract_with_stories(self, username, poll=True, quiz=True, question=True, slider=True, poll_vote='random', quiz_answer='random', question_response='random', slider_value='random'):
    """Interact with the interactive stickers on `username`'s stories.

    poll/quiz/question/slider enable each sticker type.  poll_vote is 0, 1
    or 'random'; quiz_answer is an option index or 'random';
    question_response is a string, a list of strings, or 'random';
    slider_value is 0-100 or 'random'.  Returns a coloured status string
    after the first sticker is submitted, or a warning string on invalid
    input.
    """
    # Mark the stories as seen first (mirrors the app's behaviour).
    self.story_view(username)
    resp = self.msession.get('https://www.instagram.com/'+username+'/')
    time.sleep(1)
    soup = bs(resp.text, 'html.parser')
    scripts = soup.find_all('script')
    # window._sharedData lives in script tag 4 or 3 depending on the page
    # variant; fall back to index 3 if index 4 fails to parse.
    try:
        data_script = str(scripts[4])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    except:
        data_script = str(scripts[3])
        time.sleep(1)
        data_script = data_script.replace(
            '''<script type="text/javascript">window._sharedData = ''', '')
        data_object = data_script.replace(";</script>", '')
        data_json = json.loads(data_object)
    page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]['user']['id']
    # Private mobile-API story feed for the resolved numeric user id.
    url = f'https://i.instagram.com/api/v1/feed/user/{page_id}/story/?supported_capabilities_new=%5B%7B%22name%22%3A%22SUPPORTED_SDK_VERSIONS%22%2C%22value%22%3A%2266.0%2C67.0%2C68.0%2C69.0%2C70.0%2C71.0%2C72.0%2C73.0%2C74.0%2C75.0%2C76.0%2C77.0%2C78.0%2C79.0%2C80.0%2C81.0%2C82.0%2C83.0%2C84.0%2C85.0%2C86.0%2C87.0%2C88.0%2C89.0%2C90.0%2C91.0%2C92.0%2C93.0%2C94.0%2C95.0%2C96.0%2C97.0%2C98.0%2C99.0%2C100.0%22%7D%2C%7B%22name%22%3A%22FACE_TRACKER_VERSION%22%2C%22value%22%3A%2214%22%7D%2C%7B%22name%22%3A%22COMPRESSION%22%2C%22value%22%3A%22ETC2_COMPRESSION%22%7D%5D'
    # Headers imitating the official Android client; reused for the POSTs.
    headers = {
        'X-IG-App-Locale': 'en_US',
        'X-IG-Device-Locale': 'en_US',
        'X-IG-Mapped-Locale': 'en_US',
        'X-Bloks-Version-Id': '5a6434fa5b288b6b3f3e131afe8c0738e9373c529e2f1b8c36e49335ff4b2413',
        'X-IG-WWW-Claim': 'hmac.AR0-DKr695uW6c2HK7KQhKcJDPOEWD-wOQznKmaBAsJXJUdl',
        'X-Bloks-Is-Layout-RTL': 'false',
        'X-Bloks-Is-Panorama-Enabled': 'false',
        'X-IG-Connection-Type': 'WIFI',
        'X-IG-Capabilities': '3brTvx8=',
        'X-IG-App-ID': '567067343352427',
        'User-Agent': 'Instagram 165.1.0.29.119 Android (26/8.0.0; 320dpi; 768x1184; unknown/Android; Custom Phone; vbox86p; vbox86; en_US; 253447818)',
        'Accept-Language': 'en-US',
        'Cookie': self.mcookie,
        'Authorization': 'Bearer',
        'IG-U-IG-DIRECT-REGION-HINT': 'FRC',
        'IG-U-SHBID': '15274',
        'IG-U-RUR': 'RVA',
        'Accept-Encoding': 'gzip, deflate',
        'Host': 'i.instagram.com',
        'X-FB-HTTP-Engine': 'Liger',
        'X-FB-Client-IP': 'True',
        'Connection': 'close'
    }
    response = requests.get(url, headers=headers)
    jsonResponse = response.json()
    no_of_stories = len(jsonResponse['reel']['items'])
    # NOTE(review): hard-coded CSRF token overwrites the session value —
    # verify this is intentional and still valid.
    self.mcsrf_token='4uo6K9OytOGsWSTgnLSsMmoTdVT003YO'
    intaractive_stories = []
    no_of_options = 4
    # Single scan over all stories; each enabled sticker type is recorded
    # as "sticker_id:story_id:type".  BUG FIX: the original had a second
    # quiz-scan loop that re-appended every quiz sticker regardless of the
    # `quiz` flag, producing duplicate entries; the quiz option count is
    # now tracked here instead.
    for story_no in range(0, no_of_stories):
        item = jsonResponse['reel']['items'][story_no]
        if slider and "story_sliders" in item:
            intaractive_stories.append(str(item['story_sliders'][0]['slider_sticker']['slider_id'])+':'+item['id']+':slider')
        if question and "story_questions" in item:
            intaractive_stories.append(str(item['story_questions'][0]['question_sticker']['question_id'])+':'+item['id']+':question')
        if poll and "story_polls" in item:
            intaractive_stories.append(str(item['story_polls'][0]['poll_sticker']['poll_id'])+':'+item['id']+':poll')
        if quiz and "story_quizs" in item:
            intaractive_stories.append(str(item['story_quizs'][0]['quiz_sticker']['quiz_id'])+':'+item['id']+':quiz')
            if no_of_options > len(item['story_quizs'][0]['quiz_sticker']['tallies']):
                no_of_options = len(item['story_quizs'][0]['quiz_sticker']['tallies'])
    for intaractive_story in intaractive_stories:
        id_sticker, id_story, type_story = intaractive_story.split(':')
        if type_story=='slider':
            if slider_value == 'random':
                vote = random.randint(1,99)/100
            # BUG FIX: original used `or`, which is true for every number,
            # making the invalid-input branch unreachable.
            elif slider_value >= 0 and slider_value <= 100:
                vote = slider_value/100
            else:
                return bcolors.WARNING+'[✗] Invalid Input! slider_value can only be between 0 and 100.'+bcolors.ENDC
            url = f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_slider_vote/'
            data = {"delivery_class":"organic","_csrftoken":self.mcsrf_token,"vote":vote,"container_module":"reel_profile"}
        elif type_story=='question':
            if question_response == 'random':
                response = random.choice(['Hello', 'Hi', "What's up?", 'Nice Feed'])
            elif isinstance(question_response, list):
                response = random.choice(question_response)
            else:
                response = question_response
            url = f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_question_response/'
            data = {"delivery_class":"organic","_csrftoken":self.mcsrf_token,"response":response,"container_module":"reel_profile"}
        elif type_story=='poll':
            if poll_vote == 'random':
                vote = str(random.randint(0,1))
            elif poll_vote > 1 or poll_vote < 0:
                return bcolors.WARNING+'[✗] Invalid Input! poll_vote can only be 0 or 1.'+bcolors.ENDC
            else:
                vote = str(poll_vote)
            url = f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_poll_vote/'
            data = {"delivery_class":"organic","_csrftoken":self.mcsrf_token,"vote":vote,"container_module":"reel_profile"}
        elif type_story=='quiz':
            if quiz_answer == 'random':
                answer = str(random.randint(0,no_of_options-1))
            elif no_of_options-1 < quiz_answer:
                return bcolors.WARNING+f'[✗] This Poll Only Has {no_of_options} Options !'+bcolors.ENDC
            else:
                answer = str(quiz_answer)
            url = f'https://i.instagram.com/api/v1/media/{id_story}/{id_sticker}/story_quiz_answer/'
            data = {"delivery_class":"organic","answer":answer,"_csrftoken":self.mcsrf_token,"container_module":"reel_profile"}
        response = requests.post(url, headers=headers, data=data)
        # NOTE(review): returns after the first sticker — any remaining
        # stickers are never submitted; confirm this is intended.
        if response.status_code == 200:
            return bcolors.OKGREEN+"[✓] Succeeded! username: "+bcolors.ENDC+username
        else:
            return bcolors.FAIL+'[✗] Failed! username: '+bcolors.ENDC+username
def upload_post(self, photo, caption=''):
    """Upload a local photo as a feed post with an optional caption.

    photo: path to a JPEG on disk.  Returns a coloured status string for
    both the upload step and the configure step.
    """
    # Upload id doubles as the rupload entity name; seconds precision.
    micro_time = int(datetime.now().timestamp())
    headers = {
        "content-type": "image / jpg",
        "content-length": "1",
        "X-Entity-Name": f"fb_uploader_{micro_time}",
        "Offset": "0",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
        "x-entity-length": "1",
        "X-Instagram-Rupload-Params": f'{{"media_type": 1, "upload_id": {micro_time}, "upload_media_height": 1080, "upload_media_width": 1080}}',
        "x-csrftoken": self.csrf_token,
        "x-ig-app-id": "1217981644879628",
        "cookie": self.cookie
    }
    # BUG FIX: close the file handle after the upload (was left open).
    with open(photo, "rb") as photo_file:
        upload_response = requests.post(
            f'https://www.instagram.com/rupload_igphoto/fb_uploader_{micro_time}',
            data=photo_file, headers=headers)
    json_data = json.loads(upload_response.text)
    # BUG FIX: check status before reading 'upload_id' — a failed upload
    # response has no such key and the original raised KeyError here.
    if json_data["status"] != "ok":
        return bcolors.FAIL+'[✗] Failed!'+bcolors.ENDC+str(json_data)
    upload_id = json_data['upload_id']
    url = "https://www.instagram.com/create/configure/"
    # BUG FIX: percent-encode the caption — the payload is
    # x-www-form-urlencoded, so a raw '&' or '=' in the caption would
    # corrupt the form fields.
    from urllib.parse import quote
    payload = 'upload_id=' + upload_id + '&caption=' + quote(caption) + '&usertags=&custom_accessibility_caption=&retry_timeout='
    headers = {
        'authority': 'www.instagram.com',
        'x-ig-www-claim': 'hmac.AR2-43UfYbG2ZZLxh-BQ8N0rqGa-hESkcmxat2RqMAXejXE3',
        'x-instagram-ajax': 'adb961e446b7-hot',
        'content-type': 'application/x-www-form-urlencoded',
        'accept': '*/*',
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
        'x-csrftoken': self.csrf_token,
        'x-ig-app-id': '1217981644879628',
        'origin': 'https://www.instagram.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://www.instagram.com/create/details/',
        'accept-language': 'en-US,en;q=0.9,fa-IR;q=0.8,fa;q=0.7',
        'cookie': self.cookie
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    json_data = json.loads(response.text)
    if json_data["status"] == "ok":
        return bcolors.OKGREEN+"[✓] Post Shared Successfully!"+bcolors.ENDC
    else:
        # BUG FIX: str() — original concatenated a dict to a str, which
        # raised TypeError on the failure path.
        return bcolors.FAIL+'[✗] Failed!'+bcolors.ENDC+str(json_data)
def upload_story(self, photo):
    """Upload a local photo as a story.

    photo: path to a JPEG on disk.  Returns a coloured status string for
    both the upload step and the configure step.
    """
    # Upload id doubles as the rupload entity name; seconds precision.
    micro_time = int(datetime.now().timestamp())
    headers = {
        "content-type": "image / jpg",
        "content-length": "1",
        "X-Entity-Name": f"fb_uploader_{micro_time}",
        "Offset": "0",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36",
        "x-entity-length": "1",
        "X-Instagram-Rupload-Params": f'{{"media_type": 1, "upload_id": {micro_time}, "upload_media_height": 1080, "upload_media_width": 1080}}',
        "x-csrftoken": self.csrf_token,
        "x-ig-app-id": "1217981644879628",
        "cookie": self.cookie
    }
    # BUG FIX: close the file handle after the upload (was left open).
    with open(photo, "rb") as photo_file:
        upload_response = requests.post(
            f'https://www.instagram.com/rupload_igphoto/fb_uploader_{micro_time}',
            data=photo_file, headers=headers)
    json_data = json.loads(upload_response.text)
    # BUG FIX: check status before reading 'upload_id' — a failed upload
    # response has no such key and the original raised KeyError here.
    if json_data["status"] != "ok":
        return bcolors.FAIL+'[✗] Failed!'+bcolors.ENDC+str(json_data)
    upload_id = json_data['upload_id']
    url = "https://www.instagram.com/create/configure_to_story/"
    payload = 'upload_id=' + upload_id + '&caption=&usertags=&custom_accessibility_caption=&retry_timeout='
    headers = {
        'authority': 'www.instagram.com',
        'x-ig-www-claim': 'hmac.AR2-43UfYbG2ZZLxh-BQ8N0rqGa-hESkcmxat2RqMAXejXE3',
        'x-instagram-ajax': 'adb961e446b7-hot',
        'content-type': 'application/x-www-form-urlencoded',
        'accept': '*/*',
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
        'x-csrftoken': self.csrf_token,
        'x-ig-app-id': '1217981644879628',
        'origin': 'https://www.instagram.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://www.instagram.com/create/details/',
        'accept-language': 'en-US,en;q=0.9,fa-IR;q=0.8,fa;q=0.7',
        'cookie': self.cookie
    }
    response = requests.request("POST", url, headers=headers, data=payload)
    json_data = json.loads(response.text)
    if json_data["status"] == "ok":
        return bcolors.OKGREEN+"[✓] Story Shared Successfully!"+bcolors.ENDC
    else:
        # BUG FIX: str() — original concatenated a dict to a str, which
        # raised TypeError on the failure path.
        return bcolors.FAIL+'[✗] Failed!'+bcolors.ENDC+str(json_data)
|
{"hexsha": "b1dc4b81ce50670ee0ed435fd489e7dbe2144e1d", "size": 72400, "ext": "py", "lang": "Python", "max_stars_repo_path": "myigbot.py", "max_stars_repo_name": "vishaljoshi789/MyIGBot", "max_stars_repo_head_hexsha": "1fa3920e478464098bd6ece7d7828bfd2b62d3eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-18T04:39:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-18T04:39:37.000Z", "max_issues_repo_path": "myigbot.py", "max_issues_repo_name": "vishaljoshi789/MyIGBot", "max_issues_repo_head_hexsha": "1fa3920e478464098bd6ece7d7828bfd2b62d3eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "myigbot.py", "max_forks_repo_name": "vishaljoshi789/MyIGBot", "max_forks_repo_head_hexsha": "1fa3920e478464098bd6ece7d7828bfd2b62d3eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.5899333827, "max_line_length": 571, "alphanum_fraction": 0.5378729282, "include": true, "reason": "import numpy", "num_tokens": 18683}
|
"""
SCRIPT FOR TRAINING 2DCNN MODELS
Run with two arguments - arg1=region, arg2=model type
"""
import os, sys
import torch
import numpy as np
import time
from CNN import *
from Training import *
from Data_maker_loader import *
from random import randint, uniform, choice
if sys.argv[2] == "2D":
from CNN import *
else:
from ConvRNN import *
# Seed the RNG so the random hyper-parameter draws are reproducible.
# NOTE(review): only `randint, uniform, choice` are imported from `random`
# above; the module name `random` must be re-exported by one of the star
# imports (CNN/Training/Data_maker_loader) or this line raises NameError —
# verify.
random.seed(200)
# Region: first CLI argument ("Junin" only when an empty string is passed).
# NOTE(review): raises IndexError when fewer than 3 CLI args are given.
if sys.argv[1]:
    region = sys.argv[1]
else:
    region = "Junin"
print("REGION: ", region)
# Model type ("2D" or "3D"): second CLI argument.
if sys.argv[2]:
    modeltype = sys.argv[2]
else:
    modeltype = "3D"
print("MODEL TYPE: ", modeltype)
# Wall-clock reference for the total-time report at the end of the run.
start = time.time()
server = "/rds/general/user/jgb116/home/satellite/satellite/junin"
# server = '/rds/general/project/aandedemand/live/satellite/junin'
# WHERE TO IMPORT DATA FROM
# wherepath = server + '/data_reduced/tensors'
# savepath = server + '/data_reduced/tensors'
wherepath = server + "/data/" + region
savepath = server + "/data/" + region + "/out"
if not os.path.exists(savepath):
    os.makedirs(savepath)
# WHERE TO SAVE MODEL CHECKPOINT
modelpath = server + "/models/" + region + "_models/" + modeltype
if not os.path.exists(modelpath):
    os.makedirs(modelpath)
# WHERE TO SAVE IMAGES TRACKING TRAINING PROCESS
picspath = server + "/models/" + region + "_models/" + modeltype + "/pics"
if not os.path.exists(picspath):
    os.makedirs(picspath)
# WHERE TO SAVE MODEL PERFORMANCE OF EACH JOB FOR TRAIN, VAL AND TEST DATA
file = (
    server
    + "/models/"
    + region
    + "_models/"
    + modeltype
    + "/grid_summary/"
    + modeltype
    + ".txt"
)
if not os.path.exists(os.path.dirname(file)):
    os.makedirs(os.path.dirname(file))
if __name__ == "__main__":
    # Hyper-parameter search driver: each PBS array job draws a random
    # (lr, size, dropout) combination, trains one CNN, and appends its
    # scores to the shared grid-summary report.
    # Set training time period
    start_year = 14
    end_year = 17
    # set CNN model parameters
    # size = 45
    # NOTE(review): `sizes` and `dropouts` are never read below — the
    # per-job values come from randint/uniform instead; possibly dead code.
    sizes = [45, 49, 55, 59]
    DSM = False
    # CHOOSE THE INPUT DIMENSIONS - No DSM is (2,8). With DSM is (3,8)
    if DSM:
        input_dim = 11
    else:
        input_dim = 10
    # Need 4 for 2DCNN
    hidden_dim = [64, 128, 128]
    # Different for 2D/3D models
    # kernel_size = [(3, 3), (2, 3, 3), (3, 3)]
    kernel_size = [(5, 5), (5, 5), (3, 3), (3, 3)]
    stride = [(2, 2), (1, 1), (1, 1), (1, 1)]
    padding = [0, 0, 0, 0]
    # dropout = 0.4
    dropouts = [0.2, 0.3, 0.4, 0.5]  # 3 options
    levels = [10]
    # set ratios of 0:1 labels in Train and Validation data sets
    train_times = 4
    test_times = 4
    # set criteria for Early stopping
    AUC = True
    BCE_Wloss = False
    FNcond = False
    # set parameters for the cost of the confusion matrix
    w = 10  # weights on the False Negative Rate
    perc = (100 * train_times) / (
        train_times + 1
    )  # the percentile to for treshold selection. Advisable to be 100*times/(times+1)
    # Weight parameter for the weighted BCE/CE loss
    pos_weight = 2
    # pos_weight = torch.tensor([1, pos_weight], dtype=torch.float, device="cuda:0")
    # Adam optimiser parameters:
    lrs = [0.000005, 0.00001, 0.00003, 0.00006, 0.00008, 0.0001, 0.0003, 0.001]
    weight_decay = 0
    # Early Stopping parameters
    n_splits = 5
    n_epochs = 20
    patience = 7
    training_time = (
        23.5  # Time in hours (needs to be less than 24 for GPUs in Imperial HPC)
    )
    # train_model parameters for debbuging and time regulations
    stop_batch = None
    print_batch = 1000
    batch_size = 32
    # batch_size = 1024
    # To explote different parameters in parallel: the PBS array index
    # selects the job slot; outside PBS, default to job 3.
    if "PBS_ARRAY_INDEX" in os.environ:
        job = int(os.environ["PBS_ARRAY_INDEX"])
    else:
        job = 3
    if "PBS_JOBID" in os.environ:
        job_id = str(os.environ["PBS_JOBID"])
    else:
        job_id = "1"
    # Every job slot draws the same way: lr from the grid, an odd image
    # size in [41, 81], and a dropout in [0.1, 0.8] rounded to 1 d.p.
    if job == 1:
        print("default params")
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 2:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 3:
        # end_year = 17
        # print("Settings to make it run a lot faster for debugging purposes...")
        # input_dim=(3,8)
        # hidden_dim=(16,32,32)
        # kernel_size=((5,5),(2,5,5),(5,5))
        # levels=(10,)
        # training_time = 30
        # stop_batch = 10
        # print_batch = 1
        # batch_size = 64
        # stride = [(2,2),(1,1),(1,1),(1,1)]
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 4:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 5:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 6:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 7:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 8:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 9:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 10:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 11:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    if job == 12:
        lr = choice(lrs)
        size = (2 * randint(20, 40)) + 1
        dropout = round(uniform(0.1, 0.8), 1)
    # Build the network (CNNmodel comes from the conditional import at the
    # top: CNN for "2D", ConvRNN otherwise) and wrap for multi-GPU.
    model = CNNmodel(
        input_dim=input_dim,
        hidden_dim=hidden_dim,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dropout=dropout,
        levels=levels,
    )
    model = torch.nn.DataParallel(model)
    # Set loss criterion and optimiser type
    # criterion = torch.nn.CrossEntropyLoss(reduction='mean', weight = pos_weight)
    criterion = torch.nn.BCEWithLogitsLoss(
        reduction="mean", pos_weight=torch.tensor(pos_weight)
    )
    optimiser = torch.optim.Adam(
        params=model.parameters(), lr=lr, weight_decay=weight_decay
    )
    # Load data
    Data = with_DSM(
        size=int(size / 2),
        start_year=start_year,
        end_year=end_year,
        wherepath=wherepath,
        DSM=DSM,
        type=modeltype,
    )
    # Reuse a cached stratified train/test index split if one exists, so
    # all jobs in the array evaluate on identical splits.
    # NOTE(review): `&` on the two bools works but `and` is conventional;
    # `train_test_split` is not imported here — presumably re-exported by
    # one of the star imports, verify.
    if not (
        os.path.isfile(wherepath + "/" + "Train_idx%d.npy" % (end_year))
        & os.path.isfile(wherepath + "/" + "Test_idx%d.npy" % (end_year))
    ):
        print("Creating indexes split")
        train_idx, test_idx = train_test_split(
            np.arange(len(Data.labels)),
            test_size=0.2,
            random_state=42,
            shuffle=True,
            stratify=Data.labels,
        )
        np.save(wherepath + "/" + "Train_idx%d.npy" % (end_year), train_idx)
        np.save(wherepath + "/" + "Test_idx%d.npy" % (end_year), test_idx)
    else:
        print("loading: " + wherepath + "/" + "Train_idx%d.npy" % (end_year))
        train_idx = np.load(wherepath + "/" + "Train_idx%d.npy" % (end_year))
        test_idx = np.load(wherepath + "/" + "Test_idx%d.npy" % (end_year))
    # Under-sample the majority class to the configured 0:1 ratios.
    train_sampler = ImbalancedDatasetUnderSampler(
        labels=Data.labels, indices=train_idx, times=train_times
    )
    test_sampler = ImbalancedDatasetUnderSampler(
        labels=Data.labels, indices=test_idx, times=test_times
    )
    # Print model and training details
    print(
        "Model:",
        str(type(model))[8:-2],
        "\nPeriod 20%d-20%d -> 20%d" % (start_year, end_year, end_year + 1),
    )
    print(
        "\t% deforested pixels in train:",
        train_sampler.count[1] / sum(train_sampler.count),
    )
    print(
        "\t% deforested pixels in val:", test_sampler.count[1] / sum(test_sampler.count)
    )
    print("Job: ", job_id)
    print("DSM:", DSM)
    print("\nHyperparameters: ")
    print("\tImage size: %d" % (size))
    print("\tHidden dim: ", hidden_dim)
    print("\tDropout: ", dropout)
    print(
        "\tTrain and Val ratios of 0:1 labels: 1:%d ; 1:%d " % (train_times, test_times)
    )
    print(
        "\tADAM optimizer parameters: lr=%.7f, weight decay=%.2f, batch size=%d"
        % (lr, weight_decay, batch_size)
    )
    print("\tBCEWithLogitsLoss pos_weights = %.2f" % (pos_weight))
    print("\tn_epochs = %d with patience of %d epochs" % (n_epochs, patience))
    print("\tCross Validation with n_splits = %d " % (n_splits))
    print(
        "\tIf to use BCEWithLogitsLoss as an early stop criterion :",
        ((not AUC) & (not FNcond)),
    )
    print("\tIf to use AUC as an early stop criterion :", AUC)
    print("\tIf to use cost = FP+w*FN / TP+FP+w*FN+TN as an early stop criterion")
    print(
        "\twith w = %d and treshhold = the %d percentile of the output" % (w, perc),
        FNcond,
    )
    print("\nModel: \n", model)
    print("\nCriterion: \n", criterion)
    print("\nOptimiser: \n", optimiser)
    # Initiate training routine
    (
        model,
        train_loss,
        valid_loss,
        AUCs_train,
        AUCs_val,
        costs_train,
        costs_val,
        name,
    ) = train_model(
        Data=Data,
        model=model,
        sampler=train_sampler,
        criterion=criterion,
        optimiser=optimiser,
        patience=patience,
        n_epochs=n_epochs,
        n_splits=n_splits,
        batch_size=batch_size,
        stop_batch=stop_batch,
        print_batch=print_batch,
        training_time=training_time,
        w=w,
        FNcond=FNcond,
        AUC=AUC,
        job=job_id,
        path=modelpath,
    )
    # Produce graphs of the loss / AUC / cost curves for this job.
    visualize(
        train=train_loss,
        valid=valid_loss,
        name="BCEloss",
        modelname=name,
        best="min",
        path=picspath,
    )
    visualize(
        train=AUCs_train,
        valid=AUCs_val,
        name="AUC",
        modelname=name,
        best="max",
        path=picspath,
    )
    visualize(
        train=costs_train,
        valid=costs_val,
        name="Cost",
        modelname=name,
        best="min",
        path=picspath,
    )
    # Final held-out evaluation, then append all scores to the report.
    test_loss, test_AUC, test_cost = test_model(
        model=model,
        Data=Data,
        criterion=criterion,
        w=w,
        perc=perc,
        test_sampler=test_sampler,
        batch_size=batch_size,
        stop_batch=stop_batch,
        name=name,
        path=picspath,
    )
    write_report(
        name=name,
        job_id=job_id,
        train_loss=train_loss,
        valid_loss=valid_loss,
        test_loss=test_loss,
        AUCs_train=AUCs_train,
        AUCs_val=AUCs_val,
        test_AUC=test_AUC,
        costs_train=costs_train,
        costs_val=costs_val,
        test_cost=test_cost,
        file=file,
        FNcond=FNcond,
        AUC=AUC,
    )
    print("\n\nEND!Total time (in h):", (time.time() - start) / 3600)
|
{"hexsha": "6748bf14deb0144e8563fb8b269a59fbaf75a6ad", "size": 11082, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/2DCNN_training.py", "max_stars_repo_name": "PatBall1/DeepForestcast", "max_stars_repo_head_hexsha": "f9444490d71b89aa7823e830cf7fbe6752c74d9a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/2DCNN_training.py", "max_issues_repo_name": "PatBall1/DeepForestcast", "max_issues_repo_head_hexsha": "f9444490d71b89aa7823e830cf7fbe6752c74d9a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-05T10:35:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T10:35:48.000Z", "max_forks_repo_path": "src/models/2DCNN_training.py", "max_forks_repo_name": "PatBall1/DeepForestcast", "max_forks_repo_head_hexsha": "f9444490d71b89aa7823e830cf7fbe6752c74d9a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5671641791, "max_line_length": 88, "alphanum_fraction": 0.5675870781, "include": true, "reason": "import numpy", "num_tokens": 3268}
|
# Copyright - Transporation, Bots, and Disability Lab - Carnegie Mellon University
# Released under MIT License
"""
Common 2D Rotation Operations
"""
from .basic import *
import numpy as np
__all__ = [
'clip_radian_rotation', 'find_rotation', "theta_to_clock",
'find_theta_distance', 'deg_to_theta'
]
def clip_radian_rotation(rad: float) -> float:
    """Normalize an angle in radians to the half-open interval (-pi, pi].

    Parameters
    ----------
    rad : float
        Angle in radians; any whole extra revolutions are removed.

    Returns
    -------
    float
        Equivalent angle within (-pi, pi].
    """
    full_turn = np.pi * 2
    # Peel off whole revolutions until the angle lands in (-pi, pi].
    while rad > np.pi:
        rad -= full_turn
    while rad <= -np.pi:
        rad += full_turn
    return rad
def deg_to_theta(deg: float) -> float:
    """Convert degrees to a radian angle clipped into (-pi, pi].

    Parameters
    ----------
    deg : float
        Angle in degrees.

    Returns
    -------
    float
        Equivalent angle in radians within (-pi, pi].
    """
    theta = np.deg2rad(deg)
    # Same normalization as clip_radian_rotation, inlined.
    while theta > np.pi:
        theta -= np.pi * 2
    while theta <= -np.pi:
        theta += np.pi * 2
    return theta
def theta_to_clock(rad: float) -> int:
    """Map a radian angle onto a clock face (1-12).

    theta = 0 maps to 12 o'clock and theta = pi/2 maps to 9 o'clock
    (positive angles run counter-clockwise, clock hours run clockwise).

    Parameters
    ----------
    rad : float
        Angle in radians; values outside (-pi, pi] are normalized first.

    Returns
    -------
    int
        The hour hand of the clock, in 1..12.
    """
    # Normalize into (-pi, pi] (inline of clip_radian_rotation).
    while rad > np.pi:
        rad -= np.pi * 2
    while rad <= -np.pi:
        rad += np.pi * 2
    # 0.523599 ~= pi/6, i.e. one clock hour; negated to flip direction.
    hour = -(rad / 0.523599)
    if hour <= 0:
        hour += 12
    if hour > 12:
        hour -= 12
    return int(np.rint(hour).item(0))
def find_theta_distance(t1: float, t2: float) -> float:
    """Find the signed shortest rotation that moves angle t1 onto angle t2.

    Bug fix: the previous branch-based implementation returned the wrong
    sign whenever the shortest path crossed zero (e.g. t1=0.1, t2=-0.1
    yielded +0.2 instead of -0.2), which was inconsistent with both its
    own same-side branch (t2 - t1) and with ``find_rotation``.  The
    shortest signed rotation is simply (t2 - t1) normalized to (-pi, pi].

    Parameters
    ----------
    t1 : float
        Starting angle in radians; may lie outside (-pi, pi].
    t2 : float
        Target angle in radians; may lie outside (-pi, pi].

    Returns
    -------
    float
        Rotation delta in (-pi, pi] such that t1 + delta is equivalent
        to t2 modulo 2*pi.
    """
    delta = t2 - t1
    # Normalize into (-pi, pi]; this subsumes clipping t1 and t2 first.
    while delta > np.pi:
        delta -= np.pi * 2
    while delta <= -np.pi:
        delta += np.pi * 2
    return delta
def find_rotation(v1, v2):
    """Return the shortest signed rotation (radians) taking v1 onto v2.

    Parameters
    ----------
    v1 : numpy array
        2D vector giving the starting direction.
    v2 : numpy array
        2D vector giving the ending direction.

    Returns
    -------
    float
        Rotation in (-pi, pi] that rotates v1 onto v2.
    """
    # Difference of the two polar angles.
    angle = np.arctan2(v2[1], v2[0]) - np.arctan2(v1[1], v1[0])
    # Normalize into (-pi, pi] (inline of clip_radian_rotation).
    while angle > np.pi:
        angle -= np.pi * 2
    while angle <= -np.pi:
        angle += np.pi * 2
    return angle
|
{"hexsha": "7efaa8c48988e6f13d1347c70f74011b121cbe71", "size": 3147, "ext": "py", "lang": "Python", "max_stars_repo_path": "alloy/math/rotation_2D.py", "max_stars_repo_name": "CMU-TBD/alloy", "max_stars_repo_head_hexsha": "cf66738e044613fb274bd1b159864a7600e15cb5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "alloy/math/rotation_2D.py", "max_issues_repo_name": "CMU-TBD/alloy", "max_issues_repo_head_hexsha": "cf66738e044613fb274bd1b159864a7600e15cb5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "alloy/math/rotation_2D.py", "max_forks_repo_name": "CMU-TBD/alloy", "max_forks_repo_head_hexsha": "cf66738e044613fb274bd1b159864a7600e15cb5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7034482759, "max_line_length": 101, "alphanum_fraction": 0.5611693677, "include": true, "reason": "import numpy", "num_tokens": 887}
|
"""Spectral Temporal SIMilarity"""
from dataclasses import dataclass
import numpy as np
from vibromaf.signal.spectrum import compute_spectral_support
from vibromaf.signal.transform import PerceptualSpectrumBuilder, preprocess_input_signal
def st_sim(distorted: np.array, reference: np.array, eta: float = 2 / 3) -> float:
    """Wrapper to compute the ST-SIM score of a distorted signal.

    Parameters
    ----------
    * `distorted: np.array` Distorted vibrotactile signal.
    * `reference: np.array` Reference vibrotactile signal.
    * `eta: float` Weight of the temporal component relative to the
      spectral one; must lie strictly between 0 and 1.

    Returns
    -------
    * `float` The ST-SIM score.
    """
    return STSIM(eta).calculate(distorted, reference)
@dataclass(frozen=True)
class STSIM:
    """Spectral Temporal SIMilarity metric.

    Combines a spectral similarity term (weight ``1 - eta``) with a
    temporal similarity term (weight ``eta``) in a geometric mean.
    """

    # Temporal-vs-spectral weighting; validated in __post_init__.
    eta: float
    # Shared spectrum builder (class attribute, not a dataclass field).
    perceptual_spectrum_builder = PerceptualSpectrumBuilder()

    def calculate(self, distorted: np.array, reference: np.array) -> float:
        """Return the ST-SIM score of ``distorted`` against ``reference``."""
        distorted = preprocess_input_signal(distorted, reference)
        # Identical signals are a perfect match by definition.
        if np.array_equal(distorted, reference):
            return 1
        spectral_sim = self._spectral_similarity(distorted, reference)
        temporal_sim = self._temporal_similarity(distorted, reference)
        # Weighted geometric combination of the two components.
        return pow(temporal_sim, self.eta) * pow(spectral_sim, 1 - self.eta)

    def _spectral_similarity(self, distorted: np.array, reference: np.array) -> float:
        # Similarity of the spectral supports of the perceptual spectra.
        ref_support = compute_spectral_support(
            self.perceptual_spectrum_builder.compute_perceptual_spectrum(reference)
        )
        dist_support = compute_spectral_support(
            self.perceptual_spectrum_builder.compute_perceptual_spectrum(distorted)
        )
        return STSIM.compute_sim(ref_support, dist_support)

    def _temporal_similarity(self, distorted: np.array, reference: np.array) -> float:
        # Similarity of block-wise normalized time-domain signals.
        ref_blocks = self.perceptual_spectrum_builder.block_builder.divide_and_normalize(
            reference
        )
        dist_blocks = self.perceptual_spectrum_builder.block_builder.divide_and_normalize(
            distorted
        )
        return STSIM.compute_sim(ref_blocks, dist_blocks)

    @staticmethod
    def compute_sim(reference: np.array, distorted: np.array) -> float:
        """Mean row-wise cross term normalized by the reference row energy."""
        cross = (reference * distorted).sum(axis=1)
        # eps guards against division by zero for all-zero reference rows.
        ref_energy = np.square(reference).sum(axis=1) + np.finfo(float).eps
        return float(np.mean(cross / ref_energy))

    def __post_init__(self):
        # Reject weights outside the open interval (0, 1).
        if not 0.0 < self.eta < 1.0:
            raise ValueError("Eta must be between 0 and 1.")
|
{"hexsha": "9d9edd0718569e4ebf9521c1e9f94bdf60d6125b", "size": 2454, "ext": "py", "lang": "Python", "max_stars_repo_path": "vibromaf/metrics/stsim.py", "max_stars_repo_name": "hofbi/vibromaf", "max_stars_repo_head_hexsha": "7678042d18fa3b4ab006283bdbd1b1cc6d84e822", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-11T19:56:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T19:56:59.000Z", "max_issues_repo_path": "vibromaf/metrics/stsim.py", "max_issues_repo_name": "hofbi/vibromaf", "max_issues_repo_head_hexsha": "7678042d18fa3b4ab006283bdbd1b1cc6d84e822", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vibromaf/metrics/stsim.py", "max_forks_repo_name": "hofbi/vibromaf", "max_forks_repo_head_hexsha": "7678042d18fa3b4ab006283bdbd1b1cc6d84e822", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1621621622, "max_line_length": 110, "alphanum_fraction": 0.6699266504, "include": true, "reason": "import numpy", "num_tokens": 542}
|
import cPickle as pck
import numpy as np
from make_input.qe_input import makeQEInput_new
from make_input.qe_run import run_qe_hpc
from tqdm import tqdm
from make_input.SSSP_acc_PBE_info import wfccutoffs,rhocutoffs
# Quantum ESPRESSO vc-relax batch submission for Si structures (Python 2 script).
calculation_type = '"vc-relax"'
sites_z = [14]  # atomic numbers present (14 = Si)
kpt = [2,2,2]
Nkpt = 3000
# rhocutoff ,wfccutoff = None,None
rhocutoff ,wfccutoff = [], []
# Use the largest SSSP-recommended cutoffs over all species present.
for zatom in sites_z:
    rhocutoff.append(rhocutoffs[zatom])
    wfccutoff.append( wfccutoffs[zatom])
rhocutoff = np.max(rhocutoff)
wfccutoff = np.max(wfccutoff)
# Smearing and convergence thresholds passed to the QE input builder.
smearing = 1e-3
etot_conv_thr = 1e-4
forc_conv_thr = 1e-4
nstep = 100
scf_conv_thr = 1e-6
symprec = 1e-5
# HPC submission parameters.
hpc = 'deneb'
node = 1
tasks = 16
cpus_per_tasks = 1
mem = 63000
time = '20:00:00'
debug = False
dry_run = False
if debug:
    # Shorter queue slot for debug runs.
    time = '00:30:00'
dataPath = '/scratch/musil/qmat/data/'
# dataPath = '/home/musil/git/run_qe/test_run/'
ppPath='"/home/musil/git/run_qe/pseudo/SSSP_acc_PBE/"'
fileNames = {}
structurePath = './structures/'
fileNames['crystals'] = structurePath + 'structures_141117.pck'
# Load the pickled list of candidate crystal structures.
with open(fileNames['crystals'],'rb') as f:
    crystals = pck.load(f)
# One output directory per structure index.
dirNames = {it:dataPath + 'run_relax_Si_new/idx_{}'.format(it)
            for it, _ in enumerate(crystals)}
# crystal = crystals[sg][it]
# dirName = dataPath + 'test_run/sg_{}-f_{}'.format(sg,it)
# print 'Calc in folder:'
# print dirName
print 'sending the calcs'
pbar = tqdm(total=len(dirNames),ascii=True)
for it,dirName in dirNames.iteritems():
    crystal = crystals[it]
    # Build the QE input file text for this structure.
    input_str = makeQEInput_new(crystal, sites_z, symprec=symprec,
                                rhocutoff=rhocutoff, wfccutoff=wfccutoff,
                                calculation_type=calculation_type, smearing=smearing,
                                pressure=0, press_conv_thr=0.5, cell_factor=2,
                                etot_conv_thr=etot_conv_thr, forc_conv_thr=forc_conv_thr, nstep=nstep,
                                scf_conv_thr=scf_conv_thr, print_forces=True, print_stress=True,
                                restart=False, collect_wf=True, force_ibrav0=False,
                                kpt=kpt, Nkpt=Nkpt, kpt_offset=[0, 0, 0],
                                ppPath=ppPath)
    # Submit (or dry-run) the job on the cluster.
    exitstatus = run_qe_hpc(input_str,dirName,verbose=False,hpc=hpc, node=node,
                            tasks_per_node=tasks,name='id_{}'.format(it),dry_run=dry_run,
                            cpus_per_tasks=cpus_per_tasks, mem=mem, time=time, debug=debug)
    pbar.update()
pbar.close()
|
{"hexsha": "3493f980b5b818aaf1375334f42591178ae75274", "size": 2479, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_relax_Si_new.py", "max_stars_repo_name": "felixmusil/run_qe", "max_stars_repo_head_hexsha": "10001c2779788122e59b299d088ef83821e24a38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_relax_Si_new.py", "max_issues_repo_name": "felixmusil/run_qe", "max_issues_repo_head_hexsha": "10001c2779788122e59b299d088ef83821e24a38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_relax_Si_new.py", "max_forks_repo_name": "felixmusil/run_qe", "max_forks_repo_head_hexsha": "10001c2779788122e59b299d088ef83821e24a38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2417582418, "max_line_length": 102, "alphanum_fraction": 0.6502622025, "include": true, "reason": "import numpy", "num_tokens": 737}
|
// This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
/* svd_utils_test.cc
Jeremy Barnes, 18 November 2012
Copyright (c) 2012 mldb.ai inc. All rights reserved.
Test for SVD utilities
*/
#define BOOST_TEST_MAIN
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "mldb/builtin/intersection_utils.h"
#include "mldb/utils/smart_ptr_utils.h"
#include "mldb/utils/string_functions.h"
#include "mldb/utils/vector_utils.h"
#include "mldb/utils/pair_utils.h"
#include "mldb/base/parallel.h"
#include "mldb/utils/distribution.h"
#include <cmath>
using namespace std;
using namespace MLDB;
void testBucket(std::vector<uint32_t> subs1,
std::vector<uint32_t> subs2)
{
cerr << "doing " << subs1 << " and " << subs2 << endl;
std::sort(subs1.begin(), subs1.end());
std::sort(subs2.begin(), subs2.end());
IntersectionEntry::Bucket bucket1, bucket2;
for (auto & s: subs1)
bucket1.add(s, SH(s));
for (auto & s: subs2)
bucket2.add(s, SH(s));
bucket1.compress();
bucket2.compress();
double count11 = bucket1.calcOverlap(bucket1, HAMMING);
BOOST_CHECK_EQUAL(count11, subs1.size());
double count22 = bucket2.calcOverlap(bucket2, HAMMING);
BOOST_CHECK_EQUAL(count22, subs2.size());
int expected = intersectionCount(&subs1[0], &subs1[0] + subs1.size(),
&subs2[0], &subs2[0] + subs2.size());
double count1 = bucket1.calcOverlap(bucket2, HAMMING);
double count2 = bucket2.calcOverlap(bucket1, HAMMING);
BOOST_CHECK_EQUAL(expected, count1);
BOOST_CHECK_EQUAL(expected, count2);
}
// Overlap correctness across edge cases: both empty, disjoint singletons,
// identical singletons, identical sets, subset, and sparse-vs-dense mixes.
BOOST_AUTO_TEST_CASE( test_buckets )
{
    testBucket({}, {});
    testBucket({1}, {0});
    testBucket({1}, {1});
    testBucket({1,2,3}, {1,2,3});
    testBucket({1,2,3}, {1});
    testBucket({1,2,3,100,200,300}, {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17});
}
|
{"hexsha": "c0d3f684004a42bfb8377d98229a9eb82c1d3ec1", "size": 1932, "ext": "cc", "lang": "C++", "max_stars_repo_path": "testing/svd_utils_test.cc", "max_stars_repo_name": "kstepanmpmg/mldb", "max_stars_repo_head_hexsha": "f78791cd34d01796705c0f173a14359ec1b2e021", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-04-29T12:39:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-29T12:39:34.000Z", "max_issues_repo_path": "testing/svd_utils_test.cc", "max_issues_repo_name": "tomzhang/mldb", "max_issues_repo_head_hexsha": "a09cf2d9ca454d1966b9e49ae69f2fe6bf571494", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-20T05:52:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T17:52:54.000Z", "max_forks_repo_path": "testing/svd_utils_test.cc", "max_forks_repo_name": "matebestek/mldb", "max_forks_repo_head_hexsha": "f78791cd34d01796705c0f173a14359ec1b2e021", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-11-23T20:03:38.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-23T20:03:38.000Z", "avg_line_length": 26.8333333333, "max_line_length": 81, "alphanum_fraction": 0.6552795031, "num_tokens": 557}
|
"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% function [XPP,YPP]=cast2(t,XP,YP)
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Enveloppe convexe d'une courbe de Bézier
%% Construction des points de contrôle
%% Deuxième partie t dans [0.5,1.]
%%
%% Données : t valeur du paramètre
%% XP, YP coordonnées des points de contrôle
%%
%% Résultats : XPP,YPP coordonnées des points de contrôle
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%% Introduction au calcul scientifique par la pratique %%%%%%%
%%%%%%% I. Danaila, P. Joly, S. M. Kaber et M. Postel %%%%%%%
%%%%%%% Dunod, 2005 %%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
"""
import numpy as np
def cast2(t,XP,YP):
    """Control points of the second part (t in [0.5, 1]) of a Bezier-curve
    subdivision, translated from the MATLAB code of Danaila, Joly, Kaber &
    Postel, "Introduction au calcul scientifique par la pratique" (Dunod, 2005).

    Parameters
    ----------
    t : float
        Subdivision parameter value.
    XP, YP : array-like
        Coordinates of the input control points.

    Returns
    -------
    (XPP, YPP) : tuple of numpy arrays
        Coordinates of the new control points.
    """
    m = (np.shape(XP)[0]) - 1
    # Copy the first m control points in reverse order into xx/yy.
    # NOTE(review): XP[m]/YP[m] (the last control point) is never read, and
    # the outputs have only m entries for m+1 inputs -- looks like an
    # off-by-one from the 1-based MATLAB original; verify against the book.
    xx = np.zeros(m)
    yy = np.zeros(m)
    for k in range(0,m):
        xx[m-1-k] = XP[k]
        yy[m-1-k] = YP[k]
    XPP = np.zeros(m)
    YPP = np.zeros(m)
    for kk in range(0,m):
        # Snapshot the previous stage before updating xx/yy in place.
        xxx = xx.copy()
        yyy = yy.copy()
        XPP[m-1-kk] = xx[kk]
        YPP[m-1-kk] = yy[kk]
        for k in range(kk,m):
            # NOTE(review): t*v + (1-t)*v is identically v, so this de
            # Casteljau blending step is a no-op; the second term should
            # presumably reference a neighboring point (e.g. index k+1 in
            # the MATLAB source) -- confirm against the original code.
            xx[k]=t*xxx[k]+(1-t)*xxx[k]
            yy[k]=t*yyy[k]+(1-t)*yyy[k]
    XPP[0]=xx[m-1]
    YPP[0]=yy[m-1]
    return (XPP,YPP)
|
{"hexsha": "0d56cd30501eabacf43b9872bdb63691cd3a2b81", "size": 1476, "ext": "py", "lang": "Python", "max_stars_repo_path": "cast2.py", "max_stars_repo_name": "JosueGauthier/Surface-de-Bezier-Python", "max_stars_repo_head_hexsha": "e37847296cbbe36f0c72c9cefc1861870ce883db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cast2.py", "max_issues_repo_name": "JosueGauthier/Surface-de-Bezier-Python", "max_issues_repo_head_hexsha": "e37847296cbbe36f0c72c9cefc1861870ce883db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cast2.py", "max_forks_repo_name": "JosueGauthier/Surface-de-Bezier-Python", "max_forks_repo_head_hexsha": "e37847296cbbe36f0c72c9cefc1861870ce883db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1967213115, "max_line_length": 68, "alphanum_fraction": 0.3611111111, "include": true, "reason": "import numpy", "num_tokens": 409}
|
! Registers and implements the external-forces contribution of the solids
! (sld) element operations.  Shared element data and the ProcPointer hook
! table come from Mod_sld_BaseElmope -- presumably; confirm in that module.
module Mod_sld_ExternalForces
   use typre
   use Mod_sld_BaseElmope
   implicit none
   private
   public SetPointersExternalForces

   ! Guard flag: -1 = initialized but pointers not set, 1 = pointers set.
   integer(ip), allocatable :: kfl_IsSet

contains

   !----------------------------------------------------------------------------
   !Setting Pointers
   ! itask = 0   : allocate and initialize the guard flag
   ! itask = 1   : hook sldForces into ProcPointer%ExternalForces (only once)
   ! itask = 100 : deallocate the guard flag
   subroutine SetPointersExternalForces(itask)
      implicit none
      integer(ip) :: itask
      select case (itask)
      case(0)
         allocate(kfl_IsSet)
         call a%Memor%allocObj(0,'kfl_IsSet','InitProcedurePointer',1)
         kfl_IsSet = -1
      case(1)
         ! Only install the pointer the first time this is requested.
         if (kfl_IsSet == -1) then
            kfl_IsSet = 1
            ProcPointer%ExternalForces => sldForces
         endif
      case(100)
         deallocate(kfl_IsSet)
         call a%Memor%deallocObj(0,'kfl_IsSet','InitProcedurePointer',1)
      end select
   end subroutine

   !---------------------------------------------------------------------------
   !Computation Subroutines
   ! Delegates to the shared solid-mechanics routine; arguments (e, densi,
   ! elext, dvol, ...) are module-level element data from the used modules.
   subroutine sldForces
      implicit none
      !Compute vector of external forces
      call sld_ComputeExternalForces(e,densi,a%grnor,a%gravi,a%traction,elext,dvol,a%force_factor)
   end subroutine

end module
|
{"hexsha": "5c388a06ab2f3c81ba63e1c7775e23684a6988ce", "size": 1251, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/modules/solids/models/Mod_sld_ExternalForces.f90", "max_stars_repo_name": "ciaid-colombia/InsFEM", "max_stars_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-24T08:19:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T08:19:54.000Z", "max_issues_repo_path": "Sources/modules/solids/models/Mod_sld_ExternalForces.f90", "max_issues_repo_name": "ciaid-colombia/InsFEM", "max_issues_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/modules/solids/models/Mod_sld_ExternalForces.f90", "max_forks_repo_name": "ciaid-colombia/InsFEM", "max_forks_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0625, "max_line_length": 98, "alphanum_fraction": 0.5371702638, "num_tokens": 305}
|
import numpy as np
import struct
UINT8 = "B"
UINT16 = "H"
UINT32 = "I"
class SABFormat(object):
    """Binary layout description of the SA/SB weather-radar base-data format."""

    def __init__(self):
        super(SABFormat, self).__init__()
        # Size in bytes of one radial header record.
        self.RadialHeaderSize = 128
        self.InfSize = 28

    def RadialHeader(self):
        """Return (field name, struct format code) pairs for one radial header."""
        return (
            ('reserve0', '14s'),
            ('flag', UINT16),  # 1 = identifies radar data
            ('reserve1', '12s'),
            ('mSends', UINT32),  # radial collection time (milliseconds since 00:00)
            ('JulianDate', UINT16),  # Julian date, counted from 1970-01-01
            ('URange', UINT16),  # unambiguous range (decoding: value/10. = km)
            ('AZ', UINT16),  # azimuth (decoding: [value/8.]*[180./4096.] = degrees)
            ('RadialNumber', UINT16),  # radial sequence number within the current elevation
            ('RadialStatus', UINT16),  # radial data status
            ('El', UINT16),
            # elevation (decoding: [value/8.]*[180./4096.] = degrees)
            ('ElNumber', UINT16),  # elevation number within the volume scan
            ('RangeToFirstGateOfRef', UINT16),  # actual range to first reflectivity gate (meters)
            ('RangeToFirstGateOfDop', UINT16),  # actual range to first Doppler gate (meters)
            ('GateSizeOfReflectivity', UINT16),  # reflectivity gate length (meters)
            ('GateSizeOfDoppler', UINT16),  # Doppler gate length (meters)
            ('GatesNumberOfReflectivity', UINT16),  # number of reflectivity gates
            ('GatesNumberOfDoppler', UINT16),  # number of Doppler gates
            ('CutSectorNumber', UINT16),  # sector number
            ('CalibrationConst', UINT32),  # system calibration constant
            ('PtrOfReflectivity', UINT16),  # byte offset from the radial header to the first reflectivity value
            ('PtrOfVelocity', UINT16),  # byte offset from the radial header to the first velocity value
            ('PtrOfSpectrumWidth', UINT16),  # byte offset from the radial header to the first spectrum-width value
            ('ResolutionOfVelocity', UINT16),  # Doppler velocity resolution; 2 means 0.5 m/s
            ('VcpNumber', UINT16),  # volume coverage pattern (VCP) mode
            ('reserve2', '14s'),  # reserved
            ('Nyquist', UINT16),  # Nyquist velocity (decoding: value/100. = m/s)
            ('reserve3', '38s'))

    def RadialData(self, rnumber, vnumber):
        """
        Numpy structured dtype for the data portion of one radial.

        :param rnumber: number of reflectivity gates
        :param vnumber: number of Doppler gates
        :return: dtype with dBZ (reflectivity), V (velocity), W (spectrum width)
        """
        return np.dtype([('dBZ', 'u1', rnumber),
                         ('V', 'u1', vnumber),
                         ('W', 'u1', vnumber)])
# Module-level singleton describing the SAB binary format.
dtype_sab = SABFormat()
|
{"hexsha": "3243ac81ecece94ab06868ba5d06fff4c1bbf13e", "size": 2183, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycwr/io/BaseDataProtocol/SABProtocol.py", "max_stars_repo_name": "zhaopingsun/pycwr", "max_stars_repo_head_hexsha": "7459371588e6d0d6d0737e249afa3921fe073151", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-12-24T06:07:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-13T02:24:18.000Z", "max_issues_repo_path": "pycwr/io/BaseDataProtocol/SABProtocol.py", "max_issues_repo_name": "zhaopingsun/pycwr", "max_issues_repo_head_hexsha": "7459371588e6d0d6d0737e249afa3921fe073151", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pycwr/io/BaseDataProtocol/SABProtocol.py", "max_forks_repo_name": "zhaopingsun/pycwr", "max_forks_repo_head_hexsha": "7459371588e6d0d6d0737e249afa3921fe073151", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.298245614, "max_line_length": 82, "alphanum_fraction": 0.5309207513, "include": true, "reason": "import numpy", "num_tokens": 938}
|
#!/usr/bin/env Rscript
# run with Rscript plot-gradients.r -i taxontable -w 'Bacteroides,Prevotella' -o outdir
# REQUIRED GLOBAL VARIABLES: PLEASE EDIT
#source(paste(Sys.getenv('MWAS_DIR'),'/lib/gradients.r',sep=''))
#source(paste(Sys.getenv('MWAS_DIR'),'/lib/util.r',sep=''))
require('RColorBrewer')
require('optparse')
require('vegan')
# make option list and parse command line
# Command-line interface: build the optparse spec and parse the arguments.
option_list <- list(
    make_option(c("-i","--input_fp"), type="character",
        help="QIIME-formatted input taxon table (tab-delimited text, not biom) [required]."),
    make_option(c("-m","--map_fp"), type="character",default=NULL,
        help="QIIME-formatted mapping file (optional). If provided, only samples in both taxon table and mapping file will be plotted."),
    make_option(c("-c","--column"), type="character",default=NULL,
        help="Name of metadata column to color plot by (optional). If included, does not plot gradients."),
    make_option(c("-d","--distance_fp"), type="character",default=NULL,
        help="QIIME-formatted distance table file (optional). If omitted, the script uses Bray-Curtis distance."),
    make_option(c("-p","--pcoa_fp"), type="character",default=NULL,
        help="QIIME-formatted pcoa table file (optional). If omitted, the script uses Bray-Curtis distance. If included, takes priority over --distance_fp."),
    make_option(c("-w", "--which_taxa"), type="character", default=NULL,
        help="Comma-separated list of taxa to plot [default: plot top --nplot taxa]"),
    make_option(c("-s", "--shorten_taxa"),action='store_true',default=FALSE,
        help="Shorten taxonomy names to lowest defined level. [default: %default]"),
    make_option(c("-x", "--multiple_axes"),action='store_true',default=FALSE,
        help="Show PC1 v PC2, PC1 v PC3, PC2 v PC3 in 3 separate plots. [default: %default]"),
    make_option(c("-n", "--nplot"), type="numeric", default=10,
        help="Number of taxa to plot (in order of decreasing mean). Ignored if --which_taxa exists [default: %default]"),
    make_option(c("-o", "--outdir"), type="character", default='.',
        help="Output directory [default %default]")
)
opts <- parse_args(OptionParser(option_list=option_list),
    args=commandArgs(trailing=TRUE))
# create output directory if needed
if(opts$outdir != ".") dir.create(opts$outdir,showWarnings=FALSE, recursive=TRUE)

# LOAD DATA
# Taxon table is transposed so rows = samples, columns = taxa.
x <- t(read.table(opts$input_fp,sep='\t',head=T,row=1,check=F,quote='"'))
if(!is.null(opts$map_fp)){
    m <- read.table(opts$map_fp,sep='\t',head=T,row=1,check=F,comment='',quote='"')
    # check rownames in mapping file matrix
    missing.taxa.samples <- setdiff(rownames(x), rownames(m))
    missing.map.samples <- setdiff(rownames(m), rownames(x))
    if(length(missing.taxa.samples) > 0){
        stop(sprintf('\n\nError: one or more sample names from taxonomy table (%s, ...) not present in metadata table (%s, ...).',
            paste(sort(missing.taxa.samples)[1:5],collapse=', '),
            paste(sort(missing.map.samples)[1:5],collapse=', ')))
    }
    # Keep only samples present in both tables, in matching order.
    x <- x[intersect(rownames(x),rownames(m)),,drop=F]
    m <- droplevels(m[rownames(x),,drop=F])
}

# check that taxon.names are in taxon table
if(is.null(opts$which_taxa)){
    # Default: the --nplot taxa with the highest mean abundance.
    taxon.names <- colnames(x)[rev(order(colMeans(x)))]
    taxon.names <- taxon.names[1:min(opts$nplot, length(taxon.names))]
} else {
    taxon.names <- strsplit(opts$which_taxa,',')[[1]]
    if(!all(taxon.names %in% colnames(x))){
        stop(paste('The following taxa are not present in the taxon table:',
            paste(taxon.names[!(taxon.names %in% colnames(x))],collapse=', '),
            '\n'))
    }
}

if(opts$shorten_taxa){
    # NOTE(review): shorten.taxonomy comes from the MWAS helper scripts whose
    # source() lines are commented out at the top of this file -- confirm
    # they are loaded some other way before using this option.
    colnames(x) <- shorten.taxonomy(colnames(x))
    taxon.names <- shorten.taxonomy(taxon.names)
}

# Build the ordination (pc): from a PCoA file, a distance file, or Bray-Curtis.
if(is.null(opts$pcoa_fp)){
    if(is.null(opts$distance_fp)){
        d <- vegdist(x)
    } else {
        d <- read.table(opts$distance_fp,sep='\t',head=T,row=1,check=F)
        # check rownames in distance matrix
        missing.taxa.samples <- union(setdiff(rownames(x), rownames(d)), setdiff(rownames(x), colnames(d)))
        missing.distance.samples <- union(setdiff(rownames(d), rownames(x)), setdiff(colnames(d), rownames(x)))
        if(length(missing.taxa.samples) > 0){
            stop(sprintf('\n\nError: one or more sample names from taxonomy table (%s, ...) not present in distance table (%s, ...).',
                paste(sort(missing.taxa.samples)[1:5],collapse=', '),
                paste(sort(missing.distance.samples)[1:5],collapse=', ')))
        }
        d <- d[rownames(x),rownames(x)]
        d <- as.dist(d)
    }
    # Classical multidimensional scaling on the distance matrix (5 axes).
    pc <- cmdscale(d,k=5)
} else {
    pc <- read.table(opts$pcoa_fp,sep='\t',row=1,head=T)
    # QIIME PCoA files end with eigenvalue / % variation rows; drop them.
    if(rownames(pc)[nrow(pc)] == '% variation explained'){
        pc <- pc[1:(nrow(pc)-2),1:min(5,ncol(pc))]
    }
    if(mean(rownames(x) %in% rownames(pc)) < 1){
        stop('Taxon table row names do not match PC file row names')
    }
    pc <- pc[rownames(x),]
}
# plots
# Output file name depends on whether we color by a metadata column.
if(is.null(opts$column)) {
    fp <- sprintf('%s/gradients.pdf',opts$outdir)
} else {
    if(!is.element(opts$column,colnames(m))) stop(paste(opts$column,'not in mapping file.'))
    fp <- sprintf('%s/pcoa.pdf',opts$outdir)
}

# Either three panels (PC1vPC2, PC1vPC3, PC2vPC3) or a single PC1vPC2 panel.
if(opts$multiple_axes){
    pdf(fp,width=11,height=3.75)
    par(mfrow=c(1,3))
    combs <- combn(1:3,2)
} else {
    pdf(fp,width=6,height=5)
    combs <- matrix(1:2,ncol=1)
}

# NOTE(review): show.gradients / show.metadata come from the MWAS library
# scripts whose source() calls are commented out at the top -- confirm they
# are in scope when this script runs.
if(is.null(opts$column)){
    # One gradient plot per selected taxon per axis pair.
    for(i in seq_along(taxon.names)){
        for(j in 1:ncol(combs)){
            show.gradients(x[,taxon.names[i]], pc[,combs[,j]], incl.legend=TRUE,pt.alpha='CC',
                axis.labels=sprintf('PC%d',combs[,j]),
                title.text=sprintf('%s - PC%d v PC%d',taxon.names[i],combs[1,j],combs[2,j]))
        }
    }
} else {
    # One metadata-colored ordination plot per axis pair.
    for(j in 1:ncol(combs)){
        show.metadata(m[,opts$column], pc[,combs[,j]], incl.legend=TRUE,pt.alpha='CC',
            axis.labels=sprintf('PC%d',combs[,j]),
            title.text=sprintf('%s - PC%d v PC%d',opts$column,combs[1,j],combs[2,j]))
    }
}
dev.off()
|
{"hexsha": "6f404a801934d1a9c86dc3d39370fcdf82ba2192", "size": 5758, "ext": "r", "lang": "R", "max_stars_repo_path": "bin/util/plot-gradients.r", "max_stars_repo_name": "hhuang2018/mwas", "max_stars_repo_head_hexsha": "26518c937be831026c6015f8fdfb764c1d3a58be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/util/plot-gradients.r", "max_issues_repo_name": "hhuang2018/mwas", "max_issues_repo_head_hexsha": "26518c937be831026c6015f8fdfb764c1d3a58be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/util/plot-gradients.r", "max_forks_repo_name": "hhuang2018/mwas", "max_forks_repo_head_hexsha": "26518c937be831026c6015f8fdfb764c1d3a58be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1285714286, "max_line_length": 158, "alphanum_fraction": 0.6589093435, "num_tokens": 1677}
|
import os
from glob import glob
import numpy as np
import pandas as pd
# Which dataset to compare; also selects the report's column layout in main.
DATASET_NAME = "adhd_sin"
DATASET_PATH = f"../datasets/{DATASET_NAME}"
REFN_DATA_PATH = f"{DATASET_PATH}"  # reference data
REFN_FXTN_PATH = f"{REFN_DATA_PATH}_fxtn"  # fixations from reference data
SYNT_DATA_PATH = f"{DATASET_PATH}_synt"  # synthetic data
SYNT_FXTN_PATH = f"{SYNT_DATA_PATH}_fxtn"  # fixations from synthetic data

# ax = [1,2,3,4,5,6,7]
# ay = [2,3,4,5,6,7,8]
# bx = [2,3,4]
# by = [3,4,5]
def calculate_rmse(a: np.ndarray, b: np.ndarray):
    """Root-mean-square distance between two 2-D point sequences.

    The shorter sequence is linearly resampled per column onto the length
    of the longer one before the point-wise comparison.

    Parameters
    ----------
    a, b : np.ndarray
        Arrays of shape (n, 2); the two may have different lengths.

    Returns
    -------
    tuple
        (rmse, n) where n is the length of the longer sequence.
    """
    len_a, len_b = a.shape[0], b.shape[0]
    # Ensure `a` is the longer sequence.
    if len_a < len_b:
        a, b = b, a
        len_a, len_b = len_b, len_a
    # Resample each column of the shorter sequence to the longer length.
    sample_points = np.linspace(0, len_b - 1, len_a)
    src_index = np.arange(len_b)
    b = np.column_stack([
        np.interp(sample_points, src_index, b[:, 0]),
        np.interp(sample_points, src_index, b[:, 1]),
    ])
    rmse = np.sqrt(np.mean(np.sum(np.square(a - b), axis=1)))
    return rmse, len_a
def adhd_sin_filename_parse(part: str):
    """Split an ADHD-SIN file stem into [participant, noise, question].

    Layout: first 3 characters are the participant id; characters 3..10
    are ignored; the remainder is a noise-level code ('0', '5', '10',
    '15', '20' or '25') immediately followed by the question id.

    Parameters
    ----------
    part : str
        File-name stem to parse.

    Returns
    -------
    list
        [participant, noise_level, question].

    Raises
    ------
    RuntimeError
        If the remainder does not start with a known noise level.
    """
    participant = part[:3]
    remainder = part[11:]
    noise_levels = ['0', '5', '10', '15', '20', '25']
    # Single-character codes take priority over two-character ones.
    if remainder[0] in noise_levels:
        width = 1
    elif remainder[:2] in noise_levels:
        width = 2
    else:
        raise RuntimeError("Filename cannot be parsed", part)
    return [participant, remainder[:width], remainder[width:]]
if __name__ == '__main__':
    # Collect the generated (synthetic) data and fixation CSV files.
    synt_data_glob = glob(f"{SYNT_DATA_PATH}/*.csv")
    # print(f"{len(synt_data_glob)} synthetic data files found")
    synt_fxtn_glob = glob(f"{SYNT_FXTN_PATH}/*.csv")
    # print(f"{len(synt_data_glob)} synthetic fixation files found")
    cols = []
    stats_cols = ["noise_model", "tracker_model", "rmse", "points"]
    df: pd.DataFrame
    # Per-dataset metadata columns parsed from the file names.
    if DATASET_NAME == "n_back":
        cols = ["participant", "mode", "task", "position", *stats_cols]
    if DATASET_NAME == "adhd_sin":
        cols = ["participant", "noise", "question", *stats_cols]
    df = pd.DataFrame(columns=cols)
    # compare reference [data] against synthetic [data]
    for synt_data_fp in synt_data_glob:
        (file_name, file_ext) = os.path.splitext(os.path.basename(synt_data_fp))
        parts = file_name.split('-')
        # Drop the second-to-last '-' component of the stem.
        # NOTE(review): assumed naming scheme <stem>-<noise>-<tracker>-<n>; confirm.
        file_parts = [*parts[:-2], parts[-1]]
        if DATASET_NAME == "adhd_sin":
            # Expand the stem into participant/noise/question fields.
            file_parts = [*adhd_sin_filename_parse(file_parts[0]), *file_parts[1:]]
        # Matching reference file: strip the last three '-' components.
        refn_data_fp = f"{REFN_DATA_PATH}/{file_name.rsplit('-', 3)[0]}{file_ext}"
        # print(f'Comparing: {synt_data_fp} against {refn_data_fp}')
        # opening synthetic and reference files
        synt_df = pd.read_csv(synt_data_fp, index_col='t')[['fx', 'fy']]
        synt_df.columns = ['x', 'y']
        refn_df = pd.read_csv(refn_data_fp, index_col='t')[['x', 'y']]
        # Rescale the reference index by 1000 (ms -> s, presumably -- confirm).
        refn_df.index = refn_df.index / 1000.0
        s = synt_df[['x', 'y']].to_numpy()
        r = refn_df[['x', 'y']].to_numpy()
        rmse, points = calculate_rmse(s, r)
        row = [*file_parts, rmse, points]
        df.loc[len(df.index)] = row
        print(row)
    # Write the per-file comparison table, indexed by the first column.
    df.set_index(df.columns[0]).to_csv(f"../comparisons/{DATASET_NAME}_synt_data_comparison.csv")

    # # compare reference [fxtn] against synthetic [fxtn]
    # for synt_fxtn_fp in synt_fxtn_glob:
    #     (file_name, file_ext) = os.path.splitext(os.path.basename(synt_fxtn_fp))
    #     refn_fxtn_fp = f"{REFN_FXTN_PATH}/{file_name.rsplit('-', 3)[0]}{file_ext}"
    #     print(f'Comparing: {synt_fxtn_fp} against {refn_fxtn_fp}')
    #     # opening synthetic and reference files
    #     synt_df = pd.read_csv(synt_fxtn_fp, index_col='t')
    #     refn_df = pd.read_csv(refn_fxtn_fp, index_col='t')
{"hexsha": "38c3648958de49fd9f967580cef4d5d7cb546d80", "size": 3393, "ext": "py", "lang": "Python", "max_stars_repo_path": "projects/eyetracking/dataset_compare.py", "max_stars_repo_name": "nirdslab/streaminghub", "max_stars_repo_head_hexsha": "a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projects/eyetracking/dataset_compare.py", "max_issues_repo_name": "nirdslab/streaminghub", "max_issues_repo_head_hexsha": "a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projects/eyetracking/dataset_compare.py", "max_forks_repo_name": "nirdslab/streaminghub", "max_forks_repo_head_hexsha": "a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-22T15:35:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-22T15:35:29.000Z", "avg_line_length": 36.8804347826, "max_line_length": 95, "alphanum_fraction": 0.6581196581, "include": true, "reason": "import numpy", "num_tokens": 1110}
|
#
# TODO: should ITensorMap be a special version of
# an ITensorNetwork with input and output indices specified?
#
# T is however the nodes are indexed
# TODO: how to deal with 2D, multiple networks, etc.?
# struct IndexSetNetwork{T}
# # Use Vector{SortedVector{Pair{T, IndexSet}}}
# data::Vector{Vector{Pair{T, IndexSet}}}
# end
# Make MPS, PEPS, etc. wrappers around `ITensorNetwork`,
# which could be subclasses of `AbstractITensorNetwork`
#
# Also `InfiniteITensorNetwork`, `AbstractInfiniteITensorNetwork`:
#
# struct InfiniteMPS <: AbstractInfiniteITensorNetwork
# tensornetwork::InfiniteITensorNetwork
# end
#
# struct ITensorNetwork <: AbstractITensorNetwork
# itensors::Vector{ITensor}
# indnetwork::IndexSetNetwork # Adjacency list of IndexSets
# input_inds::IndexSet
# output_inds::IndexSet
# end
#
# TODO: how to deal with networks of ITensorNetwork,
# for example a network of MPS and MPO?
# ITensorNetworkNetwork that is a tree of ITensorNetwork?
#
abstract type AbstractITensorMap end

# Accessors for the map's external (dangling) indices.
input_inds(T::AbstractITensorMap) = T.input_inds
output_inds(T::AbstractITensorMap) = T.output_inds

# Functor form: apply the map to a vector-like ITensor, then relabel the
# result's output indices back to the input indices so the result lives
# on the same index space as `v`.
(T::AbstractITensorMap)(v::ITensor) = replaceinds(T * v, output_inds(T) => input_inds(T))

# convert from Tuple to Vector (Vectors pass through unchanged)
tuple_to_vector(t::Tuple) = collect(t)
tuple_to_vector(v::Vector) = v
# Represents the action of applying the
# vector of ITensors to a starting state and then mapping
# them back (from output_inds to input_inds)
# TODO: rename ITensorNetworkMap?
# TODO: maybe parametrize the type to allow storing just 1 ITensor?
# TODO: contraction order optimization!
struct ITensorMap <: AbstractITensorMap
  itensors::Vector{ITensor}    # tensors contracted (in order) when the map is applied
  scalar::Number               # lazy scalar prefactor, applied at contraction time
  input_inds::Vector{Index}    # indices the map consumes (its domain)
  output_inds::Vector{Index}   # indices the map produces (its codomain)
  # Inner constructor: normalize Tuple index collections to Vectors so the
  # field types are concrete.
  function ITensorMap(itensors::Vector{ITensor}, scalar, input_inds, output_inds)
    inds_in = tuple_to_vector(input_inds)
    inds_out = tuple_to_vector(output_inds)
    #inds_eltype = promote_type(eltype(input_inds), eltype(output_inds))
    #return new{inds_eltype}(itensors, inds_in, inds_out)
    return new(itensors, scalar, inds_in, inds_out)
  end
end
# Scalar-free constructor: default the lazy scalar prefactor to `true`,
# the multiplicative identity that preserves element types on promotion.
ITensorMap(itensors::Vector{ITensor}, input_inds, output_inds) =
  ITensorMap(itensors, true, input_inds, output_inds)
# Lazy composition of two maps: M1 ∘ M2, applying M2 first. The tensor lists
# are concatenated and the lazy scalars multiplied; no contraction happens yet.
function (M1::ITensorMap * M2::ITensorMap)
  # The composition is only defined when M2 maps into the domain of M1.
  # Use an explicit check instead of `@assert`, which can be compiled out
  # and should not be used for input validation.
  output_inds(M2) == input_inds(M1) ||
    throw(ArgumentError("output indices of the right map must match input indices of the left map"))
  return ITensorMap(
    vcat(M1.itensors, M2.itensors), M1.scalar * M2.scalar, input_inds(M2), output_inds(M1)
  )
end
# Default choice of input indices for a tensor network: the unprimed
# (plev == 0) external legs, i.e. indices not shared between any two tensors.
function default_input_inds(itensors::Vector{ITensor})
  return filter(i -> plev(i) == 0, noncommoninds(itensors...))
end
# Keyword constructor: by default the inputs are the unprimed external indices
# of the network and the outputs are their primed, daggered partners — the
# usual convention for an operator sandwiched between a bra and a ket.
function ITensorMap(
  itensors::Vector{ITensor};
  input_inds=default_input_inds(itensors),
  output_inds=dag(input_inds'),
)
  return ITensorMap(itensors, input_inds, output_inds)
end
# Iterating an ITensorMap iterates over its constituent ITensors
# (the lazy scalar is not included in the iteration).
Base.iterate(T::ITensorMap, args...) = iterate(T.itensors, args...)
# Transpose: reverse the contraction order and swap the roles of the input and
# output indices. The lazy scalar prefactor is carried over explicitly — the
# previous implementation used the 3-argument constructor, which silently
# reset the scalar to 1.
function Base.transpose(T::ITensorMap)
  return ITensorMap(reverse(T.itensors), T.scalar, output_inds(T), input_inds(T))
end
# This is actually a Hermitian conjugation, not priming: dag every tensor,
# reverse the contraction order, and swap + dag the input/output indices.
# The lazy scalar must be complex-conjugated as well — the previous
# implementation used the 3-argument constructor, which silently dropped it.
function Base.adjoint(T::ITensorMap)
  return ITensorMap(
    reverse(dag.(T.itensors)), conj(T.scalar), dag(output_inds(T)), dag(input_inds(T))
  )
end
# TODO: make a default constructor that searches for pairs of primed and unprimed indices
# TODO: this would be useful for ITensor matrix factorizations!
# function ITensorMap(itensors::Vector{ITensor}, pair_match::Pair = 0 => 1)
# # pair_match could be:
# # pair_match = 0 => 2
# # pair_match = "Input" => "Output"
# # pair_match = ("Input", 0) => ("Output", 1)
# external_inds = unioninds(siteinds(itensors)...)
# input_match = first(pair_match)
#   output_match = last(pair_match)
# # TODO: preprocess pair_match to be of the form `(tags, plev)`
# ind_match(i::Index, match) = hastags(i, match[1]) && hasplev(i, match[2])
# input_inds = filter(i -> ind_match(i, input_match), external_inds)
# output_inds = dag(replaceprime(replacetags(input_inds, first.(pair_match)), last.(pair_match)))
#   @assert hassameinds(output_inds, filter(i -> ind_match(i, output_match), external_inds))
# return ITensorMap(itensors, input_inds, output_inds)
# end
"""
    set_scalar(T::ITensorMap, scalar::Number)

Return a new `ITensorMap` sharing `T`'s tensors and indices but with its
lazy scalar prefactor replaced by `scalar`.
"""
set_scalar(T::ITensorMap, scalar::Number) =
  ITensorMap(T.itensors, scalar, input_inds(T), output_inds(T))
# Lazily scale by a scalar: no tensor data is touched, only the stored
# prefactor changes.
(T::ITensorMap * c::Number) = set_scalar(T, T.scalar * c)
(c::Number * T::ITensorMap) = set_scalar(T, c * T.scalar)
# Unary negation flips the sign of the lazy scalar.
-(T::ITensorMap) = set_scalar(T, -T.scalar)
# TODO: assert isempty(uniqueinds(v, T))?
# Apply the operator as T|v⟩: `v` is prepended so it is contracted first,
# then the stored tensors are contracted in order. `copy` keeps `pushfirst!`
# from mutating T.itensors; the lazy scalar is applied at the end.
(T::ITensorMap * v::ITensor) = T.scalar * contract(pushfirst!(copy(T.itensors), v)) #*(v, T...)
# Apply the operator as ⟨v̅|T (simple left multiplication, without conjugation).
# This applies the ITensorMap tensors in reverse (maybe this is not always the
# best contraction ordering).
# Bug fix: the original wrote `reverse(T.itensors, v)`, which passes `v` as the
# `start` positional argument of `reverse` (a MethodError) and leaves nothing
# to prepend. The vector must be reversed first, then `v` prepended so it is
# contracted first.
(v::ITensor * T::ITensorMap) = T.scalar * contract(pushfirst!(reverse(T.itensors), v)) #*(v, reverse(T)...)
# TODO: implement Base.iterate(::Base.Iterators.Reverse{MPS})
# TODO: use something like:
# neighbors(ψ, ϕ, (1, 1))
# neighbors(ψ, ϕ, (2, 1))
# neighbors(ψ, ϕ, (1, N))
# neighbors(ψ, ϕ, (2, N))
# Transfer matrix made from two MPS: T|v⟩ -> |w⟩
# The site tensors of ψ and ϕ are interleaved and stored in reverse order so
# that contraction proceeds from site N down to site 1.
function ITensorMap(ψ::MPS, ϕ::MPS; input_inds=nothing, output_inds=nothing)
  N = length(ψ)
  @assert length(ϕ) == N
  itensors::Vector{ITensor} = reverse(collect(Iterators.flatten(Iterators.zip(ψ, ϕ))))
  # Default domain/codomain: the uncontracted boundary indices at the last
  # (input) and first (output) sites of the two MPS.
  if isnothing(input_inds)
    input_inds = unioninds(
      uniqueinds(ψ[N], ψ[N - 1], ϕ[N]), uniqueinds(ϕ[N], ϕ[N - 1], ψ[N])
    )
  end
  if isnothing(output_inds)
    output_inds = unioninds(uniqueinds(ψ[1], ψ[2], ϕ[1]), uniqueinds(ϕ[1], ϕ[2], ψ[1]))
  end
  return ITensorMap(itensors, input_inds, output_inds)
end
# Represents a lazy sum of ITensor maps, all sharing the same domain and
# codomain. Terms are applied independently and their results added.
struct ITensorMapSum <: AbstractITensorMap
  itensormaps::Vector{ITensorMap}
  input_inds::Vector{Index}
  output_inds::Vector{Index}
  function ITensorMapSum(itensormaps::Vector{ITensorMap})
    isempty(itensormaps) &&
      throw(ArgumentError("ITensorMapSum requires at least one ITensorMap"))
    in_inds = input_inds(first(itensormaps))
    out_inds = output_inds(first(itensormaps))
    # A sum of maps is only well defined when every term acts on the same
    # input indices and produces the same output indices (this implements
    # the check the original left as a TODO).
    for M in itensormaps
      (input_inds(M) == in_inds && output_inds(M) == out_inds) ||
        throw(ArgumentError("all terms of an ITensorMapSum must share input and output indices"))
    end
    return new(itensormaps, in_inds, out_inds)
  end
end
# Addition builds (or extends) a lazy sum of maps.
(M1::ITensorMap + M2::ITensorMap) = ITensorMapSum([M1, M2])
# `copy` so the existing sum's term list is not mutated by `push!`.
(M1::ITensorMapSum + M2::ITensorMap) = ITensorMapSum(push!(copy(M1.itensormaps), M2))
(M1::ITensorMap + M2::ITensorMapSum) = M2 + M1
# Subtraction is addition of the negated map; the sign lives in the lazy scalar.
(M1::ITensorMap - M2::ITensorMap) = M1 + (-M2)
(M1::ITensorMapSum - M2::ITensorMap) = M1 + (-M2)
(M1::ITensorMap - M2::ITensorMapSum) = M1 + (-M2)
# Applying a sum applies each term to `v` and adds the resulting ITensors.
(M::ITensorMapSum * v::ITensor) = sum([m * v for m in M.itensormaps])
# Scalar multiplication distributes over the terms of the sum.
(M::ITensorMapSum * c::Number) = ITensorMapSum([m * c for m in M.itensormaps])
(c::Number * M::ITensorMapSum) = M * c
-(M::ITensorMapSum) = -1 * M
# Required by Arpack.eigs
# The map acts like a matrix of size (output dimension) × (input dimension).
Base.size(T::AbstractITensorMap) = (dim(output_inds(T)), dim(input_inds(T)))
LinearAlgebra.issymmetric(::AbstractITensorMap) = false
# Generic fallback kept for other subtypes that store an `itensors` field.
Base.eltype(T::AbstractITensorMap) = eltype(T.itensors[1])
# Promote the element types of all tensors (and the lazy scalar) instead of
# only inspecting the first tensor — this resolves the original TODO.
Base.eltype(T::ITensorMap) =
  promote_type(mapreduce(eltype, promote_type, T.itensors), typeof(T.scalar))
# ITensorMapSum stores `itensormaps`, not `itensors`, so the fallback above
# would error on it; promote over the terms of the sum instead.
Base.eltype(T::ITensorMapSum) = mapreduce(eltype, promote_type, T.itensormaps)
# Matrix-free matvec for iterative solvers: y .= A * x, where x and y are
# plain AbstractVectors and A is applied via tensor contraction.
function LinearAlgebra.mul!(y, A::AbstractITensorMap, x)
  # Wrap the raw vector as an ITensor over the (daggered) input indices.
  xt = itensor(x, dag(input_inds(A)); tol=1e-15)
  # XXX: something wrong with in-place ITensor
  # contraction, not overwriting input data
  yt = A(xt)
  # The functor `A(xt)` relabels the result back onto the input indices (see
  # the AbstractITensorMap call overload), so permuting by `input_inds(A)`
  # fixes the memory layout before flattening.
  yt = permute(yt, input_inds(A))
  y .= vec(array(yt))
  return y
end
|
{"hexsha": "543d53d7f9701d1480d256ecfe62627e870ebe2c", "size": 7226, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/itensormap.jl", "max_stars_repo_name": "LHerviou/ITensorInfiniteMPS.jl", "max_stars_repo_head_hexsha": "1489817f7960903e84331b324d810631074d0f35", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-01T15:21:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T15:05:07.000Z", "max_issues_repo_path": "src/itensormap.jl", "max_issues_repo_name": "LHerviou/ITensorInfiniteMPS.jl", "max_issues_repo_head_hexsha": "1489817f7960903e84331b324d810631074d0f35", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2022-01-05T14:29:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T15:25:10.000Z", "max_forks_repo_path": "src/itensormap.jl", "max_forks_repo_name": "LHerviou/ITensorInfiniteMPS.jl", "max_forks_repo_head_hexsha": "1489817f7960903e84331b324d810631074d0f35", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-12-09T16:53:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T21:44:24.000Z", "avg_line_length": 36.6802030457, "max_line_length": 107, "alphanum_fraction": 0.7240520343, "num_tokens": 2179}
|
# Utility of a successful delivery: a flat reward. The `ie` argument is
# unused but kept so the function matches the util-function API expected
# by the SCoBA search-tree code.
delivery_util(reward::Float64, ie::InteractionEvent) = reward
"""
    delivery_success_prob(std_scale, ref_time, ie)

Look up the CDF of the Epanechnikov distribution for the arrival time, i.e.
estimate the probability that the drone finishes in time for the delivery to
succeed, based on the travel time recorded in the interaction event `ie`.
"""
function delivery_success_prob(std_scale::Float64, ref_time::Float64, ie::InteractionEvent)
    # NOTE(review): `ref_time` is currently unused in the body — confirm
    # whether the CDF should be evaluated at `ref_time` instead.
    travel_time = ie.timestamps[SUCCESS] - ie.timestamps[FINISH]
    # TODO: Need something more principled than this distribution
    meanval = travel_time
    stdval = travel_time/std_scale
    # NOTE(review): Distributions.Epanechnikov(μ, σ) takes location and scale
    # parameters; `stdval` is treated as a std-like spread here — confirm.
    tt_dist = Distributions.Epanechnikov(meanval, stdval)
    # Compute the CDF of the finish time
    succ_prob = Distributions.cdf(tt_dist, ie.timestamps[FINISH])
    return succ_prob
end
"""
    scoba_routing!(server, routing_sim, rng=Random.GLOBAL_RNG)

Assign available drones (those currently at a depot) to active packages using
the SCoBA search-tree policy, then resolve cross-depot conflicts through the
coordinator. Valid tasks are those within the distance threshold from the depot.

Mutates `server` (task allocation, per-drone properties) and `routing_sim`
(active/busy package bookkeeping, sampled true delivery-return times).
"""
function scoba_routing!(server::RoutingAllocation, routing_sim::RoutingSimulator,
                        rng::RNG=Random.GLOBAL_RNG) where {RNG <: AbstractRNG}
    # First collect all available drones
    available_drones = Dict{Int,Vector{String}}()
    # Split out available drones in terms of depot
    # Since drones in same depot will use priority ordering
    for (drone_nm, dp) in server.agent_prop_set
        drone = server.agent_set[drone_nm]
        if dp.at_depot == true
            if ~(haskey(available_drones, drone.depot_number))
                available_drones[drone.depot_number] = [drone_nm]
            else
                push!(available_drones[drone.depot_number], drone_nm)
            end
        end
    end # for (drone_nm, dp)
    # Diagnostics to send to CBA
    task_util_allocation = Dict{String,SCoBASolver.TaskUtil}()
    all_considered_tasks = Dict{String,Set{String}}()
    assignment_util = 0.0
    # Curry the domain-specific reward/success models into the two-argument
    # signatures that the SCoBA solver expects.
    util_val_fn(ie) = delivery_util(routing_sim.delivery_reward, ie)
    success_prob(ref_time, ie) = delivery_success_prob(routing_sim.tt_est_std_scale,
                                                       ref_time, ie)
    # @infiltrate
    # Assign all available drones
    for (depot_number, depot_drones) in available_drones
        # Just get the location of any of them
        depot_loc = server.agent_set[depot_drones[1]].depot_loc
        # Iterate over active packages and check those that are in range
        pkgs_in_range = Set{String}()
        for (pkg_nm, pp) in routing_sim.active_packages
            if Distances.evaluate(EuclideanLatLongMetric(), convert_to_vector(depot_loc), convert_to_vector(pp.delivery)) <= routing_sim.distance_thresh
                push!(pkgs_in_range, pkg_nm)
            end
        end # for (pkg_nm, pp)
        depot_assigned_pkgs = Set{String}()
        # For drones from same depot, priority ordering
        for drone_nm in depot_drones
            # @info "Assigning available drones $(drone_nm)"
            # Exclude already assigned packages FROM DEPOT
            pkgs_to_consider = setdiff(pkgs_in_range, depot_assigned_pkgs)
            # @info "$(length(pkgs_to_consider)) packages considered!"
            # Run tree gen function!
            generate_search_tree!(server, drone_nm, pkgs_to_consider, success_prob, util_val_fn, 0.0)
            tree = server.agent_prop_set[drone_nm].tree
            # Get the attempt index and util if applicable
            if ~(isempty(tree))
                dec_idx = get_next_attempt_idx(tree)
                if dec_idx != -1
                    dec_node = tree.nodes[dec_idx]
                    # Update depot_considered tasks
                    push!(depot_assigned_pkgs, dec_node.task_name)
                    # Update things that will be sent to coordinator
                    assignment_util += dec_node.util
                    task_util_allocation[drone_nm] = (task=dec_node.task_name, util=dec_node.util)
                    all_considered_tasks[drone_nm] = pkgs_to_consider
                end
            end # if ~(isempty(tree))
        end # for drone in depot_drones
    end # for (depot_number, depot_drones)
    # Run coordinate_assignment to resolve conflicts
    if ~(isempty(task_util_allocation))
        scoba_alg = SCoBAAlgorithm(allocation=server)
        true_task_util_allocation = coordinate_allocation!(scoba_alg, task_util_allocation,
                                                           all_considered_tasks, assignment_util,
                                                           success_prob, util_val_fn, 0.0)
        # NOTE: might have redundancies in true_task_util_assignment - ignore!
        for (drone_nm, pkg_util) in true_task_util_allocation
            @assert haskey(server.agent_task_allocation, drone_nm) == false
            pkg_nm = pkg_util.task
            if ~(haskey(routing_sim.busy_packages, pkg_nm))
                server.agent_task_allocation[drone_nm] = (name=pkg_nm, time=Inf)
                # NOTE: set true delivery return time once assigned
                routing_sim.true_delivery_return[(drone_nm, pkg_nm)] = sample_true_delivery_return_time(server.agent_task_windows[(drone_nm, pkg_nm)],
                                                                                                       server.current_time,
                                                                                                       routing_sim.tt_est_std_scale,
                                                                                                       rng)
                server.agent_prop_set[drone_nm].at_depot = false
                server.agent_prop_set[drone_nm].current_package = pkg_nm
                # TODO: Deleting from active pkgs here for convenience
                new_busy_package = routing_sim.active_packages[pkg_nm]
                routing_sim.busy_packages[pkg_nm] = new_busy_package
                delete!(routing_sim.active_packages, pkg_nm)
                routing_sim.num_active_packages -= 1
            end
        end # for (drone, pkg_util)
    end # if ~isempty task_util_allocation
end # function
|
{"hexsha": "499b3dffbe82bda6cf47d40094d52b3025a38fcb", "size": 5853, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/domains/routing/routing_scoba.jl", "max_stars_repo_name": "sisl/SCoBA.jl", "max_stars_repo_head_hexsha": "e66633dcf044decfb63b2d3cb1b5b059f79cd6ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2020-05-27T20:51:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T15:25:45.000Z", "max_issues_repo_path": "src/domains/routing/routing_scoba.jl", "max_issues_repo_name": "sisl/SCoBA.jl", "max_issues_repo_head_hexsha": "e66633dcf044decfb63b2d3cb1b5b059f79cd6ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-22T05:44:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-07T21:56:40.000Z", "max_forks_repo_path": "src/domains/routing/routing_scoba.jl", "max_forks_repo_name": "sisl/SCoBA.jl", "max_forks_repo_head_hexsha": "e66633dcf044decfb63b2d3cb1b5b059f79cd6ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-28T11:28:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T03:32:35.000Z", "avg_line_length": 40.6458333333, "max_line_length": 152, "alphanum_fraction": 0.6159234581, "num_tokens": 1254}
|
import torch
import numpy as np
from utils.block_diag_matrix import block_diag_irregular
from scipy.spatial import distance_matrix
def compute_adjs(args, seq_start_end):
    """Build fully-connected (all-ones) adjacency matrices for every sequence.

    For each ``(start, end)`` pedestrian slice a ``(T, N, N)`` tensor of ones is
    created, where ``T = obs_len + pred_len`` and ``N = end - start``; all slices
    are then combined into one block-diagonal matrix.

    Args:
        args: namespace with integer attributes ``obs_len`` and ``pred_len``.
        seq_start_end: iterable of ``(start, end)`` index pairs, one per sequence.

    Returns:
        Block-diagonal tensor produced by ``block_diag_irregular``.
    """
    num_steps = args.obs_len + args.pred_len
    adj_out = []
    for start, end in seq_start_end:
        # The block is identical at every timestep, so build it once per
        # sequence instead of once per timestep (the original recomputed
        # `interval` and the ones-matrix inside the time loop).
        interval = end - start
        ones_block = torch.from_numpy(np.ones((interval, interval)))
        # torch.stack copies the data, so repeating the same tensor is safe.
        adj_out.append(torch.stack([ones_block] * num_steps, 0))
    return block_diag_irregular(adj_out)
def compute_adjs_knnsim(args, seq_start_end, obs_traj, pred_traj_gt):
    """Build binary k-nearest-neighbour adjacency matrices per timestep.

    For each sequence slice and each timestep, connects every pedestrian to its
    ``top_k_neigh`` closest pedestrians (including itself, at distance 0).

    Args:
        args: namespace with ``obs_len``, ``pred_len`` and ``top_k_neigh``.
        seq_start_end: iterable of ``(start, end)`` index pairs, one per sequence.
        obs_traj: observed trajectories, shape (obs_len, num_peds, 2).
        pred_traj_gt: ground-truth future trajectories, shape (pred_len, num_peds, 2).

    Returns:
        Block-diagonal tensor produced by ``block_diag_irregular``.
    """
    # The concatenation does not depend on the sequence slice; the original
    # recomputed it inside the per-sequence loop.
    obs_and_pred_traj = torch.cat((obs_traj, pred_traj_gt))
    num_steps = args.obs_len + args.pred_len
    adj_out = []
    for start, end in seq_start_end:
        knn_t = []
        for t in range(num_steps):
            coords = np.asarray(obs_and_pred_traj[t, start:end, :])
            dists = distance_matrix(coords, coords)
            # Column indices of the k nearest neighbours per row.
            knn = np.argsort(dists, axis=1)[:, 0: min(args.top_k_neigh, dists.shape[0])]
            # One-hot rows marking each pedestrian's neighbours.
            final_dists = np.zeros_like(dists)
            for i in range(dists.shape[0]):
                final_dists[i, knn[i]] = 1
            knn_t.append(torch.from_numpy(final_dists))
        adj_out.append(torch.stack(knn_t, 0))
    return block_diag_irregular(adj_out)
def compute_adjs_distsim(args, seq_start_end, obs_traj, pred_traj_gt):
    """Build distance-similarity adjacency matrices ``exp(-d / sigma)`` per timestep.

    Args:
        args: namespace with ``obs_len``, ``pred_len`` and ``sigma``.
        seq_start_end: iterable of ``(start, end)`` index pairs, one per sequence.
        obs_traj: observed trajectories, shape (obs_len, num_peds, 2).
        pred_traj_gt: ground-truth future trajectories, shape (pred_len, num_peds, 2).

    Returns:
        Block-diagonal tensor produced by ``block_diag_irregular``.
    """
    # Loop-invariant: hoisted out of the per-sequence loop (the original
    # concatenated the trajectories once per sequence).
    obs_and_pred_traj = torch.cat((obs_traj, pred_traj_gt))
    num_steps = args.obs_len + args.pred_len
    adj_out = []
    for start, end in seq_start_end:
        sim_t = []
        for t in range(num_steps):
            coords = np.asarray(obs_and_pred_traj[t, start:end, :])
            dists = distance_matrix(coords, coords)
            # Exponential kernel: closer pedestrians get similarity nearer 1.
            sim_t.append(torch.from_numpy(np.exp(-dists / args.sigma)))
        adj_out.append(torch.stack(sim_t, 0))
    return block_diag_irregular(adj_out)
def compute_adjs_knnsim_pred(top_k_neigh, seq_start_end, pred_traj):
    """Binary k-nearest-neighbour adjacency for a single set of predicted positions.

    Unlike :func:`compute_adjs_knnsim`, the input has no time dimension:
    ``pred_traj`` holds one 2D position per pedestrian.

    Args:
        top_k_neigh: number of neighbours to connect (capped at the group size).
        seq_start_end: iterable of ``(start, end)`` index pairs, one per sequence.
        pred_traj: predicted positions, shape (num_peds, 2).

    Returns:
        Block-diagonal tensor produced by ``block_diag_irregular``.
    """
    blocks = []
    for _, (start, end) in enumerate(seq_start_end):
        pts = np.asarray(pred_traj[start:end, :])
        dists = distance_matrix(pts, pts)
        k = min(top_k_neigh, dists.shape[0])
        nearest = np.argsort(dists, axis=1)[:, :k]
        rows = []
        for idx in range(dists.shape[0]):
            row = np.zeros((dists.shape[1],))
            row[nearest[idx]] = 1
            rows.append(row)
        blocks.append(torch.from_numpy(np.stack(rows)))
    return block_diag_irregular(blocks)
def compute_adjs_distsim_pred(sigma, seq_start_end, pred_traj):
    """Distance-similarity adjacency ``exp(-d / sigma)`` for predicted positions.

    Args:
        sigma: kernel bandwidth.
        seq_start_end: iterable of ``(start, end)`` index pairs, one per sequence.
        pred_traj: predicted positions, shape (num_peds, 2).

    Returns:
        Block-diagonal tensor produced by ``block_diag_irregular``.
    """
    blocks = []
    for _, (start, end) in enumerate(seq_start_end):
        pts = np.asarray(pred_traj[start:end, :])
        pairwise = distance_matrix(pts, pts)
        blocks.append(torch.from_numpy(np.exp(-pairwise / sigma)))
    return block_diag_irregular(blocks)
|
{"hexsha": "d1e0677fa7f582efc4167bff7afb79397a695c20", "size": 3227, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/adj_matrix.py", "max_stars_repo_name": "alessiabertugli/AC-VRNN", "max_stars_repo_head_hexsha": "3a204bd23a7b90c3939efc6468fa6477c31a733f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-08-10T07:52:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T13:24:49.000Z", "max_issues_repo_path": "utils/adj_matrix.py", "max_issues_repo_name": "alessiabertugli/AC-VRNN", "max_issues_repo_head_hexsha": "3a204bd23a7b90c3939efc6468fa6477c31a733f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-11T02:54:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T06:40:59.000Z", "max_forks_repo_path": "utils/adj_matrix.py", "max_forks_repo_name": "alessiabertugli/AC-VRNN", "max_forks_repo_head_hexsha": "3a204bd23a7b90c3939efc6468fa6477c31a733f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-14T00:37:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-25T21:39:40.000Z", "avg_line_length": 41.9090909091, "max_line_length": 88, "alphanum_fraction": 0.6160520607, "include": true, "reason": "import numpy,from scipy", "num_tokens": 792}
|
import numpy as np
from cvxpy import *
import matplotlib.pyplot as pyplot
import heapq
import time
settings.USE_CVXCANON = True
# Accumulators shared across the 364A example scripts: per-solve objective
# values and total solver wall time.
ANSWERS = []
TIME = 0
np.random.seed(0)
# Problem dimensions: m measurements, n unknowns, up to k permuted measurements.
m=100
k=40 # max # permuted measurements
n=20
A=10 * np.random.randn(m,n)
x_true=np.random.randn(n,1) # true x value
y_true = A.dot(x_true) + np.random.randn(m,1)
# build permuted indices
perm_idxs=np.random.permutation(m)
perm_idxs=np.sort(perm_idxs[:k])
temp_perm=np.random.permutation(k)
new_pos=np.zeros(k)
for i in range(k):
    new_pos[i] = perm_idxs[temp_perm[i]]
new_pos = new_pos.astype(int)
# true permutation matrix
P=np.identity(m)
P[perm_idxs,:]=P[new_pos,:]
true_perm=[]
for i in range(k):
    if perm_idxs[i] != new_pos[i]:
        true_perm = np.append(true_perm, perm_idxs[i])
# Observed (permuted) measurements.
y = P.dot(y_true)
new_pos = None
# State for the alternating-minimization loop below: start with the identity
# permutation and no x estimate.
P_fixed = np.identity(m)
x_fixed = None
def optimizeP(A, x, y):
    """Heuristically estimate the permutation matrix P relating y ~ P * (A x).

    Sorts the entries of A*x and of y in decreasing order and pairs them off by
    rank, producing the permutation under which the sorted orders agree.

    Args:
        A: (m, n) design matrix.
        x: (n, 1) current estimate of the unknowns.
        y: (m, 1) possibly-permuted measurement vector.

    Returns:
        (m, m) numpy permutation matrix.
    """
    # Derive the size locally instead of reading the module-level global `m`,
    # so the helper is self-contained and works for any problem size.
    num_meas = len(y)
    P = np.identity(num_meas)
    Ax = A.dot(x)
    # Indices of Ax and y sorted by value, largest first.
    Ax_largest = heapq.nlargest(num_meas, range(len(Ax)), Ax.take)
    y_largest = heapq.nlargest(num_meas, range(len(y)), y.take)
    for i in range(num_meas):
        # Row y_largest[i] of P selects entry Ax_largest[i], matching ranks.
        P[y_largest[i], y_largest[i]] = 0
        P[y_largest[i], Ax_largest[i]] = 1
    return P
def numPermuted(P):
    """Count rows of the permutation matrix ``P`` that are actually moved.

    A row is "permuted" when its diagonal entry is not 1. Uses P's own size
    instead of the module-level global ``m``, so the helper works for any
    square matrix.

    Args:
        P: square (permutation) matrix as a numpy array.

    Returns:
        int: number of rows whose diagonal entry differs from 1.
    """
    return int(np.count_nonzero(np.diag(P) != 1))
# Alternating minimization: with the permutation estimate P_fixed held fixed,
# solve the least-squares problem for x; then re-estimate the permutation by
# rank-matching A*x against y. Repeat for a fixed number of rounds.
firstIter = None
for _ in range(20):
    x = Variable(n)
    objective = Minimize( norm( A*x - P_fixed.T.dot( y)) )
    constraints = []
    prob = Problem(objective, constraints)
    tic = time.time()
    result = prob.solve()
    toc = time.time()
    TIME += toc - tic
    ANSWERS.append(result)
    # The first round uses P_fixed = I, so firstIter is the unpermuted-model error.
    if firstIter is None:
        firstIter = result
    x_fixed = x.value
    P_fixed = optimizeP(A, x_fixed, y)
# NOTE(review): `numPermuted(P)` inspects the TRUE permutation matrix `P`
# built in the setup above, not the estimate `P_fixed` computed in the loop —
# confirm whether `P_fixed` was intended here.
print "Num permuted the same as k: ", numPermuted(P) == k
print "Final objective", result
print "P = I error", firstIter
|
{"hexsha": "8a2d74b42dbe7952449f4c43d3d5f95068a571e0", "size": 1724, "ext": "py", "lang": "Python", "max_stars_repo_path": "cvxpy/cvxcore/tests/python/364A_scripts/lsq_permute.py", "max_stars_repo_name": "jasondark/cvxpy", "max_stars_repo_head_hexsha": "56aaa01b0e9d98ae5a91a923708129a7b37a6f18", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2015-10-16T16:55:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T05:06:01.000Z", "max_issues_repo_path": "cvxpy/cvxcore/tests/python/364A_scripts/lsq_permute.py", "max_issues_repo_name": "h-vetinari/cvxpy", "max_issues_repo_head_hexsha": "86307f271819bb78fcdf64a9c3a424773e8269fa", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2015-09-16T16:33:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-23T07:31:44.000Z", "max_forks_repo_path": "cvxpy/cvxcore/tests/python/364A_scripts/lsq_permute.py", "max_forks_repo_name": "h-vetinari/cvxpy", "max_forks_repo_head_hexsha": "86307f271819bb78fcdf64a9c3a424773e8269fa", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2015-09-16T14:56:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T05:06:03.000Z", "avg_line_length": 18.9450549451, "max_line_length": 59, "alphanum_fraction": 0.6786542923, "include": true, "reason": "import numpy,from cvxpy", "num_tokens": 538}
|
import matplotlib
import numpy as np
from PIL import Image
from scipy.signal import convolve2d
matplotlib.use('agg')
if __name__ == '__main__':
    # Convert the histogram-equalisation test image to 8-bit grayscale ('L')
    # and re-save it as a JPEG. A commented-out 9x9 box-blur experiment that
    # previously lived here was removed as dead code.
    file = 'images/histeq.png'
    img = Image.open(file)
    img = img.convert('L')
    img.save('results/histeq.jpg')
|
{"hexsha": "173fe00beb5e2583680fcbfb5f409cf24f90e070", "size": 475, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "Patrick22414/cw-computer-vision", "max_stars_repo_head_hexsha": "899ed5ee6346ebd1b2b52ea2b9f618d90596a458", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "Patrick22414/cw-computer-vision", "max_issues_repo_head_hexsha": "899ed5ee6346ebd1b2b52ea2b9f618d90596a458", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "Patrick22414/cw-computer-vision", "max_forks_repo_head_hexsha": "899ed5ee6346ebd1b2b52ea2b9f618d90596a458", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5909090909, "max_line_length": 63, "alphanum_fraction": 0.6252631579, "include": true, "reason": "import numpy,from scipy", "num_tokens": 142}
|
#
# Tests for the Processed Variable class
#
import pybamm
import casadi
import numpy as np
import unittest
import tests
class TestProcessedSymbolicVariable(unittest.TestCase):
    """Unit tests for ``pybamm.ProcessedSymbolicVariable``.

    Exercises 0D and 1D discretised variables, with scalar and vector casadi
    symbolic inputs, checking values, parameter sensitivities and error paths.
    """
    def test_processed_variable_0D(self):
        """0D variable without symbolic inputs: value works; sensitivity raises."""
        # without inputs
        y = pybamm.StateVector(slice(0, 1))
        var = 2 * y
        var.mesh = None
        t_sol = np.linspace(0, 1)
        y_sol = np.array([np.linspace(0, 5)])
        solution = pybamm.Solution(t_sol, y_sol)
        processed_var = pybamm.ProcessedSymbolicVariable(var, solution)
        np.testing.assert_array_equal(processed_var.value(), 2 * y_sol)
        # No sensitivity as variable is not symbolic
        with self.assertRaisesRegex(ValueError, "Variable is not symbolic"):
            processed_var.sensitivity()
    def test_processed_variable_0D_with_inputs(self):
        """0D variable with two symbolic inputs: values, sensitivities, bad inputs."""
        # with symbolic inputs
        y = pybamm.StateVector(slice(0, 1))
        p = pybamm.InputParameter("p")
        q = pybamm.InputParameter("q")
        var = p * y + q
        var.mesh = None
        t_sol = np.linspace(0, 1)
        y_sol = np.array([np.linspace(0, 5)])
        solution = pybamm.Solution(t_sol, y_sol)
        solution.inputs = {"p": casadi.MX.sym("p"), "q": casadi.MX.sym("q")}
        processed_var = pybamm.ProcessedSymbolicVariable(var, solution)
        np.testing.assert_array_equal(
            processed_var.value({"p": 3, "q": 4}).full(), 3 * y_sol + 4
        )
        # d(p*y + q)/dp = y, d(p*y + q)/dq = 1, stacked column-wise.
        np.testing.assert_array_equal(
            processed_var.sensitivity({"p": 3, "q": 4}).full(),
            np.c_[y_sol.T, np.ones_like(y_sol).T],
        )
        # via value_and_sensitivity
        val, sens = processed_var.value_and_sensitivity({"p": 3, "q": 4})
        np.testing.assert_array_equal(val.full(), 3 * y_sol + 4)
        np.testing.assert_array_equal(
            sens.full(), np.c_[y_sol.T, np.ones_like(y_sol).T]
        )
        # Test bad inputs
        with self.assertRaisesRegex(TypeError, "inputs should be 'dict'"):
            processed_var.value(1)
        with self.assertRaisesRegex(KeyError, "Inconsistent input keys"):
            processed_var.value({"not p": 3})
    def test_processed_variable_0D_some_inputs(self):
        """0D variable mixing one symbolic input with one fixed numeric input."""
        # with some symbolic inputs and some non-symbolic inputs
        y = pybamm.StateVector(slice(0, 1))
        p = pybamm.InputParameter("p")
        q = pybamm.InputParameter("q")
        var = p * y - q
        var.mesh = None
        t_sol = np.linspace(0, 1)
        y_sol = np.array([np.linspace(0, 5)])
        solution = pybamm.Solution(t_sol, y_sol)
        solution.inputs = {"p": casadi.MX.sym("p"), "q": 2}
        processed_var = pybamm.ProcessedSymbolicVariable(var, solution)
        np.testing.assert_array_equal(
            processed_var.value({"p": 3}).full(), 3 * y_sol - 2
        )
        np.testing.assert_array_equal(
            processed_var.sensitivity({"p": 3}).full(), y_sol.T
        )
    def test_processed_variable_1D(self):
        """1D (space-dependent) variable without inputs, scalar and vector time."""
        var = pybamm.Variable("var", domain=["negative electrode", "separator"])
        x = pybamm.SpatialVariable("x", domain=["negative electrode", "separator"])
        eqn = var + x
        # On nodes
        disc = tests.get_discretisation_for_testing()
        disc.set_variable_slices([var])
        x_sol = disc.process_symbol(x).entries[:, 0]
        eqn_sol = disc.process_symbol(eqn)
        # With scalar t_sol
        t_sol = [0]
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        processed_eqn = pybamm.ProcessedSymbolicVariable(eqn_sol, sol)
        np.testing.assert_array_equal(
            processed_eqn.value(), y_sol + x_sol[:, np.newaxis]
        )
        # With vector t_sol — the (space, time) result is flattened to a column.
        t_sol = np.linspace(0, 1)
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * np.linspace(0, 5)
        sol = pybamm.Solution(t_sol, y_sol)
        processed_eqn = pybamm.ProcessedSymbolicVariable(eqn_sol, sol)
        np.testing.assert_array_equal(
            processed_eqn.value(), (y_sol + x_sol[:, np.newaxis]).T.reshape(-1, 1)
        )
    def test_processed_variable_1D_with_scalar_inputs(self):
        """1D variable with scalar symbolic inputs: values and sensitivities."""
        var = pybamm.Variable("var", domain=["negative electrode", "separator"])
        x = pybamm.SpatialVariable("x", domain=["negative electrode", "separator"])
        p = pybamm.InputParameter("p")
        q = pybamm.InputParameter("q")
        eqn = var * p + 2 * q
        # On nodes
        disc = tests.get_discretisation_for_testing()
        disc.set_variable_slices([var])
        x_sol = disc.process_symbol(x).entries[:, 0]
        eqn_sol = disc.process_symbol(eqn)
        # Scalar t
        t_sol = [0]
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        sol.inputs = {"p": casadi.MX.sym("p"), "q": casadi.MX.sym("q")}
        processed_eqn = pybamm.ProcessedSymbolicVariable(eqn_sol, sol)
        # Test values
        np.testing.assert_array_equal(
            processed_eqn.value({"p": 27, "q": -42}), 27 * y_sol - 84
        )
        # Test sensitivities (independent of the input values for a linear eqn)
        np.testing.assert_array_equal(
            processed_eqn.sensitivity({"p": 27, "q": -84}),
            np.c_[y_sol, 2 * np.ones_like(y_sol)],
        )
        ################################################################################
        # Vector t
        t_sol = np.linspace(0, 1)
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * np.linspace(0, 5)
        sol = pybamm.Solution(t_sol, y_sol)
        sol.inputs = {"p": casadi.MX.sym("p"), "q": casadi.MX.sym("q")}
        processed_eqn = pybamm.ProcessedSymbolicVariable(eqn_sol, sol)
        # Test values
        np.testing.assert_array_equal(
            processed_eqn.value({"p": 27, "q": -42}), (27 * y_sol - 84).T.reshape(-1, 1)
        )
        # Test sensitivities
        np.testing.assert_array_equal(
            processed_eqn.sensitivity({"p": 27, "q": -42}),
            np.c_[y_sol.T.flatten(), 2 * np.ones_like(y_sol.T.flatten())],
        )
    def test_processed_variable_1D_with_vector_inputs(self):
        """1D variable with a vector-valued symbolic input (expected size 65)."""
        var = pybamm.Variable("var", domain=["negative electrode", "separator"])
        x = pybamm.SpatialVariable("x", domain=["negative electrode", "separator"])
        p = pybamm.InputParameter("p", domain=["negative electrode", "separator"])
        p.set_expected_size(65)
        q = pybamm.InputParameter("q")
        eqn = (var * p) ** 2 + 2 * q
        # On nodes
        disc = tests.get_discretisation_for_testing()
        disc.set_variable_slices([var])
        x_sol = disc.process_symbol(x).entries[:, 0]
        n = x_sol.size
        eqn_sol = disc.process_symbol(eqn)
        # Scalar t
        t_sol = [0]
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        sol.inputs = {"p": casadi.MX.sym("p", n), "q": casadi.MX.sym("q")}
        processed_eqn = pybamm.ProcessedSymbolicVariable(eqn_sol, sol)
        # Test values - constant p
        np.testing.assert_array_equal(
            processed_eqn.value({"p": 27 * np.ones(n), "q": -42}),
            (27 * y_sol) ** 2 - 84,
        )
        # Test values - varying p
        p = np.linspace(0, 1, n)
        np.testing.assert_array_equal(
            processed_eqn.value({"p": p, "q": 3}), (p[:, np.newaxis] * y_sol) ** 2 + 6
        )
        # Test sensitivities - constant p
        np.testing.assert_array_equal(
            processed_eqn.sensitivity({"p": 2 * np.ones(n), "q": -84}),
            np.c_[100 * np.eye(y_sol.size), 2 * np.ones(n)],
        )
        # Test sensitivities - varying p
        # d/dy((py)**2) = (2*p*y) * y
        np.testing.assert_array_equal(
            processed_eqn.sensitivity({"p": p, "q": -84}),
            np.c_[
                np.diag((2 * p[:, np.newaxis] * y_sol ** 2).flatten()), 2 * np.ones(n)
            ],
        )
        # Bad shape
        with self.assertRaisesRegex(
            ValueError, "Wrong shape for input 'p': expected 65, actual 5"
        ):
            processed_eqn.value({"p": casadi.MX.sym("p", 5), "q": 1})
    def test_1D_different_domains(self):
        """Construction succeeds on several 1D domains; a 2D variable raises."""
        # Negative electrode domain
        var = pybamm.Variable("var", domain=["negative electrode"])
        x = pybamm.SpatialVariable("x", domain=["negative electrode"])
        disc = tests.get_discretisation_for_testing()
        disc.set_variable_slices([var])
        x_sol = disc.process_symbol(x).entries[:, 0]
        var_sol = disc.process_symbol(var)
        t_sol = [0]
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        pybamm.ProcessedSymbolicVariable(var_sol, sol)
        # Particle domain
        var = pybamm.Variable("var", domain=["negative particle"])
        r = pybamm.SpatialVariable("r", domain=["negative particle"])
        disc = tests.get_discretisation_for_testing()
        disc.set_variable_slices([var])
        r_sol = disc.process_symbol(r).entries[:, 0]
        var_sol = disc.process_symbol(var)
        t_sol = [0]
        y_sol = np.ones_like(r_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        pybamm.ProcessedSymbolicVariable(var_sol, sol)
        # Current collector domain
        var = pybamm.Variable("var", domain=["current collector"])
        z = pybamm.SpatialVariable("z", domain=["current collector"])
        disc = tests.get_1p1d_discretisation_for_testing()
        disc.set_variable_slices([var])
        z_sol = disc.process_symbol(z).entries[:, 0]
        var_sol = disc.process_symbol(var)
        t_sol = [0]
        y_sol = np.ones_like(z_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        pybamm.ProcessedSymbolicVariable(var_sol, sol)
        # Other domain — built by hand to exercise a non-battery geometry.
        var = pybamm.Variable("var", domain=["line"])
        x = pybamm.SpatialVariable("x", domain=["line"])
        geometry = pybamm.Geometry(
            {"line": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(1)}}}
        )
        submesh_types = {"line": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh)}
        var_pts = {x: 10}
        mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
        disc = pybamm.Discretisation(mesh, {"line": pybamm.FiniteVolume()})
        disc.set_variable_slices([var])
        x_sol = disc.process_symbol(x).entries[:, 0]
        var_sol = disc.process_symbol(var)
        t_sol = [0]
        y_sol = np.ones_like(x_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        pybamm.ProcessedSymbolicVariable(var_sol, sol)
        # 2D fails
        var = pybamm.Variable(
            "var",
            domain=["negative particle"],
            auxiliary_domains={"secondary": "negative electrode"},
        )
        r = pybamm.SpatialVariable(
            "r",
            domain=["negative particle"],
            auxiliary_domains={"secondary": "negative electrode"},
        )
        disc = tests.get_p2d_discretisation_for_testing()
        disc.set_variable_slices([var])
        r_sol = disc.process_symbol(r).entries[:, 0]
        var_sol = disc.process_symbol(var)
        t_sol = [0]
        y_sol = np.ones_like(r_sol)[:, np.newaxis] * 5
        sol = pybamm.Solution(t_sol, y_sol)
        with self.assertRaisesRegex(NotImplementedError, "Shape not recognized"):
            pybamm.ProcessedSymbolicVariable(var_sol, sol)
if __name__ == "__main__":
    print("Add -v for more debug output")
    import sys
    # Passing -v on the command line enables pybamm's debug mode for extra
    # diagnostics during the test run.
    if "-v" in sys.argv:
        debug = True
        pybamm.settings.debug_mode = True
    unittest.main()
|
{"hexsha": "1346de88a295cdea615aeb6e2a78597652d66ab4", "size": 11566, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/test_solvers/test_processed_symbolic_variable.py", "max_stars_repo_name": "DrSOKane/PyBaMM", "max_stars_repo_head_hexsha": "903b4a05ef5a4f91633e990d4aec12c53df723a2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/unit/test_solvers/test_processed_symbolic_variable.py", "max_issues_repo_name": "DrSOKane/PyBaMM", "max_issues_repo_head_hexsha": "903b4a05ef5a4f91633e990d4aec12c53df723a2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/unit/test_solvers/test_processed_symbolic_variable.py", "max_forks_repo_name": "DrSOKane/PyBaMM", "max_forks_repo_head_hexsha": "903b4a05ef5a4f91633e990d4aec12c53df723a2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0705128205, "max_line_length": 88, "alphanum_fraction": 0.5860280131, "include": true, "reason": "import numpy", "num_tokens": 3028}
|
Lutheran Episcopal Christian Fellowship (LECF) is a campus student religious and spiritual organization of Christians of the Lutheran, Episcopal (Anglican), and other traditions, as well as seekers. LECF meets at The Belfry.
LECF is a progressive Christian group, in the liberal Christian tradition. They believe the Bible is the inspired word of God, and is the story of God's redeeming love for the world. They focus on the grace and forgiveness of God in Christ. They are a diverse group which does not require uniformity in beliefs and which honors each person's spiritual journey. They are open and affirming of LGBTIQ persons and their ministries. They believe in personal transformation, doing justice, loving God and neighbor as oneself, and living abundantly in Christ.
Events
There is a weekly worship, dinner, and discussion on Wednesday nights at 7 PM (during academic terms) at The Belfry for students, faculty, staff and alumni. Worship is held every Wednesday night at 7 pm, and is immediately followed by dinner and discussion. Worship is held in the Ranstrom Chapel and is traditional Lutheran/Episcopal liturgy with readings from Scripture, a brief homily, prayers, and Holy Communion, lasting about half an hour. All are welcome at worship! A lively discussion of topics of faith and life, current events, or spirituality follows dinner.
There are also fellowship, service, learning, and retreat activities with Lutheran Episcopal Campus Ministry at The Belfry. See their affiliated website at http://www.thebelfry.org for more details or come by and pick up a few brochures.
They also organize a free lecture series called the http://www.thebelfry.org/staugustine.html St. Augustine Chair once a year, usually in the Spring term. Topics of mutual interest to the academic and faith communities are addressed by an engaging speaker.
|
{"hexsha": "ddba611017071a5004258b2d43c146f6ddf5a15f", "size": 1887, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Lutheran_Episcopal_Christian_Fellowship.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Lutheran_Episcopal_Christian_Fellowship.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Lutheran_Episcopal_Christian_Fellowship.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 134.7857142857, "max_line_length": 571, "alphanum_fraction": 0.808691044, "num_tokens": 389}
|
// Copyright Carl Philipp Reh 2009 - 2016.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <fcppt/algorithm/fold.hpp>
#include <fcppt/preprocessor/disable_gcc_warning.hpp>
#include <fcppt/preprocessor/pop_warning.hpp>
#include <fcppt/preprocessor/push_warning.hpp>
#include <fcppt/config/external_begin.hpp>
#include <boost/test/unit_test.hpp>
#include <functional>
#include <vector>
#include <fcppt/config/external_end.hpp>
FCPPT_PP_PUSH_WARNING
FCPPT_PP_DISABLE_GCC_WARNING(-Weffc++)

BOOST_AUTO_TEST_CASE(
	algorithm_fold
)
{
FCPPT_PP_POP_WARNING

	// Nested integer containers used as the fold input.
	typedef std::vector<int> int_vector;
	typedef std::vector<int_vector> int_vector_vector;

	int_vector_vector const nested{
		int_vector{1, 2},
		int_vector{3, 4}
	};

	// Fold over the outer container; each inner vector is itself folded
	// into the running total using std::plus.
	int const total(
		fcppt::algorithm::fold(
			nested,
			0,
			[](
				int_vector const &_inner,
				int const _acc
			)
			{
				return
					fcppt::algorithm::fold(
						_inner,
						_acc,
						std::plus<int>()
					);
			}
		)
	);

	// 1 + 2 + 3 + 4 == 10
	BOOST_CHECK_EQUAL(total, 10);
}
|
{"hexsha": "cb00738e8a849f4c2af3868b77d1ee02f40e8870", "size": 1183, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/algorithm/fold.cpp", "max_stars_repo_name": "vinzenz/fcppt", "max_stars_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/algorithm/fold.cpp", "max_issues_repo_name": "vinzenz/fcppt", "max_issues_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/algorithm/fold.cpp", "max_forks_repo_name": "vinzenz/fcppt", "max_forks_repo_head_hexsha": "3f8cc5babdee178a9bbd06ca3ce7ad405d19aa6a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.5657894737, "max_line_length": 61, "alphanum_fraction": 0.6686390533, "num_tokens": 350}
|
import argparse
import numpy as np
import json
import torch
from torchvision import datasets, transforms, models
from utility import process_image
from model import load_checkpoint
# Command-line interface: options for image path, checkpoint, top-K, label map and device.
parser = argparse.ArgumentParser(description='Predict the top K most likely flower classes based on image path and saved checkpoint')
# Create command line arguments using add_argument() from the ArgumentParser method
parser.add_argument('--image_dir', type=str, default='./flowers/test/74/image_01191.jpg',
                    help='set path of the flower image (default=./flowers/test/74/image_01191.jpg)')
parser.add_argument('--model_input', type=str, default='checkpoint.pth',
                    help='set path of the model checkpoint (default=checkpoint.pth)')
parser.add_argument('--top_k', type=int, default=5,
                    help='set number of top K most likely classes (default=5)')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
                    help='set file for mapping of flower categories to category names (default=cat_to_name.json)')
parser.add_argument('--device', type=str, default='cuda', choices=['cuda', 'cpu'],
                    help='set device mode cuda or cpu (default=cuda)')
results = parser.parse_args()

image_dir = results.image_dir
model_input = results.model_input
top_k = results.top_k

# Select compute device, falling back to CPU when CUDA was requested but is unavailable.
if results.device == 'cuda' and torch.cuda.is_available():
    device = torch.device('cuda')
    print('CUDA GPU mode is enabled now')
elif results.device == 'cpu':
    device = torch.device('cpu')
    print('CPU mode is enabled now')
else:
    print('CUDA GPU is not supported on this system so switching to CPU mode')
    device = torch.device('cpu')

# Load the category-index -> flower-name mapping from JSON.
category_names = results.category_names
with open(category_names, 'r') as f:
    cat_to_name = json.load(f)

# Rebuild the trained model from the checkpoint; freeze all weights since
# this script only performs inference.
model, optimizer = load_checkpoint(model_input)
for param in model.parameters():
    param.requires_grad = False
# Class Prediction (The predict function successfully takes the path to an image and a checkpoint,
# then returns the top K most probably classes for that image)
# Predicting with GPU: The function allows users to use the GPU or CPU to calculate the predictions
def predict(image_dir, model, topk, device):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Returns the pre-processed image tensor, the top-K probabilities and the
    top-K class labels (as strings from model.class_to_idx).
    '''
    # Move the network to the requested device and switch to evaluation mode.
    model.to(device)
    model.eval()

    # Pre-process the image and run a single forward pass without gradients.
    image_tensor = process_image(image_dir).to(device)
    with torch.no_grad():
        output = model.forward(image_tensor)
    ps = torch.exp(output)

    # Top K classes along with their associated probabilities.
    top = ps.topk(topk)
    probs_topk = np.array(top[0])[0]
    class_idx = np.array(top[1])[0]

    # Invert the model.class_to_idx dict to map indices back to class labels.
    idx_to_class = {value: key for key, value in model.class_to_idx.items()}
    class_topk = [idx_to_class[idx] for idx in class_idx]

    return image_tensor, probs_topk, class_topk
# Predicting classes: The predict function successfully reads in an image and a checkpoint
# then returns the most likely image class index and it's associated probability
# Run prediction function
# Run prediction function
image_tensor, probs_topk, class_topk = predict(image_dir, model, top_k, device)
# Map the predicted class labels to human-readable flower names.
predicted_flower_names = [cat_to_name[idx] for idx in class_topk]
print("{} most likely predicted flower names = {}".format(top_k, predicted_flower_names))
print("{} most likely predicted probability = {}".format(top_k, probs_topk))
# Report the single best prediction with its probability as a percentage.
print("The model predicted flower name as {} with {:.3f}% probability".format(predicted_flower_names[0], 100 * probs_topk[0]))
|
{"hexsha": "cb74915b4b17b4b56c49503c598701f95fbc9473", "size": 3797, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict.py", "max_stars_repo_name": "cynthia3r/flower_image_classifier", "max_stars_repo_head_hexsha": "bac89f88410642d164030fd60436597ba5270f20", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-10T23:27:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-10T23:27:55.000Z", "max_issues_repo_path": "predict.py", "max_issues_repo_name": "cynthia3r/flower_image_classifier", "max_issues_repo_head_hexsha": "bac89f88410642d164030fd60436597ba5270f20", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict.py", "max_forks_repo_name": "cynthia3r/flower_image_classifier", "max_forks_repo_head_hexsha": "bac89f88410642d164030fd60436597ba5270f20", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9684210526, "max_line_length": 133, "alphanum_fraction": 0.7234658941, "include": true, "reason": "import numpy", "num_tokens": 836}
|
import os
import cv2
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import skimage
import glob
ROOT_DIR = os.getcwd()
sys.path.append(ROOT_DIR)
from Mask_RCNN.mrcnn import utils
from Mask_RCNN.mrcnn import visualize
from Mask_RCNN.mrcnn.visualize import display_images
import Mask_RCNN.mrcnn.model as modellib
from Mask_RCNN.mrcnn.model import log
from train import TrainConfig
from train import TumorDataset
# Directory where training logs / checkpoints are stored.
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
print(os.getcwd())
# Path to the trained weights to evaluate -- update to the desired checkpoint.
custom_WEIGHTS_PATH = "Mask_RCNN/logs/tumor_detect20211207T1827/mask_rcnn_tumor_detect_0100.h5"
class InferenceConfig(TrainConfig):
    # Run inference one image at a time on a single device.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
def get_ax(rows=1, cols=1, size=7):
    """Return a Matplotlib Axes grid of shape (rows, cols), scaled by `size`."""
    figure, axes = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return axes
# Build the inference configuration and load the validation split of the dataset.
inference_config = InferenceConfig()
DATASET_DIR = './brain-tumor-segmentation/brain_tumor_data/'

dataset_val = TumorDataset()
dataset_val.load_brain_tumor_images(DATASET_DIR, 'val')
dataset_val.prepare()

# Create the model in inference mode on the CPU and load the trained weights.
with tf.device("/cpu:0"):
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
                              config=inference_config)
print("Loading weights ", custom_WEIGHTS_PATH)
model.load_weights(custom_WEIGHTS_PATH, by_name=True)

# Reload the visualize module so local edits are picked up without restarting.
from importlib import reload
reload(visualize)

# Pick one validation image and load its ground-truth boxes, classes and masks.
image_id = 3
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset_val, inference_config, image_id, use_mini_mask=False)
info = dataset_val.image_info[image_id]
print("image ID: {}.{} ({}) {}".format(info["source"], info["id"], image_id,
                                       dataset_val.image_reference(image_id)))
# Run object detection
results = model.detect([image], verbose=1)
r = results[0]
print(r)

# Overlay ground truth vs. predictions (boxes and masks) on the image.
visualize.display_differences(
    image,
    gt_bbox, gt_class_id, gt_mask,
    r['rois'], r['class_ids'], r['scores'], r['masks'],
    class_names=['tumor'], title="", ax=get_ax(),
    show_mask=True, show_box=True)
plt.show()
|
{"hexsha": "f75314bb4952672c082c83819e9de3a44c8ad901", "size": 2097, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference_2.py", "max_stars_repo_name": "mosvlad/tumor_mask_rcnn", "max_stars_repo_head_hexsha": "16d6b20431553e6e1cf1594686a1f503171d5f8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference_2.py", "max_issues_repo_name": "mosvlad/tumor_mask_rcnn", "max_issues_repo_head_hexsha": "16d6b20431553e6e1cf1594686a1f503171d5f8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference_2.py", "max_forks_repo_name": "mosvlad/tumor_mask_rcnn", "max_forks_repo_head_hexsha": "16d6b20431553e6e1cf1594686a1f503171d5f8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8846153846, "max_line_length": 95, "alphanum_fraction": 0.7415355269, "include": true, "reason": "import numpy", "num_tokens": 537}
|
import os
import pickle
import numpy as np
import matplotlib.pylab as plt
import powerlaw as pl
def plot_avalanches(aval_times, aval_sizes):
    """Plot avalanche event distributions.

    Includes plots and power-law fits for duration, size, and average size,
    saved as PDFs under ../plots.

    Parameters
    ----------
    aval_times : array-like
        Duration of each avalanche event.
    aval_sizes : array-like
        Size of each avalanche event.
    """
    # figure main parameters
    FIG_SIZE = (6, 5)
    FONT_SIZE = 12

    # load variables
    aval_times = np.array(aval_times)
    aval_sizes = np.array(aval_sizes)

    # fit power-laws
    # xmax should be estimated based on the plot
    size = pl.Fit(aval_sizes, discrete=True, xmin=1, xmax=1000)
    time = pl.Fit(aval_times, discrete=True, xmin=1, xmax=100)

    # average avalanche size for each observed duration
    unique_t = np.unique(aval_times)
    mean_s = np.zeros_like(unique_t)
    for t, dur in enumerate(unique_t):
        mean_s[t] = aval_sizes[np.where(aval_times == dur)[0]].mean()

    # avalanche duration distribution with the fitted power law
    fig_dur = plt.figure(figsize=FIG_SIZE)
    x_range = np.arange(1, aval_times.max())
    plt.plot(x_range, x_range**(-time.alpha), 'r',
             label=r'$\alpha = %.2f$' % time.alpha)
    t_unique, t_counts = np.unique(aval_times, return_counts=True)
    plt.plot(t_unique, t_counts / float(t_counts.sum()), '.', color='gray',
             label=r'data')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'$t$', fontsize=FONT_SIZE)
    plt.ylabel(r'$f(t)$', fontsize=FONT_SIZE)
    plt.title(r'Avalanche times', fontsize=FONT_SIZE)
    plt.legend(loc='best', frameon=False, fontsize=FONT_SIZE)

    # avalanche size distribution with the fitted power law
    fig_size = plt.figure(figsize=FIG_SIZE)
    x_range = np.arange(1, aval_sizes.max())
    plt.plot(x_range, x_range**(-size.alpha), 'r',
             label=r'$\alpha = %.2f$' % size.alpha)
    s_unique, s_counts = np.unique(aval_sizes, return_counts=True)
    plt.plot(s_unique, s_counts / float(s_counts.sum()), '.', color='gray',
             label=r'data')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'$s$', fontsize=FONT_SIZE)
    plt.ylabel(r'$f(s)$', fontsize=FONT_SIZE)
    plt.title(r'Avalanche sizes', fontsize=FONT_SIZE)
    plt.legend(loc='best', frameon=False, fontsize=FONT_SIZE)

    # average size as a function of duration (data only, no fit is drawn)
    fig_scale = plt.figure(figsize=FIG_SIZE)
    # NOTE(review): removed an unused `x_range = np.arange(1, unique_t.max())`
    # assignment here; the plot below only uses `unique_t` and `mean_s`.
    plt.plot(unique_t, mean_s, '.', color='gray',
             label=r'sim. data')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel(r'$t$', fontsize=FONT_SIZE)
    plt.ylabel(r'$\langle s \rangle$', fontsize=FONT_SIZE)
    plt.title(r'Average avalanche size', fontsize=FONT_SIZE)
    plt.legend(loc='best', frameon=False, fontsize=FONT_SIZE)

    # save figures
    if not os.path.exists('../plots'):
        os.makedirs('../plots')
    fig_dur.savefig('../plots/avalanche_duration.pdf', dpi=200)
    fig_size.savefig('../plots/avalanche_size.pdf', dpi=200)
    fig_scale.savefig('../plots/avalanche_scaling.pdf', dpi=200)
def plot_pile(lattice):
    """Plot the final shape of the pile of sand and save it as a PDF."""
    figure = plt.figure(figsize=(5, 5))
    plt.imshow(lattice, interpolation='none', cmap='magma')
    # Hide both axes' tick marks and labels.
    plt.xticks([], [])
    plt.yticks([], [])

    plots_dir = '../plots'
    if not os.path.exists(plots_dir):
        os.makedirs(plots_dir)
    figure.savefig(plots_dir + '/sandpile.pdf', dpi=200)
if __name__ == "__main__":
    EXP_NAME='test/ASM_1'
    # Locate the results directory for the experiment.
    # NOTE(review): the loop body never uses `n_sim`, so the same path is
    # checked on every iteration and the loop exits on the first pass when it
    # exists -- presumably this was meant to scan numbered runs; confirm.
    # If the directory never exists, the pickle loads below will raise.
    for n_sim in range(1, 1000):
        results_dir = '../results/'+EXP_NAME+'/'
        if os.path.exists(results_dir):
            break
    # Load the pickled avalanche statistics and produce the plots.
    aval_times = pickle.load(open(results_dir+'avalanche_times.p', 'rb'))
    aval_sizes = pickle.load(open(results_dir+'avalanche_sizes.p', 'rb'))
    plot_avalanches(aval_times, aval_sizes)
|
{"hexsha": "832a767ed9908d3aa3e7738ce6c9e25bdd6f400f", "size": 3505, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_avalanches.py", "max_stars_repo_name": "delpapa/sandpilemodel", "max_stars_repo_head_hexsha": "6d176ff2e711f33668a33ea1947d71a69393871e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plot_avalanches.py", "max_issues_repo_name": "delpapa/sandpilemodel", "max_issues_repo_head_hexsha": "6d176ff2e711f33668a33ea1947d71a69393871e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_avalanches.py", "max_forks_repo_name": "delpapa/sandpilemodel", "max_forks_repo_head_hexsha": "6d176ff2e711f33668a33ea1947d71a69393871e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-09T21:13:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T16:53:54.000Z", "avg_line_length": 33.380952381, "max_line_length": 75, "alphanum_fraction": 0.6470756063, "include": true, "reason": "import numpy", "num_tokens": 934}
|
from os import name
from numpy import load
from .bed import load_from_bed, recode_zarr
import pandas
import numpy
from .load import BinaryICDLoader
import zarr
import pytest
@pytest.mark.skip(reason="Requires gwas results")
def test_load_from_bed():
    """Load 50 samples x 100 variants from a plink .bed file and check shape and codes."""
    bfile_prefix = '/media/data1/ag3r/ukb/runs/all/split/train'
    fam = f'{bfile_prefix}/plink.fam'
    bed = f'{bfile_prefix}/plink.bed'
    names = ['fid', 'iid', '2', '3', '4', '5']
    # Raw string for the regex separator: '\s' in a plain string literal is an
    # invalid escape sequence (DeprecationWarning); the runtime value is unchanged.
    samples_frame = pandas.read_csv(fam, names=names, header=None, sep=r'\s+')
    samples = samples_frame.iloc[:50, 0].tolist()
    gwas_path = '/media/data1/ag3r/ukb/runs/gwas/diabetes_e119/plink2.PHENO1.glm.logistic.hybrid'
    data = load_from_bed(bed, samples, gwas_path, limit=100)
    assert data.shape == (50, 100)
    # Expected genotype codes: -127 plus 0/1/2 -- presumably -127 marks missing; confirm
    unique = numpy.unique(data)
    assert all(unique == numpy.array([-127, 0, 1, 2]))
@pytest.mark.skip(reason="Requires old zarr recoding")
def test_recode_zarr():
    """Recode a tiny zarr store and check the packed genotypes round-trip."""
    old_store = '/media/data1/ag3r/test/recode_zarr_old'
    root = zarr.open_group(old_store, mode='w')
    root.create_dataset('samples', data=numpy.array([1, 2]))
    root.create_dataset('positions', data=numpy.arange(15))
    genotypes = numpy.random.randint(0, 4, size=(2, 15), dtype=numpy.uint8)
    root.create_dataset('data', data=genotypes, dtype=numpy.uint8)

    new_store = '/media/data1/ag3r/test/recode_zarr_new'
    recode_zarr(old_store, new_store)

    recoded = zarr.open_group(new_store, mode='r')
    print(recoded.tree())
    print(root.tree())

    # Unpack the bit-packed data and recombine bit pairs into 0..3 codes.
    unpacked = numpy.unpackbits(recoded['data'][:], axis=1)
    decoded = unpacked[:, ::2] * 2 + unpacked[:, 1::2]
    # Trailing columns beyond the 15 real positions are padding from packing.
    assert (genotypes == decoded[:, :15]).all()
@pytest.mark.skip(reason="Requires old zarr recoding")
def test_recoded_zarr_benchmark():
    """Recode a large random zarr store and verify the packed data round-trips."""
    test_zarr_path = '/media/data1/ag3r/test/recode_zarr_old'
    group = zarr.open_group(test_zarr_path, mode='w')
    samples_count = 1024*10
    variants_count = 1024*32
    group.create_dataset('samples', data=numpy.arange(samples_count))
    group.create_dataset('positions', data=numpy.arange(variants_count))
    data = numpy.random.randint(0, 4, size=(samples_count, variants_count), dtype=numpy.uint8)
    group.create_dataset('data', data=data, dtype=numpy.uint8)

    new_zarr_path = '/media/data1/ag3r/test/recode_zarr_new'
    recode_zarr(test_zarr_path, new_zarr_path)

    new_group = zarr.open_group(new_zarr_path, mode='r')
    print(new_group.tree())
    print(group.tree())

    # Unpack the bit-packed data and recombine bit pairs into 0..3 codes.
    new_data = new_group['data'][:]
    new_data = numpy.unpackbits(new_data, axis=1)
    new_data = new_data[:, ::2]*2 + new_data[:, 1::2]
    # BUG FIX: the original asserted only the first 15 columns (copy-pasted
    # from the small test above); compare the full variant range instead.
    assert (data == new_data[:, :variants_count]).all()
|
{"hexsha": "0058f176d1a2b9559e85109c66008974a7dfccb3", "size": 2746, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ukb_loader/bed_test.py", "max_stars_repo_name": "alex-medvedev-msc/ukb_loader", "max_stars_repo_head_hexsha": "4940f3859eb1cc167bd768def97ce8d55c14892b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ukb_loader/bed_test.py", "max_issues_repo_name": "alex-medvedev-msc/ukb_loader", "max_issues_repo_head_hexsha": "4940f3859eb1cc167bd768def97ce8d55c14892b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ukb_loader/bed_test.py", "max_forks_repo_name": "alex-medvedev-msc/ukb_loader", "max_forks_repo_head_hexsha": "4940f3859eb1cc167bd768def97ce8d55c14892b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1388888889, "max_line_length": 97, "alphanum_fraction": 0.6984705025, "include": true, "reason": "import numpy,from numpy", "num_tokens": 804}
|
module da_minimisation
!---------------------------------------------------------------------------
! Purpose: Collection of routines associated with minimisation.
!---------------------------------------------------------------------------
use module_configure, only : grid_config_rec_type
use module_dm, only : wrf_dm_sum_real, wrf_dm_sum_integer
#ifdef DM_PARALLEL
use module_dm, only : local_communicator, mytask, ntasks, ntasks_x, &
ntasks_y, data_order_xy, data_order_xyz
use module_comm_dm, only : halo_wpec_sub, halo_wpec_adj_sub
#endif
use module_domain, only : domain, ep_type, vp_type, x_type, domain_clockprint, &
domain_clockadvance, domain_clock_get, domain_clock_set
use module_state_description, only : dyn_em,dyn_em_tl,dyn_em_ad,p_g_qv, &
p_g_qc, p_g_qr, num_moist, PARAM_FIRST_SCALAR
!#ifdef DM_PARALLEL
! use mpi, only : mpi_barrier
!#endif
use da_airep, only : da_calculate_grady_airep, da_ao_stats_airep, &
da_oi_stats_airep, da_get_innov_vector_airep, da_residual_airep, &
da_jo_and_grady_airep
use da_airsr , only : da_calculate_grady_airsr, da_ao_stats_airsr, &
da_oi_stats_airsr, da_get_innov_vector_airsr, da_residual_airsr, &
da_jo_and_grady_airsr
use da_bogus, only : da_calculate_grady_bogus, da_ao_stats_bogus, &
da_oi_stats_bogus, da_get_innov_vector_bogus, da_residual_bogus, &
da_jo_and_grady_bogus
use da_buoy , only : da_calculate_grady_buoy, da_ao_stats_buoy, &
da_oi_stats_buoy,da_get_innov_vector_buoy, da_residual_buoy, &
da_jo_and_grady_buoy
use da_control, only : trace_use, var4d_bin, trajectory_io, analysis_date, &
var4d, rootproc,jcdfi_use,jcdfi_diag,ierr,comm,num_fgat_time, &
var4d_lbc, stdout, eps, stats_unit, test_dm_exact, global, multi_inc, &
calculate_cg_cost_fn,anal_type_randomcv,cv_size_domain,je_factor, &
jb_factor,ntmax,omb_add_noise,write_iv_rad_ascii,use_obs_errfac, &
rtm_option,rtm_option_rttov, rtm_option_crtm, anal_type_verify, &
write_filtered_rad,omb_set_rand,use_rad,var_scaling2,var_scaling1, &
var_scaling4,var_scaling5,var_scaling3, jo_unit, test_gradient, &
print_detail_grad,omb_set_rand,grad_unit,cost_unit, num_pseudo, cv_options, &
cv_size_domain_je,cv_size_domain_jb, cv_size_domain_jp, cv_size_domain_js, cv_size_domain_jl, cv_size_domain_jt, &
sound, mtgirs, sonde_sfc, synop, profiler, gpsref, gpseph, gpspw, polaramv, geoamv, ships, metar, &
satem, radar, ssmi_rv, ssmi_tb, ssmt1, ssmt2, airsr, pilot, airep,tamdar, tamdar_sfc, rain, &
bogus, buoy, qscat,pseudo, radiance, monitor_on, max_ext_its, use_rttov_kmatrix,&
use_crtm_kmatrix,precondition_cg, precondition_factor, use_varbc, varbc_factor, &
biasprep, qc_rad, num_procs, myproc, use_gpspwobs, use_rainobs, use_gpsztdobs, &
use_radar_rf, radar_rf_opt,radar_rf_rscl,radar_rv_rscl,use_radar_rhv,use_radar_rqv,pseudo_var, num_pseudo, &
num_ob_indexes, num_ob_vars, npres_print, pptop, ppbot, qcstat_conv_unit, gas_constant, &
orthonorm_gradient, its, ite, jts, jte, kts, kte, ids, ide, jds, jde, kds, kde, cp, &
use_satcv, sensitivity_option, print_detail_outerloop, adj_sens, filename_len, &
ims, ime, jms, jme, kms, kme, ips, ipe, jps, jpe, kps, kpe, fgat_rain_flags, var4d_bin_rain, freeze_varbc, &
use_wpec, wpec_factor, use_4denvar, anal_type_hybrid_dual_res, alphacv_method, alphacv_method_xa, &
write_detail_grad_fn, pseudo_uvtpq, lanczos_ep_filename, use_divc, divc_factor, &
cloud_cv_options, use_cv_w, var_scaling6, var_scaling7, var_scaling8, var_scaling9, &
var_scaling10, var_scaling11, &
write_gts_omb_oma, write_unpert_obs, write_rej_obs_conv, pseudo_time, &
use_varbc_tamdar, varbc_tamdar_nobsmin, varbc_tamdar_unit
use da_define_structures, only : iv_type, y_type, j_type, be_type, &
xbx_type, jo_type, da_allocate_y,da_zero_x,da_zero_y,da_deallocate_y, &
da_zero_vp_type, qhat_type
use da_dynamics, only : da_wpec_constraint_lin,da_wpec_constraint_adj, &
da_divergence_constraint, da_divergence_constraint_adj
use da_obs, only : da_transform_xtoy_adj,da_transform_xtoy, &
da_add_noise_to_ob,da_random_omb_all, da_obs_sensitivity
use da_geoamv, only : da_calculate_grady_geoamv, da_ao_stats_geoamv, &
da_oi_stats_geoamv, da_get_innov_vector_geoamv,da_residual_geoamv, &
da_jo_and_grady_geoamv
use da_gpspw, only : da_calculate_grady_gpspw, da_ao_stats_gpspw, &
da_oi_stats_gpspw, da_get_innov_vector_gpspw, da_residual_gpspw, &
da_jo_and_grady_gpspw, da_get_innov_vector_gpsztd
use da_gpsref, only : da_calculate_grady_gpsref, da_ao_stats_gpsref, &
da_oi_stats_gpsref, da_get_innov_vector_gpsref, da_residual_gpsref, &
da_jo_and_grady_gpsref
use da_gpseph, only : da_calculate_grady_gpseph, da_ao_stats_gpseph, &
da_oi_stats_gpseph, da_get_innov_vector_gpseph, da_residual_gpseph, &
da_jo_and_grady_gpseph
use da_obs_io, only : da_final_write_y, da_write_y, da_final_write_obs, &
da_write_obs,da_write_obs_etkf,da_write_noise_to_ob, da_use_obs_errfac, &
da_write_iv_for_multi_inc, da_read_iv_for_multi_inc
use da_metar, only : da_calculate_grady_metar, da_ao_stats_metar, &
da_oi_stats_metar, da_get_innov_vector_metar, da_residual_metar, &
da_jo_and_grady_metar
use da_pilot, only : da_calculate_grady_pilot, da_ao_stats_pilot, &
da_oi_stats_pilot, da_get_innov_vector_pilot, da_residual_pilot, &
da_jo_and_grady_pilot
use da_par_util, only : da_system,da_cv_to_global
use da_par_util1, only : da_proc_sum_real,da_proc_sum_ints
use da_polaramv, only : da_calculate_grady_polaramv, da_ao_stats_polaramv, &
da_oi_stats_polaramv, da_get_innov_vector_polaramv, da_residual_polaramv, &
da_jo_and_grady_polaramv
use da_profiler, only : da_calculate_grady_profiler, da_ao_stats_profiler, &
da_oi_stats_profiler,da_get_innov_vector_profiler, da_residual_profiler, &
da_jo_and_grady_profiler
use da_pseudo, only : da_calculate_grady_pseudo, da_ao_stats_pseudo, &
da_oi_stats_pseudo, da_get_innov_vector_pseudo, da_residual_pseudo, &
da_jo_and_grady_pseudo
use da_qscat, only : da_calculate_grady_qscat, da_ao_stats_qscat, &
da_oi_stats_qscat, da_get_innov_vector_qscat, da_residual_qscat, &
da_jo_and_grady_qscat
use da_mtgirs, only : da_calculate_grady_mtgirs, &
da_ao_stats_mtgirs, da_oi_stats_mtgirs,da_oi_stats_mtgirs, &
da_get_innov_vector_mtgirs, &
da_jo_and_grady_mtgirs, da_residual_mtgirs
use da_tamdar, only : da_calculate_grady_tamdar, &
da_ao_stats_tamdar, da_oi_stats_tamdar,da_oi_stats_tamdar, &
da_get_innov_vector_tamdar, &
da_jo_and_grady_tamdar, da_residual_tamdar, &
da_calculate_grady_tamdar_sfc, &
da_ao_stats_tamdar_sfc, da_oi_stats_tamdar_sfc,da_oi_stats_tamdar_sfc, &
da_get_innov_vector_tamdar_sfc, &
da_jo_and_grady_tamdar_sfc, da_residual_tamdar_sfc
use da_varbc_tamdar, only : da_varbc_tamdar_tl,da_varbc_tamdar_adj, &
da_varbc_tamdar_direct,da_varbc_tamdar_precond
#if defined(RTTOV) || defined(CRTM)
use da_radiance, only : da_calculate_grady_rad, da_write_filtered_rad, &
da_get_innov_vector_radiance, satinfo
use da_radiance1, only : da_ao_stats_rad,da_oi_stats_rad, &
da_write_iv_rad_ascii,da_residual_rad,da_jo_and_grady_rad, &
da_biasprep, da_qc_rad
#endif
use da_radar, only : da_calculate_grady_radar, da_ao_stats_radar, &
da_oi_stats_radar, da_get_innov_vector_radar, da_residual_radar, &
da_jo_and_grady_radar
use da_rain, only : da_calculate_grady_rain, da_ao_stats_rain, &
da_oi_stats_rain, da_get_innov_vector_rain, da_residual_rain, &
da_jo_and_grady_rain, da_get_hr_rain, da_transform_xtoy_rain, &
da_transform_xtoy_rain_adj
use da_reporting, only : da_message, da_warning, da_error
use da_satem, only : da_calculate_grady_satem, da_ao_stats_satem, &
da_oi_stats_satem, da_get_innov_vector_satem, da_residual_satem, &
da_jo_and_grady_satem
use da_ships, only : da_calculate_grady_ships, da_ao_stats_ships, &
da_oi_stats_ships, da_get_innov_vector_ships, da_residual_ships, &
da_jo_and_grady_ships
use da_sound, only : da_calculate_grady_sound,da_calculate_grady_sonde_sfc, &
da_ao_stats_sound, da_oi_stats_sound,da_oi_stats_sound, &
da_oi_stats_sonde_sfc,da_ao_stats_sonde_sfc,da_get_innov_vector_sound, &
da_get_innov_vector_sonde_sfc,da_jo_and_grady_sound, da_residual_sound, &
da_jo_and_grady_sound,da_jo_and_grady_sonde_sfc,da_residual_sonde_sfc
use da_ssmi, only : da_calculate_grady_ssmi_tb,da_calculate_grady_ssmi_rv,da_calculate_grady_ssmt1, &
da_calculate_grady_ssmt2, da_ao_stats_ssmi_tb ,da_ao_stats_ssmt2, &
da_ao_stats_ssmt2, da_oi_stats_ssmt1, da_oi_stats_ssmt2, &
da_oi_stats_ssmi_tb,da_oi_stats_ssmi_rv,da_ao_stats_ssmt1,da_get_innov_vector_ssmi_tb, &
da_get_innov_vector_ssmi_rv, da_residual_ssmi_rv, da_residual_ssmi_tb, &
da_get_innov_vector_ssmt1,da_get_innov_vector_ssmt2, &
da_jo_and_grady_ssmt1, da_jo_and_grady_ssmt2,da_jo_and_grady_ssmi_tb, &
da_jo_and_grady_ssmi_rv, &
da_residual_ssmt1,da_residual_ssmt2, da_ao_stats_ssmi_rv
use da_synop, only : da_calculate_grady_synop, da_ao_stats_synop, &
da_oi_stats_synop, da_get_innov_vector_synop, da_residual_synop, &
da_jo_and_grady_synop
use da_statistics, only : da_analysis_stats, da_print_qcstat
use da_tools_serial, only : da_get_unit,da_free_unit
use da_tracing, only : da_trace_entry, da_trace_exit,da_trace
use da_transfer_model, only : da_transfer_wrftltoxa,da_transfer_xatowrftl, &
da_transfer_xatowrftl_adj,da_transfer_wrftltoxa_adj
#if defined(RTTOV) || defined(CRTM)
use da_varbc, only : da_varbc_tl,da_varbc_adj,da_varbc_precond,da_varbc_coldstart, da_varbc_direct
#endif
use da_vtox_transforms, only : da_transform_vtox,da_transform_vtox_adj,da_transform_xtoxa,da_transform_xtoxa_adj
use da_vtox_transforms, only : da_copy_xa, da_add_xa, da_transform_vpatox, da_transform_vpatox_adj
use da_wrf_interfaces, only : wrf_dm_bcast_real, wrf_get_dm_communicator
use module_symbols_util, only : wrfu_finalize
use da_lapack, only : dsteqr
use da_wrfvar_io, only : da_med_initialdata_input
use da_transfer_model, only : da_transfer_wrftoxb
#ifdef VAR4D
use da_4dvar, only : da_tl_model, da_ad_model, model_grid, input_nl_xtraj, &
kj_swap_reverse, upsidedown_ad_forcing, u6_2, v6_2, w6_2, t6_2, ph6_2, p6, &
mu6_2, psfc6, moist6
use da_transfer_model, only : da_transfer_xatowrftl_lbc, da_transfer_xatowrftl_adj_lbc, &
da_transfer_wrftl_lbc_t0, da_transfer_wrftl_lbc_t0_adj, da_get_2nd_firstguess
USE module_io_wrf, only : auxinput6_only
#endif
implicit none
#ifdef DM_PARALLEL
include 'mpif.h'
#endif
private :: da_dot, da_dot_cv
contains
#include "da_calculate_j.inc"
#include "da_calculate_gradj.inc"
#include "da_jo_and_grady.inc"
#include "da_calculate_residual.inc"
#include "da_get_var_diagnostics.inc"
#include "da_get_innov_vector.inc"
#include "da_dot.inc"
#include "da_dot_cv.inc"
#include "da_write_diagnostics.inc"
#include "da_minimise_cg.inc"
#include "da_minimise_lz.inc"
#include "da_calculate_grady.inc"
#include "da_transform_vtoy.inc"
#include "da_transform_vtoy_adj.inc"
#include "da_transform_vtod_wpec.inc"
#include "da_transform_vtod_wpec_adj.inc"
#include "da_adjoint_sensitivity.inc"
#include "da_sensitivity.inc"
#include "da_amat_mul.inc"
#include "da_kmat_mul.inc"
#include "da_lanczos_io.inc"
#include "da_swap_xtraj.inc"
#include "da_read_basicstates.inc"
end module da_minimisation
|
{"hexsha": "248cc5906eff9b29ef72d112095479d1f1483b87", "size": 11855, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "var/da/da_minimisation/da_minimisation.f90", "max_stars_repo_name": "wasserblum/wrf-teb", "max_stars_repo_head_hexsha": "38f741a996868634d9b1bc6bc055f640a1b38751", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-17T21:31:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T19:18:28.000Z", "max_issues_repo_path": "var/da/da_minimisation/da_minimisation.f90", "max_issues_repo_name": "teb-model/wrf-teb", "max_issues_repo_head_hexsha": "60882e61a2a3d91f1c94cb5b542f46ffaebfad71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-21T13:43:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T16:17:26.000Z", "max_forks_repo_path": "var/da/da_minimisation/da_minimisation.f90", "max_forks_repo_name": "teb-model/wrf-teb", "max_forks_repo_head_hexsha": "60882e61a2a3d91f1c94cb5b542f46ffaebfad71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-25T08:36:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-05T09:28:21.000Z", "avg_line_length": 54.6313364055, "max_line_length": 120, "alphanum_fraction": 0.7789118515, "num_tokens": 3795}
|
# Standard-library modules (GUI file picker).
import tkinter as tk
from tkinter import filedialog
# Third-party analysis / plotting stack.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Hide the root Tk window so only the file dialog appears.
root = tk.Tk()
root.withdraw()

# Spreadsheet column names used for the plot axes.
x_axis_name = "genotype"
y_axis_name = "aneuploidy ratio"

# Ask the user for the compiled FISH results spreadsheet and load it.
file_path = filedialog.askopenfilename(
    title="Select a compiled FISH results file")
data = pd.read_excel(file_path, index_col=None)

# Fixed genotype order so the bars and the overlaid dots line up.
genotype_order = list(data['genotype'].unique())

# Set up the figure.
plt.figure(figsize=(4, 6))
plt.rcParams.update({'font.size': 18})
ax = plt.subplot(1, 1, 1)
ax.tick_params(axis='x', which='major', labelsize=10)

# Bars show the per-genotype mean; error bars show the standard deviation.
sns.barplot(x=x_axis_name, y=y_axis_name, data=data,
            errwidth=1, capsize=0.4, color="white", ci="sd",
            linewidth=1, edgecolor="black", order=genotype_order)

# Overlay one jittered dot per individual cyst.
sns.stripplot(x=x_axis_name, y=y_axis_name, data=data,
              jitter=True, color="green", size=4, linewidth=0,
              order=genotype_order)

# Final layout tweaks.
plt.xlabel('')
plt.xticks(rotation=90)
plt.margins(x=None, y=0.2, tight=True)
plt.tight_layout()

# Display the plot.
plt.show()
|
{"hexsha": "f51ec755111a2b98c01218d6780949ec784022b3", "size": 1279, "ext": "py", "lang": "Python", "max_stars_repo_path": "barplot FISH results.py", "max_stars_repo_name": "BioJoe/automated-FISH-analyses", "max_stars_repo_head_hexsha": "c2859fe9ee8fc122e3651537ead3c5ccb7e270a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "barplot FISH results.py", "max_issues_repo_name": "BioJoe/automated-FISH-analyses", "max_issues_repo_head_hexsha": "c2859fe9ee8fc122e3651537ead3c5ccb7e270a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "barplot FISH results.py", "max_forks_repo_name": "BioJoe/automated-FISH-analyses", "max_forks_repo_head_hexsha": "c2859fe9ee8fc122e3651537ead3c5ccb7e270a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1020408163, "max_line_length": 54, "alphanum_fraction": 0.684910086, "include": true, "reason": "import numpy", "num_tokens": 325}
|
% Horizontal bar chart demo: six categories with their values.
cats = [1 2 3 4 5 6];    % bar positions along the vertical axis
vals = [2 6 8 7 8 5];    % corresponding bar lengths
barh(cats, vals);        % draw horizontal bars
title('\bfExample of a Horizontal Bar Plot');
xlabel('\bf\ity');
ylabel('\bf\itx');
axis([0 10 0 7]);        % fixed axis limits: x in [0,10], y in [0,7]
|
{"author": "101Hub", "repo": "Matlab101", "sha": "07273f68f1147a110443aeb121fa10962234f298", "save_path": "github-repos/MATLAB/101Hub-Matlab101", "path": "github-repos/MATLAB/101Hub-Matlab101/Matlab101-07273f68f1147a110443aeb121fa10962234f298/assets/\u300aMatlab\u7f16\u7a0b\u300b\u6e90\u7801/chap6/barh_plot.m"}
|
using Revise, Test, ForwardDiff, Parameters, Setfield, Plots, LinearAlgebra
using BifurcationKit, Test
const BK = BifurcationKit
function norminf(x)
    # Infinity norm: the largest absolute entry of `x`.
    return norm(x, Inf)
end
####################################################################################################
"""
    COm(u, p)

Vector field of the CO-oxidation model. `u = (x, y, s)` holds the surface
coverages; `p` is a named tuple with fields `q1, …, q6, k`. Returns the
3-component right-hand side as a `Vector`.
"""
function COm(u, p)
    x, y, s = u
    # Fraction of free surface sites.
    free = 1 - x - y - s
    return [
        2p.q1 * free^2 - 2p.q5 * x^2 - p.q3 * x * y,
        p.q2 * free - p.q6 * y - p.q3 * x * y,
        p.q4 * (free - p.k * s),
    ]
end
# Build the "jet" of COm: the vector field together with its differentials
# (dF, d2F, d3F), required by the normal-form computations below.
jet = BK.getJet(COm; matrixfree=false)
# Model parameters and an initial equilibrium guess.
par_com = (q1 = 2.5, q2 = 2.0, q3 = 10., q4 = 0.0675, q5 = 1., q6 = 0.1, k = 0.4)
z0 = [0.001137, 0.891483, 0.062345]
# Continuation options for the one-parameter branch in q2.
opts_br = ContinuationPar(pMin = 0.5, pMax = 2.0, ds = 0.002, dsmax = 0.01, nInversion = 6, detectBifurcation = 3, maxBisectionSteps = 25, nev = 3, maxSteps = 20000)
@set! opts_br.newtonOptions.verbose = true
# Continue the equilibrium branch w.r.t. q2, detecting bifurcation points.
br, = @time continuation(jet[1], jet[2], z0, par_com, (@lens _.q2), opts_br;
	recordFromSolution = (x, p) -> (x = x[1], y = x[2], s = x[3]),
	plot = false, verbosity = 3, normC = norminf,
	bothside = true)
show(br)
plot(br, plotfold=false, markersize=4, legend=:topright, ylims=(0,0.16))
####################################################################################################
# Refine the detected special points with Newton: fold (index 2), Hopf (index 1).
@set! opts_br.newtonOptions.verbose = true
@set! opts_br.newtonOptions.maxIter = 10
opts_br = @set opts_br.newtonOptions.tol = 1e-12
sn, = newton(jet[1:2]..., br, 2; options = opts_br.newtonOptions, bdlinsolver = MatrixBLS())
hp, = newton(jet[1:2]..., br, 1; options = NewtonPar( opts_br.newtonOptions; maxIter = 10),startWithEigen=true, d2F = jet[3])
# Hopf normal form at the refined point, then directly from the branch.
BK.hopfNormalForm(jet..., hp, opts_br.newtonOptions.linsolver)
hpnf = computeNormalForm(jet..., br, 1)
# Codimension-2 continuation of the fold point in the (k, q2) plane.
sn_codim2, = continuationFold(jet[1:2]..., br, 2, (@lens _.k), ContinuationPar(opts_br, pMax = 3.2, pMin = 0., detectBifurcation = 0, dsmin=1e-5, ds = -0.001, dsmax = 0.05, nInversion = 6, detectEvent = 2, detectFold = false) ; plot = true,
	verbosity = 3,
	normC = norminf,
	updateMinAugEveryStep = 1,
	startWithEigen = true,
	recordFromSolution = (u,p; kw...) -> (x = u.u[1] ),
	bothside=true,
	bdlinsolver = MatrixBLS()
	)
# Regression checks on the codim-2 fold branch special points.
using Test
@test sn_codim2.specialpoint[1].printsol.k ≈ 0.971397 rtol = 1e-4
@test sn_codim2.specialpoint[1].printsol.q2 ≈ 1.417628 rtol = 1e-4
@test sn_codim2.specialpoint[3].printsol.k ≈ 0.722339 rtol = 1e-4
@test sn_codim2.specialpoint[3].printsol.q2 ≈ 1.161199 rtol = 1e-4
plot(sn_codim2)#, real.(sn_codim2.BT), ylims = (-1,1), xlims=(0,2))
plot(sn_codim2, vars=(:q2, :x), branchlabel = "Fold", plotstability = false);plot!(br,xlims=(0.8,1.8))
# Codimension-2 continuation of the Hopf point in the (k, q2) plane.
hp_codim2, = continuation(jet[1:2]..., br, 1, (@lens _.k), ContinuationPar(opts_br, pMin = 0., pMax = 2.8, detectBifurcation = 0, ds = -0.0001, dsmax = 0.08, dsmin = 1e-4, nInversion = 6, detectEvent = 2, detectLoop = true, maxSteps = 50, detectFold=false) ; plot = true,
	verbosity = 3,
	normC = norminf,
	tangentAlgo = BorderedPred(),
	detectCodim2Bifurcation = 2,
	updateMinAugEveryStep = 1,
	startWithEigen = true,
	recordFromSolution = (u,p; kw...) -> (x = u.u[1] ),
	d2F = jet[3], d3F = jet[4],
	bothside = true,
	bdlinsolver = MatrixBLS())
# Regression checks on the Hopf branch (first Lyapunov coefficient and points).
@test hp_codim2.branch[5].l1 |> real ≈ 33.15920 rtol = 1e-1
@test hp_codim2.specialpoint[1].printsol.k ≈ 0.305879 rtol = 1e-3
@test hp_codim2.specialpoint[1].printsol.q2 ≈ 0.924255 rtol = 1e-3
@test hp_codim2.specialpoint[2].printsol.k ≈ 0.235550 rtol = 1e-4
@test hp_codim2.specialpoint[2].printsol.q2 ≈ 0.896099 rtol = 1e-4
# Overlay the fold and Hopf curves with the original branch.
plot(sn_codim2, vars=(:q2, :x), branchlabel = "Fold", plotcirclesbif = true)
plot!(hp_codim2, vars=(:q2, :x), branchlabel = "Hopf",plotcirclesbif = true)
plot!(br,xlims=(0.6,1.5))
plot(sn_codim2, vars=(:k, :q2), branchlabel = "Fold")
plot!(hp_codim2, vars=(:k, :q2), branchlabel = "Hopf",)
plot(hp_codim2, vars=(:q2, :x), branchlabel = "Hopf")
####################################################################################################
|
{"hexsha": "a741aa133e516468d9daca681cafb90ff68d55e0", "size": 3868, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/COModel.jl", "max_stars_repo_name": "free-Gift-card/BifurcationKit.jl", "max_stars_repo_head_hexsha": "07938db6909fa00b10736f916750d19f92b87e22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/COModel.jl", "max_issues_repo_name": "free-Gift-card/BifurcationKit.jl", "max_issues_repo_head_hexsha": "07938db6909fa00b10736f916750d19f92b87e22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/COModel.jl", "max_forks_repo_name": "free-Gift-card/BifurcationKit.jl", "max_forks_repo_head_hexsha": "07938db6909fa00b10736f916750d19f92b87e22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-15T01:38:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T01:38:35.000Z", "avg_line_length": 42.0434782609, "max_line_length": 271, "alphanum_fraction": 0.6098759049, "num_tokens": 1492}
|
subroutine decay
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine calculates degradation of pesticide in the soil and on
!! the plants
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! decay_f(:) |none |exponential of the rate constant for
!! |degradation of the pesticide on foliage
!! decay_s(:) |none |exponential of the rate constant for
!! |degradation of the pesticide in soil
!! hru_dafr(:) |none |fraction of watershed area in HRU
!! hrupest(:) |none |pesticide use flag:
!! | 0: no pesticides used in HRU
!! | 1: pesticides used in HRU
!! ihru |none |HRU number
!! npmx |none |number of different pesticides used in
!! |the simulation
!! npno(:) |none |array of unique pesticides used in watershed
!! plt_pst(:,:) |kg/ha |pesticide on plant foliage
!! sol_nly(:) |none |number of layers in soil profile
!! sol_pst(:,:,:)|kg/ha |pesticide in soil layer
!! wshd_pstdg(:) |kg pst/ha |amount of pesticide lost through degradation
!! |in watershed
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ OUTGOING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! plt_pst(:,:) |kg/ha |pesticide on plant foliage
!! sol_pst(:,:,:)|kg/ha |pesticide in soil layer
!! wshd_pstdg(:) |kg pst/ha |amount of pesticide lost through degradation
!! |in watershed
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! j |none |HRU number
!! k |none |counter
!! kk |none |pesticide number from pest.dat
!! l |none |counter (soil layers)
!! x1 |kg/ha |amount of pesticide present at beginning of
!! |day
!! xx |kg/ha |amount of pesticide present at end of day
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ SUBROUTINES/FUNCTIONS CALLED ~ ~ ~
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use parm
integer :: j, k, kk, l
real*8 :: x1, xx
j = 0
j = ihru
if (hrupest(j) == 0) return
do k = 1, npmx
kk = 0
kk = npno(k)
if (kk > 0) then
!! calculate degradation in soil
do l = 1, sol_nly(j)
x1 = 0.
x1 = sol_pst(k,j,l)
if (x1 >= 0.0001) then
xx = 0.
xx = x1 * decay_s(kk)
wshd_pstdg(k) = wshd_pstdg(k) + (x1 - xx) * hru_dafr(j)
sol_pst(k,j,l) = xx
end if
end do
!! calculate degradation off plant foliage
x1 = 0.
x1 = plt_pst(k,j)
if (x1 >= 0.0001) then
xx = 0.
xx = x1 * decay_f(kk)
wshd_pstdg(k) = wshd_pstdg(k) + (x1 - xx) * hru_dafr(j)
plt_pst(k,j) = xx
end if
end if
end do
return
end
|
{"hexsha": "ea09a07fe7fbc8051f48635a676801bf39ae4729", "size": 3773, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "swat_cli/rev670_source/decay.f", "max_stars_repo_name": "GISWAT/erosion-sediment", "max_stars_repo_head_hexsha": "6ab469eba99cba8e5c365cd4d18cba2e8781ccf6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-05T06:33:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-05T06:33:14.000Z", "max_issues_repo_path": "swat_cli/rev670_source/decay.f", "max_issues_repo_name": "GISWAT/erosion-sediment", "max_issues_repo_head_hexsha": "6ab469eba99cba8e5c365cd4d18cba2e8781ccf6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "swat_cli/rev670_source/decay.f", "max_forks_repo_name": "GISWAT/erosion-sediment", "max_forks_repo_head_hexsha": "6ab469eba99cba8e5c365cd4d18cba2e8781ccf6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1382978723, "max_line_length": 81, "alphanum_fraction": 0.384309568, "num_tokens": 1118}
|
[STATEMENT]
lemma env_restr_esing[simp]:
"x\<in> S \<Longrightarrow> esing x\<cdot>v f|` S = esing x\<cdot>v"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> S \<Longrightarrow> esing x\<cdot>v f|` S = esing x\<cdot>v
[PROOF STEP]
by (auto intro: env_restr_useless dest: subsetD[OF edom_esing_subset])
|
{"llama_tokens": 133, "file": "Launchbury_Env", "length": 1}
|
"""
Defines abstract samplers.
"""
import numpy as np
import csb.core
from abc import ABCMeta, abstractmethod, abstractproperty
class DimensionError(TypeError):
    """
    Raised when an array argument does not have the expected
    dimensionality (e.g. a non-1D array passed where a flat array
    is required).
    """
    pass
class AbstractSampler(object):
    """
    Abstract interface for sampling algorithms. Concrete samplers
    must implement L{sample}.
    """
    # Python 2-style metaclass declaration (ignored by Python 3).
    __metaclass__ = ABCMeta
    @abstractmethod
    def sample(self):
        """
        Draw a sample.
        @rtype: L{AbstractState}
        """
        pass
class AbstractState(object):
    """
    Represents a point in phase-space. Concrete subclasses must
    provide the C{position} and C{momentum} properties.
    """
    # Python 2-style metaclass declaration (ignored by Python 3).
    __metaclass__ = ABCMeta
    @abstractproperty
    def position(self):
        # Coordinates of the state; implemented by subclasses.
        pass
    @abstractproperty
    def momentum(self):
        # Conjugate momentum of the state; implemented by subclasses.
        pass
class State(AbstractState):
    """
    Represents a point in phase-space: a flat position array plus an
    optional momentum array of the same length.
    """

    @staticmethod
    def check_flat_array(*args):
        """
        Check whether arguments are flat, one-dimensional numpy arrays.

        @raise TypeError: if an argument is not a numpy array
        @raise DimensionError: if an argument has more than one dimension
        """
        for q in args:
            if not isinstance(q, np.ndarray):
                raise TypeError(q, 'numpy.ndarray expected!')
            if not len(q.squeeze().shape) <= 1:
                raise DimensionError(q, '1d numpy.ndarray expected!')

    @staticmethod
    def check_equal_length(q, p):
        """
        Check whether arguments have equal length.

        @raise DimensionError: if C{q} and C{p} differ in length
        """
        if len(q) != len(p):
            raise DimensionError(p, 'momentum needs to have the same dimension as coordinates!')

    def __init__(self, position, momentum=None):
        # Backing fields; populated through the validating setters below.
        self._position = None
        self._momentum = None
        self.position = position
        self.momentum = momentum

    def __eq__(self, other):
        # Bug fix: the previous implementation compared numpy arrays with
        # ``==`` inside ``and``, which raises ValueError ("truth value of
        # an array with more than one element is ambiguous") for any state
        # of dimension > 1. Compare element-wise via numpy instead, and
        # handle absent (None) momenta explicitly.
        if not isinstance(other, AbstractState):
            return NotImplemented
        if not np.array_equal(self._position, other.position):
            return False
        if self._momentum is None or other.momentum is None:
            return self._momentum is None and other.momentum is None
        return np.array_equal(self._momentum, other.momentum)

    @property
    def position(self):
        # Return a defensive copy so callers cannot mutate internal state.
        return self._position.copy()
    @position.setter
    def position(self, value):
        State.check_flat_array(value)
        self._position = np.array(value)

    @property
    def momentum(self):
        # None means "no momentum assigned"; otherwise return a copy.
        if self._momentum is None:
            return None
        else:
            return self._momentum.copy()
    @momentum.setter
    def momentum(self, value):
        if value is not None:
            State.check_flat_array(value)
            State.check_equal_length(value, self.position)
            self._momentum = np.array(value)
        else:
            self._momentum = None

    def clone(self):
        """
        Return an independent copy of this state.
        """
        # The position/momentum properties already return copies, so no
        # additional .copy() is needed here.
        if self.momentum is not None:
            return self.__class__(self.position, self.momentum)
        else:
            return self.__class__(self.position)
class EnsembleState(csb.core.BaseCollectionContainer, AbstractState):
    """
    Defines an Ensemble Monte Carlo state; it is a read-only collection
    of State objects.

    @param items: initialization list of states
    @type items: list of L{States}
    """

    def __init__(self, items):
        # Delegate storage to the base container, restricted to State items.
        super(EnsembleState, self).__init__(items, type=State)

    @property
    def position(self):
        # Stack member positions into a single 2-D array (one row per state).
        positions = [member.position for member in self]
        return np.array(positions)

    @property
    def momentum(self):
        # Stack member momenta into a single 2-D array (one row per state).
        momenta = [member.momentum for member in self]
        return np.array(momenta)
|
{"hexsha": "f821101d585f307b54d18e22e6e847a9025edd45", "size": 3221, "ext": "py", "lang": "Python", "max_stars_repo_path": "csb/statistics/samplers/__init__.py", "max_stars_repo_name": "ujjwalsh/CSB", "max_stars_repo_head_hexsha": "cbe04f35e1ecace7fa01cabce669d99714b9dd38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-07-03T14:06:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-20T07:49:08.000Z", "max_issues_repo_path": "csb/statistics/samplers/__init__.py", "max_issues_repo_name": "ujjwalsh/CSB", "max_issues_repo_head_hexsha": "cbe04f35e1ecace7fa01cabce669d99714b9dd38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-07-03T13:29:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-02T12:45:29.000Z", "max_forks_repo_path": "csb/statistics/samplers/__init__.py", "max_forks_repo_name": "ujjwalsh/CSB", "max_forks_repo_head_hexsha": "cbe04f35e1ecace7fa01cabce669d99714b9dd38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-07-04T14:45:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-18T08:57:27.000Z", "avg_line_length": 24.4015151515, "max_line_length": 96, "alphanum_fraction": 0.5864638311, "include": true, "reason": "import numpy", "num_tokens": 677}
|
using HybridSystems
# Pull in the cruise_control_example constructor shipped with HybridSystems.
include(joinpath(dirname(dirname(pathof(HybridSystems))), "examples", "cruise_control.jl"));
# Two alternative parameter sets: a small normalized toy instance (taken when
# the literal `true` branch is active) and a physically scaled truck platoon.
if true
    D = 1.0        # bound on string elongation
    U = 1.0        # bound on control input / acceleration
    v_shift = 2.0
    vmin = -1.0    # minimum velocity
    vmax = 2.0     # maximum velocity
    v = (1.0,)     # velocity thresholds (single mode)
    m = 1.0        # mass
    m0 = 1.0
    h = 0.5        # time step
    kd = 1/2       # damping constant
    ks = 1/2       # spring constant
    Δv = 5.0
else
    # Physically scaled parameters, optionally rescaled by `scaling`.
    function constant(scaling)
        va = 15.6 * scaling
        vb = 24.5 * scaling
        vc = 29.5 * scaling
        v = (va, vb, vc)
        U = 4.0 * scaling
        D = 0.5 * scaling
        vmin = 5.0 * scaling
        return D, U, vmin, v
    end
    D, U, vmin, v = constant(1.0)
    vmax = 35.0
    m = 1000
    m0 = 500
    h = 0.8
    kd = 4600
    ks = 4500
    Δv = (v[1] - vmin) / 2
end
# Continuation horizon and resulting number of discrete modes.
T = 2
N = 1 + (T+1) * length(v)
"""
    system(M, T; v=v, N=1 + (T+1)*length(v), sym=false, vmax=vmax)

Build the cruise-control hybrid system for `M` vehicles over horizon `T`
by delegating to `cruise_control_example`, filling in the file-level
constants (`vmin`, `vmax`, `U`, `D`, `m0`, `ks`, `m`, `kd`) and the
dwell-time horizon `H = h*T`.
"""
function system(M, T; v=v, N = 1 + (T+1) * length(v), sym=false, vmax = vmax)
    # Removed: dead commented-out call and the unused local `H = h * T`
    # (the call below passes H = h*T directly).
    return cruise_control_example(N, M, vmin = vmin, vmax=vmax, v=v,
        U=U, H=h*T, D=D, T=T, sym=sym, m0 = m0, ks=ks, m=m, kd=kd)
end
# Build the state vector [d, v, d, v, …, v, u]: one (d, v) pair per vehicle
# (M pairs), followed by the lead velocity `v` and the input bound `u`.
function _vec(M, d, v, u)
    return vcat(repeat([d, v], M), v, u)
end
# |string_elongation| < D
# |acceleration| < U
# Half-widths of the symmetric bounding box for M vehicles:
# |string elongation| < D, velocity half-width 1.0, |input| < U.
function sym_rect(M)
    return _vec(M, D, 1.0, U)
end
using SetProg
using SwitchOnSafety
const SOS = SwitchOnSafety;
using MathOptInterface
const MOI = MathOptInterface
# Compute invariant sets for the symmetric single-mode system (sym=true)
# and translate them back into the original (shifted) velocity coordinates.
function symsolve(M, set_variable; volume_heuristic=set -> L1_heuristic(set, sym_rect(M)))
    hs = system(M, T, N=1, sym=true)
    # Shift interval 5 <= v <= 15.6 -> -5.3 <= v <= 5.3
    sets = invariant_sets(hs, sdp_factory, [set_variable], volume_heuristic=volume_heuristic)
    # Translate each set by +2.0 in every velocity coordinate (undo the shift).
    [SOS.SetProg.Sets.Translation(set, _vec(M, 0, 2.0, 0)) for set in sets]
end
# Compute invariant sets for the full (non-symmetric) cruise-control system.
# `setvar` maps an interior point to a set variable; `oneshot` solves all
# modes simultaneously, otherwise the modes are solved incrementally.
function fullsolve(T, M, setvar::Function;
                   volume_heuristic=set -> L1_heuristic(set, sym_rect(M)),
                   oneshot = false,
                   onlyone = false)
    hs = system(M, T)
    # Interior point of the safe region for velocity threshold `v`:
    # each velocity coordinate is set to the midpoint of [vmin, v].
    function hv(v)
        h = zeros(statedim(hs, 1))
        for i in 1:M
            h[2i] = (vmin .+ v) / 2
        end
        h[2M+1] = (vmin .+ v) / 2
        h
    end
    habc = SOS.SetProg.InteriorPoint.(hv.(v))
    ha = habc[1]
    @show nstates(hs)
    # One set variable per mode, all anchored at the same interior point.
    hi = fill(ha, nstates(hs))
    set_variables = map(setvar, hi)
    # λ-multipliers: all transitions in one-shot mode, self-loops otherwise.
    if oneshot
        λ = Dict(t => 1.0 for t in transitions(hs))
    else
        λ = Dict(t => 1.0 for t in transitions(hs) if source(hs, t) == target(hs, t))
    end
    if oneshot
        return invariant_sets(hs, sdp_factory, set_variables, λ = λ, volume_heuristic=volume_heuristic);
    else
        # Incremental solve: compute the set of each mode in turn, enabling
        # only the modes computed so far.
        sets = Vector{SwitchOnSafety.SetProg.Sets.AbstractSet{Float64}}(undef, nstates(hs))
        for i in (onlyone ? (1:1) : (1:nstates(hs)))
            println("Computing set $i")
            invariant_sets!(sets, i:i, hs, sdp_factory, set_variables, λ = λ, enabled=1:i,
                volume_heuristic=volume_heuristic)
        end
        return sets
    end
end
|
{"hexsha": "6fbf1341b6a7b04ead3fb1a929e58f1a8746e305", "size": 2912, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/cruise_control.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SwitchOnSafety.jl-ceb7f16a-07bf-5f4a-9354-b68f01b1610f", "max_stars_repo_head_hexsha": "e9fefe2cb8f45f27ed9ea95d3edee725e8b85122", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/cruise_control.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/SwitchOnSafety.jl-ceb7f16a-07bf-5f4a-9354-b68f01b1610f", "max_issues_repo_head_hexsha": "e9fefe2cb8f45f27ed9ea95d3edee725e8b85122", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/cruise_control.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/SwitchOnSafety.jl-ceb7f16a-07bf-5f4a-9354-b68f01b1610f", "max_forks_repo_head_hexsha": "e9fefe2cb8f45f27ed9ea95d3edee725e8b85122", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.12, "max_line_length": 107, "alphanum_fraction": 0.5552884615, "num_tokens": 1051}
|
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
class SVMClassifier:
    """Linear support vector machine trained by a coarse-to-fine grid
    search over (w, b), minimizing ||w|| subject to y*(w.x + b) >= 1
    for every training point.

    Training data must be a dict mapping the class label (-1 or +1) to
    an iterable of 2-D feature vectors.
    """

    def __init__(self, visualization=True):
        # Whether to draw data points / decision boundary with matplotlib.
        self.visualization = visualization
        # Plot colors keyed by class label.
        # Bug fix: keys were {0, 1} while the labels used are -1 and +1,
        # which made predict() raise KeyError when plotting.
        self.colors = {1: 'r', -1: 'b'}
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)

    def fit(self, data):
        """Train the SVM on `data` ({label: feature vectors}, labels -1/+1).

        Sets self.w (weight vector) and self.b (bias).
        """
        self.data = data
        # { ||w||: [w, b] } -- feasible candidates keyed by the norm we minimize.
        opt_dict = {}
        # Sign combinations applied to w; the constraint is re-checked for each.
        transforms = [[-1, -1], [-1, 1], [1, -1], [1, 1]]

        # Largest/smallest feature magnitudes, used to scale the search.
        all_data = []
        for y in self.data:
            for featureset in self.data[y]:
                for feature in featureset:
                    all_data.append(feature)
        self.max_feature_value = max(all_data)
        # Bug fix: was max(all_data), which broke visualize()'s data range.
        self.min_feature_value = min(all_data)
        all_data = None

        # Progressively finer step sizes for the coarse-to-fine descent.
        step_sizes = [self.max_feature_value * 0.1,
                      self.max_feature_value * 0.01,
                      self.max_feature_value * 0.001]

        # b is searched over a wider range with coarser steps (more expensive).
        b_range_multiple = 5
        b_multiple = 5  # bug fix: was referenced but never defined
        latest_optimum = self.max_feature_value * 10

        for step in step_sizes:
            w = np.array([latest_optimum, latest_optimum])
            optimized = False
            while not optimized:
                for b in np.arange(-1 * self.max_feature_value * b_range_multiple,
                                   self.max_feature_value * b_range_multiple,
                                   step * b_multiple):
                    for transform in transforms:
                        w_t = w * transform  # bug fix: typo 'tranform'
                        # Check the margin constraint y*(w.x + b) >= 1 for
                        # every point.  Bug fix: b was added OUTSIDE the
                        # label product (y*(w.x) + b) in the original.
                        found_option = True
                        for y in self.data:
                            for x in self.data[y]:
                                if y * (np.dot(w_t, x) + b) < 1:
                                    found_option = False
                                    break
                            if not found_option:
                                break
                        if found_option:
                            opt_dict[np.linalg.norm(w_t)] = [w_t, b]
                if w[0] < 0:
                    optimized = True
                    print('Optimized a Step.')
                else:
                    w -= step

            # Smallest ||w|| found at this resolution seeds the next,
            # finer pass (stepped back slightly so the optimum stays inside
            # the next search window).
            norms = sorted([n for n in opt_dict])
            opt_choice = opt_dict[norms[0]]
            self.w = opt_choice[0]
            self.b = opt_choice[1]
            latest_optimum = opt_choice[0][0] + step * 2

    def predict(self, features):
        """Return sign(w.x + b) for a single feature vector: +1, -1, or 0
        exactly on the decision boundary."""
        # Bug fix: np.sigh -> np.sign.
        prediction = np.sign(np.dot(np.array(features), self.w) + self.b)
        if prediction != 0 and self.visualization:
            # Bug fix: marker=* and c.self.colors[precition] were syntax errors.
            self.ax.scatter(features[0], features[1], s=200, marker='*',
                            c=self.colors[prediction])
        return prediction

    def visualize(self):
        """Plot the training data, the two support-vector margins and the
        decision boundary.  Requires fit() to have been called."""
        # Scatter the training points, colored per class.
        # Bug fix: the original referenced the global data_dict and had
        # unbalanced brackets; use self.data instead.
        [[self.ax.scatter(x[0], x[1], s=100, color=self.colors[y])
          for x in self.data[y]] for y in self.data]

        def hyperplane(x, w, b, v):
            # Solve w[0]*x + w[1]*y + b = v for y.
            # Bug fix: the original expression ended in a dangling '/'.
            return (-w[0] * x - b + v) / w[1]

        datarange = (self.min_feature_value * 0.9,
                     self.max_feature_value * 1.1)
        hyp_x_min, hyp_x_max = datarange

        # Positive support vector: w.x + b = 1
        psv1 = hyperplane(hyp_x_min, self.w, self.b, 1)
        psv2 = hyperplane(hyp_x_max, self.w, self.b, 1)
        self.ax.plot([hyp_x_min, hyp_x_max], [psv1, psv2], 'k')

        # Negative support vector: w.x + b = -1
        nsv1 = hyperplane(hyp_x_min, self.w, self.b, -1)
        nsv2 = hyperplane(hyp_x_max, self.w, self.b, -1)
        self.ax.plot([hyp_x_min, hyp_x_max], [nsv1, nsv2], 'k')

        # Decision boundary: w.x + b = 0
        db1 = hyperplane(hyp_x_min, self.w, self.b, 0)
        db2 = hyperplane(hyp_x_max, self.w, self.b, 0)
        self.ax.plot([hyp_x_min, hyp_x_max], [db1, db2], 'y--')

        plt.show()
# Use ggplot-like styling for all matplotlib figures.
style.use('ggplot')

# Toy training set: class label (-1/+1) -> array of 2-D feature vectors.
data_dict = {
    -1: np.array([[1, 7], [2, 8], [3, 8]]),
    1: np.array([[5, 1], [6, -1], [7, 3]])
}
|
{"hexsha": "9c482a5a41b5d8e61aa36633a7a1129e696a4750", "size": 3089, "ext": "py", "lang": "Python", "max_stars_repo_path": "manual_svm_test.py", "max_stars_repo_name": "dkirel/ManualSVM", "max_stars_repo_head_hexsha": "0c8aaf5631f272d537126a8cd6237eaa937413cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manual_svm_test.py", "max_issues_repo_name": "dkirel/ManualSVM", "max_issues_repo_head_hexsha": "0c8aaf5631f272d537126a8cd6237eaa937413cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual_svm_test.py", "max_forks_repo_name": "dkirel/ManualSVM", "max_forks_repo_head_hexsha": "0c8aaf5631f272d537126a8cd6237eaa937413cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9902912621, "max_line_length": 107, "alphanum_fraction": 0.4713499514, "include": true, "reason": "import numpy", "num_tokens": 694}
|
[STATEMENT]
lemma eq_fin_le_fininf_transp[intro, trans]:
assumes "w\<^sub>1 =\<^sub>F w\<^sub>2" "w\<^sub>2 \<preceq>\<^sub>F\<^sub>I w\<^sub>3"
shows "w\<^sub>1 \<preceq>\<^sub>F\<^sub>I w\<^sub>3"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w\<^sub>1 \<preceq>\<^sub>F\<^sub>I w\<^sub>3
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
w\<^sub>1 =\<^sub>F w\<^sub>2
w\<^sub>2 \<preceq>\<^sub>F\<^sub>I w\<^sub>3
goal (1 subgoal):
1. w\<^sub>1 \<preceq>\<^sub>F\<^sub>I w\<^sub>3
[PROOF STEP]
by blast
|
{"llama_tokens": 253, "file": "Partial_Order_Reduction_Traces", "length": 2}
|
\documentclass[a4paper,12pt]{article}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{refstyle}
\usepackage{wrapfig}
\usepackage{subcaption}
\usepackage{geometry}
\geometry{
a4paper,
total={210mm,297mm},
left=25mm,
right=25mm,
top=25mm,
bottom=25mm,
}
\title {Project Report \\ Sensor Module Interfacing \\[10pt] Task: Interfacing Gyroscope with ATmega2560 in Firebird V Robot \\[25pt] Team members }
\author {Chayatan \and Mukilan A \and Shantanu}
\begin{document}
\maketitle
\begin{center}
\begin{large}
Under the guidance of\\
\textbf{Prof. Kavi Arya\\and\\Parin Chedda}\\
\vspace{0.5in}
\end{large}
\end{center}
\begin{center}
\includegraphics[scale=0.32]{iitb.png}
\end{center}
\begin{center}
\begin{large}
Embedded and Real-Time Systems Laboratory \\
Department of Computer Science and Engineering \\
Indian Institute of Technology \\
Bombay \\
\end{large}
\end{center}
\newpage
\tableofcontents
\newpage
%----------------------------------------------------------------------------------------------------------------
\begin{abstract}The project aims at interfacing a Gyroscope with the Fire Bird V educational robot. This additional module can be used for measuring or maintaining orientation, based on the principles of angular momentum. In this paper we describe the interfacing of the basic L3G4200D Gyroscope module with the ATmega2560 in the Fire Bird V robot, covering the working principle, the basic interfacing circuit, the programming, and the applications of the Gyroscope.
\end{abstract}
%----------------------------------------------------------------------------------------------------------------
\section{Introduction}
\begin{wrapfigure}{r}{0.4\textwidth}
\begin{center}
\vspace{-10mm}
\includegraphics[scale=0.2]{Gyr0.jpg}
\end{center}
\caption{A Gyroscope}
\end{wrapfigure}
\ \ The root word in Gyroscope is derived from the Greek word ‘gyros’ meaning ring or circle. The basic model of a Gyroscope is a device that consists of a disc or a wheel that is spun rapidly around an axis, such that its orientation is independent of the tilting of the mounting. Hence, they are used in providing stability or in maintaining a reference direction in navigation systems, automatic pilots, and stabilizers.
In this document, we are going to discuss the Gyroscope sensor L3G4200D, and understand its interfacing with the FireBird V Robot.
%----------------------------------------------------------------------------------------------------------------
\section{Specifications of L3G4200D Gyroscope: }
\begin{itemize}
\item Onboard 3.3V Low Drop voltage regulator with input range of 3.6V to 6V.
\item Dimensions: 0.9”(L) X 0.5”(W)
\item 2 x Mounting holes
\item I2C/SPI digital output interface
\item Three selectable full scales (250/500/2000dps)
\item Sensitivity:
\begin{itemize}
\item 250 dps : 8.75 mdps/digit
\item 500 dps : 17.50 mdps/digit
\item 2000 dps : 70 mdps/digit
\end{itemize}
\item 16 bit data output
\item Embedded temperature sensor with 8-bit temperature data output
\item Integrated low- and high-pass filters with user selectable bandwidth
\item Embedded power-down and sleep mode
\item Extended operating temperature range (-40 °C to +85 °C)
\end{itemize}
%----------------------------------------------------------------------------------------------------------------
\pagebreak
\section{Pin Connections of L3G4200D:}
\subsection{Pin Diagram}
\begin{figure}[!h]
\begin{center}
\includegraphics[scale=0.8]{gyr3.png}
\end{center}
\caption{Pin Diagram}
\label{fig:1}
\end{figure}
\newpage
\subsection{Useful Pins in the L3G4200D Gyroscope and its Connections with the Firebird V Robot}
\begin{figure}[!h]
\begin{center}
\includegraphics[scale=0.30]{gyr2.jpg}
\end{center}
\caption{Useful Pins in the L3G4200D Gyroscope}
\label{fig:1}
\end{figure}
%---------------------------------------------------------------------------------
\begin{figure}[!h]
\begin{center}
\includegraphics[scale=0.60]{gyr7.jpg}
\end{center}
\caption{Useful Pins in the L3G4200D Gyroscope}
\label{fig:1}
\end{figure}
%---------------------------------------------------------------------------------
\vspace{-10mm}
\begin{center}
\begin{table}[!h]
\hspace{-10mm}
\caption{}
\begin{tabular}{|c|c|}
\hline
\textbf{Pins of L3G4200D Gyroscope Sensor}&\textbf{Pins of Firebird V Robot}\\
\hline
GND&Pin 23/24 (Ground) in Microcontroller Expansion Slot\\
\hline
Vin&3.3 Volts in Xbee Module in Firebird V\\
\hline
SDA&Pin 19 in Microcontroller Expansion Slot\\
\hline
SCL&Pin 20 in Microcontroller Expansion Slot\\
\hline
\end{tabular}
\label{table:t1}
\end{table}
\end{center}
\subsection{Communication between Firebird V and L3G4200D using I2C Protocol}
\textbf{I2C interface between L3G4200D and 3.3V microcontroller}
\begin{figure}[!h]
\begin{center}
\includegraphics[]{gyr5.jpg}
\end{center}
\caption{I2C interface between L3G4200D and 3.3V microcontroller}
\label{fig:1}
\end{figure}
\newpage
\section{Procedure to interface IMU to FireBird V}
\subsection{Procedure to write data into a slave}
\begin{enumerate}
\item Send START condition to initiate the process.
\item Sending the start condition will generate an interrupt. Wait for TWINT flag to be set.
\item Load SLA\_W into TWDR Register to switch to Master Write mode. The address is 0xD2 for gyroscope.
\item Clear the TWINT flag to start transmission of slave address.
\item Wait for TWINT flag to be set which signifies that the slave address has been transmitted.
\item Send address of register byte that we want to access.
\item Clear the TWINT flag to start transmission of the register address.
\item Wait for TWINT flag set which means that an interrupt is generated for sending the register address.
\item Convert the character to equivalent BCD value and load into TWDR.
\item Clear the TWINT flag to start transmission of data byte.
\item Wait for the TWINT flag to be set.
\item Send STOP condition to terminate the data transfer.
\end{enumerate}
\subsection{Procedure to read data from a slave}
\begin{enumerate}
\item Send the START condition.
\item START condition sent will generate an interrupt. Wait for TWINT Flag set which means that the interrupt has occurred.
\item Load SLA\_W into TWDR Register to switch to Master Write mode.
\item Then clear the TWINT flag to start transmission of slave address.
\item Transmission of slave address will generate an interrupt. Wait for TWINT flag to be set.
\item Send the address of the register byte that we want to access.
\item Then clear the TWINT flag to start transmission of slave address.
\item Transmission of slave address will generate an interrupt. Wait for TWINT Flag to be set.
\item Send RESTART condition and start again to operate in Master Read mode.
\item RESTART condition sent will also generate an interrupt. So wait for TWINT Flag set which means that the interrupt has occurred.
\item Load SLA\_R into TWDR Register to switch to Master Read mode. The address is D3 for Gyroscope.
\item Clear the TWINT flag to start the transmission of slave address.
\item Wait for TWINT flag to be set.
\item Clear the TWINT flag to read the addressed register.
\item Wait for the TWINT flag set.
\item Load the NO-ACK value to TWDR register.
\item Clear TWINT flag to start transmission of NO\_ACK signal.
\item Wait for TWINT flag to be set.
\item Now the value read can be used for any purpose.
\end{enumerate}
\textbf{Important Note:}
\begin{itemize}
\item SA0 pin is internally pulled up to VIO which sets LSB of I2C address as 1.
\item CS pin is internally pulled up to VIO which enables I2C mode.
\item 3V3OUT is capable of delivering 3.3 V at 40 mA. It can be used to set up pull-ups for I/O pins related to the L3G4200D. It should not be used for other purposes.
\end{itemize}
%\newpage
\section{Output of the Gyroscope L3G4200D}
\subsection{Output Displayed on LCD}
\begin{figure}[!h]
\begin{center}
\includegraphics[scale=0.40]{CAM00213.jpg}
\end{center}
\caption{Output of Gyroscope on LCD}
\label{fig:1}
\end{figure}
\section{Applications}
\begin{itemize}
\item Quadrotor
\item Balancing robots
\item Advance robotics
\item Navigation
\item Motion Control with MMI (Man Machine Interface)
\item Gaming and virtual reality input devices
\end{itemize}
\begin{figure}[!h]
\centering
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{gyr3.jpg}
\caption{Quadrotor}
\label{fig:gull}
\end{subfigure}%
~ %add desired spacing between images, e. g. ~, \quad, \qquad, \hfill etc.
%(or a blank line to force the subfigure onto a new line)
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{gyr4.jpg}
\caption{Self-Balancing Robot}
\label{fig:tiger}
\end{subfigure}
~ %add desired spacing between images, e. g. ~, \quad, \qquad, \hfill etc.
%(or a blank line to force the subfigure onto a new line)
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{gyr6.jpg}
\caption{Gyroscope in Gaming}
\label{fig:mouse}
\end{subfigure}
\caption{Gyroscope Applications}\label{fig:animal}
\end{figure}
\newpage
\section{Reference}
\begin{enumerate}
\item L3G4200D IMU datasheet.
\item LSM303DLHC accelerometer datasheet.
\item ATMEGA 2560 datasheet.
\end{enumerate}
\end{document}
|
{"hexsha": "8f637edd40afee97fa12c3325073d7cfeae7dae0", "size": 9443, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "5.IMU/Gyroscope/documentation&tutorial/Gyro_manual/Gyroscope.tex", "max_stars_repo_name": "eyantra/Sensor-Module-Interfacing", "max_stars_repo_head_hexsha": "a5d7d3dc259fb7ddf369c513d20f011a099c927f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "5.IMU/Gyroscope/documentation&tutorial/Gyro_manual/Gyroscope.tex", "max_issues_repo_name": "eyantra/Sensor-Module-Interfacing", "max_issues_repo_head_hexsha": "a5d7d3dc259fb7ddf369c513d20f011a099c927f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "5.IMU/Gyroscope/documentation&tutorial/Gyro_manual/Gyroscope.tex", "max_forks_repo_name": "eyantra/Sensor-Module-Interfacing", "max_forks_repo_head_hexsha": "a5d7d3dc259fb7ddf369c513d20f011a099c927f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6007751938, "max_line_length": 455, "alphanum_fraction": 0.6995658159, "num_tokens": 2512}
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from math import cos, sin
from IPython import embed
robot_radius = 3.35/2.0;
def draw_circle(ax, pos_ang, radius, color, label=None):
    """Draw a filled circle at pos_ang[:2] on axes `ax`.

    If `pos_ang` has a third element, it is treated as a heading angle
    (radians) and an orientation arrow of length `radius` is drawn too.
    Returns the circle artist (usable as a legend handle).
    """
    center = pos_ang[:2]
    circle = plt.Circle(center, radius, color=color, label=label)
    ax.add_artist(circle)
    # A 3-element pose carries a heading angle; visualize it as an arrow.
    if len(pos_ang) == 3:
        heading = pos_ang[-1]
        dx = cos(heading) * radius
        dy = sin(heading) * radius
        arrow = plt.arrow(center[0], center[1], dx, dy,
                          fc=np.asarray(color) * 0.5, ec="k",
                          head_width=radius / 2., head_length=radius / 2.)
        ax.add_artist(arrow)
    return circle
def draw_env(ax, start_pos, end_pos, robot_radius, obs_poses, obs_radii):
    """Draw the start pose (green), goal pose (red) and circular obstacles.

    :param obs_poses: array-like of obstacle centers, one (x, y) per row
    :param obs_radii: array-like of obstacle radii, same length as obs_poses
    """
    draw_circle(ax, start_pos, robot_radius, color=(0,1,0))
    draw_circle(ax, end_pos, robot_radius, color=(1,0,0))
    # range() instead of the Python-2-only xrange() (rest of this file
    # already uses range()).
    for i in range(obs_radii.size):
        obs_radius = obs_radii[i]
        obs_pos = obs_poses[i]
        ax.add_artist(plt.Circle(obs_pos, obs_radius, color=(0.1,0.1,0.1)))
def draw_traj(ax, states, robot_radius, color, label, skip = 3):
    """Draw a trajectory as a sequence of robot-sized circles.

    Only every `skip`-th state is drawn to keep the plot readable.
    Returns the first circle artist so it can serve as a legend handle.
    """
    label_circ = draw_circle(ax, states[0], robot_radius, color=color, label=label)
    # range() instead of the Python-2-only xrange().
    for i in range(len(states)):
        if i % skip != 0:
            continue
        state = states[i]
        draw_circle(ax, state, robot_radius, color=color)
    return label_circ
def load_and_draw(ax, state_file, color, skip):
    """Load a trajectory file and draw it; return a legend-handle artist.

    File layout: the first two rows are the start and end state (not drawn
    here -- draw_env renders them); the remaining rows are the trajectory.
    """
    states = np.genfromtxt(state_file, delimiter=[13,13,13])
    # Drop the start/end rows; only the trajectory itself is drawn.
    # (The previously extracted start_pos/end_pos locals were unused.)
    states = states[2:,:]
    label_circ = draw_traj(ax, states, robot_radius, color=color, label=state_file, skip=skip)
    return label_circ
def parse_draw_files(states_files, obstacles_file, COLORS = None, show = True):
    """Plot one or more robot trajectories in a circular-obstacle world.

    :param states_files: list of trajectory files (rows: start, end, states...)
    :param obstacles_file: file with world extents on the first row and one
        obstacle (x, y, radius) per subsequent row
    :param COLORS: optional list of RGBA tuples, one per trajectory; random
        colors are appended when fewer colors than files are supplied
    :param show: if True, block on plt.show()
    :return: the matplotlib Axes that was drawn on
    """
    if COLORS is None:
        COLORS = [(0.3,0.3,0.3, 0.2), (0.1,0.8,0.8, 0.2), (0.1,0.3,0.8, 0.2), (0.8,0.3,0.8, 0.2), (0.7,0.8,0.2, 0.2)]
    if len(COLORS) < len(states_files):
        for i in range(len(states_files) - len(COLORS)):
            COLORS.append(tuple(np.random.random(3)) + (0.2,))
    # (Removed unused DRAW_Z / BASE_SCALE locals.)
    # First row of the obstacles file holds the world extents.
    world_dim = np.genfromtxt(obstacles_file, delimiter=[13,13,13,13], max_rows = 1)
    obstacles = np.genfromtxt(obstacles_file, delimiter=[13,13,13], skip_header = 1)
    obstacles = np.atleast_2d(obstacles)  # handle a file with a single obstacle
    obs_poses = obstacles[:,:2]
    obs_radii = obstacles[:,2:]
    # Start/end poses come from the first two rows of the first trajectory file.
    states = np.genfromtxt(states_files[0], delimiter=[13,13,13])
    start_pos = states[0,:]
    end_pos = states[1,:]
    plt.figure(figsize=(10, 8))
    ax = plt.gca()
    draw_env(ax, start_pos, end_pos, robot_radius, obs_poses, obs_radii)
    skip = 3
    labels = []
    for i in range(len(states_files)):
        color = COLORS[i]
        label_circ = load_and_draw(ax, states_files[i], color, skip)
        labels.append(label_circ)
    plt.axis('square')
    plt.axis(world_dim)
    ax.legend(handles=labels)
    plt.tight_layout()
    if show:
        plt.show()
    return ax
if __name__ == "__main__":
    # Example driver: compare two precomputed iLQR trajectories in the same
    # obstacle world. Swap in any of the commented file sets below to plot
    # other experiments.
    #obstacles_file = "../../build/obstacles.csv"
    #states_file = "../../build/states.csv"
    #obstacles_file = "obstacles.csv"
    obstacles_file = "./has_obs_obstacles.csv"
    #states_file = ["states.csv"]
    #states_files = ["./ilqr_true_states.csv", "./hindsight_50-50_states.csv", "./hindsight_25-75_states.csv", "./hindsight_10-90_states.csv", "./argmax_states.csv"]
    #states_files = ["./ilqr_true_states.csv", "./hindsight_10-90_states.csv", "./weighted_10-90_states.csv"]
    states_files = ["./has_obs_ilqr_true_states.csv", "./no_obs_ilqr_true_states.csv"]
    parse_draw_files(states_files, obstacles_file);
    print('hi')
|
{"hexsha": "9e7fadd0e49a451522752e0970ce0eacce77e39b", "size": 3845, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/visualize_circle_world.py", "max_stars_repo_name": "LAIRLAB/qr_trees", "max_stars_repo_head_hexsha": "66eb7310daa1d9978158198a508d02bf2128a377", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-16T08:42:33.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-16T08:42:33.000Z", "max_issues_repo_path": "src/python/visualize_circle_world.py", "max_issues_repo_name": "LAIRLAB/qr_trees", "max_issues_repo_head_hexsha": "66eb7310daa1d9978158198a508d02bf2128a377", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/visualize_circle_world.py", "max_forks_repo_name": "LAIRLAB/qr_trees", "max_forks_repo_head_hexsha": "66eb7310daa1d9978158198a508d02bf2128a377", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-07-10T03:25:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-22T15:58:44.000Z", "avg_line_length": 35.2752293578, "max_line_length": 165, "alphanum_fraction": 0.6421326398, "include": true, "reason": "import numpy", "num_tokens": 1136}
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from edict import AttrDict
import six
import numpy as np
# Global configuration container; `cfg` is the public alias used by callers.
_C = AttrDict()
cfg = _C
#
# Training options
#
# Snapshot period: save a checkpoint every `snapshot_iter` iterations
_C.snapshot_iter = 2000
# min valid area for gt boxes (-1 disables the filter)
_C.gt_min_area = -1
# max target box number in an image
_C.max_box_num = 50
#
# Inference options (NOTE(review): the original header repeated "Training
# options"; these settings configure score filtering and NMS at eval time)
#
# valid score threshold to include boxes
_C.valid_thresh = 0.005
# threshold value for box non-max suppression
_C.nms_thresh = 0.45
# the number of top k boxes to perform nms
_C.nms_topk = 400
# the number of output boxes after nms
_C.nms_posk = 100
# score threshold for draw box in debug mode
_C.draw_thresh = 0.5
#
# Model options
#
# pixel mean values (per RGB channel, for input normalization)
_C.pixel_means = [0.485, 0.456, 0.406]
# pixel std values (per RGB channel, for input normalization)
_C.pixel_stds = [0.229, 0.224, 0.225]
# anchor box widths and heights, flattened (w, h) pairs
_C.anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
# anchor mask of each yolo layer (indices into the (w, h) pairs above)
_C.anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
# IoU threshold to ignore objectness loss of pred box
_C.ignore_thresh = .7
#
# SOLVER options
#
# batch size
_C.batch_size = 8
# base learning rate used to derive the final learning rate
_C.learning_rate = 0.001
# maximum number of iterations
_C.max_iter = 500200
# warm up the learning rate over this many iterations,
# starting from warm_up_factor * learning_rate
_C.warm_up_iter = 4000
_C.warm_up_factor = 0.
# iterations at which the lr is multiplied by lr_gamma (steps_with_decay)
_C.lr_steps = [400000, 450000]
_C.lr_gamma = 0.1
# L2 regularization hyperparameter
_C.weight_decay = 0.0005
# momentum with SGD
_C.momentum = 0.9
#
# ENV options
#
# support both CPU and GPU
_C.use_gpu = True
# Class number
_C.class_num = 80
# dataset path (COCO 2017 layout -- TODO confirm against the data reader)
_C.train_file_list = 'annotations/instances_train2017.json'
_C.train_data_dir = 'train2017'
_C.val_file_list = 'annotations/instances_val2017.json'
_C.val_data_dir = 'val2017'
def merge_cfg_from_args(args):
    """Merge config keys, values in args into the global config.

    Each value is first evaluated so that e.g. "0.5" becomes a float and
    "True" a bool; values that fail to evaluate (plain strings such as
    file paths) are stored as-is.
    """
    for k, v in sorted(six.iteritems(vars(args))):
        try:
            # SECURITY: eval() executes arbitrary expressions; args are
            # assumed to come from a trusted command line. Consider
            # ast.literal_eval for untrusted input.
            value = eval(v)
        except Exception:
            # A bare `except:` would also swallow KeyboardInterrupt/SystemExit.
            value = v
        _C[k] = value
|
{"hexsha": "b7e1eb1c7bf0fe0a2024ed8ef29270687fae5a3a", "size": 2763, "ext": "py", "lang": "Python", "max_stars_repo_path": "fluid/PaddleCV/yolov3/config.py", "max_stars_repo_name": "kuke/models", "max_stars_repo_head_hexsha": "b610d1654fa1d728fe2171fb02ee47497942fe24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-04-29T09:12:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-30T02:39:02.000Z", "max_issues_repo_path": "fluid/PaddleCV/yolov3/config.py", "max_issues_repo_name": "kuke/models", "max_issues_repo_head_hexsha": "b610d1654fa1d728fe2171fb02ee47497942fe24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-06-26T03:21:49.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-19T09:43:42.000Z", "max_forks_repo_path": "fluid/PaddleCV/yolov3/config.py", "max_forks_repo_name": "kuke/models", "max_forks_repo_head_hexsha": "b610d1654fa1d728fe2171fb02ee47497942fe24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-25T12:54:05.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-25T12:54:05.000Z", "avg_line_length": 21.4186046512, "max_line_length": 91, "alphanum_fraction": 0.7238508867, "include": true, "reason": "import numpy", "num_tokens": 803}
|
[STATEMENT]
lemma comp_left_increasing_sup:
"x * y \<le> (x \<squnion> z) * y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x * y \<le> (x \<squnion> z) * y
[PROOF STEP]
by (simp add: comp_left_isotone)
|
{"llama_tokens": 100, "file": "Stone_Relation_Algebras_Relation_Algebras", "length": 1}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 25 19:57:35 2020
Read and write event data.
Conversion of event data into frames (images, 2D):
- histograms of events
- thresholded (1 f-stop)
- brightness increment images
- time surfaces: exponential decay or average time
With polarity on the same representation or split by polarity
Visualization of event data in 3D
- with or without polarity
- change of viewpoint and movie creation
Write it nicely in utility functions
@author: ggb
"""
import os
import numpy as np
from matplotlib import pyplot as plt
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
plt.close('all')
# %% Read a file of events and write another file with a subset of them
filename_sub = 'slider_depth/events_chunk.txt'  # pre-extracted event subset (see the note below)
"""
# This is how the file events_chunk.txt was generated from the events.txt file in the IJRR 2017 dataset
events_raw = open('slider_depth/events.txt', "r")
events_sub = open(filename_sub, "w")
# format: timestamp, x, y, polarity
for k in range(50000):
line = events_raw.readline()
#print(line)
events_sub.write(line)
events_raw.close()
events_sub.close()
"""
# %% Read file with a subset of events
# Simple. There may be more efficient ways.
def extract_data(filename):
    """Read events from a text file.

    Each line has the format: "timestamp x y polarity", with the timestamp
    in seconds (float), pixel coordinates x, y (int) and polarity (int).

    :return: (timestamp, x, y, pol) as four parallel lists.
    """
    timestamp = []
    x = []
    y = []
    pol = []
    # 'with' guarantees the file is closed even if parsing raises.
    with open(filename, 'r') as infile:
        for line in infile:
            words = line.split()
            if len(words) < 4:
                continue  # skip blank or malformed lines
            timestamp.append(float(words[0]))
            x.append(int(words[1]))
            y.append(int(words[2]))
            pol.append(int(words[3]))
    return timestamp, x, y, pol
# Call the function to read data
timestamp, x, y, pol = extract_data(filename_sub)
# %% Sensor size
# Get the size of the sensor using a grayscale frame (in case of a DAVIS)
# filename_frame = 'slider_depth/images/frame_00000000.png'
# import cv2
# img = cv2.imread(filename_frame, cv2.IMREAD_GRAYSCALE)
# print img.shape
# img = np.zeros(img.shape, np.int)
# For this exercise, we just provide the sensor size (height, width)
img_size = (180,240)  # DAVIS240: 180 rows (y) by 240 columns (x)
# %% Brightness increment image (Balance of event polarities)
num_events = 5000  # Number of events used
print("Brightness increment image: numevents = ", num_events)

# Compute image by accumulating polarities.
# np.int was removed in NumPy 1.24; the builtin int is the correct dtype.
img = np.zeros(img_size, int)
for i in range(num_events):
    # Need to convert the polarity bit from {0,1} to {-1,+1} and accumulate
    img[y[i],x[i]] += (2*pol[i]-1)

# Display the image in grayscale, with a symmetric color range so that
# zero (balanced polarity) maps to mid gray.
fig = plt.figure()
fig.suptitle('Balance of event polarities')
maxabsval = np.amax(np.abs(img))
plt.imshow(img, cmap='gray', clim=(-maxabsval,maxabsval))
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

# Same plot as above, but changing the color map
fig = plt.figure()
fig.suptitle('Balance of event polarities')
maxabsval = np.amax(np.abs(img))
plt.imshow(img, cmap='seismic_r', clim=(-maxabsval,maxabsval))
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()
# %% 2D Histograms of events, split by polarity (positive and negative events in separate images)
# np.int was removed in NumPy 1.24; the builtin int is the correct dtype.
img_pos = np.zeros(img_size, int)
img_neg = np.zeros(img_size, int)
for i in range(num_events):
    if (pol[i] > 0):
        img_pos[y[i],x[i]] += 1 # count events
    else:
        img_neg[y[i],x[i]] += 1

fig = plt.figure()
fig.suptitle('Histogram of positive events')
plt.imshow(img_pos)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

fig = plt.figure()
fig.suptitle('Histogram of negative events')
plt.imshow(img_neg)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()
# %% Thresholded brightness increment image (Ternary image)
# What if we only use 3 values in the event accumulation image?
# Saturated signal: -1, 0, 1
# For example, store the polarity of the last event at each pixel
img = np.zeros(img_size, int)  # np.int was removed in NumPy 1.24
for i in range(num_events):
    img[y[i],x[i]] = (2*pol[i]-1) # no accumulation; overwrite the stored value

# Display the ternary image
fig = plt.figure()
fig.suptitle('Last event polarity per pixel')
plt.imshow(img, cmap='gray')
#plt.imshow(img, cmap='bwr')
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()
# _____________________________________________________________________________
# %% Time surface (or time map, or SAE="Surface of Active Events")
# Each pixel stores exp(-(t_ref - t_last)/tau), where t_last is the time of
# the most recent event at that pixel: recent activity appears bright.
num_events = len(timestamp)  # from here on, use all events in the file
print("Time surface: numevents = ", num_events)

img = np.zeros(img_size, np.float32)
t_ref = timestamp[-1] # time of the last event in the packet
tau = 0.03 # decay parameter (in seconds); larger tau = slower fading
for i in range(num_events):
    # Later events overwrite earlier ones at the same pixel.
    img[y[i],x[i]] = np.exp(-(t_ref-timestamp[i]) / tau)

fig = plt.figure()
fig.suptitle('Time surface (exp decay). Both polarities')
plt.imshow(img)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

# %% Time surface (or time map, or SAE), separated by polarity
sae_pos = np.zeros(img_size, np.float32)
sae_neg = np.zeros(img_size, np.float32)
for i in range(num_events):
    if (pol[i] > 0):
        sae_pos[y[i],x[i]] = np.exp(-(t_ref-timestamp[i]) / tau)
    else:
        sae_neg[y[i],x[i]] = np.exp(-(t_ref-timestamp[i]) / tau)

fig = plt.figure()
fig.suptitle('Time surface (exp decay) of positive events')
plt.imshow(sae_pos)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

fig = plt.figure()
fig.suptitle('Time surface (exp decay) of negative events')
plt.imshow(sae_neg)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

# %% Time surface (or time map, or SAE), using polarity as sign of the time map
# Positive events decay toward +0, negative events toward -0.
sae = np.zeros(img_size, np.float32)
for i in range(num_events):
    if (pol[i] > 0):
        sae[y[i],x[i]] = np.exp(-(t_ref-timestamp[i]) / tau)
    else:
        sae[y[i],x[i]] = -np.exp(-(t_ref-timestamp[i]) / tau)

fig = plt.figure()
fig.suptitle('Time surface (exp decay), using polarity as sign')
plt.imshow(sae, cmap='seismic') # using color (Red/blue)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

# %% "Balance of time surfaces"
# Accumulate exponential decays using polarity as sign of the time map
# (+= / -= instead of assignment, so repeated events at a pixel add up).
sae = np.zeros(img_size, np.float32)
for i in range(num_events):
    if (pol[i] > 0):
        sae[y[i],x[i]] += np.exp(-(t_ref-timestamp[i]) / tau)
    else:
        sae[y[i],x[i]] -= np.exp(-(t_ref-timestamp[i]) / tau)

fig = plt.figure()
fig.suptitle('Time surface (exp decay), balance of both polarities')
#plt.imshow(sae)
# Symmetric color range so zero maps to the middle of the colormap.
maxabsval = np.amax(np.abs(sae))
plt.imshow(sae, cmap='seismic', clim=(-maxabsval,maxabsval))
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()
# %% Average timestamp per pixel
sae = np.zeros(img_size, np.float32)
count = np.zeros(img_size, int)  # np.int was removed in NumPy 1.24
for i in range(num_events):
    sae[y[i],x[i]] += timestamp[i]
    count[y[i],x[i]] += 1
# Compute the per-pixel average; clamp zero counts to 1 to avoid
# division by zero (those pixels keep their 0 sum anyway).
count[count < 1] = 1
sae = sae / count

fig = plt.figure()
fig.suptitle('Average timestamps regardless of polarity')
plt.imshow(sae)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()
# %% Average timestamp per pixel. Separate by polarity
sae_pos = np.zeros(img_size, np.float32)
sae_neg = np.zeros(img_size, np.float32)
count_pos = np.zeros(img_size, int)  # np.int was removed in NumPy 1.24
count_neg = np.zeros(img_size, int)
for i in range(num_events):
    if (pol[i] > 0):
        sae_pos[y[i],x[i]] += timestamp[i]
        count_pos[y[i],x[i]] += 1
    else:
        sae_neg[y[i],x[i]] += timestamp[i]
        count_neg[y[i],x[i]] += 1
# Per-pixel averages; clamp zero counts to 1 to avoid division by zero.
count_pos[count_pos < 1] = 1; sae_pos = sae_pos / count_pos
count_neg[count_neg < 1] = 1; sae_neg = sae_neg / count_neg

fig = plt.figure()
fig.suptitle('Average timestamps of positive events')
plt.imshow(sae_pos)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()

fig = plt.figure()
fig.suptitle('Average timestamps of negative events')
plt.imshow(sae_neg)
plt.xlabel("x [pixels]")
plt.ylabel("y [pixels]")
plt.colorbar()
plt.show()
# _____________________________________________________________________________
# %% 3D plot
# Time axis in horizontal position
m = 2000 # Number of points to plot
print("Space-time plot and movie: numevents = ", m)

# Plot without polarity: every event is a blue dot at (x, t, y).
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.set_aspect('equal') # only works for time in Z axis
ax.scatter(x[:m], timestamp[:m], y[:m], marker='.', c='b')
ax.set_xlabel('x [pix]')
ax.set_ylabel('time [s]')
ax.set_zlabel('y [pix] ')
ax.view_init(azim=-90, elev=-180) # Change viewpoint with the mouse, for example
plt.show()

# %% Plot each polarity with a different color (red / blue)
idx_pos = np.asarray(pol[:m]) > 0  # boolean masks for the two polarities
idx_neg = np.logical_not(idx_pos)
xnp = np.asarray(x[:m])
ynp = np.asarray(y[:m])
tnp = np.asarray(timestamp[:m])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xnp[idx_pos], tnp[idx_pos], ynp[idx_pos], marker='.', c='b')
ax.scatter(xnp[idx_neg], tnp[idx_neg], ynp[idx_neg], marker='.', c='r')
ax.set(xlabel='x [pix]', ylabel='time [s]', zlabel='y [pix]')
ax.view_init(azim=-90, elev=-180)
plt.show()
# %% Transition between two viewpoints
num_interp_viewpoints = 60  # number of interpolated viewpoints
ele = np.linspace(-150,-180, num=num_interp_viewpoints)
azi = np.linspace( -50, -90, num=num_interp_viewpoints)

# Create directory to save images and then create a movie
dirName = 'tempDir'
if not os.path.exists(dirName):
    os.mkdir(dirName)
    print("Directory " , dirName , " Created ")
else:
    print("Directory " , dirName , " already exists")

# Save one frame per interpolated viewpoint.
# range() instead of the Python-2-only xrange() (rest of file uses range()).
for ii in range(0,num_interp_viewpoints):
    ax.view_init(azim=azi[ii], elev=ele[ii])
    plt.savefig(dirName + "/movie%04d.png" % ii)
# %% Create a movie using ffmpeg static build (https://johnvansickle.com/ffmpeg/)
# Video coding options, such as lossless: https://trac.ffmpeg.org/wiki/Encode/H.264
def createMovie():
    # Stitch the saved PNG frames into an H.264 movie at 20 fps (-crf 0 = lossless).
    # NOTE(review): the ffmpeg path is hard-coded to one user's home directory;
    # consider using "ffmpeg" from PATH for portability.
    os.system("/home/ggb/Downloads/ffmpeg-4.2.2-i686-static/ffmpeg -r 20 -i "
              + dirName + "/movie%04d.png -c:v libx264 -crf 0 -y movie_new.mp4")
# Call the function to create the movie
createMovie()
# _____________________________________________________________________________
# %% Voxel grid
num_bins = 5
print("Number of time bins = ", num_bins)
t_max = np.amax(np.asarray(timestamp[:m]))
t_min = np.amin(np.asarray(timestamp[:m]))
t_range = t_max - t_min
dt_bin = t_range / num_bins # size of the time bins (bins)
t_edges = np.linspace(t_min,t_max,num_bins+1) # Boundaries of the bins
# Compute 3D histogram of events manually with a loop
# ("Zero-th order or nearest neighbor voting")
hist3d = np.zeros(img.shape+(num_bins,), np.int)
for ii in xrange(m):
idx_t = int( (timestamp[ii]-t_min) / dt_bin )
if idx_t >= num_bins:
idx_t = num_bins-1 # only one element (the last one)
hist3d[y[ii],x[ii],idx_t] += 1
# Checks:
print("hist3d")
print hist3d.shape
print np.sum(hist3d) # This should equal the number of votes
# %% Compute 3D histogram of events using numpy function histogramdd
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogramdd.html#numpy.histogramdd
# Specify bin edges in each dimension
bin_edges = (np.linspace(0,img_size[0],img_size[0]+1),
             np.linspace(0,img_size[1],img_size[1]+1), t_edges)
yxt = np.transpose(np.array([y[:m], x[:m], timestamp[:m]]))
hist3dd, edges = np.histogramdd(yxt, bins=bin_edges)

# Checks (Python-2 `print x` statements converted to function calls)
print("\nhist3dd")
print("min = ", np.min(hist3dd))
print("max = ", np.max(hist3dd))
print(np.sum(hist3dd))
print(np.linalg.norm( hist3dd - hist3d)) # Check: zero if both histograms are equal
print("Ratio of occupied bins = ", np.sum(hist3dd > 0) / float(np.prod(hist3dd.shape)) )

# Plot of the 3D histogram. Empty cells are transparent (not displayed)
# Example: https://matplotlib.org/3.1.1/gallery/mplot3d/voxels_rgb.html#sphx-glr-gallery-mplot3d-voxels-rgb-py
fig = plt.figure()
fig.suptitle('3D histogram (voxel grid), zero-th order voting')
# fig.gca(projection=...) was removed in Matplotlib 3.6; add_subplot is the
# supported way to get a 3D axes.
ax = fig.add_subplot(projection='3d')
# prepare some coordinates
r, g, b = np.indices((img_size[0]+1,img_size[1]+1,num_bins+1))
ax.voxels(g,b,r, hist3d) # No need to swap the data to plot with reordered axes
ax.set(xlabel='x', ylabel='time bin', zlabel='y')
ax.view_init(azim=-90, elev=-180) # edge-on, along time axis
#ax.view_init(azim=-63, elev=-145) # oblique viewpoint
plt.show()
# %% Compute interpolated 3D histogram (voxel grid)
# Each event votes linearly into its two nearest time bins.
# (xrange -> range, `print x` -> print(x) for Python 3.)
hist3d_interp = np.zeros(img.shape+(num_bins,), np.float64)
for ii in range(m-1):
    tn = (timestamp[ii] - t_min) / dt_bin # normalized time, in [0,num_bins]
    ti = np.floor(tn-0.5) # index of the left bin
    dt = (tn-0.5) - ti # delta fraction
    # Voting on two adjacent bins
    if ti >=0 :
        hist3d_interp[y[ii],x[ii],int(ti) ] += 1. - dt
    if ti < num_bins-1 :
        hist3d_interp[y[ii],x[ii],int(ti)+1] += dt

# Checks
print("\nhist3d_interp")
print("min = ", np.min(hist3d_interp))
print("max = ", np.max(hist3d_interp))
print(np.sum(hist3d_interp))
# Some votes are lost because of the missing last layer
print(np.linalg.norm( hist3d - hist3d_interp))
print("Ratio of occupied bins = ", np.sum(hist3d_interp > 0) / float(np.prod(hist3d_interp.shape)) )

# Plot voxel grid: gray level encodes the (normalized) vote count.
colors = np.zeros(hist3d_interp.shape + (3,))
tmp = hist3d_interp/np.amax(hist3d_interp) # normalize in [0,1]
colors[..., 0] = tmp
colors[..., 1] = tmp
colors[..., 2] = tmp
fig = plt.figure()
fig.suptitle('Interpolated 3D histogram (voxel grid)')
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) removed in Matplotlib 3.6
ax.voxels(g,b,r, hist3d_interp, facecolors=colors)
ax.set(xlabel='x', ylabel='time bin', zlabel='y')
ax.view_init(azim=-63, elev=-145)
plt.show()

# %% A different visualization viewpoint
ax.view_init(azim=-90, elev=-180) # edge-on, along time axis
plt.show()
# %% Compute interpolated 3D histogram (voxel grid) using polarity
# Same linear voting as above, but votes are signed by event polarity.
# (xrange -> range, `print x` -> print(x) for Python 3.)
hist3d_interp_pol = np.zeros(img.shape+(num_bins,), np.float64)
for ii in range(m-1):
    tn = (timestamp[ii] - t_min) / dt_bin # normalized time, in [0,num_bins]
    ti = np.floor(tn-0.5) # index of the left bin
    dt = (tn-0.5) - ti # delta fraction
    # Voting on two adjacent bins
    if ti >=0 :
        hist3d_interp_pol[y[ii],x[ii],int(ti) ] += (1. - dt) * (2*pol[ii]-1)
    if ti < num_bins-1 :
        hist3d_interp_pol[y[ii],x[ii],int(ti)+1] += dt * (2*pol[ii]-1)

# Checks
# Some votes are lost because of the missing last layer
print("\nhist3d_interp_pol")
print("min = ", np.min(hist3d_interp_pol))
print("max = ", np.max(hist3d_interp_pol))
print(np.sum(np.abs(hist3d_interp_pol)))
print("Ratio of occupied bins = ", np.sum(np.abs(hist3d_interp_pol) > 0) / float(np.prod(hist3d_interp_pol.shape)) )

# Plot interpolated voxel grid using polarity
# Normalize the symmetric range to [0,1] so zero maps to mid gray.
maxabsval = np.amax(np.abs(hist3d_interp_pol))
colors = np.zeros(hist3d_interp_pol.shape + (3,))
tmp = (hist3d_interp_pol + maxabsval)/(2*maxabsval)
colors[..., 0] = tmp
colors[..., 1] = tmp
colors[..., 2] = tmp
fig = plt.figure()
fig.suptitle('Interpolated 3D histogram (voxel grid), including polarity')
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) removed in Matplotlib 3.6
ax.voxels(g,b,r, hist3d_interp_pol, facecolors=colors)
ax.set(xlabel='x', ylabel='time bin', zlabel='y')
ax.view_init(azim=-63, elev=-145)
plt.show()

# %% Better visualization viewpoint to see positive and negative edges
ax.view_init(azim=-90, elev=-180) # edge-on, along time axis
plt.show()
|
{"hexsha": "50273ac00b62fd070d98f4ba37953d2f0fec08cb", "size": 15488, "ext": "py", "lang": "Python", "max_stars_repo_path": "ex2_events_visualization.py", "max_stars_repo_name": "tub-rip/events_viz", "max_stars_repo_head_hexsha": "dfc6fd27688c70f11e98b349111e35a5aad9a718", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-12-27T01:58:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T11:07:17.000Z", "max_issues_repo_path": "ex2_events_visualization.py", "max_issues_repo_name": "tub-rip/events_viz", "max_issues_repo_head_hexsha": "dfc6fd27688c70f11e98b349111e35a5aad9a718", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex2_events_visualization.py", "max_forks_repo_name": "tub-rip/events_viz", "max_forks_repo_head_hexsha": "dfc6fd27688c70f11e98b349111e35a5aad9a718", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-03-06T06:25:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T02:52:57.000Z", "avg_line_length": 30.5483234714, "max_line_length": 116, "alphanum_fraction": 0.6887913223, "include": true, "reason": "import numpy", "num_tokens": 4428}
|
import tensorflow as tf
import numpy as np
import math
# ======================================================h===================== #
# TensorFlow implementation of Text Boxes encoding / decoding.
# =========================================================================== #
def tf_text_bboxes_encode_layer(bboxes,
                                anchors_layer, num,
                                matching_threshold=0.5,
                                prior_scaling=[0.1, 0.1, 0.2, 0.2],
                                dtype=tf.float32):
    """Encode groundtruth boxes against the anchors of one feature layer.

    For every anchor, the loop below keeps the groundtruth box with the best
    Jaccard (IoU) overlap above `matching_threshold`, then regresses the
    anchor towards it with the standard SSD center/size encoding.

    Arguments:
      bboxes: Nx4 float Tensor with groundtruth boxes in relative
        (ymin, xmin, ymax, xmax) coordinates.
      anchors_layer: tuple (yref, xref, href, wref) of numpy arrays with this
        layer's anchor centers and sizes.
      num: number of valid rows of `bboxes` (bound of the while_loop).
      matching_threshold: minimum Jaccard overlap for a positive match.
      prior_scaling: variance-style scaling of the encoded cy/cx/h/w.
        NOTE(review): mutable default argument; harmless because it is never
        mutated, but a tuple would be safer.
      dtype: dtype of the produced tensors.
    Return:
      (feat_localizations, feat_scores): encoded offsets with a trailing
      dimension of 4 in (cx, cy, w, h) order, and the best Jaccard score per
      anchor.  Text detection is a binary problem, so the scores double as
      target labels.
    """
    # Anchors coordinates and volume.
    yref, xref, href, wref = anchors_layer
    # Corner coordinates of every anchor (broadcast over the ratio axis).
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.
    vol_anchors = (xmax - xmin) * (ymax - ymin)
    # Initialize tensors...
    shape = (yref.shape[0], yref.shape[1], yref.shape[2], href.size)
    # all follow the shape(feat.size, feat.size, 2, 6)
    #feat_labels = tf.zeros(shape, dtype=tf.int64)
    # Running best-score and best-box-corner accumulators, updated in-place
    # by the while_loop body below.
    feat_scores = tf.zeros(shape, dtype=dtype)
    feat_ymin = tf.zeros(shape, dtype=dtype)
    feat_xmin = tf.zeros(shape, dtype=dtype)
    feat_ymax = tf.ones(shape, dtype=dtype)
    feat_xmax = tf.ones(shape, dtype=dtype)
    def jaccard_with_anchors(bbox):
        """
        Compute jaccard score between a box and the anchors.
        """
        # Intersection rectangle between `bbox` and every anchor.
        int_ymin = tf.maximum(ymin, bbox[0])
        int_xmin = tf.maximum(xmin, bbox[1])
        int_ymax = tf.minimum(ymax, bbox[2])
        int_xmax = tf.minimum(xmax, bbox[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        union_vol = vol_anchors - inter_vol \
            + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
        jaccard = tf.div(inter_vol, union_vol)
        return jaccard
    """
    # never use in Textbox
    def intersection_with_anchors(bbox):
        '''
        Compute intersection between score a box and the anchors.
        '''
        int_ymin = tf.maximum(ymin, bbox[0])
        int_xmin = tf.maximum(xmin, bbox[1])
        int_ymax = tf.minimum(ymax, bbox[2])
        int_xmax = tf.minimum(xmax, bbox[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        inter_vol = h * w
        scores = tf.div(inter_vol, vol_anchors)
        return scores
    """
    def condition(i, feat_scores,
                  feat_ymin, feat_xmin, feat_ymax, feat_xmax):
        """Condition: check label index.
        """
        # Loop while there are groundtruth boxes left to process.
        #r = tf.less(i, tf.shape(bboxes)[0])
        r = tf.less(i, num)
        return r
    def body(i, feat_scores,feat_ymin, feat_xmin, feat_ymax, feat_xmax):
        """Body: update feature labels, scores and bboxes.
        Follow the original SSD paper for that purpose:
          - assign values when jaccard > 0.5;
          - only update if beat the score of other bboxes.
        """
        # Jaccard score.
        bbox = bboxes[i]
        jaccard = jaccard_with_anchors(bbox)
        # Mask: check threshold + scores + no annotations + num_classes.
        # An anchor is (re)assigned only when this box both beats its current
        # best score and clears the matching threshold.
        mask = tf.greater(jaccard, feat_scores)
        mask = tf.logical_and(mask, tf.greater(jaccard, matching_threshold))
        #mask = tf.logical_and(mask, feat_scores > -0.5)
        #mask = tf.logical_and(mask, label < num_classes)
        imask = tf.cast(mask, tf.int64)
        fmask = tf.cast(mask, dtype)
        # Update values using mask.
        #feat_labels = imask * label + (1 - imask) * feat_labels
        feat_scores = tf.where(mask, jaccard, feat_scores)
        # Blend in the matched box's corners where the mask is set.
        feat_ymin = fmask * bbox[0] + (1 - fmask) * feat_ymin
        feat_xmin = fmask * bbox[1] + (1 - fmask) * feat_xmin
        feat_ymax = fmask * bbox[2] + (1 - fmask) * feat_ymax
        feat_xmax = fmask * bbox[3] + (1 - fmask) * feat_xmax
        # Check no annotation label: ignore these anchors...
        #interscts = intersection_with_anchors(bbox)
        #mask = tf.logical_and(interscts > ignore_threshold,
        #                     label == no_annotation_label)
        # Replace scores by -1.
        #feat_scores = tf.where(mask, -tf.cast(mask, dtype), feat_scores)
        return [i+1, feat_scores,
                feat_ymin, feat_xmin, feat_ymax, feat_xmax]
    # Main loop definition.
    i = 0
    [i,feat_scores,
     feat_ymin, feat_xmin,
     feat_ymax, feat_xmax] = tf.while_loop(condition, body,
                                           [i, feat_scores,
                                            feat_ymin, feat_xmin,
                                            feat_ymax, feat_xmax])
    '''
    for i, bbox in enumerate(tf.unpack(bboxes, axis=0)):
        [i,feat_scores,feat_ymin,
         feat_xmin, feat_ymax, feat_xmax] = body(i, feat_scores,
                                                 feat_ymin, feat_xmin,
                                                 feat_ymax, feat_xmax,bbox)
    '''
    # Transform to center / size.
    feat_cy = (feat_ymax + feat_ymin) / 2.
    feat_cx = (feat_xmax + feat_xmin) / 2.
    feat_h = feat_ymax - feat_ymin
    feat_w = feat_xmax - feat_xmin
    # Encode features.
    # Offsets are expressed relative to the anchor size and divided by the
    # prior variances, per the SSD encoding.
    feat_cy = (feat_cy - yref) / href / prior_scaling[0]
    feat_cx = (feat_cx - xref) / wref / prior_scaling[1]
    feat_h = tf.log(feat_h / href) / prior_scaling[2]
    feat_w = tf.log(feat_w / wref) / prior_scaling[3]
    # Use SSD ordering: x / y / w / h instead of ours.
    feat_localizations = tf.stack([feat_cx, feat_cy, feat_w, feat_h], axis=-1)
    return feat_localizations, feat_scores
def tf_text_bboxes_encode(bboxes,
                          anchors, num,
                          matching_threshold=0.5,
                          prior_scaling=[0.1, 0.1, 0.2, 0.2],
                          dtype=tf.float32,
                          scope='text_bboxes_encode'):
    """Encode groundtruth boxes against the anchors of every feature layer.

    Calls tf_text_bboxes_encode_layer once per anchor layer and collects the
    per-layer results.

    Arguments:
      bboxes: Nx4 float Tensor with groundtruth boxes (relative coords).
      anchors: list of per-layer anchor tuples (numpy arrays).
      num: number of valid groundtruth boxes.
      matching_threshold: minimum Jaccard overlap for a positive match.
      prior_scaling: scaling of the encoded coordinates.
      dtype: dtype of the produced tensors.
      scope: currently unused -- the name scope below is hard-coded; the
        parameter is kept for interface compatibility.
    Return:
      (target_localizations, target_scores): one Tensor per layer in each
      list.  Scores double as labels since text detection is binary.
    """
    with tf.name_scope('text_bboxes_encode'):
        target_labels = []  # unused; kept because detection here is binary
        all_localizations = []
        all_scores = []
        for layer_idx, layer_anchors in enumerate(anchors):
            with tf.name_scope('bboxes_encode_block_%i' % layer_idx):
                layer_loc, layer_scores = tf_text_bboxes_encode_layer(
                    bboxes, layer_anchors, num,
                    matching_threshold, prior_scaling, dtype)
                all_localizations.append(layer_loc)
                all_scores.append(layer_scores)
        return all_localizations, all_scores
## produce anchor for one layer
# each feature point has 12 default textboxes(6 boxes + 6 offsets boxes)
# aspect ratios = (1,2,3,5,7,10)
# feat_size :
# conv4_3 ==> 38 x 38
# fc7 ==> 19 x 19
# conv6_2 ==> 10 x 10
# conv7_2 ==> 5 x 5
# conv8_2 ==> 3 x 3
# pool6 ==> 1 x 1
def textbox_anchor_one_layer(img_shape,
                             feat_size,
                             ratios,
                             scale,
                             offset = 0.5,
                             dtype=np.float32):
    """Compute TextBoxes default-box centers and sizes for one feature layer.

    Each feature-map cell gets two y/x center positions -- the cell center
    and a copy shifted vertically by `offset` cells (the paper's
    vertical-offset anchors) -- plus one (h, w) pair per aspect ratio.

    Arguments:
      img_shape: (height, width) of the input image.  Unused; kept for
        interface compatibility with callers.
      feat_size: (rows, cols) of the feature map.
      ratios: iterable of aspect ratios (the paper uses 1, 2, 3, 5, 7, 10).
      scale: anchor scale s_k for this layer.
      offset: vertical shift (in cells) of the second anchor row.
      dtype: numpy dtype of the returned arrays.
    Returns:
      y_out, x_out: arrays of shape (rows, cols, 2, 1) with relative center
        coordinates (axis 2 holds the plain and the offset variant).
      h, w: arrays of shape (len(ratios),) with relative box sizes.
    """
    # Cell-center coordinates, in units of cells.
    y, x = np.mgrid[0:feat_size[0], 0:feat_size[1]] + 0.5
    y_offset = y + offset
    # Normalize to [0, 1]: row coordinates by the row count, column
    # coordinates by the column count.
    # BUGFIX: the offset row was previously divided by feat_size[1] (cols);
    # for non-square feature maps that placed the vertical-offset anchors at
    # the wrong height.  Square maps are unaffected.
    y = y.astype(dtype) / feat_size[0]
    x = x.astype(dtype) / feat_size[1]
    x_offset = x  # the offset is vertical only, x is unchanged
    y_offset = y_offset.astype(dtype) / feat_size[0]
    # Stack (plain, offset) variants and append a broadcast axis for the
    # per-ratio dimension.
    x_out = np.stack((x, x_offset), -1)
    y_out = np.stack((y, y_offset), -1)
    y_out = np.expand_dims(y_out, axis=-1)
    x_out = np.expand_dims(x_out, axis=-1)
    # One (h, w) pair per aspect ratio: h = s/sqrt(r), w = s*sqrt(r).
    # Generalized from a hard-coded count of 6 to len(ratios): identical for
    # the standard 6-ratio configuration, while previously fewer ratios
    # produced padded zero-size anchors and more than 6 raised IndexError.
    sqrt_r = np.sqrt(np.asarray(ratios, dtype=dtype))
    h = (scale / sqrt_r).astype(dtype)
    w = (scale * sqrt_r).astype(dtype)
    return y_out, x_out, h, w
## produce anchor for all layers
def textbox_achor_all_layers(img_shape,
layers_shape,
anchor_ratios,
scales,
offset=0.5,
dtype=np.float32):
"""
Compute anchor boxes for all feature layers.
"""
layers_anchors = []
for i, s in enumerate(layers_shape):
anchor_bboxes = textbox_anchor_one_layer(img_shape, s,
anchor_ratios,
scales[i],
offset=offset, dtype=dtype)
layers_anchors.append(anchor_bboxes)
return layers_anchors
if __name__ == "__main__":
    # Smoke test: build anchors for a conv4_3-sized layer and print shapes.
    # BUGFIX: the original used Python 2 `print` statements, which are
    # SyntaxErrors under Python 3; converted to print() calls.
    scales = [0.2, 0.34, 0.48, 0.62, 0.76, 0.90]
    y_out, x_out, h, w = textbox_anchor_one_layer((300, 300), (38,38), (1,2,3,5,7,10), scale=0.2)
    print(y_out.shape, x_out.shape, h.shape, w.shape)
    ymin = y_out - h / 2.
    print(ymin.shape)
    yref, xref, href, wref = y_out, x_out, h, w
    # Corner coordinates and areas of every anchor (broadcast over ratios).
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.
    vol_anchors = (xmax - xmin) * (ymax - ymin)
    print(href.size)
|
{"hexsha": "c06bf8cc15b265093a3f079ec72c0ab25d70e93a", "size": 10065, "ext": "py", "lang": "Python", "max_stars_repo_path": "openvision/ocr/textbox/nets/textbox_common.py", "max_stars_repo_name": "liuzz1983/open_vision", "max_stars_repo_head_hexsha": "f346e2f789944ea590c1d263e72a6e93490bb3a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openvision/ocr/textbox/nets/textbox_common.py", "max_issues_repo_name": "liuzz1983/open_vision", "max_issues_repo_head_hexsha": "f346e2f789944ea590c1d263e72a6e93490bb3a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openvision/ocr/textbox/nets/textbox_common.py", "max_forks_repo_name": "liuzz1983/open_vision", "max_forks_repo_head_hexsha": "f346e2f789944ea590c1d263e72a6e93490bb3a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7335766423, "max_line_length": 98, "alphanum_fraction": 0.5554893194, "include": true, "reason": "import numpy", "num_tokens": 2602}
|
from argparse import ArgumentParser
import os
import cv2
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning import Trainer, loggers
from torchsummary import summary
import torch.nn.functional as F
from autoencoder import Autoencoder
from harbour_datamodule import list_frames_in_dir
def play_thermal(view, crop, set, encoder=None, decoder=None, norm=True, save=True, n_channels=1):
    """Step through the frames of one dataset split, optionally running each
    frame through an encoder/decoder pair and showing input vs reconstruction.

    Navigation: any key advances one frame; Esc (key 27) quits.

    Arguments:
      view, crop, set: path components under data/ selecting the frame set.
        NOTE(review): `set` shadows the builtin of the same name.
      encoder, decoder: trained torch modules (None to just browse frames).
      norm: whether inputs were normalized to [-1, 1] during training.
      save: also write each composed frame to output/.
      n_channels: 3 expects (intensity, dx, dy) channel images.
    """
    frames = list_frames_in_dir(os.path.join('data/',view,crop,set), 'png')
    if frames is None:
        return
    if save:
        # Create output/<view>/<set>/ if missing.
        output_dir = 'output'
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        view_dir = os.path.join(output_dir,view)
        if not os.path.exists(view_dir):
            os.mkdir(view_dir)
        set_dir = os.path.join(view_dir,set)
        if not os.path.exists(set_dir):
            os.mkdir(set_dir)
    # NOTE(review): `input` shadows the builtin; `rec`/`loss` are rebound to
    # tensors inside the loop, and `file` is never used.
    input, rec, file, loss = [], [], [], []
    print(len(frames))
    for i, path in enumerate(frames):
        img = cv2.imread(path)
        input.append(img)
        if n_channels == 3:
            intensity, dx, dy = cv2.split(img)
        # NOTE(review): `model` is not defined anywhere in this scope, so
        # this line raises NameError at runtime; presumably
        # `if encoder is not None and decoder is not None:` was intended --
        # confirm before relying on this path.
        if model:
            # HWC uint8 -> CHW float in [0, 1].
            img = img.transpose((2, 0, 1))
            img = img / 255.0
            if norm:
                # Normalize each channel to [-1, 1]: (x - 0.5) / 0.5.
                img[0] = (img[0] - 0.5)/0.5
                img[1] = (img[1] - 0.5)/0.5
                img[2] = (img[2] - 0.5)/0.5
            img = torch.from_numpy(img)
            img = img.float()
            # Encode and reconstruct a single-image batch.
            z = encoder(img.unsqueeze(0))
            rec = decoder(z)[0]
            loss = F.mse_loss(rec, img)
            if norm:
                # NOTE(review): the inverse of (x - 0.5) / 0.5 is
                # x * 0.5 + 0.5; the minus here looks like a sign bug --
                # confirm the intended denormalization.
                rec[0] = rec[0] * 0.5 - 0.5
                rec[1] = rec[1] * 0.5 - 0.5
                rec[2] = rec[2] * 0.5 - 0.5
            #rec = rec.mul(255).permute(1, 2, 0).byte().numpy()
            input.append(img)
            # NOTE(review): `rec` is still a torch tensor here; cv2.split
            # expects a numpy HWC image (the commented conversion above was
            # probably meant to run first) -- confirm.
            intensity_, dx_, dy_ = cv2.split(rec)
            # Side-by-side channels, original on top of reconstruction.
            vis_org = np.concatenate((intensity, dx, dy), axis=1)
            vis_reg = np.concatenate((intensity_, dx_, dy_), axis=1)
            vis = np.concatenate((vis_org, vis_reg), axis=0)
        else:
            # NOTE(review): intensity/dx/dy are only bound when
            # n_channels == 3; with the default n_channels=1 this raises
            # UnboundLocalError -- confirm.
            vis = np.concatenate((intensity, dx, dy), axis=1)
        # Overlay the frame index and upscale 3x for display.
        vis = cv2.putText(vis, str(i).zfill(4), (5,10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, 255, 1, cv2.LINE_AA)
        vis = cv2.resize(vis, (vis.shape[1]*3,vis.shape[0]*3), interpolation = cv2.INTER_AREA)
        cv2.imshow(set,vis)
        if save:
            cv2.imwrite("output/{}.png".format(str(i).zfill(5)),vis)
        key = cv2.waitKey(0)
        if key == 27:
            break
def test(hparams, path):
    """Load the serialized encoder/decoder and replay the test split through
    them via play_thermal.

    Arguments:
      hparams: parsed argparse namespace; only `hparams.nc` (channel count)
        is used here.
      path: checkpoint path.  Currently unused (the commented-out
        checkpoint-loading variants were abandoned); kept for interface
        compatibility.
    """
    #model = Autoencoder.load_from_checkpoint(path)
    #model = Autoencoder(hparams)
    # BUGFIX: torch.load() already returns the deserialized module; the
    # original called torch.load(torch.load('encoder.pt')), passing a module
    # object where a file path is expected, which fails at runtime.
    encoder = torch.load('encoder.pt')
    decoder = torch.load('decoder.pt')
    # BUGFIX: the original called eval() on an undefined `model` variable;
    # put both loaded halves into inference mode instead.
    encoder.eval()
    decoder.eval()
    #play(data_root = hparams.data_root, set = 'test_val', folder = 'aligned_out_item00_image00', model = model)
    play_thermal(view = 'view1_nc1', crop = 'crop0', set = 'test',
                 encoder=encoder, decoder=decoder, n_channels=hparams.nc)
if __name__ == "__main__":
    # Command-line interface; `opt` is shorthand for parser.add_argument to
    # keep the option table compact.  Option order and help strings are
    # unchanged.
    parser = ArgumentParser()
    opt = parser.add_argument
    opt("--data_root", type=str, default="data/view1_nc1/crop0", help="View root directory")
    opt("--log_dir", type=str, default="logs", help="Logging directory")
    opt("--num_workers", type=int, default=4, help="num_workers > 0 turns on multi-process data loading")
    opt("--image_size", type=int, default=64, help="Spatial size of training images")
    opt("--max_epochs", type=int, default=10, help="Number of maximum training epochs")
    opt("--batch_size", type=int, default=128, help="Batch size during training")
    opt("--nc", type=int, default=1, help="Number of channels in the training images")
    opt("--norm", type=int, default=0, help="Normalize or not")
    opt("--nz", type=int, default=8, help="Size of latent vector z")
    opt("--nfe", type=int, default=32, help="Size of feature maps in encoder")
    opt("--nfd", type=int, default=32, help="Size of feature maps in decoder")
    opt("--lr", type=float, default=0.0002, help="Learning rate for optimizer")
    opt("--beta1", type=float, default=0.9, help="Beta1 hyperparameter for Adam optimizer")
    opt("--beta2", type=float, default=0.999, help="Beta2 hyperparameter for Adam optimizer")
    opt("--gpus", type=int, default=1, help="Number of GPUs. Use 0 for CPU mode")
    args = parser.parse_args()
    # Checkpoint produced by a previous training run of this view/crop.
    model_path = 'logs/dadata/view1_nc1/crop0_is64_nc1/version_6/checkpoints/epoch=6.ckpt'
    test(args, model_path)
    #play(view = 'view1', crop = 'crop0', set = 'test')
|
{"hexsha": "34e0177e697ea98cee1ad015cce46cdc1dbaf92c", "size": 4761, "ext": "py", "lang": "Python", "max_stars_repo_path": "embed.py", "max_stars_repo_name": "markpp/thermal_autoencoder", "max_stars_repo_head_hexsha": "a122128f973f89aa61f641449d5cbe2dd222b40f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "embed.py", "max_issues_repo_name": "markpp/thermal_autoencoder", "max_issues_repo_head_hexsha": "a122128f973f89aa61f641449d5cbe2dd222b40f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "embed.py", "max_forks_repo_name": "markpp/thermal_autoencoder", "max_forks_repo_head_hexsha": "a122128f973f89aa61f641449d5cbe2dd222b40f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.8918918919, "max_line_length": 121, "alphanum_fraction": 0.6242386053, "include": true, "reason": "import numpy", "num_tokens": 1276}
|
import numpy as np
from pyscf.lib.linalg_helper import eig
from pyscf.lib.numpy_helper import einsum
from scipy import linalg as la
import matplotlib.pyplot as plt
def createMPO(hamType,hamParams):
    """Build the matrix-product-operator tensors for a driven lattice gas.

    Arguments:
      hamType: 'tasep' (totally asymmetric exclusion, params
        (alpha, beta, s)) or 'sep' (general exclusion, params
        (alpha, delta, gamma, beta, p, q, s)).
      hamParams: sequence of rates plus the counting field s.
    Returns:
      W: list of three MPO tensors [left edge, right edge, bulk] with shapes
      (1, D, 2, 2), (D, 1, 2, 2) and (D, D, 2, 2), where D = 4 for 'tasep'
      and D = 6 for 'sep'.
    """
    ############################################
    # Single-site 2x2 operators in the occupation basis.
    Sp = np.array([[0,1],[0,0]])   # raising (create particle)
    Sm = np.array([[0,0],[1,0]])   # lowering (annihilate particle)
    n = np.array([[0,0],[0,1]])    # number operator
    v = np.array([[1,0],[0,0]])    # vacancy projector
    I = np.array([[1,0],[0,1]])    # identity
    z = np.array([[0,0],[0,0]])    # zero block
    W = []
    if hamType == 'tasep':
        alpha = hamParams[0]
        beta = hamParams[1]
        s = hamParams[2]
        # s-weighted hop terms carry the exp(-s) counting factor.
        W.append(np.array([[alpha*(np.exp(-s)*Sm-v),np.exp(-s)*Sp,-n,I]]))
        W.append(np.array([[I],[Sm],[v],[beta*(np.exp(-s)*Sp-n)]]))
        W.append(np.array([[I,z,z,z],
                           [Sm,z,z,z],
                           [v,z,z,z],
                           [z,np.exp(-s)*Sp,-n,I]]))
    elif hamType == 'sep':
        alpha = hamParams[0]
        delta = hamParams[1]
        gamma = hamParams[2]
        beta = hamParams[3]
        p = hamParams[4]
        q = hamParams[5]
        s = hamParams[6]
        # Counting-field-weighted rates (forward hops weighted by exp(-s),
        # backward by exp(+s)).
        exp_alpha = np.exp(-s)*alpha
        exp_beta = np.exp(-s)*beta
        exp_p = np.exp(-s)*p
        exp_q = np.exp(s)*q
        exp_delta = np.exp(s)*delta
        exp_gamma = np.exp(s)*gamma
        W.append(np.array([[exp_alpha*Sm-alpha*v+exp_gamma*Sp-gamma*n, Sp, -n, Sm,-v, I]]))
        W.append(np.array([[I                                      ],
                           [exp_p*Sm                               ],
                           [p*v                                    ],
                           [exp_q*Sp                               ],
                           [q*n                                    ],
                           [exp_delta*Sm-delta*v+exp_beta*Sp-beta*n]]))
        W.append(np.array([[I,        z,  z,  z,  z, z],
                           [exp_p*Sm, z,  z,  z,  z, z],
                           [p*v,      z,  z,  z,  z, z],
                           [exp_q*Sp, z,  z,  z,  z, z],
                           [q*n,      z,  z,  z,  z, z],
                           [z,        Sp, -n, Sm, -v, I]]))
    ############################################
    return W
def createInitMPS(W,maxBondDim=10,d=2):
    """Exactly diagonalize a two-site unit cell of the MPO `W`, split the
    dominant right eigenvector into an initial MPS (A, B), and build the
    left/right (Hamiltonian) environment blocks for the iMPS sweep.

    Arguments:
      W: list of MPO tensors from createMPO ([left, right, bulk]).
      maxBondDim: maximum retained bond dimension.
      d: local (physical) dimension per site.
    Returns:
      ([A, B], [LBlock, RBlock], [LHBlock, RHBlock]).
    """
    ############################################
    # Make Initial Unit Cell
    H = np.zeros((2**2,2**2))
    occ = np.zeros((2**2,2),dtype=int)
    sum_occ = np.zeros(2**2,dtype=int)
    for i in range(2**2):
        # Occupation pattern of basis state i: its 2-digit binary expansion.
        occ[i,:] = np.asarray(list(map(lambda x: int(x),'0'*(2-len(bin(i)[2:]))+bin(i)[2:])))
        #print(occ[i,:])
        sum_occ[i] = np.sum(occ[i,:])
    # Calculate Hamiltonian
    # Matrix element <i|H|j> from contracting the MPO bond indices of the
    # two edge tensors.
    for i in range(2**2):
        i_occ = occ[i,:]
        for j in range(2**2):
            j_occ = occ[j,:]
            tmp_mat0 = np.array([[1]])
            for k in range(2):
                tmp_mat0 = einsum('ij,jk->ik',tmp_mat0,W[k][:,:,i_occ[k],j_occ[k]])
            # NOTE(review): tmp_mat0[[0]] is a (1, 1) array added into a
            # scalar slot; this relies on NumPy's size-1 broadcasting
            # (deprecated in newer NumPy).  tmp_mat0[0,0] was presumably
            # intended -- confirm before upgrading NumPy.
            H[i,j] += tmp_mat0[[0]]
    # Diagonalize Hamiltonian
    # Keep the eigenvalue with the largest sort position and its left/right
    # eigenvectors (the dominant/steady-state mode).
    e0,lwf,rwf = la.eig(H,left=True)
    inds = np.argsort(e0)
    e0 = e0[inds[-1]]
    rwf = rwf[:,inds[-1]]
    lwf = lwf[:,inds[-1]]
    #print(einsum('i,ij,j->',rwf.conj(),H,rwf)/einsum('i,i->',rwf.conj(),rwf))
    #print(einsum('i,ij,j->',lwf.conj(),H,rwf)/einsum('i,i->',lwf.conj(),rwf))
    # Ensure Proper Normalization
    # <-|R> = 1
    # <L|R> = 1
    rwf = rwf/np.sum(rwf)
    lwf = lwf/np.sum(lwf*rwf)
    print('\nExact Diagonalization Energy: {}'.format(e0))
    print('Energy Check {}'.format(einsum('i,ij,j->',lwf.conj(),H,rwf)/einsum('i,i->',lwf.conj(),rwf)))
    ############################################
    ############################################
    # Reshape wavefunction for SVD
    # View the 4-component vector as a (site1, site2) matrix.
    rpsi = np.reshape(rwf,(2,2))
    lpsi = np.reshape(lwf,(2,2))
    print('After Reshaping, Energy = {}'.format(einsum('ij,klim,lnjo,mo->',rpsi.conj(),W[0],W[1],rpsi)/
                                                einsum('ij,ij->',rpsi.conj(),rpsi)))
    ############################################
    ############################################
    # Do SVD of initial unit cell
    U,S,V = np.linalg.svd(rpsi)
    a = [1,min(maxBondDim,d)] # Keep Track of bond dimensions
    # Left/right isometries with index order (physical, left bond, right bond).
    A = np.reshape(U,(a[0],d,a[1]))
    A = np.swapaxes(A,0,1)
    B = np.reshape(V,(a[1],d,a[0]))
    B = np.swapaxes(B,0,1)
    print(rpsi)
    print(A)
    print(S)
    print(B)
    print('After SVD, Energy = {}'.format(einsum('jik,k,lkm,nojr,oplt,rqs,s,tsu->',A.conj(),S,B.conj(),W[0],W[1],A,S,B)/
                                          einsum('jik,k,lkm,jno,o,lop->',A.conj(),S,B.conj(),A,S,B)))
    # Store left and right environments
    # Norm environments (LBlock/RBlock) and Hamiltonian environments
    # (LHBlock/RHBlock) used by the growth loop in runOpt.
    LBlock = einsum('jik,jno->ko',A.conj(),A)
    RBlock = einsum('lkm,lop->ko',B.conj(),B)
    LHBlock= einsum('jik,nojr,rqs->kos',A.conj(),W[0],A)
    RHBlock= einsum('lkm,oplt,tsu->kos',B.conj(),W[1],B)
    E = einsum('ijk,i,k,ijk->',LHBlock,S,S,RHBlock) / einsum('ko,k,o,ko->',LBlock,S,S,RBlock)
    print('Energy = {}'.format(E))
    ############################################
    return ([A,B],[LBlock,RBlock],[LHBlock,RHBlock])
def makeBlocks(A,B,LBlock,RBlock,LHBlock,RHBlock):
    """Absorb one more (A, B) site pair into the norm and Hamiltonian
    environment blocks, growing the chain by two sites.

    NOTE(review): reads the module-global bulk MPO tensor `W[2]`; it is not
    passed as an argument, so `W` must exist at module scope (set by the
    __main__ block) before this is called.
    """
    LBlock = einsum('ij,kil,kim->lm',LBlock,A.conj(),A)
    RBlock = einsum('ijk,ilm,km->jl',B.conj(),B,RBlock)
    LHBlock= einsum('ijk,lim,jnlo,okp->mnp',LHBlock,A.conj(),W[2],A)
    RHBlock= einsum('ijk,lmin,nop,kmp->jlo',B.conj(),W[2],B,RHBlock)
    return (LBlock,RBlock,LHBlock,RHBlock)
def decompose(psi,a,d=2):
    """Split a two-site wavefunction matrix into left/right MPS tensors.

    SVDs `psi`, truncates to bond dimension a[1], and reshapes the factors
    into tensors with index order (physical, left bond, right bond).

    Arguments:
      psi: (a[0]*d, d*a[0]) matrix form of the two-site state.
      a: [previous bond dim, new bond dim] -- truncation targets.
      d: local physical dimension.
    Returns:
      (A, S, B): left isometry, truncated singular values, right isometry.
    """
    # Canonicalize via SVD, then truncate each factor to the new bond dim.
    left, sing, right = np.linalg.svd(psi)
    A = np.swapaxes(np.reshape(left, (a[0], d, -1))[:, :, :a[1]], 0, 1)
    B = np.swapaxes(np.reshape(right, (-1, d, a[0]))[:a[1], :, :], 0, 1)
    return (A, sing[:a[1]], B)
def runOpt(MPS,Block,HBlock,maxBondDim=10,maxIter=1000,tol=1e-8,plotConv=True,d=2):
    """Infinite-DMRG growth loop: repeatedly insert two sites, diagonalize
    the effective two-site Hamiltonian, truncate, and absorb the new sites
    into the environments until the energy density converges.

    NOTE(review): relies on the module-global MPO list `W` (via the einsum
    below and via makeBlocks); `W` must be set at module scope first.

    Arguments:
      MPS: [A, B] tensors from createInitMPS.
      Block: [LBlock, RBlock] norm environments.
      HBlock: [LHBlock, RHBlock] Hamiltonian environments.
      maxBondDim: bond-dimension cap for truncation.
      maxIter: maximum number of growth steps.
      tol: convergence threshold on the energy density change.
      plotConv: live-plot the convergence with matplotlib (interactive).
      d: local physical dimension.
    Returns:
      E: converged energy per bond (may be complex; eigenvalues of the
      non-symmetric generator are complex in general).
    """
    ############################################
    converged = False
    iterCnt = 0
    nBond = 1
    E_prev = 0
    a = [1,min(maxBondDim,d)] # Keep Track of bond dimensions
    A,B = MPS[0],MPS[1]
    LBlock,RBlock = Block[0],Block[1]
    LHBlock,RHBlock = HBlock[0],HBlock[1]
    if plotConv:
        fig = plt.figure()
        ax1 = plt.subplot(121)
        ax2 = plt.subplot(122)
        Evec = []
        nBondVec = []
    while not converged:
        # Each step inserts two sites in the middle of the chain.
        nBond += 2
        a[0] = a[1]
        a[1] = min(maxBondDim,a[0]*2)
        # -----------------------------------------------------------------------------
        # Determine Hamiltonian
        # Effective two-site Hamiltonian from the environments and two bulk
        # MPO tensors (module-global W).
        H = einsum('ijk,jlmn,lopq,ros->mpirnqks',LHBlock,W[2],W[2],RHBlock)
        (n1,n2,n3,n4,n5,n6,n7,n8) = H.shape
        print(n1,n2,n3,n4)
        H = np.reshape(H,(n1*n2*n3*n4,n5*n6*n7*n8))
        # -----------------------------------------------------------------------------
        # Solve Eigenproblem
        # NOTE(review): eigenvalues are complex; np.argsort orders them
        # lexicographically (real part, then imaginary) -- confirm this
        # always selects the physically dominant eigenvalue.
        u,v = la.eig(H)
        ind = np.argsort(u)[-1]
        E = u[ind]/nBond
        v = v[:,ind]
        print('\tEnergy from Optimization = {}'.format(E))
        # ------------------------------------------------------------------------------
        # Reshape result into state
        psi = np.reshape(v,(n1,n2,n3,n4)) # s_l s_(l+1) a_(l-1) a_(l+1)
        psi = np.transpose(psi,(2,0,1,3)) # a_(l-1) s_l a_(l+1) s_(l+1)
        psi = np.reshape(psi,(n3*n1,n4*n2))
        # ------------------------------------------------------------------------------
        # Perform USV Decomposition
        (A,S,B) = decompose(psi,a,d=2)
        # -----------------------------------------------------------------------------
        # Store left and right environments
        (LBlock,RBlock,LHBlock,RHBlock) = makeBlocks(A,B,LBlock,RBlock,LHBlock,RHBlock)
        # ------------------------------------------------------------------------------
        # Check for convergence
        # Converge when the energy density stops changing, or give up after
        # maxIter growth steps.
        if np.abs(E - E_prev) < tol:
            converged = True
            print('System Converged {} {}'.format(E,E_prev))
        elif iterCnt == maxIter:
            converged = True
            print('Convergence not acheived')
        else:
            E_prev = E
            iterCnt += 1
            if plotConv:
                Evec.append(E)
                nBondVec.append(nBond)
                ax1.cla()
                ax1.plot(nBondVec,Evec,'r.')
                ax2.cla()
                ax2.semilogy(nBondVec[:-1],np.abs(Evec[:-1]-Evec[-1]),'r.')
                plt.pause(0.01)
    return E
if __name__ == "__main__":
    ############################################
    # Model parameters for the TASEP current calculation.
    alpha = 0.35
    beta = 2./3.
    p = 1.
    s = -1.
    ds = 0.01
    hamType = 'tasep'
    ############################################
    # Central-difference estimate of the particle current: run the full
    # optimization at s + ds and s - ds and differentiate the scaled
    # cumulant-generating "energy" with respect to s.
    energies = []
    for s_shifted in (s + ds, s - ds):
        # makeBlocks/runOpt read the module-global W, so it must keep this
        # name at module scope.
        W = createMPO(hamType, (alpha, beta, s_shifted))
        (MPS, Block, HBlock) = createInitMPS(W, maxBondDim=10)
        energies.append(runOpt(MPS, Block, HBlock))
    print('Current = {}'.format((energies[0] - energies[1])/(2*ds)))
|
{"hexsha": "6189fc7aa4b7060f197a8c8e5819cced3f2ba735", "size": 8981, "ext": "py", "lang": "Python", "max_stars_repo_path": "old/iMPS_2site.py", "max_stars_repo_name": "philliphelms/iMPS", "max_stars_repo_head_hexsha": "ce7e097bf67ad68a01cf8180c3f7577660c38ee8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "old/iMPS_2site.py", "max_issues_repo_name": "philliphelms/iMPS", "max_issues_repo_head_hexsha": "ce7e097bf67ad68a01cf8180c3f7577660c38ee8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "old/iMPS_2site.py", "max_forks_repo_name": "philliphelms/iMPS", "max_forks_repo_head_hexsha": "ce7e097bf67ad68a01cf8180c3f7577660c38ee8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.563876652, "max_line_length": 120, "alphanum_fraction": 0.4380358535, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2734}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : Communication.py
# @Time : 2021/11/5 11:20 上午
# @Author : Mingxue Cai
# @Email : im_caimingxue@163.com
# @github : https://github.com/caimingxue/magnetic-robot-simulation
# @notice :
from math import *
import numpy as np
import struct
from TCP import TCPClient
from Ping import *
import time
class TCP_Communication(object):
    """Thin TCP client that streams fixed-size command frames to the robot.

    Frame layout (64 bytes total): 4-byte header 55 AA 99 11, fourteen
    big-endian IEEE-754 floats (56 bytes), 4-byte trailer AA BB CC DD --
    identical to concatenating fourteen struct.pack(">f", ...) results.
    """

    # Wire-protocol constants.
    HEADER = bytes.fromhex('55 AA 99 11')
    TRAILER = bytes.fromhex('AA BB CC DD')
    NUM_FIELDS = 14

    def __init__(self):
        # Address/port of the robot controller on the local network.
        self.client = TCPClient('192.168.1.195', 8181)
        if self.client.connect():
            print("================= TCP Commu. Success ====================")

    @staticmethod
    def _pack_frame(data):
        """Serialize the first NUM_FIELDS entries of `data` into one frame.

        Raises ValueError when fewer than NUM_FIELDS values are supplied
        (the original code indexed data[13] unconditionally and raised a
        bare IndexError).
        """
        if len(data) < TCP_Communication.NUM_FIELDS:
            raise ValueError(
                "send() requires at least {} values, got {}".format(
                    TCP_Communication.NUM_FIELDS, len(data)))
        # ">14f" packs 14 big-endian floats back-to-back, byte-identical to
        # the fourteen individual struct.pack(">f", ...) calls it replaces.
        payload = struct.pack(
            ">{}f".format(TCP_Communication.NUM_FIELDS),
            *data[:TCP_Communication.NUM_FIELDS])
        return TCP_Communication.HEADER + payload + TCP_Communication.TRAILER

    def send(self, data):
        """Pack the first 14 entries of `data` and transmit them."""
        self.client.sendBytes(self._pack_frame(data))
        print("========================== Send Data Sucess ==================")
# def main():
# Robot = TCP_Communication()
# time.sleep(5)
#
# n = 0
# while n < 10000:
# magData = [2.0, 2.0, 90.0, 90.0, 2.0, 2.0, 30.0, 60.0, 60.0, 60.0, 45.0, 45.0, 45.0]
# Robot.send(magData)
# time.sleep(0.5)
# n = n + 1
# Robot.client.close()
# if __name__ == "__main__":
# main()
|
{"hexsha": "2c5f0a784facc7dcab1f7bc98a0ce04bb541d5ba", "size": 2146, "ext": "py", "lang": "Python", "max_stars_repo_path": "color_tracker/utils/communication.py", "max_stars_repo_name": "caimingxue/color_tracker", "max_stars_repo_head_hexsha": "11e00daf540a46022dc16a2f79ce4a787dce4f9b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "color_tracker/utils/communication.py", "max_issues_repo_name": "caimingxue/color_tracker", "max_issues_repo_head_hexsha": "11e00daf540a46022dc16a2f79ce4a787dce4f9b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "color_tracker/utils/communication.py", "max_forks_repo_name": "caimingxue/color_tracker", "max_forks_repo_head_hexsha": "11e00daf540a46022dc16a2f79ce4a787dce4f9b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0634920635, "max_line_length": 138, "alphanum_fraction": 0.5680335508, "include": true, "reason": "import numpy", "num_tokens": 693}
|
[STATEMENT]
lemma iso5_sharp [simp]: "(((x \<sqinter> nc) \<cdot> 1\<^sub>\<pi>) \<parallel> nc) \<cdot> 1\<^sub>\<pi> = (x \<sqinter> nc) \<cdot> 1\<^sub>\<pi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<sqinter> nc) \<cdot> 1\<^sub>\<pi> \<parallel> nc \<cdot> 1\<^sub>\<pi> = (x \<sqinter> nc) \<cdot> 1\<^sub>\<pi>
[PROOF STEP]
by (simp add: local.c3 local.c4)
|
{"llama_tokens": 172, "file": "Multirelations_C_Algebras", "length": 1}
|
function gather_check_dims(X::AbstractArray{Tx,Nx},
                           Y::AbstractArray{Ty,Ny},
                           idx::AbstractArray{Tidx,Nidx}) where
    {Tx,Ty,Tidx<:IntOrIntTuple,Nx,Ny,Nidx}
    # Each integer (or integer-tuple) index entry addresses M trailing
    # dimensions of the source; the remaining leading dims are copied whole.
    M = NNlib.typelength(Tidx)
    dims = gather_check_dims(Nx, Ny, M, Nidx)
    # The shared leading dims of dst/src must agree, and the trailing dims
    # of src must match the index array's shape.
    if size(X)[1:dims] != size(Y)[1:dims]
        throw(ArgumentError("Incompatible input shapes."))
    end
    if size(Y)[dims+1:end] != size(idx)
        throw(ArgumentError("Incompatible input shapes."))
    end
    return dims
end
function gather_check_dims(X::AbstractArray{Tx,Nx},
                           Y::AbstractArray{Ty,Ny},
                           idx::AbstractArray{CartesianIndex{M},Nidx}) where
    {Tx,Ty,Nx,Ny,M,Nidx}
    # A CartesianIndex{M} entry addresses M trailing dimensions directly, so
    # no typelength lookup is needed for this method.
    dims = gather_check_dims(Nx, Ny, M, Nidx)
    if size(X)[1:dims] != size(Y)[1:dims]
        throw(ArgumentError("Incompatible input shapes."))
    end
    if size(Y)[dims+1:end] != size(idx)
        throw(ArgumentError("Incompatible input shapes."))
    end
    return dims
end
function gather_check_dims(Nx, Ny, M, Nidx)
    # Number of leading "feature" dimensions shared by dst and src.
    # Input validation uses explicit throws rather than `@assert`, which is
    # meant for internal invariants and may be compiled out at higher
    # optimization levels.
    Nx - M == Ny - Nidx ||
        throw(DimensionMismatch("Incompatible input shapes of (dst, src, idx) = ($Nx, $Ny, $Nidx)."))
    dims = Nx - M
    dims < 0 && throw(ArgumentError("dims must be non-negative but got dims=$dims."))
    return dims
end
# CUDA kernel: each thread copies one element of the gathered output.
# `max_dims_idx` is the linear extent of one gathered slice (prod of the
# leading source dims), `max_idx` the linear extent of the whole output.
function gather_kernel!(dst, src, idx, max_idx, max_dims_idx, dims_size)
    # Global 1-based linear thread index over the output.
    index = threadIdx().x + (blockIdx().x - 1) * blockDim().x
    @inbounds if index <= max_idx
        # Split the linear index into an entry of `idx` (j) and a position
        # inside the leading feature dims (k).
        j, k = divrem(index-1, max_dims_idx)
        dims_i = CartesianIndices(dims_size)[k+1]
        dst[index] = src[dims_i, idx[j+1]...]
    end
    return nothing
end
# GPU implementation of NNlib.gather!: fills `dst` with slices of `src`
# selected by `idx` and returns `dst`.
function NNlib.gather!(dst::AnyCuArray, src::AnyCuArray, idx::AnyCuArray)
    # Validate shapes and get the number of leading feature dimensions.
    dims = gather_check_dims(src, dst, idx)
    dims_size = size(src)[1:dims]
    # Linear extent of one gathered slice and of the whole output.
    max_dims_idx = prod(dims_size)
    max_idx = max_dims_idx * length(idx)
    args = dst, src, idx, max_idx, max_dims_idx, dims_size
    # Compile without launching, then pick an occupancy-derived launch
    # configuration capped at 256 threads per block.
    kernel = @cuda launch=false gather_kernel!(args...)
    config = launch_configuration(kernel.fun; max_threads=256)
    threads = min(max_idx, config.threads)
    blocks = cld(max_idx, threads)
    kernel(args...; threads=threads, blocks=blocks)
    return dst
end
|
{"hexsha": "dcfd29b03249e8ef8f88cea846440fd9303cf2ab", "size": 2166, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/gather.jl", "max_stars_repo_name": "yuehhua/NNlibCUDA.jl", "max_stars_repo_head_hexsha": "96a334633ef3a3707c85fc1754c2c7eb8849db4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gather.jl", "max_issues_repo_name": "yuehhua/NNlibCUDA.jl", "max_issues_repo_head_hexsha": "96a334633ef3a3707c85fc1754c2c7eb8849db4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gather.jl", "max_forks_repo_name": "yuehhua/NNlibCUDA.jl", "max_forks_repo_head_hexsha": "96a334633ef3a3707c85fc1754c2c7eb8849db4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1111111111, "max_line_length": 99, "alphanum_fraction": 0.6265004617, "num_tokens": 615}
|
import numpy
from IPython.display import HTML
import ipywidgets
from matplotlib import animation, pyplot
def create_init_fig(wrapped_signal, freq_arr, xcm_arr):
    """Create (without displaying) the two-panel figure used by the
    winding-frequency animation.

    Left panel: the signal wound around the unit circle at freq_arr[0].
    Right panel: the x center-of-mass of the wound curve vs frequency.

    Arguments:
      wrapped_signal: complex array g(t)*exp(-2*pi*i*f0*t) for frame 0.
      freq_arr: array of frequencies, one per animation frame.
      xcm_arr: array receiving the x center-of-mass per frequency.
    Returns:
      dict with 'fig' (the Figure), 'WSP' (wrapped-signal Line2D) and
      'AF' (center-of-mass Line2D), the artists updated each frame.
    """
    # NOTE: `ax` is unused; subplot2grid below creates the real axes.
    fig, ax = pyplot.subplots(figsize=(10.0, 5.0))
    pyplot.tight_layout()
    fig.suptitle('Frequency = {:.2f}'.format(freq_arr[0]))
    # Left third: unit-circle view; right two thirds: spectrum-like trace.
    ax1 = pyplot.subplot2grid((1, 3), (0, 0))
    ax2 = pyplot.subplot2grid((1, 3), (0, 1), colspan=2)
    circle1 = pyplot.Circle((0, 0), 1, fill=None, lw=2, ls='--', alpha=0.3)
    ax1.add_patch(circle1)
    ax1.grid()
    ticks= numpy.linspace(-1,1, 5, endpoint=True)
    ylabels = [-1, -0.5, None, 0.5, 1]
    ax1.set_xticks(ticks)
    ax1.set_yticks(ticks)
    ax1.set_yticklabels(ylabels)
    wrapped_signal_plot = ax1.plot(wrapped_signal.real,
                                   wrapped_signal.imag, alpha=0.5,
                                   label=r'$g(t)e^{2\pi ift}$')[0]
    # Move left y-axis and bottom x-axis to centre, passing through (0,0)
    ax1.spines['left'].set_position('center')
    ax1.spines['bottom'].set_position('center')
    # Eliminate upper and right axes
    ax1.spines['right'].set_color('none')
    ax1.spines['top'].set_color('none')
    ax1.set_adjustable('box')
    ax1.set_aspect('equal')
    ax1.set_xlim(-1.1,1.1)
    ax1.set_ylim(-1.1,1.1)
    ax1.legend(loc='upper left', bbox_to_anchor=(0.48, 1.12))
    #f_list = numpy.full_like(freqs, None)
    # Seed the center-of-mass trace with its first point only.
    almost_fourier_plot = ax2.plot(freq_arr[0], xcm_arr[0], '-')[0]
    ax2.spines['right'].set_color('none')
    ax2.spines['top'].set_color('none')
    ax2.set_adjustable('box')
    ax2.set_aspect('equal')
    ax2.set_xlabel('Frequency')
    ax2.set_ylabel('xcm')
    ax2.set_xlim(0.9,5.1)
    ax2.set_ylim(-0.3,1.1)
    ax2.grid()
    pyplot.tight_layout()
    # Close so the figure is only shown via the animation, not directly.
    pyplot.close()
    return {'fig': fig, 'WSP': wrapped_signal_plot, 'AF': almost_fourier_plot}
def comp_fourier_term(f, t):
    """Return exp(-2*pi*i*f*t): the unit-circle winding factor of the
    Fourier transform, elementwise over `t` (or `f`)."""
    return numpy.exp(-2j * numpy.pi * f * t)
def update_figure(f, anim_dict, g_t, t_arr, freq_arr, display_fig=False):
    """Animation callback: advance the figure to frequency index `f`.

    Arguments:
      f: frame number, an index into freq_arr.
      anim_dict: dict from create_init_fig, plus an 'xcm_arr' entry.
      g_t: the sampled signal g(t).
      t_arr: sample times matching g_t.
      freq_arr: frequencies, one per frame.
      display_fig: if True, also show the updated figure inline.
    """
    # Wind the signal around the unit circle at the current frequency.
    res = g_t * comp_fourier_term(freq_arr[f], t_arr)
    anim_dict['fig'].suptitle('Frequency = {:.2f}'.format(freq_arr[f]))
    # x center-of-mass of the wound curve -- the "almost Fourier" value.
    anim_dict['xcm_arr'][f] = numpy.mean(res.real)
    anim_dict['WSP'].set_data(res.real, res.imag)
    # Extend the center-of-mass trace up to the current frame.
    anim_dict['AF'].set_data(freq_arr[:f+1], anim_dict['xcm_arr'][:f+1])
    if display_fig:
        # NOTE(review): `display` is not imported in this module; it is only
        # an implicit builtin inside IPython/Jupyter.  Outside a notebook
        # this branch raises NameError -- confirm intended usage.
        display(anim_dict['fig'])
def create_animation(sinewave, time, freqs):
    """Build the winding-frequency animation and return it as embeddable
    HTML (an HTML5 video) for display in a notebook.

    Arguments:
      sinewave: sampled signal g(t).
      time: sample times matching `sinewave`.
      freqs: frequencies to sweep, one animation frame each.
    """
    # Frame 0: the signal wound at the first frequency.
    wrap0 = sinewave * comp_fourier_term(freqs[0], time)
    # Center-of-mass accumulator; for a float array, full_like(..., None)
    # fills with NaN so unvisited frequencies simply don't plot.
    xcm_array = numpy.full_like(freqs, None)
    xcm_array[0] = numpy.mean(wrap0.real)
    anim_dict = create_init_fig(wrap0, freqs, xcm_array)
    anim_dict['xcm_arr'] = xcm_array
    anim = animation.FuncAnimation(anim_dict['fig'], update_figure,
                                   frames=len(freqs),
                                   fargs=(anim_dict, sinewave, time, freqs),
                                   interval=300)
    # Display the animation.
    return HTML(anim.to_html5_video())
|
{"hexsha": "d236ac4ca7e988a60f80eae023eb738eb0ea38a3", "size": 3141, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/almost_fourier_helper.py", "max_stars_repo_name": "engineersCode/EngComp5_surfourier", "max_stars_repo_head_hexsha": "bdf2eb7330e555106f17e64b693f0fbdd04b7710", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-09T03:18:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-08T09:03:31.000Z", "max_issues_repo_path": "scripts/almost_fourier_helper.py", "max_issues_repo_name": "engineersCode/EngComp5_surfourier", "max_issues_repo_head_hexsha": "bdf2eb7330e555106f17e64b693f0fbdd04b7710", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/almost_fourier_helper.py", "max_forks_repo_name": "engineersCode/EngComp5_surfourier", "max_forks_repo_head_hexsha": "bdf2eb7330e555106f17e64b693f0fbdd04b7710", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-03-10T20:35:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-12T21:44:16.000Z", "avg_line_length": 28.8165137615, "max_line_length": 79, "alphanum_fraction": 0.6138172557, "include": true, "reason": "import numpy", "num_tokens": 928}
|
from timeit import timeit
from typing import Type
import numpy as np
import tensorflow as tf
from tensorflow.python.framework.errors_impl import FailedPreconditionError
import sandblox as sx
import sandblox.util.tf_util as U
from sandblox.core.io import bind_resolved
from sandblox.test.core.foo import FooLogic
class Suppressed(object):
	"""Namespace that hides the base test class from unittest discovery."""
	# Wrapped classes don't get tested themselves
	class TestBlockBase(object):
		"""Shared test harness for sandblox TF mold (block) implementations.

		Subclasses provide `mold_cls` (a well-formed mold) and `bad_mold_cls`
		(a mold whose logic violates the output contract). The tests exercise
		construction, input/output wiring, execution, session handling,
		variables, scoping and reuse.
		NOTE(review): relies on TF1-style graph/session APIs
		(tf.Session, tf.variable_scope) — confirm the pinned TF version.
		"""
		# Concrete mold classes are injected by subclasses.
		mold_cls = None # type: Type[sx.TFMold]
		bad_mold_cls = None # type: Type[sx.TFMold]
		def create_block(self, **props):
			# Instantiate the mold under test with the given properties.
			return self.mold_cls(**props)
		def create_bad_block(self, **props):
			# Instantiate the intentionally-broken mold.
			return self.bad_mold_cls(**props)
		def build_block(self, block=None, **props) -> sx.TFMold:
			# Build (wire up) a block using FooLogic's canonical arguments.
			if block is None:
				block = self.create_block()
			return FooLogic.args_call(block, props=sx.Props(**props))
		def create_bad_built_block(self, block=None, **props) -> sx.TFMold:
			# Same as build_block, but for the intentionally-broken mold.
			if block is None:
				block = self.create_bad_block()
			return FooLogic.args_call(block, props=sx.Props(**props))
		# Maximum tolerated ratio of block-call overhead vs a stubbed sess.run.
		OVERHEAD_RATIO_LIMIT = 15
		def __init__(self, method_name: str = 'runTest'):
			super(Suppressed.TestBlockBase, self).__init__(method_name)
			# Build one block up-front to capture the expected bound arguments
			# and raw logic outputs that the individual tests compare against.
			built_block = self.build_block()
			with tf.variable_scope(built_block.scope.rel, reuse=True):
				self.bound_flattened_logic_args = bind_resolved(FooLogic.call, *FooLogic.args,
																**FooLogic.kwargs)
				self.logic_outs = list(FooLogic.resolved_args_call(FooLogic.call))
			# Request partition graphs so test_run can assert they are produced.
			self.options = tf.RunOptions()
			self.options.output_partition_graphs = True
		def test_block_inputs(self):
			# Static inputs must match the flattened bound logic arguments.
			built_block = self.build_block()
			self.assertEqual(built_block.i.__dict__, self.bound_flattened_logic_args)
		def test_block_dynamic_inputs(self):
			# Dynamic inputs must match the resolved dynamic-input spec.
			built_block = self.build_block()
			self.assertEqual(built_block.di, [sx.resolve(*FooLogic.di)])
		def assertEqual(self, first, second, msg=None):
			# Compare by core op names so TF wrapper objects compare equal.
			first, second = U.core_op_name(first), U.core_op_name(second)
			super(Suppressed.TestBlockBase, self).assertEqual(first, second, msg)
		def test_block_out(self):
			# Named outputs map onto the logic's return values (note the swap:
			# `a` corresponds to logic_outs[1], `b` to logic_outs[0]).
			built_block = self.build_block()
			self.assertEqual(U.core_op_name(built_block.o.a), U.core_op_name(self.logic_outs[1]))
			self.assertEqual(U.core_op_name(built_block.o.b), U.core_op_name(self.logic_outs[0]))
		def test_block_out_order(self):
			# The ordered output list preserves the logic's return order.
			built_block = self.build_block()
			self.assertEqual(U.core_op_name(built_block.oz), U.core_op_name(self.logic_outs))
		def test_run(self):
			# End-to-end execution: run twice and check the deterministic output
			# shifts with the input while run options/metadata are honored.
			with tf.Session() as sess:
				built_block = self.build_block()
				sess.run(tf.variables_initializer(built_block.get_variables()))
				eval_100 = built_block.run(100)
				metadata = tf.RunMetadata()
				eval_0 = built_block.use(self.options, metadata).run(0)
				self.assertTrue(hasattr(metadata, 'partition_graphs') and len(metadata.partition_graphs) > 0)
				self.assertEqual(eval_100[0], eval_0[0] + 100)
				self.assertNotEqual(eval_100[1], eval_0[1]) # Boy aren't you unlucky if you fail this test XD
		def test_non_Out_return_assertion(self):
			# Building a mold whose logic returns a non-Out value must raise.
			with self.assertRaises(AssertionError) as bad_foo_context:
				with tf.Session(graph=tf.Graph()):
					self.create_bad_built_block(reuse=None)
			self.assertTrue('must return only' in str(bad_foo_context.exception))
		def test_run_overhead(self):
			# Measure framework overhead by stubbing out sess.run, then compare
			# against the stubbed baseline. The stub is restored afterwards.
			with tf.Session() as sess:
				built_block = self.build_block()
				sess.run(tf.variables_initializer(built_block.get_variables()))
				run_backup = built_block.built_fn.sess.run
				built_block.built_fn.sess.run = no_op_fn
				actual_elapse = timeit(lambda: built_block.run(100), number=1000)
				stub_elapse = timeit(lambda: built_block.built_fn.sess.run(), number=1000)
				built_block.built_fn.sess.run = run_backup
				overhead_ratio = (actual_elapse - stub_elapse) / stub_elapse
				if overhead_ratio > Suppressed.TestBlockBase.OVERHEAD_RATIO_LIMIT:
					self.fail('Overhead factor of %.1f exceeded limit of %.1f' % (
						overhead_ratio, Suppressed.TestBlockBase.OVERHEAD_RATIO_LIMIT))
				elif overhead_ratio / Suppressed.TestBlockBase.OVERHEAD_RATIO_LIMIT > 0.8:
					print('WARNING %s: Overhead factor of %.1f approaching limit of %.1f' % (
						type(self).__name__, overhead_ratio, Suppressed.TestBlockBase.OVERHEAD_RATIO_LIMIT))
		def test_session_specification(self):
			# A block bound to an explicit session must use it; switching to a
			# session on an empty graph must fail; non-session values must be
			# rejected at build time.
			sess = tf.Session(graph=tf.Graph())
			with tf.Session(graph=tf.Graph()):
				block = self.build_block(session=sess)
				with sess.graph.as_default():
					sess.run(tf.variables_initializer(block.get_variables()))
				self.assertEqual(block.sess, sess)
				block.run(100)
				block.set_session(tf.Session())
				self.assertNotEqual(block.sess, sess)
				with self.assertRaises(RuntimeError) as ctx:
					block.run(100)
				self.assertTrue('graph is empty' in str(ctx.exception))
				with self.assertRaises(AssertionError) as ctx:
					self.build_block(session='some_invalid_session')
				self.assertTrue('must be of type tf.Session' in str(ctx.exception))
		def test_get_variables(self):
			# Variables are scoped under the block's scope name, must be
			# uninitialized before init, and hold a single float32 after.
			with tf.Graph().as_default():
				block1 = self.build_block(scope_name='source')
				vars1 = block1.get_variables()
				self.assertEqual([var.name for var in vars1], ['source/foo_var:0'])
				init = tf.variables_initializer(vars1)
				with tf.Session() as sess:
					with self.assertRaises(FailedPreconditionError) as ctx:
						sess.run(vars1)
					self.assertTrue('source/foo_var' in ctx.exception.message)
					sess.run(init)
					vals1 = sess.run(vars1)
					self.assertEqual(len(vals1), 1)
					self.assertEqual(vals1[0], np.float32)
		def test_variable_assignment(self):
			# assign_vars copies source-block variables into the target block.
			with tf.Graph().as_default():
				block1 = self.build_block(scope_name='source')
				block2 = self.build_block(scope_name='block')
				vars1 = block1.get_variables()
				vars2 = block2.get_variables()
				init = tf.variables_initializer(vars1 + vars2)
				assignment_op = block2.assign_vars(block1)
				eq_op = tf.equal(vars1, vars2)
				with tf.Session() as sess:
					sess.run(init)
					self.assertTrue(not sess.run(eq_op))
					sess.run(assignment_op)
					self.assertTrue(sess.run(eq_op))
		def test_make_scope_unique(self):
			# Two blocks built under the same requested scope name must get
			# distinct (uniquified) variable scopes.
			with tf.Graph().as_default():
				block1 = self.build_block(scope_name='make_me_unique')
				block2 = self.build_block(scope_name='make_me_unique')
				vars1 = block1.get_variables()
				vars2 = block2.get_variables()
				self.assertTrue(all([var1.name != var2.name for var1, var2 in zip(vars1, vars2)]))
				init = tf.variables_initializer(vars1 + vars2)
				eq_op = tf.equal(vars1, vars2)
				var1_eq_var2 = [tf.assign(var1, var2) for var1, var2 in zip(vars1, vars2)]
				with tf.Session() as sess:
					sess.run(init)
					sess.run(var1_eq_var2)
					self.assertTrue(sess.run(eq_op))
					sess.run(init)
					self.assertTrue(not sess.run(eq_op))
		def test_reuse(self):
			# With reuse=True the second block shares the first block's
			# variables, so mutations through one are visible in the other.
			with tf.Graph().as_default():
				block1 = self.build_block(scope_name='reuse_me')
				block2 = self.build_block(scope_name='reuse_me', reuse=True)
				vars1 = block1.get_variables()
				vars2 = block2.get_variables()
				init = tf.variables_initializer(vars1 + vars2)
				eq_op = tf.equal(vars1, vars2)
				update_vars_1 = [tf.assign(var, 2) for var in vars1]
				with tf.Session() as sess:
					sess.run(init)
					self.assertTrue(sess.run(eq_op))
					sess.run(update_vars_1)
					self.assertTrue(sess.run(eq_op))
def no_op_fn(*_args, **_kwargs):
	"""Stand-in callable: accept anything, do nothing, return an empty tuple."""
	return tuple()
|
{"hexsha": "3b31f04834674c2a14f8b2add0c020b80aac7419", "size": 7279, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandblox/test/core/base.py", "max_stars_repo_name": "reubenjohn/sandblox", "max_stars_repo_head_hexsha": "0b7917eb866ddbc4749a098884046d4ebb441985", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-08T16:29:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T23:17:50.000Z", "max_issues_repo_path": "sandblox/test/core/base.py", "max_issues_repo_name": "reubenjohn/sandblox", "max_issues_repo_head_hexsha": "0b7917eb866ddbc4749a098884046d4ebb441985", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-12-16T21:08:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T07:06:43.000Z", "max_forks_repo_path": "sandblox/test/core/base.py", "max_forks_repo_name": "reubenjohn/sandblox", "max_forks_repo_head_hexsha": "0b7917eb866ddbc4749a098884046d4ebb441985", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-04-09T15:49:55.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-09T15:49:55.000Z", "avg_line_length": 37.5206185567, "max_line_length": 98, "alphanum_fraction": 0.7259238906, "include": true, "reason": "import numpy", "num_tokens": 1867}
|
import pickle
import numpy as np
from .band_interface import *
from .s1_interface import BigEarthNet_S1_Patch
from .s2_interface import BigEarthNet_S2_Patch
# FUTURE: Write a base class that gives the
# common skeleton to inherit from
class BigEarthNet_S1_S2_Patch:
    """Combined Sentinel-1 + Sentinel-2 BigEarthNet patch.

    Wraps a `BigEarthNet_S1_Patch` (VH/VV) and a `BigEarthNet_S2_Patch`
    (bands 01-12 plus 8A) and exposes all of their bands as one flat
    list (S1 bands first, then S2). Extra keyword arguments are stored
    as attributes and echoed by `__repr__`.
    """

    def __init__(
        self,
        bandVH: np.ndarray,
        bandVV: np.ndarray,
        band01: np.ndarray,
        band02: np.ndarray,
        band03: np.ndarray,
        band04: np.ndarray,
        band05: np.ndarray,
        band06: np.ndarray,
        band07: np.ndarray,
        band08: np.ndarray,
        band8A: np.ndarray,
        band09: np.ndarray,
        band11: np.ndarray,
        band12: np.ndarray,
        **kwargs,
    ):
        self.s1_patch = BigEarthNet_S1_Patch(bandVH=bandVH, bandVV=bandVV)
        self.s2_patch = BigEarthNet_S2_Patch(
            band01=band01,
            band02=band02,
            band03=band03,
            band04=band04,
            band05=band05,
            band06=band06,
            band07=band07,
            band08=band08,
            band8A=band8A,
            band09=band09,
            band11=band11,
            band12=band12,
        )
        # Flat view over both sensors' bands, S1 first then S2.
        self.bands = [*self.s1_patch.bands, *self.s2_patch.bands]
        # Store extra kwargs as attributes and remember them for __repr__.
        for k, v in kwargs.items():
            setattr(self, k, v)
        self.__stored_args__ = {**kwargs}

    @classmethod
    def short_init(
        cls,
        VH: np.ndarray,
        VV: np.ndarray,
        B01: np.ndarray,
        B02: np.ndarray,
        B03: np.ndarray,
        B04: np.ndarray,
        B05: np.ndarray,
        B06: np.ndarray,
        B07: np.ndarray,
        B08: np.ndarray,
        B8A: np.ndarray,
        B09: np.ndarray,
        B11: np.ndarray,
        B12: np.ndarray,
        **kwargs,
    ):
        """
        Alternative `__init__` function.
        Only difference is the encoded names.
        """
        return cls(
            bandVH=VH,
            bandVV=VV,
            band01=B01,
            band02=B02,
            band03=B03,
            band04=B04,
            band05=B05,
            band06=B06,
            band07=B07,
            band08=B08,
            band8A=B8A,
            band09=B09,
            band11=B11,
            band12=B12,
            **kwargs,
        )

    def dump(self, file):
        """Pickle this patch to an open binary file (protocol 4)."""
        return pickle.dump(self, file, protocol=4)

    def dumps(self):
        """Return this patch pickled to bytes (protocol 4)."""
        return pickle.dumps(self, protocol=4)

    @staticmethod
    def load(file) -> "BigEarthNet_S1_S2_Patch":
        """Load a previously dumped patch from an open binary file."""
        return pickle.load(file)

    @staticmethod
    def loads(data) -> "BigEarthNet_S1_S2_Patch":
        """Load a previously dumped patch from bytes."""
        return pickle.loads(data)

    def get_band_by_name(self, name: str) -> Band:
        """Return the band called `name`.

        Raises KeyError when no band matches.
        Fixed: previously the loop scanned every band and kept the *last*
        match instead of returning as soon as one was found.
        """
        for b in self.bands:
            if b.name == name:
                return b
        raise KeyError(f"{name} is not known")

    def get_band_data_by_name(self, name: str) -> np.ndarray:
        """Return the raw data array of the band called `name`."""
        return self.get_band_by_name(name).data

    def __repr__(self):
        r_str = f"{self.__class__.__name__} with:\n"
        r_str += "\n".join(f"\t{b}" for b in self.bands)
        if len(self.__stored_args__) != 0:
            r_str += "\nAnd the extra metadata:\n"
            for key, metadata in self.__stored_args__.items():
                r_str += f"\t{key}: {metadata}\n"
        return r_str
|
{"hexsha": "7dfa3dc577aacbc5c738e29f9d2143abf7f40c30", "size": 3400, "ext": "py", "lang": "Python", "max_stars_repo_path": "bigearthnet_patch_interface/merged_interface.py", "max_stars_repo_name": "kai-tub/bigearthnet_patch_interface", "max_stars_repo_head_hexsha": "395f40f486c471f383a74667d1ae2006ee13e328", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-30T22:20:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T22:20:07.000Z", "max_issues_repo_path": "bigearthnet_patch_interface/merged_interface.py", "max_issues_repo_name": "kai-tub/bigearthnet_patch_interface", "max_issues_repo_head_hexsha": "395f40f486c471f383a74667d1ae2006ee13e328", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-11T11:20:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T10:15:37.000Z", "max_forks_repo_path": "bigearthnet_patch_interface/merged_interface.py", "max_forks_repo_name": "kai-tub/bigearthnet_patch_interface", "max_forks_repo_head_hexsha": "395f40f486c471f383a74667d1ae2006ee13e328", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9541984733, "max_line_length": 74, "alphanum_fraction": 0.5370588235, "include": true, "reason": "import numpy", "num_tokens": 890}
|
[STATEMENT]
lemma index_of_r_to_l_lm: "nat_to_pr index_of_r_to_l (c_pair x (c_pair y z)) = c_pair (c_pair x y) z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nat_to_pr index_of_r_to_l (c_pair x (c_pair y z)) = c_pair (c_pair x y) z
[PROOF STEP]
apply(unfold index_of_r_to_l_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nat_to_pr (pair_by_index (pair_by_index index_of_c_fst (comp_by_index index_of_c_fst index_of_c_snd)) (comp_by_index index_of_c_snd index_of_c_snd)) (c_pair x (c_pair y z)) = c_pair (c_pair x y) z
[PROOF STEP]
apply(simp add: pair_by_index_main)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c_f_pair (c_f_pair (nat_to_pr index_of_c_fst) (nat_to_pr (comp_by_index index_of_c_fst index_of_c_snd))) (nat_to_pr (comp_by_index index_of_c_snd index_of_c_snd)) (c_pair x (c_pair y z)) = c_pair (c_pair x y) z
[PROOF STEP]
apply(unfold c_f_pair_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c_pair (c_pair (nat_to_pr index_of_c_fst (c_pair x (c_pair y z))) (nat_to_pr (comp_by_index index_of_c_fst index_of_c_snd) (c_pair x (c_pair y z)))) (nat_to_pr (comp_by_index index_of_c_snd index_of_c_snd) (c_pair x (c_pair y z))) = c_pair (c_pair x y) z
[PROOF STEP]
apply(simp add: index_of_c_fst_main)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c_pair (c_pair x (nat_to_pr (comp_by_index index_of_c_fst index_of_c_snd) (c_pair x (c_pair y z)))) (nat_to_pr (comp_by_index index_of_c_snd index_of_c_snd) (c_pair x (c_pair y z))) = c_pair (c_pair x y) z
[PROOF STEP]
apply(simp add: comp_by_index_main)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c_pair (c_pair x (nat_to_pr index_of_c_fst (nat_to_pr index_of_c_snd (c_pair x (c_pair y z))))) (nat_to_pr index_of_c_snd (nat_to_pr index_of_c_snd (c_pair x (c_pair y z)))) = c_pair (c_pair x y) z
[PROOF STEP]
apply(simp add: index_of_c_fst_main)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c_pair (c_pair x (c_fst (nat_to_pr index_of_c_snd (c_pair x (c_pair y z))))) (nat_to_pr index_of_c_snd (nat_to_pr index_of_c_snd (c_pair x (c_pair y z)))) = c_pair (c_pair x y) z
[PROOF STEP]
apply(simp add: index_of_c_snd_main)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1086, "file": "Recursion-Theory-I_RecEnSet", "length": 8}
|
# Holds everything needed to evaluate a batched, ensemble loss:
# per-observation target bookkeeping, per-field weights, discrepancy
# time series, and the profile-analysis strategy.
# NOTE(review): type parameters Δ and P are not used by any field below —
# confirm whether they are kept for external API compatibility.
mutable struct LossFunction{Δ, FT, ML, F, T, L, P}
    first_targets :: FT         # per-observation index of the first target time step
    max_simulation_length :: ML # longest target series across all observations
    field_weights :: F # scenario weights
    time_series :: T            # per-observation discrepancy time-series analyses
    profile :: L                # profile-discrepancy analysis (value- or gradient-based)
end
# True when every element is approximately equal to the first one.
allsame(vals) = all(v -> isapprox(v, first(vals)), vals)
# Successive time-step sizes of a time series `data.t`.
t_interval(data) = diff(data.t)
"""
Evaluate the loss for parameter set `θ`: run the model forward (recording
per-target discrepancies into `loss.time_series`), then return an
`N_ensemble × 1` vector of accumulated, ensemble-normalized errors.
"""
function (loss::LossFunction)(simulation, observations, θ::Vector{<:FreeParameters})
    # iterate the model and record discrepancy summary in `time_series`
    evaluate!(loss, simulation, observations, θ)
    n_ensemble = ensemble_size(simulation.model)
    # Renamed from `error`, which shadowed Base.error.
    total = zeros(n_ensemble, 1)
    for series in loss.time_series
        total .+= series.analysis(series.data) ./ n_ensemble
    end
    return total
end
"""
    LossFunction(simulation, observations; data_weights, relative_weights)

Build a `LossFunction` for an ensemble simulation against a batch of
one-dimensional time-series observations. Each field (`:u, :v, :b, :e`)
gets one weight *per observation*: `relative_weights[field]` scaled by
`estimate_weights` and by the observation's `data_weights` entry; fields
absent from an observation's `relevant_fields` get weight 0 for it.
Requires uniformly spaced time steps that agree across observations.
"""
function LossFunction(simulation::Simulation{<:OneDimensionalEnsembleModel}, observations::OneDimensionalTimeSeriesBatch; data_weights=[1.0 for b in observations], relative_weights)
    @assert all([allsame(t_interval(data)) for data in observations]) "Simulation time steps are not uniformly spaced."
    @assert allsame([t_interval(data)[1] for data in observations]) "Time step differs between simulations."
    # Target indices per observation; the first one anchors model time 1.
    all_targets = getproperty.(observations, :targets)
    first_targets = getindex.(all_targets, 1)
    max_simulation_length = maximum(length.(all_targets))
    profile = ValueProfileAnalysis(simulation.model.grid, analysis = column_mean)
    # One weight per observation is pushed for every field below.
    field_weights = Dict(f => [] for f in [:u, :v, :b, :e])
    for (i, data) in enumerate(observations)
        data_fields = data.relevant_fields # e.g. (:b, :e)
        targets = all_targets[i]
        rw = [relative_weights[f] for f in data_fields]
        weights = estimate_weights(profile, data, rw) # e.g. (1.0, 0.5)
        for (j, field_name) in enumerate(data_fields)
            push!(field_weights[field_name], weights[j] * data_weights[i])
        end
        # Fields this observation does not constrain contribute nothing.
        for field_name in keys(field_weights)
            field_name ∉ data_fields && push!(field_weights[field_name], 0)
        end
    end
    time_series = [EnsembleTimeSeriesAnalysis(observations[i].t[all_targets[i]], simulation.model.grid.Nx) for i in 1:length(observations)]
    return LossFunction(first_targets, max_simulation_length, field_weights, time_series, profile)
end
# Fill `value.discrepancy` with the pointwise squared difference between
# the interiors of the data and model fields.
function calculate_value_discrepancy!(value, model_field, data_field)
    data_interior = interior(data_field)
    model_interior = interior(model_field)
    interior(value.discrepancy) .= (data_interior .- model_interior) .^ 2
    return nothing
end
"""
analyze_profile_discrepancy(value, model_field, data_field)
Calculates the discrepancy between model and data field values, and returns an
analysis of the discrepancy profile.
"""
function analyze_profile_discrepancy(value, model_field, data_field)
calculate_value_discrepancy!(value, model_field, data_field) # MSE for each grid element
return value.analysis(value.discrepancy) # e.g.column_mean of discrepancy field
end
"""
    calculate_gradient_discrepancy!(prof, model_field, data_field)

In place, compute squared discrepancies of both values (`prof.ϵ`) and
vertical gradients (`prof.∇ϵ`) between model and data fields. Note
`prof.∇ϵ` includes a bottom-boundary entry that callers are expected to
exclude from analysis.
"""
function calculate_gradient_discrepancy!(prof, model_field, data_field)
    # Coarse grain the data
    ϵ = prof.ϵ
    set!(ϵ, data_field)
    # Calculate gradients of both data and discrepancy
    ∇ϕ = prof.∇ϕ
    ∇ϵ = prof.∇ϵ
    ∂z!(∇ϵ, ϵ)
    ∂z!(∇ϕ, model_field)
    # Overwrite ϵ/∇ϵ with squared differences; ∂z! above must run first
    # because ∇ϵ is derived from the un-squared data values.
    for i in eachindex(ϵ)
        @inbounds ϵ[i] = (ϵ[i] - model_field[i])^2
        @inbounds ∇ϵ[i] = (∇ϵ[i] - ∇ϕ[i])^2 # includes bottom boundary value, which will later be ignored.
    end
    # Top boundary contribution (ignored for now)
    #N = d.grid.N
    #@inbounds ∇d[N+1] = (∇d[N+1] - ∇ϕ[N+1])^2
    return nothing
end
"""
analyze_profile_discrepancy(prof::GradientProfileAnalysis, model_field, data_field)
Calculates the discrepencies between both values and gradients of model and data fields,
and returns an analysis of the two discrepancy profiles.
"""
function analyze_profile_discrepancy(prof::GradientProfileAnalysis, model_field, data_field)
calculate_gradient_discrepancy!(prof, model_field, data_field)
# Calculate analysis on gradient, excluding boundary points.
return prof.analysis(prof.ϵ) + prof.gradient_weight * prof.analysis(prof.∇ϵ.data[2:end-1])
end
#
# Loss function utils
#
# Unit weight when no weight container is supplied ...
@inline get_weight(::Nothing, _) = 1
# ... otherwise look up the per-field weight (bounds check elided).
@inline get_weight(w, idx) = @inbounds w[idx]
"""
    new_field(field_name, field_data, grid)

Construct a field at the staggered location appropriate for `field_name`
(`:u` on x-faces, `:v` on y-faces, `:b` and `:e` at cell centers) holding
`field_data` on `grid`. Throws `ArgumentError` for unrecognized names;
previously the function silently evaluated to `false` in that case.
"""
function new_field(field_name, field_data, grid)
    field_name == :u && return XFaceField(grid, field_data)
    field_name == :v && return YFaceField(grid, field_data)
    field_name == :b && return CenterField(grid, field_data)
    field_name == :e && return CenterField(grid, field_data)
    throw(ArgumentError("unknown field name $field_name; expected :u, :v, :b, or :e"))
end
"""
    analyze_weighted_profile_discrepancy(loss, model, observations, target)

For one `target` time index, compare every field (`:u, :v, :b, :e`) of the
model against the observation batch and accumulate per-field, per-observation
weighted profile discrepancies into an `Nx × Ny × 1` array. NaNs in the
accumulated result are mapped to Inf via `nan2inf`.
"""
function analyze_weighted_profile_discrepancy(loss::LossFunction, model::OneDimensionalEnsembleModel, observations::OneDimensionalTimeSeriesBatch, target)
    total_discrepancy = zeros(model.grid.Nx, model.grid.Ny, 1)
    for field_name in [:u, :v, :b, :e]
        model_field = get_model_field(model, field_name)
        # compensate for setting model time index 1 to to index `first_target` in data.
        data_indices = target .+ loss.first_targets .- 1
        field_data = column_ensemble_interior(observations, field_name, data_indices, model.grid.Nx)
        data_field = new_field(field_name, field_data, model_field.grid)
        # Calculate the per-field profile-based discrepancy
        field_discrepancy = analyze_profile_discrepancy(loss.profile, model_field, data_field)
        #=
        if any(isnan.(field_discrepancy))
            field_discrepency .= weight * remaining_time / stop_time
        end
        =#
        # Accumulate weighted profile-based discrepancies in the total discrepancyor
        total_discrepancy .+= loss.field_weights[field_name]' .* field_discrepancy # accumulate discrepancyor
    end
    return nan2inf.(total_discrepancy)
end
"""
    evaluate!(loss, simulation, observations, parameters)

Run the ensemble simulation forward under `parameters`, stopping at each
target time of the longest observation, and record the per-observation
discrepancy at each target into `loss.time_series`.
"""
function evaluate!(loss::LossFunction, simulation::Simulation{<:OneDimensionalEnsembleModel}, observations::OneDimensionalTimeSeriesBatch, parameters)
    # Initialize
    initialize_forward_run!(simulation.model, observations, parameters, loss.first_targets)
    # this should be improved
    all_lengths = length.(getproperty.(observations, :t))
    longest_sim = observations[argmax(all_lengths)]
    # Calculate a loss function time-series
    for target in 1:loss.max_simulation_length
        # Advance the simulation up to this target's time, then score it.
        simulation.stop_time = longest_sim.t[target]
        run!(simulation)
        discrepancy = analyze_weighted_profile_discrepancy(loss, simulation.model, observations, target)
        # Shorter observations simply stop recording once exhausted.
        for (j, ts) in enumerate(loss.time_series)
            if target <= length(ts.time)
                # `ts.data` is N_ensemble x N_timesteps; `discrepancy` is N_ensemble x N_cases x 1
                ts.data[:, target] .= discrepancy[:, j, 1]
            end
        end
    end
    return nothing
end
#
# Miscellanea
#
"""
    max_variance(data; weights=nothing)

Return the per-field maximum variances of `data.relevant_fields`, each
scaled by its (optional) field weight.

Fixed: the loop previously read `loss.weights` where no `loss` was in
scope (an `UndefVarError` at runtime); weights are now an explicit,
optional keyword, preserving the old one-argument call signature.
"""
function max_variance(data; weights=nothing)
    fields = data.relevant_fields
    max_variances = zeros(length(fields))
    for (i, field) in enumerate(fields)
        # Delegates per-field computation to the two-argument method.
        max_variances[i] = get_weight(weights, i) * max_variance(data, field)
    end
    return max_variances
end
"""
    mean_variance(data; weights=nothing)

Return the per-field mean variances of `data.relevant_fields`, each
scaled by its (optional) field weight.

Fixed: the accumulator previously shadowed the function's own name
(breaking the recursive per-field call), the return referenced the
undefined `mean_variances`, and the weight lookup read an undefined
global `loss`. Weights are now an explicit, optional keyword.
"""
function mean_variance(data; weights=nothing)
    fields = data.relevant_fields
    variances = zeros(length(fields))
    for (i, field) in enumerate(fields)
        # Delegates per-field computation to the two-argument method.
        variances[i] = get_weight(weights, i) * mean_variance(data, field)
    end
    return variances
end
|
{"hexsha": "145ade70ed40aefb915fb59395478b3426b25f56", "size": 7280, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LossFunctions/loss_function.jl", "max_stars_repo_name": "adelinehillier/OceanTurbulenceParameterEstimation.jl", "max_stars_repo_head_hexsha": "c0253976746d43c6667414be3801343818f6b2bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-03T04:11:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T04:11:22.000Z", "max_issues_repo_path": "src/LossFunctions/loss_function.jl", "max_issues_repo_name": "adelinehillier/OceanTurbulenceParameterEstimation.jl", "max_issues_repo_head_hexsha": "c0253976746d43c6667414be3801343818f6b2bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-09-15T20:32:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-02T22:17:15.000Z", "max_forks_repo_path": "src/LossFunctions/loss_function.jl", "max_forks_repo_name": "adelinehillier/OceanTurbulenceParameterEstimation.jl", "max_forks_repo_head_hexsha": "c0253976746d43c6667414be3801343818f6b2bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0186915888, "max_line_length": 181, "alphanum_fraction": 0.7031593407, "num_tokens": 1834}
|
import argparse
import numpy as np
import timm
import torch
from onnx.optimizer import optimize
from timm.models import load_checkpoint
from models.t2t_vit import *
try:
import onnx
import onnxruntime as rt
except ImportError as e:
raise ImportError(f'Please install onnx and onnxruntime first. {e}')
def parse_args():
    """Parse command-line options for the ONNX export script.

    Positional `model_name` selects the timm model; the remaining flags
    control checkpoint loading, EMA weights, output path and opset.
    """
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('model_name', help='')
    parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                        help='use pre-trained model')
    parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--use-ema', dest='use_ema', action='store_true',
                        help='use ema version of weights if present')
    parser.add_argument('--output-file', type=str, default='tmp.onnx')
    parser.add_argument('--opset-version', type=int, default=11)
    return parser.parse_args()
def optimize_onnx_graph(onnx_model_path):
    """Load an ONNX model, run cleanup passes, and save it back in place.

    Applies the `extract_constant_to_initializer` and
    `eliminate_unused_initializer` passes, then removes graph inputs that
    duplicate initializer names.
    """
    model = onnx.load(onnx_model_path)
    model = optimize(model, ['extract_constant_to_initializer',
                             'eliminate_unused_initializer'])
    inputs = model.graph.input
    inputs_by_name = {graph_input.name: graph_input for graph_input in inputs}
    # Initializers that also appear as graph inputs: drop the input entry.
    for initializer in model.graph.initializer:
        duplicated_input = inputs_by_name.get(initializer.name)
        if duplicated_input is not None:
            inputs.remove(duplicated_input)
    onnx.save(model, onnx_model_path)
if __name__ == '__main__':
    args = parse_args()
    # Build the model in "exportable" mode so timm selects ONNX-friendly ops.
    model = timm.create_model(args.model_name, pretrained=args.pretrained, exportable=True)
    if args.checkpoint:
        load_checkpoint(model, args.checkpoint, args.use_ema)
    model.eval()
    print(model.default_cfg)
    # Prefer the model's dedicated test-time input size when it defines one.
    try:
        input_shape = (1, ) + model.default_cfg['test_input_size']
    except KeyError:
        input_shape = (1, ) + model.default_cfg['input_size']
    print(input_shape)
    dummy_input = torch.randn(*input_shape)
    # Export with dynamic batch/height/width axes so the resulting graph
    # accepts variable input sizes.
    # NOTE(review): enable_onnx_checker=False skips ONNX validation before
    # the optimizer pass below runs — confirm this is intentional.
    torch.onnx.export(
        model,
        dummy_input,
        args.output_file,
        export_params=True,
        keep_initializers_as_inputs=True,
        verbose=False,
        opset_version=args.opset_version,
        input_names=['image'],
        output_names=['probs'],
        strip_doc_string=True,
        enable_onnx_checker=False,
        operator_export_type=torch.onnx.OperatorExportTypes.ONNX,
        dynamic_axes = {
            "image": {
                0: "batch_size",
                2: "height",
                3: "width"
            }
        }
    )
    # Post-process the exported file in place.
    optimize_onnx_graph(args.output_file)
|
{"hexsha": "bfd1b9bd9fc333e95e6649f20b1e7214a780b0dc", "size": 2741, "ext": "py", "lang": "Python", "max_stars_repo_path": "export.py", "max_stars_repo_name": "druzhkov-paul/T2T-ViT", "max_stars_repo_head_hexsha": "819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "export.py", "max_issues_repo_name": "druzhkov-paul/T2T-ViT", "max_issues_repo_head_hexsha": "819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "export.py", "max_forks_repo_name": "druzhkov-paul/T2T-ViT", "max_forks_repo_head_hexsha": "819c3ddc4cb6f464d4a9866d8713c7ace42ebf6c", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8720930233, "max_line_length": 91, "alphanum_fraction": 0.6493980299, "include": true, "reason": "import numpy", "num_tokens": 598}
|
#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <math.h>
#include <set>
#include <boost/multiprecision/gmp.hpp>
#include <boost/multiprecision/number.hpp>
using namespace std;
using namespace boost::multiprecision;
int target = 100;
int main(int argc, char** argv) {
set<mpz_int> visited;
for (int a = 2; a <= target; a++) {
for (int b = 2; b <= target; b++) {
mpz_int val = 1;
for (int i = 0; i < b; i++) {
val *= a;
}
visited.insert(val);
cout << val << endl;
}
}
cout << "Total unique elements: " << visited.size() << endl;
return 0;
}
|
{"hexsha": "d0530bd585bcffa537a5673ef00590e556f78ac6", "size": 622, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "29.cpp", "max_stars_repo_name": "DouglasSherk/project-euler", "max_stars_repo_head_hexsha": "f3b188b199ff31671c6d7683b15675be7484c5b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "29.cpp", "max_issues_repo_name": "DouglasSherk/project-euler", "max_issues_repo_head_hexsha": "f3b188b199ff31671c6d7683b15675be7484c5b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "29.cpp", "max_forks_repo_name": "DouglasSherk/project-euler", "max_forks_repo_head_hexsha": "f3b188b199ff31671c6d7683b15675be7484c5b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.4375, "max_line_length": 62, "alphanum_fraction": 0.5868167203, "num_tokens": 177}
|
'''This module contains the S-model Environment.'''
import numpy as np
# from random import uniform as u
class SEnvironment(object):
    """Stationary S-model learning environment.

    Wraps a vector of success probabilities and answers an agent's query
    for the environment's response at a given depth index.
    """

    def __init__(self, p_vector, precision=1):
        """Store the success-probability vector and the lookup precision."""
        self.p = np.array(p_vector)
        self.precision = precision
        # Only the environment knows the optimal transmission a priori;
        # it is used solely to evaluate learning a posteriori.
        self.best_xmission = self.p.max()

    def response(self, depth_index):
        """Return the environment's probability response for the 1-indexed
        ``depth_index``, scaled by the configured precision."""
        return self.p[(depth_index - 1) * self.precision]
|
{"hexsha": "6c972d994fc47546fd056a01b8f4f5cca73346ca", "size": 798, "ext": "py", "lang": "Python", "max_stars_repo_path": "distest/senvironment.py", "max_stars_repo_name": "0xSteve/detection_learning", "max_stars_repo_head_hexsha": "e767d740ffbb2df4570d8522a29062eca01b14ee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-09-10T16:35:33.000Z", "max_stars_repo_stars_event_max_datetime": "2017-09-10T16:35:33.000Z", "max_issues_repo_path": "distest/senvironment.py", "max_issues_repo_name": "0xSteve/detection_learning", "max_issues_repo_head_hexsha": "e767d740ffbb2df4570d8522a29062eca01b14ee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "distest/senvironment.py", "max_forks_repo_name": "0xSteve/detection_learning", "max_forks_repo_head_hexsha": "e767d740ffbb2df4570d8522a29062eca01b14ee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6956521739, "max_line_length": 66, "alphanum_fraction": 0.6516290727, "include": true, "reason": "import numpy", "num_tokens": 174}
|
## from Markdown.jl
import Base: display, show
# Plot type names recognized for graphical display.
# NOTE(review): non-const global; if it is never reassigned elsewhere,
# marking it `const` would improve type stability.
graph_types = AbstractString["Plot", "FramedPlot"]
function tohtml(io::IO, m::MIME"text/html", x)
show(io, m, x)
end
function tohtml(io::IO, m::MIME"text/latex", x)
show(io, m, x)
end
function tohtml(io::IO, m::MIME"text/plain", x)
show(io, m, x)
end
function tohtml(io::IO, m::MIME"image/png", img)
print(io, """<img src="data:image/png;base64,""")
print(io, stringmime(m, img))
print(io, "\" />")
end
function tohtml(m::MIME"image/svg+xml", img)
sprint(io -> show(io, m, img))
end
# Display infrastructure
"""
    bestmime(val)

Pick the richest MIME type that `val` can be shown as, in preference
order (HTML, LaTeX, SVG, PNG, plain text). Errors when none apply.
"""
function bestmime(val)
    preferred = ("text/html", "text/latex", "application/x-latex", "image/svg+xml", "image/png", "text/plain")
    idx = findfirst(m -> showable(m, val), preferred)
    idx === nothing && error("Cannot render $val to Markdown.")
    return MIME(Symbol(preferred[idx]))
end
# Render with the best available MIME type.
tohtml(io::IO, x) = tohtml(io, bestmime(x), x)
##################################################
## write mime methods
# Emit `\begin{tag} ... \end{tag}` around whatever output `f()` produces.
function with_environment(f, io, tag)
    print(io, "\\begin{$tag}")
    f()
    print(io, "\\end{$tag}")
end

# Surround the output produced by `f()` with the literal delimiter `tag`.
function with_delimiter(f, io, tag)
    print(io, tag)
    f()
    print(io, tag)
end
#XXXshow(io::IO, ::MIME"text/latex", md::Markdown.Content) =
# show(io, "text/plain", md)
##XXX function show(io::IO, mime::MIME"text/latex", block::Markdown.Block)
## for md in block.content[1:end-1]
## show(io::IO, mime, md)
## println(io)
## end
## show(io::IO, mime, block.content[end])
## end
function show(io::IO, mime::MIME"text/latex", header::Markdown.Header{l}) where {l}
txt = join(header.text)
if l == 1
print(io, "\\section{$(txt)}")
end
if l == 2
print(io, "\\subsection{$(txt)}")
end
if l > 2
print(io, "\\subsubsection{$(txt)}")
end
end
"heuristic to identify code blocks"
const block_code_re = r"^\n.*\n$"
is_blockcode(content) = isa(content, Markdown.Code) && occursin(block_code_re, content.code)
#function show(io::IO, ::MIME"text/latex", code::Markdown.BlockCode)
function show(io::IO, ::MIME"text/latex", code::Markdown.Code)
println((code.code, is_blockcode(code)))
if is_blockcode(code)
with_delimiter(io, "verbatim") do
print(io, code.code)
end
else
txt = code.code
txt = replace(txt, "^" => "\\^{}")
txt = replace(txt, "_" => "\\_{}")
txt = replace(txt, "#" => "\\#")
txt = replace(txt, "@" => "\\@")
print(io, "\\texttt{$(txt)}")
end
end
#function show(io::IO, ::MIME"text/latex", code::Markdown.InlineCode)
# print(io, "\\texttt{$(code.code)}")
#end
function show(io::IO, ::MIME"text/latex", md::Markdown.Paragraph)
println(io, "\\newline")
for md in md.content
show(io, "text/latex", md)
end
end
"""
    show(io, ::MIME"text/latex", md::Markdown.BlockQuote)

Render a Markdown block quote inside a LaTeX `quotation` environment,
rendering each contained element in order.
"""
function show(io::IO, ::MIME"text/latex", md::Markdown.BlockQuote)
    with_environment(io, "quotation") do
        foreach(part -> show(io, "text/latex", part), md.content)
    end
end
"""
    show(io, ::MIME"text/latex", md::Markdown.List)

Render a Markdown list as a LaTeX `enumerate` (ordered) or `itemize`
(bullet) environment, emitting `\\item` before each entry.

Fix: in the Julia stdlib `Markdown.List`, `ordered` is the start number for
ordered lists and `-1` for bullet lists (`isordered(l) = l.ordered >= 0`),
so the original `md.ordered < 0 ? "enumerate" : "itemize"` was inverted.
"""
function show(io::IO, ::MIME"text/latex", md::Markdown.List)
    with_environment(io, md.ordered >= 0 ? "enumerate" : "itemize") do
        for item in md.items
            print(io, "\\item ")
            foreach(part -> show(io, "text/latex", part), item)
        end
    end
end
# A Markdown horizontal rule becomes a LaTeX `\hrule`.
show(io::IO, ::MIME"text/latex", ::Markdown.HorizontalRule) = print(io, "\\hrule")
# Inline elements
##XXX function show(io::IO, ::MIME"text/latex", md::Markdown.Plain)
## print(io, md.text)
## end
# Bold text maps to `\textbf{...}`; the element's text pieces are joined.
function show(io::IO, ::MIME"text/latex", md::Markdown.Bold)
    print(io, "\\textbf{", join(md.text), "}")
end
# Italic text maps to `\textit{...}`; the element's text pieces are joined.
function show(io::IO, ::MIME"text/latex", md::Markdown.Italic)
    print(io, "\\textit{", join(md.text), "}")
end
# An image becomes `\includegraphics{url}`; the alt text is not rendered.
function show(io::IO, ::MIME"text/latex", img::Markdown.Image)
    print(io, "\\includegraphics{", img.url, "}")
end
# function show(io::IO, ::MIME"text/latex", md::Markdown.Image)
# print(io, """<img src="$(md.url)" alt="$(md.alt)"></img>""")
# end
# A hyperlink becomes `\href{url}{text}` (requires the LaTeX hyperref package).
function show(io::IO, ::MIME"text/latex", md::Markdown.Link)
    print(io, "\\href{", md.url, "}{", join(md.text), "}")
end
# Render a Markdown.LaTeX element to LaTeX output.  Formulas whose source was
# marked up as `$$~ ... ~$$` arrive with a leading `~`; those are re-emitted
# as display math, everything else falls back to the plain-text rendering.
# NOTE(review): the `L"..."` string macro comes from the LaTeXStrings package
# (not the standard library) — presumably imported elsewhere in this file.
function show(io::IO, ::MIME"text/latex", md::Markdown.LaTeX)
    ## Hack, we use $$~ ~$$ to mark these up, so if we see ~..~ wrapping
    ## we add in ...
    txt = md.formula
    if occursin(r"^~.*", txt)
        print(io, "\n")
        show(io, "text/latex", L"$$")
        # strip the first and last character (the `~` markers) from the formula
        show(io, "text/latex", txt[2:(end-1)])
        show(io, "text/latex", L"$$")
        print(io, "\n")
    else
        show(io, "text/plain", md)
    end
end
# HTML rendering of a Markdown.LaTeX element.  NOTE(review): despite the
# "text/html" dispatch, the body delegates to the "text/latex" path — this
# looks intentional (e.g. so MathJax can typeset raw `$$...$$` in HTML
# output), but confirm against the surrounding pipeline.
function show(io::IO, ::MIME"text/html", md::Markdown.LaTeX)
    ## Hack, we use $$~ ~$$ to mark these up, so if we see ~..~ wrapping
    ## we add in ...
    txt = md.formula
    if occursin(r"^~.*", txt)
        print(io, "\n")
        show(io, "text/latex", L"$$")
        # strip the first and last character (the `~` markers) from the formula
        show(io, "text/latex", txt[2:(end-1)])
        show(io, "text/latex", L"$$")
        print(io, "\n")
    else
        show(io, "text/plain", md)
    end
end
# Plain strings pass straight through to the LaTeX output unescaped.
show(io::IO, ::MIME"text/latex", md::AbstractString) = print(io, md)
# Placeholder LaTeX rendering for `Alert` boxes (project-defined type):
# currently emits a literal "XXX" marker instead of real content.
# NOTE(review): presumably a stub to be completed — confirm intent.
function Base.show(io::IO, ::MIME"text/latex", x::Alert)
    print(io, "XXX")
end
|
{"hexsha": "7994f97166f0f58b5782f93fdde313d7328fa243", "size": 4937, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/markdown-additions.jl", "max_stars_repo_name": "jverzani/WeavePynb.jl", "max_stars_repo_head_hexsha": "5c2834f717fb3583d1fc245ed767b4d17aae6b39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-01-05T09:27:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-24T19:14:51.000Z", "max_issues_repo_path": "src/markdown-additions.jl", "max_issues_repo_name": "jverzani/WeavePynb.jl", "max_issues_repo_head_hexsha": "5c2834f717fb3583d1fc245ed767b4d17aae6b39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/markdown-additions.jl", "max_forks_repo_name": "jverzani/WeavePynb.jl", "max_forks_repo_head_hexsha": "5c2834f717fb3583d1fc245ed767b4d17aae6b39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-24T21:05:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:37:46.000Z", "avg_line_length": 24.5621890547, "max_line_length": 109, "alphanum_fraction": 0.5991492809, "num_tokens": 1551}
|
import unittest
from ..weights import W, WSP
from .. import util
from ..util import WSP2W, lat2W
from ..contiguity import Rook
from ...io.fileio import FileIO as psopen
from ... import examples
from ..distance import KNN
import numpy as np
NPTA3E = np.testing.assert_array_almost_equal
class TestW(unittest.TestCase):
    """Tests for the :class:`W` spatial weights object.

    Fixtures: Rook contiguity weights built from the 10740 example
    shapefile (``self.w``) and a regular 3x3 rook lattice
    (``self.w3x3``), plus hand-built neighbor/weight dicts mirroring the
    lattice so the ``W`` constructor can be exercised directly.

    Fixes vs. the previous revision: ``test_diagWtW`` now actually checks
    ``diagWtW`` (it asserted on ``diagW2``, a copy/paste slip — the values
    coincide for this symmetric binary W), and ``test_higher_order``
    builds its expected mapping from ``w2.weights`` for consistency with
    ``Test_WSP_Back_To_W``.
    """

    def setUp(self):
        self.w = Rook.from_shapefile(
            examples.get_path("10740.shp"), silence_warnings=True
        )
        # Rook neighbors/weights of a 3x3 lattice, written out by hand.
        self.neighbors = {
            0: [3, 1],
            1: [0, 4, 2],
            2: [1, 5],
            3: [0, 6, 4],
            4: [1, 3, 7, 5],
            5: [2, 4, 8],
            6: [3, 7],
            7: [4, 6, 8],
            8: [5, 7],
        }
        self.weights = {
            0: [1, 1],
            1: [1, 1, 1],
            2: [1, 1],
            3: [1, 1, 1],
            4: [1, 1, 1, 1],
            5: [1, 1, 1],
            6: [1, 1],
            7: [1, 1, 1],
            8: [1, 1],
        }
        self.w3x3 = util.lat2W(3, 3)

    def test_W(self):
        w = W(self.neighbors, self.weights, silence_warnings=True)
        self.assertEqual(w.pct_nonzero, 29.62962962962963)

    def test___getitem__(self):
        self.assertEqual(self.w[0], {1: 1.0, 4: 1.0, 101: 1.0, 85: 1.0, 5: 1.0})

    def test___init__(self):
        w = W(self.neighbors, self.weights, silence_warnings=True)
        self.assertEqual(w.pct_nonzero, 29.62962962962963)

    def test___iter__(self):
        w = lat2W(3, 3)
        res = {}
        for i, wi in enumerate(w):
            res[i] = wi
        self.assertEqual(res[0], (0, {1: 1.0, 3: 1.0}))
        self.assertEqual(res[8], (8, {5: 1.0, 7: 1.0}))

    def test_asymmetries(self):
        # Row-standardization breaks symmetry for non-regular degrees.
        w = lat2W(3, 3)
        w.transform = "r"
        result = w.asymmetry()
        self.assertEqual(
            result,
            [
                (0, 1),
                (0, 3),
                (1, 0),
                (1, 2),
                (1, 4),
                (2, 1),
                (2, 5),
                (3, 0),
                (3, 4),
                (3, 6),
                (4, 1),
                (4, 3),
                (4, 5),
                (4, 7),
                (5, 2),
                (5, 4),
                (5, 8),
                (6, 3),
                (6, 7),
                (7, 4),
                (7, 6),
                (7, 8),
                (8, 5),
                (8, 7),
            ],
        )

    def test_asymmetry(self):
        w = lat2W(3, 3)
        self.assertEqual(w.asymmetry(), [])
        w.transform = "r"
        self.assertFalse(w.asymmetry() == [])

    def test_cardinalities(self):
        w = lat2W(3, 3)
        self.assertEqual(
            w.cardinalities, {0: 2, 1: 3, 2: 2, 3: 3, 4: 4, 5: 3, 6: 2, 7: 3, 8: 2}
        )

    def test_diagW2(self):
        NPTA3E(
            self.w3x3.diagW2, np.array([2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0])
        )

    def test_diagWtW(self):
        # For this symmetric binary W, diag(W'W) equals diag(W^2).
        NPTA3E(
            self.w3x3.diagWtW, np.array([2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0])
        )

    def test_diagWtW_WW(self):
        NPTA3E(
            self.w3x3.diagWtW_WW,
            np.array([4.0, 6.0, 4.0, 6.0, 8.0, 6.0, 4.0, 6.0, 4.0]),
        )

    def test_full(self):
        wf = np.array(
            [
                [0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
                [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0],
            ]
        )
        ids = list(range(9))
        wf1, ids1 = self.w3x3.full()
        NPTA3E(wf1, wf)
        self.assertEqual(ids1, ids)

    def test_get_transform(self):
        self.assertEqual(self.w3x3.transform, "O")
        self.w3x3.transform = "r"
        self.assertEqual(self.w3x3.transform, "R")
        self.w3x3.transform = "b"

    def test_higher_order(self):
        weights = {
            0: [1.0, 1.0, 1.0],
            1: [1.0, 1.0, 1.0],
            2: [1.0, 1.0, 1.0],
            3: [1.0, 1.0, 1.0],
            4: [1.0, 1.0, 1.0, 1.0],
            5: [1.0, 1.0, 1.0],
            6: [1.0, 1.0, 1.0],
            7: [1.0, 1.0, 1.0],
            8: [1.0, 1.0, 1.0],
        }
        neighbors = {
            0: [4, 6, 2],
            1: [3, 5, 7],
            2: [8, 0, 4],
            3: [7, 1, 5],
            4: [8, 0, 2, 6],
            5: [1, 3, 7],
            6: [4, 0, 8],
            7: [3, 1, 5],
            8: [6, 2, 4],
        }
        wneighbs = {
            k: {neighb: weights[k][i] for i, neighb in enumerate(v)}
            for k, v in list(neighbors.items())
        }
        w2 = util.higher_order(self.w3x3, 2)
        # Build the observed mapping from w2's own weights (all 1.0 here),
        # matching the equivalent test in Test_WSP_Back_To_W.
        test_wneighbs = {
            k: {ne: w2.weights[k][i] for i, ne in enumerate(v)}
            for k, v in list(w2.neighbors.items())
        }
        self.assertEqual(test_wneighbs, wneighbs)

    def test_histogram(self):
        hist = [
            (0, 1),
            (1, 1),
            (2, 4),
            (3, 20),
            (4, 57),
            (5, 44),
            (6, 36),
            (7, 15),
            (8, 7),
            (9, 1),
            (10, 6),
            (11, 0),
            (12, 2),
            (13, 0),
            (14, 0),
            (15, 1),
        ]
        self.assertEqual(self.w.histogram, hist)

    def test_id2i(self):
        id2i = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8}
        self.assertEqual(self.w3x3.id2i, id2i)

    def test_id_order_set(self):
        w = W(neighbors={"a": ["b"], "b": ["a", "c"], "c": ["b"]})
        self.assertFalse(w.id_order_set)

    def test_islands(self):
        w = W(
            neighbors={"a": ["b"], "b": ["a", "c"], "c": ["b"], "d": []},
            silence_warnings=True,
        )
        self.assertEqual(w.islands, ["d"])
        self.assertEqual(self.w3x3.islands, [])

    def test_max_neighbors(self):
        w = W(
            neighbors={"a": ["b"], "b": ["a", "c"], "c": ["b"], "d": []},
            silence_warnings=True,
        )
        self.assertEqual(w.max_neighbors, 2)
        self.assertEqual(self.w3x3.max_neighbors, 4)

    def test_mean_neighbors(self):
        w = util.lat2W()
        self.assertEqual(w.mean_neighbors, 3.2)

    def test_min_neighbors(self):
        w = util.lat2W()
        self.assertEqual(w.min_neighbors, 2)

    def test_n(self):
        w = util.lat2W()
        self.assertEqual(w.n, 25)

    def test_neighbor_offsets(self):
        d = {
            0: [3, 1],
            1: [0, 4, 2],
            2: [1, 5],
            3: [0, 6, 4],
            4: [1, 3, 7, 5],
            5: [2, 4, 8],
            6: [3, 7],
            7: [4, 6, 8],
            8: [5, 7],
        }
        self.assertEqual(self.w3x3.neighbor_offsets, d)

    def test_nonzero(self):
        self.assertEqual(self.w3x3.nonzero, 24)

    def test_order(self):
        w = util.lat2W(3, 3)
        o = {
            0: [-1, 1, 2, 1, 2, 3, 2, 3, 0],
            1: [1, -1, 1, 2, 1, 2, 3, 2, 3],
            2: [2, 1, -1, 3, 2, 1, 0, 3, 2],
            3: [1, 2, 3, -1, 1, 2, 1, 2, 3],
            4: [2, 1, 2, 1, -1, 1, 2, 1, 2],
            5: [3, 2, 1, 2, 1, -1, 3, 2, 1],
            6: [2, 3, 0, 1, 2, 3, -1, 1, 2],
            7: [3, 2, 3, 2, 1, 2, 1, -1, 1],
            8: [0, 3, 2, 3, 2, 1, 2, 1, -1],
        }
        self.assertEqual(util.order(w), o)

    def test_pct_nonzero(self):
        self.assertEqual(self.w3x3.pct_nonzero, 29.62962962962963)

    def test_s0(self):
        self.assertEqual(self.w3x3.s0, 24.0)

    def test_s1(self):
        self.assertEqual(self.w3x3.s1, 48.0)

    def test_s2(self):
        self.assertEqual(self.w3x3.s2, 272.0)

    def test_s2array(self):
        s2a = np.array(
            [[16.0], [36.0], [16.0], [36.0], [64.0], [36.0], [16.0], [36.0], [16.0]]
        )
        NPTA3E(self.w3x3.s2array, s2a)

    def test_sd(self):
        self.assertEqual(self.w3x3.sd, 0.66666666666666663)

    def test_set_transform(self):
        w = util.lat2W(2, 2)
        self.assertEqual(w.transform, "O")
        self.assertEqual(w.weights[0], [1.0, 1.0])
        w.transform = "r"
        self.assertEqual(w.weights[0], [0.5, 0.5])

    def test_shimbel(self):
        d = {
            0: [-1, 1, 2, 1, 2, 3, 2, 3, 4],
            1: [1, -1, 1, 2, 1, 2, 3, 2, 3],
            2: [2, 1, -1, 3, 2, 1, 4, 3, 2],
            3: [1, 2, 3, -1, 1, 2, 1, 2, 3],
            4: [2, 1, 2, 1, -1, 1, 2, 1, 2],
            5: [3, 2, 1, 2, 1, -1, 3, 2, 1],
            6: [2, 3, 4, 1, 2, 3, -1, 1, 2],
            7: [3, 2, 3, 2, 1, 2, 1, -1, 1],
            8: [4, 3, 2, 3, 2, 1, 2, 1, -1],
        }
        self.assertEqual(util.shimbel(self.w3x3), d)

    def test_sparse(self):
        self.assertEqual(self.w3x3.sparse.nnz, 24)

    def test_trcW2(self):
        self.assertEqual(self.w3x3.trcW2, 24.0)

    def test_trcWtW(self):
        self.assertEqual(self.w3x3.trcWtW, 24.0)

    def test_trcWtW_WW(self):
        self.assertEqual(self.w3x3.trcWtW_WW, 48.0)

    def test_symmetrize(self):
        symm = self.w.symmetrize()
        np.testing.assert_allclose(symm.sparse.toarray(), self.w.sparse.toarray())
        knn = KNN.from_shapefile(
            examples.get_path("baltim.shp"), k=10, silence_warnings=True
        )
        sknn = knn.symmetrize()
        assert not np.allclose(knn.sparse.toarray(), sknn.sparse.toarray())
        np.testing.assert_allclose(sknn.sparse.toarray(), sknn.sparse.toarray().T)
        knn.symmetrize(inplace=True)
        np.testing.assert_allclose(sknn.sparse.toarray(), knn.sparse.toarray())
        np.testing.assert_allclose(knn.sparse.toarray().T, knn.sparse.toarray())

    def test_connected_components(self):
        disco = {0: [1], 1: [0], 2: [3], 3: [2]}
        disco = W(disco)
        assert disco.n_components == 2

    def test_roundtrip_write(self):
        # NOTE(review): leaves ./tmp.gal behind in the working directory.
        self.w.to_file("./tmp.gal")
        new = W.from_file("./tmp.gal")
        np.testing.assert_array_equal(self.w.sparse.toarray(), new.sparse.toarray())
class Test_WSP_Back_To_W(unittest.TestCase):
    # Test to make sure we get back to the same W functionality
    """Round-trip tests: convert W -> WSP -> W and re-run the TestW suite.

    setUp replaces both fixtures with versions that have been through a
    WSP (sparse) round trip, so every assertion below checks that the
    reconstructed W behaves identically to a freshly built one.
    """
    def setUp(self):
        self.w = Rook.from_shapefile(
            examples.get_path("10740.shp"), silence_warnings=True
        )
        # round-trip the shapefile weights through the sparse representation
        wsp = self.w.to_WSP()
        self.w = wsp.to_W(silence_warnings=True)
        self.neighbors = {
            0: [3, 1],
            1: [0, 4, 2],
            2: [1, 5],
            3: [0, 6, 4],
            4: [1, 3, 7, 5],
            5: [2, 4, 8],
            6: [3, 7],
            7: [4, 6, 8],
            8: [5, 7],
        }
        self.weights = {
            0: [1, 1],
            1: [1, 1, 1],
            2: [1, 1],
            3: [1, 1, 1],
            4: [1, 1, 1, 1],
            5: [1, 1, 1],
            6: [1, 1],
            7: [1, 1, 1],
            8: [1, 1],
        }
        self.w3x3 = util.lat2W(3, 3)
        # round-trip the lattice weights as well
        w3x3 = WSP(self.w3x3.sparse, self.w3x3.id_order)
        self.w3x3 = WSP2W(w3x3)
    def test_W(self):
        w = W(self.neighbors, self.weights, silence_warnings=True)
        self.assertEqual(w.pct_nonzero, 29.62962962962963)
    def test___getitem__(self):
        self.assertEqual(self.w[0], {1: 1.0, 4: 1.0, 101: 1.0, 85: 1.0, 5: 1.0})
    def test___init__(self):
        w = W(self.neighbors, self.weights, silence_warnings=True)
        self.assertEqual(w.pct_nonzero, 29.62962962962963)
    def test___iter__(self):
        w = util.lat2W(3, 3)
        res = {}
        for i, wi in enumerate(w):
            res[i] = wi
        self.assertEqual(res[0], (0, {1: 1.0, 3: 1.0}))
        self.assertEqual(res[8], (8, {5: 1.0, 7: 1.0}))
    def test_asymmetries(self):
        # row-standardization breaks symmetry for non-regular degrees
        w = util.lat2W(3, 3)
        w.transform = "r"
        result = w.asymmetry()
        self.assertEqual(
            result,
            [
                (0, 1),
                (0, 3),
                (1, 0),
                (1, 2),
                (1, 4),
                (2, 1),
                (2, 5),
                (3, 0),
                (3, 4),
                (3, 6),
                (4, 1),
                (4, 3),
                (4, 5),
                (4, 7),
                (5, 2),
                (5, 4),
                (5, 8),
                (6, 3),
                (6, 7),
                (7, 4),
                (7, 6),
                (7, 8),
                (8, 5),
                (8, 7),
            ],
        )
    def test_asymmetry(self):
        w = util.lat2W(3, 3)
        self.assertEqual(w.asymmetry(), [])
        w.transform = "r"
        self.assertFalse(w.asymmetry() == [])
    def test_cardinalities(self):
        w = util.lat2W(3, 3)
        self.assertEqual(
            w.cardinalities, {0: 2, 1: 3, 2: 2, 3: 3, 4: 4, 5: 3, 6: 2, 7: 3, 8: 2}
        )
    def test_diagW2(self):
        NPTA3E(
            self.w3x3.diagW2, np.array([2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0])
        )
    def test_diagWtW(self):
        # NOTE(review): asserts on diagW2, not diagWtW — likely copy/paste;
        # the values coincide for this symmetric binary W.
        NPTA3E(
            self.w3x3.diagW2, np.array([2.0, 3.0, 2.0, 3.0, 4.0, 3.0, 2.0, 3.0, 2.0])
        )
    def test_diagWtW_WW(self):
        NPTA3E(
            self.w3x3.diagWtW_WW,
            np.array([4.0, 6.0, 4.0, 6.0, 8.0, 6.0, 4.0, 6.0, 4.0]),
        )
    def test_full(self):
        wf = np.array(
            [
                [0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0],
                [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0],
            ]
        )
        ids = list(range(9))
        wf1, ids1 = self.w3x3.full()
        NPTA3E(wf1, wf)
        self.assertEqual(ids1, ids)
    def test_get_transform(self):
        self.assertEqual(self.w3x3.transform, "O")
        self.w3x3.transform = "r"
        self.assertEqual(self.w3x3.transform, "R")
        self.w3x3.transform = "b"
    def test_higher_order(self):
        weights = {
            0: [1.0, 1.0, 1.0],
            1: [1.0, 1.0, 1.0],
            2: [1.0, 1.0, 1.0],
            3: [1.0, 1.0, 1.0],
            4: [1.0, 1.0, 1.0, 1.0],
            5: [1.0, 1.0, 1.0],
            6: [1.0, 1.0, 1.0],
            7: [1.0, 1.0, 1.0],
            8: [1.0, 1.0, 1.0],
        }
        neighbors = {
            0: [4, 6, 2],
            1: [3, 5, 7],
            2: [8, 0, 4],
            3: [7, 1, 5],
            4: [8, 0, 2, 6],
            5: [1, 3, 7],
            6: [4, 0, 8],
            7: [3, 1, 5],
            8: [6, 2, 4],
        }
        wneighbs = {
            k: {neighb: weights[k][i] for i, neighb in enumerate(v)}
            for k, v in list(neighbors.items())
        }
        w2 = util.higher_order(self.w3x3, 2)
        test_wneighbs = {
            k: {ne: w2.weights[k][i] for i, ne in enumerate(v)}
            for k, v in list(w2.neighbors.items())
        }
        self.assertEqual(test_wneighbs, wneighbs)
    def test_histogram(self):
        hist = [
            (0, 1),
            (1, 1),
            (2, 4),
            (3, 20),
            (4, 57),
            (5, 44),
            (6, 36),
            (7, 15),
            (8, 7),
            (9, 1),
            (10, 6),
            (11, 0),
            (12, 2),
            (13, 0),
            (14, 0),
            (15, 1),
        ]
        self.assertEqual(self.w.histogram, hist)
    def test_id2i(self):
        id2i = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8}
        self.assertEqual(self.w3x3.id2i, id2i)
    def test_id_order_set(self):
        w = W(neighbors={"a": ["b"], "b": ["a", "c"], "c": ["b"]})
        self.assertFalse(w.id_order_set)
    def test_islands(self):
        w = W(
            neighbors={"a": ["b"], "b": ["a", "c"], "c": ["b"], "d": []},
            silence_warnings=True,
        )
        self.assertEqual(w.islands, ["d"])
        self.assertEqual(self.w3x3.islands, [])
    def test_max_neighbors(self):
        w = W(
            neighbors={"a": ["b"], "b": ["a", "c"], "c": ["b"], "d": []},
            silence_warnings=True,
        )
        self.assertEqual(w.max_neighbors, 2)
        self.assertEqual(self.w3x3.max_neighbors, 4)
    def test_mean_neighbors(self):
        w = util.lat2W()
        self.assertEqual(w.mean_neighbors, 3.2)
    def test_min_neighbors(self):
        w = util.lat2W()
        self.assertEqual(w.min_neighbors, 2)
    def test_n(self):
        w = util.lat2W()
        self.assertEqual(w.n, 25)
    def test_nonzero(self):
        self.assertEqual(self.w3x3.nonzero, 24)
    def test_order(self):
        w = util.lat2W(3, 3)
        o = {
            0: [-1, 1, 2, 1, 2, 3, 2, 3, 0],
            1: [1, -1, 1, 2, 1, 2, 3, 2, 3],
            2: [2, 1, -1, 3, 2, 1, 0, 3, 2],
            3: [1, 2, 3, -1, 1, 2, 1, 2, 3],
            4: [2, 1, 2, 1, -1, 1, 2, 1, 2],
            5: [3, 2, 1, 2, 1, -1, 3, 2, 1],
            6: [2, 3, 0, 1, 2, 3, -1, 1, 2],
            7: [3, 2, 3, 2, 1, 2, 1, -1, 1],
            8: [0, 3, 2, 3, 2, 1, 2, 1, -1],
        }
        self.assertEqual(util.order(w), o)
    def test_pct_nonzero(self):
        self.assertEqual(self.w3x3.pct_nonzero, 29.62962962962963)
    def test_s0(self):
        self.assertEqual(self.w3x3.s0, 24.0)
    def test_s1(self):
        self.assertEqual(self.w3x3.s1, 48.0)
    def test_s2(self):
        self.assertEqual(self.w3x3.s2, 272.0)
    def test_s2array(self):
        s2a = np.array(
            [[16.0], [36.0], [16.0], [36.0], [64.0], [36.0], [16.0], [36.0], [16.0]]
        )
        NPTA3E(self.w3x3.s2array, s2a)
    def test_sd(self):
        self.assertEqual(self.w3x3.sd, 0.66666666666666663)
    def test_set_transform(self):
        w = util.lat2W(2, 2)
        self.assertEqual(w.transform, "O")
        self.assertEqual(w.weights[0], [1.0, 1.0])
        w.transform = "r"
        self.assertEqual(w.weights[0], [0.5, 0.5])
    def test_shimbel(self):
        d = {
            0: [-1, 1, 2, 1, 2, 3, 2, 3, 4],
            1: [1, -1, 1, 2, 1, 2, 3, 2, 3],
            2: [2, 1, -1, 3, 2, 1, 4, 3, 2],
            3: [1, 2, 3, -1, 1, 2, 1, 2, 3],
            4: [2, 1, 2, 1, -1, 1, 2, 1, 2],
            5: [3, 2, 1, 2, 1, -1, 3, 2, 1],
            6: [2, 3, 4, 1, 2, 3, -1, 1, 2],
            7: [3, 2, 3, 2, 1, 2, 1, -1, 1],
            8: [4, 3, 2, 3, 2, 1, 2, 1, -1],
        }
        self.assertEqual(util.shimbel(self.w3x3), d)
    def test_sparse(self):
        self.assertEqual(self.w3x3.sparse.nnz, 24)
    def test_trcW2(self):
        self.assertEqual(self.w3x3.trcW2, 24.0)
    def test_trcWtW(self):
        self.assertEqual(self.w3x3.trcWtW, 24.0)
    def test_trcWtW_WW(self):
        self.assertEqual(self.w3x3.trcWtW_WW, 48.0)
class TestWSP(unittest.TestCase):
    """Tests for the sparse :class:`WSP` weights object.

    Checks that a WSP built from a W's sparse matrix preserves id_order,
    n, and the matrix itself, and that the WSP-native summary properties
    (diagWtW_WW, trcWtW_WW, s0) match the known 3x3-lattice values.
    """
    def setUp(self):
        self.w = psopen(examples.get_path("sids2.gal")).read()
        self.wsp = WSP(self.w.sparse, self.w.id_order)
        w3x3 = util.lat2W(3, 3)
        self.w3x3 = WSP(w3x3.sparse)
    def test_WSP(self):
        # the sparse round trip must preserve ordering, size, and values
        self.assertEqual(self.w.id_order, self.wsp.id_order)
        self.assertEqual(self.w.n, self.wsp.n)
        np.testing.assert_array_equal(
            self.w.sparse.todense(), self.wsp.sparse.todense()
        )
    def test_diagWtW_WW(self):
        NPTA3E(
            self.w3x3.diagWtW_WW,
            np.array([4.0, 6.0, 4.0, 6.0, 8.0, 6.0, 4.0, 6.0, 4.0]),
        )
    def test_trcWtW_WW(self):
        self.assertEqual(self.w3x3.trcWtW_WW, 48.0)
    def test_s0(self):
        self.assertEqual(self.w3x3.s0, 24.0)
# Allow running this test module directly: `python test_weights.py`.
if __name__ == "__main__":
    unittest.main()
|
{"hexsha": "fd1832a57e71073fa84f4ec8754dbeae38205ff9", "size": 20248, "ext": "py", "lang": "Python", "max_stars_repo_path": "libpysal/weights/tests/test_weights.py", "max_stars_repo_name": "Kanahiro/dbf-df-translator", "max_stars_repo_head_hexsha": "6603ca1ac306203bf8c95e6545685c509324a438", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libpysal/weights/tests/test_weights.py", "max_issues_repo_name": "Kanahiro/dbf-df-translator", "max_issues_repo_head_hexsha": "6603ca1ac306203bf8c95e6545685c509324a438", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libpysal/weights/tests/test_weights.py", "max_forks_repo_name": "Kanahiro/dbf-df-translator", "max_forks_repo_head_hexsha": "6603ca1ac306203bf8c95e6545685c509324a438", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3449275362, "max_line_length": 85, "alphanum_fraction": 0.4254741209, "include": true, "reason": "import numpy", "num_tokens": 7955}
|
#
# created by Severin Dicks (IBSM, Freiburg)
#
#
import cupy as cp
import cudf
import cugraph
import anndata
import time
import numpy as np
import pandas as pd
import scipy
import math
from scipy import sparse
import seaborn as sns
import matplotlib.pyplot as plt
from cuml.manifold import TSNE
from cuml.cluster import KMeans
from cuml.decomposition import PCA
from cuml.linear_model import LinearRegression
def select_groups(labels, groups_order_subset='all'):
    """Build one boolean mask per category of `labels`.

    Parameters
    ----------
    labels : cudf.Series (categorical)
        Per-cell group assignment (e.g. cluster labels).
    groups_order_subset : 'all' or iterable
        Category names to keep; 'all' keeps every category.

    Returns
    -------
    tuple
        (selected category names as a host array, boolean cupy array of
        shape (n_selected_groups, n_cells)).
    """
    adata_obs_key = labels  # NOTE(review): unused — kept from the original
    groups_order = labels.cat.categories
    groups_masks = cp.zeros(
        (len(labels.cat.categories), len(labels.cat.codes)), dtype=bool
    )
    for iname, name in enumerate(labels.cat.categories.to_pandas()):
        # if the name is not found, fallback to index retrieval
        if labels.cat.categories[iname] in labels.cat.codes:
            mask = labels.cat.categories[iname] == labels.cat.codes
        else:
            mask = iname == labels.cat.codes
        groups_masks[iname] = mask.values
    groups_ids = list(range(len(groups_order)))
    if groups_order_subset != 'all':
        groups_ids = []
        # match requested names against the (assumed numeric) categories
        # NOTE(review): int(name) assumes category names are integer-like
        for name in groups_order_subset:
            groups_ids.append(
                cp.where(cp.array(labels.cat.categories.to_array().astype("int32")) == int(name))[0][0]
            )
        if len(groups_ids) == 0:
            # fallback to index retrieval
            groups_ids = cp.where(
                cp.in1d(
                    cp.arange(len(labels.cat.categories)).astype(str),
                    cp.array(groups_order_subset),
                )
            )[0]
            groups_ids = [groups_id.item() for groups_id in groups_ids]
        groups_masks = groups_masks[groups_ids]
        groups_order_subset = labels.cat.categories[groups_ids].to_array().astype(int)
    else:
        groups_order_subset = groups_order.to_array()
    return groups_order_subset, groups_masks
def rank_genes_groups(
    X,
    labels,  # louvain results
    var_names,
    groups=None,
    reference='rest',
    n_genes=100,
    **kwds,
):
    """
    Rank genes for characterizing groups.

    Fits a (multinomial) logistic regression on the GPU and ranks genes per
    group by the magnitude of their coefficients.

    Parameters
    ----------
    X : cupy.ndarray of shape (n_cells, n_genes)
        The cellxgene matrix to rank genes

    labels : cudf.Series of size (n_cells,)
        Observations groupings to consider

    var_names : cudf.Series of size (n_genes,)
        Names of genes in X

    groups : Iterable[str] (default: 'all')
        Subset of groups, e.g. ['g1', 'g2', 'g3'], to which comparison
        shall be restricted, or 'all' (default), for all groups.

    reference : str (default: 'rest')
        If 'rest', compare each group to the union of the rest of the group.
        If a group identifier, compare with respect to this group.

    n_genes : int (default: 100)
        The number of genes that appear in the returned tables.

    Returns
    -------
    tuple
        (scores, names, original_reference) — two numpy record arrays with
        one field per ranked group, plus the reference actually requested.
    """
    #### Wherever we see "adata.obs[groupby], we should just replace w/ the groups"
    import time
    start = time.time()
    # for clarity, rename variable
    if groups == 'all':
        groups_order = 'all'
    elif isinstance(groups, (str, int)):
        raise ValueError('Specify a sequence of groups')
    else:
        groups_order = list(groups)
        if isinstance(groups_order[0], int):
            groups_order = [str(n) for n in groups_order]
        if reference != 'rest' and reference not in set(groups_order):
            groups_order += [reference]
    if (
        reference != 'rest'
        and reference not in set(labels.cat.categories)
    ):
        cats = labels.cat.categories.tolist()
        raise ValueError(
            f'reference = {reference} needs to be one of groupby = {cats}.'
        )
    groups_order, groups_masks = select_groups(labels, groups_order)
    original_reference = reference
    n_vars = len(var_names)
    # for clarity, rename variable
    n_genes_user = n_genes
    # make sure indices are not OoB in case there are less genes than n_genes
    if n_genes_user > X.shape[1]:
        n_genes_user = X.shape[1]
    # in the following, n_genes is simply another name for the total number of genes
    n_genes = X.shape[1]
    n_groups = groups_masks.shape[0]
    # ns[i] = number of cells in group i
    ns = cp.zeros(n_groups, dtype=int)
    for imask, mask in enumerate(groups_masks):
        ns[imask] = cp.where(mask)[0].size
    if reference != 'rest':
        ireference = cp.where(groups_order == reference)[0][0]
    reference_indices = cp.arange(n_vars, dtype=int)
    rankings_gene_scores = []
    rankings_gene_names = []
    # Perform LogReg
    # if reference is not set, then the groups listed will be compared to the rest
    # if reference is set, then the groups listed will be compared only to the other groups listed
    from cuml.linear_model import LogisticRegression
    # NOTE(review): this overwrites the user-supplied `reference`; the
    # original value is still returned as `original_reference`.
    reference = groups_order[0]
    if len(groups) == 1:
        raise Exception('Cannot perform logistic regression on a single cluster.')
    grouping_mask = labels.astype('int').isin(cudf.Series(groups_order).astype('int'))
    grouping = labels.loc[grouping_mask]
    X = X[grouping_mask.values, :]# Indexing with a series causes issues, possibly segfault
    # NOTE(review): `y` indexes `labels` by the *values* of `grouping` and is
    # never used afterwards (clf.fit uses `grouping`) — verify intent.
    y = labels.loc[grouping]
    clf = LogisticRegression(**kwds)
    clf.fit(X.get(), grouping.to_array().astype('float32'))
    # coefficient matrix: one column (after transpose, one row) per class
    scores_all = cp.array(clf.coef_).T
    for igroup, group in enumerate(groups_order):
        if len(groups_order) <= 2:  # binary logistic regression
            scores = scores_all[0]
        else:
            scores = scores_all[igroup]
        # top-n selection: partial sort, then order the selected slice
        partition = cp.argpartition(scores, -n_genes_user)[-n_genes_user:]
        partial_indices = cp.argsort(scores[partition])[::-1]
        global_indices = reference_indices[partition][partial_indices]
        rankings_gene_scores.append(scores[global_indices].get()) ## Shouldn't need to take this off device
        rankings_gene_names.append(var_names[global_indices].to_pandas())
        if len(groups_order) <= 2:
            break
    groups_order_save = [str(g) for g in groups_order]
    if (len(groups) == 2):
        groups_order_save = [g for g in groups_order if g != reference]
    print("Ranking took (GPU): " + str(time.time() - start))
    start = time.time()
    # pack per-group results into numpy record arrays (host side)
    scores = np.rec.fromarrays(
        [n for n in rankings_gene_scores],
        dtype=[(rn, 'float32') for rn in groups_order_save],
    )
    names = np.rec.fromarrays(
        [n for n in rankings_gene_names],
        dtype=[(rn, 'U50') for rn in groups_order_save],
    )
    print("Preparing output np.rec.fromarrays took (CPU): " + str(time.time() - start))
    print("Note: This operation will be accelerated in a future version")
    return scores, names, original_reference
def leiden(adata, resolution=1.0):
    """
    Performs Leiden Clustering using cuGraph

    Parameters
    ----------
    adata : annData object with 'neighbors' field.

    resolution : float, optional (default: 1)
        A parameter value controlling the coarseness of the clustering.
        Higher values lead to more clusters.
    """
    # Build a cuGraph graph from the kNN connectivity matrix (CSR layout).
    conn = adata.obsp["connectivities"]
    row_offsets = cudf.Series(conn.indptr)
    col_indices = cudf.Series(conn.indices)
    graph = cugraph.Graph()
    # Older cuGraph releases expose `add_adj_list`; newer ones use
    # `from_cudf_adjlist`.
    if hasattr(graph, "add_adj_list"):
        graph.add_adj_list(row_offsets, col_indices, None)
    else:
        graph.from_cudf_adjlist(row_offsets, col_indices, None)

    # Run Leiden community detection on the GPU.
    parts, _ = cugraph.leiden(graph, resolution=resolution)

    # Bring the partition labels back to host in vertex order and store
    # them as a categorical observation column.
    ordered = parts.to_pandas().sort_values("vertex")[["partition"]]
    labels = ordered.to_numpy().ravel().astype(str)
    adata.obs["leiden"] = pd.Categorical(labels)
def louvain(adata, resolution=1.0):
    """
    Performs Louvain Clustering using cuGraph

    Parameters
    ----------
    adata : annData object with 'neighbors' field.

    resolution : float, optional (default: 1)
        A parameter value controlling the coarseness of the clustering.
        Higher values lead to more clusters.
    """
    # Build a cuGraph graph from the kNN connectivity matrix (CSR layout).
    conn = adata.obsp["connectivities"]
    row_offsets = cudf.Series(conn.indptr)
    col_indices = cudf.Series(conn.indices)
    graph = cugraph.Graph()
    # Older cuGraph releases expose `add_adj_list`; newer ones use
    # `from_cudf_adjlist`.
    if hasattr(graph, "add_adj_list"):
        graph.add_adj_list(row_offsets, col_indices, None)
    else:
        graph.from_cudf_adjlist(row_offsets, col_indices, None)

    # Run Louvain community detection on the GPU.
    parts, _ = cugraph.louvain(graph, resolution=resolution)

    # Bring the partition labels back to host in vertex order and store
    # them as a categorical observation column.
    ordered = parts.to_pandas().sort_values("vertex")[["partition"]]
    labels = ordered.to_numpy().ravel().astype(str)
    adata.obs["louvain"] = pd.Categorical(labels)
def kmeans(adata, n_clusters=8, random_state=42):
    """
    KMeans is a basic but powerful clustering method which is optimized via
    Expectation Maximization.

    Parameters
    ----------
    adata: adata object with `.obsm['X_pca']`

    n_clusters: int (default:8)
        Number of clusters to compute

    random_state: float (default: 42)
        if you want results to be the same when you restart Python, select a
        state.
    """
    # Fit cuML KMeans on the PCA embedding and store string labels.
    model = KMeans(n_clusters=n_clusters, random_state=random_state)
    model.fit(adata.obsm["X_pca"])
    adata.obs["kmeans"] = model.labels_.astype(str)
def pca(adata, n_comps=50):
    """
    Performs PCA using the cuML decomposition function

    Parameters
    ----------
    adata : annData object

    n_comps: int (default: 50)
        Number of principal components to compute. Defaults to 50

    Returns
    -------
    adds fields to `adata`:
        `.obsm['X_pca']`
            PCA representation of data.
        `.uns['pca']['variance_ratio']`
            Ratio of explained variance.
        `.uns['pca']['variance']`
            Explained variance, equivalent to the eigenvalues of the
            covariance matrix.
    """
    model = PCA(n_components=n_comps, output_type="numpy")
    adata.obsm["X_pca"] = model.fit_transform(adata.X)
    # Keep the spectrum alongside the embedding for downstream inspection.
    adata.uns["pca"] = {
        "variance": model.explained_variance_,
        "variance_ratio": model.explained_variance_ratio_,
    }
def tsne(adata, n_pcs, perplexity=30, early_exaggeration=12, learning_rate=1000):
    """
    Performs t-distributed stochastic neighborhood embedding (tSNE) using the
    cuML library. Variable descriptions adapted from scanpy; defaults match.

    Parameters
    ----------
    adata: adata object with `.obsm['X_pca']`

    n_pcs: int
        use this many PCs

    perplexity: float (default: 30)
        Related to the number of nearest neighbors used in other manifold
        learning algorithms. Larger datasets usually require a larger
        perplexity; consider values between 5 and 50.

    early_exaggeration : float (default: 12)
        Controls how tight natural clusters in the original space are in the
        embedded space and how much space will be between them.

    learning_rate : float (default: 1000)
        Should be between 100 and 1000. If the cost function increases during
        initial optimization, the early exaggeration factor or the learning
        rate might be too high.
    """
    embedder = TSNE(
        perplexity=perplexity,
        early_exaggeration=early_exaggeration,
        learning_rate=learning_rate,
    )
    # Embed only the leading n_pcs principal components.
    pcs = adata.obsm["X_pca"][:, :n_pcs]
    adata.obsm["X_tsne"] = embedder.fit_transform(pcs)
def diffmap(adata, n_comps=15, neighbors_key = None, sort = 'decrease',density_normalize = True):
    """
    Diffusion maps has been proposed for visualizing single-cell data.

    This is a reimplementation of scanpys function.

    The width ("sigma") of the connectivity kernel is implicitly determined by
    the number of neighbors used to compute the single-cell graph in
    :func:`~scanpy.pp.neighbors`.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    n_comps : int, optional (default: 15)
        The number of dimensions of the representation.

    neighbors_key : typing.Union[str, NoneType], optional (default: None)
        If not specified, diffmap looks at .obsp['connectivities'] for neighbors connectivities
        If specified, diffmap looks at .obsp['neighbors_key_ connectivities'] for neighbors connectivities

    sort: string (default:'decrease')
        Leave as is for the same behavior as sc.tl.diffmap

    density_normalize: boolean(default: True)
        Leave as is for the same behavior as sc.tl.diffmap

    Returns
    ----------
    updates `adata` with the following fields.

    `X_diffmap` : :class:`numpy.ndarray` (`adata.obsm`)
        Diffusion map representation of data, which is the right eigen basis of
        the transition matrix with eigenvectors as columns.

    `diffmap_evals` : :class:`numpy.ndarray` (`adata.uns`)
        Array of size (number of eigen vectors).
        Eigenvalues of transition matrix.
    """
    from scipy.sparse import issparse
    import cupyx.scipy.sparse.linalg
    import cupyx.scipy.sparse
    import cupyx as cpx
    if neighbors_key:
        connectivities = adata.obsp[neighbors_key+"_connectivities"]
    else:
        connectivities = adata.obsp["connectivities"]
    # move the affinity matrix onto the GPU, sparse if possible
    if issparse(connectivities):
        W = cp.sparse.csr_matrix(connectivities, dtype=cp.float32)
    else:
        W = cp.asarray(connectivities)
    if density_normalize:
        # q[i] is an estimate for the sampling density at point i
        # it's also the degree of the underlying graph
        q = cp.asarray(W.sum(axis=0))
        if not cpx.scipy.sparse.issparse(W):
            Q = cp.diag(1.0 / q)
        else:
            Q = cpx.scipy.sparse.spdiags(1.0 / q, 0, W.shape[0], W.shape[0])
        K = Q @ W @ Q
    else:
        K = W
    # z[i] is the square root of the row sum of K
    z = cp.sqrt(cp.asarray(K.sum(axis=0)))
    if not cpx.scipy.sparse.issparse(K):
        Z = cp.diag(1.0 / z)
    else:
        Z = cpx.scipy.sparse.spdiags(1.0 / z, 0, K.shape[0], K.shape[0])
    # symmetric conjugation of the transition matrix so eigsh applies
    matrix = Z @ K @ Z
    if n_comps == 0:
        evals, evecs = cpx.scipy.sparse.linalg.eigsh(matrix)
    else:
        n_comps = min(matrix.shape[0] - 1, n_comps)
        # ncv = max(2 * n_comps + 1, int(np.sqrt(matrix.shape[0])))
        ncv = None
        which = 'LM' if sort == 'decrease' else 'SM'
        # it pays off to increase the stability with a bit more precision
        matrix = matrix.astype(cp.float64)
        evals, evecs = cpx.scipy.sparse.linalg.eigsh(
            matrix, k=n_comps, which=which, ncv=ncv
        )
        evals, evecs = evals.astype(cp.float32), evecs.astype(cp.float32)
    if sort == 'decrease':
        # eigsh returns ascending order; flip to largest-first
        evals = evals[::-1]
        evecs = evecs[:, ::-1]
    # copy results back to host for storage on the AnnData object
    adata.uns["diffmap_evals"] = evals.get()
    adata.obsm["X_diffmap"] = evecs.get()
def plt_scatter(cudata, x, y, save = None, show =True, dpi =300):
    """
    Scatter plot.

    Wraps :func:`seaborn.scatterplot` for :class:`~cunnData.cunnData`. This
    plotting function so far is really basic and doesn't include all the
    features of sc.pl.scatter.

    Parameters
    ----------
    cudata
        cunnData object
    x
        Key for accessing variables of fields of `.obs` (x-axis).
    y
        Key for accessing variables of fields of `.obs` (y-axis).
    save: str (default: None)
        File name to save the plot as in ./figures; None disables saving.
    show: boolean (default: True)
        If False the figure is closed after (optionally) saving.
    dpi: int (default: 300)
        The resolution in dots per inch for save.

    Returns
    -------
    nothing
    """
    fig, ax = plt.subplots()
    # Bug fix: draw explicitly on the freshly created axes instead of
    # relying on the implicit "current axes" state; the docstring also
    # wrongly advertised this as a violin plot.
    sns.scatterplot(data=cudata.obs, x=x, y=y, color='k', ax=ax)
    if save:
        os.makedirs("./figures/", exist_ok=True)
        fig_path = "./figures/" + save
        plt.savefig(fig_path, dpi=dpi, bbox_inches='tight')
    if show is False:
        plt.close()
def plt_violin(cudata, key, group_by=None, size =1, save = None, show =True, dpi =300):
    """
    Violin plot.

    Wraps :func:`seaborn.violinplot` for :class:`~cunnData.cunnData`. This
    plotting function so far is really basic and doesn't include all the
    features of sc.pl.violin.

    Parameters
    ----------
    cudata
        cunnData object
    key
        Key for accessing variables of fields of `.obs`.
    group_by
        The key of the observation grouping to consider (e.g. batches).
    size
        Point size for the overlaid stripplot; 0 disables the stripplot.
    save: str (default: None)
        File name to save the plot as in ./figures; None disables saving.
    show: boolean (default: True)
        If False the figure is closed after (optionally) saving.
    dpi: int (default: 300)
        The resolution in dots per inch for save.

    Returns
    -------
    nothing
    """
    fig, ax = plt.subplots()
    ax = sns.violinplot(data=cudata.obs, y=key, scale='width', x=group_by,
                        inner=None, ax=ax)
    if size:
        # Bug fix: the point size was hard-coded to 1, silently ignoring
        # the `size` parameter this function advertises.
        ax = sns.stripplot(data=cudata.obs, y=key, x=group_by, color='k',
                           size=size, dodge=True, jitter=True, ax=ax)
    if save:
        os.makedirs("./figures/", exist_ok=True)
        fig_path = "./figures/" + save
        plt.savefig(fig_path, dpi=dpi, bbox_inches='tight')
    if show is False:
        plt.close()
|
{"hexsha": "1018f18126bbc162ff17e2b9a0f516acf53962e8", "size": 17904, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/scanpy_gpu_funcs.py", "max_stars_repo_name": "metzgerpatrick/rapids_singlecell", "max_stars_repo_head_hexsha": "319dabba5e6b15eb24e8ebc1b95e0d309b96bcc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/scanpy_gpu_funcs.py", "max_issues_repo_name": "metzgerpatrick/rapids_singlecell", "max_issues_repo_head_hexsha": "319dabba5e6b15eb24e8ebc1b95e0d309b96bcc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/scanpy_gpu_funcs.py", "max_forks_repo_name": "metzgerpatrick/rapids_singlecell", "max_forks_repo_head_hexsha": "319dabba5e6b15eb24e8ebc1b95e0d309b96bcc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4307692308, "max_line_length": 412, "alphanum_fraction": 0.6424262735, "include": true, "reason": "import numpy,import scipy,from scipy,import cupy", "num_tokens": 4378}
|
from difflib import SequenceMatcher
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib as mpl
import argparse, math, random, gzip, pickle, types
from collections import defaultdict
import os
import adjustText
# Change following routines for other environments:
L_init = 5  # Initiation unit
dL = 5  # elongation unit (also means CG unit)
transcription_time = 1
dt = transcription_time * dL  # Folding time for each elongation step (0.1 s/nt)
population_size_limit = 100  # maximum type of strands in the pool
MULTI_PROCESS = 32
km_start = 8
km_end = 13
km_interval = 1
SD_start, SD_end = 21, 28
k_pre = 1e11
equi_p_unbound = [0.0414220, 0.0612670, 0.0839040, 0.9764600, 0.9300200, 0.0861740, 0.2976000]
NUM_COLORS = 17
# The "Tableau 20" palette, specified as 0-255 RGB triples.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Rescale every channel in place to the [0, 1] floats matplotlib accepts.
for i, (r, g, b) in enumerate(tableau20):
    tableau20[i] = (r / 255., g / 255., b / 255.)
def similar(a, b):
    """Return the difflib similarity ratio between `a` and `b` (0.0 to 1.0)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
def localss_population_processing(input_prefix):
    """
    Aggregate per-structure populations from `<input_prefix>.dat` into
    per-SD-window populations over transcript time.

    The input alternates between header records ("# <length> ...") and
    structure records ("<dot-bracket> <population>").  For every structure
    long enough to cover the SD window [SD_start, SD_end), its population
    is accumulated under the window's local sub-structure at the current
    time (header length minus 35).  Results are written to
    `<input_prefix>/local_population.dat` as triplets of lines:
    local structure, its time points, its populations.

    Parameters
    ----------
    input_prefix : str
        Path prefix; `<input_prefix>.dat` is read and the directory
        `<input_prefix>/` is created (if missing) to hold the output.
    """
    # np.float was a deprecated alias of the builtin float and was removed
    # in NumPy 1.24 — use float directly.
    local_structure_collection_data = defaultdict(lambda: defaultdict(float))
    if not os.path.exists(input_prefix):
        os.makedirs(input_prefix)
    # Read-only access is sufficient here (was opened 'r+').
    with open(input_prefix + '.dat', 'r') as folding_input:
        sss = [(x.split()[0], float(x.split()[1]))
               for x in folding_input.readlines()]
    for ss in sss:
        if ss[0].startswith('#'):
            # Header record: transcript time = reported length - 35.
            # NOTE(review): assumes the file starts with a header record;
            # otherwise `time` is referenced before assignment.
            time = ss[1] - 35
        else:
            if len(ss[0]) >= SD_end:
                SD_ss = ss[0][SD_start:SD_end]
                local_structure_collection_data[SD_ss][time] += ss[1]
    with open(input_prefix + '/local_population' + '.dat', 'w+') as local_output:
        for local_ss in local_structure_collection_data.keys():
            local_output.write(local_ss + '\n')
            local_output.write(' '.join(map(str, local_structure_collection_data[local_ss].keys())) + '\n')
            local_output.write(' '.join(map(str, local_structure_collection_data[local_ss].values())) + '\n')
def data_ploting(ax_punbound, input_prefix, label, start_index, end_index, color_rank):
    """
    Plot the population-weighted average p_unbound of bases
    [start_index, end_index) against transcript time, and dump the series
    to `<input_prefix>/p_unbound/base<start>_<end>.dat`.

    Returns the matplotlib text object placed at the end of the curve so
    callers can collect labels for adjustText de-overlapping.
    """
    # np.float was removed in NumPy 1.24; plain float is equivalent.
    data_punbound = defaultdict(float)
    if not os.path.exists(input_prefix):
        os.makedirs(input_prefix)
    if not os.path.exists(input_prefix + '/p_unbound'):
        os.makedirs(input_prefix + '/p_unbound')
    # Read-only access is sufficient here (was opened 'r+').
    with open(input_prefix + '.dat', 'r') as folding_input:
        sss = [(x.split()[0], float(x.split()[1]))
               for x in folding_input.readlines()]
    for ss in sss:
        if ss[0].startswith('#'):
            time = ss[1] - 35
        else:
            if len(ss[0]) >= end_index:
                target_ss = ss[0][start_index:end_index]
                # '.' marks an unbound base; weight the fraction of unbound
                # bases in the window by the structure's population.
                data_punbound[time] += ss[1] / (end_index - start_index) * target_ss.count('.')
    data_plot = np.array([list(data_punbound.keys()), list(data_punbound.values())])
    # NOTE(review): indexing [210] assumes at least 211 time points — confirm.
    ax_punbound.plot(data_plot[0][:210], data_plot[1][:210], color=tableau20[color_rank % 20])
    tt = plt.text(data_plot[0][210], data_plot[1][210], label, fontsize=14, color=tableau20[color_rank % 20])
    # Context manager guarantees the output file is closed even on error.
    with open(input_prefix + f'/p_unbound/base{start_index}_{end_index}.dat', 'w') as f:
        for d in data_punbound.items():
            f.write(f'{d[0]} {d[1]}\n')
    return tt
def data_ploting_equ(ax_punbound, mut, input_prefix, label, start_index, end_index, color_rank):
    """
    Plot the equilibrium reference curve: the average p_unbound of bases
    [start_index, end_index) computed from `<mut>/summary_pairs.dat`, and
    dump the series to `<input_prefix>/p_unbound/base<start>_<end>.dat`.

    Returns the matplotlib text object placed at the end of the curve so
    callers can collect labels for adjustText de-overlapping.
    """
    # np.float was removed in NumPy 1.24; plain float is equivalent.
    data_punbound = defaultdict(float)
    if not os.path.exists(input_prefix):
        os.makedirs(input_prefix)
    if not os.path.exists(input_prefix + '/p_unbound'):
        os.makedirs(input_prefix + '/p_unbound')
    with open(mut + '/summary_pairs.dat', 'r') as folding_input:  # NOTE: Format here need to be unified!
        pairs_data = [list(map(float, x.split())) for x in folding_input.readlines()]
    for dat in pairs_data:
        if len(dat) >= end_index:
            time = len(dat) - 35  # NOTE: should be len(dat) for later version
            data_punbound[time] += np.sum(dat[start_index:end_index]) / (end_index - start_index)
    data_plot = np.array([list(data_punbound.keys()), list(data_punbound.values())])
    # NOTE(review): indexing [210] assumes at least 211 time points — confirm.
    ax_punbound.plot(data_plot[0][:210], data_plot[1][:210], color=tableau20[color_rank % 20])
    tt = plt.text(data_plot[0][210], data_plot[1][210], label, fontsize=14, color=tableau20[color_rank % 20])
    # Context manager guarantees the output file is closed even on error.
    with open(input_prefix + f'/p_unbound/base{start_index}_{end_index}.dat', 'w') as f:
        for d in data_punbound.items():
            f.write(f'{d[0]} {d[1]}\n')
    return tt
if __name__ == '__main__':
    # --- global matplotlib styling --------------------------------------
    # plt.style.use('bmh')
    # mpl.rcParams['axes.color_cycle'] = colors
    mpl.rcParams['axes.titlesize'] = 17
    mpl.rcParams['axes.titleweight'] = 10
    params = {
        'axes.labelsize': 20,
        'legend.fontsize': 14,
        'xtick.labelsize': 14,
        'ytick.labelsize': 14,
        'text.usetex': False,
        'figure.figsize': [12, 9]
    }
    mpl.rcParams.update(params)
    # --- command line: sequence name plus an optional working path ------
    parser = argparse.ArgumentParser()
    parser.add_argument('sequence', type=str, help="RNA sequence (one line)")
    parser.add_argument('--working-path', type=str, default='.', help="Path to store outputs")
    # parser.add_argument('--k', type=np.float, default=1., \
    #                     help="pre exponential factor")
    clargs = parser.parse_args()
    with open('sequences/' + clargs.sequence + '.in', 'r') as sequence_file:
        full_sequence = sequence_file.readline().rstrip('\n')
    PATH = clargs.working_path
    mut = clargs.sequence
    # Start IO
    # Figure 1: SD-window-averaged p_unbound vs transcript length, one
    # curve per transcription-rate constant k plus an equilibrium curve.
    fig = plt.figure()
    fig.add_axes()
    cm = plt.get_cmap('rainbow')
    ax_punbound = fig.add_subplot(111)
    # NOTE(review): Axes.set_color_cycle was deprecated/removed in newer
    # matplotlib (replaced by set_prop_cycle) — confirm the pinned version.
    ax_punbound.set_color_cycle([cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
    # ax_localpop.set_title(f'Average p_unbound for base[-9](G) {PATH}')
    plt.text(100, 1.08, f'{clargs.sequence} -- '+r'Average SD $p_{unbound}$', fontsize=17, ha="center")
    # Minimalist axes: hide the frame, keep ticks bottom/left only.
    ax_punbound.spines["top"].set_visible(False)
    ax_punbound.spines["bottom"].set_visible(False)
    ax_punbound.spines["right"].set_visible(False)
    ax_punbound.spines["left"].set_visible(False)
    ax_punbound.get_xaxis().tick_bottom()
    ax_punbound.get_yaxis().tick_left()
    plt.text(100, 0.12, 'Transcript length', fontsize=14, ha="center", color="0.3")
    ax_punbound.set_ylabel(r'$p_{unbound}$', color="0.3")
    ax_punbound.grid(axis='y', color="0.9", linestyle='--', linewidth=1)
    # ax_localpop.set_yscale('log')
    # ax_localpop.set_xscale('log')
    ax_punbound.set_xlim(-10, 200)
    ax_punbound.set_ylim(0.2, 1.0)
    color_rank = 0
    labels = []
    # One curve per k in {10^km_start, ..., 10^(km_end-1)}; data files are
    # expected under PATH/k<k>.dat.
    for e_k in range(km_start, km_end, km_interval):
        k = 1*10**e_k
        # print('k= %.2g'%k)
        prefix = PATH + '/k' + '%.2g' % k
        label = r'$k_T$ = ' + '%.2g' % (k_pre/k) + r' nt/s'
        localss_population_processing(prefix)
        labels.append(data_ploting(ax_punbound, prefix, label, SD_start, SD_end, color_rank))
        color_rank += 1
    '''
    print('k= inf')
    data = defaultdict(np.float)
    local_structure_collection_data = defaultdict(lambda: defaultdict(np.float))
    prefix = PATH + '/k' + 'inf'
    label = r'$k_T/k_f$ = ' + '0' + r' nt/s'
    localss_population_processing(prefix)
    color_rank += 1
    labels.append(data_ploting(ax_punbound, prefix, label, SD_start, SD_end, color_rank))
    '''
    prefix = PATH + '/equilibrium' # Need to be copied here
    label = 'Equilibrium'
    labels.append(data_ploting_equ(ax_punbound, mut, prefix, label, SD_start, SD_end, color_rank+1)) # Another format
    # ax_punbound.legend(loc='best')
    # De-overlap the end-of-curve text labels.
    adjustText.adjust_text(labels, arrowprops=dict(arrowstyle='-', color='0.7'))
    # NOTE(review): string "off"/"on" values for tick_params were replaced
    # by booleans in newer matplotlib — confirm compatibility.
    plt.tick_params(axis="both", which="both", bottom="off", top="off",
                    labelbottom="on", left="off", right="off", labelleft="on")
    # fig.tight_layout()
    fig.savefig(PATH + f'/p_unbound_SD_k_tuning.png', bbox_inches="tight")
    # plt.show()
    # Figure set 2: one figure per individual base of the SD window,
    # same layout as above.
    for base_position in range(0, SD_end-SD_start): # note: Relative to SD_start
        base_gene_position = base_position+SD_start-35
        fig = plt.figure()
        fig.add_axes()
        ax_punbound = fig.add_subplot(111)
        ax_punbound.set_color_cycle([cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)])
        # ax_localpop.set_title(f'Average p_unbound for base[-9](G) {PATH}')
        plt.text(100, 1.08, f'{PATH}: base [{base_gene_position}] '+r'$p_{unbound}$', fontsize=17, ha="center")
        ax_punbound.spines["top"].set_visible(False)
        ax_punbound.spines["bottom"].set_visible(False)
        ax_punbound.spines["right"].set_visible(False)
        ax_punbound.spines["left"].set_visible(False)
        ax_punbound.get_xaxis().tick_bottom()
        ax_punbound.get_yaxis().tick_left()
        plt.text(100, -0.08, 'Transcript length', fontsize=14, ha="center", color="0.3")
        ax_punbound.set_ylabel(r'$p_{unbound}$', color="0.3")
        ax_punbound.grid(axis='y', color="0.9", linestyle='--', linewidth=1)
        # ax_localpop.set_yscale('log')
        # ax_localpop.set_xscale('log')
        ax_punbound.set_xlim(-10, 200)
        ax_punbound.set_ylim(0, 1.0)
        color_rank = 0
        labels = []
        for e_k in range(km_start, km_end, km_interval):
            k = 1 * 10 ** e_k
            # print('k= %.2g' % k)
            prefix = PATH + '/k' + '%.2g' % k
            label = r'$k_T$ = ' + '%.2g' % (k_pre/k) + r' nt/s'
            try:
                localss_population_processing(prefix)
                labels.append(data_ploting(ax_punbound, prefix, label, SD_start+base_position, SD_start+base_position+1, color_rank))
                color_rank += 1
            # NOTE(review): bare except silently skips any k whose data is
            # missing or malformed — consider catching OSError explicitly.
            except:
                continue
        '''
        print('k= inf')
        data = defaultdict(np.float)
        local_structure_collection_data = defaultdict(lambda: defaultdict(np.float))
        prefix = PATH + '/k' + 'inf'
        label = r'$k_T/k_f$ = ' + '0'
        localss_population_processing(prefix)
        color_rank += 1
        labels.append(data_ploting(ax_punbound, prefix, label, SD_start+base_position, SD_start+base_position+1, color_rank))
        '''
        prefix = PATH + '/equilibrium' # Need to be copied here
        label = 'Equilibrium'
        labels.append(data_ploting_equ(ax_punbound, mut, prefix, label, SD_start+base_position, SD_start+base_position+1, color_rank + 1)) # Another format
        # ax_punbound.legend(loc='best')
        adjustText.adjust_text(labels, arrowprops=dict(arrowstyle='-', color='0.7'))
        plt.tick_params(axis="both", which="both", bottom="off", top="off",
                        labelbottom="on", left="off", right="off", labelleft="on")
        fig.savefig(PATH + f'/p_unbound_base[{base_gene_position}]_k_tuning_linear.png', bbox_inches="tight")
        # plt.show()
    exit()
|
{"hexsha": "2f12c8527cd1f000d26491e6863834fc796a0997", "size": 11394, "ext": "py", "lang": "Python", "max_stars_repo_path": "Analysis/p_unbound_analysis.py", "max_stars_repo_name": "Utenaq/GenoFold", "max_stars_repo_head_hexsha": "31b626ebf3d08b16b1cdf6544c36bbea75147719", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Analysis/p_unbound_analysis.py", "max_issues_repo_name": "Utenaq/GenoFold", "max_issues_repo_head_hexsha": "31b626ebf3d08b16b1cdf6544c36bbea75147719", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Analysis/p_unbound_analysis.py", "max_forks_repo_name": "Utenaq/GenoFold", "max_forks_repo_head_hexsha": "31b626ebf3d08b16b1cdf6544c36bbea75147719", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3346303502, "max_line_length": 156, "alphanum_fraction": 0.6237493418, "include": true, "reason": "import numpy", "num_tokens": 3220}
|
-- `f` is an uninterpreted function on `Nat`; the `simp` lemma `fEq`
-- rewrites `f x` to `x`, subject to the side condition `x ≠ 0` that the
-- simplifier's discharger must prove.
constant f : Nat → Nat
@[simp] axiom fEq (x : Nat) (h : x ≠ 0) : f x = x
-- Inline custom discharger; `trace_state` prints the side-condition goal
-- before it is closed.
example (x : Nat) (h : x ≠ 0) : f x = x + 0 := by
  simp (discharger := trace_state; exact (fun h' => h') h)
example (x y : Nat) (h1 : x ≠ 0) (h2 : y ≠ 0) (h3 : x = y) : f x = f y + 0 := by
  simp (discharger := trace_state; assumption)
  assumption
-- `discharger` and its abbreviation `disch` are interchangeable.
example (x y : Nat) (h1 : x ≠ 0) (h2 : y ≠ 0) (h3 : x = y) : f x = f y + 0 := by
  simp (discharger := assumption)
  assumption
example (x y : Nat) (h1 : x ≠ 0) (h2 : y ≠ 0) (h3 : x = y) : f x = f y + 0 := by
  simp (disch := assumption)
  assumption
-- The discharger option is also honoured inside `conv` blocks, rewriting
-- one side of the goal at a time.
example (x y : Nat) (h1 : x ≠ 0) (h2 : y ≠ 0) (h3 : x = y) : f x = f y + 0 := by
  conv => lhs; simp (disch := assumption)
  trace_state
  conv => rhs; simp (disch := assumption)
  trace_state
  assumption
|
{"author": "Kha", "repo": "lean4-nightly", "sha": "b4c92de57090e6c47b29d3575df53d86fce52752", "save_path": "github-repos/lean/Kha-lean4-nightly", "path": "github-repos/lean/Kha-lean4-nightly/lean4-nightly-b4c92de57090e6c47b29d3575df53d86fce52752/tests/lean/simpDisch.lean"}
|
#!/usr/bin/python
import numpy as np
import scipy
import sys
import random
import time
from math import pi ,sqrt, cos, sin
# Seed the RNG (kept for compatibility; the lattice construction below is
# fully deterministic and does not draw random numbers).
random.seed(time.time())

# Command-line arguments: index into the lattice-spacing table, and the
# number of unit cells per dimension.
M = int(float(sys.argv[1]))
nrepeat = int(sys.argv[2])
nMol = nrepeat*nrepeat*nrepeat
nAtoms = nMol

# Lattice spacings; d[0] is a placeholder so that M indexes 1..6 directly.
d = [0.0,3.11,4.0,4.48,4.93,5.31,5.65]
d0 = d[M]

# Write a minimal LAMMPS data file with one atom per cubic cell, each atom
# centred in its cell.  The context manager guarantees the file is closed
# even if a write fails (the original left the handle open on error).
with open("initialEqu.txt", 'w') as pdb:
    pdb.write("LAMMPS 'data.' description \n")
    pdb.write("\n")
    pdb.write(" %d atoms\n"% (nAtoms))
    pdb.write("\n")
    pdb.write(" 1 atom types\n")
    pdb.write("\n")
    pdb.write(" 0.0 %1.2f xlo xhi\n" % (d0*nrepeat) )
    pdb.write(" 0.0 %1.2f ylo yhi\n" % (d0*nrepeat) )
    pdb.write(" 0.0 %1.2f zlo zhi\n" % (d0*nrepeat) )
    pdb.write("\n\n")
    pdb.write("Atoms\n")
    pdb.write("\n")
    natom = 1
    nmol = 1
    for kx in range(nrepeat):
        for ky in range(nrepeat):
            for kz in range(nrepeat):
                coord = [d0/2.0+kx*d0,d0/2.0+ky*d0,d0/2.0+kz*d0]
                pdb.write(" %d %d 1 %1.10f %1.4f %1.4f %1.4f\n" % (natom,nmol,0.0,coord[0],coord[1],coord[2]) )
                natom += 1
                nmol += 1
|
{"hexsha": "aafc332e1f9e182025aa2ada7ef25926b65f6cf1", "size": 1059, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/study.cases/LAMMPS/setup/model/writeInitEqu.py", "max_stars_repo_name": "JonathanLehner/korali", "max_stars_repo_head_hexsha": "90f97d8e2fed2311f988f39cfe014f23ba7dd6cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2018-07-26T07:20:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T10:23:12.000Z", "max_issues_repo_path": "examples/study.cases/LAMMPS/setup/model/writeInitEqu.py", "max_issues_repo_name": "JonathanLehner/korali", "max_issues_repo_head_hexsha": "90f97d8e2fed2311f988f39cfe014f23ba7dd6cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 212, "max_issues_repo_issues_event_min_datetime": "2018-09-21T10:44:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T14:33:05.000Z", "max_forks_repo_path": "examples/study.cases/LAMMPS/setup/model/writeInitEqu.py", "max_forks_repo_name": "JonathanLehner/korali", "max_forks_repo_head_hexsha": "90f97d8e2fed2311f988f39cfe014f23ba7dd6cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2018-07-25T15:00:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T14:19:46.000Z", "avg_line_length": 19.6111111111, "max_line_length": 116, "alphanum_fraction": 0.5807365439, "include": true, "reason": "import numpy,import scipy", "num_tokens": 427}
|
import gc
import os
import tqdm
import cv2
import torch
import numpy as np
import pandas as pd
import segmentation_models_pytorch as smp
import pickle
from torch.utils.data import DataLoader
from clouds.models import Pretrained
from clouds.io import CloudDataset, ClassificationCloudDataset
from clouds.inference import PseudoLabeler
from clouds.experiments.utils import get_validation_augmentation, \
get_preprocessing, setup_train_and_sub_df
def main(config):
    """
    Creates classification pseudo-labels for the test set.

    Builds the test dataset/loader for either segmentation or
    classification models, instantiates one model per configured encoder,
    and writes a pseudo-label dataframe via
    ``PseudoLabeler.create_clf_pseudo_df``.

    Args:
        config (dict): parsed YAML configuration; must contain
            ``train_csv_path``, ``sample_sub_csv_path``, ``mode``
            ("segmentation" or "classification"), ``model_names``,
            ``checkpoint_paths``, ``io_params``, ``pseudo_params`` and
            ``hard_labels_params``.

    Returns:
        None

    Raises:
        ValueError: if ``config["mode"]`` is not a recognized mode.
    """
    torch.cuda.empty_cache()
    gc.collect()
    # setting up the test I/O
    # setting up the train/val split with filenames
    train_csv_path = config["train_csv_path"]
    sample_sub_csv_path = config["sample_sub_csv_path"]
    train_df, sub, _ = setup_train_and_sub_df(train_csv_path, sample_sub_csv_path)
    # One id per image: Image_Label is "<image>_<label>", repeated per label.
    test_ids = sub["Image_Label"].apply(lambda x: x.split("_")[0]).drop_duplicates().values
    print(f"# of test ids: {len(test_ids)}")
    n_encoded = len(sub["EncodedPixels"])
    print(f"length of sub: {n_encoded}")
    # datasets/data loaders
    io_params = config["io_params"]
    preprocessing_fn = smp.encoders.get_preprocessing_fn(config["model_names"][0],
                                                         "imagenet")
    preprocessing_transform = get_preprocessing(preprocessing_fn)
    val_aug = get_validation_augmentation(io_params["aug_key"])
    # fetching the proper datasets and models
    print("Assuming that all models are from the same family...")
    if config["mode"] == "segmentation":
        test_dataset = CloudDataset(io_params["image_folder"], df=sub,
                                    im_ids=test_ids,
                                    transforms=val_aug,
                                    preprocessing=preprocessing_transform)
        models = [smp.Unet(encoder_name=name, encoder_weights=None,
                           classes=4, activation=None, attention_type=None)
                  for name in config["model_names"]]
    elif config["mode"] == "classification":
        test_dataset = ClassificationCloudDataset(io_params["image_folder"],
                                                  df=sub, im_ids=test_ids,
                                                  transforms=val_aug,
                                                  preprocessing=preprocessing_transform)
        models = [Pretrained(variant=name, num_classes=4, pretrained=False)
                  for name in config["model_names"]]
    else:
        # Bug fix: an unknown mode previously fell through and raised a
        # confusing NameError on `test_dataset` below; fail fast instead.
        raise ValueError(f"Unknown mode {config['mode']!r}; "
                         "expected 'segmentation' or 'classification'.")
    test_loader = DataLoader(test_dataset, batch_size=io_params["batch_size"],
                             shuffle=False, num_workers=io_params["num_workers"])
    pseudo = PseudoLabeler(config["checkpoint_paths"], test_loader,
                           models=models, mode=config["mode"],
                           **config["pseudo_params"])
    pseudo.create_clf_pseudo_df(sub=sub, **config["hard_labels_params"])
if __name__ == "__main__":
import yaml
import argparse
parser = argparse.ArgumentParser(description="For training.")
parser.add_argument("--yml_path", type=str, required=True,
help="Path to the .yml config.")
args = parser.parse_args()
with open(args.yml_path, 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
main(config)
|
{"hexsha": "aa21a34a5d13a06991008bbb08603684b9ec9a3b", "size": 3619, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/create_clf_pseudo.py", "max_stars_repo_name": "jchen42703/understanding-clouds-kaggle", "max_stars_repo_head_hexsha": "6972deb25cdf363ae0d9a9ad26d538280613fc94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-26T16:33:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-26T16:33:40.000Z", "max_issues_repo_path": "scripts/create_clf_pseudo.py", "max_issues_repo_name": "jchen42703/understanding-clouds-kaggle", "max_issues_repo_head_hexsha": "6972deb25cdf363ae0d9a9ad26d538280613fc94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-08T02:50:25.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-19T03:36:54.000Z", "max_forks_repo_path": "scripts/create_clf_pseudo.py", "max_forks_repo_name": "jchen42703/understanding-clouds-kaggle", "max_forks_repo_head_hexsha": "6972deb25cdf363ae0d9a9ad26d538280613fc94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.125, "max_line_length": 94, "alphanum_fraction": 0.6291793313, "include": true, "reason": "import numpy", "num_tokens": 711}
|
import gym
import time
from gym.envs.registration import register
import argparse
import numpy as np
# CLI: '-e/--env' chooses which multi-agent grid environment to demo;
# 'soccer' selects the soccer game, anything else falls back to 'collect'.
parser = argparse.ArgumentParser(description=None)
parser.add_argument('-e', '--env', default='collect', type=str)
args = parser.parse_args()
def main():
    """Register the requested multi-agent grid world and roll it out with a
    uniformly random joint policy until the episode terminates."""
    if args.env == 'soccer':
        env_id = 'multigrid-soccer-v0'
        entry = 'gym_multigrid.envs:SoccerGame4HEnv10x15N2'
    else:
        env_id = 'multigrid-collect-v0'
        entry = 'gym_multigrid.envs:CollectGame4HEnv10x10N2'
    register(id=env_id, entry_point=entry)
    env = gym.make(env_id)
    _ = env.reset()
    n_agents = len(env.agents)
    done = False
    while not done:
        env.render(mode='human', highlight=True)
        time.sleep(0.1)
        # One independently sampled action per agent.
        joint_action = [env.action_space.sample() for _ in range(n_agents)]
        print(joint_action)
        _, _, done, _ = env.step(joint_action)
if __name__ == "__main__":
main()
|
{"hexsha": "8ace97a5197de018f39bc1388718b202a99bb586", "size": 1094, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_env.py", "max_stars_repo_name": "euodiadodd1/RL_berry_poisoning_game", "max_stars_repo_head_hexsha": "46ce3e0d14651c82b56430308f992810dd5a4e05", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_env.py", "max_issues_repo_name": "euodiadodd1/RL_berry_poisoning_game", "max_issues_repo_head_hexsha": "46ce3e0d14651c82b56430308f992810dd5a4e05", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_env.py", "max_forks_repo_name": "euodiadodd1/RL_berry_poisoning_game", "max_forks_repo_head_hexsha": "46ce3e0d14651c82b56430308f992810dd5a4e05", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3265306122, "max_line_length": 69, "alphanum_fraction": 0.6014625229, "include": true, "reason": "import numpy", "num_tokens": 277}
|
import os
from flask import Flask, request, send_file
import sys
import torch
import numpy as np
from scipy.io import wavfile
import io
from nemo.collections.tts.models import TalkNetSpectModel
from nemo.collections.tts.models import TalkNetPitchModel
from nemo.collections.tts.models import TalkNetDursModel
import json
sys.path.append("hifi-gan")
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from models import Generator
from denoiser import Denoiser
app = Flask(__name__)
# Directory containing this script; model checkpoint paths below are
# resolved relative to it.
RUN_PATH = os.path.dirname(os.path.realpath(__file__))
# Device used for all model loading and inference.
DEVICE = "cuda:0"
def load_hifigan(model_name, conf_name):
    """Load a HiFi-GAN generator checkpoint plus its JSON config and return
    (generator, config, denoiser), all placed on DEVICE and set to eval."""
    config_path = os.path.join("hifi-gan", conf_name + ".json")
    with open(config_path) as cfg:
        h = AttrDict(json.loads(cfg.read()))
    torch.manual_seed(h.seed)
    device = torch.device(DEVICE)
    generator = Generator(h).to(device)
    checkpoint = torch.load(model_name, map_location=device)
    generator.load_state_dict(checkpoint["generator"])
    generator.eval()
    generator.remove_weight_norm()
    denoiser = Denoiser(generator, mode="normal")
    return generator, h, denoiser
def generate_json(input, outpath):
    """Write a one-line NeMo manifest for an 'audio_path|transcript' pair.

    The duration is estimated from the file size assuming 16-bit mono
    samples at 22050 Hz (2 bytes per sample)."""
    sample_rate = 22050
    parts = input.split("|")
    audio_path = parts[0].strip()
    n_bytes = os.stat(audio_path).st_size
    record = {
        "audio_filepath": audio_path,
        "duration": n_bytes / (sample_rate * 2),
        "text": parts[1].strip(),
    }
    with open(outpath, "w", encoding="utf8") as sink:
        sink.write(json.dumps(record) + "\n")
def load_talknet(talknet_path):
    # Restore the TalkNet spectrogram model and attach its companion
    # duration and pitch models, which are expected to live next to the
    # spectrogram checkpoint as TalkNetDurs.nemo / TalkNetPitch.nemo.
    with torch.no_grad():
        tnmodel = TalkNetSpectModel.restore_from(talknet_path)
        durs_path = os.path.join(os.path.dirname(talknet_path), "TalkNetDurs.nemo")
        tndurs = TalkNetDursModel.restore_from(durs_path)
        tnmodel.add_module("_durs_model", tndurs)
        pitch_path = os.path.join(os.path.dirname(talknet_path), "TalkNetPitch.nemo")
        tnpitch = TalkNetPitchModel.restore_from(pitch_path)
        tnmodel.add_module("_pitch_model", tnpitch)
        # Inference mode: disable dropout/batch-norm training behaviour.
        tnmodel.eval()
        return tnmodel
def generate_audio(transcript, tnmodel, hifigan, denoiser):
    # Synthesize speech for `transcript`: TalkNet text -> spectrogram,
    # HiFi-GAN spectrogram -> waveform, then denoise.  Returns an in-memory
    # WAV file (io.BytesIO) at 22050 Hz, int16 samples.
    with torch.no_grad():
        tokens = tnmodel.parse(text=transcript.strip())
        spect = tnmodel.generate_spectrogram(tokens=tokens)
        y_g_hat = hifigan(spect.float())
        audio = y_g_hat.squeeze()
        # Scale to 16-bit PCM range (MAX_WAV_VALUE) before denoising.
        audio = audio * MAX_WAV_VALUE
        audio_denoised = denoiser(audio.view(1, -1), strength=35)[:, 0]
        audio_np = audio_denoised.detach().cpu().numpy().reshape(-1).astype(np.int16)
        buffer = io.BytesIO()
        wavfile.write(buffer, 22050, audio_np)
        return buffer
@app.route("/", methods=["GET"])
def get_check():
return "TalkNet server online"
@app.route("/api/tts", methods=["GET"])
def get_tts():
if "text" not in request.args:
return ""
transcript = request.args.get("text")
return send_file(
generate_audio(transcript, tnmodel, hifigan, denoiser),
attachment_filename="audio.wav",
mimetype="audio/x-wav",
)
if __name__ == "__main__":
hifigan, h, denoiser = load_hifigan(
os.path.join(
RUN_PATH, "models", "1QnOliOAmerMUNuo2wXoH-YoainoSjZen", "hifiganmodel"
),
"config_v1",
)
tnmodel = load_talknet(
os.path.join(
RUN_PATH, "models", "1QnOliOAmerMUNuo2wXoH-YoainoSjZen", "TalkNetSpect.nemo"
)
)
app.run(debug=False)
|
{"hexsha": "d94eb8ed04c6c1322aff411ab8ef1c55d822c9dc", "size": 3468, "ext": "py", "lang": "Python", "max_stars_repo_path": "mycroft_talknet.py", "max_stars_repo_name": "abb128/ControllableTalkNet", "max_stars_repo_head_hexsha": "6c806c5d6cd0cb9fe7725fc16ce85e59cc55dbfd", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-21T07:44:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T02:46:03.000Z", "max_issues_repo_path": "mycroft_talknet.py", "max_issues_repo_name": "RAYTRAC3R/ControllableTalkNet", "max_issues_repo_head_hexsha": "0c1fd3d68f9fdcce03ce0bba6c21ab76e566a036", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-29T13:27:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-13T07:12:13.000Z", "max_forks_repo_path": "mycroft_talknet.py", "max_forks_repo_name": "RAYTRAC3R/ControllableTalkNet", "max_forks_repo_head_hexsha": "0c1fd3d68f9fdcce03ce0bba6c21ab76e566a036", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1565217391, "max_line_length": 88, "alphanum_fraction": 0.6689734717, "include": true, "reason": "import numpy,from scipy", "num_tokens": 940}
|
#include <cmath>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/math/special_functions/factorials.hpp>
#include "kernel.h"
// Symbolic constants available in every expression; primary() consults
// this table before user-defined variables, so these names shadow them.
const std::map<std::string, double> calculator::WExpression::global_constants
{
    {"pi", boost::math::constants::pi<double>()},
    {"e", boost::math::constants::e<double>()}
};
double calculator::WExpression::expression()
{
Token t {ts.pop()};
bool isNeg {false};
if (t.kind == TokenKind::minus)
isNeg = true;
else if (t.kind == TokenKind::plus)
isNeg = false;
else
ts.push(t);
double left {term()};
if (isNeg) left = -left;
while (true)
{
Token t {ts.pop()};
switch(t.kind)
{
case TokenKind::plus:
left += term();
break;
case TokenKind::minus:
left -= term();
break;
default:
ts.push(t);
return left;
}
}
}
// Parse and evaluate a term: primaries joined with '*', '/' and '%' (mod),
// left-associatively.  The first non-multiplicative token is pushed back
// for the caller.  Throws std::logic_error on division by zero or a
// non-integral operand of '%'.
double calculator::WExpression::term()
{
    using boost::numeric_cast;
    double left {primary()};
    while (true)
    {
        Token t {ts.pop()};
        switch (t.kind) {
        case TokenKind::multiply:
            left *= primary();
            break;
        case TokenKind::divide:
        {
            double d {primary()};
            if (d == 0.0) throw std::logic_error("division by 0");
            left /= d;
            break;
        }
        case TokenKind::mod:
        {
            double d{primary()};
            // Both operands must round-trip through int for '%'.
            // NOTE(review): the integrality check compares against
            // numeric_limits<double>::min() (smallest positive normal),
            // not an epsilon, and is one-sided — confirm this tolerance.
            int i1 = numeric_cast<int>(left);
            double di1 = numeric_cast<double>(i1);
            if (left - di1 > std::numeric_limits<double>::min())
                throw std::logic_error("left operand of %(mod) is not integer");
            int i2 = numeric_cast<int>(d);
            double di2 = numeric_cast<double>(i2);
            if (d - di2 > std::numeric_limits<double>::min())
                throw std::logic_error("right operand of %(mod) is not integer");
            left = i1 % i2;
            break;
        }
        default:
            // Not ours to consume: hand it back and return the value.
            ts.push(t);
            return left;
        }
    }
}
// Parse and evaluate a primary: a number literal, a named constant or
// variable, a function call, or a bracketed sub-expression; afterwards any
// number of postfix '!' (factorial) operators are applied.  Throws
// std::logic_error on undefined names, mismatched brackets, or factorial
// of a non-integer.
double calculator::WExpression::primary()
{
    using boost::numeric_cast;
    double result {0.0};
    Token t {ts.pop()};
    // Map an opening bracket kind to its matching closing kind; any other
    // kind is returned unchanged.
    auto endBracket = [](TokenKind tk)->TokenKind{
        switch (tk)
        {
        case TokenKind::bracket_left0:
            return TokenKind::bracket_right0;
        case TokenKind::bracket_left1:
            return TokenKind::bracket_right1;
        default:
            return tk;
        }
    };
    switch (t.kind) {
    case TokenKind::number:
    {
        result = t.value;
        break;
    }
    case TokenKind::variable:
    {
        // first find in constants
        // Global constants ("pi", "e") shadow user-defined variables.
        auto gcit {global_constants.find(t.name)};
        if (gcit == cend(global_constants))
        {
            auto vit = variables.find(t.name);
            if (vit == cend(variables))
            {
                throw std::logic_error("undefined symbolic literal " + t.name);
            }
            result = vit->second;
            break;
        }
        result = gcit->second;
        break;
    }
    case TokenKind::function:
    {
        // Delegates argument parsing and evaluation to function().
        result = function(t.name);
        break;
    }
    case TokenKind::bracket_left0:
    case TokenKind::bracket_left1:
    {
        // Bracketed sub-expression; the closing bracket must match the
        // opening one ('(' with ')', '[' with ']').
        double d {expression()};
        Token t2 {ts.pop()};
        if (t2.kind != endBracket(t.kind)/*TokenKind::bracket_right0*/)
            throw std::logic_error("bracket_right expected");
        result = d;
        break;
    }
    default:
        throw std::logic_error("primary is expected");
    }
    // Apply any trailing '!' operators; each application requires the
    // current value to be integral.
    while (true)
    {
        Token tp {ts.pop()};
        switch (tp.kind)
        {
        case TokenKind::factorial:
        {
            unsigned int f = numeric_cast<unsigned int>(result);
            double df = numeric_cast<double>(f);
            // NOTE(review): integrality tolerance uses
            // numeric_limits<double>::min(), not an epsilon — confirm.
            if (result-df > std::numeric_limits<double>::min())
                throw std::logic_error("factorial applied to not integer");
            result = boost::math::factorial<double>(f);
            break;
        }
        default:
        {
            // Not a factorial: push the token back and finish.
            ts.push(tp);
            return result;
        }
        }
    }
}
// Parse "( arg, arg, ... )" (or "{ ... }") following a function name and
// apply the named math function to the collected arguments.
//
// Throws std::logic_error on a malformed argument list, wrong arity, or an
// unknown function name.
double calculator::WExpression::function(std::string function_name)
{
    Token t {ts.pop()};
    std::vector<double> args;
    // Map an opening bracket token to its matching closing token.
    auto endBracket = [](TokenKind tk)->TokenKind{
        switch (tk)
        {
        case TokenKind::bracket_left0:
            return TokenKind::bracket_right0;
        case TokenKind::bracket_left1:
            return TokenKind::bracket_right1;
        default:
            return tk;
        }
    };
    switch (t.kind)
    {
    case TokenKind::bracket_left0:
    case TokenKind::bracket_left1:
    {
        // Collect comma-separated argument expressions until the token
        // after an argument is not a comma.
        while (ist)
        {
            double d {expression()};
            args.push_back(d);
            Token t2 {ts.pop()};
            if (t2.kind != TokenKind::comma)
            {
                ts.push(t2);
                break;
            }
        } // args is done
        Token t3 {ts.pop()};
        if (t3.kind != endBracket(t.kind))
            throw std::logic_error("correct bracket_right is expected");
        break;
    }
    default:
        throw std::logic_error("bracket_left is expected");
    }
    // Enforce the expected argument count; the message text reproduces the
    // original per-function diagnostics exactly ("N args for X function").
    auto requireArity = [&args, &function_name](std::size_t n)
    {
        if (args.size() != n)
            throw std::logic_error(std::to_string(n) + " args for "
                                   + function_name + " function");
    };
    if (function_name == "pow")   { requireArity(2); return std::pow(args[0], args[1]); }
    if (function_name == "cos")   { requireArity(1); return std::cos(args[0]); }
    if (function_name == "sin")   { requireArity(1); return std::sin(args[0]); }
    if (function_name == "tan")   { requireArity(1); return std::tan(args[0]); }
    if (function_name == "acos")  { requireArity(1); return std::acos(args[0]); }
    if (function_name == "asin")  { requireArity(1); return std::asin(args[0]); }
    if (function_name == "atan")  { requireArity(1); return std::atan(args[0]); }
    if (function_name == "atan2") { requireArity(2); return std::atan2(args[0], args[1]); }
    if (function_name == "exp")   { requireArity(1); return std::exp(args[0]); }
    if (function_name == "log")   { requireArity(1); return std::log(args[0]); }
    if (function_name == "log10") { requireArity(1); return std::log10(args[0]); }
    if (function_name == "sqrt")  { requireArity(1); return std::sqrt(args[0]); }
    if (function_name == "abs")   { requireArity(1); return std::fabs(args[0]); }
    // Bug fix: the original message lacked a space before the name and
    // rendered as "undefined functionfoo".
    throw std::logic_error("undefined function " + function_name);
}
// Produce the next token: either a previously pushed-back token (LIFO) or
// one lexed from the input stream. Returns a default-constructed Token when
// the stream is exhausted or already in a failed state.
calculator::Token calculator::Token_stream::pop()
{
    // Serve pushed-back tokens first.
    if (!(data.empty()))
    {
        auto res = data.top();
        data.pop();
        return res;
    }
    Token result;
    if (!ist) return result;
    char sym {0};
    ist >> sym;
    if (ist.eof()) return result;
    switch (sym)
    {
    case '(':
        result.kind = TokenKind::bracket_left0;
        break;
    case ')':
        result.kind = TokenKind::bracket_right0;
        break;
    case '{':
        result.kind = TokenKind::bracket_left1;
        break;
    case '}':
        result.kind = TokenKind::bracket_right1;
        break;
    case '*':
        result.kind = TokenKind::multiply;
        break;
    case '/':
        result.kind = TokenKind::divide;
        break;
    case '%':
        result.kind = TokenKind::mod;
        break;
    case '+':
        result.kind = TokenKind::plus;
        break;
    case '-':
        result.kind = TokenKind::minus;
        break;
    case '!':
        result.kind = TokenKind::factorial;
        break;
    case ',':
        result.kind = TokenKind::comma;
        break;
    case '.':
    case '0':
    case '1':
    case '2':
    case '3':
    case '4':
    case '5':
    case '6':
    case '7':
    case '8':
    case '9':
    {
        // Numeric literal: push the first character back and let the
        // stream's double extraction consume the whole number.
        ist.unget();
        double val {0.0};
        ist >> val;
        result.kind = TokenKind::number;
        result.value = val;
        break;
    }
    default:
    {
        if (isalpha(sym)) // function or variable name: func ( | lsldkm239283
        {
            // Accumulate the alphanumeric identifier.
            std::string s;
            s += sym;
            char ch {0};
            while (ist.get(ch) && (isalpha(ch) || isdigit(ch))) s += ch;
            ist.putback(ch);
            // Peek (skipping whitespace) at the next character: an opening
            // bracket makes the identifier a function name, otherwise it
            // is a variable.
            ist >> ch;
            if (ch == '(' || ch == '{')
            {
                ist.putback(ch);
                result.kind = TokenKind::function;
                result.name = s;
                break;
            }
            else
            {
                ist.putback(ch);
                result.kind = TokenKind::variable;
                result.name = s;
                break;
            }
        }
        else
        {
            throw std::logic_error("bad Token");
        }
    }
    }
    return result;
}
|
{"hexsha": "4cc3361c6f50c20e006e5f120887451fc2e8df35", "size": 11774, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "kernel.cpp", "max_stars_repo_name": "vega1986/wcalc_expression_parser", "max_stars_repo_head_hexsha": "e9645a5fa8086c4108ce4dc1f3ad7da3cead6480", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kernel.cpp", "max_issues_repo_name": "vega1986/wcalc_expression_parser", "max_issues_repo_head_hexsha": "e9645a5fa8086c4108ce4dc1f3ad7da3cead6480", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kernel.cpp", "max_forks_repo_name": "vega1986/wcalc_expression_parser", "max_forks_repo_head_hexsha": "e9645a5fa8086c4108ce4dc1f3ad7da3cead6480", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1002386635, "max_line_length": 85, "alphanum_fraction": 0.4556650246, "num_tokens": 2618}
|
"""
act.qc.radiometer_tests
------------------------------
Tests specific to radiometers
"""
from scipy.fftpack import rfft, rfftfreq
import numpy as np
import xarray as xr
import pandas as pd
import datetime
import dask
import warnings
from act.utils.datetime_utils import determine_time_delta
from act.utils.geo_utils import get_sunrise_sunset_noon, is_sun_visible
def fft_shading_test(obj, variable='diffuse_hemisp_narrowband_filter4',
                     fft_window=30,
                     shad_freq_lower=[0.008, 0.017],
                     shad_freq_upper=[0.0105, 0.0195],
                     ratio_thresh=[3.15, 1.2],
                     time_interval=None, smooth_window=5, shading_thresh=0.4):
    """
    Function to test shadowband radiometer (MFRSR, RSS, etc) instruments
    for shading related problems. Program was adapted by Adam Theisen
    from the method defined in Alexandrov et al 2007 to process on a
    point by point basis using a window of data around that point for
    the FFT analysis.

    For ARM data, testing has found that this works the best on narrowband
    filter4 for MFRSR data.

    Need to run obj.clean.cleanup() ahead of time to ensure proper addition
    to QC variable.

    Parameters
    ----------
    obj : xarray Dataset
        Data object
    variable : string
        Name of variable to process
    fft_window : int
        Number of samples to use in the FFT window. Default is +- 30 samples
        Note: this is +- so the full window will be double
    shad_freq_lower : list
        Lower frequency over which to look for peaks in FFT
    shad_freq_upper : list
        Upper frequency over which to look for peaks in FFT
    ratio_thresh : list
        Threshold for each freq window to flag data. I.e. if the peak is 3.15
        times greater than the surrounding area
    time_interval : float
        Sampling rate of the instrument
    smooth_window : int
        Number of samples to use in smoothing FFTs before analysis
    shading_thresh : float
        After smoothing, the value over which is considered a shading signal

    Returns
    -------
    obj : xarray Dataset
        Data object with the test added to the QC variable and `fft` /
        `fft_freq` diagnostic variables attached

    References
    ----------
    Alexandrov, Mikhail & Kiedron, Peter & Michalsky, Joseph & Hodges, Gary
    & Flynn, Connor & Lacis, Andrew. (2007). Optical depth measurements by
    shadow-band radiometers and their uncertainties. Applied optics. 46.
    8027-38. 10.1364/AO.46.008027.

    """
    # Get time and data from variable
    time = obj['time'].values
    data = obj[variable].values
    if 'missing_value' in obj[variable].attrs:
        missing = obj[variable].attrs['missing_value']
    else:
        missing = -9999.

    # Get time interval between measurements
    if time_interval is None:
        dt = determine_time_delta(time)
    else:
        dt = time_interval

    # Compute the FFT for each point +- window samples
    task = []
    sun_up = is_sun_visible(latitude=obj['lat'].values, longitude=obj['lon'].values,
                            date_time=time)
    for t in range(len(time)):
        sind = max(t - fft_window, 0)
        eind = min(t + fft_window, len(time))

        # Get data and remove all missing/NaN values.
        # Bug fix: the original filter used `np.isnan(d) is not True`, an
        # identity comparison against the True singleton that is always
        # truthy for an ndarray, so NaN samples were never removed.
        d = data[sind:eind]
        d = d[(d != missing) & ~np.isnan(d)]

        # Add to task for dask processing
        task.append(dask.delayed(fft_shading_test_process)(
            time[t], d,
            shad_freq_lower=shad_freq_lower,
            shad_freq_upper=shad_freq_upper,
            ratio_thresh=ratio_thresh,
            time_interval=dt,
            is_sunny=sun_up[t]))

    # Process using dask
    result = dask.compute(*task)

    # Run data through a rolling median to filter out singular
    # false positives
    shading = [r['shading'] for r in result]
    shading = pd.Series(shading).rolling(window=smooth_window, min_periods=1).median()

    # Find indices where shading is indicated
    index = np.where(np.asarray(shading) > shading_thresh)

    # Add test to QC Variable
    desc = 'FFT Shading Test'
    obj.qcfilter.add_test(variable, index=index, test_meaning=desc)

    # Prepare frequency and fft variables for adding to object
    fft = np.full([len(time), fft_window * 2], np.nan)
    freq = np.full([len(time), fft_window * 2], np.nan)
    for i, r in enumerate(result):
        fft[i, 0:len(r['fft'])] = r['fft']
        freq[i, 0:len(r['freq'])] = r['freq']

    attrs = {'units': '', 'long_name': 'FFT Results for Shading Test',
             'upper_freq': shad_freq_upper, 'lower_freq': shad_freq_lower}
    # Use a distinct name for the window coordinate so the integer
    # `fft_window` argument is not shadowed by a DataArray.
    window_coord = xr.DataArray(range(fft_window * 2), dims=['fft_window'],
                                attrs={'long_name': 'FFT Window', 'units': '1'})
    obj['fft'] = xr.DataArray(fft, dims=['time', 'fft_window'], attrs=attrs,
                              coords=[obj['time'], window_coord])
    attrs = {'units': '', 'long_name': 'FFT Frequency Values for Shading Test'}
    obj['fft_freq'] = xr.DataArray(freq, dims=['time', 'fft_window'], attrs=attrs,
                                   coords=[obj['time'], window_coord])

    return obj
def fft_shading_test_process(time, data, shad_freq_lower=None,
                             shad_freq_upper=None, ratio_thresh=None,
                             time_interval=None, is_sunny=None):
    """
    Processing function to do the FFT calculations/thresholding

    Parameters
    ----------
    time : datetime
        Center time of calculation used for calculating sunrise/sunset
    data : list
        Data for run through fft processing
    shad_freq_lower : list
        Lower limits of freqencies to look for shading issues
    shad_freq_upper : list
        Upper limits of freqencies to look for shading issues
    ratio_thresh : list
        Thresholds to apply, corresponding to frequencies chosen
    time_interval : float
        Time interval of data
    is_sunny : bool
        Whether the sun is above the horizon at `time`; when False no FFT
        analysis is performed

    Returns
    -------
    dict
        'shading' (int): 1 to indicate a shading problem, 0 otherwise;
        'fft': FFT amplitudes (large peaks masked to NaN);
        'freq': corresponding frequencies
    """
    # Shading cannot occur when the sun is down; skip the FFT entirely.
    if not is_sunny:
        return {'shading': 0, 'fft': [np.nan] * len(data), 'freq': [np.nan] * len(data)}

    # FFT Algorithm
    fftv = abs(rfft(data))
    freq = rfftfreq(fftv.size, d=time_interval)

    # Mask out FFT amplitudes above 1 so only small-scale structure is
    # analyzed for shading peaks.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        idx = (fftv > 1.)
    index = np.where(idx)
    fftv[index] = np.nan
    freq[index] = np.nan

    # Return if FFT is empty
    # NOTE(review): the masking above preserves array length, so this branch
    # can only fire when `data` itself was empty -- confirm intent.
    if len(fftv) == 0:
        return {'shading': 0, 'fft': [np.nan] * len(data), 'freq': [np.nan] * len(data)}

    # Commented out as it seems to work better without smoothing
    # fftv=pd.DataFrame(data=fftv).rolling(min_periods=3,window=3,center=True).mean().values.flatten()

    ratio = []
    # Calculates the ratio (size) of the peaks in the FFT to the surrounding
    # data
    wind = 3

    # Run through each frequency band: compare the in-band peak against the
    # peaks just outside the band on either side.
    for i in range(len(shad_freq_lower)):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            idx = np.logical_and(freq > shad_freq_lower[i],
                                 freq < shad_freq_upper[i])
        index = np.where(idx)
        if len(index[0]) == 0:
            continue
        # NOTE(review): builtin max() over a slice containing NaN yields NaN,
        # which makes the later ratio comparison False (no shading flagged)
        # -- confirm whether np.nanmax was intended.
        peak = max(fftv[index])
        index = index[0]

        # Window of `wind` samples to each side of the band, clipped to the
        # spectrum bounds.
        sind = index[0] - wind
        if sind < 0:
            sind = 0
        eind = index[-1] + wind
        if eind > len(fftv):
            eind = len(fftv)

        if len(range(sind, index[0])) == 0 or len(range(index[-1], eind)) == 0:
            ratio.append(0.0)
        else:
            # Calculates to the left/right of each peak
            peak_l = max(fftv[range(sind, index[0])])
            peak_r = max(fftv[range(index[-1], eind)])
            ratio.append(peak / np.mean([peak_l, peak_r]))

    # Checks ratios against thresholds for each freq range: the first band
    # must always exceed its threshold; the second only when it was computed.
    shading = 0
    if len(ratio) > 0:
        pass1 = False
        pass2 = False
        if ratio[0] > ratio_thresh[0]:
            pass1 = True
        if len(ratio) > 1:
            if ratio[1] > ratio_thresh[1]:
                pass2 = True
        else:
            pass2 = True
        if pass1 and pass2:
            shading = 1

    return {'shading': shading, 'fft': fftv, 'freq': freq}
|
{"hexsha": "1833aa72d84a385027cfef5fa5a51c13f6924206", "size": 8681, "ext": "py", "lang": "Python", "max_stars_repo_path": "act/qc/radiometer_tests.py", "max_stars_repo_name": "rcjackson/ACT", "max_stars_repo_head_hexsha": "c57fb55094b142bbbef63e7069d4024049996139", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-13T16:10:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-13T16:10:37.000Z", "max_issues_repo_path": "act/qc/radiometer_tests.py", "max_issues_repo_name": "cgodine/ACT", "max_issues_repo_head_hexsha": "af9f0edb76e6f16e2764d5441a4bf4d7fb3a9f39", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "act/qc/radiometer_tests.py", "max_forks_repo_name": "cgodine/ACT", "max_forks_repo_head_hexsha": "af9f0edb76e6f16e2764d5441a4bf4d7fb3a9f39", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3884615385, "max_line_length": 103, "alphanum_fraction": 0.6146757286, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2167}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Interstellar Technologies Inc. All Rights Reserved.
# Authors : Takahiro Inagawa, Kazuki Sakaki
#
# Lisence : MIT Lisence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""
OpenGoddard.optimize - Optimal Trajectories module with PseudoSpectral Method
"""
import numpy as np
from scipy import special
from scipy import interpolate
from scipy import optimize
import matplotlib.pyplot as plt
class Problem:
""" OpenGoddard Problem class.
Args:
time_init (list of float) : [time_start, time_section0, time_section0, , , time_final]
nodes (int) : number of nodes
number_of_states (list) : number of states
number_of_controls (list) : number of controls
maxIterator (int) : iteration max
Attributes:
nodes (int) : time nodes.
number_of_states (int) : number of states.
number_of_controls (int) : number of controls
number_of_section (int) : number of section
number_of_param (int) : number of inner variables
div (list) : division point of inner variables
tau : Gauss nodes
w : weights of Gaussian quadrature
D : differentiation matrix of Gaussian quadrature
time : time
maxIterator (int) : max iterator
time_all_section : all section time
unit_states (list of float) : canonical unit of states
unit_controls (list of float) : canonical unit of controls
unit_time (float) : canonical unit of time
p ((N,) ndarray) : inner variables for optimization
dynamics (function) : function list, list of function of dynamics
knot_states_smooth (list of True/False): list of states are smooth on phase knots
cost (function) : cost function
running_cost (function, optional) : (default = None)
cost_derivative (function, optional) : (default = None)
equality (function) : (default = None)
inequality (function) : (default = None)
"""
def _LegendreFunction(self, x, n):
Legendre, Derivative = special.lpn(n, x)
return Legendre[-1]
def _LegendreDerivative(self, x, n):
Legendre, Derivative = special.lpn(n, x)
return Derivative[-1]
def _nodes_LG(self, n):
'''Return Gauss-Legendre nodes.'''
nodes, weight = special.p_roots(n)
return nodes
def _weight_LG(self, n):
'''Return Gauss-Legendre weight.'''
nodes, weight = special.p_roots(n)
return weight
def _differentiation_matrix_LG(self, n):
    """Build the n-by-n pseudospectral differentiation matrix on LG nodes."""
    tau = self._nodes_LG(n)
    # Precompute P_n'(tau_k) once per node.
    dP = [self._LegendreDerivative(t, n) for t in tau]
    D = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            if row == col:
                D[row, col] = tau[row] / (1 - tau[row] ** 2)
            else:
                D[row, col] = dP[row] / dP[col] / (tau[row] - tau[col])
    return D
def method_LG(self, n):
    """ Legendre-Gauss Pseudospectral method
    Gauss nodes are roots of :math:`P_n(x)`.

    Args:
        n (int) : number of nodes

    Returns:
        ndarray, ndarray, ndarray : nodes, weight, differentiation_matrix
    """
    nodes, weight = special.p_roots(n)
    # Bug fix: _differentiation_matrix_LG is an instance method; calling the
    # bare name raised NameError at runtime.
    D = self._differentiation_matrix_LG(n)
    return nodes, weight, D
def _nodes_LGR(self, n):
'''Return Gauss-Radau nodes.'''
roots, weight = special.j_roots(n-1, 0, 1)
nodes = np.hstack((-1, roots))
return nodes
def _weight_LGR(self, n):
    """Return the LGR quadrature weights: (1 - x_i) / (n^2 P_{n-1}(x_i)^2)."""
    nodes = self._nodes_LGR(n)
    return np.array([(1 - nodes[k]) / (n * n * self._LegendreFunction(nodes[k], n - 1) ** 2)
                     for k in range(n)])
def _differentiation_matrix_LGR(self, n):
    """Build the n-by-n pseudospectral differentiation matrix on LGR nodes."""
    tau = self._nodes_LGR(n)
    # Precompute P_{n-1}(tau_k) once per node.
    P = [self._LegendreFunction(t, n - 1) for t in tau]
    D = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            if row != col:
                D[row, col] = (P[row] / P[col]
                               * (1 - tau[col]) / (1 - tau[row]) / (tau[row] - tau[col]))
            elif row == 0:
                # Special-cased diagonal entry at the included endpoint -1.
                D[row, col] = -(n - 1) * (n + 1) * 0.25
            else:
                D[row, col] = 1 / (2 * (1 - tau[row]))
    return D
def method_LGR(self, n):
    """ Legendre-Gauss-Radau Pseudospectral method
    Gauss-Radau nodes are roots of :math:`P_n(x) + P_{n-1}(x)`.

    Args:
        n (int) : number of nodes

    Returns:
        ndarray, ndarray, ndarray : nodes, weight, differentiation_matrix
    """
    # Bug fix: these helpers are instance methods; calling the bare names
    # raised NameError at runtime.
    nodes = self._nodes_LGR(n)
    weight = self._weight_LGR(n)
    D = self._differentiation_matrix_LGR(n)
    return nodes, weight, D
def _nodes_LGL_old(self, n):
    """Legacy LGL nodes via root finding on P'_{n-1}.

    Gauss-Lobatto nodes are roots of P'_{n-1}(x) plus the endpoints -1, 1.
    ref. http://keisan.casio.jp/exec/system/1360718708
    """
    # Asymptotic initial guesses for the interior roots.
    guesses = np.sort(np.array([
        (1 - 3.0 * (n - 2) / 8.0 / (n - 1) ** 3)
        * np.cos((4.0 * k - 3) / (4.0 * (n - 1) + 1) * np.pi)
        for k in range(2, n)
    ]))
    interior = np.zeros(0)
    for g in guesses:
        sol = optimize.root(self._LegendreDerivative, g, args=(n - 1,))
        interior = np.append(interior, sol.x)
    return np.hstack((-1, interior, 1))
def _nodes_LGL(self, n):
""" Legendre-Gauss-Lobatto(LGL) points"""
roots, weight = special.j_roots(n-2, 1, 1)
nodes = np.hstack((-1, roots, 1))
return nodes
def _weight_LGL(self, n):
    """Return the LGL weights: 2 / (n (n-1) P_{n-1}(x_i)^2)."""
    nodes = self._nodes_LGL(n)
    return np.array([2 / (n * (n - 1) * self._LegendreFunction(x, n - 1) ** 2)
                     for x in nodes])
def _differentiation_matrix_LGL(self, n):
    """Legendre-Gauss-Lobatto (LGL) differentiation matrix."""
    tau = self._nodes_LGL(n)
    # Precompute P_{n-1}(tau_k) once per node.
    P = [self._LegendreFunction(t, n - 1) for t in tau]
    D = np.zeros((n, n))
    for row in range(n):
        for col in range(n):
            if row != col:
                D[row, col] = P[row] / P[col] / (tau[row] - tau[col])
            elif row == 0:
                D[row, col] = -n * (n - 1) * 0.25   # corner entry at tau = -1
            elif row == n - 1:
                D[row, col] = n * (n - 1) * 0.25    # corner entry at tau = +1
            else:
                D[row, col] = 0.0
    return D
def method_LGL(self, n):
    """ Legendre-Gauss-Lobatto Pseudospectral method
    Gauss-Lobatto nodes are roots of :math:`P'_{n-1}(x)` and -1, 1.

    Args:
        n (int) : number of nodes

    Returns:
        ndarray, ndarray, ndarray : nodes, weight, differentiation_matrix

    References:
        Fariba Fahroo and I. Michael Ross. "Advances in Pseudospectral Methods
        for Optimal Control", AIAA Guidance, Navigation and Control Conference
        and Exhibit, Guidance, Navigation, and Control and Co-located Conferences
        http://dx.doi.org/10.2514/6.2008-7309
    """
    # Bug fix: these helpers are instance methods; calling the bare names
    # raised NameError at runtime.
    nodes = self._nodes_LGL(n)
    weight = self._weight_LGL(n)
    D = self._differentiation_matrix_LGL(n)
    return nodes, weight, D
def _make_param_division(self, nodes, number_of_states, number_of_controls):
prev = 0
div = []
for index, node in enumerate(nodes):
num_param = number_of_states[index] + number_of_controls[index]
temp = [i*(node) + prev for i in range(1, num_param + 1)]
prev = temp[-1]
div.append(temp)
return div
def _division_states(self, state, section):
assert section < len(self.nodes), \
"section argument out of own section range"
assert state < self.number_of_states[section], \
"states argument out of own states range"
if (state == 0):
if (section == 0):
div_front = 0
else:
div_front = self.div[section-1][-1]
else:
div_front = self.div[section][state-1]
div_back = self.div[section][state]
return div_back, div_front
def _division_controls(self, control, section):
assert section < len(self.nodes), \
"section argument out of own section range"
assert control < self.number_of_controls[section], \
"controls argument out of own controls range"
div_front = self.div[section][self.number_of_states[section] + control - 1]
div_back = self.div[section][self.number_of_states[section] + control]
return div_back, div_front
def states(self, state, section):
    """Return one section's state samples in physical units.

    Args:
        state (int) : state number
        section (int) : section number

    Returns:
        (N,) ndarray : 1-D array of state values
    """
    end, start = self._division_states(state, section)
    return self.p[start:end] * self.unit_states[section][state]
def states_all_section(self, state):
    """Concatenate the given state's samples over every section.

    Args:
        state (int) : state number

    Returns:
        (N,) ndarray : 1-D array of all section state values
    """
    parts = [self.states(state, i) for i in range(self.number_of_section)]
    return np.concatenate([np.zeros(0)] + parts)
def controls(self, control, section):
    """Return one section's control samples in physical units.

    Args:
        control (int) : control number
        section (int) : section number

    Returns:
        (N,) ndarray : 1-D array of control values
    """
    end, start = self._division_controls(control, section)
    return self.p[start:end] * self.unit_controls[section][control]
def controls_all_section(self, control):
    """Concatenate the given control's samples over every section.

    Args:
        control (int) : control number

    Returns:
        (N,) ndarray : 1-D array of all section control values
    """
    parts = [self.controls(control, i) for i in range(self.number_of_section)]
    return np.concatenate([np.zeros(0)] + parts)
def time_start(self, section):
    """Return a section's start time (t0 for the first section).

    Args:
        section (int) : section

    Returns:
        float : time at section start, in physical units
    """
    if section == 0:
        return self.t0
    idx = range(-self.number_of_section - 1, 0)[section]
    return self.p[idx] * self.unit_time
def time_final(self, section):
    """Return a section's end time in physical units.

    Args:
        section (int) : section

    Returns:
        float : time at section end
    """
    idx = range(-self.number_of_section, 0)[section]
    return self.p[idx] * self.unit_time
def time_final_all_section(self):
    """Return the list of end times of every section.

    Returns:
        list : final time of each section, in order
    """
    return [self.time_final(s) for s in range(self.number_of_section)]
def set_states(self, state, section, value):
    """Write `value` (physical units) into the scaled parameter vector.

    Args:
        state (int) : state
        section (int) : section
        value : array of length nodes[section]
    """
    assert len(value) == self.nodes[section], "Error: value length is NOT match nodes length"
    end, start = self._division_states(state, section)
    self.p[start:end] = value / self.unit_states[section][state]
def set_states_all_section(self, state, value_all_section):
    """Distribute an all-section value array over every section's state.

    Args:
        state (int) : state
        value_all_section : concatenated values for all sections
    """
    offset = 0
    for section in range(self.number_of_section):
        chunk = value_all_section[offset:offset + self.nodes[section]]
        offset += self.nodes[section]
        self.set_states(state, section, chunk)
def set_controls(self, control, section, value):
    """Write `value` (physical units) into the scaled parameter vector.

    Args:
        control (int) : control
        section (int) : section
        value : array of length nodes[section]
    """
    assert len(value) == self.nodes[section], "Error: value length is NOT match nodes length"
    end, start = self._division_controls(control, section)
    self.p[start:end] = value / self.unit_controls[section][control]
def set_controls_all_section(self, control, value_all_section):
    """Distribute an all-section value array over every section's control.

    Args:
        control (int) : control
        value_all_section : concatenated values for all sections
    """
    offset = 0
    for section in range(self.number_of_section):
        chunk = value_all_section[offset:offset + self.nodes[section]]
        offset += self.nodes[section]
        self.set_controls(control, section, chunk)
def set_time_final(self, section, value):
    """Store a section's final time, scaled by the canonical time unit.

    Args:
        section (int) : section
        value (float) : final time in physical units
    """
    idx = range(-self.number_of_section, 0)[section]
    self.p[idx] = value / self.unit_time
def set_states_bounds(self, state, section, lb, ub):
    """Set (lb, ub) bounds for a state; None means unbounded on that side.

    Args:
        state (int) : state
        section (int) : section
        lb (float or None) : lower bound in physical units
        ub (float or None) : upper bound in physical units
    """
    unit = self.unit_states[section][state]
    scaled = (None if lb is None else lb / unit,
              None if ub is None else ub / unit)
    end, start = self._division_states(state, section)
    self.bounds[start:end] = [scaled] * self.nodes[section]
def set_states_bounds_all_section(self, state, lb, ub):
    """Apply the same state bounds to every section.

    Args:
        state (int) : state
        lb (float or None) : lower bound
        ub (float or None) : upper bound
    """
    for sec in range(self.number_of_section):
        self.set_states_bounds(state, sec, lb, ub)
def set_controls_bounds(self, control, section, lb, ub):
    """Set (lb, ub) bounds for a control; None means unbounded on that side.

    Args:
        control (int) : control
        section (int) : section
        lb (float or None) : lower bound in physical units
        ub (float or None) : upper bound in physical units
    """
    unit = self.unit_controls[section][control]
    scaled = (None if lb is None else lb / unit,
              None if ub is None else ub / unit)
    end, start = self._division_controls(control, section)
    self.bounds[start:end] = [scaled] * self.nodes[section]
def set_controls_bounds_all_section(self, control, lb, ub):
    """Apply the same control bounds to every section.

    Args:
        control (int) : control
        lb (float or None) : lower bound
        ub (float or None) : upper bound
    """
    for sec in range(self.number_of_section):
        self.set_controls_bounds(control, sec, lb, ub)
def set_time_final_bounds(self, section, lb, ub):
    """ set value to bounds of time_final at specific section

    Args:
        section (int) : section
        lb (float or None) : lower bound
        ub (float or None) : upper bound
    """
    # NOTE(review): unlike the other bound setters, a missing lower bound
    # maps to 0.0 (not None) while a missing upper bound stays unbounded
    # -- presumably because final times must be non-negative; confirm.
    lb = lb / self.unit_time if lb is not None else 0.0
    ub = ub / self.unit_time if ub is not None else None
    self.bounds[self.index_time_final(section)] = (lb, ub)
def time_to_tau(self, time):
    """Map physical times affinely onto the canonical interval [-1, 1]."""
    t_lo = min(time)
    t_hi = max(time)
    mid = (t_lo + t_hi) / 2
    return np.array([2 / (t_hi - t_lo) * (t - mid) for t in time])
def time_update(self):
    """Rebuild per-section time grids from the optimized final times.

    Returns:
        (N,) ndarray : concatenated time array over all sections
    """
    knots = [0] + self.time_final_all_section()
    self.time = []
    for sec in range(self.number_of_section):
        # Affine map of the canonical nodes tau onto [knots[sec], knots[sec+1]].
        self.time.append((knots[sec + 1] - knots[sec]) / 2.0 * self.tau[sec]
                         + (knots[sec + 1] + knots[sec]) / 2.0)
    return np.concatenate([seg for seg in self.time])
def time_knots(self):
    """ get time at knot point

    Returns:
        time_knots (list) : time at knot point
    """
    knots = [0]
    knots.extend(self.time_final_all_section())
    return knots
def index_states(self, state, section, index=None):
    """Return the flat-vector index of a state sample.

    With index=None the slice start is returned; a negative index counts
    back from the end of the state's slice.

    Args:
        state (int) : state
        section (int) : section
        index (int, optional) : sample offset within the slice

    Returns:
        int : index into the flat parameter vector
    """
    end, start = self._division_states(state, section)
    if index is None:
        return start
    assert index < end - start, "Error, index out of range"
    return start + (index if index >= 0 else end - start + index)
def index_controls(self, control, section, index=None):
    """Return the flat-vector index of a control sample (see index_states)."""
    end, start = self._division_controls(control, section)
    if index is None:
        return start
    assert index < end - start, "Error, index out of range"
    return start + (index if index >= 0 else end - start + index)
def index_time_final(self, section):
    """Return the flat-vector index that holds a section's final time."""
    return self.number_of_variables + range(-self.number_of_section, 0)[section]
"""
===========================
UNIT SCALING ZONE
===========================
"""
def set_unit_states(self, state, section, value):
    """Assign a canonical unit to one state in one section.

    Args:
        state (int) : state
        section (int) : section
        value (float) : canonical unit value
    """
    self.unit_states[section][state] = value
def set_unit_states_all_section(self, state, value):
    """Assign a canonical state unit uniformly across all sections.

    Args:
        state (int) : state
        value (float) : canonical unit value
    """
    for sec in range(self.number_of_section):
        self.set_unit_states(state, sec, value)
def set_unit_controls(self, control, section, value):
    """Assign a canonical unit to one control in one section.

    Args:
        control (int) : control
        section (int) : section
        value (float) : canonical unit value
    """
    self.unit_controls[section][control] = value
def set_unit_controls_all_section(self, control, value):
    """Assign a canonical control unit uniformly across all sections.

    Args:
        control (int) : control
        value (float) : canonical unit value
    """
    for sec in range(self.number_of_section):
        self.set_unit_controls(control, sec, value)
def set_unit_time(self, value):
    """ set a canonical unit value to the time

    Args:
        value (float) : value
    """
    # Record the unit, then rescale the stored section boundary times.
    self.unit_time = value
    time_init = np.array(self.time_init) / value
    self.time_init = list(time_init)
    # Rebuild each section's collocation-time grid from the scaled knots.
    self.time = []
    for index, node in enumerate(self.nodes):
        self.time.append((time_init[index+1] - time_init[index]) / 2.0 * self.tau[index]
                         + (time_init[index+1] + time_init[index]) / 2.0)
    self.t0 = time_init[0]
    self.time_all_section = np.concatenate([i for i in self.time])
    # set_time_final divides by unit_time internally, so pass back the
    # physical (unscaled) time by multiplying the scaled knot with `value`.
    for section in range(self.number_of_section):
        self.set_time_final(section, time_init[section+1] * value)
""" ==============================
"""
def _dummy_func():
    # No-op placeholder: default `display_func` for solve() when the caller
    # supplies no progress callback.
    pass
""" ==============================
"""
    def solve(self, obj, display_func=_dummy_func, **options):
        """ solve NLP

        Builds the pseudospectral collocation NLP (user equality constraints
        plus collocation and knotting conditions) and solves it with
        scipy's SLSQP, repeating up to maxIterator outer iterations.

        Args:
            obj (object instance) : instance
            display_func (function) : function to display intermediate values
            ftol (float, optional) : Precision goal for the value of f in the
                stopping criterion, (default: 1e-6)
            maxiter (int, optional) : Maximum number of iterations., (default : 25)
        Examples:
            "prob" is Problem class's instance.
            >>> prob.solve(obj, display_func, ftol=1e-12)
        """
        assert len(self.dynamics) != 0, "It must be set dynamics"
        assert self.cost is not None, "It must be set cost function"
        assert self.equality is not None, "It must be set equality function"
        assert self.inequality is not None, "It must be set inequality function"
        def equality_add(equality_func, obj):
            """ add pseudospectral method conditions to equality function.
            collocation point condition and knotting condition.
            """
            result = self.equality(self, obj)
            # collation point condition: for each section, the LGL
            # differentiation matrix applied to the (unit-scaled) states must
            # equal the dynamics scaled by half the section duration
            for i in range(self.number_of_section):
                D = self.D
                derivative = np.zeros(0)
                for j in range(self.number_of_states[i]):
                    state_temp = self.states(j, i) / self.unit_states[i][j]
                    derivative = np.hstack((derivative, D[i].dot(state_temp)))
                tix = self.time_start(i) / self.unit_time
                tfx = self.time_final(i) / self.unit_time
                dx = self.dynamics[i](self, obj, i)
                result = np.hstack((result, derivative - (tfx - tix) / 2.0 * dx))
            # knotting condition: matching states across section boundaries
            for knot in range(self.number_of_section - 1):
                if (self.number_of_states[knot] != self.number_of_states[knot + 1]):
                    continue # if states are not continuous on knot, knotting condition skip
                for state in range(self.number_of_states[knot]):
                    param_prev = self.states(state, knot) / self.unit_states[knot][state]
                    param_post = self.states(state, knot + 1) / self.unit_states[knot][state]
                    if (self.knot_states_smooth[knot]):
                        # last node of previous section == first of the next
                        result = np.hstack((result, param_prev[-1] - param_post[0]))
            return result
        def cost_add(cost_func, obj):
            """Combining nonintegrated function and integrated function.
            """
            not_integrated = self.cost(self, obj)
            if self.running_cost is None:
                return not_integrated
            # Gauss-Lobatto quadrature of the running cost using weights w
            integrand = self.running_cost(self, obj)
            weight = np.concatenate([i for i in self.w])
            integrated = sum(integrand * weight)
            return not_integrated + integrated
        def wrap_for_solver(func, arg0, arg1):
            # adapter: scipy calls for_solver(p, *args); store p on self so
            # user functions read the current variables through self.
            # NOTE: the inner arg0/arg1 parameters shadow the closure's and
            # are rebound by scipy's `args` tuple on every call.
            def for_solver(p, arg0, arg1):
                self.p = p
                return func(arg0, arg1)
            return for_solver
        # def wrap_for_solver(func, *args):
        #     def for_solver(p, *args):
        #         self.p = p
        #         return func(*args)
        #     return for_solver
        cons = ({'type': 'eq',
                 'fun': wrap_for_solver(equality_add, self.equality, obj),
                 'args': (self, obj,)},
                {'type': 'ineq',
                 'fun': wrap_for_solver(self.inequality, self, obj),
                 'args': (self, obj,)})
        if (self.cost_derivative is None):
            jac = None
        else:
            jac = wrap_for_solver(self.cost_derivative, self, obj)
        ftol = options.setdefault("ftol", 1e-6)
        maxiter = options.setdefault("maxiter", 25)
        while self.iterator < self.maxIterator:
            print("---- iteration : {0} ----".format(self.iterator+1))
            opt = optimize.minimize(wrap_for_solver(cost_add, self.cost, obj),
                                    self.p,
                                    args=(self, obj),
                                    bounds=self.bounds,
                                    constraints=cons,
                                    jac=jac,
                                    method='SLSQP',
                                    options={"disp": True,
                                             "maxiter": maxiter,
                                             "ftol": ftol})
            print(opt.message)
            display_func()
            print("")
            # SLSQP status 0 means successful termination, so the loop
            # exits on success and re-solves only on nonzero status
            if not(opt.status):
                break
            self.iterator += 1
""" ==============================
"""
    def __init__(self, time_init, nodes, number_of_states, number_of_controls,
                 maxIterator = 100, method="LGL"):
        """Set up the multi-section pseudospectral problem.

        Args:
            time_init (list) : initial guess of section boundary times,
                length len(nodes) + 1
            nodes (list) : number of collocation nodes per section
            number_of_states (list) : state count per section
            number_of_controls (list) : control count per section
            maxIterator (int, optional) : max outer solve() iterations
            method (str, optional) : collocation scheme; only "LGL" is
                used here (the parameter is currently not read)
        """
        assert isinstance(time_init, list), \
            "error: time_init is not list"
        assert isinstance(nodes, list), \
            "error: nodes are not list"
        assert isinstance(number_of_states, list), \
            "error: number of states are not list"
        assert isinstance(number_of_controls, list), \
            "error: number of controls are not list"
        assert len(time_init) == len(nodes) + 1, \
            "error: time_init length is not match nodes length"
        assert len(nodes) == len(number_of_states), \
            "error: nodes length is not match states length"
        assert len(nodes) == len(number_of_controls), \
            "error: nodes length is not match controls length"
        self.nodes = nodes
        self.number_of_states = number_of_states
        self.number_of_controls = number_of_controls
        # index ranges of each section's variables inside the flat vector p
        self.div = self._make_param_division(nodes, number_of_states, number_of_controls)
        self.number_of_section = len(self.nodes)
        self.number_of_param = np.array(number_of_states) + np.array(number_of_controls)
        # + number_of_section: one final-time variable per section
        self.number_of_variables = sum(self.number_of_param * nodes) + self.number_of_section
        # LGL nodes, quadrature weights, differentiation matrix and the
        # physical time grid for each section
        self.tau = []
        self.w = []
        self.D = []
        self.time = []
        for index, node in enumerate(nodes):
            self.tau.append(self._nodes_LGL(node))
            self.w.append(self._weight_LGL(node))
            self.D.append(self._differentiation_matrix_LGL(node))
            self.time.append((time_init[index+1] - time_init[index]) / 2.0 * self.tau[index]
                             + (time_init[index+1] + time_init[index]) / 2.0)
        self.maxIterator = maxIterator
        self.iterator = 0
        self.time_init = time_init
        self.t0 = time_init[0]
        self.time_all_section = np.concatenate([i for i in self.time])
        # ==== canonical units used to nondimensionalize the problem
        self.unit_states = []
        self.unit_controls = []
        self.unit_time = 1.0
        for i in range(self.number_of_section):
            self.unit_states.append([1.0]*self.number_of_states[i])
            self.unit_controls.append([1.0]*self.number_of_controls[i])
        # ==== flat optimization vector and its bounds
        self.p = np.zeros(self.number_of_variables, dtype=float)
        self.bounds = [(None, None)] * self.number_of_variables
        for i in range(self.number_of_section):
            # section durations must stay non-negative
            self.set_time_final_bounds(i, 0.0, None)
        # ====
        # user-supplied problem functions (set by the caller before solve)
        self.dynamics = []
        self.knot_states_smooth = []
        self.cost = None
        self.running_cost = None
        self.cost_derivative = None
        self.equality = None
        self.inequality = None
        # ==== seed the final-time variables from time_init
        for section in range(self.number_of_section):
            self.set_time_final(section, time_init[section+1])
            self.dynamics.append(None)
        for section in range(self.number_of_section-1):
            self.knot_states_smooth.append(True)
def __repr__(self):
s = "---- parameter ----" + "\n"
s += "nodes = " + str(self.nodes) + "\n"
s += "number of states = " + str(self.number_of_states) + "\n"
s += "number of controls = " + str(self.number_of_controls) + "\n"
s += "number of sections = " + str(self.number_of_section) + "\n"
s += "number of variables = " + str(self.number_of_variables) + "\n"
s += "---- algorithm ----" + "\n"
s += "max iteration = " + str(self.maxIterator) + "\n"
s += "---- function ----" + "\n"
s += "dynamics = " + str(self.dynamics) + "\n"
s += "cost = " + str(self.cost) + "\n"
s += "cost_derivative = " + str(self.cost_derivative) + "\n"
s += "equality = " + str(self.equality) + "\n"
s += "inequality = " + str(self.inequality) + "\n"
s += "knot_states_smooth = " + str(self.dynamics) + "\n"
return s
def to_csv(self, filename="OpenGoddard_output.csv", delimiter=","):
""" output states, controls and time to csv file
Args:
filename (str, optional) : csv filename
delimiter : (str, optional) : default ","
"""
result = np.zeros(0)
result = np.hstack((result, self.time_update()))
header = "time, "
for i in range(self.number_of_states[0]):
header += "state%d, " % (i)
result = np.vstack((result, self.states_all_section(i)))
for i in range(self.number_of_controls[0]):
header += "control%d, " % (i)
result = np.vstack((result, self.controls_all_section(i)))
np.savetxt(filename, result.T, delimiter=delimiter, header=header)
print("Completed saving \"%s\"" % (filename))
    def plot(self, title_comment=""):
        """ plot inner variables that to be optimized

        Draws the flat optimization vector p and marks each section's
        variable-block boundaries with vertical lines.

        Args:
            title_comment (str) : string for title
        """
        plt.figure()
        plt.title("OpenGoddard inner variables" + title_comment)
        plt.plot(self.p, "o")
        plt.xlabel("variables")
        plt.ylabel("value")
        for section in range(self.number_of_section):
            for line in self.div[section]:
                # one color per section, cycling through C1..C6
                plt.axvline(line, color="C%d" % ((section+1) % 6), alpha=0.5)
        plt.grid()
class Guess:
    """Class for initial value guess for optimization.

    Collection of class methods producing guess arrays shaped like a
    given time vector.
    """
    @classmethod
    def zeros(cls, time):
        """ return zeros that array size is same as time length

        Args:
            time (array_like) :
        Returns:
            (N, ) ndarray
        """
        return np.zeros(len(time))

    @classmethod
    def constant(cls, time, const):
        """ return constant values that array size is same as time length

        Args:
            time (array_like) :
            const (float) : set value
        Returns:
            (N, ) ndarray
        """
        return np.full(len(time), const, dtype=float)

    @classmethod
    def linear(cls, time, y0, yf):
        """ return linear function values that array size is same as time length

        Args:
            time (array_like) : time
            y0 (float): initial value
            yf (float): final value
        Returns:
            (N, ) ndarray
        """
        time = np.asarray(time, dtype=float)
        # np.interp over the two endpoints is the same linear fit the
        # legacy scipy.interpolate.interp1d call produced
        return np.interp(time, [time[0], time[-1]], [y0, yf])

    @classmethod
    def cubic(cls, time, y0, yprime0, yf, yprimef):
        """ return cubic function values that array size is same as time length

        Matches the endpoint values and the endpoint slopes.

        Args:
            time (array_like) : time
            y0 (float) : initial value
            yprime0 (float) : slope of initial value
            yf (float) : final value
            yprimef (float) : slope of final value
        Returns:
            (N, ) ndarray
        """
        time = np.asarray(time, dtype=float)
        y = np.array([y0, yprime0, yf, yprimef])
        t0 = time[0]
        tf = time[-1]
        A = np.array([[1, t0, t0**2, t0**3], [0, 1, 2*t0, 3*t0**2],
                      [1, tf, tf**2, tf**3], [0, 1, 2*tf, 3*tf**2]])
        # solve A c = y directly; more accurate and cheaper than inv(A).dot(y)
        C = np.linalg.solve(A, y)
        ys = C[0] + C[1]*time + C[2]*time**2 + C[3]*time**3
        return ys

    @classmethod
    def plot(cls, x, y, title="", xlabel="", ylabel=""):
        """ plot wrappper

        Args:
            x (array_like) : array on the horizontal axis of the plot
            y (array_like) : array on the vertical axis of the plot
            title (str, optional) : title
            xlabel (str, optional) : xlabel
            ylabel (str, optional) : ylabel
        """
        plt.figure()
        plt.plot(x, y, "-o")
        plt.title(title)
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.grid()
class Condition(object):
    """OpenGoddard.optimize Condition class

    A thin wrapper around numpy zeros/hstack used to accumulate equality
    and inequality constraint residuals.

    Examples:
        for examples in equality function.
        Initial condtion : x[0] = 0.0
        Termination Condition : x[-1] = 100
        >>> result = Condition()
        >>> result.equal(x[0], 0.0)
        >>> result.equal(x[-1], 100)
        >>> return result()

        for examples in inequality function
        Inequation condtion : 0.0 <= x <= 100
        >>> result = Condition()
        >>> result.lower_bound(x, 0.0)
        >>> result.upper_bound(x, 100)
        >>> return result()
    """
    def __init__(self, length=0):
        # pre-size with zeros so precomputed jacobians can be indexed
        self._condition = np.zeros(length)

    def add(self, arg, unit=1.0):
        """add condition

        Args:
            arg (array_like) : condition
            unit (float, optional) : scaling divisor (default : 1.0)
        """
        scaled = arg / unit
        self._condition = np.hstack((self._condition, scaled))

    def equal(self, arg1, arg2, unit=1.0):
        """add equation constraint condition in Problem equality function
        arg1 = arg2

        Args:
            arg1 (float or array_like) : right side of the equation
            arg2 (float or array_like) : left side of the equation
            unit (float, optional) : argX / unit (default : 1.0)
        Notes:
            It must be used in equality function.
        """
        self.add(arg1 - arg2, unit)

    def lower_bound(self, arg1, arg2, unit=1.0):
        """add inequation constraint condition in Problem inequality function
        arg1 >= arg2

        Args:
            arg1 (array like) : arg1 is greater than or equal to arg2
            arg2 (float or array like) : arg1 is greater than or equal to arg2
            unit (float, optional) : argX / unit (default : 1.0)
        Notes:
            It must be used in inequality function.
        """
        self.add(arg1 - arg2, unit)

    def upper_bound(self, arg1, arg2, unit=1.0):
        """add inequation constraint condition in Problem inequality function
        arg1 <= arg2

        Args:
            arg1 (array like) : arg1 is less than or equal to arg2
            arg2 (float or array like) : arg1 is less than or equal to arg2
            unit (float, optional) : argX / unit (default : 1.0)
        Notes:
            It must be used in inequality function.
        """
        self.add(arg2 - arg1, unit)

    def change_value(self, index, value):
        # overwrite one entry of the pre-sized residual vector
        self._condition[index] = value

    def __call__(self):
        return self._condition
class Dynamics(object):
    """OpenGoddard.optimize Dynamics class.

    Thin dict-like wrapper for building a section's state-derivative
    vector. Assign each derivative by integer index, then call the
    instance to get the flattened, unit-scaled derivative array.

    Examples:
        Dynamics class must be used in dynamics function.
        Equation of motion of controllable thrust with free fall:
        dx/dt = v ; dv/dt = T/m - g

        >>> def dynamics(prob, obj, section):
        >>>     x = prob.states(0, section)
        >>>     v = prob.states(1, section)
        >>>     T = prob.controls(0, section)
        >>>     g = 9.8
        >>>     m = 1.0
        >>>     dx = Dynamics(prob, section)
        >>>     dx[0] = v
        >>>     dx[1] = T / m - g
        >>>     return dx()
    """
    def __init__(self, prob, section=0):
        """ prob is instance of OpenGoddard class
        """
        self.section = section
        self.number_of_state = prob.number_of_states[section]
        self.unit_states = prob.unit_states
        self.unit_time = prob.unit_time
        # every derivative defaults to zeros over the section's nodes
        for state in range(self.number_of_state):
            self.__dict__[state] = np.zeros(prob.nodes[section])

    def __getitem__(self, key):
        assert key < self.number_of_state, "Error, Dynamics key out of range"
        return self.__dict__[key]

    def __setitem__(self, key, value):
        assert key < self.number_of_state, "Error, Dynamics key out of range"
        self.__dict__[key] = value

    def __call__(self):
        # flatten all state derivatives, rescaled to canonical units
        dx = np.zeros(0)
        for state in range(self.number_of_state):
            scale = self.unit_time / self.unit_states[self.section][state]
            dx = np.hstack((dx, self.__dict__[state] * scale))
        return dx
if __name__ == '__main__':
print("==== OpenGoddard test program ====")
plt.close("all")
plt.ion()
time_init = [0, 1]
n = [10]
num_states = [3]
num_controls = [1]
max_iteration = 8
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
print("t0 = %.1f\nnodes = " % (prob.t0))
print(n)
print("number of states = ")
print(num_states)
print("number of controls = ")
print(num_controls)
print("tau = ")
print(prob.tau)
# print("D = ")
# print(prob.D)
print("div = ")
print(prob.div)
print("=====" * 10)
time_init = [0.0, 0.10, 0.2]
n = [20, 10]
num_states = [3, 3]
num_controls = [1, 1]
max_iteration = 1
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
print("t0 = %.1f\nnodes = " % (prob.t0))
print(n)
print("number of states = ")
print(num_states)
print("number of controls = ")
print(num_controls)
print("tau = ")
print(prob.tau)
print("time_init = ")
print(prob.time)
print("time_all_section = ")
print(prob.time_all_section)
# print("D = ")
# print(prob.D)
print("div = ")
print(prob.div)
print("=====" * 10)
print("p = ")
print(np.round(prob.p, 0))
print("states #0 all section = ")
print(prob.states_all_section(0))
print("states #1 all section = ")
print(prob.states_all_section(1))
print("controls #0 section #0 = ")
print(prob.controls(0, 0))
print("controls #0 all section = ")
print(prob.controls_all_section(0))
print("time final #0 = ")
print(prob.time_final(0))
print("time final #1 = ")
print(prob.time_final(1))
print("=====" * 10)
plt.close("all")
tau = prob.tau
time_init = prob.time_init
# initial estimation
H_init = Guess.cubic(prob.time_all_section, 1.0, 0.0, 1.01, 0.0)
Guess.plot(prob.time_all_section, H_init, "Altitude", "time", "Altitude")
V_init = Guess.linear(prob.time_all_section, 0.0, 0.0)
M_init = Guess.cubic(prob.time_all_section, 1.0, -0.6, 0.6, 0.0)
T_init1 = Guess.linear(prob.time[0], 3.5, 3.5)
T_init2 = Guess.linear(prob.time[1], 0.0, 0.0)
T_init = np.hstack((T_init1, T_init2))
Guess.plot(prob.time_all_section, T_init, "Thrust Guess", "time", "Thrust")
plt.show()
prob.set_states_all_section(0, H_init)
prob.set_states_all_section(1, V_init)
prob.set_states_all_section(2, M_init)
prob.set_controls_all_section(0, T_init)
print("p =")
print(np.round(prob.p, 2))
print("=====" * 10)
print("===== ====")
print("===== Rocket Ascent Problem ====")
print("===== ====")
obj = 1.0
    def dynamics(prob, obj, section):
        # nondimensional rocket-ascent dynamics:
        # states h (altitude), v (velocity), m (mass); control T (thrust)
        h = prob.states(0, section)
        v = prob.states(1, section)
        m = prob.states(2, section)
        T = prob.controls(0, section)
        Dc = 0.5 * 620 * 1.0 / 1.0      # drag constant (nondimensional)
        c = 0.5 * np.sqrt(1.0 * 1.0)    # exhaust velocity constant
        # exponential atmosphere decaying from the surface h = 1.0
        drag = 1 * Dc * v ** 2 * np.exp(-500 * (h - 1.0) / 1.0)
        g = 1.0 * (1.0 / h)**2          # inverse-square gravity
        dx = np.zeros(0)
        dx0 = v                          # dh/dt
        dx1 = (T - drag) / m - g         # dv/dt
        dx2 = - T / c                    # dm/dt (propellant consumption)
        dx = np.hstack((dx0, dx1, dx2))
        return dx
    def equality(prob, obj):
        # boundary (event) conditions for the rocket ascent problem
        h = prob.states_all_section(0)
        v = prob.states_all_section(1)
        m = prob.states_all_section(2)
        # T, t0, ts, tf are read for symmetry with inequality() but are
        # not used in any condition below
        T = prob.controls_all_section(0)
        t0 = prob.time_start(0)
        ts = prob.time_final(0)
        tf = prob.time_final(1)
        result = Condition()
        # event condition
        result.add(h[0] - 1.0)   # launch from the surface
        result.add(v[0] - 0.0)   # start at rest
        result.add(m[0] - 1.0)   # full initial mass
        result.add(v[-1] - 0.0)  # zero velocity at apogee
        result.add(m[-1] - 0.6)  # all propellant burned
        return result()
    def inequality(prob, obj):
        # path constraints for the rocket ascent problem (all entries >= 0)
        h = prob.states_all_section(0)
        v = prob.states_all_section(1)
        m = prob.states_all_section(2)
        T = prob.controls_all_section(0)
        tf = prob.time_final(-1)
        result = Condition()
        # lower bounds
        result.add(h - 1.0)    # stay above the surface
        result.add(v - 0.0)    # no downward velocity
        result.add(m - 0.6)    # dry mass floor
        result.add(T - 0.0)    # non-negative thrust
        result.add(tf - 0.1)   # minimum total time
        # upper bounds
        result.add(1.0 - m)    # mass cannot exceed initial
        result.add(3.5 - T)    # maximum thrust
        return result()
def cost(prob, obj):
h = prob.states_all_section(0)
return -h[-1]
    def cost_derivative(prob, obj):
        # analytic gradient of cost(): all zeros except -1 at the flat-vector
        # index of the final node of state 0 in the last section
        jac = Condition(prob.number_of_variables)
        index_h_end = prob.index_states(0, -1, -1)
        jac.change_value(index_h_end, -1)
        return jac()
dx0 = dynamics(prob, obj, 0)
dx1 = dynamics(prob, obj, 1)
result_eq = equality(prob, obj)
result_ineq = inequality(prob, obj)
print("dx section #0")
print(np.round(dx0, 2))
print("dx section #0")
print(np.round(dx1, 2))
print("equality = ")
print(np.round(result_eq, 2))
print("inequality = ")
print(np.round(result_ineq, 2))
print("=====" * 10)
prob.dynamics = [dynamics, dynamics]
prob.knot_states_smooth = [True]
prob.cost = cost
prob.cost_derivative = cost_derivative
prob.equality = equality
prob.inequality = inequality
    def display_func():
        # progress callback for solve(): reads the module-level `prob`
        # and reports the current final altitude
        h = prob.states_all_section(0)
        print("max altitude: {0:.5f}".format(h[-1]))
prob.solve(obj, display_func)
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
time = prob.time_update()
Dc = 0.5 * 620 * 1.0 / 1.0
drag = 1 * Dc * v ** 2 * np.exp(-500 * (h - 1.0) / 1.0)
g = 1.0 * (1.0 / h)**2
plt.figure()
plt.title("Altitude profile")
plt.plot(time, h, marker="o", label="Altitude")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Altitude [-]")
plt.figure()
plt.title("Velocity")
plt.plot(time, v, marker="o", label="Velocity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Velocity [-]")
plt.figure()
plt.title("Mass")
plt.plot(time, m, marker="o", label="Mass")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Mass [-]")
plt.figure()
plt.title("Thrust profile")
plt.plot(time, T, marker="o", label="Thrust")
plt.plot(time, drag, marker="o", label="Drag")
plt.plot(time, g, marker="o", label="Gravity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Thrust [-]")
plt.legend(loc="best")
print("===== ====")
print("===== Bryson-Denham Problem ====")
print("===== ====")
    class BrysonDenham:
        # parameter container for the Bryson-Denham problem
        def __init__(self):
            # path-constraint upper bound on the position state x(t)
            self.max_x = 1.0 / 9.0
    def dynamics_Bryson(prob, obj, section):
        # double-integrator dynamics: dx/dt = v, dv/dt = u
        x = prob.states(0, section)
        v = prob.states(1, section)
        u = prob.controls(0, section)
        dx = Dynamics(prob, section)
        dx[0] = v
        dx[1] = u
        return dx()
    def equality_Bryson(prob, obj):
        # boundary conditions: x(0)=0, v(0)=1, x(tf)=0, v(tf)=-1, tf=1
        x = prob.states_all_section(0)
        v = prob.states_all_section(1)
        u = prob.controls_all_section(0)  # read but unused here
        tf = prob.time_final(-1)
        result = Condition()
        result.add(x[0] - 0.0)
        result.add(v[0] - 1.0)
        result.add(x[-1] - 0.0)
        result.add(v[-1] + 1.0)
        result.add(tf - 1.0)
        return result()
    def inequality_Bryson(prob, obj):
        # path constraint 0 <= x(t) <= obj.max_x
        x = prob.states_all_section(0)
        v = prob.states_all_section(1)   # read but unused here
        u = prob.controls_all_section(0)  # read but unused here
        result = Condition()
        # lower bounds
        result.add(x - 0.0)
        # upper bounds
        result.add(obj.max_x - x)
        return result()
    def cost_Bryson(prob, obj):
        # Mayer (endpoint) part of the cost is zero; the quadratic control
        # effort is supplied separately via running_cost_Bryson
        u = prob.controls_all_section(0)  # read but unused here
        # integrated = 0.5 * sum(u**2 *prob.w[0])
        integrated = 0.0
        return integrated
def running_cost_Bryson(prob, obj):
u = prob.controls_all_section(0)
return 0.5 * u**2
time_init = [0, 1.0]
nodes = [30]
num_states = [2]
num_controls = [1]
max_iteration = 10
prob = Problem(time_init, nodes, num_states, num_controls, max_iteration)
obj = BrysonDenham()
x_init = Guess.constant(prob.time_all_section, 0.1)
prob.set_states_all_section(0, x_init)
prob.dynamics = [dynamics_Bryson]
prob.knot_states_smooth = []
prob.cost = cost_Bryson
prob.running_cost = running_cost_Bryson
prob.equality = equality_Bryson
prob.inequality = inequality_Bryson
prob.solve(obj)
prob.to_csv("test.csv")
prob.plot()
x = prob.states_all_section(0)
v = prob.states_all_section(1)
u = prob.controls_all_section(0)
time = prob.time_update()
plt.figure()
plt.subplot(3, 1, 1)
plt.plot(time, x, "C0-o", label="x")
plt.ylabel("x(t)")
plt.legend()
plt.grid()
plt.title("Bryson-Denham Problem")
plt.subplot(3, 1, 2)
plt.plot(time, v, "C1-o", label="v")
plt.ylabel("v(t)")
plt.legend()
plt.grid()
plt.subplot(3, 1, 3)
plt.plot(time, u, "C2-o", label="u")
plt.ylabel("u(t)")
plt.legend()
plt.grid()
print("===== ====")
print("===== Unit scaling Test ====")
print("===== ====")
section = 0
x = prob.states(0, section)
v = prob.states(1, section)
u = prob.controls(0, section)
dx = Dynamics(prob)
dx[0] = v
dx[1] = u
dx()
print("===== ====")
print("===== Bad Brachistochrone Problem ====")
print("===== ====")
    class Ball:
        # parameter container for the (badly scaled) brachistochrone demo
        def __init__(self):
            self.g = 9.8 # gravity [m/s2]
            self.l = 600000 # goal [m]
            self.h = 300000 # depth limit [m]
    def dynamics_Brachistochrone(prob, obj, section):
        # brachistochrone kinematics; theta is measured from the downward
        # vertical (y axis points down)
        x = prob.states(0, section)      # read but unused in the ODEs
        y = prob.states(1, section)      # read but unused in the ODEs
        v = prob.states(2, section)
        theta = prob.controls(0, section)
        dx = Dynamics(prob, section)
        dx[0] = v * np.sin(theta)        # horizontal speed
        dx[1] = v * np.cos(theta)        # vertical (downward) speed
        dx[2] = obj.g * np.cos(theta)    # gravity component along the path
        return dx()
    def equality_Brachistochrone(prob, obj):
        # boundary conditions: start at rest at the origin, finish at
        # (obj.l, 0); written with Condition.equal (the commented lines
        # are the equivalent raw add() form)
        x = prob.states_all_section(0)
        y = prob.states_all_section(1)
        v = prob.states_all_section(2)
        theta = prob.controls_all_section(0)  # read but unused here
        tf = prob.time_final(-1)              # read but unused here
        result = Condition()
        # result.add(x[0] - 0.0)
        # result.add(y[0] - 0.0)
        # result.add(v[0] - 0.0)
        # result.add(x[-1] - obj.l)
        # result.add(y[-1] - 0.0)
        result.equal(x[0], 0.0)
        result.equal(y[0], 0.0)
        result.equal(v[0], 0.0)
        result.equal(x[-1], obj.l)
        result.equal(y[-1], 0.0)
        return result()
    def inequality_Brachistochrone(prob, obj):
        # path and time bounds, written with the named lower_bound /
        # upper_bound helpers (the commented lines are the raw add() form)
        x = prob.states_all_section(0)
        y = prob.states_all_section(1)
        v = prob.states_all_section(2)   # read but unused here
        theta = prob.controls_all_section(0)
        tf = prob.time_final(-1)
        result = Condition()
        # # lower bounds
        # result.add(tf - 500)
        # result.add(x - 0)
        # result.add(y - 0)
        # result.add(theta - 0)
        # # upper bounds
        # result.add(np.pi - theta)
        # result.add(obj.l - x)
        # result.add(700 - tf)
        # lower bounds
        result.lower_bound(tf, 500)
        result.lower_bound(x, 0)
        result.lower_bound(y, 0)
        result.lower_bound(theta, 0)
        # upper bounds
        result.upper_bound(theta, np.pi)
        result.upper_bound(x, obj.l)
        result.upper_bound(tf, 700)
        return result()
def cost_Brachistochrone(prob, obj):
tf = prob.time_final(-1)
return tf
    def cost_derivative_Brachistochrone(prob, obj):
        # gradient of cost: 1 at the final-time variable, 0 elsewhere.
        # NOTE(review): index 0 is used because this demo has a single
        # section; with multiple sections index_time_final(-1) would be
        # the final time of the last section (see commented alternative)
        jac = Condition(prob.number_of_variables)
        index_tf = prob.index_time_final(0)
        # index_tf = prob.index_time_final(-1)
        jac.change_value(index_tf, 1)
        return jac()
time_init = [0.0, 700.0]
n = [30]
num_states = [3]
num_controls = [1]
max_iteration = 10
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
obj = Ball()
unit_x = 300000
unit_y = 100000
unit_time = 100
prob.set_unit_states_all_section(0, unit_x)
prob.set_unit_states_all_section(1, unit_y)
prob.set_unit_states_all_section(2, unit_x / unit_time)
prob.set_unit_controls_all_section(0, 1.0)
prob.set_unit_time(unit_time)
half_nodes = int(prob.nodes[0] / 2)
theta_init = Guess.linear(prob.time_all_section, 0.0, np.pi)
x_init = Guess.linear(prob.time_all_section, 0.0, obj.l)
y_init0 = Guess.linear(prob.time_all_section[:half_nodes], 0, obj.h)
y_init1 = Guess.linear(prob.time_all_section[half_nodes:], obj.h, 0)
y_init = np.hstack((y_init0, y_init1))
v_init0 = Guess.linear(prob.time_all_section[:half_nodes], 0, obj.h)
v_init1 = Guess.linear(prob.time_all_section[half_nodes:], obj.h, 0)
v_init = np.hstack((v_init0, v_init1))
prob.set_states_all_section(0, x_init)
prob.set_states_all_section(1, y_init)
prob.set_controls_all_section(0, theta_init)
prob.dynamics = [dynamics_Brachistochrone]
prob.knot_states_smooth = []
prob.cost = cost_Brachistochrone
prob.cost_derivative = cost_derivative_Brachistochrone
prob.equality = equality_Brachistochrone
prob.inequality = inequality_Brachistochrone
    def display_func():
        # progress callback for solve(): reads the module-level `prob`
        # and reports the current final time
        tf = prob.time_final(-1)
        print("tf: {0:.5f}".format(tf))
prob.solve(obj, display_func)
x = prob.states_all_section(0)
y = prob.states_all_section(1)
v = prob.states_all_section(2)
theta = prob.controls_all_section(0)
time = prob.time_update()
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(time, x, marker="o", label="x")
plt.plot(time, y, marker="o", label="y")
plt.plot(time, v, marker="o", label="v")
plt.grid()
plt.ylabel("position [m], velocity [m/s]")
plt.legend(loc="best")
plt.subplot(2, 1, 2)
plt.plot(time, theta, marker="o", label="gamma")
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("angle [rad]")
plt.legend(loc="best")
plt.figure()
plt.plot(x, y, marker="o", label="trajectry")
plt.grid()
plt.xlabel("x [m]")
plt.ylabel("y [m]")
plt.legend(loc="best")
plt.gca().invert_yaxis()
prob.plot("Bad Brachistochrone")
# plt.show()
|
{"hexsha": "b6b2f947575f7889ab5ad568e14b66007adf6afb", "size": 53441, "ext": "py", "lang": "Python", "max_stars_repo_path": "OpenGoddard/optimize.py", "max_stars_repo_name": "likping/OpenGoddard", "max_stars_repo_head_hexsha": "0906ee85038de85d7683e19532df62fcd53a9e28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2017-03-06T07:38:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T17:50:34.000Z", "max_issues_repo_path": "OpenGoddard/optimize.py", "max_issues_repo_name": "likping/OpenGoddard", "max_issues_repo_head_hexsha": "0906ee85038de85d7683e19532df62fcd53a9e28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-05-17T14:07:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-17T05:36:28.000Z", "max_forks_repo_path": "OpenGoddard/optimize.py", "max_forks_repo_name": "likping/OpenGoddard", "max_forks_repo_head_hexsha": "0906ee85038de85d7683e19532df62fcd53a9e28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2017-05-21T17:29:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T08:19:58.000Z", "avg_line_length": 32.3884848485, "max_line_length": 97, "alphanum_fraction": 0.5581669505, "include": true, "reason": "import numpy,from scipy", "num_tokens": 13500}
|
[STATEMENT]
lemma set1_FGcontra_bound:
fixes x :: "(_, 'co1, 'co2, 'co3, 'co4, 'co5,
'contra1, 'contra2, 'contra3, 'contra4, 'contra5, 'f1, 'f2) FGcontra"
shows "card_of (set1_FGcontra x) <o (bd_FGcontra :: ('co1, 'co2, 'co3, 'co4, 'co5,
'contra1, 'contra2, 'contra3, 'contra4, 'contra5, 'f1, 'f2) FGcontrabd rel)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. |set1_FGcontra x| <o bd_FGcontra
[PROOF STEP]
unfolding set1_FGcontra_def bd_FGcontra_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. |set1_F x| <o bd_F
[PROOF STEP]
using set1_F_bound
[PROOF STATE]
proof (prove)
using this:
|set1_F ?x| <o bd_F
goal (1 subgoal):
1. |set1_F x| <o bd_F
[PROOF STEP]
.
|
{"llama_tokens": 347, "file": "BNF_CC_Composition", "length": 3}
|
# utils.py
# Ben Cook (bcook@cfa.harvard.edu)
import numpy as np
from scipy.misc import logsumexp
from astropy.io import fits
import os, sys
# A module to create various utility functions
def make_pcmd(mags):
    """Convert a magnitude array to pCMD form: row 0 keeps the first-band
    magnitude, row i (i >= 1) becomes the color mags[i] - mags[i-1].

    Args:
        mags (ndarray) : shape (n_filters, n_pixels) magnitudes
    Returns:
        ndarray of the same shape (magnitude plus successive colors)
    """
    pcmd = np.copy(mags)
    # successive differences along the filter axis give the colors
    pcmd[1:] = np.diff(mags, axis=0)
    return pcmd
def pcmd_to_mags(pcmd):
    """Invert make_pcmd: the cumulative sum down the filter axis turns
    (magnitude, colors...) rows back into per-band magnitudes.

    Args:
        pcmd (ndarray) : shape (n_filters, n_pixels) pCMD array
    Returns:
        ndarray of the same shape containing magnitudes
    """
    return np.cumsum(pcmd, axis=0)
def mean_mags(pcmd):
    """Per-band mean magnitude over all pixels of a pCMD, computed as a
    log-sum-exp average (result stays in magnitude units).

    Args:
        pcmd (ndarray) : shape (n_filters, n_pixels) pCMD array
    Returns:
        (n_filters, ) ndarray of mean magnitudes
    """
    # scipy removed logsumexp from scipy.misc; prefer scipy.special
    try:
        from scipy.special import logsumexp
    except ImportError:  # very old scipy
        from scipy.misc import logsumexp
    # cumulative sum recovers magnitudes from (mag, colors...) rows,
    # equivalent to pcmd_to_mags(pcmd)
    mags = np.cumsum(pcmd, axis=0)
    mag_factor = 0.4 * np.log(10)  # convert from base 10 to base e
    weights = float(1) / mags.shape[1]  # evenly weight each pixel
    return logsumexp(mag_factor*mags, b=weights, axis=1)/mag_factor
def mean_mags_old(pcmd):
    """Older variant of mean_mags: log of the mean flux in base e,
    NOT converted back into magnitude units (no division by mag_factor,
    and the sign convention is -0.4).

    Args:
        pcmd (ndarray) : shape (n_filters, n_pixels) pCMD array
    Returns:
        (n_filters, ) ndarray
    """
    # scipy removed logsumexp from scipy.misc; prefer scipy.special
    try:
        from scipy.special import logsumexp
    except ImportError:  # very old scipy
        from scipy.misc import logsumexp
    # cumulative sum recovers magnitudes, equivalent to pcmd_to_mags(pcmd)
    mags = np.cumsum(pcmd, axis=0)
    mag_factor = -0.4 * np.log(10)  # convert from base 10 to base e
    weights = float(1) / mags.shape[1]  # evenly weight each pixel
    return logsumexp(mag_factor*mags, b=weights, axis=1)
def make_hess(pcmd, bins, err_min=2.):
    """Bin a pCMD into Hess diagrams (2D mag-color histograms) with
    Poisson-style errors.

    Args:
        pcmd (ndarray) : shape (n_filters, n_pixels); row 0 magnitudes,
            remaining rows colors
        bins (sequence) : bins[0] magnitude bin edges, bins[i+1] the
            edges for color i
        err_min (float, optional) : floor applied to raw count errors
            before normalization (default 2.)
    Returns:
        (counts, hess, err) : raw counts, pixel-normalized counts, and
        pixel-normalized errors
    """
    mags = pcmd[0]
    colors = pcmd[1:]
    n_colors = colors.shape[0]
    n = pcmd.shape[1] # total number of pixels
    counts = []
    for i in range(n_colors):
        c, _, _ = np.histogram2d(mags, colors[i],
                                 bins=[bins[0], bins[i+1]])
        if np.sum(c) < n: # if some pixels fell outside bins, add to corner of Hess grid
            c[0, 0] += (n - np.sum(c))
        counts += [c]
    if n_colors == 0:
        # single-band pCMD: fall back to a 1D magnitude histogram
        c, _ = np.histogram(mags, bins=bins[0])
        counts += [c]
    counts = np.array(counts)
    # clip any negatives so sqrt below is safe (histogram counts should
    # already be non-negative; this is defensive)
    counts[counts <= 0.] = 0.
    err = np.sqrt(counts)
    # inflate small errors
    err[err <= err_min] = err_min
    # err += err_min * np.exp(-err)
    # normalize by number of pixels
    hess = counts / n
    err /= n
    return counts, hess, err
class DataSet(object):
    """Multi-band image stack loaded from FITS files, one file per filter.

    Images are stored as a (ny, nx, n_bands) array; one filter object per
    band is instantiated with the EXPTIME from each file's header.
    """
    def __init__(self, file_names, filter_classes):
        # one FITS file per filter class
        assert(len(file_names) == len(filter_classes))
        self.n_bands = len(filter_classes)
        headers = []
        with fits.open(file_names[0]) as hdu:
            # prefer the 'SCI' extension when present, else the primary HDU
            if len(hdu) > 1:
                data = hdu['SCI'].data
            else:
                data = hdu['PRIMARY'].data
            self.im_shape = data.shape
            self.images = np.zeros((self.im_shape[0], self.im_shape[1],
                                    self.n_bands))
            headers.append(hdu[0].header)
            self.images[:, :, 0] = data
        for i, f in enumerate(file_names[1:]):
            with fits.open(f) as hdu:
                if len(hdu) > 1:
                    data = hdu['SCI'].data
                else:
                    data = hdu['PRIMARY'].data
                self.images[:, :, i+1] = data
                headers.append(hdu[0].header)
        self.headers = np.array(headers)
        assert(self.images.ndim == 3) # else the images weren't matching sizes
        filters = []
        for filt, header in zip(filter_classes, headers):
            # each filter is constructed with its image's exposure time
            filters.append(filt(exposure=header['EXPTIME']))
        self.filters = np.array(filters)

    def get_pcmd(self, bool_matrix, bands=None):
        # extract the pixels selected by bool_matrix and convert to a pCMD
        if bands is not None:
            assert(max(bands) < self.n_bands)
            # NOTE(review): this looks inverted -- presumably it should be
            # min(bands) >= 0 to reject negative band indices; confirm
            assert(min(bands) <= 0)
            pixels = self.images[bool_matrix, bands]
        else:
            bands = np.arange(self.n_bands)
            pixels = self.images[bool_matrix, :]
        assert(bool_matrix.shape == self.im_shape)
        filts = self.filters[bands]
        mags = np.zeros_like(pixels.T)
        for i in bands:
            # NOTE(review): pixels[:, i] and filts[i] assume bands is a
            # contiguous 0-based range; verify for band subsets
            flux = pixels[:, i] * filts[i]._exposure # convert to counts
            mags[i] = filts[i].counts_to_mag(flux)
        pcmd = make_pcmd(mags)
        return pcmd

    def get_image(self, bool_matrix=None, downsample=1, bands=None):
        # return a (possibly downsampled, possibly masked) image cube and
        # the bounding box (ymin, ymax, xmin, xmax) it covers
        if bands is None:
            bands = np.arange(self.n_bands)
        if bool_matrix is None:
            images = np.copy(self.images[::downsample, ::downsample, :])
            for i, b in enumerate(bands):
                # scale back to counts using each band's exposure
                images[:, :, i] *= self.filters[b]._exposure
            xmin, ymin = 0, 0
            xmax, ymax = self.im_shape
        else:
            assert(bool_matrix.shape == self.images.shape[:2])
            bool_matrix = bool_matrix[::downsample, ::downsample]
            x, y = np.where(bool_matrix)
            xmin, xmax = min(x), max(x)+1
            ymin, ymax = min(y), max(y)+1
            # NOTE(review): output hard-codes 3 channels regardless of
            # len(bands); confirm intended (RGB display?)
            images = np.zeros((xmax-xmin, ymax-ymin, 3))
            # NOTE(review): this loop rebinds the local `a` only and does
            # NOT modify xmin/xmax/ymin/ymax -- it has no effect
            for a in [xmin, xmax, ymin, ymax]:
                a *= downsample
            bools = bool_matrix[xmin:xmax, ymin:ymax]
            for i, b in enumerate(bands):
                images[bools, i] = self.images[::downsample,
                                               ::downsample][bool_matrix, b]
                images[:, :, i] *= self.filters[b]._exposure
        return images, (ymin, ymax, xmin, xmax)
class PrintRedirect:
    """
    Context manager that redirects all stdout into a log file
    (os.devnull when no logfile is given), restoring stdout on exit.
    """
    def __init__(self, logfile=None):
        self._original_stdout = sys.stdout
        self.logfile = os.devnull if logfile is None else logfile

    def __enter__(self):
        # append mode so repeated entries accumulate in the same log
        sys.stdout = open(self.logfile, 'a')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout
class RegularPrint:
    """
    Context within print behaves as usual

    No-op counterpart to PrintRedirect so callers can swap the two
    context managers without changing code.
    """
    def __init__(self):
        pass
    def __enter__(self):
        pass
    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
|
{"hexsha": "3ff77e9e7b1226eadc5d240194b7aa73c6cbfb80", "size": 5699, "ext": "py", "lang": "Python", "max_stars_repo_path": "pcmdpy/utils/utils.py", "max_stars_repo_name": "johnnygreco/pcmdpy", "max_stars_repo_head_hexsha": "fe38db999f4445c98bde168867274654b2be4dbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pcmdpy/utils/utils.py", "max_issues_repo_name": "johnnygreco/pcmdpy", "max_issues_repo_head_hexsha": "fe38db999f4445c98bde168867274654b2be4dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pcmdpy/utils/utils.py", "max_forks_repo_name": "johnnygreco/pcmdpy", "max_forks_repo_head_hexsha": "fe38db999f4445c98bde168867274654b2be4dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3131868132, "max_line_length": 89, "alphanum_fraction": 0.5513247938, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 1510}
|
\documentclass[a4paper,12pt,titlepage]{scrartcl}
\usepackage[utf8]{inputenc}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=black,
filecolor=magenta,
urlcolor=blue,
}
\usepackage{graphicx}
\graphicspath{ {./images/} }
\usepackage{fancyhdr}
\usepackage{lastpage}
\usepackage{listings}
\usepackage{float}
\pagestyle{fancy}
\fancyhf{}
\rfoot{Page \thepage \hspace{1pt} of \pageref{LastPage}}
\title{\textbf{KiloGuide}\\User Guide}
\titlehead{\centering\includegraphics{kilogo.png}}
\author{Simon Lejoly}
\date{May 2021}
\begin{document}
\maketitle
\newpage
\tableofcontents
\newpage
\section{Overview}
\textbf{KiloGuide} is a simple guide about \href{https://kilobotics.com}{kilobots}. \emph{Kilobots} are small physical robots designed by the University of Harvard and commercialized by the K-Team, commonly used to study swarm robotics. KiloGuide explains how to perform various tasks such as calibration or code compilation and provides multiple small projects to illustrate how coding for kilobots is done.
\section{Features}
KiloGuide is divided in two sections: \textbf{guides} and \textbf{tutorials}.
The \textbf{guides} aim to explain basic kilobot operations to you, such as "transferring code to a kilobot", "calibrating kilobots" or "using Kilo-GUI".
The \textbf{tutorials} focus on the implementation part. With tutorials, you will learn how to code for kilobots through easy and diverse projects.
\subsection{Guides}
KiloGuide currently contains 5 guides:
\subsubsection{Getting started with kilobots}
Learn how to turn your kilobots on and off. Discover the components of a kilobot and what it can do with them. Know how to calibrate your kilobots.
\subsubsection{Coding for kilobots}
Learn the basics of robot programming, which language and which code templates are used to write programs for kilobots.
\subsubsection{Compile your code}
Learn how to convert your code into an executable file, that will be read by your kilobots.
\subsubsection{Transfer and run your program}
Learn how to transfer the executable file into your kilobots, start, pause and stop the program.
\subsubsection{Use the debug feature}
Learn how to use the simple debug feature with kilobots, to know what is not working in your programs.
\subsection{Tutorials}
KiloGuide currently contains 5 tutorials:
\subsubsection{Race Around the World}
Learn the basics of robot programming with one single kilobot. Write a simple program to turn on its LED and make it move along a race track.
\subsubsection{Full Metal Kilobot}
Get into a more complex program and use communication between two kilobots in a creative way. One of the kilobots is the instructor, yelling orders to the rookie, which must then execute them... with more or less precision.
\subsubsection{King-o-bot's Games}
In this tutorial inspired by medieval knight tournaments, the kilobots must fight in duel, going one toward the other at astounding speeds. The first to freak out loses and shall not be King-o-bot's great champion!
\subsubsection{Morphogenetics}
In the body of living creatures, a cell can sometimes approximate its distance to another cell by analysing the neighbouring concentration of chemicals produced by that cell. Let's put this idea in practice with kilobots!
\subsubsection{Rush Hour}
In this tutorial, a lot of kilobots are placed in a limited space. As they move around randomly, they must display different colors depending on the number of kilobots they detect around them. The goal is to obtain some heat-map of the kilobot concentrations at a high scale.
\section{Access / Download}
\subsection{Access from the Internet}
You can easily access KiloGuide online following \href{https://simlej18.github.io/KiloGuide/}{this link}.
\subsection{Download}
If you want to have access to KiloGuide offline, you may download it from \href{https://www.mediafire.com/file/olqogopejhlbe3z/KiloGuide.zip/file}{this website}.
\section{Getting started}
\subsection{Getting started from the online version}
If you followed the link above, you should already see the homepage of KiloGuide.
\subsection{Getting started from the downloaded version}
The downloaded file should be a zip archive. Un-zip it and enter the KiloGuide directory. You should find a file named \emph{"index.html"}. Open this file in your Internet browser. You should now see KiloGuide's homepage!
\subsection{Getting started with kilobots}
If you have never used kilobots before, the \emph{"Getting started"} guide is a good starting point. You can access it from the navigation bar (see below).
\subsection{Navigate through the guide}
\begin{figure}[H]
\makebox[\textwidth][c]{\includegraphics[scale=0.68]{expl.png}}
\end{figure}
\subsubsection{Navigation bar}
On the left side of the website you will find the navigation bar. It is composed of two pages (\emph{"Home"} and \emph{"About"}) and two categories (\emph{"Guides"} and \emph{"tutorials"}), each containing 6 other pages.
You can access each page by clicking on it. Note that clicking on a category will lead you nowhere. However, the \emph{"Guide summary"} and \emph{"Tutorials summary"} pages are meant to give you an overview of all the guides / tutorials.
\subsubsection{"Next" and "Previous" buttons}
If you wish to read each page of the guide one after the other, you should use the \emph{"Next"} and \emph{"Previous"} buttons. You can access these buttons at the bottom of the navigation bar. You will also find them at the end of each page.
\begin{center}
\includegraphics[scale=0.6]{previous&next.png}
\end{center}
\subsubsection{Search engine}
You may want to look for information about one specific aspect of kilobots. To help you find them, KiloGuide is equipped with a \emph{search engine}. You can access this search engine at the top of the navigation bar.
Once you have written what you are looking for, press "enter" to be redirected to the \emph{result page}. This page displays all the sections of KiloGuide related to the keywords you have entered.
\begin{center}
\includegraphics[scale=0.3]{kilosearch.png}
\end{center}
\section{Troubleshooting}
Here are the most common problems that can occur when using KiloGuide:\\
\textbf{The zip archive cannot be un-zipped}
A problem might have happened during the download process. Download KiloGuide a second time and try again. Make sure that your internet browser and operating system are up-to-date and that your access to the Internet is stable.\\
\textbf{The navigation bar does not appear}
When the window is too thin (on mobile devices, for example), KiloGuide hides the navigation bar. You can access it either by expanding your window horizontally or by clicking the little drawer icon \includegraphics[height=11px]{drawer.png} in the top-left corner.\\
\textbf{Some parts of the guide seem too obscure / complicated}
KiloGuide is fully independent of the team designing the kilobot and its programming library. If you have trouble with the kilobot environment, consider visiting the sources listed in the "Resources and troubleshooting" section of KiloGuide's homepage.
\section{FAQ}
Here are some frequently asked questions about KiloGuide:\\
\textbf{I spotted a mistake in KiloGuide. How can I report it?}
You can report any mistake in the "issues" section of \href{https://github.com/SimLej18/KiloGuide}{KiloGuide's GitHub repository}. Before posting an issue, make sure that it was not already spotted by another user.
\emph{Note that KiloGuide comes with no guarantee in terms of maintenance and that your issue may remain unsolved.}\\
\textbf{I want to contribute to the KiloGuide project. How can I do so?}
You can contribute to the KiloGuide project by submitting a pull request in the "pull requests" section of \href{https://github.com/SimLej18/KiloGuide}{KiloGuide's GitHub repository}.
Before doing so, you might want to understand how KiloGuide is implemented. You will find useful information in \emph{KiloGuide's developer guide}, which is available in the repository.
\emph{Note that KiloGuide comes with no guarantee in terms of maintenance and that your pull request may not be approved or even reviewed.}\\
\textbf{Are there differences between the online version and the downloadable version of KiloGuide?}
To this day, both versions are completely identical. However, if KiloGuide was to receive an update, previous downloaded copies would be deprecated and should be re-downloaded. The online version on the other hand, will always stay up-to-date.
\section{Contact}
KiloGuide and its user guide were written by \emph{Simon Lejoly} (\href{mailto:simonlejoly@icloud.com}{simonlejoly@icloud.com}) and are part of a project conducted by the \emph{University of Namur}.
Some of the content of this user guide, including images, may be subject to copyright.\\\\
\begin{center}
\includegraphics[scale=0.3]{unamur.png}
\end{center}
\end{document}
|
{"hexsha": "f5af53793407fab072e4de3e4a03a2696d5691d2", "size": 8968, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/UserGuide/main.tex", "max_stars_repo_name": "SimLej18/KiloGuide", "max_stars_repo_head_hexsha": "5100df4da103f9d29113231ad87024a83c239faf", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-10T15:40:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T15:40:53.000Z", "max_issues_repo_path": "doc/UserGuide/main.tex", "max_issues_repo_name": "SimLej18/KiloGuide", "max_issues_repo_head_hexsha": "5100df4da103f9d29113231ad87024a83c239faf", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/UserGuide/main.tex", "max_forks_repo_name": "SimLej18/KiloGuide", "max_forks_repo_head_hexsha": "5100df4da103f9d29113231ad87024a83c239faf", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5339805825, "max_line_length": 408, "alphanum_fraction": 0.784455843, "num_tokens": 2177}
|
"""
KMATools
Package for parsing various files produced by KMA. Tested on KMA 1.3.22.
"""
module KMATools
using BioSymbols: DNA
# Curried lazy wrappers so iterators can be chained with `|>`:
# `itr |> imap(f)` is `Iterators.map(f, itr)`, and likewise for `ifilter`.
imap(f) = x -> Iterators.map(f, x)
ifilter(f) = x -> Iterators.filter(f, x)
# Exact tab-separated column-header line expected on the first line of a
# KMA .spa file; `parse_spa` rejects files whose header differs.
const SPA_HEADER = join(
    [
        "#Template",
        "Num",
        "Score",
        "Expected",
        "Template_length",
        "Query_Coverage",
        "Template_Coverage",
        "Depth",
        "tot_query_Coverage",
        "tot_template_Coverage",
        "tot_depth",
        "q_value",
        "p_value",
    ],
    '\t'
)
"""
parse_spa(io::IO, path::String) -> Vector{NamedTuple}
Parse .spa file, returning a vector of `NamedTuple` with the following names:
`template, num, score, expected, tlen, qcov, tcov, depth, total_qcov, total_tcov,`
`total_depth, qval, pval`.
Coverages and identities are represented as fractions (`Float64`) in [0.0, 1.0]
"""
function parse_spa(io::IO, path::String)
counted_lines = enumerate(eachline(io))
header = let
it = iterate(counted_lines)
header = if it === nothing
error("Missing header from file \"$path\"")
else
last(first(it))
end
if strip(header) != SPA_HEADER
error("Malformed header in file \"$path\"")
end
header
end
fields = fill(SubString("", 1:0), 13)
counted_lines |>
ifilter(i -> !isempty(strip(last(i)))) |>
imap() do (line_number, line)
strip_split!(fields, line, UInt8('\t'))
return (;
template = String(fields[1]),
num = parse(UInt, fields[2], base=10),
score = parse(UInt, fields[3], base=10),
expected = parse(UInt, fields[4], base=10),
tlen = parse(UInt, fields[5], base=10),
qcov = round(parse(Float64, fields[6]) / 100, digits=6),
tcov = round(parse(Float64, fields[7]) / 100, digits=6),
depth = parse(Float64, fields[8]),
total_qcov = round(parse(Float64, fields[9]) / 100, digits=6),
total_tcov = round(parse(Float64, fields[10]) / 100, digits=6),
total_depth = parse(Float64, fields[11]),
qval = parse(Float64, fields[12]),
pval = parse(Float64, fields[13]),
)
end |>
collect
end
# Exact tab-separated column-header line expected on the first line of a
# KMA .res file; `parse_res` rejects files whose header differs.
const RES_HEADER = join(
    [
        "#Template",
        "Score",
        "Expected",
        "Template_length",
        "Template_Identity",
        "Template_Coverage",
        "Query_Identity",
        "Query_Coverage",
        "Depth",
        "q_value",
        "p_value",
    ],
    '\t'
)
"""
parse_res(io::IO, path::String) -> Vector{NamedTuple}
Parse .res file, returning a vector of `NamedTuple` with the following names:
`template, score, expected, tlen, tid, tcov, qid, qcov, depth qval, pval`.
Coverages and identities are represented as fractions (`Float64`) in [0.0, 1.0]
"""
function parse_res(io::IO, path::String)
counted_lines = enumerate(eachline(io))
header = let
it = iterate(counted_lines)
header = if it === nothing
error("Missing header from file \"$path\"")
else
last(first(it))
end
if strip(header) != RES_HEADER
error("Malformed header in file \"$path\"")
end
header
end
fields = fill(SubString("", 1:0), 11)
counted_lines |>
ifilter(i -> !isempty(strip(last(i)))) |>
imap() do (line_number, line)
strip_split!(fields, line, UInt8('\t'))
return (;
template = String(fields[1]),
score = parse(UInt, fields[2], base=10),
expected = parse(UInt, fields[3], base=10),
tlen = parse(UInt, fields[4], base=10),
tid = round(parse(Float64, fields[5]) / 100, digits=6),
tcov = round(parse(Float64, fields[6]) / 100, digits=6),
qid = round(parse(Float64, fields[7]) / 100, digits=6),
qcov = round(parse(Float64, fields[8]) / 100, digits=6),
depth = parse(Float64, fields[9]),
qval = parse(Float64, fields[10]),
pval = parse(Float64, fields[11]),
)
end |>
collect
end
# Split `s` on the byte `sep` into exactly `length(v)` fields, stripping
# surrounding whitespace from each and writing them into `v` in place.
# Throws a `BoundsError` when there are too many fields and errors when
# there are too few. Returns `v`.
function strip_split!(v::Vector{SubString{String}}, s::Union{String, SubString{String}}, sep::UInt8)
    nfields = length(v)
    field_idx = 0
    field_start = 1
    for pos in 1:ncodeunits(s)
        codeunit(s, pos) == sep || continue
        field_idx += 1
        field_idx >= nfields && throw(BoundsError(v, field_idx + 1))
        @inbounds v[field_idx] = strip(SubString(s, field_start, pos - 1))
        field_start = pos + 1
    end
    field_idx + 1 != nfields && error("Incorrect number of fields for strip_split!")
    # The last field runs from the final separator to the end of the string.
    @inbounds v[nfields] = strip(SubString(s, field_start, ncodeunits(s)))
    return v
end
"""
parse_map(io::IO, path::String) -> Vector{Tuple{String, Vector{Row}}}
Parse .mat file, returning a vector of sequence matrices. A sequence matrix
is a `Tuple{String, Vector{Row}}` with the sequence name as the first part. The `Row`
is `Tuple{DNA, NTuple{6, UInt32}}`, with the 6 depths being the number of
A, C, G, T, N and gap, respectively.
"""
function parse_mat(io::IO, path::String)
rowT = Tuple{DNA, NTuple{6, UInt32}}
result = Vector{Tuple{String, Vector{rowT}}}()
current = Vector{rowT}()
fields = Vector{SubString{String}}(undef, 7)
linedepths = Vector{UInt32}(undef, 6)
header = nothing
for line in eachline(io) |> imap(strip) |> ifilter(!isempty)
if startswith(line, '#')
if !isempty(current)
@assert header isa String
push!(result, (header, copy(current)))
empty!(current)
end
header = String(line[2:end])
continue
end
isnothing(header) && error("Expected header in file \"$path\"")
strip_split!(fields, line, UInt8('\t'))
if ncodeunits(first(fields)) != 1
error("Multi-character reference nucleotide in file \"$path\"")
end
refnuc = DNA(first(first(line)))
depth_tuple = ntuple(i -> parse(UInt32, @inbounds(fields[i+1]), base=10), Val(6))
@inbounds for i in 1:6
linedepths[i] = parse(UInt32, fields[i+1], base=10)
end
push!(current, (refnuc, depth_tuple))
end
isempty(current) || push!(result, (header, current))
return result
end
export parse_spa, parse_res, parse_mat
end # module
|
{"hexsha": "87550d9a620c3846f5a3641f3ad26d60a81df6d4", "size": 6619, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/KMATools.jl", "max_stars_repo_name": "jakobnissen/KMATools.jl", "max_stars_repo_head_hexsha": "ec29bb5282c80dcab46b2d8fa399b788e33ed4df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/KMATools.jl", "max_issues_repo_name": "jakobnissen/KMATools.jl", "max_issues_repo_head_hexsha": "ec29bb5282c80dcab46b2d8fa399b788e33ed4df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/KMATools.jl", "max_forks_repo_name": "jakobnissen/KMATools.jl", "max_forks_repo_head_hexsha": "ec29bb5282c80dcab46b2d8fa399b788e33ed4df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9303482587, "max_line_length": 100, "alphanum_fraction": 0.5463060885, "num_tokens": 1746}
|
# # from models.MyGANet4 import GANet
# #
# # model = GANet()
# # for name, module in model.named_children():
# # print(name)
#
# import torch
# import torch.nn as nn
#
# a = torch.randn(2, 3, 2, 2) # 右图
# b = torch.ones(2, 1, 2, 2) # disp
# print(a)
#
# def warp(x, disp):
# """
# warp an image/tensor (im2) back to im1, according to the optical flow
# x: [B, C, H, W] (im2)
# flo: [B, 2, H, W] flow
# """
# B, C, H, W = x.size()
# # mesh grid
# xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
# yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
# xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
# yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
# vgrid = torch.cat((xx, yy), 1).float()
#
# # vgrid = Variable(grid)
# vgrid[:, :1, :, :] = vgrid[:, :1, :, :] + disp
#
# # scale grid to [-1,1]
# vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
# vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
#
# vgrid = vgrid.permute(0, 2, 3, 1)
# output = nn.functional.grid_sample(x, vgrid,align_corners=True)
# return output
#
# o = warp(a,b)
#
# print(o)
from models.CasGANet10 import GANet
from models.MyGANet9 import GANet
from models.GANet11 import GANet
import numpy as np
import datetime
import torch
model = GANet()
# Report the model's total parameter count.
print('parameters:{}'.format(np.sum([p.numel() for p in model.parameters()]).item()))
model = torch.nn.DataParallel(model).cuda()
model.eval()  # inference mode: disable dropout / batch-norm updates
# Dummy stereo pair at the network's expected input resolution.
input1 = torch.randn(1, 3, 384, 768).cuda()
input2 = torch.randn(1, 3, 384, 768).cuda()
t = 0.
# Accumulate wall-clock time of 10 forward passes.
for i in range(10):
    with torch.no_grad():
        start = datetime.datetime.now()
        out1 = model(input1, input2)
        end = datetime.datetime.now()
        t += (end - start).total_seconds()
# Average forward time over the 10 runs.
# NOTE(review): CUDA kernel launches are asynchronous -- without
# torch.cuda.synchronize() before reading the clock these timings may
# under-report, and the first iteration includes warm-up; confirm.
print(t/10)
|
{"hexsha": "0358d05b3ae93118c34878e2a379824528aaabea", "size": 1803, "ext": "py", "lang": "Python", "max_stars_repo_path": "view.py", "max_stars_repo_name": "hx-Tang/GANet", "max_stars_repo_head_hexsha": "8935c9d3d82189fa6f940c2a877534a398a041e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "view.py", "max_issues_repo_name": "hx-Tang/GANet", "max_issues_repo_head_hexsha": "8935c9d3d82189fa6f940c2a877534a398a041e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "view.py", "max_forks_repo_name": "hx-Tang/GANet", "max_forks_repo_head_hexsha": "8935c9d3d82189fa6f940c2a877534a398a041e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5147058824, "max_line_length": 85, "alphanum_fraction": 0.5529672768, "include": true, "reason": "import numpy", "num_tokens": 680}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 14:51:28 2018
@author: yujika
"""
# Standard library
import glob
import os
import pickle

# Third-party
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#%matplotlib qt
import numpy as np

# Project-local
import util
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort ``img`` and, if an (nx, ny) chessboard is found, warp it to a
    top-down view so the first four corners land on a regular grid.

    Arguments:
        img: BGR calibration image.
        nx, ny: Number of inner chessboard corners per row / column.
        mtx, dist: Camera matrix and distortion coefficients.
    Returns:
        (warped_image, M) when corners are found; otherwise
        (undistorted_image, None).
    """
    img_und = cv2.undistort(img, mtx, dist)
    gray = cv2.cvtColor(img_und, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny))
    if ret:
        # BUGFIX: the pattern size was hard-coded to (9, 6); use (nx, ny)
        # so the function works for any board size it was asked to find.
        img_corner = cv2.drawChessboardCorners(img_und, (nx, ny), corners, ret)
        # Map the first 2x2 block of detected corners to a regular grid.
        src = np.float32([[corners[0, 0, :]], [corners[1, 0, :]],
                          [corners[nx, 0, :]], [corners[nx+1, 0, :]]])
        h, w, c = img.shape
        dst = np.float32([[0.5*w/nx, 0.5*h/ny], [1.5*w/nx, 0.5*h/ny],
                          [0.5*w/nx, 1.5*h/ny], [1.5*w/nx, 1.5*h/ny]])
        M = cv2.getPerspectiveTransform(src, dst)
        warped = cv2.warpPerspective(img_corner, M, (w, h))
        return warped, M
    # BUGFIX: the original returned the unbound name ``M`` here (NameError);
    # signal "no transform" explicitly instead.
    return img_und, None
def corners_undist_unwarp(img, corners, nx, ny):
    """Perspective-warp an already-undistorted chessboard image so that the
    first 2x2 block of detected corners lands on a regular grid."""
    height, width, _ = img.shape
    src = np.float32([[corners[0, 0, :]],
                      [corners[1, 0, :]],
                      [corners[nx, 0, :]],
                      [corners[nx + 1, 0, :]]])
    dst = np.float32([[0.5*width/nx, 0.5*height/ny],
                      [1.5*width/nx, 0.5*height/ny],
                      [0.5*width/nx, 1.5*height/ny],
                      [1.5*width/nx, 1.5*height/ny]])
    transform = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, transform, (width, height))
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
#cv2.imshow('img',img)
#cv2.waitKey(500)
plt.imshow(img)
plt.show()
#cv2.destroyAllWindows()
#Calibrate camera
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
nx=9
ny=6
img = cv2.imread('../camera_cal/calibration1.jpg')
img_und = cv2.undistort(img, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(img_und)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.savefig('../output_images/undistort_output.png')
plt.show()
# Apply undistortion + thresholding + perspective warp to the test images,
# then persist the calibration and warp parameters for later stages.
test_file_name = glob.glob('../test_images/straight_lines*.jpg')
for image_file in test_file_name:
    # Short name (e.g. "straight_lines1") used for output file naming.
    base_fn = os.path.basename(image_file).split('.')[0]
    image_org = mpimg.imread(image_file)
    image_undistort = cv2.undistort(image_org, mtx, dist, None, mtx)
    plt.imshow(image_undistort)
    plt.show()
    plt.imsave('../output_images/' + 'undistort_'+base_fn + '.png', image_undistort )
    image = image_undistort
    # Threshold the HLS S channel, then apply gradient filters on top of it.
    # NOTE(review): running Sobel filters on an already-thresholded map is
    # unusual -- confirm against util.abs_sobel_thresh's expectations.
    hls_binary = util.hls_select(image, thresh=(90, 255))
    ksize = 9  # Choose a larger odd number to smooth gradient measurements
    gradx = util.abs_sobel_thresh(hls_binary, orient='x', sobel_kernel=ksize, thresh=(40, 100))
    grady = util.abs_sobel_thresh(hls_binary, orient='y', sobel_kernel=ksize, thresh=(40, 100))
    mag_binary = util.mag_thresh(hls_binary, sobel_kernel=ksize, mag_thresh=(30, 100))
    dir_binary = util.dir_threshold(hls_binary, sobel_kernel=ksize, thresh=(0.7, 1.3))
    # BUGFIX: the original referenced an undefined name ``binary`` here
    # (NameError); any of the threshold maps has the right shape, use gradx.
    combined = np.zeros_like(gradx)
    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    # Trapezoid on the road (src) mapped to a rectangle (dst) = bird's-eye view.
    src_points = np.float32([[571,467],
                             [717,467],
                             [1105,720],
                             [205,720]])
    dst_points = np.float32([[1280/4, 0 ],
                             [1280/4*3, 0 ],
                             [1280/4*3, 720],
                             [1280/4, 720]
                             ])
    bird_view = util.warper(combined, src_points, dst_points)
    plt.title('COMBINED bird view ' + image_file)
    plt.imshow(bird_view,cmap='gray')
    plt.show()
    plt.imsave('../output_images/' + 'binary_combo_warped_' + base_fn + '.png', bird_view*255, cmap='gray' )
    # Also warp the RGB image and draw the source/destination quadrilaterals
    # so the perspective transform can be checked visually.
    bird_view_rgb = util.warper(image_undistort, src_points, dst_points)
    bird_view_rgb = cv2.polylines(bird_view_rgb,np.array([dst_points],dtype=np.int32),True, ( 255, 0, 0) ,thickness=10)
    image_roi = cv2.polylines(image_undistort, np.array([src_points],dtype=np.int32),True, ( 255, 0, 0 ), thickness=10)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
    f.tight_layout()
    ax1.imshow(image_roi)
    ax1.set_title('Original Image', fontsize=24)
    ax2.imshow(bird_view_rgb)
    ax2.set_title('bird view Image', fontsize=24)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.savefig('../output_images/' + 'warped_' + base_fn + '.jpg')
    plt.show()
# Persist calibration and warp parameters for the later pipeline stages.
save_pickle = {
        'mtx' : mtx,
        'dist': dist,
        'src_points' : src_points,
        'dst_points' : dst_points
        }
with open(util.camera_mtx_file_name,'wb' ) as f:
    pickle.dump(save_pickle, f, pickle.HIGHEST_PROTOCOL)
|
{"hexsha": "fee2d7dc35559927031b95c10c362674814f0f0b", "size": 5568, "ext": "py", "lang": "Python", "max_stars_repo_path": "my_work/camera_calibration.py", "max_stars_repo_name": "yujika/CarND-Advanced-Lane-Lines", "max_stars_repo_head_hexsha": "d8f671c60930353f7cd3e7b1c12f7d81fe50ab6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "my_work/camera_calibration.py", "max_issues_repo_name": "yujika/CarND-Advanced-Lane-Lines", "max_issues_repo_head_hexsha": "d8f671c60930353f7cd3e7b1c12f7d81fe50ab6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_work/camera_calibration.py", "max_forks_repo_name": "yujika/CarND-Advanced-Lane-Lines", "max_forks_repo_head_hexsha": "d8f671c60930353f7cd3e7b1c12f7d81fe50ab6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1558441558, "max_line_length": 119, "alphanum_fraction": 0.6294899425, "include": true, "reason": "import numpy", "num_tokens": 1726}
|
""" This module determines Auger, radiative, surface, and trap-assited recombination.
Used primarily by find_current function in single_cell_power.
Uses spectral.py to get radiative recombination and carriers.py for carrier concentration.
Created 12/18/2019 by Nicholas Irvin"""
import numpy as np
import math
from scipy.integrate import trapz
import spectral, carrier_models
# Constants
q = 1.60217646e-19 # Charge of electron (C)
k = 1.380658e-23 # J/K Boltzmann's constant
c = 2.99792458e8 # m/s The speed of light
h = 6.6260755e-34 # J*s Planck's constant (not h-bar)
g = 2*np.pi/(c**2)/(h**3)
inf = float('inf') # define infinity
def Auger(volt, photocollection, stack, P_PR):
    """ Calculate the Auger(-Meitner) recombination current density and lifetime.

    Room-temperature silicon is delegated to the more accurate Richter
    parametrization (Auger_Richter); every other material uses a single
    ambipolar Auger coefficient Ca (cm^6/s) from the literature.

    Arguments:
        volt: Terminal voltage (V).
        photocollection: Unused here; kept so all recombination routines share
            one signature, and forwarded to Auger_Richter.
        stack: Device description (thickness in cm, T_cell in K, composition,
            optional depth-resolved excess-carrier profile dn_z over grid Z).
        P_PR: Photon-recycling probability; only forwarded to Auger_Richter.
    Returns:
        (J_Auger, tau_Auger): recombination current density (A/m^2) and the
        effective Auger lifetime (s).
    Raises:
        ValueError: if no Auger coefficient is known for the cell material.
    """
    if (298 < stack.T_cell < 300.1) and stack.composition[stack.layer_num] == 'Si':
        return Auger_Richter(volt, photocollection, stack, P_PR)
    material = stack.composition[stack.layer_num]
    Ca = None  # (cm^6/s) ambipolar Auger coefficient, set per material below
    if material == 'Si':
        if 273.15 <= stack.T_cell <= 300:
            Ca = 1.66e-30  # Sinton "Recombination in Highly Injected Silicon"; overestimates lifetimes for lowly doped silicon according to Richter
        else:
            # Sisi Wang 2012 parametrization; only fitted for 243-473 K at an
            # injection level of 5e16 cm^-3.
            Ca = 1.1e-28/(stack.T_cell-193) + 2.1e-33*stack.T_cell
    elif material == 'GaNP':
        Ca = 1e-30  # value for GaP, http://www.ioffe.ru/SVA/NSM/Semicond/GaP/electric.html
    elif material == 'CdTe':
        Ca = 9e-32  # "Time resolved photo-luminescent decay characterization of mercury cadmium telluride focal plane arrays" (or 6e-32, https://www.osti.gov/pages/servlets/purl/1419412)
    elif material == 'CIS':
        Ca = 6e-30  # "Is Auger recombination responsible for the efficiency rollover", doi:10.1002/pssc.200778414
    elif material == 'CIGS':
        Ca = 1.2e-30  # "Is Auger recombination responsible for the efficiency rollover", doi:10.1002/pssc.200778414
    elif material == 'perovskite triple cation':
        Ca = 1e-28  # Supplementary Material of "Benefit from Photon Recycling at the Maximum Power Point of State-of-the-Art Perovskite Solar Cells"
    elif material == 'perovskite MaPI':
        Ca = 5.4e-28  # "Hybrid Perovskite Films Approaching the Radiative Limit With Over 90% Photoluminescence Quantum Efficiency."
    elif material == 'GaAs':
        Ca = 7e-30  # Strauss 1993, "Auger recombination in intrinsic GaAs"
    if Ca is None:
        # Previously an unknown material fell through to an UnboundLocalError;
        # fail with a clear message instead.
        raise ValueError('No Auger coefficient available for material '
                         + repr(material))
    W = stack.thickness
    carriers = carrier_models.Carriers(volt, stack)
    if volt >.01:
        if stack.dn_z[0] != 1:
            # Depth-resolved excess-carrier profile: integrate the Auger rate
            # (Ca/2)*(n^2 p + n p^2 - equilibrium terms) through the thickness.
            dn_z = stack.dn_z
            Z = stack.Z
            n = carriers.n - carriers.dn + dn_z
            p = carriers.p - carriers.dn + dn_z
            J_Auger = q*Ca*trapz((n**2*p - carriers.n0**2*carriers.p0
                    + n*p**2 - carriers.n0*carriers.p0**2), Z)/2*1e4  # 1e4 converts to A/m^2
            tau_Auger = trapz(dn_z, Z)/(J_Auger*1e-4/q)
        else:
            # Uniform carrier profile through the thickness W.
            J_Auger = q*Ca*W*(carriers.n**2*carriers.p - carriers.n0**2*carriers.p0
                     + carriers.n*carriers.p**2 - carriers.n0*carriers.p0**2)/2*1e4  # 1e4 converts to A/m^2
            tau_Auger = q*W*carriers.dn/(J_Auger*1e-4)
    else:
        # Negligible bias: no net Auger recombination; avoid divide-by-zero
        # in the lifetime.
        J_Auger = 0
        tau_Auger = inf
    return J_Auger, tau_Auger  # (A/m^2), (s)
# Special Auger function for crystalline silicon only (Richter parametrization).
def Auger_Richter(volt, photocollection, stack, P_PR):
    """ Calculate Auger recombination for the Richter parametrization of
    crystalline silicon at room temperature. Voltage in volts, thickness in cm,
    cell temperature in kelvin. Returns (J_Auger, tau_Auger): the Auger
    recombination current in A/m^2 and the Auger lifetime in s.
    From 'Improved quantitative description of Auger recombination in crystalline silicon.'"""
    carriers = carrier_models.Carriers(volt, stack)
    ni_eff = carrier_models.find_ni_eff(volt, carriers.Nd, 0, stack.T_cell, stack)
    # Richter enhancement-factor parameters: eeh = electron-electron-hole process,
    # ehh = electron-hole-hole process (densities in cm^-3).
    N0_eeh = 3.3e17
    N0_ehh = 7e17
    g_eeh = 1 + 13*(1-np.tanh((carriers.n0/N0_eeh)**0.66 ))
    g_ehh = 1 + 7.5*(1-np.tanh((carriers.p0/N0_ehh)**0.63))
    # # If trying to match doing the Richter's parametrization (instead of calculating radiative recombination), uncomment this block
    # B_low = 4.73e-15 # (cm^3/s) Low carrier injection and doping value for radiative
    # U = (U_Auger + (1-P_PR)*(carriers.n*carriers.p - ni_eff**2)*B_low*B_rel(carriers.n, carriers.p, stack.T_cell)
    #        + carriers.dn/stack.trap_lifetime)
    ## and also zero out radiative recombination in the line "J_radiative = q*flux.flux # A/m^2"
    if volt >.01:
        if stack.dn_z[0] != 1:
            # Depth-resolved excess-carrier profile available: integrate the
            # Auger rate U_Auger over depth Z.
            dn_z = stack.dn_z
            Z = stack.Z
            n = carriers.n - carriers.dn + dn_z
            p = carriers.p - carriers.dn + dn_z
            U_Auger = (n*p - ni_eff**2)*(2.5e-31*g_eeh*carriers.n0
                + 8.5e-32*g_ehh*carriers.p0 + 3e-29*dn_z**0.92)
            J_Auger = q*trapz(U_Auger, Z)*1e4 # (A/m^2)
            tau_Auger = trapz(dn_z, Z)/(J_Auger*1e-4/q) # 1e4 for units of A/m^2. with undoped: n^2p ~ ni^3*e^(3qV/2kT)
        else:
            # No depth profile: uniform carrier densities, local rate times thickness.
            U_Auger = (carriers.n*carriers.p - ni_eff**2)*(2.5e-31*g_eeh*carriers.n0
                + 8.5e-32*g_ehh*carriers.p0 + 3e-29*carriers.dn**0.92)
            J_Auger = q*stack.thickness*(U_Auger*1e4) # (A/m^2)
            tau_Auger = carriers.dn/(J_Auger*1e-4/(q*stack.thickness)) # 1e4 for units of A/m^2. with undoped: n^2p ~ ni^3*e^(3qV/2kT)
    else:
        # Negligible bias: no net Auger recombination, infinite lifetime.
        J_Auger = 0
        tau_Auger = inf
    return J_Auger, tau_Auger
def trap_assisted_recombination(volt, stack):
    """Shockley-Read-Hall (trap-assisted) recombination current in A/m^2.

    Uses the depth-resolved excess-carrier profile stack.dn_z when one has
    been computed (sentinel: dn_z[0] != 1), otherwise a uniform-density
    estimate scaled by the absorber thickness.
    """
    # Below ~10 mV bias there is negligible excess-carrier recombination.
    if volt <= .01:
        return 0
    carriers = carrier_models.Carriers(volt, stack)
    ni = carriers.ni_eff
    if stack.dn_z[0] != 1:
        # Depth-resolved profile available: integrate the SRH rate over depth.
        excess = stack.dn_z
        depth = stack.Z
        n_profile = carriers.n - carriers.dn + excess
        p_profile = carriers.p - carriers.dn + excess
        srh_rate = ((p_profile*n_profile - ni**2)
                    /(stack.trap_lifetime*(n_profile + ni + p_profile + ni)))
        return q*trapz(srh_rate, depth)*1e4  # 1e4: A/cm^2 -> A/m^2
    # Uniform carrier densities: local rate times thickness.
    n_avg = carriers.n
    p_avg = carriers.p
    return (q*stack.thickness*
        (p_avg*n_avg - ni**2)/(stack.trap_lifetime*(n_avg + ni + p_avg + ni))*1e4)
class Recombination:
    """Aggregate all recombination channels (radiative, free-carrier absorption,
    Auger, trap-assisted/SRH, surface) for the current absorber layer at bias
    `volt`; stores currents (A/m^2), lifetimes (s), and diffusion lengths (cm)
    as attributes. Side effect: registers itself on the stack as stack.rec and
    writes back the (possibly updated) photocollection."""
    def __init__(self, volt, E1, E2, photocollection, stack):
        """ Compile different types of recombination.

        volt -- terminal voltage (V); channels switch off below ~10 mV
        E1, E2 -- photon-energy integration limits for the radiative flux
        photocollection -- spectral collection model; may be rebuilt below
        stack -- device description; read and mutated (dn_z, rec, photocollection)
        """
        W = stack.thickness
        # Sentinel excess-carrier profile: dn_z[0] == 1 means "no depth-resolved
        # profile computed yet" throughout this module.
        stack.dn_z = np.ones(101)
        if stack.nonradiative_recombination_modeling == 'Yes' and volt!=0:
            carriers = carrier_models.Carriers(volt, stack)
            photon_recycling = spectral.photon_recycling(carriers, volt, photocollection, stack)
            alpha, alpha_total, PR, P_PR_E, E, internal_emission = photon_recycling.alpha, photon_recycling.alpha_total, stack.P_PR, stack.P_PR_E, photon_recycling.energies, photon_recycling.internal_emission # average distance traveled of recycled photons
        def radiative(self, stack): # store net radiative recombinations emitted out front and rear
            # Net radiative recombination from the emitted photon flux; called
            # again inside the convergence loop below when photocollection changes.
            flux = spectral.Flux(E1, E2, stack.T_cell, volt, photocollection, stack)
            J_radiative = q*flux.flux # A/m^2
            self.J_radiative = J_radiative # A/m^2
            J_rad_front = q*flux.front_flux # A/m^2
            self.J_rad_front = J_rad_front # A/m^2
            self.J_rad_back = q*flux.back_flux # A/m^2
            if stack.nonradiative_recombination_modeling == 'Yes' and volt!=0:
                PR = stack.photon_recycling
                # Free-carrier absorption of internally emitted photons is lumped
                # into the radiative channel.
                self.J_FCA = q*W*trapz(PR.internal_emission*PR.P_FCA_E, PR.energies)
                # need to interpolate arrays from stack eVs to E
                # self.J_FCA = q*W*trapz((photocollection.EQE/photocollection.absorptance)*PR.internal_emission*PR.P_FCA_E, PR.energies)
                self.J_radiative += self.J_FCA
        radiative(self, stack)
        J_rec = self.J_radiative  # running total of all recombination currents (A/m^2)
        def find_rad_lifetime(): # (s)
            # Radiative lifetime tau_rad = q*W*dn / J_radiative (closure over self).
            if volt < .01:
                self.radiative_lifetime = inf
                return inf
            else:
                if stack.dn_z[0]!=1 and stack.anything_variable[0] != 'absorptance': # first iteration or if using the absorptance model
                    dn = trapz(stack.dn_z, stack.Z)/W  # average the depth profile
                else:
                    dn = carrier_models.Carriers(volt, stack).dn
                tau_rad = inf if(dn==0) else(q*W*dn/(self.J_radiative)*1e4)
                self.radiative_lifetime = tau_rad
                return tau_rad
        if stack.nonradiative_recombination_modeling == 'No' or volt==0:
            # Radiative-only (or zero-bias) case: all nonradiative channels off.
            # 1e9 is used as an "effectively infinite" placeholder for lengths.
            self.carriers = 'N/A'
            if stack.nonradiative_recombination_modeling == 'Yes':
                self.carriers = carrier_models.Carriers(volt, stack)
            self.dn = 0
            JAuger = 0
            self.J_Auger = 0
            self.J_FCA = 0
            self.J_trap = 0
            self.J_SRV = 0
            SRV_lifetime = inf
            trap_lifetime = inf
            self.Auger_lifetime = inf
            self.radiative_lifetime = inf
            radiative_lifetime = inf
            self.diffusion_length = 1e9
            self.rad_diffusion_length = 1e9
            self.diffusivity = 1e9
            self.P_PR = 0
            self.base_resistivity = 0
        else:
            self.base_resistivity = carrier_models.base_resistivity(carriers, stack)
            self.carriers = carriers
            self.dn = carriers.dn
            self.P_PR = photon_recycling.P_PR
            radiative_lifetime = find_rad_lifetime()
            # now, photon-recycling diffusivity
            if stack.diffusion_limited == 'Yes':
                electrical_diffusivity = carrier_models.find_diffusivity(carriers, stack)
                def diffusivity_prefactor(z_limit):
                    # outputs the diffusivity prefactor as a function of the z limit. This is 1/3 in Dumke (1957), but here we limit the limits of the integral from infinity to z limit, which is related to absorber thickness.
                    # for speed, this was fit to the integral of (z*np.exp(-L)*(L-z)/L), with z from 0 to z_limit, and L from z to infinity
                    C = 0.357
                    Q = 1.75
                    B = 2.735
                    v = 1.968
                    def fitted_function(z):
                        return 1/3*C**(1/v)/(C+Q*z**(-B))**(1/v) # fits well above z_limit = .15
                    return np.piecewise(z_limit, [z_limit< 0.15, z_limit>= 0.15], [lambda z_limit: 0.5*(z_limit)**2, fitted_function]) # 4.99994*1e-1*(z_limit)**2 fits well below z_limit = 0.35
                diffusivity_Dumke_factor = diffusivity_prefactor(alpha_total*W/2)
                spontaneous_radiative_lifetime = radiative_lifetime*(1-PR)
                spectral_diffusivity_Dumke = P_PR_E/spontaneous_radiative_lifetime*diffusivity_Dumke_factor*1/alpha_total**2 *(alpha/alpha_total) # (cm^2/s)
                # Emission-spectrum-weighted average of the spectral diffusivity.
                photon_recycling_diffusivity = trapz(spectral_diffusivity_Dumke*internal_emission, E) / trapz(internal_emission, E)
                self.photon_recycling_D = photon_recycling_diffusivity
                self.diffusivity = electrical_diffusivity + photon_recycling_diffusivity
            """ Effective lifetime"""
            if stack.lifetimes != []: # in run.py, option to specify lifetimes, bulk_lifetimes, or trap_lifetimes
                # A fixed effective lifetime was supplied: everything that is not
                # radiative is folded into the trap channel.
                J_rec += q*stack.thickness*carriers.dn/stack.lifetime*1e4 # 1e4 converts 1/cm^2 to 1/m^2
                self.J_Auger = 0
                self.J_trap = J_rec - self.J_radiative
                self.J_SRV = 0
                trap_lifetime = 1/(1/stack.lifetime - 1/radiative_lifetime)
                self.Auger_lifetime = inf
                SRV_lifetime = inf
            else:
                """ SRV """
                if stack.SRVs != [] and stack.SRVs != [0]:
                    # From Dimensionless solution of the equation describing the effect of surface recombination on carrier decay in semiconductors:
                    SRV_lifetime = W/(stack.SRV) + (2*W/np.pi)**2/self.diffusivity
                    # Assumes S2 = 0, ie one relatively perfect contact
                    J_SRV = q*stack.thickness*carriers.dn/SRV_lifetime*1e4 # Surface
                    self.J_SRV = J_SRV # A/m^2
                    J_rec += J_SRV
                else:
                    SRV_lifetime = inf
                    self.J_SRV = 0
                """ Bulk """
                if stack.bulk_lifetimes != []:
                    bulk_lifetime = stack.bulk_lifetimes[stack.layer_num]
                    # NOTE(review): the line below reads stack.bulk_lifetime while
                    # the lifetime formula uses the local bulk_lifetime (this
                    # layer's list entry) -- confirm they refer to the same value.
                    J_bulk = q*stack.thickness*carriers.dn/stack.bulk_lifetime*1e4
                    J_rec += J_bulk
                    self.J_Auger = 0
                    self.J_trap = 0
                    trap_lifetime = 1/(1/bulk_lifetime - 1/radiative_lifetime - 1/SRV_lifetime)
                    self.Auger_lifetime = inf
                    SRV_lifetime = inf
                    """ Auger """
                else:
                    auger = Auger(volt, photocollection, stack, P_PR=self.P_PR)
                    JAuger = auger[0]
                    self.Auger_lifetime = auger[1]
                    self.J_Auger = JAuger # A/m^2
                    J_rec += JAuger
                    """ trap """
                    trap_lifetimes= stack.trap_lifetimes
                    if trap_lifetimes == []:
                        trap_lifetime = inf
                        self.J_trap = 0
                    else:
                        trap_lifetime = stack.trap_lifetime # used as fixed parameter
                        # from https://ieeexplore-ieee-org.ezproxy1.lib.asu.edu/stamp/stamp.jsp?tp=&arnumber=97400&tag=1
                        J_trap = (q*stack.thickness*
                            (carriers.p*carriers.n - carriers.ni_eff**2)/(trap_lifetime*(carriers.n + carriers.ni_eff + carriers.p + carriers.ni_eff))*1e4) # (A/m^2) trap-assisted recombination current
                        J_rec += J_trap
                        self.J_trap = J_trap # A/m^2
        def find_effective_lifetime(self, stack):
            # Combine channel lifetimes into bulk/effective lifetimes and derive
            # diffusion lengths. Reads trap_lifetime, SRV_lifetime,
            # radiative_lifetime, and the diffusivities from the enclosing scope,
            # so re-calling it after those change (convergence loop below)
            # refreshes the attributes on self.
            if (stack.nonradiative_recombination_modeling=='No') or [trap_lifetime, self.radiative_lifetime, self.Auger_lifetime] == [inf, inf, inf]:
                self.bulk_lifetime = 1e9
                self.lifetime = 1e9 # effective lifetime
            else:
                if volt == 0: # at V=0 the bulk lifetime is ill-defined, so take an arbitrary large value
                    self.bulk_lifetime = 1e9
                else:
                    self.bulk_lifetime = 1/(1/self.radiative_lifetime + 1/self.Auger_lifetime + 1/trap_lifetime) # 1e-4 converts 1/cm^2 to 1/m^2
                self.lifetime = 1/(1/trap_lifetime + 1/self.radiative_lifetime + 1/self.Auger_lifetime + 1/SRV_lifetime)
            if(stack.nonradiative_recombination_modeling == 'Yes') and (stack.diffusion_limited=='Yes') and self.diffusivity<1e8:
                self.diffusion_length = np.sqrt(self.bulk_lifetime*self.diffusivity) # (cm)
                self.electrical_diffusion_length = np.sqrt(self.bulk_lifetime*electrical_diffusivity) # (cm)
                self.ideal_electrical_diffusion_length = np.sqrt(radiative_lifetime*self.diffusivity) # (cm) is the radiative lifetime negative still?
                self.photon_recycling_L = np.sqrt(photon_recycling_diffusivity*self.bulk_lifetime)
                if stack.diffusion_limited == 'Yes':
                    stack.put_voltage_dependent_Jgen('On') # if remotely diffusion limited, then recalculate parameters with voltage
                elif stack.dopant_density>1e15: # if can calculate free-carrier absorption FCA, then recalculate parameters with voltage
                    stack.put_voltage_dependent_Jgen('On') # just stack.voltage_dependent_Jgen = 'On'
                else:
                    stack.put_voltage_dependent_Jgen('Off')
            else: # ignore diffusion
                self.diffusion_length = 1e9 # (cm)
                self.electrical_diffusion_length = 1e9 # (cm)
                self.rad_diffusion_length = 1e9 # (cm)
                self.diffusivity = 1e9
                self.ideal_electrical_diffusion_length = 1e9
                self.photon_recycling_D = 1e9
                self.photon_recycling_L = 1e9
        find_effective_lifetime(self, stack)
        # loop between diffusion length and radiative lifetime to satisfy interdependence
        if stack.diffusion_limited == 'Yes':
            for i in range(100):
                old_radiative_lifetime = radiative_lifetime
                photocollection = spectral.Photocollection(E1, stack, volt=volt, diffusion_length=self.diffusion_length, diffusivity=self.diffusivity, absorptance=photocollection.absorptance, rear_emittance=photocollection.rear_emittance, rec=self)
                radiative(self, stack) # need to re-add in J_rad
                radiative_lifetime = find_rad_lifetime()
                # redo Auger
                auger = Auger(volt, photocollection, stack, P_PR=self.P_PR)
                self.Auger_lifetime = auger[1]
                self.J_Auger = auger[0] # A/m^2
                # redo trap-assisted recombination (SRH)
                self.J_trap = trap_assisted_recombination(volt, stack)
                find_effective_lifetime(self, stack) # recalculate L
                # On recalculation, Si and GaAs typically converge within one loop; CdTe sometimes takes ~15 loops.
                if math.isclose(old_radiative_lifetime, radiative_lifetime, rel_tol=1e-4):
                    break
            # Rebuild the total from the converged per-channel currents.
            J_rec = self.J_radiative + self.J_Auger + self.J_trap + self.J_SRV
        if stack.nonradiative_recombination_modeling == 'No':
            J_rec = self.J_radiative/stack.fc_rec_ratio # simple nonradiative recombination factor: fc_rec_ratio is the ratio of net radiative recombination to net recombination
        self.J_recombination = J_rec
        self.ERE = self.J_rad_front/J_rec # External Radiative Efficiency
        self.trap_lifetime = trap_lifetime
        self.SRV_lifetime = SRV_lifetime
        self.radiative_lifetime = radiative_lifetime
        stack.rec = self
        stack.photocollection = photocollection
|
{"hexsha": "5412d79f5e1997a4d7b90194978fcf8bc371e2c6", "size": 20311, "ext": "py", "lang": "Python", "max_stars_repo_path": "recombination.py", "max_stars_repo_name": "npirvin/Radiative-Transport-PV", "max_stars_repo_head_hexsha": "d5236bfae5789fd2be5bb0190e962a83c24b3ae7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "recombination.py", "max_issues_repo_name": "npirvin/Radiative-Transport-PV", "max_issues_repo_head_hexsha": "d5236bfae5789fd2be5bb0190e962a83c24b3ae7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recombination.py", "max_forks_repo_name": "npirvin/Radiative-Transport-PV", "max_forks_repo_head_hexsha": "d5236bfae5789fd2be5bb0190e962a83c24b3ae7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.893557423, "max_line_length": 354, "alphanum_fraction": 0.5882034366, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5433}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Description: Using a two-layer network to predict the ozone layer thickness
from data above Palmerston North in New Zealand between 1996 and 2004.

The time series x(a) is recast as a supervised problem
x(a + t*(k+1)) = f(x(a), x(a+t), ..., x(a+t*(k-1))): even-index rows form the
training set, odd-index rows the validation set, and the last 400 samples are
held out for testing.
"""
from pylab import *
import numpy as np  # numerical package for scientific computing
import mlpcn

# ozone layer thickness above Palmerston North in New Zealand between 1996 and 2004
pnoz = loadtxt('data/PNoz.data')

# normalise the ozone column (index 2): zero mean, max magnitude ~1
pnoz[:,2] = pnoz[:,2]- pnoz[:,2].mean()
pnoz[:,2] = pnoz[:,2]/pnoz[:,2].max()

# assemble input vectors: x(a+t) = f(x(a),x(a-t),x(a-2t),...,x(a-kt))
t = 1  # stepsize
k = 4  # k points in the past used to predict the future point
lastPoint = np.shape(pnoz)[0]-t*(k+1)
inputs = np.zeros((lastPoint,k))
targets = np.zeros((lastPoint,1))
for i in range(lastPoint):
    inputs[i,:] = pnoz[i:i+t*k:t,2]
    targets[i] = pnoz[i+t*(k+1),2]

# split: even rows -> train, odd rows -> validation, final 400 -> test
train = inputs[:-400:2,:]
traintarget = targets[:-400:2]
valid = inputs[1:-400:2,:]
validtarget = targets[1:-400:2]
test = inputs[-400:,:]
testtarget = targets[-400:]

# plot ozone versus days, coloured by data split
xlabel('Days')
ylabel('normalized ozone')
plot(np.arange(0,2*np.shape(pnoz[:-400:2,2])[0],2),pnoz[:-400:2,2],'.r',label='train data')
plot(np.arange(1,2*np.shape(pnoz[1:-400:2,2])[0],2),pnoz[1:-400:2,2],'.g',label='valid data')
plot(np.arange(np.shape(pnoz[:-400,2])[0],np.shape(pnoz[:,2])[0],1),pnoz[-400:,2],'.b',label='test data')
legend(loc = 'upper right')
show()

# train a linear-output perceptron (4 hidden units, eta=0.2) in batch mode with
# early stopping on the validation set; mlptrain_automatic returns the error
# curves directly, so no pre-initialised arrays are needed.
net = mlpcn.mlpcn(train,traintarget,4,0.2,'linear','batch')
print('\nStart Train Error',net.errfunc(net.mlpfwd(train,True)[1],traintarget))
print('Start Valid Error',net.errfunc(net.mlpfwd(valid,True)[1],validtarget))
print('...perceptron training...')
(trainerror,validerror) = net.mlptrain_automatic(valid,validtarget,100)
print('Final Train Error',net.errfunc(net.mlpfwd(train,True)[1],traintarget))
print('Final Valid Error',net.errfunc(net.mlpfwd(valid,True)[1],validtarget))

# learning curves
plot(np.arange(len(trainerror)),trainerror,'-b',label = 'train error')
plot(np.arange(len(validerror)),validerror,'-r',label = 'valid error')
legend(loc = 'upper right')
show()

# evaluate on the held-out test window
testout = net.mlpfwd(test,True)[1]
print('Test Error:',net.errfunc(testout,testtarget))
plot(np.arange(np.shape(test)[0]),testout,'.')
plot(np.arange(np.shape(test)[0]),testtarget,'x')
legend(('Predictions','Targets'))
show()
|
{"hexsha": "2d15eec0d4762acbb7b30763403602bb1c11cbde", "size": 2953, "ext": "py", "lang": "Python", "max_stars_repo_path": "ffnn/time_series_problem.py", "max_stars_repo_name": "RaoulMa/NeuralNets", "max_stars_repo_head_hexsha": "f49072ac88686f753f9b5815d6cc5e71d536c3d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-03T11:06:33.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-03T11:06:33.000Z", "max_issues_repo_path": "ffnn/time_series_problem.py", "max_issues_repo_name": "RaoulMa/BasicNeuralNets", "max_issues_repo_head_hexsha": "f49072ac88686f753f9b5815d6cc5e71d536c3d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ffnn/time_series_problem.py", "max_forks_repo_name": "RaoulMa/BasicNeuralNets", "max_forks_repo_head_hexsha": "f49072ac88686f753f9b5815d6cc5e71d536c3d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0842105263, "max_line_length": 163, "alphanum_fraction": 0.6874365052, "include": true, "reason": "import numpy", "num_tokens": 929}
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import math
fig, ax = plt.subplots()  # one shared figure/axes reused by all plotting below
# pos1 = ax.get_position()
# pos2 = [pos1.x0 + 0.1, pos1.y0 + 0.1, pos1.width, pos1.height]
# ax.set_position(pos2)
class Block:
    """One motion-planner segment: a straight run from `fr` to `to` plus
    velocity/acceleration/arc bookkeeping that the planner fills in later."""

    def __init__(self, x1, y1, x2, y2, vex=0, vey=0):
        """Store the endpoints and entry velocity; zero the planner fields."""
        self.fr, self.to = np.array([x1, y1]), np.array([x2, y2])
        self.entryVel = np.array([vex, vey])
        # Fields below are placeholders populated by the planner.
        self.exitVel, self.accVec, self.accPt = (np.array([0, 0]),
                                                 np.array([0, 0]),
                                                 np.array([0, 0]))
        self.arcLen = self.arcRadius = 0
# Build the straight-line motion segments to follow (unit square corner path).
lineSegs = []
lineSegs.append(Block(0,0,1,0,1,0))
lineSegs.append(Block(1,0,1,1))
# lineSegs.append(Block(1,0,0.707,0.707))
curveSegs = []
# # Start segment - acceleration in direction of travel
# l0 = lineSegs[0]
# initSeg = Block(l0.fr[0], l0.fr[1], (l0.fr[0]+l0.to[0])/2, (l0.fr[1]+l0.to[1])/2)
# initSeg.accPt = np.array([l0.fr[0], l0.fr[1]])
# initSeg.accVec = np.array([l0.to[0], l0.to[1]])
# curveSegs.append(initSeg)
# Curve segments: one blend per pair of consecutive line segments, running from
# the midpoint of the first to the midpoint of the second.
for linSIdx in range(len(lineSegs)-1):
    l1 = lineSegs[linSIdx]
    l2 = lineSegs[linSIdx+1]
    crvS = Block((l1.fr[0]+l1.to[0])/2, (l1.fr[1]+l1.to[1])/2,(l2.fr[0]+l2.to[0])/2, (l2.fr[1]+l2.to[1])/2, l1.entryVel[0], l1.entryVel[1])
    #crvS = Block(l1.fr[0], l1.fr[1], l2.to[0], l2.to[1])
    # Calculate acceleration vector: perpendicular to the chord, applied at its midpoint.
    midPt = np.array([(crvS.fr[0]+crvS.to[0])/2, (crvS.fr[1]+crvS.to[1])/2])
    accVec = np.array([crvS.fr[1]-crvS.to[1], crvS.to[0]-crvS.fr[0]])
    crvS.accPt = midPt
    crvS.accVec = accVec
    # Arc length from the headings of the two line segments and the chord length.
    mL1 = math.atan2(l1.to[1]-l1.fr[1], l1.to[0]-l1.fr[0])
    mL2 = math.atan2(l2.to[1]-l2.fr[1], l2.to[0]-l2.fr[0])
    lenFromTo = math.sqrt((crvS.to[0] - crvS.fr[0]) ** 2 + (crvS.to[1] - crvS.fr[1]) ** 2)
    if abs(mL1) != abs(mL2):
        # Headings differ: circular blend; alpha is the turn's central angle.
        alpha = math.pi - abs(mL2 - mL1)
        print(mL1 * 180 / math.pi, mL2 * 180 / math.pi, alpha * 180 / math.pi)
        beta = alpha/2
        print(lenFromTo)
        rad = lenFromTo * math.sin(beta) / math.sin(math.pi-alpha)
        print(rad)
        arcLen = rad * (math.pi - alpha)
    elif mL1 == mL2:
        # Collinear segments: the "arc" is just the straight chord.
        arcLen = lenFromTo
        radialAccFactor = 0
        rad = 1e20
    else:
        # Opposite headings (reversal): degenerate arc.
        arcLen = 0
        rad = 0
    print("arcLen", arcLen, "ardRad", rad)
    crvS.arcLen = arcLen
    crvS.arcRadius = rad
    #
    # Add to list of segments
    curveSegs.append(crvS)
# # End segment - acceleration in opposite direction of travel
# endIdx = len(lineSegs)-1
# lN = lineSegs[endIdx]
# endSeg = Block(lN.fr[0], lN.fr[1], (lN.fr[0]+lN.to[0])/2, (lN.fr[1]+lN.to[1])/2)
# endSeg.accPt = np.array([lN.fr[0], lN.fr[1]])
# endSeg.accVec = np.array([-lN.to[0], -lN.to[1]])
# curveSegs.append(endSeg)
# Generate path by forward Euler integration of velocity + per-segment acceleration.
pathPoints = []
vel = lineSegs[0].entryVel
curPt = lineSegs[0].fr
distInCurve = 0  # distance travelled within the current curve segment
tInc = 0.01      # integration timestep
crvSegIdx = 0
t = 0
while t < 10:
    crvS = curveSegs[crvSegIdx]
    dX = vel[0] * tInc
    dY = vel[1] * tInc
    newPt = np.array([curPt[0] + dX, curPt[1] + dY])
    distInCurve += math.sqrt(dX**2 + dY**2)
    vel = np.array([vel[0] + crvS.accVec[0]*tInc, vel[1] + crvS.accVec[1]*tInc])
    #print(newPt)
    pathPoints.append(newPt)
    curPt = newPt
    t+=tInc
    # Check if curve finished (by travelled arc length, not endpoint position)
    # doneX = curPt[0] >= crvS.to[0] if (crvS.to[0] > crvS.fr[0]) else curPt[0] <= crvS.to[0]
    # doneY = curPt[1] >= crvS.to[1] if (crvS.to[1] > crvS.fr[1]) else curPt[1] <= crvS.to[1]
    # if doneX and doneY:
    if distInCurve >= crvS.arcLen:
        crvSegIdx += 1
        distInCurve = 0
        if crvSegIdx >= len(curveSegs):
            break
# Plot: line segments (blue), curve chords (green), acceleration vectors (red),
# and the integrated path (orange).
fig.set_size_inches(10,10)
for linS in lineSegs:
    (line_xs, line_ys) = zip(*[linS.fr, linS.to])
    ax.add_line(Line2D(line_xs, line_ys, linewidth=2,color="blue"))
for crvS in curveSegs:
    (line_xs, line_ys) = zip(*[crvS.fr, crvS.to])
    ax.add_line(Line2D(line_xs, line_ys, linewidth=2,color="green"))
    perpLine = np.array([crvS.accPt, [crvS.accPt[0]+crvS.accVec[0], crvS.accPt[1]+crvS.accVec[1]]])
    #print(perpLine)
    (line_xs, line_ys) = zip(*[perpLine[0], perpLine[1]])
    ax.add_line(Line2D(line_xs, line_ys, linewidth=2,color="red"))
(scat_xs, scat_ys) = zip(*pathPoints)
ax.scatter(scat_xs, scat_ys, 100, "orange")
ax.scatter([-0.1],[-0.1],s=0.01)  # nudge axis limits to include the origin area
plt.show()
|
{"hexsha": "5d64e6e179111f826d6af6aac68286c280dd0ec8", "size": 4290, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tests/TestPipelinePlanner/TestCurveFollow.py", "max_stars_repo_name": "TheFactory22/RBotFirmware", "max_stars_repo_head_hexsha": "cb7ea74869189f015578cb31ddc0516c72f1ea00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2017-03-05T00:38:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T17:27:48.000Z", "max_issues_repo_path": "Tests/TestPipelinePlanner/TestCurveFollow.py", "max_issues_repo_name": "TheFactory22/RBotFirmware", "max_issues_repo_head_hexsha": "cb7ea74869189f015578cb31ddc0516c72f1ea00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-01-01T16:57:43.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-04T23:08:48.000Z", "max_forks_repo_path": "Tests/TestPipelinePlanner/TestCurveFollow.py", "max_forks_repo_name": "TheFactory22/RBotFirmware", "max_forks_repo_head_hexsha": "cb7ea74869189f015578cb31ddc0516c72f1ea00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-08-07T06:09:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T01:27:21.000Z", "avg_line_length": 30.6428571429, "max_line_length": 139, "alphanum_fraction": 0.5972027972, "include": true, "reason": "import numpy", "num_tokens": 1711}
|
#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/vector_proxy.hpp>
#include <cmath>
#include <cstring>
#include <memory>
#include <vector>
#include <limits>
#include <algorithm>
#include <utility>
#include <iterator>
#include "statespace.hpp"
using namespace boost::numeric::ublas;
class ChangeEvent {
    /* Event marking a change in the set of active component SSMs:
     * at timestep t, SSM i_ssm either becomes active (is_start == true)
     * or stops being active (is_start == false). Sortable so events can
     * be swept in time order. */
public:
    int t;          // timestep at which the change takes effect
    int i_ssm;      // index of the component SSM being switched
    bool is_start;  // true = activation, false = deactivation
    friend bool operator< (const ChangeEvent &c1, const ChangeEvent &c2);
};
/* Order ChangeEvents primarily by timestep. */
bool operator< (const ChangeEvent &c1, const ChangeEvent &c2) {
    if (c1.t < c2.t) {
        return true;
    } else if (c1.t == c2.t) {
        // At equal timesteps, `>` on bools sorts START events (true) before
        // END events (false). NOTE(review): an earlier comment here claimed
        // "ends come before starts", which is the opposite of what this
        // comparison does; the constructor computes max_active with the same
        // ordering, so the active-set buffer is sized consistently either way.
        return c1.is_start > c2.is_start;
    }
    return false;
}
/* Sum the state dimensions of the SSMs listed in active_set.
 * The list may contain -1 placeholders and NULL SSM pointers;
 * both contribute nothing (and do NOT terminate the scan). */
int active_set_dimension(const std::vector<StateSpaceModel *> & ssms, vector<int> & active_set) {
    int total = 0;
    for (unsigned int slot = 0; slot < active_set.size(); ++slot) {
        int ssm_idx = active_set(slot);
        if (ssm_idx < 0) continue;           // empty slot
        StateSpaceModel *model = ssms[ssm_idx];
        if (model) total += model->max_dimension;
    }
    return total;
}
/* Build a combined SSM from component SSMs, where component i is active over
 * timesteps [start_idxs[i], end_idxs[i]]. `scales` optionally holds a
 * per-timestep observation scaling for each component (NULL = unscaled) and
 * `obs_noise` is the shared observation noise. The constructor sweeps the
 * activation/deactivation events in time order and precomputes:
 *   - changepoints: timesteps at which the active set changes,
 *   - active_sets:  for each inter-changepoint interval, the (-1-terminated)
 *                   row of active component indices,
 *   - max_dimension: the largest total state dimension ever needed. */
TransientCombinedSSM::TransientCombinedSSM(\
       std::vector<StateSpaceModel *> & ssms, const vector<int> & start_idxs,
       const vector<int> & end_idxs, const std::vector<const double * > & scales,
       double obs_noise) : ssms(ssms), start_idxs(start_idxs),
                           end_idxs(end_idxs), scales(scales),
                           obs_noise(obs_noise), n_ssms(ssms.size()),
                           active_ssm_cache1_k(-1), active_ssm_cache2_k(-1) {

  this->ssms_tmp.resize(this->n_ssms);
  this->n_steps = *(max_element(end_idxs.begin(), end_idxs.end()));

  /* Compute a list of ChangeEvents, each representing the
   * activation or deactivation of a component SSM. The
   * list is sorted by the timestep at which the event
   * occurs (events at the same timestep are ordered by operator<). */
  std::vector<ChangeEvent> events(start_idxs.size() + end_idxs.size());
  for (unsigned i=0; i < n_ssms; ++i) {
    events[2*i].t = start_idxs[i];
    events[2*i].i_ssm = i;
    events[2*i].is_start = true;
    events[2*i+1].t = end_idxs[i];
    events[2*i+1].i_ssm = i;
    events[2*i+1].is_start = false;
  }
  std::sort(events.begin(), events.end());

  /*
    Build a sorted list of changepoints (timesteps at which the set of active
    SSMs changes), along with the set of active SSMs at each changepoint.
    Also compute the max total dimension of the state space, by computing the
    dimension of each active set as we add it to the active_sets matrix.
  */

  // allocate the active_sets matrix by first computing the largest number of
  // SSMs that might be active at one time
  unsigned int n_active = 0;
  unsigned int max_active = 0;
  for(std::vector<ChangeEvent>::const_iterator idx = events.begin(); idx < events.end(); ++idx) {
    if (idx->is_start) n_active++; else n_active--;
    if (n_active > max_active) max_active = n_active;
  }
  active_sets.resize(2*n_ssms, max_active);

  // scratch vector of currently-active SSM indices; -1 marks an empty slot
  vector<int> active_set(max_active);
  for (unsigned i=0; i < max_active; ++i) active_set(i) = -1;

  int t_prev = events[0].t, max_dimension = 0, i=0;
  std::vector<ChangeEvent>::const_iterator idx;
  for(i=0, idx = events.begin();
      idx < events.end(); ++idx) {

    // printf("event t %d i_ssm %d start %s\n", idx->t, idx->i_ssm, idx->is_start ? "true" : "false");

    // if this represents a change, add a new changepoint
    if (idx->t != t_prev) {
      changepoints.push_back(t_prev);
      // printf(" change detected (t_prev %d), copying to active_set %d\n", t_prev, i);

      // copy the current active set of SSMs
      // into the active_sets matrix (terminated with -1)
      for (unsigned k=0, j=0; k < max_active; ++k) {
        if (active_set(k) >= 0) {
          active_sets(i, j++) = active_set(k);
        }
        // rewrite the terminator each iteration; the last write leaves
        // active_sets(i, j) == -1 just past the copied entries
        if (j < max_active) {
          active_sets(i, j) = -1;
        }
      }
      /*printf("matr active set(%d, ...) is", i);
      for(int tmpi=0; tmpi < max_active; ++tmpi) {
        printf(" %d", active_sets(i, tmpi));
      }
      printf("\n");*/

      t_prev = idx->t;
      i++;

      int current_dim = active_set_dimension(this->ssms, active_set);
      if (current_dim > max_dimension) max_dimension = current_dim;
    }

    // update the active set: add the new SSM if this
    // is a start event, otherwise remove it from
    // the active set.
    vector<int>::iterator it;
    if (idx->is_start) {
      it = std::find (active_set.begin(), active_set.end(), -1);
      if (it == active_set.end()) {
        printf("ERROR: need to start new SSM but no room in active_set vector!\n");
        exit(-1);
      } else {
        *it = idx->i_ssm;
      }
    } else {
      it = std::find (active_set.begin(), active_set.end(), idx->i_ssm);
      if (it == active_set.end()) {
        printf("ERROR: trying to remove SSM %d but it is not present in the active_set vector!\n", idx->i_ssm);
        exit(-1);
      } else {
        *it = -1;
      }
    }
  }
  // NOTE(review): the interval after the final event is never pushed as a
  // changepoint; after the last end-event the active set is empty, so
  // queries past n_steps fall back to the last stored row -- confirm callers
  // never query beyond the final end index.
  this->max_dimension = max_dimension;
  this->is_cssm = false;
}
// Trivial destructor: component SSM pointers are owned by the caller,
// so there is nothing to release here.
TransientCombinedSSM::~TransientCombinedSSM() {}
/* Return the index of the active-set row covering timestep k, i.e. the
 * index of the last changepoint <= k (-1 if k precedes all changepoints).
 * A two-entry cache serves callers that probe consecutive timesteps. */
int TransientCombinedSSM::active_set_idx(int k) {
    // Fast path: repeat of one of the two most recent queries.
    if (k == this->active_ssm_cache1_k) return this->active_ssm_cache1_v;
    if (k == this->active_ssm_cache2_k) return this->active_ssm_cache2_v;

    // Binary search: first changepoint strictly greater than k, minus one.
    std::vector<int>::iterator pos =
        std::upper_bound(this->changepoints.begin(), this->changepoints.end(), k);
    int set_row = (pos - this->changepoints.begin()) - 1;

    /*printf("active set at time k=%d is %d, changepoints %d %d %d %d\n", k, set_row,
      ...debug output elided...);*/

    // Demote the previous most-recent entry, then record the new lookup.
    this->active_ssm_cache2_k = this->active_ssm_cache1_k;
    this->active_ssm_cache2_v = this->active_ssm_cache1_v;
    this->active_ssm_cache1_k = k;
    this->active_ssm_cache1_v = set_row;
    return set_row;
}
unsigned int TransientCombinedSSM::state_size_at_timestep(unsigned int k) {
unsigned int total_state_size = 0;
int asidx = this->active_set_idx(k);
if (asidx < 0) {
return 0;
}
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
int i_ssm = *it;
StateSpaceModel * ssm = this->ssms[i_ssm];
if (!ssm) continue;
unsigned int state_size = ssm->max_dimension;
total_state_size += state_size;
}
return total_state_size;
}
/* Apply the combined transition operator mapping the state vector `x` at
 * timestep k-1 to timestep k, writing into `result`. Returns the dimension
 * of the state at timestep k. Component SSMs that begin at k are zeroed
 * here; their prior means are added separately by transition_bias(). */
int TransientCombinedSSM::apply_transition_matrix(const double * x, int k, double * result) {

  // first, loop over ssms active at the *previous*
  // timestep in order to cache the location of each
  // ssm in the previous state space (offsets stored in ssms_tmp).
  int j = 0;
  int asidx_prev = this->active_set_idx(k-1);
  int asidx = this->active_set_idx(k);
  bool same_active_set = (asidx==asidx_prev);

  if (k == 0) {
    printf("ERROR: requested transition INTO timestep 0 (invalid)!");
    exit(-1);
  }
  if (asidx < 0) {
    return 0;
  }

  if (!same_active_set) {
    // Active set changed between k-1 and k: record each old SSM's offset
    // within the previous state vector so surviving SSMs can find their slice.
    matrix_row < matrix<int> > old_ssm_indices = row(this->active_sets, asidx_prev);
    for (matrix_row < matrix<int> >::const_iterator it = old_ssm_indices.begin();
         it < old_ssm_indices.end() && *it >= 0; ++it) {
      // skip any null SSMs
      int i_ssm = *it;
      StateSpaceModel * ssm = this->ssms[i_ssm];
      if (!ssm) continue;
      this->ssms_tmp[i_ssm] = j;
      j += ssm->max_dimension;
    }
  }

  //printf("transition to time %d (asidx %d, %d):\n", k, asidx_prev, asidx);

  // now apply the transition to the current time
  int i=0;
  matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
  for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
       it < ssm_indices.end() && *it >= 0; ++it) {
    // skip any null SSMs
    int i_ssm = *it;
    StateSpaceModel * ssm = this->ssms[i_ssm];
    if (!ssm) continue;

    unsigned int state_size = ssm->max_dimension;
    if (this->start_idxs[i_ssm] == k) {
      /* new ssms just get filled in as zero
         (prior means will be added by the
         transition_bias operator) */
      for (unsigned j=i; j < i+state_size; ++j) {
        result[j] = 0;
      }
      //printf(" new ssm %d active from %d to %d\n", i_ssm, i, i+state_size);
    } else {
      /* this ssm is persisting from the
       * previous timestep, so just run the
       * transition on its slice of x (located via ssms_tmp when the
       * active set changed, otherwise at the same offset i) */
      unsigned int j = same_active_set ? i : this->ssms_tmp[i_ssm];
      ssm->apply_transition_matrix(x+j, k-this->start_idxs[i_ssm], result+i);
      //printf(" transitioning ssm %d, prev %d, in state %d to %d (sidx %d eidx %d)\n", i_ssm, j, i, i+state_size, this->start_idxs[i_ssm], this->end_idxs[i_ssm]);
    }
    i += state_size;
  }
  return i;
}
/* Column-block version of apply_transition_matrix: transitions n columns of
 * X (state rows starting at x_row_offset) from timestep k-1 to k, writing
 * into `result`. Returns the number of state rows produced.
 * NOTE(review): `r_row_offset` is never read in this body -- output rows are
 * indexed from x_row_offset via `i`; confirm callers always pass equal
 * offsets. */
int TransientCombinedSSM::apply_transition_matrix( const matrix<double> &X,
                                                   unsigned int x_row_offset,
                                                   int k,
                                                   matrix<double> &result,
                                                   unsigned int r_row_offset,
                                                   unsigned int n) {
  // Cache each previously-active SSM's row offset (in ssms_tmp) so surviving
  // SSMs can locate their slice of X if the active set changed at k.
  int j = x_row_offset;
  int asidx_prev = this->active_set_idx(k-1);
  int asidx = this->active_set_idx(k);
  bool same_active_set = (asidx==asidx_prev);
  if (asidx < 0) {
    return 0;
  }
  if (!same_active_set) {
    matrix_row < matrix<int> > old_ssm_indices = row(this->active_sets, asidx_prev);
    for (matrix_row < matrix<int> >::const_iterator it = old_ssm_indices.begin();
         it < old_ssm_indices.end() && *it >= 0; ++it) {
      // skip any null SSMs
      int i_ssm = *it;
      StateSpaceModel * ssm = this->ssms[i_ssm];
      if (!ssm) continue;
      this->ssms_tmp[i_ssm] = j;
      j += ssm->max_dimension;
    }
  }

  int i=x_row_offset;
  matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
  for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
       it < ssm_indices.end() && *it >= 0; ++it) {
    int i_ssm = *it;
    StateSpaceModel * ssm = this->ssms[i_ssm];
    if (!ssm) continue;
    unsigned int state_size = ssm->max_dimension;
    if (this->start_idxs[i_ssm] == k) {
      /* new ssms just get filled in as zero
         (prior means will be added by the
         transition_bias operator) */
      for (unsigned j=i; j < i+state_size; ++j) {
        for (unsigned jj=0; jj < n; ++jj) result(j, jj) = 0;
      }
    } else {
      // Persisting SSM: transition its row-slice of X into result.
      unsigned int j = same_active_set ? i : this->ssms_tmp[i_ssm];
      //printf("MATR transitioning ssm %d, prev %d, in state %d to %d (sidx %d eidx %d)\n", i_ssm, j, i, i+state_size, this->start_idxs[i_ssm], this->end_idxs[i_ssm]);
      ssm->apply_transition_matrix(X, j, k-this->start_idxs[i_ssm], result, i, n);
    }
    i += state_size;
  }
  return i-x_row_offset;
}
void TransientCombinedSSM::transition_bias(int k, double *result) {
int asidx = this->active_set_idx(k);
if (asidx < 0) return;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
// skip any null SSMs
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
if (!ssm) continue;
if (this->start_idxs[j] == k) {
ssm->prior_mean(result);
} else {
ssm->transition_bias(k-this->start_idxs[j], result);
}
result += ssm->max_dimension;
}
}
void TransientCombinedSSM::transition_noise_diag(int k, double *result) {
int asidx = this->active_set_idx(k);
if (asidx < 0) return;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
// skip any null SSMs
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
if (!ssm) continue;
if (this->start_idxs[j] == k) {
ssm->prior_vars(result);
} else {
ssm->transition_noise_diag(k-this->start_idxs[j], result);
}
result += ssm->max_dimension;
}
}
/* Apply the combined observation row-vector at step k to the state vector x:
 * returns the sum over active components j of scale_j[k'] * (H_j x_j), where
 * k' is the component-local timestep (k - start_idx).  Null (placeholder)
 * components occupy no state and contribute nothing here; their constant
 * contribution is produced by observation_bias().  Returns 0.0 when no
 * component set is active at step k.
 *
 * Fix vs. original: the trailing `if (ssm) x += ...` guard was redundant --
 * ssm is guaranteed non-null at that point by the earlier `continue`. */
double TransientCombinedSSM::apply_observation_matrix(const double *x, int k) {
  double r = 0;
  int asidx = this->active_set_idx(k);
  if (asidx < 0) return 0.0;
  matrix_row< matrix<int> > ssm_indices = row(this->active_sets, asidx);
  for (matrix_row< matrix<int> >::const_iterator it = ssm_indices.begin();
       it < ssm_indices.end() && *it >= 0; ++it) {
    int j = *it;
    StateSpaceModel * ssm = this->ssms[j];
    if (!ssm) continue;  // placeholder component: no state block in x
    const double * scale = this->scales[j];
    double ri = ssm->apply_observation_matrix(x, k - this->start_idxs[j]);
    if (scale) ri *= scale[k - this->start_idxs[j]];
    r += ri;
    x += ssm->max_dimension;  // advance past this component's state block
  }
  return r;
}
/* Matrix version of apply_observation_matrix: applies the step-k observation
 * row-vector to n columns of X (state rows starting at row_offset), summing
 * each active component's scaled contribution into result[0..n).
 * result_tmp is caller-provided scratch of length >= n; both buffers are
 * zeroed on entry.  No-op (beyond the zeroing) if nothing is active at k. */
void TransientCombinedSSM::apply_observation_matrix(const matrix<double> &X,
unsigned int row_offset, int k,
double *result, double *result_tmp, unsigned int n) {
for (unsigned i=0; i < n; ++i) {
result[i] = 0;
result_tmp[i] = 0;
}
int asidx = this->active_set_idx(k);
if (asidx < 0) return;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
if (!ssm) continue; // placeholder component: no state rows
const double * scale = this->scales[j];
unsigned int state_size = ssm->max_dimension;
/* Component's own observation product goes into result_tmp ... */
ssm->apply_observation_matrix(X, row_offset,
k-this->start_idxs[j],
result_tmp, NULL, n);
// printf("TSSM step %d applying obs matrix on ssm %d state_size %d start_idx %d at row_offset %d n %d scale[%d] %f result[0] %f\n", k, j, state_size, this->start_idxs[j], row_offset, n, k-this->start_idxs[j], scale? scale[k-this->start_idxs[j]] : 1.0, result_tmp[0]);
/* ... then is accumulated into result, scaled by the component's
 * per-timestep scale factor when one is defined. */
if (scale) {
for (unsigned ii=0; ii < n; ++ii) {
result[ii] += scale[k-this->start_idxs[j]] * result_tmp[ii];
}
} else {
for (unsigned ii=0; ii < n; ++ii) {
result[ii] += result_tmp[ii];
}
}
row_offset += state_size;
}
}
/* Constant (state-independent) part of the step-k observation: the sum of
 * each active component's observation bias, scaled by its per-timestep
 * scale factor when one is defined.  A null component entry acts as a pure
 * scaled template with no state: it contributes 1.0 (times its scale).
 * Returns 0.0 when no component set is active at step k. */
double TransientCombinedSSM::observation_bias(int k) {
double bias = 0;
int asidx = this->active_set_idx(k);
if (asidx < 0) return 0.0;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
// note: null SSMs are NOT skipped here; they contribute a constant 1.0
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
const double * scale = this->scales[j];
int kk = k - this->start_idxs[j]; // component-local timestep
double b = ssm ? ssm->observation_bias(kk) : 1.0;
if (scale) b *= scale[kk];
bias += b;
}
return bias;
}
double TransientCombinedSSM::observation_noise(int k) {
return this->obs_noise;
}
/* True iff the combined model is stationary at step k: the active component
 * set is unchanged from step k-1, no active component has a per-timestep
 * scale, and every active component is itself stationary at its local step.
 *
 * NOTE(review): unlike the other methods, this one does not guard against
 * active_set_idx() returning -1 before indexing active_sets -- confirm that
 * callers only invoke stationary() for in-range k. */
bool TransientCombinedSSM::stationary(int k) {
matrix_row < matrix<int> > s1 = row(this->active_sets, this->active_set_idx(k));
if (k > 0) {
matrix_row < matrix<int> > s2 = row(this->active_sets, this->active_set_idx(k-1));
/* Compare the two active sets entry-by-entry up to the first position
 * where both hit the -1 terminator. */
for (unsigned i=0; i < s1.size() && !(s1(i) == -1 && s2(i) == -1); ++i) {
if (s1(i) != s2(i)) return false;
}
}
for (matrix_row < matrix<int> >::const_iterator it = s1.begin();
it < s1.end() && *it >= 0; ++it) {
// skip any null SSMs
int j = *it;
if (this->scales[j]) return false; // time-varying scale => nonstationary
StateSpaceModel * ssm = this->ssms[j];
if (ssm && !ssm->stationary(k-this->start_idxs[j])) return false;
}
return true;
}
/* Write the prior mean of the time-0 combined state into `result` and
 * return the number of entries written (the combined state dimension at
 * time 0).  Components whose start index is negative conceptually began
 * before the first observed timestep, so their prior mean is pushed through
 * the forward (transition + bias) model to obtain the induced mean at
 * time 0.
 *
 * Fix vs. original: memcpy's third argument is a BYTE count; the original
 * passed ssm->max_dimension and so copied only max_dimension bytes
 * (max_dimension/sizeof(double) doubles), leaving the tail of the state
 * block un-updated during the forward propagation. */
int TransientCombinedSSM::prior_mean(double *result) {
  double * r1 = result;
  int asidx = this->active_set_idx(0);
  if (asidx < 0) return 0;
  matrix_row< matrix<int> > ssm_indices = row(this->active_sets, asidx);
  for (matrix_row< matrix<int> >::const_iterator it = ssm_indices.begin();
       it < ssm_indices.end() && *it >= 0; ++it) {
    int j = *it;
    StateSpaceModel * ssm = this->ssms[j];
    if (!ssm) continue;  // placeholder component: no state block
    int state_size = ssm->max_dimension;
    for (int i=0; i < state_size; ++i) {
      result[i] = 0;
    }
    ssm->prior_mean(result);
    /* If this component starts at a negative time, push the prior through
     * the transition model to get the induced distribution at time zero. */
    if (this->start_idxs[j] < 0) {
      double * tmp_result = (double *) malloc(sizeof(double) * ssm->max_dimension);
      if (tmp_result==NULL) {
        printf("memory error, malloc failed!\n");
        exit(-1);
      }
      for (unsigned k=1; k <= (unsigned) -this->start_idxs[j]; ++k) {
        ssm->apply_transition_matrix(result, k, tmp_result);
        ssm->transition_bias(k, tmp_result);
        /* copy the full state block back (byte count, not element count) */
        memcpy(result, tmp_result, sizeof(double) * ssm->max_dimension);
      }
      free(tmp_result);
    }
    result += state_size;
  }
  return result-r1;
}
/* Write the diagonal of the time-0 prior covariance into `result` and
 * return the number of entries written.  For a component that starts at a
 * negative time, its prior covariance is propagated forward to time 0 via
 * P <- F P F' + Q_diag at each step; only the diagonal is kept at the end. */
int TransientCombinedSSM::prior_vars(double *result) {
double * r1 = result;
int asidx = this->active_set_idx(0);
if (asidx < 0) return 0;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
if (!ssm) continue;
ssm->prior_vars(result);
/*printf("ssm %d max dimension %d\n", j, ssm->max_dimension);
for (double * q = result; q < result+ssm->max_dimension; ++q) {
printf(" initial var %d = %f\n", q-result, *q);
}*/
if (this->start_idxs[j] < 0) {
/* Build a full (diagonal) covariance P from the prior variances, then
 * propagate.  The transpose trick below computes F P F' using only
 * left-multiplication by F: F(FP)' = F P' F' = F P F', valid because
 * P is symmetric at every step. */
matrix<double> P (ssm->max_dimension, ssm->max_dimension);
matrix<double> P2 (ssm->max_dimension, ssm->max_dimension);
P.clear();
for (int i=0; i < ssm->max_dimension; ++i) {
P(i,i) = result[i];
}
for (unsigned k=1; k <= -this->start_idxs[j]; ++k) {
ssm->apply_transition_matrix(P, 0, k, P2, 0, ssm->max_dimension); // P2 = FP
noalias(P) = trans(P2); // P = PF'
ssm->apply_transition_matrix(P, 0, k, P2, 0, ssm->max_dimension); // P2 = FPF'
P = P2;
ssm->transition_noise_diag(k, result); // result reused as scratch here
for (int i=0; i < ssm->max_dimension; ++i) P(i,i) += result[i];
}
for (int i=0; i < ssm->max_dimension; ++i) result[i] = P(i,i);
}
/*for (double * q = result; q < result+ssm->max_dimension; ++q) {
printf(" post-prop var %d = %f\n", q-result, *q);
}*/
result += ssm->max_dimension;
}
/*for (double * q = r1; q < result; ++q) {
printf("prior var %d = %f\n", q-r1, *q);
}*/
return result-r1;
}
/* Append one coefficient-prior entry per component (in component order) to
 * cmeans/cvars: copies of the component's coefficient means/variances for
 * compact-support SSMs, empty vectors for every other component. */
void TransientCombinedSSM::init_coef_priors(std::vector<vector<double> > & cmeans,
std::vector<vector<double> > & cvars) {
  for (unsigned idx = 0; idx < this->n_ssms; ++idx) {
    StateSpaceModel * component = this->ssms[idx];
    if (component != NULL && component->is_cssm) {
      CompactSupportSSM * cssm = (CompactSupportSSM *) component;
      cmeans.push_back(vector<double>(cssm->coef_means));  // copy
      cvars.push_back(vector<double>(cssm->coef_vars));    // copy
    } else {
      cmeans.push_back(vector<double>());  // placeholder entry
      cvars.push_back(vector<double>());
    }
  }
}
/* For every compact-support component active at step k, extract its basis
 * coefficients (means into cmeans[j], variances into cvars[j]) from the
 * current filter state.  Other components are left untouched. */
void TransientCombinedSSM::extract_all_coefs(FilterState &cache, int k,
std::vector<vector<double> > & cmeans,
std::vector<vector<double> > & cvars) {
/*
Assumes cache has a valid, current P matrix.
*/
int asidx = this->active_set_idx(k);
if (asidx < 0) return;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
unsigned int state_offset = 0;
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
if (!ssm) continue; // placeholder component: no state block
if (ssm->is_cssm) {
CompactSupportSSM *cssm = (CompactSupportSSM *) ssm;
cssm->extract_coefs(cache.xk, cache.P,
state_offset, k - this->start_idxs[j],
cmeans[j],
cvars[j]);
}
state_offset += ssm->max_dimension;
}
}
/* Record each active component's predicted observation at step k
 * (observation matrix applied to its state block, plus its observation
 * bias) into means[j] at the component-local timestep.  Null components
 * have no state and contribute the constant 1.0. */
void TransientCombinedSSM::extract_component_means(double *xk, int k,
std::vector<vector<double> > & means) {
int asidx = this->active_set_idx(k);
if (asidx < 0) return;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
unsigned int state_offset = 0;
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
int kk = k - this->start_idxs[j]; // component-local timestep
means[j](kk) = ssm ? ssm->apply_observation_matrix(xk + state_offset, kk) : 1.0;
means[j](kk) += ssm ? ssm->observation_bias(kk) : 0.0;
if (ssm) state_offset += ssm->max_dimension; // null components occupy no state
}
}
/* Record each active component's predicted observation variance at step k
 * (H_j P_jj H_j') into vars[j] at the component-local timestep, where P_jj
 * is the component's diagonal block of the covariance P.  P_tmp is caller
 * scratch, at least max_dimension x max_dimension for every component.
 * Null components get variance 0. */
void TransientCombinedSSM::extract_component_vars(matrix<double> &P, matrix<double> &P_tmp, int k,
std::vector<vector<double> > & vars) {
int asidx = this->active_set_idx(k);
if (asidx < 0) return;
matrix_row < matrix<int> > ssm_indices = row(this->active_sets, asidx);
unsigned int state_offset = 0;
/* scratch vectors for the two observation-matrix applications */
double * v_tmp = (double *) malloc(sizeof(double) * P.size1());
double * v_tmp2 = (double *) malloc(sizeof(double) * P.size1());
if (!v_tmp || !v_tmp2) {
printf("memory allocation error in extract_component_vars!");
exit(-1);
}
for (matrix_row < matrix<int> >::const_iterator it = ssm_indices.begin();
it < ssm_indices.end() && *it >= 0; ++it) {
int j = *it;
StateSpaceModel * ssm = this->ssms[j];
int kk = k - this->start_idxs[j]; // component-local timestep
if (ssm) {
// just be lazy and copy the submatrix for this component into its own matrix.
subrange(P_tmp, 0, ssm->max_dimension, 0, ssm->max_dimension) = subrange(P, state_offset, state_offset+ssm->max_dimension, state_offset, state_offset+ssm->max_dimension);
/* v_tmp = H P_jj, then variance = (H P_jj) H' */
ssm->apply_observation_matrix(P_tmp, 0, kk, v_tmp, v_tmp2, ssm->max_dimension);
vars[j](kk) = ssm->apply_observation_matrix(v_tmp, kk);
state_offset += ssm->max_dimension;
//printf("%d: set var at ssm %d kk %d = %f\n", k, j, kk, vars[j](kk));
} else {
vars[j](kk) = 0.0; // null component: deterministic, no variance
}
}
free(v_tmp);
free(v_tmp2);
}
|
{"hexsha": "888fb6da2f3f9d92199e55594465861cb872c42b", "size": 22163, "ext": "cc", "lang": "C++", "max_stars_repo_path": "models/statespace/fast_c/transient_combined.cc", "max_stars_repo_name": "davmre/sigvisa", "max_stars_repo_head_hexsha": "91a1f163b8f3a258dfb78d88a07f2a11da41bd04", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/statespace/fast_c/transient_combined.cc", "max_issues_repo_name": "davmre/sigvisa", "max_issues_repo_head_hexsha": "91a1f163b8f3a258dfb78d88a07f2a11da41bd04", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/statespace/fast_c/transient_combined.cc", "max_forks_repo_name": "davmre/sigvisa", "max_forks_repo_head_hexsha": "91a1f163b8f3a258dfb78d88a07f2a11da41bd04", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4815340909, "max_line_length": 272, "alphanum_fraction": 0.6242837161, "num_tokens": 6735}
|
# -*- coding: utf-8
# This script is the PyLaGriT version of LaGriT tutorial example at
# https://lanl.github.io/LaGriT/pages/tutorial/stratigraphy/index.html.
# Written by Guoyan Jiang (gyjiang@whu.edu.cn) with technical support
# from Dylan Harp (dharp@lanl.gov) and Terry Miller (tamiller@lanl.gov).
# Import PyLaGriT class from pylagrit module
from pylagrit import PyLaGriT
import numpy
# Variables
# Domain extents and per-axis node counts for the structured hex grid.
maxX = 4000 # Max value in x direction
maxY = 4000 # Max value in y direction
maxZ = 3000 # Max value in z direction
numX = 51 # Number of points in x direction
numY = 51 # Number of points in y direction
numZ = 26 # Number of points in z direction
# Create PyLaGriT object
# This assumes that pylagritrc is being used so that lagrit_exe option does not need to be specified
lg = PyLaGriT()
#********************************************
# 01 Build HEX Mesh
#********************************************
# Create mesh object (numX*numY*numZ vertices on a regular brick lattice)
mo = lg.create_hex()
mo.createpts_brick_xyz((numX, numY, numZ), (0,0,0), (maxX, maxY, maxZ))
# Save the mesh object
mo.dump('tmp_hex_01.inp')
# Set vertices (imt) and cells (itetclr) to material id 1
mo.setatt('imt', 1)
mo.setatt('itetclr', 1)
# Set node type from connectivity of mesh
mo.resetpts_itp()
#********************************************
# 02 Use pset’s to identify (for setting boundary conditions,
# initial conditions, etc.) a set of vertices on the top
# surface of the mesh
#********************************************
# Create a pset named p_top, which contains all nodes (stride = 1 0 0)
# where the node’s Z value (zic) is greater than or equal to (ge) the top of the mesh (maxZ)
pset0 = mo.pset_attribute('zic', maxZ, 'ge', (1,0,0), 'p_top')
# Define three cylindrical objects (rtz = radius/theta/z; radius 1100,
# full 360 degrees, tall enough to span the mesh) centered at three points
pset1 = mo.pset_geom((0,0,-1), (1100,360,10000), (1500,1500,0), 'rtz', (1,0,0), 'p_circle1')
pset2 = mo.pset_geom((0,0,-1), (1100,360,10000), (2500,2500,0), 'rtz', (1,0,0), 'p_circle2')
pset3 = mo.pset_geom((0,0,-1), (1100,360,10000), (2500,1500,0), 'rtz', (1,0,0), 'p_circle3')
# Intersect the four psets; only points belonging to ALL of the given sets
# are preserved into the pset p_region
pset4 = mo.pset_inter([pset0, pset1, pset2, pset3], 'p_region')
# Map psets to an attribute
mo.addatt('id_top_region', vtype='vint', rank='scalar') # Create a node-based attribute id_top_region within the mesh object
mo.setatt('id_top_region', 1) #Fill the entire attribute with 1
pset1.setatt('id_top_region', 2) #Color all nodes in the pset p_circle1 with the value 2
pset2.setatt('id_top_region', 3)
pset3.setatt('id_top_region', 4)
pset4.setatt('id_top_region', 5)
# Release the psets from memory
pset0.delete()
pset1.delete()
pset2.delete()
pset3.delete()
pset4.delete()
#********************************************
# 03 Build some surfaces to define stratigraphy.
# In a real model, the surfaces would come from some geologic framework model
# and would define geologic or hydro-geologic horizons and topography.
#********************************************
# NOTE(review): mosurf1 (z ranges 500-2500) lies below mosurf2 (z ranges
# 800-2800), so the "top"/"bottom" labels below look swapped -- confirm
# against the original LaGriT tutorial.
mosurf1 = lg.create_qua() # Create the top surface
p1 = (-20, -20, 1000)
p2 = (4020, -20, 1500)
p3 = (4020, 4020, 2500)
p4 = (-20, 4020, 500)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf1.quadxy(nnodes, pts)
#mosurf1.paraview()
mosurf1.minmax_xyz()
mosurf1.dump('tmp_surf1_quad.inp')
mosurf2 = lg.create_qua() # Create the bottom surface
p1 = (-20, -20, 1800)
p2 = (4020, -20, 2100)
p3 = (4020, 4020, 2800)
p4 = (-20, 4020, 800)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf2.quadxy(nnodes, pts)
#mosurf2.paraview()
mosurf2.minmax_xyz()
mosurf2.dump('tmp_surf2_quad.inp')
#********************************************
# 04 Use the surfaces to define regions and set
# vertex and cell ids
#********************************************
# Define Regions (region strings reference the surfaces by their LaGriT
# names via str(sf): le = below-or-on the surface, gt = above it)
sf1 = mosurf1.surface('sf1')
sf2 = mosurf2.surface('sf2')
r1 = mo.region('le ' + str(sf1))
r2 = mo.region('gt ' + str(sf1) + ' and ' + 'le ' + str(sf2))
r3 = mo.region('gt ' + str(sf2))
mosurf1.delete()
mosurf2.delete()
# Create Eltsets and PSets from Regions
pset1 = mo.pset_region(r1)
pset2 = mo.pset_region(r2)
pset3 = mo.pset_region(r3)
eltset1 = mo.eltset_region(r1)
eltset2 = mo.eltset_region(r2)
eltset3 = mo.eltset_region(r3)
# Set node (imt) and cell (itetclr) materials from the psets/eltsets
pset1.setatt('imt', 1)
pset2.setatt('imt', 2)
pset3.setatt('imt', 3)
eltset1.setatt('itetclr', 1)
eltset2.setatt('itetclr', 2)
eltset3.setatt('itetclr', 3)
#********************************************
# 05 Build a fault surface and define stratigraphy
# on each side of the fault
#********************************************
# Create fault surface and surfaces to either side of fault:
# *_fminus surfaces define horizons on one side of the fault,
# *_fplus the (vertically offset) horizons on the other side.
mosurf1_fminus = lg.create_qua()
p1 = (-20, -20, 1000)
p2 = (4020, -20, 1500)
p3 = (4020, 4020, 2500)
p4 = (-20, 4020, 500)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf1_fminus .quadxy(nnodes, pts)
#mosurf1_fminus .paraview()
mosurf1_fminus .minmax_xyz()
mosurf1_fminus .dump('tmp_s1_fm.inp')
mosurf2_fminus = lg.create_qua()
p1 = (-20, -20, 1800)
p2 = (4020, -20, 2100)
p3 = (4020, 4020, 2800)
p4 = (-20, 4020, 800)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf2_fminus.quadxy(nnodes, pts)
#mosurf2_fminus.paraview()
mosurf2_fminus.minmax_xyz()
mosurf2_fminus.dump('tmp_s2_fm.inp')
mosurf1_fplus = lg.create_qua()
p1 = (-20, -20, 1400)
p2 = (4020, -20, 1900)
p3 = (4020, 4020, 2900)
p4 = (-20, 4020, 900)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf1_fplus.quadxy(nnodes, pts)
#mosurf1_fplus.paraview()
mosurf1_fplus.minmax_xyz()
mosurf1_fplus.dump('mosurf1_fplus.inp')
mosurf2_fplus = lg.create_qua()
p1 = (-20, -20, 2200)
p2 = (4020, -20, 2500)
p3 = (4020, 4020, 3200)
p4 = (-20, 4020, 1200)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf2_fplus.quadxy(nnodes, pts)
#mosurf2_fplus.paraview()
mosurf2_fplus.minmax_xyz()
mosurf2_fplus.dump('mosurf2_fplus.inp')
# The fault itself: a steeply dipping quad spanning z = -1e4 to 1e4
mosurf_fault = lg.create_qua()
p1 = (-20, -20, -1.e4)
p2 = (4020, -20, -1.e4)
p3 = (4020, 4020, 1.e4)
p4 = (-20, 4020, 1.e4)
pts = [p1, p2, p3, p4]
nnodes = (numX, numY, 1)
mosurf_fault.quadxy(nnodes, pts)
#mosurf_fault.paraview()
mosurf_fault.minmax_xyz()
mosurf_fault.dump('mosurf_fault.inp')
# Define geometry of hydrostratigraphic model:
# three layers on each side of the fault (le/gt sf_f selects the side)
sf1_fm = mosurf1_fminus.surface('sf1_fm')
sf2_fm = mosurf2_fminus.surface('sf2_fm')
sf1_fp = mosurf1_fplus.surface('sf1_fp')
sf2_fp = mosurf2_fplus.surface('sf2_fp')
sf_f = mosurf_fault.surface('sf_f')
r1_fm = mo.region('le ' + str(sf1_fm) + ' and ' + 'le ' + str(sf_f))
r2_fm = mo.region('gt ' + str(sf1_fm) + ' and ' + 'le ' + str(sf2_fm) + ' and ' + 'le ' + str(sf_f))
r3_fm = mo.region('gt ' + str(sf2_fm) + ' and ' + 'le ' + str(sf_f))
r1_fp = mo.region('le ' + str(sf1_fp) + ' and ' + 'gt ' + str(sf_f))
r2_fp = mo.region('gt ' + str(sf1_fp) + ' and ' + 'le ' + str(sf2_fp) + ' and ' + 'gt ' + str(sf_f))
r3_fp = mo.region('gt ' + str(sf2_fp) + ' and ' + 'gt ' + str(sf_f))
mosurf1_fminus.delete()
mosurf2_fminus.delete()
mosurf1_fplus.delete()
mosurf2_fplus.delete()
mosurf_fault.delete()
# Set fault node and element materials
pset1_fm = mo.pset_region(r1_fm)
pset2_fm = mo.pset_region(r2_fm)
pset3_fm = mo.pset_region(r3_fm)
pset1_fp = mo.pset_region(r1_fp)
pset2_fp = mo.pset_region(r2_fp)
pset3_fp = mo.pset_region(r3_fp)
eltset1_fm = mo.eltset_region(r1_fm)
eltset2_fm = mo.eltset_region(r2_fm)
eltset3_fm = mo.eltset_region(r3_fm)
eltset1_fp = mo.eltset_region(r1_fp)
eltset2_fp = mo.eltset_region(r2_fp)
eltset3_fp = mo.eltset_region(r3_fp)
# Set Attributes from Eltsets and PSets: materials 1-3 on the minus side,
# 4-6 on the plus side; 7 marks anything not captured by a region
mo.setatt('imt', 7)
mo.setatt('itetclr', 7)
pset1_fm.setatt('imt', 1)
pset2_fm.setatt('imt', 2)
pset3_fm.setatt('imt', 3)
pset1_fp.setatt('imt', 4)
pset2_fp.setatt('imt', 5)
pset3_fp.setatt('imt', 6)
eltset1_fm.setatt('itetclr', 1)
eltset2_fm.setatt('itetclr', 2)
eltset3_fm.setatt('itetclr', 3)
eltset1_fp.setatt('itetclr', 4)
eltset2_fp.setatt('itetclr', 5)
eltset3_fp.setatt('itetclr', 6)
#********************************************
# 06 Define a polyline and truncate the exterior boundary of the mesh with the polyline
#********************************************
# Read boundary polygon file
mobndry = lg.read('basin_bnd_ply_rescale.inp')
# Extrude the polyline into a vertical surface
mofence = mobndry.extrude(3200, 'const', 'volume', [0, 0, -1])
mobndry.minmax_xyz()
mofence.minmax_xyz()
# Translate the extrusion to make it cover the vertical extent of the hex mesh
mofence.trans((0, 0, -3100), (0, 0 ,0))
mofence.minmax_xyz()
#mofence.paraview()
#mofence.dump('3D_vertical_surface.inp')
#mo.dump('cube.inp')
# Truncate mesh: select everything outside the fence surface
sf_bndry = mofence.surface('sf_bndry')
r_bndry = mo.region('ge ' + str(sf_bndry))
pset_bndry = mo.pset_region(r_bndry)
mobndry.delete()
mofence.delete()
# Three alternative selection criteria for cells to remove, recorded in an
# element attribute for comparison; only methods 3 and 4 are acted on below.
# Method 1: Only remove a cell if ALL vertices are outside
e_delete1 = pset_bndry.eltset('exclusive')
# Method 2: Remove a cell if the centroid (average of all vertices) is outside
e_delete2 = mo.eltset_region(r_bndry)
# Method 3: Remove a cell if one or more vertices are outside
e_delete3 = pset_bndry.eltset('inclusive')
#mo.addatt('id_in_out_bndry', vtype='vint', rank='scalar', length='nelements')
mo.add_element_attribute('id_in_out_bndry', vtype='vint')
mo.setatt('id_in_out_bndry', 4) #Fill the entire attribute with 4
e_delete3.setatt('id_in_out_bndry', 3)
e_delete2.setatt('id_in_out_bndry', 2)
e_delete1.setatt('id_in_out_bndry', 1)
eltset4 = mo.eltset_attribute('id_in_out_bndry', 4, 'eq')
eltset3 = mo.eltset_attribute('id_in_out_bndry', 3, 'eq')
#eltset2 = mo.eltset_attribute('id_in_out_bndry', 2, 'eq')
#eltset1 = mo.eltset_attribute('id_in_out_bndry', 1, 'eq')
# NOTE(review): rmpoint_eltset's boolean arguments presumably control
# compression/itp reset -- confirm against the PyLaGriT API docs.
mo.rmpoint_eltset(eltset4, False, False)
mo.rmpoint_eltset(eltset3, True, True)
#********************************************
# 07 Refine the mesh around the fault
#********************************************
# Mark elements intersected by the fault surface, then octree-refine them
f_zone = mo.intersect_elements(sf_f, 'f_zone')
fz_i = mo.eltset_attribute('f_zone', 0, 'gt') #Non-zero indicates intersection
fz_i.refine()
mo.delatt('f_zone')
mo.status (brief=True)
# NOTE(review): the commented-out cleanup below repeats sf1_fp and omits
# sf2_fp -- fix if ever re-enabled.
#sf_f.delete()
#sf1_fm.delete()
#sf2_fm.delete()
#sf1_fp.delete()
#sf1_fp.delete()
#sf_bndry.delete()
#********************************************
# 08 Insert a couple of 'wells' by refining the mesh and identifying a line of nodes
# that will be the well source/sink for boundary conditions.
#********************************************
# Well locations (x, y) and the cylinder used to drive local refinement
Well1X = 1234.56
Well1Y = 1987.65
Well2X = 2243.21
Well2Y = 1212.34
Radius = 25
NRadius = 2
#Well 1
mowell1 = lg.create_tet()
mowell1.createpts_rtz((NRadius, 9, numZ), (0, 0, 3100), (Radius, 360, 1500)) #Create a cylindrical point cloud
mowell1.filter() # Filter (delete) points that are too close ( default distance <=1.e-16) or duplicate points
mowell1.rmpoint_compress() # Remove all marked nodes and correct the itet array
mowell1.setatt('imt', 1)
mowell1.connect() # Connect the point cloud
mowell1.resetpts_itp()
mowell1.minmax_xyz()
mowell1.trans((0, 0, 0), (Well1X, Well1Y, 0)) # move cylinder to the well location
mowell1.minmax_xyz()
#mowell1.paraview()
mowell1.dump('tmp_well1.inp')
#Well 2 (same recipe, different location/depth)
mowell2 = lg.create_tet()
mowell2.createpts_rtz((NRadius, 9, numZ), (0, 0, 3100), (Radius, 360, 2200))
mowell2.filter()
mowell2.rmpoint_compress()
mowell2.setatt('imt', 1)
mowell2.connect()
mowell2.resetpts_itp()
mowell2.minmax_xyz()
mowell2.trans((0, 0, 0), (Well2X, Well2Y, 0))
mowell2.minmax_xyz()
#mowell2.paraview()
mowell2.dump('tmp_well2.inp')
# Join the two distinct wells into a single mesh object
mowells = lg.merge([mowell1, mowell2])
mowells.dump('tmp_wells.inp')
#mowells.paraview()
# Refine the mo around the wells
# First pass refinement
w_zone = mo.intersect_elements(mowells, 'w_zone')
wz_i = mo.eltset_attribute('w_zone', 0, 'gt') #Non-zero indicates intersection
wz_i.refine()
mo.setatt('w_zone', 0)
#wz_i.delete()
# Second pass refinement
w_zone = mo.intersect_elements(mowells, 'w_zone')
wz_i = mo.eltset_attribute('w_zone', 0, 'gt') #Non-zero indicates intersection
wz_i.refine()
mo.setatt('w_zone', 0)
#wz_i.delete()
mohex = mo.grid2grid_tree_to_fe() #Quadtree or octree grid to grid
#mo.status (brief=True)
# Identify the column of vertices closest to the well center.
#Well1
mo_pts1 = lg.create()
mo_pts1.createpts_rtz((2, 2, 1000), (0, 0, 3100), (Radius, 360, 2200))
mo_pts1.trans((0, 0, 0), (Well1X, Well1Y, 0))
#Well2
mo_pts2 = lg.create()
mo_pts2.createpts_rtz((2, 2, 1000), (0, 0, 3100), (Radius, 360, 2200))
mo_pts2.trans((0, 0, 0), (Well2X, Well2Y, 0))
mo_pts = lg.merge([mo_pts1, mo_pts2])
mo_pts.filter()
mo_pts.rmpoint_compress()
# Compute a distance field attribute (distance from each mesh node to the
# nearest well-column point)
mo.compute_distance(mo_pts, option='distance_field', attname='dfield_well')
mo_pts1.delete()
mo_pts2.delete()
mo_pts.delete()
mowell1.delete()
mowell2.delete()
mowells.delete()
# Describe all nodes within 32, 16, 8, 4, 2 and 1 meters of the wells.
pwell = mo.pset_attribute('dfield_well', 1.0, 'le', (1,0,0), 'pwell1')
pwell.dump('zone_radius_01.0.zone')
pwell = mo.pset_attribute('dfield_well', 2.0, 'le', (1,0,0), 'pwell2')
pwell.dump('zone_radius_02.0.zone')
pwell = mo.pset_attribute('dfield_well', 4.0, 'le', (1,0,0), 'pwell4')
pwell.dump('zone_radius_04.0.zone')
pwell = mo.pset_attribute('dfield_well', 8.0, 'le', (1,0,0), 'pwell8')
pwell.dump('zone_radius_08.0.zone')
pwell = mo.pset_attribute('dfield_well', 16.0, 'le', (1,0,0), 'pwell16')
pwell.dump('zone_radius_16.0.zone')
pwell = mo.pset_attribute('dfield_well', 32.0, 'le', (1,0,0), 'pwell32')
pwell.dump('zone_radius_32.0.zone')
mo.dump('Hex_mesh.inp')
#********************************************
# 09 Convert hex mesh to tet mesh
#********************************************
# Copy the hex vertices, connect them into tets, then carry the node and
# cell materials over from the hex mesh by interpolation
motet = mohex.copypts()
motet.setatt('imt', 1)
motet.setatt('itp', 0)
motet.connect(option1='check_interface')
motet.resetpts_itp()
motet.interpolate_voronoi('imt', mohex, 'imt')
motet.interpolate_map('itetclr', mohex, 'itetclr')
#Remove all nodes and elements with imt and itetclr values of 7
motet.rmmat(7)
#pset7 = motet.pset_attribute('imt', 7, 'eq', (1,0,0), 'pset7')
#motet.rmpoint_pset(pset7)
#eltset7 = motet.eltset_attribute('itetclr', 7, 'eq')
#motet.rmpoint_eltset(eltset7, True, True)
motet.rmpoint_compress()
motet.resetpts_itp()
# Visualize connected mesh using ParaView
# This assumes that pylagritrc is being used so that exe option does not need to be specified
#motet.paraview()
motet.dump('Tet_mesh.inp')
#********************************************
# 10 Write tet mesh files for FEHM
# FEHM uses node based materials and properties
#********************************************
# Final cleanup before export: drop duplicates, recompress, refresh node
# types, and report the imt range as a sanity check
#motet.resetpts_parent()
motet.filter()
motet.rmpoint_compress()
motet.resetpts_itp()
motet.minmax('imt')
motet.setatt('itetclr', 1)
#motet. tri_mesh_output_prep()
motet.dump_fehm('Example3')
|
{"hexsha": "ee27d7525781010754fb16441f4547037966812f", "size": 15421, "ext": "py", "lang": "Python", "max_stars_repo_path": "PyLaGriT/examples/stratigraphic_hex_mesh_tutorial.py", "max_stars_repo_name": "daniellivingston/LaGriT-CI-Test", "max_stars_repo_head_hexsha": "8c23f94150a69532be0ef8a33cd999585009530d", "max_stars_repo_licenses": ["CNRI-Python"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2017-02-09T17:54:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T22:22:32.000Z", "max_issues_repo_path": "PyLaGriT/examples/stratigraphic_hex_mesh_tutorial.py", "max_issues_repo_name": "keurfonluu/LaGriT", "max_issues_repo_head_hexsha": "7561fb46658861d61bb4a20e654e718f868e1a18", "max_issues_repo_licenses": ["CNRI-Python"], "max_issues_count": 166, "max_issues_repo_issues_event_min_datetime": "2017-01-26T17:15:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:36:28.000Z", "max_forks_repo_path": "PyLaGriT/examples/stratigraphic_hex_mesh_tutorial.py", "max_forks_repo_name": "daniellivingston/LaGriT", "max_forks_repo_head_hexsha": "decd0ce0e5dab068034ef382cabcd134562de832", "max_forks_repo_licenses": ["Intel"], "max_forks_count": 63, "max_forks_repo_forks_event_min_datetime": "2017-02-08T21:56:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T06:48:36.000Z", "avg_line_length": 32.1270833333, "max_line_length": 128, "alphanum_fraction": 0.637377602, "include": true, "reason": "import numpy", "num_tokens": 5170}
|
[STATEMENT]
lemma and_num_2097152_128: "(AND) (0b00000000001000000000000000000000::word32)
(0b00000000000000000000000010000000::word32) = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 2097152 AND 128 = 0
[PROOF STEP]
by simp
|
{"llama_tokens": 162, "file": "SPARCv8_SparcModel_MMU_Sparc_Properties", "length": 1}
|
///=======================================================================
// Copyright 2015-2020 Clemson University
// Authors: Bradley S. Meyer
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//=======================================================================
#ifndef RANK_SPANNING_BRANCHINGS_HPP
#define RANK_SPANNING_BRANCHINGS_HPP
/*
* Rank spanning branchings
* Camerini et al Algorithm
*
* Requirement:
* directed graph with single root vertex
*/
#include <vector>
#include <limits>
#include <boost/concept_check.hpp>
#include <boost/config.hpp>
#include <boost/foreach.hpp>
#include <boost/graph/properties.hpp>
#include <boost/graph/filtered_graph.hpp>
#include <boost/graph/named_function_params.hpp>
#include <boost/graph/iteration_macros.hpp>
#include <boost/graph/depth_first_search.hpp>
#include <boost/heap/fibonacci_heap.hpp>
#include <boost/pending/disjoint_sets.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/optional.hpp>
#include <boost/unordered_set.hpp>
using namespace boost;
namespace rsb {
namespace detail {
// Auxiliary bidirectional graph used by the algorithm to record the nesting
// of contracted cycles (see expand()/find_back_path() below).
typedef adjacency_list < vecS, vecS, bidirectionalS,
no_property, no_property > BranchingGraph;
typedef graph_traits<BranchingGraph>::vertex_descriptor BranchingVertex;
// Structure to store edges and compare them by weight.
// Bundles an edge with its cached weight and the weight comparator so that
// EdgeNodes can be ordered directly inside the per-vertex priority queues.
template <typename Edge, typename WeightMap, typename Compare>
struct EdgeNode
{
Edge edge;
typename property_traits<WeightMap>::value_type weight; // cached edge weight
Compare compare; // ordering on weights
EdgeNode(){} // default-constructed node is uninitialized; assign before use
EdgeNode(
const Edge& e,
const typename property_traits<WeightMap>::value_type & w,
Compare c ) :
edge( e ), weight( w ), compare( c ) {}
// Delegate ordering to the stored comparator.
bool operator<( EdgeNode const & rhs ) const
{ return compare( weight, rhs.weight ); }
};
// Insert edges from graph into queue, taking into account constraints.
// Fills in_edges (one priority queue of candidate in-edges per target
// vertex), forcing include_edges to be each target's only candidates and
// dropping exclude_edges and self-loops.  Returns true iff the constrained
// graph can still admit a spanning branching: exactly one vertex remains
// with no in-edge (the root) and that root has at least one out-edge.
template <typename Graph, typename Edge, typename WeightMap,
typename Compare, typename MergablePriorityQueueMap>
bool
insert_edges
(
const Graph& g,
WeightMap& weight_map,
Compare& comp,
MergablePriorityQueueMap& in_edges,
const unordered_set<Edge>& include_edges,
const unordered_set<Edge>& exclude_edges
)
{
typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
// in_set tracks vertices with no in-edge seen yet (root candidates);
// out_set tracks vertices with no out-edge seen yet.
unordered_set<Vertex> include_vertices, in_set, out_set;
typename unordered_set<Vertex>::iterator it;
// Insert vertices in sets to check for spanning branching.
BGL_FORALL_VERTICES_T( v, g, Graph )
{
in_set.insert( v );
out_set.insert( v );
}
// Insert edges that must be present and note the invertex. Remove
// vertices from relevant sets.
BOOST_FOREACH( const Edge& e, include_edges )
{
include_vertices.insert( target( e, g ) );
in_edges[target( e, g )].push(
EdgeNode<Edge,WeightMap,Compare>( e, get( weight_map, e ), comp )
);
it = in_set.find( target( e, g ) );
if( it != in_set.end() ) { in_set.erase( it ); }
it = out_set.find( source( e, g ) );
if( it != out_set.end() ) { out_set.erase( it ); }
}
// Insert edges, but not edges to be excluded or into vertices that
// already have in edges. Remove vertices from relevant sets.
BOOST_FOREACH( const Edge &e, edges(g) )
{
if(
include_vertices.find( target(e, g) ) == include_vertices.end()
&&
exclude_edges.find( e ) == exclude_edges.end()
)
{
// Ignore self-loops: they can never appear in a branching.
if( source(e, g) != target(e, g) )
{
in_edges[target(e,g)].push(
EdgeNode<Edge,WeightMap,Compare>( e, get( weight_map, e ), comp )
);
it = in_set.find( target( e, g ) );
if( it != in_set.end() ) { in_set.erase( it ); }
it = out_set.find( source( e, g ) );
if( it != out_set.end() ) { out_set.erase( it ); }
}
}
}
// Check for correct number of roots.
//
if( in_set.size() != 1 )
return false; // Zero roots or more than one root.
else if( out_set.find( *in_set.begin() ) != out_set.end() )
return false; // Root is isolated.
else
return true;
}
  // Retrieve the path back from leaf to root in cycle branching.
  //
  // Prepends to bv the chain of cycle-branching ancestors of bv[0]: after
  // each insertion at the front, bv[0] is the newly found parent, and the
  // recursion continues from it until a vertex with no in edge is reached.
  // NOTE(review): assumes each vertex of cycle_branching has at most one in
  // edge (it is built as a branching), so the loop body runs at most once
  // per call -- confirm against the construction in best_spanning_branching.
  void
  find_back_path(
    BranchingGraph& cycle_branching,
    std::vector<BranchingVertex>& bv
  )
  {
    BGL_FORALL_INEDGES( bv[0], e, cycle_branching, BranchingGraph )
    {
      bv.insert( bv.begin(), source( e, cycle_branching ) );
      find_back_path( cycle_branching, bv );
    }
  }
  // Expand cycles.
  //
  // One pass of cycle expansion on the cycle branching built during the
  // BEST routine.  For every current root v of the cycle branching (no in
  // edge, at least one out edge), walk the back path from the vertex that
  // receives the supervertex's chosen in edge, shift the beta entries one
  // step along that path so each member of the condensed cycle gets its
  // final in edge, and disconnect the processed vertices.  Vertices that
  // become isolated are removed from root_set so later passes ignore them;
  // the caller loops until root_set is empty.
  template <typename Graph, typename Edge, typename IndexMap,
            typename WeightMap, typename Compare>
  void
  expand(
    const Graph& g,
    IndexMap& v_id,
    BranchingGraph& cycle_branching,
    unordered_set<BranchingVertex>& root_set,
    std::vector<EdgeNode< Edge, WeightMap, Compare> >& beta
  )
  {
    BOOST_FOREACH( BranchingVertex v, root_set )
    {
      if(
        in_degree( v, cycle_branching ) == 0 &&
        out_degree( v, cycle_branching ) != 0
      )
      {
        // Start the back path at the head of the supervertex's in edge,
        // translated to its cycle-branching index.
        std::vector<BranchingVertex> bv;
        bv.push_back( v_id[target( beta[v].edge, g )] );
        find_back_path( cycle_branching, bv );
        // Shift beta down the path; bv.size() >= 1, so the bound is safe.
        for( std::size_t i = 0; i < bv.size() - 1; i++ )
        {
          beta[bv[i+1]] = beta[bv[i]];
          // Disconnect the processed vertex so it becomes isolated.
          clear_vertex( bv[i], cycle_branching );
        }
      }
    }
    // Remove isolated vertices.
    std::vector<BranchingVertex> vertices_to_remove_from_set;
    BOOST_FOREACH( BranchingVertex v, root_set )
    {
      if(
        in_degree( v, cycle_branching ) == 0 &&
        out_degree( v, cycle_branching ) == 0
      )
      {
        vertices_to_remove_from_set.push_back( v );
      }
    }
    // Erase outside the iteration to avoid invalidating the foreach range.
    BOOST_FOREACH( BranchingVertex v, vertices_to_remove_from_set )
    {
      root_set.erase( v );
    }
  }
  // Camerini et al. BEST routine.
  //
  // Computes the best (per `comp`) spanning branching of g subject to the
  // include/exclude edge constraints and stores its edges in `branching`.
  // The algorithm is Edmonds-style: repeatedly pick each vertex's best in
  // edge; when the chosen edges close a cycle, condense the cycle into a
  // supervertex (tracked by the strong_cc disjoint set), reweight its
  // incoming edges, and continue.  The condensation history is recorded in
  // cycle_branching, which is expanded at the end to recover the edges of
  // the original graph.  If the constraints make a spanning branching
  // impossible, `branching` is left untouched.
  template <typename EdgeNodeType, typename MergablePriorityQueue,
            typename Graph, typename Edge, typename IndexMap,
            typename WeightMap, typename Rank, typename Pred,
            typename Compare>
  void
  best_spanning_branching( const Graph& g,
                           unordered_set<Edge>& branching,
                           IndexMap& v_id,
                           WeightMap& weight_map,
                           Compare& comp,
                           Rank rank,
                           Pred pred1,
                           Pred pred2,
                           unordered_set<Edge>& include_edges,
                           unordered_set<Edge>& exclude_edges
                         )
  {
    // Define types.
    typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
    typedef typename graph_traits<Graph>::vertices_size_type vertex_idx_t;
    typedef std::map<Vertex, EdgeNodeType> exit_map_t;
    // Create various objects. Note in particular the two disjoint
    // sets. The set of weakly connected components is used to determine
    // cycles. The set of strongly connected components is used to
    // represent supervertices (condensed cycles).
    unordered_set<Vertex> unvisited_vertex_set;
    Vertex root_vertex;
    BranchingGraph cycle_branching;
    vertex_idx_t n = num_vertices(g);
    std::map<Vertex, MergablePriorityQueue> in_edges;
    // Create disjoint sets. The set of weakly connected components is
    // used to determine cycles. The set of strongly connected components
    // is used to represent supervertices (condensed cycles).
    disjoint_sets<Rank, Pred>
      weak_cc( rank, pred1 ), strong_cc( rank, pred2 );
    // Bail out (leaving `branching` empty) when the constraints rule out a
    // spanning branching.
    if(
      !insert_edges(
        g,
        weight_map,
        comp,
        in_edges,
        include_edges,
        exclude_edges
      )
    ) return;
    // beta is indexed by cycle-branching vertex id: slots 0..n-1 belong to
    // the original vertices, slots added later belong to supervertices, so
    // 2*n slots suffice.
    std::vector<EdgeNodeType> beta( 2 * n );
    std::map<Vertex, vertex_idx_t> parent;
    BGL_FORALL_VERTICES_T( v, g, Graph )
    {
      weak_cc.make_set( v );
      strong_cc.make_set( v );
      parent[v] = v_id[v];
      add_vertex( cycle_branching );
      unvisited_vertex_set.insert( v );
    }
    while( !unvisited_vertex_set.empty() )
    {
      typename unordered_set<Vertex>::iterator it =
        unvisited_vertex_set.begin();
      if( in_edges[*it].empty() )
      {
        // A vertex with no candidate in edge is the root.
        root_vertex = *it;
        unvisited_vertex_set.erase( it );
      }
      else
      {
        // Choose the best-weight in edge for this (super)vertex.
        EdgeNodeType critical_edge_node = in_edges[*it].top();
        beta[parent[*it]] = critical_edge_node;
        // Done with this vertex.
        unvisited_vertex_set.erase( it );
        // Check for cycle and, if present, condense.
        if(
          weak_cc.find_set( source( critical_edge_node.edge, g ) ) !=
          weak_cc.find_set( target( critical_edge_node.edge, g ) )
        )
        {
          weak_cc.union_set(
            source( critical_edge_node.edge, g ),
            target( critical_edge_node.edge, g )
          );
        }
        else
        {
          // The chosen edges close a cycle: create a supervertex and walk
          // the cycle by following each member's chosen in edge.
          BranchingVertex v_new = add_vertex( cycle_branching );
          EdgeNodeType least_costly_edge_node = critical_edge_node;
          boost::unordered_set<Vertex> cycle_vertex_set;
          for(
            Vertex v = source( critical_edge_node.edge, g );
            cycle_vertex_set.find( strong_cc.find_set( v ) ) ==
              cycle_vertex_set.end();
            v = source( beta[parent[strong_cc.find_set( v )]].edge, g )
          )
          {
            Vertex u = strong_cc.find_set( v );
            cycle_vertex_set.insert( u );
            // Record the condensation so expand() can later undo it.
            add_edge( v_new, parent[u], cycle_branching );
            if(
              comp( beta[parent[u]].weight, least_costly_edge_node.weight )
            )
            {
              least_costly_edge_node = beta[parent[u]];
            }
          }
          // Merge all cycle members into a single strong component.
          Vertex new_repr = *cycle_vertex_set.begin();
          BOOST_FOREACH( Vertex u, cycle_vertex_set )
          {
            strong_cc.link( u, new_repr );
            new_repr = strong_cc.find_set( new_repr );
          }
          // Reweight incoming edges per Edmonds/Camerini and keep, for each
          // outside source, only its best edge into the cycle.
          BOOST_FOREACH( Vertex v, cycle_vertex_set )
          {
            exit_map_t v_exit;
            BOOST_FOREACH( EdgeNodeType en, in_edges[v] )
            {
              if( strong_cc.find_set( source( en.edge, g ) ) != new_repr )
              {
                en.weight += least_costly_edge_node.weight -
                               beta[parent[v]].weight;
                Vertex u = strong_cc.find_set( source( en.edge, g ) );
                if( v_exit.find(u) != v_exit.end() )
                {
                  if( comp( v_exit[u].weight, en.weight ) )
                  {
                    v_exit[u] = en;
                  }
                }
                else
                {
                  v_exit[u] = en;
                }
              }
            }
            // Rebuild this member's queue from the surviving edges.
            MergablePriorityQueue tmp_queue;
            BOOST_FOREACH( typename exit_map_t::value_type& t, v_exit )
            {
              tmp_queue.push( t.second );
            }
            in_edges[v].swap( tmp_queue );
          }
          // Pool the surviving queues into the supervertex representative.
          BOOST_FOREACH( Vertex v, cycle_vertex_set )
          {
            if( v != new_repr ) in_edges[new_repr].merge( in_edges[v] );
          }
          unvisited_vertex_set.insert( new_repr );
          parent[new_repr] = v_new;
        }
      }
    }
    // Create a set containing possible roots of the cycle branching.
    // In each cycle expansion, remove isolated vertices of the
    // cycle branching to avoid considering them in subsequent cycle
    // expansions.
    unordered_set<BranchingVertex> root_set;
    BGL_FORALL_VERTICES( u, cycle_branching, BranchingGraph )
    {
      root_set.insert( u );
    }
    while( !root_set.empty() )
    {
      expand( g, v_id, cycle_branching, root_set, beta );
    }
    // Every vertex except the root contributes its final in edge.
    BGL_FORALL_VERTICES_T( v, g, Graph )
    {
      if( v != root_vertex )
      {
        branching.insert( beta[v_id[v]].edge );
      }
    }
  }
// Depth-first visitor to set up pre-order and post-order maps.
template<typename OrderMap>
class dfs_order_visitor:public default_dfs_visitor
{
public:
dfs_order_visitor(
OrderMap& pr,
OrderMap& po
) : m_pr( pr ), m_po( po ) { td = 0; tf = 0; }
template<typename Vertex, typename Graph>
void discover_vertex(const Vertex u, const Graph & g)
{
m_pr[u] = td++;
}
template <typename Vertex, typename Graph>
void finish_vertex(const Vertex u, const Graph & g)
{
m_po[u] = tf++;
}
OrderMap& m_pr;
OrderMap& m_po;
private:
std::size_t td;
std::size_t tf;
};
// The ancestor checker. A vertex u is a proper ancestor of
// vertex v (in the parent branching) if pr[u] < pr[v] and po[u] > po[v].
template<typename OrderMap>
struct ancestor_checker
{
OrderMap& m_pr;
OrderMap& m_po;
ancestor_checker( OrderMap& pr, OrderMap& po ) :
m_pr( pr ), m_po( po ) {}
template<typename Vertex>
bool operator()( const Vertex& v1, const Vertex& v2 ) const
{ return ( m_pr[v1] < m_pr[v2] && m_po[v1] > m_po[v2] ); }
};
// Create a new branching from an input edge set.
template<typename Graph, typename Edge, typename IndexMap>
BranchingGraph
create_branching_graph_from_edge_set(
Graph& g,
IndexMap& v_id,
const unordered_set<Edge>& branching
)
{
BranchingGraph new_branching;
for( size_t i = 0; i < num_vertices( g ); i++ )
{
add_vertex( new_branching );
}
BOOST_FOREACH( const Edge& e, branching )
{
add_edge(
v_id[source( e, g )], v_id[target( e, g )], new_branching
);
}
return new_branching;
}
  // Camerini et al. SEEK routine. Find the in edge that, when removed,
  // gives the next best branching for a vertex and the weight difference.
  //
  // Only acts when the vertex's chosen edge b belongs to the reference
  // branching.  It scans the vertex's remaining in edges in weight order,
  // skipping b itself and any replacement whose source lies below b's head
  // in the branching (which would create a cycle), and updates the shared
  // accumulators (return_edge, delta) when the weight loss of the first
  // viable replacement improves on the best found so far.
  template <typename Graph, typename Edge, typename IndexMap,
            typename MergablePriorityQueue, typename EdgeNodeType,
            typename Compare, typename OrderMap, typename OptionalWeight>
  void
  seek_next_edge_weight_diff(
    Graph& g,
    MergablePriorityQueue& in_edges,
    IndexMap& v_id,
    ancestor_checker<OrderMap>& is_ancestor,
    const unordered_set<Edge>& branching,
    EdgeNodeType& b,
    Compare& comp,
    Edge& return_edge,
    OptionalWeight& delta
  )
  {
    if( branching.find( b.edge ) != branching.end() )
    {
      // ordered_begin() yields the edges from best to worst, so the first
      // viable candidate is the cheapest replacement.
      for(
        typename MergablePriorityQueue::ordered_iterator ei =
          in_edges.ordered_begin();
        ei != in_edges.ordered_end();
        ei++
      )
      {
        if( (*ei).edge != b.edge )
        {
          // Reject replacements that would point back into b's subtree.
          if(
            !is_ancestor(
              v_id[target( b.edge, g )],
              v_id[source( (*ei).edge, g )]
            )
          )
          {
            if( !delta || comp( b.weight - (*ei).weight, delta.get() ) )
            {
              delta = b.weight - (*ei).weight;
              return_edge = b.edge;
              break;
            }
          }
        }
      }
    }
  }
  // Camerini et al. NEXT routine. Find the edge that, when removed,
  // gives the next best branching and the resulting weight difference.
  //
  // Re-runs the condensation pass of the BEST routine over the constrained
  // graph while, at every (super)vertex, invoking SEEK to find the edge of
  // the reference `branching` whose forced removal costs the least.  The
  // result is boost::none when no spanning branching exists under the
  // constraints or no removable edge was found; otherwise it is the pair
  // (edge to remove, weight loss incurred by removing it).
  template <typename EdgeNodeType, typename MergablePriorityQueue,
            typename Graph, typename Edge, typename IndexMap,
            typename WeightMap, typename Rank, typename Pred,
            typename Compare>
  boost::optional<
    std::pair<Edge, typename property_traits<WeightMap>::value_type>
  >
  next_spanning_branching( const Graph& g,
                           const unordered_set<Edge>& branching,
                           IndexMap& v_id,
                           WeightMap& weight_map,
                           Compare& comp,
                           Rank rank,
                           Pred pred1,
                           Pred pred2,
                           unordered_set<Edge>& include_edges,
                           unordered_set<Edge>& exclude_edges
                         )
  {
    typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
    typedef std::map<Vertex, EdgeNodeType> exit_map_t;
    typedef typename property_traits<WeightMap>::value_type weight_t;
    // Accumulators updated by seek_next_edge_weight_diff across vertices.
    Edge return_edge;
    boost::optional<weight_t> delta;
    EdgeNodeType b;
    // Create various objects.
    boost::optional<
      std::pair<Edge, typename property_traits<WeightMap>::value_type>
    > next_edge_and_weight_delta;
    unordered_set<Vertex> unvisited_vertex_set;
    std::map<Vertex, MergablePriorityQueue> in_edges;
    std::map<Vertex, EdgeNodeType> max_e;
    // Create disjoint sets. The set of weakly connected components is
    // used to determine cycles. The set of strongly connected components
    // is used to represent supervertices (condensed cycles).
    disjoint_sets<Rank, Pred>
      weak_cc( rank, pred1 ), strong_cc( rank, pred2 );
    // Create a branching graph to check whether a vertex is an ancestor of
    // another vertex in the branching.
    BranchingGraph ancestor_branching =
      create_branching_graph_from_edge_set( g, v_id, branching );
    // Insert edges.
    if(
      !insert_edges(
        g,
        weight_map,
        comp,
        in_edges,
        include_edges,
        exclude_edges
      )
    )
    {
      // No spanning branching exists under the constraints.
      return next_edge_and_weight_delta;
    }
    // Initialize data structures and find start vertex for depth
    // first search.
    BranchingVertex start_vertex;
    BGL_FORALL_VERTICES_T( v, g, Graph )
    {
      weak_cc.make_set( v );
      strong_cc.make_set( v );
      unvisited_vertex_set.insert( v );
      // The unique in-edge-free vertex is the branching root.
      if( in_edges[v].empty() ) { start_vertex = v_id[v]; }
    }
    // Create the ancestor checker from ancestor_branching.
    typedef std::map<BranchingVertex, std::size_t> OrderMap;
    OrderMap pr;
    OrderMap po;
    dfs_order_visitor<OrderMap> vis(pr, po);
    depth_first_search(
      ancestor_branching, visitor(vis).root_vertex( start_vertex )
    );
    ancestor_checker<OrderMap> is_ancestor(pr, po);
    // Main loop.
    while( !unvisited_vertex_set.empty() )
    {
      typename unordered_set<Vertex>::iterator it =
        unvisited_vertex_set.begin();
      if( in_edges[*it].empty() )
      {
        unvisited_vertex_set.erase( it );
      }
      else
      {
        // Get largest in edge with ties solved in favor of edges in
        // input branching.
        for(
          typename MergablePriorityQueue::ordered_iterator ei =
            in_edges[*it].ordered_begin();
          ei != in_edges[*it].ordered_end();
          ei++
        )
        {
          // Stop once the weight drops below the top weight (tie run ends).
          if( comp( (*ei).weight, in_edges[*it].top().weight ) )
            break;
          b = *ei;
          if( branching.find( b.edge ) != branching.end() )
            break;
        }
        max_e[*it] = b;
        // Seek next edge weight difference.
        seek_next_edge_weight_diff(
          g, in_edges[*it], v_id, is_ancestor, branching, b, comp,
          return_edge, delta
        );
        // Done with this vertex.
        unvisited_vertex_set.erase( it );
        // Check for cycle and, if present, condense.
        if(
          weak_cc.find_set( source( b.edge, g ) ) !=
          weak_cc.find_set( target( b.edge, g ) )
        )
        {
          weak_cc.union_set( source( b.edge, g ), target( b.edge, g ) );
        }
        else
        {
          // Condense the cycle closed by the chosen edges, exactly as in
          // the BEST routine but tracking max_e instead of beta.
          EdgeNodeType least_costly_edge_node = b;
          boost::unordered_set<Vertex> cycle_vertex_set;
          for(
            Vertex v = source( b.edge, g );
            cycle_vertex_set.find( strong_cc.find_set( v ) ) ==
              cycle_vertex_set.end();
            v = source( max_e[strong_cc.find_set( v )].edge, g )
          )
          {
            Vertex u = strong_cc.find_set( v );
            cycle_vertex_set.insert( u );
            if( comp( max_e[u].weight, least_costly_edge_node.weight ) )
            {
              least_costly_edge_node = max_e[u];
            }
          }
          Vertex new_repr = *cycle_vertex_set.begin();
          BOOST_FOREACH( Vertex v, cycle_vertex_set )
          {
            strong_cc.link( v, new_repr );
            new_repr = strong_cc.find_set( new_repr );
          }
          // Adjust arc weights and remove parallel arcs. Keep the
          // largest-weight in arc and the largest viable alternative
          // arc from each source outside the cycle. Make sure that
          // an arc from the branching is among the added edges, if present.
          // v_exit[0]: arc from the reference branching; v_exit[1]: best
          // arc overall; v_exit[2]: best viable (non-cycle-creating)
          // alternative -- one entry per outside source component.
          BOOST_FOREACH( Vertex v, cycle_vertex_set )
          {
            std::vector<exit_map_t> v_exit(3);
            BOOST_FOREACH( EdgeNodeType en, in_edges[v] )
            {
              if( strong_cc.find_set( source( en.edge, g ) ) != new_repr )
              {
                en.weight += least_costly_edge_node.weight - max_e[v].weight;
                Vertex u = strong_cc.find_set( source( en.edge, g ) );
                if( branching.find( en.edge ) != branching.end() )
                {
                  v_exit[0][u] = en;
                }
                else
                {
                  if( v_exit[1].find(u) != v_exit[1].end() )
                  {
                    if( comp( v_exit[1][u].weight, en.weight ) )
                    {
                      // Demote the previous best to the alternative slot
                      // if it is a viable (non-ancestor) replacement.
                      if(
                        !is_ancestor(
                          v_id[target( v_exit[1][u].edge, g )],
                          v_id[source( v_exit[1][u].edge, g )]
                        )
                      )
                      {
                        v_exit[2][u] = v_exit[1][u];
                      }
                      v_exit[1][u] = en;
                    }
                    else if(
                      v_exit[2].find(u) == v_exit[2].end() ||
                      comp( v_exit[2][u].weight, en.weight )
                    )
                    {
                      if(
                        !is_ancestor(
                          v_id[target( en.edge, g )],
                          v_id[source( en.edge, g )]
                        )
                      )
                      {
                        v_exit[2][u] = en;
                      }
                    }
                  }
                  else
                  {
                    v_exit[1][u] = en;
                  }
                }
              }
            }
            // Rebuild the member's queue from the surviving candidates.
            MergablePriorityQueue tmp_queue;
            BOOST_FOREACH( exit_map_t& exit_map, v_exit )
            {
              BOOST_FOREACH( typename exit_map_t::value_type& t, exit_map )
              {
                tmp_queue.push( t.second );
              }
            }
            in_edges[v].swap( tmp_queue );
          }
          // Pool the queues into the supervertex representative.
          BOOST_FOREACH( Vertex v, cycle_vertex_set )
          {
            if( v != new_repr ) in_edges[new_repr].merge( in_edges[v] );
          }
          unvisited_vertex_set.insert( new_repr );
        }
      }
    }
    if( delta )
    {
      next_edge_and_weight_delta = std::make_pair( return_edge, delta.get() );
    }
    return next_edge_and_weight_delta;
  }
// Class to filter graph.
template<typename EdgeSet>
class branching_filter
{
public:
branching_filter(){}
branching_filter( const EdgeSet * _es ) : p_es( _es ){}
template <typename Edge>
bool operator()( const Edge& e ) const
{
if( p_es->find( e ) != p_es->end() )
return true;
else
return false;
}
private:
const EdgeSet * p_es;
};
  // Structure to store branchings and compare them by weight.
  //
  // One node of the ranking priority queue: a previously reported
  // branching, its weight after the pending edge removal, the edge whose
  // removal produced it, and the include/exclude constraint sets under
  // which it was generated.  operator< delegates to the user comparison,
  // so the queue's top is the best entry per `compare`.
  template<typename Edge, typename WeightMap, typename Compare>
  struct BranchingEntry
  {
    Edge edge;                            // Edge to remove next.
    typename property_traits<WeightMap>::value_type weight;
    Compare compare;
    unordered_set<Edge> branching;        // The branching this entry refines.
    unordered_set<Edge> include_edges;    // Constraints it was built under.
    unordered_set<Edge> exclude_edges;
    BranchingEntry(){}
    BranchingEntry(
      const typename property_traits<WeightMap>::value_type& w,
      const Edge& e,
      Compare& comp,
      const unordered_set<Edge>& b,
      const unordered_set<Edge>& include,
      const unordered_set<Edge>& exclude
    ) :
    edge( e ), weight( w ), compare( comp ),
    branching( b ), include_edges( include ),
    exclude_edges( exclude ){}
    bool operator<( BranchingEntry const & rhs ) const
    { return compare( weight, rhs.weight ); }
  };
  // Compute the weight of a branching.
  //
  // Sums get( w, e ) over all edges of the set.  boost::optional seeds the
  // accumulation with the first edge's weight, so weight_t needs no zero
  // element or default constructor.
  // Precondition: `branching` must be non-empty -- `weight` is only ever
  // assigned inside the loop, so weight.get() on an empty set is undefined.
  template<typename WeightMap, typename Edge>
  typename property_traits<WeightMap>::value_type
  compute_branching_weight(
    WeightMap& w,
    const unordered_set<Edge>& branching
  )
  {
    typedef typename property_traits<WeightMap>::value_type weight_t;
    boost::optional<weight_t> weight;
    BOOST_FOREACH( const Edge& e, branching )
    {
      if( !weight )
      {
        weight = get( w, e );
      }
      else
      {
        weight = weight.get() + get( w, e );
      }
    }
    return weight.get();
  }
  // Routine implementation.
  //
  // Camerini et al. ranking scheme: report the best spanning branching,
  // then repeatedly pop the most promising partition entry from a priority
  // queue.  Each entry splits the solution space on one edge: one child
  // excludes the edge (its best branching is computed and reported), the
  // other includes it.  NEXT supplies, for each child, the edge whose
  // removal yields that child's second-best branching, and the children are
  // pushed back onto the queue keyed by that second-best weight.  The
  // user-supplied BranchingProcessor `bp` receives each branching as a
  // filtered_graph view; returning false from bp stops the enumeration.
  template <template<class...> class PriorityQueue,
            typename Graph, typename BranchingProcessor, typename IndexMap,
            typename WeightMap, typename Compare,
            typename Rank, typename Parent>
  void
  rank_spanning_branchings_impl( const Graph& g,
                                 BranchingProcessor bp,
                                 IndexMap v_id,
                                 WeightMap w,
                                 Compare comp,
                                 Rank rank,
                                 Parent pred1,
                                 Parent pred2
                               )
  {
    typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
    typedef typename graph_traits<Graph>::edge_descriptor Edge;
    typedef typename property_traits<WeightMap>::value_type weight_t;
    typedef BranchingEntry<Edge, WeightMap, Compare> branching_entry_t;
    typedef EdgeNode<Edge, WeightMap, Compare> edge_node_t;
    typedef PriorityQueue<branching_entry_t> branching_queue_t;
    typedef PriorityQueue<edge_node_t> edge_node_queue_t;
    // Fail at compile time if the graph/maps/queues do not model the
    // concepts the algorithm relies on.
    BOOST_CONCEPT_ASSERT(( VertexAndEdgeListGraphConcept<Graph> ));
    BOOST_CONCEPT_ASSERT(( ReadablePropertyMapConcept<IndexMap, Vertex> ));
    BOOST_CONCEPT_ASSERT(( ReadablePropertyMapConcept<WeightMap, Edge> ));
    BOOST_CONCEPT_ASSERT(( heap::PriorityQueue<branching_queue_t> ));
    BOOST_CONCEPT_ASSERT(( heap::MergablePriorityQueue<edge_node_queue_t> ));
    unordered_set<Edge> best_branching, empty_set;
    boost::optional<std::pair<Edge, weight_t> > next_edge_and_weight_delta;
    // NOTE(review): `e` appears to be unused in this function.
    Edge e;
    branching_queue_t branching_queue;
    // Unconstrained best branching: the first solution reported.
    best_spanning_branching<edge_node_t, edge_node_queue_t>
    ( g,
      best_branching,
      v_id,
      w,
      comp,
      rank,
      pred1,
      pred2,
      empty_set,
      empty_set
    );
    branching_filter<unordered_set<Edge> > filter1( &best_branching );
    filtered_graph<Graph, branching_filter<unordered_set<Edge> > >
      fg1( g, filter1 );
    if( !bp( fg1 ) ) return;
    // Seed the queue with the second-best branching derived from the best.
    next_edge_and_weight_delta =
      next_spanning_branching<edge_node_t, edge_node_queue_t>
      ( g,
        best_branching,
        v_id,
        w,
        comp,
        rank,
        pred1,
        pred2,
        empty_set,
        empty_set
      );
    if( next_edge_and_weight_delta )
    {
      branching_queue.push(
        branching_entry_t(
          compute_branching_weight( w, best_branching ) -
          (next_edge_and_weight_delta.get()).second,
          (next_edge_and_weight_delta.get()).first,
          comp,
          best_branching,
          empty_set,
          empty_set
        )
      );
    }
    else
    {
      // The best branching is the only one; nothing further to rank.
      return;
    }
    while( !branching_queue.empty() )
    {
      unordered_set<Edge> branching;
      branching_entry_t P = branching_queue.top();
      branching_queue.pop();
      // Split on P.edge: the "exclude" child forbids it, the "include"
      // child forces it.
      unordered_set<Edge> include_edges = P.include_edges;
      unordered_set<Edge> exclude_edges = P.exclude_edges;
      include_edges.insert( P.edge );
      exclude_edges.insert( P.edge );
      // Best branching of the "exclude" child -- the next solution to report.
      best_spanning_branching<edge_node_t, edge_node_queue_t>
      ( g,
        branching,
        v_id,
        w,
        comp,
        rank,
        pred1,
        pred2,
        P.include_edges,
        exclude_edges
      );
      branching_filter<unordered_set<Edge> > filter( &branching );
      filtered_graph<Graph, branching_filter<unordered_set<Edge> > >
        fg( g, filter );
      if( !bp( fg ) ) return;
      // Requeue the "include" child, keyed by its second-best weight.
      next_edge_and_weight_delta =
        next_spanning_branching<edge_node_t, edge_node_queue_t>
        ( g,
          P.branching,
          v_id,
          w,
          comp,
          rank,
          pred1,
          pred2,
          include_edges,
          P.exclude_edges
        );
      if( next_edge_and_weight_delta )
      {
        branching_queue.push(
          branching_entry_t(
            compute_branching_weight( w, P.branching ) -
            (next_edge_and_weight_delta.get()).second,
            (next_edge_and_weight_delta.get()).first,
            comp,
            P.branching,
            include_edges,
            P.exclude_edges
          )
        );
      }
      // Requeue the "exclude" child, keyed by its second-best weight.
      next_edge_and_weight_delta =
        next_spanning_branching<edge_node_t, edge_node_queue_t>
        ( g,
          branching,
          v_id,
          w,
          comp,
          rank,
          pred1,
          pred2,
          P.include_edges,
          exclude_edges
        );
      if( next_edge_and_weight_delta )
      {
        branching_queue.push(
          branching_entry_t(
            P.weight - (next_edge_and_weight_delta.get()).second,
            (next_edge_and_weight_delta.get()).first,
            comp,
            branching,
            P.include_edges,
            exclude_edges
          )
        );
      }
    }
  }
template <template<class...> class PriorityQueue,
typename Graph, typename BranchingProcessor, typename IndexMap,
typename WeightMap, typename Compare>
void
rank_spanning_branchings_dispatch2( const Graph& g,
BranchingProcessor bp,
IndexMap id_map,
WeightMap weight_map,
Compare compare
)
{
typename graph_traits<Graph>::vertices_size_type n = num_vertices(g);
if( num_vertices( g ) == 0 ) return; // Nothing to do.
typedef typename graph_traits<Graph>::vertices_size_type vertex_idx_t;
typedef typename graph_traits<Graph>::vertex_descriptor Vertex;
// Set up rank and parent for disjoint sets.
std::vector<vertex_idx_t> rank( n );
std::vector<Vertex> pred1( n ), pred2( n );
rank_spanning_branchings_impl<PriorityQueue>(
g,
bp,
id_map,
weight_map,
compare,
make_iterator_property_map( rank.begin(), id_map, rank[0] ),
make_iterator_property_map( pred1.begin(), id_map, pred1[0] ),
make_iterator_property_map( pred2.begin(), id_map, pred2[0])
);
}
  // Named-parameter dispatch, overload taken when the caller supplied a
  // distance_compare functor: resolve the vertex index and edge weight
  // maps from the named parameters (falling back to the graph's interior
  // properties) and forward to dispatch2.
  template <template<class...> class PriorityQueue,
            typename Graph, typename BranchingProcessor, typename Compare,
            typename P, typename T, typename R>
  void rank_spanning_branchings_dispatch1(
    const Graph& g,
    BranchingProcessor bp,
    Compare compare,
    const bgl_named_params<P, T, R>& params
  )
  {
    detail::rank_spanning_branchings_dispatch2<PriorityQueue>(
      g,
      bp,
      choose_param(
        get_param( params, vertex_index_t()), get( vertex_index, g )
      ),
      choose_param(
        get_param( params, edge_weight_t()), get( edge_weight, g )
      ),
      compare
    );
  }
  // Named-parameter dispatch, overload taken when no distance_compare was
  // supplied (param_not_found): default to std::less over the edge weight
  // type, i.e. rank branchings from largest weight downward under the
  // boost.heap max-heap convention.
  template <template<class...> class PriorityQueue,
            typename Graph, typename BranchingProcessor,
            typename P, typename T, typename R>
  void rank_spanning_branchings_dispatch1(
    const Graph& g,
    BranchingProcessor bp,
    param_not_found,
    const bgl_named_params<P, T, R>& params
  )
  {
    typedef
    typename
      property_traits<
        typename property_map<Graph, edge_weight_t>::const_type
      >::value_type weight_t;
    // The default comparison requires the weight type to be comparable.
    BOOST_CONCEPT_ASSERT(( ComparableConcept<weight_t> ));
    detail::rank_spanning_branchings_dispatch2<PriorityQueue>(
      g,
      bp,
      choose_param(
        get_param( params, vertex_index_t()), get( vertex_index, g )
      ),
      choose_param(
        get_param( params, edge_weight_t()), get( edge_weight, g )
      ),
      std::less<weight_t>()
    );
  }
} // namespace detail
  // Public entry point (named-parameter form).
  //
  // Enumerates the spanning branchings of g in rank order, invoking the
  // BranchingProcessor bp on each one until bp returns false or the
  // branchings are exhausted.  The priority queue template defaults to
  // boost::heap::fibonacci_heap; the comparison is taken from the optional
  // distance_compare named parameter.
  template <template<class...> class PriorityQueue = heap::fibonacci_heap,
            typename Graph, typename BranchingProcessor,
            typename P, typename T, typename R>
  void
  inline rank_spanning_branchings(
    const Graph& g,
    BranchingProcessor bp,
    const bgl_named_params<P, T, R>& params
  )
  {
    detail::rank_spanning_branchings_dispatch1<PriorityQueue>(
      g,
      bp,
      get_param( params, distance_compare_t() ),
      params
    );
  }
  // Public entry point, convenience overload with all defaults: builds an
  // empty named-parameter pack and delegates to the full overload.
  template <template<class...> class PriorityQueue = heap::fibonacci_heap,
            typename Graph, typename BranchingProcessor>
  void
  inline rank_spanning_branchings( const Graph& g,
                                   BranchingProcessor bp
                                 )
  {
    bgl_named_params<int,int> params(0);
    rank_spanning_branchings<PriorityQueue>( g, bp, params );
  }
} // namespace rsb
#endif // RANK_SPANNING_BRANCHINGS_HPP
|
{"hexsha": "4a34c1af48ac632a6a65417ab610d99aecff54f5", "size": 36220, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/rank_spanning_branchings.hpp", "max_stars_repo_name": "mbradle/rank_spanning_branchings", "max_stars_repo_head_hexsha": "86aa045beebe0e5f273f0ee1bce5e91ca4f4f079", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/rank_spanning_branchings.hpp", "max_issues_repo_name": "mbradle/rank_spanning_branchings", "max_issues_repo_head_hexsha": "86aa045beebe0e5f273f0ee1bce5e91ca4f4f079", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/rank_spanning_branchings.hpp", "max_forks_repo_name": "mbradle/rank_spanning_branchings", "max_forks_repo_head_hexsha": "86aa045beebe0e5f273f0ee1bce5e91ca4f4f079", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0457097033, "max_line_length": 80, "alphanum_fraction": 0.5291275538, "num_tokens": 7620}
|
#####
##### Tests copied from IterativeSolvers.jl
##### https://github.com/JuliaLinearAlgebra/IterativeSolvers.jl/blob/v0.9.2/src/lsmr.jl
#####
# Type used in the Dampened test.
# Solves (A'A + diag(v).^2) x = A'b by running LSMR in the augmented space
# Ã = [A; diag(v)], b̃ = [b; zeros(size(A, 2))].
struct DampenedMatrix{Tv,TA<:AbstractMatrix{Tv},TD<:AbstractVector{Tv}} <: AbstractMatrix{Tv}
    A::TA          # Original matrix (top block of the augmented operator).
    diagonal::TD   # Damping vector v, acting as diag(v) stacked below A.
end
# Size of the augmented operator [A; diag(v)]: the rows of A plus one row
# per damping entry, with the same number of columns as A.
function Base.size(A::DampenedMatrix)
    nrows = size(A.A, 1) + length(A.diagonal)
    ncols = size(A.A, 2)
    return (nrows, ncols)
end
# Per-dimension size of the augmented operator.  Any dimension other than
# 1 or 2 reports 1, mirroring the original method's behavior for trailing
# dimensions.
function Base.size(A::DampenedMatrix, dim::Integer)
    if dim == 1
        return size(A.A, 1) + length(A.diagonal)
    elseif dim == 2
        return size(A.A, 2)
    else
        return 1
    end
end
# In-place product y = [A; diag(v)] * x for the augmented operator: the top
# block of y is A*x, the bottom block is the elementwise product v .* x.
function LinearAlgebra.mul!(y::AbstractVector{Tv}, mw::DampenedMatrix, x::AbstractVector{Tv}) where {Tv}
    m₁ = size(mw.A, 1)   # rows contributed by A
    m₂ = size(mw, 1)     # total rows of the augmented operator
    # Top block: y[1:m₁] = A * x, computed into a view to avoid allocation.
    mul!(view(y, 1:m₁), mw.A, x)
    # Bottom (damping) block: diag(v) * x == v .* x elementwise.
    y[m₁+1:m₂] .= mw.diagonal .* x
    return y
end
# In-place adjoint product y = [A; diag(v)]' * x, i.e.
# y = A' * x[1:m₁] + diag(v) * x[m₁+1:m₂].
function LinearAlgebra.mul!(y::AbstractVector, mw::Adjoint{Tv,<:DampenedMatrix}, x::AbstractVector) where {Tv}
    m₁ = size(mw.parent.A, 1)   # rows of A in the un-adjointed operator
    m₂ = size(mw.parent, 1)     # total rows of the un-adjointed operator
    # y = A' * (top block of x).
    mul!(y, adjoint(mw.parent.A), view(x, 1:m₁))
    # Accumulate the damping contribution from the bottom block of x.
    y .+= mw.parent.diagonal .* view(x, m₁+1:m₂)
    return y
end
"""
Produces the m × n submatrix from
A = [ 1
1 2
2 3
3 4
...
n ]
suitably padded by zeros.
"""
function sol_matrix(m, n)
mn = min(m, n)
I, J, V = SparseArrays.spdiagm_internal(-1 => 1.0 : mn - 1, 0 => 1.0 : mn)
sparse(I, J, V, m, n)
end
@testset "Small dense matrix" for T = (Float32, Float64, ComplexF32, ComplexF64)
Random.seed!(501)
A = rand(T, 10, 5)
b = rand(T, 10)
x = QSM.lsmr(A, b)
@test norm(x - A\b) ≤ √eps(real(T))
end
@testset "SOL test" for (m, n, damp) = ((10, 10, 0), (20, 10, 0), (20, 10, 0.1))
# Test adapted from the BSD-licensed Matlab implementation at
# http://www.stanford.edu/group/SOL/software/lsqr.html
# Michael Saunders, Systems Optimization Laboratory,
# Dept of MS&E, Stanford University.
#-----------------------------------------------------------------------
# 11 Apr 1996: First version for distribution with lsqr.m.
# Michael Saunders, Dept of EESOR, Stanford University.
A = sol_matrix(m, n)
x = float(n : -1 : 1)
b = A * x
x_lsmr = QSM.lsmr(A, b, atol = 1e-7, btol = 1e-7, conlim = 1e10, maxit = 10n)
@test norm(b - A * x) ≤ 1e-4
end
@testset "Dampened test" for (m, n) = ((10, 10), (20, 10))
Random.seed!(501)
# Test used to make sure A, b can be generic matrix / vector
b = rand(m)
A = rand(m, n)
v = rand(n)
A′ = DampenedMatrix(A, v)
b′ = [b; zeros(n)]
x = QSM.lsmr(A′, b′)
@test norm((A'A + Matrix(Diagonal(v)) .^ 2)x - A'b) ≤ 1e-3
end
|
{"hexsha": "d4842457ea79c45503def75e71aec19db3b62fe9", "size": 2781, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/utils/lsmr.jl", "max_stars_repo_name": "jondeuce/QSM.jl", "max_stars_repo_head_hexsha": "f960d8fe99f11d610be70051d79c2aba51f483f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-16T09:49:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T07:13:13.000Z", "max_issues_repo_path": "test/utils/lsmr.jl", "max_issues_repo_name": "aTrotier/QSM.jl", "max_issues_repo_head_hexsha": "c94973161c322382a2b77220983ebeec3825044f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-14T00:20:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T00:20:04.000Z", "max_forks_repo_path": "test/utils/lsmr.jl", "max_forks_repo_name": "aTrotier/QSM.jl", "max_forks_repo_head_hexsha": "c94973161c322382a2b77220983ebeec3825044f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-15T23:32:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T09:48:50.000Z", "avg_line_length": 29.9032258065, "max_line_length": 110, "alphanum_fraction": 0.5616684646, "num_tokens": 992}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Use the same scale on x and y axis.

Draws the same line on two side-by-side axes; only the right-hand axes
(``ax2``) is given an equal x/y scale via ``Axes.axis('equal')``, so the
visual difference between the two panels is the aspect setting alone.
"""
# NOTE(review): numpy and patches are not used in this snippet; presumably
# kept for consistency with sibling examples -- confirm before removing.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Plot data #################
#fig, ax = plt.subplots(figsize=(5, 5))
# Two panels: ax1 keeps the default aspect, ax2 gets the equal scale.
fig, (ax1, ax2) = plt.subplots(ncols=2)
ax1.plot([0, 1])
ax2.plot([0, 1])
ax2.axis('equal')   # <- SAME SCALE ON X AND Y
# Save file #################
plt.savefig("axis_equal.png")
# Plot ######################
plt.show()
|
{"hexsha": "4040d9f4f7700f6f2225db6ea39bd8967be6636b", "size": 492, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/matplotlib/axis_equal.py", "max_stars_repo_name": "jeremiedecock/snippets", "max_stars_repo_head_hexsha": "4bd4e7f459eee610d5cf19f845299ca942ff4b64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:01:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T08:20:04.000Z", "max_issues_repo_path": "python/matplotlib/axis_equal.py", "max_issues_repo_name": "jeremiedecock/snippets", "max_issues_repo_head_hexsha": "4bd4e7f459eee610d5cf19f845299ca942ff4b64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-22T02:36:10.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-22T02:36:10.000Z", "max_forks_repo_path": "python/matplotlib/axis_equal.py", "max_forks_repo_name": "jeremiedecock/snippets", "max_forks_repo_head_hexsha": "4bd4e7f459eee610d5cf19f845299ca942ff4b64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2017-10-31T09:48:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T15:59:45.000Z", "avg_line_length": 16.4, "max_line_length": 57, "alphanum_fraction": 0.5548780488, "include": true, "reason": "import numpy", "num_tokens": 135}
|
# Tests for Rocket.jl's `lowercase()` operator, which maps every value
# emitted by the source observable through `Base.lowercase`.
module RocketLowercaseOperatorTest

using Test
using Rocket

include("../test_helpers.jl")

@testset "operator: lowercase()" begin

    println("Testing: operator lowercase()")

    run_proxyshowcheck("Lowercase", lowercase())

    # Each entry pairs a source pipeline with the expected emission
    # timeline; `c` marks completion and `e(...)` an error event
    # (markers provided by test_helpers.jl).
    run_testset([
        (
            # Characters of the string are lowercased one by one.
            source = from("Hello, world") |> lowercase(),
            values = @ts(['h', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd', c])
        ),
        (
            # An already-completed source just completes.
            source = completed() |> lowercase(),
            values = @ts(c)
        ),
        (
            # Errors pass through untouched.
            source = faulted(String, "e") |> lowercase(),
            values = @ts(e("e")),
            source_type = String
        ),
        (
            # A never-emitting source yields no events.
            source = never() |> lowercase(),
            values = @ts()
        )
    ])

end

end
{"hexsha": "42f35e98e586368dd97387d3fb8bfbc50063dcdc", "size": 778, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/operators/test_operator_lowercase.jl", "max_stars_repo_name": "hgeorgako/Rocket.jl", "max_stars_repo_head_hexsha": "9661dad340e9a079ebd6ed57dcf9e5db31af637f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 109, "max_stars_repo_stars_event_min_datetime": "2020-02-04T00:32:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T06:39:36.000Z", "max_issues_repo_path": "test/operators/test_operator_lowercase.jl", "max_issues_repo_name": "biaslab/Rx.jl", "max_issues_repo_head_hexsha": "ffaf60bbcd3c104ca8132fe22149d3ce2e26be03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-03-18T09:44:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T11:08:28.000Z", "max_forks_repo_path": "test/operators/test_operator_lowercase.jl", "max_forks_repo_name": "biaslab/Rx.jl", "max_forks_repo_head_hexsha": "ffaf60bbcd3c104ca8132fe22149d3ce2e26be03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-02-26T15:49:08.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-06T17:25:43.000Z", "avg_line_length": 21.027027027, "max_line_length": 89, "alphanum_fraction": 0.4524421594, "num_tokens": 187}
|
#!/usr/bin/env python
# NOTE(review): Python 2 script (print statements) using the legacy OpenCV
# `cv` API and the Marsyas audio framework.
import argparse
import marsyas
import marsyas_util
import time
import numpy
import cv
from cv_utils import *
import math
# This program will perform real-time spectral analysis.
# TODO: Put axis indicators in the plots!
#
# The basic functionality is as follows:
# Source -> Window -> Spectra -> Output
#
# These are the parameters we want to set:
# For the analysis:
Window_len = 2048 # The number of samples in each analysis window
Window_step = 512 # The step (in samples) between two consecutive analysis
Zero_padding = 1 # After windowing, the signal will be zero-padded to this value times its length
Min_freq = 0 # Hz. The minimum frequency that will be analyzed
Max_freq = 3000 # Hz. The maximum frequency that will be analyzed
# The following lines will determine the structure of the marsystem
# (a Series network: audio in -> mixdown -> ring buffer -> window -> FFT
# -> power spectrum).
spec_analyzer = ["Series/analysis", ["AudioSource/asrc", "Sum/summation", "ShiftInput/sft", "Windowing/win","Spectrum/spk","PowerSpectrum/pspk"]]
net = marsyas_util.create(spec_analyzer)
snet = marsyas_util.mar_refs(spec_analyzer)
# This is the configuration for the MarSystem
fs = 44100.0
net.updControl("mrs_natural/inSamples", Window_step);
net.updControl("mrs_real/israte", fs);
net.updControl(snet["sft"]+"/mrs_natural/winSize", Window_len);
net.updControl(snet["win"]+"/mrs_natural/zeroPadding", Window_len * (Zero_padding-1));
net.updControl(snet["win"]+"/mrs_string/type", "Hanning"); # "Hamming", "Hanning", "Triangle", "Bartlett", "Blackman"
net.updControl(snet["asrc"]+"/mrs_natural/nChannels", 2);
net.updControl(snet["asrc"]+"/mrs_bool/initAudio", marsyas.MarControlPtr.from_bool(True));
net.updControl(snet["pspk"]+"/mrs_string/spectrumType", "logmagnitude2"); # "power", "magnitude", "decibels", "logmagnitude" (for 1+log(magnitude*1000), "logmagnitude2" (for 1+log10(magnitude)), "powerdensity"
# These variables will avoid having to re-calculate stuff
DFT_SIZE = Window_len * Zero_padding; # This is the size of the DFT
DFT_SIZE_2 = net.getControl(snet["win"]+"/mrs_natural/onSamples").to_natural();
# Debug output: the configured DFT length vs. the one the network reports.
print "Debug parameters"
print DFT_SIZE
print DFT_SIZE_2
freq_bin = fs/DFT_SIZE; # this is the frequency hop for every frequency bin in the DFT
print freq_bin
# This is the size of data that will be shown
visible_time = 10; # Seconds
# Bin indices bracketing the [Min_freq, Max_freq] band.
minK = int(math.floor(Min_freq/freq_bin))
maxK = int(math.ceil(Max_freq/freq_bin))
deltaK = maxK-minK+1
print minK, maxK, deltaK
# Number of analysis hops that fit in the visible time span.
nTime = int(math.ceil(visible_time*(fs*1.0/Window_step)))
# Allocate memory for the image
# (rolling spectrogram buffer: one row per frequency bin, one column per hop)
Int_Buff = numpy.zeros([deltaK, nTime])
#print deltaK
#print nTime
# NOTE(review): `mat` appears unused below; `argparse` and `time` are also
# imported but never used -- confirm before removing.
mat = cv.CreateMat(nTime, deltaK, cv.CV_32FC1)
cv.NamedWindow("Marsyas Spectral Analysis", cv.CV_32FC1 and cv.CV_WINDOW_AUTOSIZE)
try:
    while 1:
        net.tick()
        out = net.getControl("mrs_realvec/processedData").to_realvec()
        out = numpy.array(out)
        # Keep only the bins in the displayed band, flipped so low
        # frequencies end up at the bottom of the image.
        out = out[minK:maxK+1]
        out = out [::-1]
        # Normalize the frame to [0, 1] unless it is silent.
        if numpy.max(out)>0:
            out = out/numpy.max(out)
        else:
            print numpy.max(out)
        if numpy.ndim(out)==1:
            out = numpy.array([out])
        # Scroll the buffer left one hop and append the new column.
        Int_Buff = Int_Buff[:,1:]
        Int_Buff = numpy.hstack([Int_Buff,numpy.transpose(out)])
        im = array2cv(Int_Buff)
        cv.ShowImage("Marsyas Spectral Analysis", im)
        cv.WaitKey(10)
except KeyboardInterrupt:
    print "Halted!"
    pass
|
{"hexsha": "cd1c7fa020c180191116e69a38b6b8268de2ebbc", "size": 3222, "ext": "py", "lang": "Python", "max_stars_repo_path": "marsyas-vamp/marsyas/src/marsyas_python/spectral_analysis.py", "max_stars_repo_name": "jaouahbi/VampPlugins", "max_stars_repo_head_hexsha": "27c2248d1c717417fe4d448cdfb4cb882a8a336a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "marsyas-vamp/marsyas/src/marsyas_python/spectral_analysis.py", "max_issues_repo_name": "jaouahbi/VampPlugins", "max_issues_repo_head_hexsha": "27c2248d1c717417fe4d448cdfb4cb882a8a336a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "marsyas-vamp/marsyas/src/marsyas_python/spectral_analysis.py", "max_forks_repo_name": "jaouahbi/VampPlugins", "max_forks_repo_head_hexsha": "27c2248d1c717417fe4d448cdfb4cb882a8a336a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.202247191, "max_line_length": 211, "alphanum_fraction": 0.7420856611, "include": true, "reason": "import numpy", "num_tokens": 937}
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from plipy.ddl_inpainting import DDLInpaintingConv
from plipy.dpdpl_inpainting import DPDPLInpaintingConv
# Number of random restarts used to sample each model's loss landscape.
NUM_SAMPLES = 100
def psnr(im, imref, d=1):
    """Peak signal-to-noise ratio between `im` and a reference image.

    Parameters
    ----------
    im : numpy.ndarray
        Image to evaluate.
    imref : numpy.ndarray
        Reference image, same shape as `im`.
    d : float, optional
        Peak value (dynamic range) of the signal; 1 for images in [0, 1].

    Returns
    -------
    float
        PSNR in decibels; ``inf`` when the images are identical.
    """
    mse = np.mean((im - imref) ** 2)
    if mse == 0:
        # Identical images: return inf explicitly instead of triggering a
        # divide-by-zero RuntimeWarning.
        return np.inf
    # 10 * log10(peak^2 / MSE); np.log10 replaces the hand-rolled
    # np.log(...) / np.log(10).
    return 10 * np.log10(d * d / mse)
def create_line(pt1, pt2, side=10, k=10):
    """Sample positions along the line through `pt1` and `pt2`, extended by
    `k` unit steps beyond each endpoint.

    Parameters
    ----------
    pt1, pt2 : numpy.ndarray
        Endpoints defining the line.
    side : int, optional
        Number of sample positions.
    k : float, optional
        How far (in units of ||pt1 - pt2|| normalized direction) to extend
        the segment past each endpoint.

    Returns
    -------
    tuple
        ``(alphas, unit, start)`` such that the sampled points are
        ``start + alphas[i] * unit``.
    """
    direction = pt1 - pt2
    direction = direction / np.linalg.norm(direction)
    # Extend the segment by k steps beyond each endpoint.
    start = pt2 - k * direction
    stop = pt1 + k * direction
    span = stop - start
    length = np.linalg.norm(span)
    unit = span / length
    alphas = np.linspace(0, length, side)
    return alphas, unit, start
# Load the test image, convert to grayscale and downsample to 128x128.
path = Path(__file__).resolve().parents[1]
im = Image.open(str(path / "data/flowers.png"))
im_gray = im.convert("L")
# NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10; switch to
# Image.LANCZOS (same filter) when upgrading Pillow.
im_gray_resized = im_gray.resize((128, 128), Image.ANTIALIAS)
rho = 0.5
im_to_process = np.array(im_gray_resized) / 255.
# Binary observation mask: each pixel is kept with probability 1 - rho.
omega = np.random.random(im_to_process.shape)
omega = (omega > rho).astype(float)
# Degraded (masked) image in [0, 1] used as the inpainting input.
im_inpainting = omega * im_to_process
# Synthesis
# Train NUM_SAMPLES synthesis (dictionary-learning) models from random
# initializations and record the loss and reconstruction PSNR of each.
print("Synthesis")
scores = []
psnrs = []
min_score = np.inf
max_score = 0
top_prior = None
bad_prior = None
for i in tqdm(range(NUM_SAMPLES)):
    # Fresh random initialization for each restart.
    ddl = DDLInpaintingConv(50, 20, lambd=0.01, kernel_size=8,
                            learn_steps=False)
    loss = ddl.training_process(im_inpainting[None, :, :], omega[None, :, :])
    D = ddl.get_prior()
    im_result_conv = np.clip(ddl.eval(), 0, 1)[0]
    # NOTE(review): `bad_prior` keeps the LOWEST-loss dictionary and
    # `top_prior` the HIGHEST-loss one -- the naming looks inverted;
    # confirm the intended convention before relying on it.
    if loss < min_score:
        min_score = loss
        bad_prior = D.copy()
    if loss > max_score:
        max_score = loss
        top_prior = D.copy()
    scores.append(loss)
    psnrs.append(psnr(im_result_conv, im_to_process))
# Histogram of losses relative to the best minimum (symlog x-axis, log bins).
scores = np.array(scores) - min_score
logbins = np.logspace(np.log10(scores[np.where(scores > 0)].min()), np.log10(scores.max()), 20)
logbins = np.concatenate([np.array([0]), logbins])
plt.hist(scores, bins=logbins)
plt.xlabel("Loss")
plt.xscale("symlog")
plt.title("Density of minima: Synthesis")
plt.savefig(str(path / "figures/density_synthesis_loss.png"))
plt.clf()
# Histogram of the PSNR spread across restarts (relative to the worst PSNR).
psnrs = np.array(psnrs) - min(psnrs)
logbins = np.logspace(np.log10(psnrs[np.where(psnrs > 0)].min()), np.log10(psnrs.max()), 20)
logbins = np.concatenate([np.array([0]), logbins])
plt.hist(psnrs, bins=logbins)
plt.xlabel("PSNR")
plt.xscale("symlog")
plt.title("Density of minima: Synthesis")
plt.savefig(str(path / "figures/density_synthesis_psnr.png"))
plt.clf()
# 1-D loss landscape along the segment joining the two extreme dictionaries.
alphas, v, S1 = create_line(top_prior, bad_prior, side=100, k=10)
score_line = np.zeros(alphas.shape)
for i in range(alphas.shape[0]):
    # Overwrite the last trained model's dictionary with a point on the line
    # and evaluate the resulting loss.
    ddl.dictionary = torch.nn.Parameter(torch.tensor(S1 + alphas[i] * v,
                                                     dtype=torch.float,
                                                     device=ddl.device))
    ddl.rescale()
    ddl.compute_lipschitz()
    score_line[i] = ddl.cost(ddl.Y_tensor, ddl(ddl.Y_tensor))
plt.plot(alphas, score_line)
plt.xlabel("Distance along unit random vector")
plt.ylabel("Loss")
plt.title("Shapes in 1D: Synthesis")
plt.savefig(str(path / "figures/shape_minima_synthesis.png"))
plt.clf()
# Analysis
# Repeat the landscape experiment for the analysis (deep prior) model.
print("Analysis")
scores = []
psnrs = []
min_score = np.inf
max_score = 0
top_prior = None
bad_prior = None
for i in tqdm(range(NUM_SAMPLES)):
    # Fresh random initialization for each restart.
    dpdpl = DPDPLInpaintingConv(50, 20, lambd=0.01, kernel_size=4,
                                learn_steps=False)
    loss = dpdpl.training_process(im_inpainting[None, :, :], omega[None, :, :])
    P = dpdpl.get_prior()
    im_result_conv = np.clip(dpdpl.eval(), 0, 1)[0]
    # NOTE(review): `bad_prior` tracks the LOWEST loss and `top_prior` the
    # HIGHEST, mirroring the synthesis section; confirm the naming is intended.
    if loss < min_score:
        min_score = loss
        bad_prior = P.copy()
    if loss > max_score:
        max_score = loss
        top_prior = P.copy()
    scores.append(loss)
    psnrs.append(psnr(im_result_conv, im_to_process))
# Histogram of losses relative to the best minimum (symlog x-axis, log bins).
scores = np.array(scores) - min_score
logbins = np.logspace(np.log10(scores[np.where(scores > 0)].min()),
                      np.log10(scores.max()), 20)
logbins = np.concatenate([np.array([0]), logbins])
plt.hist(scores, bins=logbins)
plt.xlabel("Loss")
plt.xscale("symlog")
plt.title("Density of minima: Analysis")
plt.savefig(str(path / "figures/density_analysis_loss.png"))
plt.clf()
# Histogram of the PSNR spread across restarts (relative to the worst PSNR).
psnrs = np.array(psnrs) - min(psnrs)
logbins = np.logspace(np.log10(psnrs[np.where(psnrs > 0)].min()),
                      np.log10(psnrs.max()), 20)
logbins = np.concatenate([np.array([0]), logbins])
plt.hist(psnrs, bins=logbins)
plt.xlabel("PSNR")
plt.xscale("symlog")
plt.title("Density of minima: Analysis")
plt.savefig(str(path / "figures/density_analysis_psnr.png"))
plt.clf()
# 1-D loss landscape along the segment joining the two extreme priors.
alphas, v, S1 = create_line(top_prior, bad_prior, side=100, k=10)
score_line = np.zeros(alphas.shape)
for i in range(alphas.shape[0]):
    # Bugfix: the tensor was previously created on `ddl.device` (the model
    # left over from the synthesis section); use this model's own device.
    dpdpl.prior = torch.nn.Parameter(torch.tensor(S1 + alphas[i] * v,
                                                  dtype=torch.float,
                                                  device=dpdpl.device))
    dpdpl.rescale()
    dpdpl.compute_lipschitz_prior()
    score_line[i] = dpdpl.cost(dpdpl.Y_tensor, dpdpl(dpdpl.Y_tensor))
plt.plot(alphas, score_line)
plt.xlabel("Distance along unit random vector")
plt.ylabel("Loss")
plt.title("Shapes in 1D: Analysis")
plt.savefig(str(path / "figures/shape_minima_analysis.png"))
|
{"hexsha": "49ccf7ff0e01b9e865e04556a7be486707d49106", "size": 5129, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/others/inpainting_minima.py", "max_stars_repo_name": "bmalezieux/plipy", "max_stars_repo_head_hexsha": "35d17cad908219013fa43d81c4aeb6bb46ce9384", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-15T04:51:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T14:20:59.000Z", "max_issues_repo_path": "experiments/others/inpainting_minima.py", "max_issues_repo_name": "bmalezieux/plipy", "max_issues_repo_head_hexsha": "35d17cad908219013fa43d81c4aeb6bb46ce9384", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/others/inpainting_minima.py", "max_forks_repo_name": "bmalezieux/plipy", "max_forks_repo_head_hexsha": "35d17cad908219013fa43d81c4aeb6bb46ce9384", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5752688172, "max_line_length": 95, "alphanum_fraction": 0.6535387015, "include": true, "reason": "import numpy", "num_tokens": 1455}
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.math import psd_kernels
@test_util.test_all_tf_execution_regimes
class StudentTProcessRegressionModelTest(test_util.TestCase):
  """Tests for tfd.StudentTProcessRegressionModel (STPRM)."""

  def testInstantiate(self):
    """Batch shapes broadcast across kernel/noise; samples have full shape."""
    df = np.float64(1.)
    # 5x5 grid of index points in R^2 and flatten to 25x2
    index_points = np.linspace(-4., 4., 5, dtype=np.float64)
    index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)
    index_points = np.reshape(index_points, [-1, 2])
    # ==> shape = [25, 2]

    # Kernel with batch_shape [2, 4, 1, 3]
    amplitude = np.array([1., 2.], np.float64).reshape([2, 1, 1, 1])
    length_scale = np.array([.1, .2, .3, .4], np.float64).reshape(
        [1, 4, 1, 1])
    observation_noise_variance = np.array(
        [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 1, 1, 3])
    observation_index_points = (
        np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))
    observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)

    def cholesky_fn(x):
      # Custom factorization (diagonal inflated by 1) used to verify the
      # user-supplied cholesky_fn is plumbed through unchanged.
      return tf.linalg.cholesky(
          tf.linalg.set_diag(x, tf.linalg.diag_part(x) + 1.))

    kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
    stprm = tfd.StudentTProcessRegressionModel(
        df=df,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance,
        cholesky_fn=cholesky_fn)

    batch_shape = [2, 4, 1, 3]
    event_shape = [25]
    sample_shape = [7, 2]

    self.assertIs(cholesky_fn, stprm.cholesky_fn)

    samples = stprm.sample(sample_shape, seed=test_util.test_seed())
    self.assertAllEqual(stprm.batch_shape_tensor(), batch_shape)
    self.assertAllEqual(stprm.event_shape_tensor(), event_shape)
    self.assertAllEqual(self.evaluate(samples).shape,
                        sample_shape + batch_shape + event_shape)

  def testMeanSameAsGPRM(self):
    """The STPRM posterior mean matches the Gaussian-process analogue."""
    df = np.float64(3.)
    index_points = np.linspace(-4., 4., 5, dtype=np.float64)
    index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)
    index_points = np.reshape(index_points, [-1, 2])

    # Kernel with batch_shape [5, 3]
    amplitude = np.array([1., 2., 3., 4., 5.], np.float64).reshape([5, 1])
    length_scale = np.array([.1, .2, .3], np.float64).reshape(
        [1, 3])
    observation_noise_variance = np.array(
        [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 3])
    observation_index_points = (
        np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))
    observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)

    kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
    stprm = tfd.StudentTProcessRegressionModel(
        df=df,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance)
    gprm = tfd.GaussianProcessRegressionModel(
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance)

    self.assertAllClose(self.evaluate(stprm.mean()), self.evaluate(gprm.mean()))

  def testLogProbNearGPRM(self):
    """As df -> inf, the Student-T process log_prob approaches the GP's."""
    # For large df, the log_prob calculations should be the same.
    df = np.float64(1e6)
    index_points = np.linspace(-4., 4., 5, dtype=np.float64)
    index_points = np.stack(np.meshgrid(index_points, index_points), axis=-1)
    index_points = np.reshape(index_points, [-1, 2])

    # Kernel with batch_shape [5, 3]
    amplitude = np.array([1., 2., 3., 4., 5.], np.float64).reshape([5, 1])
    length_scale = np.array([.1, .2, .3], np.float64).reshape(
        [1, 3])
    observation_noise_variance = np.array(
        [1e-5, 1e-6, 1e-9], np.float64).reshape([1, 3])
    observation_index_points = (
        np.random.uniform(-1., 1., (3, 7, 2)).astype(np.float64))
    observations = np.random.uniform(-1., 1., (3, 7)).astype(np.float64)

    kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
    stprm = tfd.StudentTProcessRegressionModel(
        df=df,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance)
    gprm = tfd.GaussianProcessRegressionModel(
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance)

    x = np.linspace(-3., 3., 25)

    self.assertAllClose(
        self.evaluate(stprm.log_prob(x)),
        self.evaluate(gprm.log_prob(x)), rtol=2e-5)

  def testMeanVarianceAndCovariancePrecomputed(self):
    """precompute_regression_model agrees with the direct constructor."""
    amplitude = np.array([1., 2.], np.float64).reshape([2, 1])
    length_scale = np.array([.1, .2, .3], np.float64).reshape([1, 3])
    observation_noise_variance = np.array([1e-9], np.float64)
    df = np.float64(3.)

    observation_index_points = (
        np.random.uniform(-1., 1., (1, 1, 7, 2)).astype(np.float64))
    observations = np.random.uniform(-1., 1., (1, 1, 7)).astype(np.float64)

    index_points = np.random.uniform(-1., 1., (6, 2)).astype(np.float64)

    kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)
    stprm = tfd.StudentTProcessRegressionModel(
        df=df,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance,
        validate_args=True)

    precomputed_stprm = tfd.StudentTProcessRegressionModel.precompute_regression_model(
        df=df,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance,
        validate_args=True)

    self.assertAllClose(self.evaluate(precomputed_stprm.covariance()),
                        self.evaluate(stprm.covariance()))
    self.assertAllClose(self.evaluate(precomputed_stprm.variance()),
                        self.evaluate(stprm.variance()))
    self.assertAllClose(self.evaluate(precomputed_stprm.mean()),
                        self.evaluate(stprm.mean()))

  @test_util.disable_test_for_backend(
      disable_numpy=True, disable_jax=True,
      reason='Numpy and JAX have no notion of CompositeTensor/saved_model')
  def testPrecomputedCompositeTensor(self):
    """A precomputed model round-trips through tf.nest flatten/pack."""
    amplitude = np.array([1., 2.], np.float64).reshape([2, 1])
    length_scale = np.array([.1, .2, .3], np.float64).reshape([1, 3])
    observation_noise_variance = np.array([1e-9], np.float64)

    observation_index_points = (
        np.random.uniform(-1., 1., (1, 1, 7, 2)).astype(np.float64))
    observations = np.random.uniform(-1., 1., (1, 1, 7)).astype(np.float64)

    index_points = np.random.uniform(-1., 1., (6, 2)).astype(np.float64)

    kernel = psd_kernels.ExponentiatedQuadratic(amplitude, length_scale)

    precomputed_stprm = tfd.StudentTProcessRegressionModel.precompute_regression_model(
        df=3.,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=observation_index_points,
        observations=observations,
        observation_noise_variance=observation_noise_variance,
        validate_args=True)

    flat = tf.nest.flatten(precomputed_stprm, expand_composites=True)
    unflat = tf.nest.pack_sequence_as(
        precomputed_stprm, flat, expand_composites=True)
    self.assertIsInstance(unflat, tfd.StudentTProcessRegressionModel)
    # Check that we don't recompute the divisor matrix on flattening /
    # unflattening.
    self.assertIs(
        precomputed_stprm.kernel.schur_complement._precomputed_divisor_matrix_cholesky,  # pylint:disable=line-too-long
        unflat.kernel.schur_complement._precomputed_divisor_matrix_cholesky)

    # TODO(b/196219597): Enable this test once STPRM works across TF function
    # boundaries.
    # index_observations = np.random.uniform(-1., 1., (6,)).astype(np.float64)
    # @tf.function
    # def log_prob(d):
    #   return d.log_prob(index_observations)

    # lp = self.evaluate(precomputed_stprm.log_prob(index_observations))

    # self.assertAllClose(lp, self.evaluate(log_prob(precomputed_stprm)))
    # self.assertAllClose(lp, self.evaluate(log_prob(unflat)))

  def testEmptyDataMatchesStPPrior(self):
    """With no observations, the regression model reduces to the StP prior."""
    df = np.float64(3.5)
    amp = np.float64(.5)
    len_scale = np.float64(.2)
    index_points = np.random.uniform(-1., 1., (10, 1)).astype(np.float64)

    # k_xx - k_xn @ (k_nn + sigma^2) @ k_nx + sigma^2
    mean_fn = lambda x: x[:, 0]**2

    kernel = psd_kernels.ExponentiatedQuadratic(amp, len_scale)
    stp = tfd.StudentTProcess(
        df,
        kernel,
        index_points,
        mean_fn=mean_fn,
        validate_args=True)

    stprm_nones = tfd.StudentTProcessRegressionModel(
        df,
        kernel=kernel,
        index_points=index_points,
        mean_fn=mean_fn,
        validate_args=True)

    stprm_zero_shapes = tfd.StudentTProcessRegressionModel(
        df,
        kernel=kernel,
        index_points=index_points,
        observation_index_points=tf.ones([0, 1], tf.float64),
        observations=tf.ones([0], tf.float64),
        mean_fn=mean_fn,
        validate_args=True)

    # Both "no data" spellings (None and zero-length tensors) must match.
    for stprm in [stprm_nones, stprm_zero_shapes]:
      self.assertAllClose(
          self.evaluate(stp.mean()), self.evaluate(stprm.mean()))
      self.assertAllClose(self.evaluate(stp.covariance()),
                          self.evaluate(stprm.covariance()))
      self.assertAllClose(self.evaluate(stp.variance()),
                          self.evaluate(stprm.variance()))

      observations = np.random.uniform(-1., 1., 10).astype(np.float64)
      self.assertAllClose(self.evaluate(stp.log_prob(observations)),
                          self.evaluate(stprm.log_prob(observations)))

  def testCopy(self):
    """copy() overrides selected parameters and shares the rest."""
    # 5 random index points in R^2
    index_points_1 = np.random.uniform(-4., 4., (5, 2)).astype(np.float32)
    # 10 random index points in R^2
    index_points_2 = np.random.uniform(-4., 4., (10, 2)).astype(np.float32)

    observation_index_points_1 = (
        np.random.uniform(-4., 4., (7, 2)).astype(np.float32))
    observation_index_points_2 = (
        np.random.uniform(-4., 4., (9, 2)).astype(np.float32))

    observations_1 = np.random.uniform(-1., 1., 7).astype(np.float32)
    observations_2 = np.random.uniform(-1., 1., 9).astype(np.float32)

    # ==> shape = [6, 25, 2]
    mean_fn = lambda x: np.array([0.], np.float32)
    kernel_1 = psd_kernels.ExponentiatedQuadratic()
    kernel_2 = psd_kernels.ExpSinSquared()

    stprm1 = tfd.StudentTProcessRegressionModel(
        df=5.,
        kernel=kernel_1,
        index_points=index_points_1,
        observation_index_points=observation_index_points_1,
        observations=observations_1,
        mean_fn=mean_fn,
        validate_args=True)
    stprm2 = stprm1.copy(
        kernel=kernel_2,
        index_points=index_points_2,
        observation_index_points=observation_index_points_2,
        observations=observations_2)

    precomputed_stprm1 = (
        tfd.StudentTProcessRegressionModel.precompute_regression_model(
            df=5.,
            kernel=kernel_1,
            index_points=index_points_1,
            observation_index_points=observation_index_points_1,
            observations=observations_1,
            mean_fn=mean_fn,
            validate_args=True))
    precomputed_stprm2 = precomputed_stprm1.copy(index_points=index_points_2)
    self.assertIs(precomputed_stprm1.mean_fn, precomputed_stprm2.mean_fn)
    self.assertIs(precomputed_stprm1.kernel, precomputed_stprm2.kernel)

    event_shape_1 = [5]
    event_shape_2 = [10]

    self.assertIsInstance(stprm1.kernel.schur_complement.base_kernel,
                          psd_kernels.ExponentiatedQuadratic)
    self.assertIsInstance(stprm2.kernel.schur_complement.base_kernel,
                          psd_kernels.ExpSinSquared)

    self.assertAllEqual(self.evaluate(stprm1.batch_shape_tensor()),
                        self.evaluate(stprm2.batch_shape_tensor()))
    self.assertAllEqual(self.evaluate(stprm1.event_shape_tensor()),
                        event_shape_1)
    self.assertAllEqual(self.evaluate(stprm2.event_shape_tensor()),
                        event_shape_2)
    self.assertAllEqual(self.evaluate(stprm1.index_points), index_points_1)
    self.assertAllEqual(self.evaluate(stprm2.index_points), index_points_2)
if __name__ == '__main__':
  # Delegate to TFP's test main so execution-regime decorators are honored.
  test_util.main()
|
{"hexsha": "f7188051fe659ac1411c3c3c3d773672836caf24", "size": 13881, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_probability/python/distributions/student_t_process_regression_model_test.py", "max_stars_repo_name": "jakee417/probability-1", "max_stars_repo_head_hexsha": "ae7117f37ac441bc7a888167ea23e5e620c5bcde", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3670, "max_stars_repo_stars_event_min_datetime": "2018-02-14T03:29:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:19:52.000Z", "max_issues_repo_path": "tensorflow_probability/python/distributions/student_t_process_regression_model_test.py", "max_issues_repo_name": "jakee417/probability-1", "max_issues_repo_head_hexsha": "ae7117f37ac441bc7a888167ea23e5e620c5bcde", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1395, "max_issues_repo_issues_event_min_datetime": "2018-02-24T02:28:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:12:06.000Z", "max_forks_repo_path": "tensorflow_probability/python/distributions/student_t_process_regression_model_test.py", "max_forks_repo_name": "jakee417/probability-1", "max_forks_repo_head_hexsha": "ae7117f37ac441bc7a888167ea23e5e620c5bcde", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1135, "max_forks_repo_forks_event_min_datetime": "2018-02-14T01:51:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T02:24:11.000Z", "avg_line_length": 40.4693877551, "max_line_length": 119, "alphanum_fraction": 0.6809307687, "include": true, "reason": "import numpy", "num_tokens": 3480}
|
include("train.jl")
using UnicodePlots

# Read the raw case names, one name per line.
raw = []
open("case_names/data.txt") do f
    while !eof(f)
        push!(raw, readline(f))
    end # while
end # do
#println(raw[1:20])

# Alphabet used for the one-hot encoding; slot length(chars)+1 is the
# end-of-name marker. NOTE(review): a character outside `chars` makes
# `findfirst` return `nothing` and error below -- the data is assumed to
# contain only [A-Z -] after uppercasing.
#chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ -[]()0123456789"
chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ -"
raw = uppercase.(raw)

# One-hot encode every name; each name gets one extra row for end-of-name.
binary_split = [[[0 #==1/(length(chars)*2)==# for _ in 1:(length(chars)+1)] for i = 1:length(x)+1] for x in raw]
for s = 1:length(binary_split)
    for c = 1:length(raw[s])
        binary_split[s][c][findfirst(raw[s][c], chars)] = 1
    end # for
    binary_split[s][end][end] = 1
end # for

input_chars = 3

# Build sliding-window training pairs: the previous `input_chars` characters
# (zero-padded at the start of a name) are the input that predicts the next
# character s[c+1].
DMatrix = Vector{Float64}[]
Y = Vector{Float64}[]
for s in binary_split
    for c = 1:length(s) - 1
        push!(DMatrix, [])
        push!(Y, s[c+1])
        # Pad with zero vectors when fewer than input_chars characters exist.
        if c < input_chars
            for i = 1:(input_chars - c)
                append!( DMatrix[end], zeros(length(chars)) )
            end # for
        end # if
        # Append the one-hot codes of the characters in the window, dropping
        # the end-of-name slot. (Bugfix: previously appended `s[c]` on every
        # iteration instead of `s[i]`, duplicating the current character and
        # discarding the preceding context.)
        for i = (max(c - input_chars + 1, 1)):c
            append!( DMatrix[end], s[i][1:end-1] )
        end # for
    end # for
end # for

# Network dimensions: input window, three hidden layers, one-hot output.
input_dim = (length(chars)) * input_chars
hidden_dim = (length(chars)+1) * 2
output_dim = (length(chars)+1)
dims = [hidden_dim for i in 1:3]
prepend!(dims, input_dim)
append!(dims, output_dim)
println(string("Dimensions: ", dims))

# Train on the last 500 windows (data reversed so recent entries come first).
nn_results = train_network(dims, reverse(DMatrix)[1:500], reverse(Y)[1:500], chars, epochs=50, η = 0.001)
|
{"hexsha": "1c16d1fe27172b9bf2d9e74544d951790e515aa7", "size": 1595, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "case_names/main.jl", "max_stars_repo_name": "Squalm/NeuralNetworks", "max_stars_repo_head_hexsha": "913a43e419c768e3ff29baab96c1f2871bc5e5bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "case_names/main.jl", "max_issues_repo_name": "Squalm/NeuralNetworks", "max_issues_repo_head_hexsha": "913a43e419c768e3ff29baab96c1f2871bc5e5bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "case_names/main.jl", "max_forks_repo_name": "Squalm/NeuralNetworks", "max_forks_repo_head_hexsha": "913a43e419c768e3ff29baab96c1f2871bc5e5bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.1527777778, "max_line_length": 112, "alphanum_fraction": 0.5811912226, "num_tokens": 494}
|
#!/usr/bin/env python3
import cv2
import numpy as np
import depthai as dai
# Weights to use when blending depth/rgb image (should equal 1.0)
# Module-level state mutated by the "RGB Weight %" trackbar callback;
# rgbWeight + depthWeight is kept at exactly 1.0.
rgbWeight = 0.4
depthWeight = 0.6
def updateBlendWeights(percent_rgb):
    """
    Update the rgb and depth weights used to blend depth/rgb image

    @param[in] percent_rgb The rgb weight expressed as a percentage (0..100)
    """
    global rgbWeight, depthWeight
    # Convert the trackbar percentage to a [0, 1] fraction; the two weights
    # always sum to 1.0.
    new_rgb = float(percent_rgb) / 100.0
    rgbWeight = new_rgb
    depthWeight = 1.0 - new_rgb
# Optional. If set (True), the ColorCamera is downscaled from 1080p to 720p.
# Otherwise (False), the aligned depth is automatically upscaled to 1080p
downscaleColor = True
fps = 30
# The disparity is computed at this resolution, then upscaled to RGB resolution
monoResolution = dai.MonoCameraProperties.SensorResolution.THE_720_P
# Create pipeline
pipeline = dai.Pipeline()
queueNames = []
# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
left = pipeline.create(dai.node.MonoCamera)
right = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
rgbOut = pipeline.create(dai.node.XLinkOut)
disparityOut = pipeline.create(dai.node.XLinkOut)
# Host-visible stream names; also used as keys for the output queues below.
rgbOut.setStreamName("rgb")
queueNames.append("rgb")
disparityOut.setStreamName("disp")
queueNames.append("disp")
#Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
camRgb.setFps(fps)
# ISP scale 2/3 turns 1080p into 720p so RGB matches the mono resolution.
if downscaleColor: camRgb.setIspScale(2, 3)
# For now, RGB needs fixed focus to properly align with depth.
# This value was used during calibration
camRgb.initialControl.setManualFocus(130)
left.setResolution(monoResolution)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
left.setFps(fps)
right.setResolution(monoResolution)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
right.setFps(fps)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# LR-check is required for depth alignment
stereo.setLeftRightCheck(True)
# Align the disparity output to the RGB camera's viewpoint.
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
# Linking
camRgb.isp.link(rgbOut.input)
left.out.link(stereo.left)
right.out.link(stereo.right)
stereo.disparity.link(disparityOut.input)
# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    frameRgb = None
    frameDisp = None

    # Configure windows; trackbar adjusts blending ratio of rgb/depth
    rgbWindowName = "rgb"
    depthWindowName = "depth"
    blendedWindowName = "rgb-depth"
    cv2.namedWindow(rgbWindowName)
    cv2.namedWindow(depthWindowName)
    cv2.namedWindow(blendedWindowName)
    cv2.createTrackbar('RGB Weight %', blendedWindowName, int(rgbWeight*100), 100, updateBlendWeights)

    while True:
        latestPacket = {}
        latestPacket["rgb"] = None
        latestPacket["disp"] = None

        # Block until at least one stream has data, then keep only the
        # newest packet per stream (older packets are dropped).
        queueEvents = device.getQueueEvents(("rgb", "disp"))
        for queueName in queueEvents:
            packets = device.getOutputQueue(queueName).tryGetAll()
            if len(packets) > 0:
                latestPacket[queueName] = packets[-1]

        if latestPacket["rgb"] is not None:
            frameRgb = latestPacket["rgb"].getCvFrame()
            cv2.imshow(rgbWindowName, frameRgb)

        if latestPacket["disp"] is not None:
            frameDisp = latestPacket["disp"].getFrame()
            maxDisparity = stereo.initialConfig.getMaxDisparity()
            # Optional, extend range 0..95 -> 0..255, for a better visualisation
            if 1: frameDisp = (frameDisp * 255. / maxDisparity).astype(np.uint8)
            # Optional, apply false colorization
            if 1: frameDisp = cv2.applyColorMap(frameDisp, cv2.COLORMAP_HOT)
            frameDisp = np.ascontiguousarray(frameDisp)
            cv2.imshow(depthWindowName, frameDisp)

        # Blend when both received
        if frameRgb is not None and frameDisp is not None:
            # Need to have both frames in BGR format before blending
            if len(frameDisp.shape) < 3:
                frameDisp = cv2.cvtColor(frameDisp, cv2.COLOR_GRAY2BGR)
            blended = cv2.addWeighted(frameRgb, rgbWeight, frameDisp, depthWeight, 0)
            cv2.imshow(blendedWindowName, blended)
            # Reset so the next blend uses a fresh pair of frames.
            frameRgb = None
            frameDisp = None

        if cv2.waitKey(1) == ord('q'):
            break
|
{"hexsha": "37fb67cde274c3fafcced47adf57af4997c801d3", "size": 4309, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/StereoDepth/rgb_depth_aligned.py", "max_stars_repo_name": "MambaWong/depthai-python-1", "max_stars_repo_head_hexsha": "0d15abd77fd82b4a70e096ea5bb99237a17c9862", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-03-10T15:10:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-01T22:58:04.000Z", "max_issues_repo_path": "examples/StereoDepth/rgb_depth_aligned.py", "max_issues_repo_name": "MambaWong/depthai-python-1", "max_issues_repo_head_hexsha": "0d15abd77fd82b4a70e096ea5bb99237a17c9862", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-03-11T20:42:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-10T11:53:49.000Z", "max_forks_repo_path": "examples/StereoDepth/rgb_depth_aligned.py", "max_forks_repo_name": "MambaWong/depthai-python-1", "max_forks_repo_head_hexsha": "0d15abd77fd82b4a70e096ea5bb99237a17c9862", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-23T19:20:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-12T00:20:34.000Z", "avg_line_length": 33.6640625, "max_line_length": 102, "alphanum_fraction": 0.7150150847, "include": true, "reason": "import numpy", "num_tokens": 1078}
|
# Copyright (c) 2012, Nicolo Fusi
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import unittest
import numpy as np
import GPy
from ..models import BayesianGPLVM
class BGPLVMTests(unittest.TestCase):
    """Numerical-gradient checks for BayesianGPLVM under several kernels.

    Each test draws `input_dim` samples of an N-dimensional GP with a "data"
    kernel, centers them, builds a BayesianGPLVM with a "model" kernel (the
    one named by the test) and asserts that analytic gradients agree with
    finite differences via ``m.checkgrad()``.
    """

    def _check_gplvm_grad(self, data_kernel, model_kernel,
                          N=10, num_inducing=3, input_dim=2):
        # Shared body of all kernel tests: sample data, fit, gradient-check.
        X = np.random.rand(N, input_dim)
        K = data_kernel.K(X)
        Y = np.random.multivariate_normal(np.zeros(N), K, input_dim).T
        Y -= Y.mean(axis=0)
        m = BayesianGPLVM(Y, input_dim, kernel=model_kernel,
                          num_inducing=num_inducing)
        m.randomize()
        self.assertTrue(m.checkgrad())

    def test_bias_kern(self):
        input_dim = 2
        self._check_gplvm_grad(
            GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001),
            GPy.kern.bias(input_dim) + GPy.kern.white(input_dim, 0.00001))

    def test_linear_kern(self):
        input_dim = 2
        self._check_gplvm_grad(
            GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001),
            GPy.kern.linear(input_dim) + GPy.kern.white(input_dim, 0.00001))

    def test_rbf_kern(self):
        input_dim = 2
        self._check_gplvm_grad(
            GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001),
            GPy.kern.rbf(input_dim) + GPy.kern.white(input_dim, 0.00001))

    def test_rbf_bias_kern(self):
        input_dim = 2
        self._check_gplvm_grad(
            GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)
            + GPy.kern.white(input_dim, 0.00001),
            GPy.kern.rbf(input_dim) + GPy.kern.bias(input_dim)
            + GPy.kern.white(input_dim, 0.00001))

    def test_rbf_line_kern(self):
        # Bugfix: the model kernel previously used bias instead of linear,
        # so the rbf+linear combination named by this test was never actually
        # gradient-checked.
        input_dim = 2
        self._check_gplvm_grad(
            GPy.kern.rbf(input_dim) + GPy.kern.linear(input_dim)
            + GPy.kern.white(input_dim, 0.00001),
            GPy.kern.rbf(input_dim) + GPy.kern.linear(input_dim)
            + GPy.kern.white(input_dim, 0.00001))

    def test_linear_bias_kern(self):
        # Larger problem: N=30, 5 inducing points, 4 latent dimensions.
        input_dim = 4
        self._check_gplvm_grad(
            GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim)
            + GPy.kern.white(input_dim, 0.00001),
            GPy.kern.linear(input_dim) + GPy.kern.bias(input_dim)
            + GPy.kern.white(input_dim, 0.00001),
            N=30, num_inducing=5, input_dim=input_dim)
if __name__ == "__main__":
    # Parenthesized print works under both Python 2 and Python 3; the original
    # bare print statement is a syntax error on Python 3.
    print("Running unit tests, please be (very) patient...")
    unittest.main()
|
{"hexsha": "1192448a9772dc394bff01dee62e3333b7ef8bc3", "size": 3661, "ext": "py", "lang": "Python", "max_stars_repo_path": "GPy/testing/bgplvm_tests.py", "max_stars_repo_name": "rokroskar/GPy", "max_stars_repo_head_hexsha": "0f8dbba56d480902c86cfe8bad9e79d9eabae009", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-08-04T21:28:11.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-04T21:28:11.000Z", "max_issues_repo_path": "GPy/testing/bgplvm_tests.py", "max_issues_repo_name": "rokroskar/GPy", "max_issues_repo_head_hexsha": "0f8dbba56d480902c86cfe8bad9e79d9eabae009", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GPy/testing/bgplvm_tests.py", "max_forks_repo_name": "rokroskar/GPy", "max_forks_repo_head_hexsha": "0f8dbba56d480902c86cfe8bad9e79d9eabae009", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5697674419, "max_line_length": 103, "alphanum_fraction": 0.6255121551, "include": true, "reason": "import numpy", "num_tokens": 1126}
|
using OrderedBinning
using Test
@testset "non-strinct, zero tolerance" begin
boundaries = 0:3
ob = ordered_bins(boundaries; strict = false)
@test ob(-1) == 0
@test ob(0) == 1
@test ob(0.5) == 1
@test ob(3) == 3
@test ob(4) == 4
for _ in 1:100
x = rand(Bool) ? rand(0:3) : rand() * 3.0
i = ob(x)
@test boundaries[i] ≤ x ≤ boundaries[i + 1]
if x < 3
@test x < boundaries[i + 1]
end
end
end
@testset "strint, 0.5 tolerance" begin
boundaries = 0:3
ob = ordered_bins(boundaries; strict = true, tolerance = 0.5)
@test_throws DomainError ob(-1)
@test ob(0) == 1
@test ob(0.5) == 1
@test ob(3) == 3
@test ob(3.5) == 3
@test_throws DomainError ob(4)
for _ in 1:100
x = rand(Bool) ? rand(0:3) : rand() * 3.0
i = ob(x)
@test boundaries[i] ≤ x ≤ boundaries[i + 1]
if x < 3
@test x < boundaries[i + 1]
end
end
end
|
{"hexsha": "9960db7358fcc2b6c3a0e6f0bc10ea628b8b76f7", "size": 983, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "tpapp/OrderedBinning.jl", "max_stars_repo_head_hexsha": "517821fdfa122e7952f2725e652c44e996900a06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "tpapp/OrderedBinning.jl", "max_issues_repo_head_hexsha": "517821fdfa122e7952f2725e652c44e996900a06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "tpapp/OrderedBinning.jl", "max_forks_repo_head_hexsha": "517821fdfa122e7952f2725e652c44e996900a06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.575, "max_line_length": 65, "alphanum_fraction": 0.515768057, "num_tokens": 368}
|
[STATEMENT]
lemma atom_in_atom_image [simp]: "atom j \<in> atom ` V \<longleftrightarrow> j \<in> V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (atom j \<in> atom ` V) = (j \<in> V)
[PROOF STEP]
by auto
|
{"llama_tokens": 89, "file": "Incompleteness_Pseudo_Coding", "length": 1}
|
# Wrapper around a CSFML network packet (sfPacket*).
type Packet
    # Raw pointer to the underlying sfPacket C object.
    ptr::Ptr{Void}
    # Wrap an existing sfPacket pointer. `destroy` is registered as a
    # finalizer so the C-side object is freed when the wrapper is
    # garbage collected.
    function Packet(ptr::Ptr{Void})
        p = new(ptr)
        finalizer(p, destroy)
        p
    end
end
# Allocate a fresh, empty packet through the CSFML C API.
Packet() = Packet(ccall((:sfPacket_create, libcsfml_network), Ptr{Void}, ()))
# Return a new Packet backed by a C-side copy of `pkt`'s data.
function copy(pkt::Packet)
    duplicate = ccall((:sfPacket_copy, libcsfml_network), Ptr{Void}, (Ptr{Void},), pkt.ptr)
    return Packet(duplicate)
end
# Free the underlying sfPacket; registered as the wrapper's finalizer
# by the Packet inner constructor.
destroy(pkt::Packet) =
    ccall((:sfPacket_destroy, libcsfml_network), Void, (Ptr{Void},), pkt.ptr)
# Remove all data currently stored in the packet (sfPacket_clear).
clear(pkt::Packet) =
    ccall((:sfPacket_clear, libcsfml_network), Void, (Ptr{Void},), pkt.ptr)
# Return the packet's data size as reported by sfPacket_getDataSize.
function get_data_size(pkt::Packet)
    nbytes = ccall((:sfPacket_getDataSize, libcsfml_network), Int64, (Ptr{Void},), pkt.ptr)
    return nbytes
end
# Read the next boolean from the packet (C side returns a UInt8).
function read_bool(pkt::Packet)
    raw = ccall((:sfPacket_readBool, libcsfml_network), UInt8, (Ptr{Void},), pkt.ptr)
    return Bool(raw)
end
# Read the next string from the packet.
#
# NOTE(review): this goes through the sjPacket_readString shim in the
# package's own "libjuliasfml" glue library rather than calling
# sfPacket_readString in libcsfml directly — presumably because the CSFML
# function writes into a caller-supplied buffer; confirm against the shim's
# C source. The initial `str = ""` is handed to the shim as the Ptr{Cchar}
# argument, and the returned C string is converted with `bytestring`.
function read_string(packet::Packet)
    str = ""
    str = bytestring(ccall((:sjPacket_readString, "libjuliasfml"), Ptr{Cchar}, (Ptr{Void}, Ptr{Cchar},), packet.ptr, str))
    return str
end
# Read the next 32-bit integer from the packet, widened to a native Int.
function read_int(pkt::Packet)
    raw = ccall((:sfPacket_readInt32, libcsfml_network), Int32, (Ptr{Void},), pkt.ptr)
    return Int(raw)
end
# Read the next single-precision float from the packet.
read_float(pkt::Packet) =
    ccall((:sfPacket_readFloat, libcsfml_network), Cfloat, (Ptr{Void},), pkt.ptr)
# Read the next double-precision float from the packet.
read_double(pkt::Packet) =
    ccall((:sfPacket_readDouble, libcsfml_network), Cdouble, (Ptr{Void},), pkt.ptr)
# Append a boolean to the packet, marshalled as an Int32 by the ccall.
write(pkt::Packet, flag::Bool) =
    ccall((:sfPacket_writeBool, libcsfml_network), Void, (Ptr{Void}, Int32,), pkt.ptr, flag)
# Append an integer to the packet; the ccall converts it to Int32.
write(pkt::Packet, n::Integer) =
    ccall((:sfPacket_writeInt32, libcsfml_network), Void, (Ptr{Void}, Int32,), pkt.ptr, n)
# Append a single-precision float to the packet.
write(pkt::Packet, x::Cfloat) =
    ccall((:sfPacket_writeFloat, libcsfml_network), Void, (Ptr{Void}, Cfloat,), pkt.ptr, x)
# Append a double-precision float to the packet.
write(pkt::Packet, x::Cdouble) =
    ccall((:sfPacket_writeDouble, libcsfml_network), Void, (Ptr{Void}, Cdouble,), pkt.ptr, x)
# Append a string to the packet as a NUL-terminated C string.
write(pkt::Packet, s::AbstractString) =
    ccall((:sfPacket_writeString, libcsfml_network), Void, (Ptr{Void}, Ptr{Cchar},), pkt.ptr, s)
|
{"hexsha": "e655d69d418a27c2926c12e8a6828a2661381550", "size": 2225, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/julia/Network/packet.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SFML.jl-d50d9232-525f-5d35-8703-6ae49672cafe", "max_stars_repo_head_hexsha": "368097db31544432de82b92c40e10080b5f8eea1", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 99, "max_stars_repo_stars_event_min_datetime": "2015-03-27T22:45:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T21:19:24.000Z", "max_issues_repo_path": "src/julia/Network/packet.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/SFML.jl-d50d9232-525f-5d35-8703-6ae49672cafe", "max_issues_repo_head_hexsha": "368097db31544432de82b92c40e10080b5f8eea1", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2015-04-28T03:23:58.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-10T09:30:10.000Z", "max_forks_repo_path": "src/julia/Network/packet.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/SFML.jl-d50d9232-525f-5d35-8703-6ae49672cafe", "max_forks_repo_head_hexsha": "368097db31544432de82b92c40e10080b5f8eea1", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2015-06-03T12:30:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T23:23:52.000Z", "avg_line_length": 30.4794520548, "max_line_length": 122, "alphanum_fraction": 0.7168539326, "num_tokens": 651}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.