import sys
sys.path.append('..')
import os
import glob
from tqdm import tqdm
import time
import shutil
import json
import numpy as np
from converter.nii_reader import Nii_Reader
from converter.utils import save_as_hdf5
# Different samples are saved in different folders
def nii_to_hdf5(input_path, save_path, annotation_list, target_format, resample=True):
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
shutil.rmtree(save_path)
os.makedirs(save_path)
image_path = os.path.join(input_path,'MR')
label_path = os.path.join(input_path,'Mask')
start = time.time()
for item in tqdm(os.scandir(image_path)):
lab_path = os.path.join(label_path,'mask_' + item.name.lower())
try:
reader = Nii_Reader(item.path, target_format, lab_path, annotation_list, trunc_flag=False, normalize_flag=False)
except:
print("Error data: %s" % item.name.split('.')[0])
continue
else:
if resample:
images = reader.get_resample_images().astype(np.int16)
labels = reader.get_resample_labels().astype(np.uint8)
else:
images = reader.get_raw_images().astype(np.int16)
labels = reader.get_raw_labels().astype(np.uint8)
hdf5_path = os.path.join(save_path, item.name.split('.')[0] + '.hdf5')
save_as_hdf5(images, hdf5_path, 'image')
save_as_hdf5(labels, hdf5_path, 'label')
print("run time: %.3f" % (time.time() - start))
if __name__ == "__main__":
json_file = './static_files/spine.json'
with open(json_file, 'r') as fp:
info = json.load(fp)
nii_to_hdf5(info['nii_path'], info['npy_path'], info['annotation_list'], info['target_format'], resample=False)
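# --------------------------------------------------------------------------
# Hypothetical example of the config file read above. The key names come
# from the nii_to_hdf5() call in __main__; every value below is made up for
# illustration, and the exact meaning of 'annotation_list' and
# 'target_format' depends on converter.nii_reader.Nii_Reader.
def write_example_config(path='./static_files/spine.json'):
    example_info = {
        'nii_path': '/data/spine/raw',        # folder containing MR/ and Mask/ subfolders
        'npy_path': '/data/spine/hdf5',       # destination folder for the .hdf5 files
        'annotation_list': [1, 2, 3, 4, 5],   # label values handled by Nii_Reader
        'target_format': [1.0, 1.0, 3.0],     # format/spacing spec passed to Nii_Reader
    }
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as fp:
        json.dump(example_info, fp, indent=2)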
|
{"hexsha": "c06a77801affadd6428b77e55122b87d17ba6344", "size": 1818, "ext": "py", "lang": "Python", "max_stars_repo_path": "converter/nii2npy.py", "max_stars_repo_name": "shijun18/Spine_Seg", "max_stars_repo_head_hexsha": "90c41d8ee08235c43bd3a5236da5a0ee7066fced", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-07-26T08:01:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T12:40:30.000Z", "max_issues_repo_path": "converter/nii2npy.py", "max_issues_repo_name": "shijun18/Spine_Seg", "max_issues_repo_head_hexsha": "90c41d8ee08235c43bd3a5236da5a0ee7066fced", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "converter/nii2npy.py", "max_forks_repo_name": "shijun18/Spine_Seg", "max_forks_repo_head_hexsha": "90c41d8ee08235c43bd3a5236da5a0ee7066fced", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8947368421, "max_line_length": 124, "alphanum_fraction": 0.6430143014, "include": true, "reason": "import numpy", "num_tokens": 437}
|
\section{Program gto\char`_fasta\char`_rand\char`_extra\char`_chars}
The \texttt{gto\char`_fasta\char`_rand\char`_extra\char`_chars} program replaces the characters outside the ACGT alphabet in the DNA sequence with random ACGT symbols. It works on both FASTA and Multi-FASTA file formats.\\
For help type:
\begin{lstlisting}
./gto_fasta_rand_extra_chars -h
\end{lstlisting}
In the following subsections, we explain the input and output parameters.
\subsection*{Input parameters}
The \texttt{gto\char`_fasta\char`_rand\char`_extra\char`_chars} program needs two streams for the computation, namely standard input and standard output. The input stream is a FASTA or Multi-FASTA file.\\
The usage is as follows:
\begin{lstlisting}
Usage: ./gto_fasta_rand_extra_chars [options] [[--] args]
or: ./gto_fasta_rand_extra_chars [options]
It substitues in the DNA sequence the outside ACGT chars by random ACGT symbols.
It works both in FASTA and Multi-FASTA file formats
-h, --help show this help message and exit
Basic options
< input.fasta Input FASTA or Multi-FASTA file format (stdin)
> output.fasta Output FASTA or Multi-FASTA file format (stdout)
Example: ./gto_fasta_rand_extra_chars < input.mfasta > output.mfasta
\end{lstlisting}
An example of such an input file is:
\begin{lstlisting}
>AB000264 |acc=AB000264|descr=Homo sapiens mRNA
ANAAGACGGCCTCCTGCTGCTGCTGCTCTCCGGGGCCACGNCCCTGGAGGGTCCNCCGCTGCCCTGCTGCCATTGNCNCC
NGCCCCACCTAAGGAAAAGCAGCCTCCTGACTTTCCTCGCTTGGGCCGAGACAGCGAGCATATGCNGGAAGCGGCAGGAA
GNGGTTTGAGTGGACCTCCNGGCCCCTCATAGGAGAGGAAGCNNGGGAGGTGGCCAGGCGGCAGGAAGCAGGCCAGTGNC
GCGAATCCGNGCGCCGGGACAGAATCTCCTGCAAAGCCCTGCAGGAACTTCTTCTGGAAGACCTTCTCCACCCCCCCNNN
TAAANNNTCACCCATGAATGCTCACGCAANTTTAATTACAGACCTGAA
>AB000263 |acc=AB000263|descr=Homo sapiens mRNA
GCGAATCCGNGCGCCGGGACAGAATCTCCTTCTCCACCCCCCCNNNTGCAAAGCCCTGCAGGAACTTCTTCTGGAAGACC
NGCCCCACCTAAGGAAAAGCAGCCTCCAGGAACTGACTTTCCTCGCTTGGGCCGAGACAGCGAGCATATGCNGGAAGCGG
ANAAGACGGCCTCCTGCTGCTGCTGCTCTCCGGGGCCACGNCCCTGGCNCCAGGGTCCNCCGCTGCCCTGCTGCCATTGN
GAGGAAGCNNGGGAGGTGGCCAGGCGGCAGGAAGCAGGCCAGTGNCGNGGTTTGAGTGGACCTCCNGGCCCCTCATAGGA
TCACGCAANTTTAATTACAGACCTGAATAAANNNTCACCCATGAATGC
\end{lstlisting}
\subsection*{Output}
The output of the \texttt{gto\char`_fasta\char`_rand\char`_extra\char`_chars} program is a FASTA or Multi-FASTA file.\\
Using the input above, an example of the output is:
\begin{lstlisting}
>AB000264 |acc=AB000264|descr=Homo sapiens mRNA
ATAAGACGGCCTCCTGCTGCTGCTGCTCTCCGGGGCCACGGCCCTGGAGGGTCCCCCGCTGCCCTGCTGCCATTGTCCCC
TGCCCCACCTAAGGAAAAGCAGCCTCCTGACTTTCCTCGCTTGGGCCGAGACAGCGAGCATATGCGGGAAGCGGCAGGAA
GAGGTTTGAGTGGACCTCCCGGCCCCTCATAGGAGAGGAAGCCGGGGAGGTGGCCAGGCGGCAGGAAGCAGGCCAGTGTC
GCGAATCCGGGCGCCGGGACAGAATCTCCTGCAAAGCCCTGCAGGAACTTCTTCTGGAAGACCTTCTCCACCCCCCCTTG
TAAAAGATCACCCATGAATGCTCACGCAAATTTAATTACAGACCTGAA
>AB000263 |acc=AB000263|descr=Homo sapiens mRNA
GCGAATCCGTGCGCCGGGACAGAATCTCCTTCTCCACCCCCCCATCTGCAAAGCCCTGCAGGAACTTCTTCTGGAAGACC
GGCCCCACCTAAGGAAAAGCAGCCTCCAGGAACTGACTTTCCTCGCTTGGGCCGAGACAGCGAGCATATGCGGGAAGCGG
AGAAGACGGCCTCCTGCTGCTGCTGCTCTCCGGGGCCACGTCCCTGGCTCCAGGGTCCTCCGCTGCCCTGCTGCCATTGC
GAGGAAGCGGGGGAGGTGGCCAGGCGGCAGGAAGCAGGCCAGTGGCGCGGTTTGAGTGGACCTCCTGGCCCCTCATAGGA
TCACGCAACTTTAATTACAGACCTGAATAAAATGTCACCCATGAATGC
\end{lstlisting}
|
{"hexsha": "47bad20f4fcd9b3e5191b270e3f7b513b3ce385f", "size": 3254, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "manual/sections/FASTA_tools/FastaRandExtraChars.tex", "max_stars_repo_name": "olgafajarda/gto", "max_stars_repo_head_hexsha": "c0345c3f902eaab4811f5ceff6b24b569dbdd080", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manual/sections/FASTA_tools/FastaRandExtraChars.tex", "max_issues_repo_name": "olgafajarda/gto", "max_issues_repo_head_hexsha": "c0345c3f902eaab4811f5ceff6b24b569dbdd080", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manual/sections/FASTA_tools/FastaRandExtraChars.tex", "max_forks_repo_name": "olgafajarda/gto", "max_forks_repo_head_hexsha": "c0345c3f902eaab4811f5ceff6b24b569dbdd080", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.3442622951, "max_line_length": 199, "alphanum_fraction": 0.8690842041, "num_tokens": 1323}
|
import scipy
from py2scad import *
class Capillary_Enclosure(Basic_Enclosure):
def __init__(self,params):
self.params = params
self.add_sensor_cutout()
self.add_capillary_holes()
self.add_guide_tap_holes()
self.add_led_tap_holes()
self.add_led_cable_hole()
super(Capillary_Enclosure,self).__init__(self.params)
def make(self):
super(Capillary_Enclosure,self).make()
self.make_sensor()
self.make_capillary()
self.make_guide_plates()
self.make_led_pcb()
self.make_diffuser()
self.make_led_standoffs()
self.make_capillary_clamp_thru_holes()
self.make_capillary_clamp()
def get_assembly(self,**kwargs):
"""
Get enclosure assembly
"""
try:
show_sensor = kwargs['show_sensor']
except KeyError:
show_sensor = True
try:
show_capillary = kwargs['show_capillary']
except KeyError:
show_capillary = True
try:
show_guide_plates = kwargs['show_guide_plates']
except KeyError:
show_guide_plates = True
try:
show_guide_top = kwargs['show_guide_top']
except KeyError:
show_guide_top = True
try:
show_led_pcb = kwargs['show_led_pcb']
except KeyError:
show_led_pcb = True
try:
show_diffuser = kwargs['show_diffuser']
except KeyError:
show_diffuser = True
try:
show_diffuser_standoffs = kwargs['show_diffuser_standoffs']
except KeyError:
show_diffuser_standoffs = True
try:
explode = kwargs['explode']
except KeyError:
explode = (0,0,0)
try:
show_clamp = kwargs['show_clamp']
except KeyError:
show_clamp = True
explode_x, explode_y, explode_z = explode
parts_list = super(Capillary_Enclosure,self).get_assembly(**kwargs)
x,y,z = self.params['inner_dimensions']
wall_thickness = self.params['wall_thickness']
# Add sensor
sensor_x, sensor_y, sensor_z = self.params['sensor_dimensions']
sensor = self.sensor
z_shift = -0.5*z-0.5*sensor_z - explode_z
sensor = Translate(sensor,v=(0,0,z_shift))
sensor = Color(sensor,rgba=(0.5,0.5,0.5))
if show_sensor:
parts_list.append(sensor)
# Add capillary
cap_offset_x = self.params['capillary_hole_offset']
cap_hole_diam = self.params['capillary_diam']
y_shift = cap_offset_x
z_shift = -0.5*z + 0.5*cap_hole_diam - explode_z
capillary = self.capillary
capillary = Translate(self.capillary,v=(0,y_shift,z_shift))
if show_capillary:
parts_list.append(capillary)
# Add guide plate
guide_x, guide_y, guide_z = self.params['guide_plate_dimensions']
y_shift = 0.5*guide_y + 0.5*self.params['capillary_diam'] + cap_offset_x
z_shift = -0.5*z + 0.5*guide_z
guide_plate_pos = Translate(self.guide_plate_pos,v=[0,y_shift,z_shift])
y_shift = -0.5*guide_y - 0.5*self.params['capillary_diam'] + cap_offset_x
guide_plate_neg = Translate(self.guide_plate_neg,v=[0,y_shift,z_shift])
y_shift = cap_offset_x
z_shift = -0.5*z + 1.5*guide_z
guide_plate_top = Translate(self.guide_plate_top,v=[0,y_shift,z_shift])
if show_guide_plates:
parts_list.extend([guide_plate_pos,guide_plate_neg])
if show_guide_top:
parts_list.extend([guide_plate_top])
# Add led pcb
pcb_x, pcb_y, pcb_z = self.params['led_pcb_dimensions']
z_shift = 0.5*z - 0.5*pcb_z
led_pcb = Translate(self.led_pcb,v=(0,0,z_shift))
if show_led_pcb:
parts_list.append(led_pcb)
# Add diffuser
diff_x, diff_y, diff_z = self.params['diffuser_dimensions']
diffuser_standoff_height = self.params['diffuser_standoff_height']
z_shift = 0.5*z - pcb_z - 0.5*diff_z - diffuser_standoff_height
diffuser = Translate(self.diffuser,v=(0,0,z_shift))
if show_diffuser:
parts_list.append(diffuser)
# Add diffuser standoffs
led_hole_tuples = self.get_led_holes()
z_shift = 0.5*z - pcb_z- 0.5*self.params['diffuser_standoff_height']
for x_shift,y_shift, dummy in led_hole_tuples:
if x_shift < 0:
standoff = self.diffuser_standoff_neg
else:
standoff = self.diffuser_standoff_pos
standoff = Translate(standoff,v=(x_shift,y_shift,z_shift))
if show_diffuser_standoffs:
parts_list.append(standoff)
# Add capillary clamp
bottom_x_overhang = self.params['bottom_x_overhang']
clamp_x, clamp_y, clamp_z = self.clamp_size
x_shift = 0.5*self.bottom_x - 0.5*bottom_x_overhang
z_shift = -0.5*z + 0.5*wall_thickness + cap_hole_diam
capillary_clamp = Translate(self.capillary_clamp,v=(x_shift,0,z_shift))
if show_clamp:
parts_list.append(capillary_clamp)
return parts_list
def get_box_projection(self,show_ref_cube=True, spacing_factor=4):
"""
Get 2D projected layout of parts for laser cutting.
"""
parts_list = super(Capillary_Enclosure,self).get_projection(show_ref_cube,spacing_factor)
# Add capillary clamp
thickness = self.params['wall_thickness']
clamp_x, clamp_y, clamp_z = self.clamp_size
x_shift = 0.5*self.bottom_x + 0.5*clamp_x + spacing_factor*thickness
y_shift = 0.5*self.bottom_y + 0.5*clamp_y + spacing_factor*thickness
clamp = Translate(self.capillary_clamp,v=(x_shift,y_shift,0))
parts_list.append(Projection(clamp))
return parts_list
def get_guide_side_projection(self,show_ref_cube=True,spacing_factor=2):
"""
Get 2D projected layout of the two side guide plates for laser cutting.
"""
parts_list = []
guide_x, guide_y, guide_z = self.params['guide_plate_dimensions']
thickness = self.params['wall_thickness']
# Add the side guide plates
y_shift = 0.5*guide_y + 0.5*spacing_factor*thickness
guide_plate_pos = Translate(self.guide_plate_pos,v=(0,y_shift,0))
guide_plate_pos = Projection(guide_plate_pos)
parts_list.append(guide_plate_pos)
guide_plate_neg = Translate(self.guide_plate_neg,v=(0,-y_shift,0))
guide_plate_neg = Projection(guide_plate_neg)
parts_list.append(guide_plate_neg)
# Add reference cube
ref_cube = Cube(size=(INCH2MM, INCH2MM, INCH2MM))
x_shift = 0.5*guide_x + 0.5*INCH2MM + spacing_factor*thickness
ref_cube = Translate(ref_cube,v=(x_shift,0,0))
ref_cube = Projection(ref_cube)
if show_ref_cube:
parts_list.append(ref_cube)
return parts_list
def get_guide_top_projection(self,show_ref_cube=True,spacing_factor=2):
"""
Get 2D projected layout of the top guide plate for laser cutting.
"""
parts_list = []
top_x, top_y, top_z = self.get_guide_plate_top_dim()
thickness = self.params['wall_thickness']
# Add top guide plate
guide_plate_top = Projection(self.guide_plate_top)
parts_list.append(guide_plate_top)
# Add reference cube
ref_cube = Cube(size=(INCH2MM, INCH2MM, INCH2MM))
x_shift = 0.5*top_x + 0.5*INCH2MM + spacing_factor*thickness
ref_cube = Translate(ref_cube,v=(x_shift,0,0))
ref_cube = Projection(ref_cube)
if show_ref_cube:
parts_list.append(ref_cube)
return parts_list
def get_diffuser_projection(self,show_ref_cube=True,spacing_factor=2):
"""
Get 2D projected layout of the diffuser for laser cutting.
"""
parts_list = []
diff_x, diff_y, diff_z = self.params['diffuser_dimensions']
thickness = self.params['wall_thickness']
# Add diffuser
diffuser = Projection(self.diffuser)
parts_list.append(diffuser)
# Add reference cube
ref_cube = Cube(size=(INCH2MM, INCH2MM, INCH2MM))
x_shift = 0.5*diff_x + 0.5*INCH2MM + spacing_factor*thickness
ref_cube = Translate(ref_cube,v=(x_shift,0,0))
ref_cube = Projection(ref_cube)
if show_ref_cube:
parts_list.append(ref_cube)
return parts_list
def add_capillary_holes(self):
"""
Add holes for capillary positioning
"""
hole_x, hole_y, hole_r = self.params['capillary_hole_size']
hole_y = 2*hole_y
hole_offset_x = self.params['capillary_hole_offset']
x,y,z = self.params['inner_dimensions']
panel_list= ('left', 'right')
hole_list = []
for panel in panel_list:
pos_x = hole_offset_x
pos_y = -0.5*z
hole = {
'panel' : panel,
'type' : 'rounded_square',
'location' : (pos_x, pos_y),
'size' : (hole_x, hole_y, hole_r),
}
hole_list.append(hole)
self.params['hole_list'].extend(hole_list)
def add_sensor_cutout(self):
"""
Add cutout for sensor
"""
hole_list = []
sensor_width = self.params['sensor_width']
sensor_length = self.params['sensor_length']
hole_offset = self.params['sensor_hole_offset']
x_pos = 0
# WBD
#y_pos = -hole_offset;
y_pos = 0
hole = {
'panel' : 'bottom',
'type' : 'square',
'location' : (x_pos, y_pos),
'size' : (sensor_length, sensor_width),
}
hole_list.append(hole)
self.params['hole_list'].extend(hole_list)
def add_sensor_cable_hole(self):
"""
Add cable hole for sensor.
"""
hole_list = []
hole_width = self.params['sensor_cable_hole_width']
x,y,z = self.params['inner_dimensions']
x_pos = 0
y_pos = -0.5*z + 0.5*hole_width
hole = {
'panel' : 'front',
'type' : 'round',
'location' : (x_pos, y_pos),
'size' : hole_width,
}
hole_list.append(hole)
x_pos = 0
y_pos = -0.5*z
hole = {
'panel' : 'front',
'type' : 'square',
'location' : (x_pos, y_pos),
'size' : (hole_width, hole_width),
}
hole_list.append(hole)
self.params['hole_list'].extend(hole_list)
def make_sensor(self):
sensor_x, sensor_y, sensor_z = self.params['sensor_dimensions']
hole_offset = self.params['sensor_hole_offset']
hole_diam = self.params['sensor_mount_hole_diam']
hole_space = self.params['sensor_mount_hole_space']
# Create hole list
hole_list = []
for i in (-1,1):
x_pos = i*0.5*hole_space
y_pos = hole_offset
hole = (x_pos, y_pos,hole_diam)
hole_list.append(hole)
# Create sensor
sensor = plate_w_holes(sensor_x, sensor_y, sensor_z, hole_list)
# WBD
#self.sensor = Translate(sensor,v=(0,-hole_offset,0))
self.sensor = Translate(sensor,v=(0,0,0))
def make_capillary(self):
diameter = self.params['capillary_diam']
length = self.params['capillary_length']
r = 0.5*diameter
capillary = Cylinder(h=length,r1=r,r2=r)
capillary = Rotate(capillary, a=90, v=(0,1,0))
self.capillary = capillary
def make_guide_plates(self):
guide_x, guide_y, guide_z = self.params['guide_plate_dimensions']
hole_diam = self.params['guide_thru_hole_diam']
hole_offset = self.params['guide_hole_offset']
# Create pos and neg guide plates
hole_list_pos = []
hole_list_neg = []
for i in (-1,1):
x_pos = i*(0.5*guide_x - hole_offset)
y_pos = 0.5*guide_y - hole_offset
hole_pos = (x_pos, y_pos, hole_diam)
hole_neg = (x_pos, -y_pos, hole_diam)
hole_list_pos.append(hole_pos)
hole_list_neg.append(hole_neg)
self.guide_plate_pos = plate_w_holes(guide_x, guide_y, guide_z, holes=hole_list_pos)
self.guide_plate_neg = plate_w_holes(guide_x, guide_y, guide_z, holes=hole_list_neg)
# Create top guide plate
top_x, top_y, top_z = self.get_guide_plate_top_dim()
hole_list_top = self.get_guide_plate_holes(hole_type='through')
self.guide_plate_top = plate_w_holes(top_x,top_y,top_z,holes=hole_list_top)
def add_guide_tap_holes(self):
hole_tuples = self.get_guide_plate_holes(hole_type='tap')
hole_offset = self.params['capillary_hole_offset']
hole_list = []
for x,y,diam in hole_tuples:
hole = {
'panel' : 'bottom',
'type' : 'round',
'location' : (x,y+hole_offset),
'size' : diam,
}
hole_list.append(hole)
self.params['hole_list'].extend(hole_list)
def get_guide_plate_holes(self,hole_type='through'):
guide_x, guide_y, guide_z = self.params['guide_plate_dimensions']
hole_offset = self.params['guide_hole_offset']
if hole_type == 'through':
hole_diam = self.params['guide_thru_hole_diam']
else:
hole_diam = self.params['guide_tap_hole_diam']
hole_list = []
top_x, top_y, top_z = self.get_guide_plate_top_dim()
for i in (-1,1):
for j in (-1,1):
x_pos = i*(0.5*top_x - hole_offset)
y_pos = j*(0.5*top_y - hole_offset)
hole = (x_pos, y_pos, hole_diam)
hole_list.append(hole)
return hole_list
def get_guide_plate_top_dim(self):
guide_x, guide_y, guide_z = self.params['guide_plate_dimensions']
top_x = guide_x
top_y = 2*guide_y + self.params['capillary_diam']
top_z = guide_z
return top_x, top_y, top_z
def make_led_pcb(self):
led_x, led_y, led_z = self.params['led_pcb_dimensions']
hole_list = self.get_led_holes(hole_type='through')
#print hole_list
self.led_pcb = plate_w_holes(led_x, led_y, led_z, holes=hole_list)
def make_diffuser(self):
diff_x, diff_y, diff_z = self.params['diffuser_dimensions']
hole_list = self.get_led_holes(hole_type='through')
self.diffuser = plate_w_holes(diff_x, diff_y, diff_z, holes=hole_list)
def add_led_tap_holes(self):
hole_tuples = self.get_led_holes(hole_type='tap')
hole_list = []
for x,y,diam in hole_tuples:
hole = {
'panel' : 'top',
'type' : 'round',
'location' : (x,y),
'size' : diam,
}
hole_list.append(hole)
self.params['hole_list'].extend(hole_list)
def get_led_holes(self, hole_type='through'):
led_x, led_y, led_z = self.params['led_pcb_dimensions']
hole_offset = self.params['led_pcb_hole_offset']
if hole_type == 'through':
diam = self.params['led_pcb_thru_hole_diam']
else:
diam = self.params['led_pcb_tap_hole_diam']
hole_list = []
for i in (-1,1):
x_pos = i*(0.5*led_x - hole_offset)
y_pos = 0.5*led_y - hole_offset
hole = (x_pos, y_pos, diam)
hole_list.append(hole)
return hole_list
def make_led_standoffs(self):
height = self.params['diffuser_standoff_height']
diam = self.params['diffuser_standoff_diam']
radius = 0.5*diam
self.diffuser_standoff_pos = Cylinder(h=height,r1=radius,r2=radius)
self.diffuser_standoff_neg = Cylinder(h=height,r1=radius,r2=radius)
def add_led_cable_hole(self):
hole_size_x, hole_size_y = self.params['led_cable_hole_size']
hole_pos_x, hole_pos_y = self.params['led_cable_hole_pos']
#print hole_pos_x, hole_pos_y
hole = {
'panel' : 'bottom',
'type' : 'square',
'location' : (hole_pos_x, hole_pos_y),
'size' : (hole_size_x, hole_size_y),
}
self.params['hole_list'].append(hole)
def make_capillary_clamp_thru_holes(self):
inner_x, inner_y, inner_z = self.params['inner_dimensions']
wall_thickness = self.params['wall_thickness']
bottom_x_overhang = self.params['bottom_x_overhang']
hole_diam = self.params['capillary_clamp_thru_hole_diam']
hole_offset = self.params['capillary_clamp_hole_offset']
hole_list = []
for i in (-1,1):
x_pos = i*(0.5*self.bottom_x - 0.5*bottom_x_overhang)
y_pos = hole_offset
hole = {
'panel' : 'bottom',
'type' : 'round',
'location' : (x_pos, y_pos),
'size' : hole_diam,
}
hole_list.append(hole)
self.params['hole_list'].extend(hole_list)
self.add_holes(hole_list)
def make_capillary_clamp(self):
bottom_x_overhang = self.params['bottom_x_overhang']
wall_thickness = self.params['wall_thickness']
clamp_length = self.params['capillary_clamp_length']
clamp_tolerance = self.params['capillary_clamp_tolerance']
clamp_radius = self.params['capillary_clamp_radius']
hole_offset = self.params['capillary_clamp_hole_offset']
hole_diam = self.params['capillary_clamp_tap_hole_diam']
clamp_x = bottom_x_overhang - 2*clamp_tolerance
clamp_y = clamp_length
clamp_z = wall_thickness
self.clamp_size = clamp_x, clamp_y, clamp_z
hole_list = [(0,hole_offset,hole_diam)]
clamp = plate_w_holes(clamp_x,clamp_y,clamp_z,hole_list,radius=clamp_radius)
self.capillary_clamp = clamp
|
{"hexsha": "d77eab44ca521ea8a0675fccfd53eb25a8a9255e", "size": 18520, "ext": "py", "lang": "Python", "max_stars_repo_path": "capillary_enclosure.py", "max_stars_repo_name": "iorodeo/capillary_sensor_enclosure", "max_stars_repo_head_hexsha": "31eabacec098ab5600d79cbcdadc03ab42a044d1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "capillary_enclosure.py", "max_issues_repo_name": "iorodeo/capillary_sensor_enclosure", "max_issues_repo_head_hexsha": "31eabacec098ab5600d79cbcdadc03ab42a044d1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "capillary_enclosure.py", "max_forks_repo_name": "iorodeo/capillary_sensor_enclosure", "max_forks_repo_head_hexsha": "31eabacec098ab5600d79cbcdadc03ab42a044d1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.746031746, "max_line_length": 97, "alphanum_fraction": 0.5951403888, "include": true, "reason": "import scipy", "num_tokens": 4609}
|
[STATEMENT]
lemma sub_inserted2:"\<lbrakk>Y \<subseteq> insert a X; \<not> Y \<subseteq> X\<rbrakk> \<Longrightarrow> Y = (Y - {a}) \<union> {a}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>Y \<subseteq> insert a X; \<not> Y \<subseteq> X\<rbrakk> \<Longrightarrow> Y = Y - {a} \<union> {a}
[PROOF STEP]
by blast
|
{"llama_tokens": 133, "file": "Group-Ring-Module_Algebra1", "length": 1}
|
#!/bin/python
'''
script for compiling single CMX exposures into coadds and running them through
redrock
'''
import os
import glob
import h5py
import fitsio
import numpy as np
#dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/andes"
dir_output = "/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd"
def get_dates(tileid):
dates = glob.glob(os.path.join(dir_redux, 'tiles', str(tileid), "*"))
dates = [os.path.basename(date) for date in dates]
return dates
def get_exposures(tileid, date):
cframes = glob.glob(os.path.join(dir_redux, 'tiles', str(tileid), date,
'cframe-b*.fits'))
exps = [cframe.split('-')[-1].split('.fits')[0] for cframe in cframes]
return np.unique(exps)
def get_spectograph(tileid, date, exp):
cframes = glob.glob(os.path.join(dir_redux, 'tiles', str(tileid), date,
'cframe-b*-%s.fits' % exp.zfill(8)))
spectographs = [os.path.basename(cframe).split('-')[1].split('b')[-1]
for cframe in cframes]
return spectographs
def coadd(tileid, date, exp, spec):
''' combine spectra from b r z spectographs for given (tileid, date, exp,
spec) into a single coadd.
'''
cframe = os.path.join(dir_redux, 'tiles', str(tileid), date,
'cframe-[brz]%s-%s.fits' % (spec, exp.zfill(8)))
fcoadd = os.path.join(dir_output,
'coadd-%s-%s-%s-%s.fits' % (str(tileid), date, spec, exp.zfill(8)))
cmd = 'desi_coadd_spectra --coadd-cameras -i %s -o %s' % (cframe, fcoadd)
print(' >>> %s' % cmd)
os.system(cmd)
return None
def rr_coadd(tileid, date, exp, spec):
''' run redrock on specified coadd
'''
fcoadd = os.path.join(dir_output,
'coadd-%s-%s-%s-%s.fits' % (str(tileid), date, spec, exp.zfill(8)))
frr = os.path.join(dir_output,
'redrock-%s-%s-%s-%s.h5' % (str(tileid), date, spec, exp.zfill(8)))
fzbest = os.path.join(dir_output,
'zbest-%s-%s-%s-%s.fits' % (str(tileid), date, spec, exp.zfill(8)))
script = '\n'.join([
"#!/bin/bash",
"#SBATCH -N 1",
"#SBATCH -C haswell",
"#SBATCH -q regular",
'#SBATCH -J rr_%s_%s' % (exp, spec),
'#SBATCH -o _rr_%s_%s.o' % (exp, spec),
"#SBATCH -t 00:10:00",
"",
"export OMP_NUM_THREADS=1",
"export OMP_PLACES=threads",
"export OMP_PROC_BIND=spread",
"",
"",
"conda activate desi",
"",
"srun -n 32 -c 2 --cpu-bind=cores rrdesi_mpi -o %s -z %s %s" % (frr, fzbest, fcoadd),
""])
# create the script.sh file, execute it and remove it
f = open('script.slurm','w')
f.write(script)
f.close()
os.system('sbatch script.slurm')
os.system('rm script.slurm')
return None
def get_ztrue(tileid, date, exp, spec, clobber=False):
''' compile redshift truth table for each of the (tileid, date, exp, spec)
spectra.
'''
from pydl.pydlutils.spheregroup import spherematch
fztrue = os.path.join(dir_output,
'ztrue-%s-%s-%s-%s.hdf5' % (str(tileid), date, spec, exp.zfill(8)))
if os.path.isfile(fztrue) and not clobber:
return None
# read coadd
fcoadd = os.path.join(dir_output,
'coadd-%s-%s-%s-%s.fits' % (str(tileid), date, spec, exp.zfill(8)))
if not os.path.isfile(fcoadd): return None
coadd = fitsio.read(fcoadd)
print('coadd --- %s' % os.path.basename(fcoadd))
# get ra and dec range of coadd
ra_min, ra_max = coadd['TARGET_RA'].min(), coadd['TARGET_RA'].max()
dec_min, dec_max = coadd['TARGET_DEC'].min(), coadd['TARGET_DEC'].max()
print('%.f < RA < %.f, %.f < DEC < %.f' % (ra_min, ra_max, dec_min, dec_max))
has_ztrue = np.zeros(len(coadd['TARGET_RA'])).astype(bool)
ztrue = np.repeat(-999., len(coadd['TARGET_RA']))
# only imaging
imaging_cat_kw = ['cfhtls-d3', 'hsc-pdr1', 'hsc-pdr2', 'SpIES', 'cosmos-acs']
# loop through Rongpu's matched truth tables
dir_match = lambda ns: \
'/global/cfs/cdirs/desi/target/analysis/truth/dr9sv/%s/matched' % ns
for nors in ['north', 'south']:
fmatches = glob.glob(os.path.join(dir_match(nors), '[!ls-]*match.fits'))
for fmatch in fmatches:
imaging = False
for k in imaging_cat_kw:
if k in fmatch: imaging = True
if imaging: continue
# read match
truth = fitsio.read(fmatch)
print(' match to ... %s %s' % (nors, os.path.basename(fmatch)))
ra_col, dec_col, z_col = None, None, None
for k in ['ra', 'RA', 'R.A.', 'RAJ2000', 'ALPHA', 'alpha']:
if k in truth.dtype.names:
ra_col = k
for k in ['dec', 'DEC', 'Dec', 'Dec.', 'DEJ2000', 'DECJ2000',
'DELTA', 'delta']:
if k in truth.dtype.names:
dec_col = k
for k in ['cz', 'z', 'z1', 'Z', 'ZBEST', 'V', 'ZHELIO']:
if k in truth.dtype.names:
z_col = k
z_factor = 1.
if k in ['cz', 'V']:
z_factor = 1./3.e5
# check if exposure is within RA, Dec range
_ra_min, _ra_max = truth[ra_col].min(), truth[ra_col].max()
_dec_min, _dec_max = truth[dec_col].min(), truth[dec_col].max()
if ((_ra_min > ra_max) | (_ra_max < ra_min) | (_dec_min > dec_max)
| (_dec_max < dec_min)):
print(' ... out of range: %.f < RA < %.f, %.f < DEC < %.f' %
(_ra_min, _ra_max, _dec_min, _dec_max))
continue
# match RA/Dec
match = spherematch(
truth[ra_col], truth[dec_col],
coadd['TARGET_RA'], coadd['TARGET_DEC'],
0.000277778)
if len(match[0]) == 0: # no matches
print(' ... no matches')
continue
print(' ... has %i matches' % len(match[0]))
conflict = np.zeros(len(match[0])).astype(bool)
if np.sum(has_ztrue[match[1]]) > 0:
# target already has true redshift
already_has = np.arange(len(match[1]))[has_ztrue[match[1]]]
z_already = ztrue[match[1]][already_has]
z_new = z_factor * (truth[z_col][match[0]][already_has]).astype(float)
has_conflict = np.abs(z_already - z_new) > 0.01
conflict[already_has[has_conflict]] = True
print(' has %i conflicting redshifts' % np.sum(conflict))
ztrue[match[1]] = z_factor * (truth[z_col][match[0]]).astype(float)
has_ztrue[match[1]] = True
ztrue[match[1][conflict]] = -999.  # index in a single step; chained fancy indexing would write to a copy
print('%i total matches' % np.sum(has_ztrue))
f = h5py.File(fztrue, 'w')
f.create_dataset('has_ztrue', data=has_ztrue)
f.create_dataset('ztrue', data=ztrue)
f.close()
return None
if __name__=="__main__":
bgs_minisv_tiles = [70500, 70502, 70510]
bgs_sv0_tiles = [66000, 66014, 66003]
bgs_tiles = bgs_minisv_tiles + bgs_sv0_tiles
bgs_tiles = [66003]
for tile in bgs_tiles:
dates = get_dates(tile)
for date in dates:
exps = get_exposures(tile, date)
for exp in exps:
spectographs = get_spectograph(tile, date, exp)
for spec in spectographs:
#coadd(tile, date, exp, spec)
rr_coadd(tile, date, exp, spec)
#get_ztrue(tile, date, exp, spec, clobber=True)
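# --------------------------------------------------------------------------
# Hypothetical quick-look sketch for one of the ztrue files written by
# get_ztrue() above. The file-name pattern and the 'has_ztrue'/'ztrue'
# dataset names follow the code; the tile, date, spectrograph and exposure
# values below are made up.
def read_ztrue_example(tileid=66003, date='20200315', spec='0', exp='55654'):
    fztrue = os.path.join(dir_output,
        'ztrue-%s-%s-%s-%s.hdf5' % (str(tileid), date, spec, exp.zfill(8)))
    with h5py.File(fztrue, 'r') as f:
        has_ztrue = f['has_ztrue'][...]
        ztrue = f['ztrue'][...]
    print('%i of %i targets have a true redshift' %
        (np.sum(has_ztrue), len(has_ztrue)))
    return ztrue[has_ztrue]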
|
{"hexsha": "fe457c5b95a7bfdd4b5e57702aa4f31fe03c1241", "size": 7806, "ext": "py", "lang": "Python", "max_stars_repo_path": "run/cmx/cmx_exps.py", "max_stars_repo_name": "changhoonhahn/feasiBGS", "max_stars_repo_head_hexsha": "b5f535f12cf64babc9e25bcec75edd45d8668f74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-24T15:02:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-24T15:02:34.000Z", "max_issues_repo_path": "run/cmx/cmx_exps.py", "max_issues_repo_name": "michaelJwilson/feasiBGS", "max_issues_repo_head_hexsha": "63975b1e60f6f93f3b5020ee51ca565f325b918d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-10-23T16:02:01.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-04T18:53:20.000Z", "max_forks_repo_path": "run/cmx/cmx_exps.py", "max_forks_repo_name": "michaelJwilson/feasiBGS", "max_forks_repo_head_hexsha": "63975b1e60f6f93f3b5020ee51ca565f325b918d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-12T00:19:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-12T00:19:41.000Z", "avg_line_length": 37.3492822967, "max_line_length": 94, "alphanum_fraction": 0.5418908532, "include": true, "reason": "import numpy", "num_tokens": 2316}
|
/* hello world in r_egg */
write@syscall(4);
exit@syscall(1);
main@global(128) {
.var0 = "hi!\n";
write(1,.var0, 4);
exit(0);
}
|
{"hexsha": "21d439f040f4fc1dcf9d21560315d4fdaf4634c4", "size": 132, "ext": "r", "lang": "R", "max_stars_repo_path": "standalone/pruntime/rizin/test/unit/legacy_unit/rz_gg/hi.r", "max_stars_repo_name": "ndkazu/guessNumber-vs-Bot", "max_stars_repo_head_hexsha": "6e756977ce849137c62edb0716df6926583da9b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "standalone/pruntime/rizin/test/unit/legacy_unit/rz_gg/hi.r", "max_issues_repo_name": "ndkazu/guessNumber-vs-Bot", "max_issues_repo_head_hexsha": "6e756977ce849137c62edb0716df6926583da9b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-17T00:14:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-17T00:14:27.000Z", "max_forks_repo_path": "standalone/pruntime/rizin/test/unit/legacy_unit/rz_gg/hi.r", "max_forks_repo_name": "ndkazu/guessNumber-vs-Bot", "max_forks_repo_head_hexsha": "6e756977ce849137c62edb0716df6926583da9b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 13.2, "max_line_length": 26, "alphanum_fraction": 0.5909090909, "num_tokens": 51}
|
"""
File: pylinex/nonlinear/RankDecider.py
Author: Keith Tauscher
Date: 20 Apr 2018
Description: File containing a class which represents an IC-minimizer over a
discrete grid defined by a set of basis vector groups.
"""
import numpy as np
from distpy import Expression, KroneckerDeltaDistribution, DistributionSet
from ..util import int_types, real_numerical_types, sequence_types,\
create_hdf5_dataset, get_hdf5_value
from ..basis import Basis, BasisSet
from ..model import Model, BasisModel, CompositeModel,\
load_model_from_hdf5_group
from ..loglikelihood import GaussianLoglikelihood
from .LeastSquareFitter import LeastSquareFitter
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class RankDecider(object):
"""
Class which chooses a rank for multiple data components by minimizing a
DIC-like information criterion, varying the number of coefficients used
from each of a number of bases as the parameters of the search.
"""
def __init__(self, names, basis_set, data, error, expression,\
parameter_penalty=1, **non_basis_models):
"""
Initializes a new RankDecider with the given basis_set, data, and error.
names: list of string names of data components
basis_set: BasisSet objects containing basis with the largest number
of basis vectors allowed for each component. All of
basis_set.names must be in names, but some of names may not
be in basis_set.names; those that are not in basis_set.names
should have non_basis_models given as kwargs
data: 1D data vector to fit
error: 1D vector of noise level estimates for data
expression: Expression object which forms full model from submodels.
The ith submodel (with i starting at 0), corresponding to
the ith name in the names list, should be represented by
{i} in the expression string
parameter_penalty: the logL parameter penalty for adding a parameter in
any given model. Should be a non-negative constant.
It defaults to 1, which is the penalty used for the
Deviance Information Criterion (DIC)
**non_basis_models: extra keyword arguments whose keys are elements of
names which are not in basis_set.names, if any
exist, and whose values are model objects
corresponding to those data components
"""
self.names = names
self.basis_set = basis_set
self.data = data
self.error = error
self.expression = expression
self.parameter_penalty = parameter_penalty
self.non_basis_models = non_basis_models
@property
def names(self):
"""
Property storing the names of the data components.
"""
if not hasattr(self, '_names'):
raise AttributeError("names was referenced before it was set.")
return self._names
@names.setter
def names(self, value):
"""
Setter for the names of data components.
value: sequence of string names
"""
if type(value) in sequence_types:
if all([isinstance(element, basestring) for element in value]):
self._names = [element for element in value]
else:
raise TypeError("Not all elements of names were strings.")
else:
raise TypeError("names was set to a non-sequence.")
@property
def basis_set(self):
"""
Property storing the BasisSet object
"""
if not hasattr(self, '_basis_set'):
raise AttributeError("basis_set was referenced before it was set.")
return self._basis_set
@basis_set.setter
def basis_set(self, value):
"""
Setter for the basis_set object.
value: a BasisSet object
"""
if isinstance(value, BasisSet):
if all([(name in self.names) for name in value.names]):
self._basis_set = value
else:
raise ValueError("basis_set had at least one key that was " +\
"not in names.")
else:
raise TypeError("basis_set was set to a non-BasisSet object.")
@property
def data(self):
"""
Property storing the data to fit.
"""
if not hasattr(self, '_data'):
raise AttributeError("data referenced before it was set.")
return self._data
@data.setter
def data(self, value):
"""
Setter for the data to fit.
value: must be a 1-dimensional numpy.ndarray
"""
if type(value) in sequence_types:
value = np.array(value)
if value.ndim == 1:
self._data = value
else:
raise ValueError("Data for RankDecider must be 1D.")
else:
raise TypeError("data was not given as a sequence.")
@property
def error(self):
"""
Property storing the error on the data given.
"""
if not hasattr(self, '_error'):
raise AttributeError("error referenced before it was set.")
return self._error
@error.setter
def error(self, value):
"""
Setter for the error used to define the likelihood.
value: must be a numpy.ndarray, either of the same shape as the data (1D noise level) or square with shape (len(data), len(data)) (covariance matrix)
"""
value = np.array(value)
if value.shape == self.data.shape:
self._error = value
elif value.shape == (self.data.shape * 2):
self._error = value
else:
raise ValueError("error given was not the same shape as the data.")
@property
def expression(self):
"""
Property storing the Expression object which allows for the combination
of all of the sets of basis vectors.
"""
if not hasattr(self, '_expression'):
raise AttributeError("expression was referenced before it was " +\
"set.")
return self._expression
@expression.setter
def expression(self, value):
"""
Setter for the Expression object which allows for the combination of
all of the sets of basis vectors.
value: an Expression object which has as many arguments as the
basis_set has names.
"""
if isinstance(value, Expression):
if value.num_arguments == len(self.names):
self._expression = value
else:
raise ValueError("expression had a different number of " +\
"arguments than the RankDecider had submodels.")
else:
raise TypeError("expression was set to a non-Expression object.")
@property
def parameter_penalty(self):
"""
Property storing the penalty imposed on the log-likelihood when an
extra parameter is included in any given model.
"""
if not hasattr(self, '_parameter_penalty'):
raise AttributeError("parameter_penalty was referenced before " +\
"it was set.")
return self._parameter_penalty
@parameter_penalty.setter
def parameter_penalty(self, value):
"""
Setter for the penalty assessed when an extra parameter is included in
any given model.
value: a non-negative number
"""
if type(value) in real_numerical_types:
if value >= 0:
self._parameter_penalty = value
else:
raise ValueError("parameter_penalty was set to a negative " +\
"number.")
else:
raise TypeError("parameter_penalty was set to a non-number.")
@property
def non_basis_models(self):
"""
Property storing the non-basis models, whose numbers of terms don't
vary.
"""
if not hasattr(self, '_non_basis_models'):
raise AttributeError("non_basis_models was referenced before " +\
"it was set.")
return self._non_basis_models
@non_basis_models.setter
def non_basis_models(self, value):
"""
Setter for the non-basis models.
value: dictionary whose keys are elements of names which are not in
basis_set.names, if any exist, and whose values are model
objects corresponding to those data components
"""
if isinstance(value, dict):
keys = [key for key in value]
if all([isinstance(key, basestring) for key in keys]):
if (set(keys) & set(self.basis_set.names)):
raise ValueError("A key of non_basis_models also has a " +\
"basis in basis_set.")
else:
all_names_set = (set(keys) | set(self.basis_set.names))
if (all_names_set == set(self.names)):
if all([isinstance(value[key], Model)\
for key in keys]):
self._non_basis_models = value
else:
raise TypeError("Not all values of " +\
"non_basis_models were Model objects.")
else:
raise ValueError(("The following names were " +\
"neither keys of non_basis_models or names in " +\
"the basis_set: {0}. The following names were " +\
"keys of non_basis_models, but were not in the " +\
"names given at initialization: {1}.").format(\
set(self.names) - all_names_set,\
set(keys) - set(self.names)))
else:
raise TypeError("Not all keys of non_basis_models were " +\
"strings.")
else:
raise TypeError("non_basis_models was set to a non-dict.")
def fill_hdf5_group(self, group, data_link=None, error_link=None):
"""
Fills the given hdf5 group with information about this RankDecider.
group: the group to fill with information about this RankDecider
data_link: link like that returned by pylinex.h5py_extensions.HDF5Link
error_link: link like that returned by pylinex.h5py_extensions.HDF5Link
"""
group.attrs['class'] = 'RankDecider'
create_hdf5_dataset(group, 'names', data=self.names)
create_hdf5_dataset(group, 'data', data=self.data, link=data_link)
create_hdf5_dataset(group, 'error', data=self.error, link=error_link)
self.basis_set.fill_hdf5_group(group.create_group('basis_set'))
self.expression.fill_hdf5_group(group.create_group('expression'))
group.attrs['parameter_penalty'] = self.parameter_penalty
subgroup = group.create_group('non_basis_models')
for name in self.non_basis_models:
self.non_basis_models[name].fill_hdf5_group(\
subgroup.create_group(name))
@staticmethod
def load_from_hdf5_group(group):
"""
Loads a RankDecider object from an hdf5 file group in which it was
previously saved.
group: the hdf5 file group from which to load a RankDecider object
returns: the RankDecider object loaded from the given hdf5 file group
"""
try:
assert group.attrs['class'] == 'RankDecider'
except:
raise ValueError("group doesn't appear to point to a " +\
"RankDecider object.")
names = get_hdf5_value(group['names'])
data = get_hdf5_value(group['data'])
error = get_hdf5_value(group['error'])
basis_set = BasisSet.load_from_hdf5_group(group['basis_set'])
expression = Expression.load_from_hdf5_group(group['expression'])
parameter_penalty = group.attrs['parameter_penalty']
(subgroup, non_basis_models) = (group['non_basis_models'], {})
for name in subgroup:
non_basis_models[name] = load_model_from_hdf5_group(subgroup[name])
return RankDecider(names, basis_set, data, error, expression,\
parameter_penalty=parameter_penalty, **non_basis_models)
def __eq__(self, other):
"""
Checks if self is equal to other.
other: an object to check for equality
returns: True if other and self have the same properties
"""
if not isinstance(other, RankDecider):
return False
if self.names != other.names:
return False
if self.basis_set != other.basis_set:
return False
if not np.allclose(self.data, other.data):
return False
if not np.allclose(self.error, other.error):
return False
if self.expression != other.expression:
return False
if self.parameter_penalty != other.parameter_penalty:
return False
if set(self.non_basis_models.keys()) ==\
set(other.non_basis_models.keys()):
for key in self.non_basis_models:
if self.non_basis_models[key] != other.non_basis_models[key]:
return False
return True
else:
return False
def model_from_nterms(self, **nterms):
"""
Creates a model from the given number of terms for each basis.
nterms: kwargs with names in self.basis_set.names as keys and integer
numbers of terms as values
returns: a CompositeModel object that includes all submodels
"""
if set(nterms.keys()) == set(self.basis_set.names):
models = []
for name in self.names:
if name in nterms:
model = BasisModel(self.basis_set[name][:nterms[name]])
else:
model = self.non_basis_models[name]
models.append(model)
return CompositeModel(self.expression, self.names, models)
else:
raise ValueError("The keys of nterms were not identical to the " +\
"names of basis_set.")
def loglikelihood_from_nterms(self, **nterms):
"""
Creates a loglikelihood with a model created from the given number of
terms for each basis.
nterms: kwargs with names in self.basis_set.names as keys and integer
numbers of terms as values
returns: a GaussianLoglikelihood object with a CompositeModel
"""
return GaussianLoglikelihood(self.data, self.error,\
self.model_from_nterms(**nterms))
def starting_point_from_nterms(self, true_parameters, true_curves, nterms):
"""
true_parameters: dictionary containing true parameter vectors indexed
by name
true_curves: dictionary of the form {name: (true_curve[name], suberror[name]) for name in true_curve_names}
nterms: dictionary with names in self.basis_set.names as keys and
integer numbers of terms as values
returns: (loglikelihood, starting_parameters)
"""
loglikelihood = self.loglikelihood_from_nterms(**nterms)
starting_point = []
for (iname, name) in enumerate(self.names):
if name in true_parameters:
starting_point.append(true_parameters[name])
else:
submodel = loglikelihood.model.models[iname]
if name in true_curves:
starting_point.append(\
submodel.quick_fit(*true_curves[name])[0])
elif submodel.num_parameters != 0:
raise ValueError("A submodel has parameters but was " +\
"given neither of the true_parameters or " +\
"true_curves dictionaries.")
return (loglikelihood, np.concatenate(starting_point))
def best_parameters_from_nterms(self, true_parameters, true_curves,\
nterms, **bounds):
"""
Finds the best parameters (and the loglikelihood to which they apply)
for given nterms.
true_parameters: dictionary containing true parameter vectors indexed
by name
true_curves: dictionary of the form {name: (true_curve[name], suberror[name]) for name in true_curve_names}
nterms: dictionary with names in self.basis_set.names as keys and
integer numbers of terms as values
bounds: tuples of form (minimum, maximum) where either may be None for
each parameter for which bounds should be obeyed
returns: (loglikelihood, max_likelihood_parameters)
"""
(loglikelihood, starting_point) = self.starting_point_from_nterms(\
true_parameters, true_curves, nterms)
guess_distribution = KroneckerDeltaDistribution(starting_point)
guess_distribution_set = DistributionSet([(guess_distribution,\
loglikelihood.parameters, None)])
least_square_fitter = LeastSquareFitter(\
loglikelihood=loglikelihood, prior_set=guess_distribution_set,\
**bounds)
least_square_fitter.run()
return (loglikelihood, least_square_fitter.argmin)
def information_criterion_from_nterms(self, true_parameters, true_curves,\
nterms, **bounds):
"""
Finds the best parameters (and the loglikelihood to which they apply)
for given nterms.
true_parameters: dictionary containing true parameter vectors indexed
by name
true_curves: dictionary of the form {name: (true_curve[name], suberror[name]) for name in true_curve_names}
nterms: dictionary with names in self.basis_set.names as keys and
integer numbers of terms as values
bounds: tuples of form (minimum, maximum) where either may be None for
each parameter for which bounds should be obeyed
returns: the value of the information criterion (a float)
"""
(loglikelihood, max_likelihood_parameters) =\
self.best_parameters_from_nterms(true_parameters, true_curves,\
nterms, **bounds)
loglikelihood_value = loglikelihood(max_likelihood_parameters)
varying_num_parameters = sum([nterms[key] for key in nterms])
penalty = (varying_num_parameters * self.parameter_penalty)
return ((-2.) * (loglikelihood_value - penalty))
def minimize_information_criterion(self, starting_nterms, true_parameters,\
true_curves, return_trail=False, can_backtrack=False,\
verbose=True, **bounds):
"""
Minimizes the information criterion over the grid of possible nterms
through finite difference descent.
starting_nterms: dictionary with names in self.basis_set.names as keys and
initial integer numbers of terms as values
true_parameters: dictionary containing true parameter vectors indexed
by name
true_curves: dictionary of the form {name: (true_curve[name], suberror[name]) for name in true_curve_names}
return_trail: if True, return value contains not only the final nterms
dictionary, but also the trail used to get there from the
start
bounds: tuples of form (minimum, maximum) where either may be None for
each parameter for which bounds should be obeyed
returns: (last_nterms, nterms_trail) if return_trail else last_nterms
"""
nterms = {name: starting_nterms[name] for name in self.basis_set.names}
information_criterion = self.information_criterion_from_nterms(\
true_parameters, true_curves, nterms, **bounds)
previous_nterms = []
done = False
iteration_number = 0
while not done:
iteration_number += 1
if verbose:
print("Iteration #{:d} starting nterms: {}".format(\
iteration_number, nterms))
possible_next_nterms =\
[{} for index in range(2 * len(self.basis_set.names))]
for (iname, name) in enumerate(self.basis_set.names):
for index in range(2 * len(self.basis_set.names)):
difference = (((2 * (index % 2)) - 1) *\
(1 if (iname == (index // 2)) else 0))  # integer division: index // 2 picks the component being changed
possible_next_nterms[index][name] =\
nterms[name] + difference
indices_to_delete = []
for (nti, nt) in enumerate(possible_next_nterms):
deleted = False
if can_backtrack and (nt in previous_nterms):
indices_to_delete.append(nti)
elif (not can_backtrack) and (len(previous_nterms) != 0) and\
(nt == previous_nterms[-1]):
indices_to_delete.append(nti)
else:
for name in self.basis_set.names:
if deleted:
continue
if (nt[name] < 1) or (nt[name] >\
self.basis_set[name].num_basis_vectors):
indices_to_delete.append(nti)
deleted = True
pnt = []
for index in range(len(possible_next_nterms)):
if index not in indices_to_delete:
pnt.append(possible_next_nterms[index])
possible_next_nterms = pnt
if len(possible_next_nterms) == 0:
done = True
continue
information_criteria = [self.information_criterion_from_nterms(\
true_parameters, true_curves, nt, **bounds)\
for nt in possible_next_nterms]
information_criteria_argmin = np.argmin(information_criteria)
information_criteria_min =\
information_criteria[information_criteria_argmin]
if information_criterion < information_criteria_min:
done = True
continue
information_criterion = information_criteria_min
previous_nterms.append(nterms)
nterms = possible_next_nterms[information_criteria_argmin]
if verbose:
print("Final nterms: {}".format(nterms))
if return_trail:
return (nterms, previous_nterms)
else:
return nterms
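# --------------------------------------------------------------------------
# Hypothetical usage sketch exercising only the RankDecider API defined
# above. It assumes basis_set is a BasisSet with components named 'signal'
# and 'systematic', data and error are 1D vectors, and expression is a
# distpy Expression combining the two submodels (e.g. built from the string
# '{0} + {1}'); constructing those objects is outside this sketch.
def choose_ranks_example(basis_set, data, error, expression):
    rank_decider = RankDecider(['signal', 'systematic'], basis_set, data,\
        error, expression, parameter_penalty=1)
    # use the data itself for the quick least-square starting fits
    true_curves = {'signal': (data, error), 'systematic': (data, error)}
    starting_nterms = {'signal': 5, 'systematic': 5}
    # grid-descend over the numbers of terms, minimizing
    # IC = -2 * (max loglikelihood - penalty * number of varying terms)
    return rank_decider.minimize_information_criterion(starting_nterms, {},\
        true_curves, verbose=True)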
|
{"hexsha": "3993f8fbf7deaaac0aa010e4415e278b016bc3c3", "size": 23147, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylinex/nonlinear/RankDecider.py", "max_stars_repo_name": "CU-NESS/pylinex", "max_stars_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pylinex/nonlinear/RankDecider.py", "max_issues_repo_name": "CU-NESS/pylinex", "max_issues_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pylinex/nonlinear/RankDecider.py", "max_forks_repo_name": "CU-NESS/pylinex", "max_forks_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3937728938, "max_line_length": 79, "alphanum_fraction": 0.5906164946, "include": true, "reason": "import numpy", "num_tokens": 4597}
|
from lib.torch.constraints import apply_linear_constraint
import numpy as np
import torch
from torch.autograd import Variable
import pytest
def test_apply_linear_constraint():
def lin(x):
"""sum(x) >= 0"""
return x.sum(-1, keepdim=True)
x = Variable(torch.ones(1, 10))
# test equality constraint
y = apply_linear_constraint(lin, 0, x).data.numpy()
np.testing.assert_allclose(y, 0.0)
# test inequality constraint
y = apply_linear_constraint(lin, 0, x, inequality=True)
np.testing.assert_almost_equal(y.data.numpy(), x.data.numpy())
# test inequality constraint
y = apply_linear_constraint(lin, 0, -x, inequality=True)
np.testing.assert_allclose(y.data.numpy(), 0.0)
# test shape
assert y.size() == x.size()
@pytest.mark.skip()
def test_precip_functional():
lhf = torch.FloatTensor([1, 10]).unsqueeze(-1)
z = torch.linspace(0, 16e3, 34)
w = Variable(torch.ones(34))
fq = (np.exp(-z/1e3) ) * 100
fq = Variable(torch.stack([fq, fq]))
def linear(x):
return - (w * x).sum(-1, keepdim=True) / 1000.
a = - lhf * 86400 / 2.51e6
a = Variable(a)
fqt_modified = apply_linear_constraint(linear, a, fq, inequality=True).data
prec_modified = precip_from_q(fqt_modified, lhf, w.data)
prec = precip_from_q(fq.data, lhf, w.data)
eps = 1e-7
print("original precip", prec)
print("modified precip", prec_modified)
assert (prec_modified > -eps).all()
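# Hypothetical extra check, using only the call pattern exercised above:
# project a random batch onto the half-space sum(x) >= 0 and verify the
# inequality constraint holds afterwards.
def test_apply_linear_constraint_random_batch():
    def lin(x):
        """sum(x) >= 0"""
        return x.sum(-1, keepdim=True)

    x = Variable(torch.randn(8, 10))
    y = apply_linear_constraint(lin, 0, x, inequality=True)
    # every row should now satisfy the constraint (up to numerical tolerance)
    assert (lin(y).data.numpy() >= -1e-6).all()
    # the projection must not change the shape
    assert y.size() == x.size()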
|
{"hexsha": "c3ab156724ba3ad976816d935f4ef6f867d4297b", "size": 1484, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_constraints.py", "max_stars_repo_name": "nbren12/nn_atmos_param", "max_stars_repo_head_hexsha": "cb138f0b211fd5743e56ad659aec38c082d2b3ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-09-16T20:55:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-06T11:27:50.000Z", "max_issues_repo_path": "tests/test_constraints.py", "max_issues_repo_name": "nbren12/nn_atmos_param", "max_issues_repo_head_hexsha": "cb138f0b211fd5743e56ad659aec38c082d2b3ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2018-04-07T07:40:39.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-20T06:56:08.000Z", "max_forks_repo_path": "tests/test_constraints.py", "max_forks_repo_name": "nbren12/nn_atmos_param", "max_forks_repo_head_hexsha": "cb138f0b211fd5743e56ad659aec38c082d2b3ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5862068966, "max_line_length": 79, "alphanum_fraction": 0.6556603774, "include": true, "reason": "import numpy", "num_tokens": 419}
|
[STATEMENT]
lemma rev_slice:
"n + k + LENGTH('a::len) = LENGTH('b::len) \<Longrightarrow>
slice n (word_reverse (w::'b word)) = word_reverse (slice k w :: 'a word)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n + k + LENGTH('a) = LENGTH('b) \<Longrightarrow> slice n (word_reverse w) = word_reverse (slice k w)
[PROOF STEP]
unfolding slice_def word_size
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. n + k + LENGTH('a) = LENGTH('b) \<Longrightarrow> slice1 (LENGTH('b) - n) (word_reverse w) = word_reverse (slice1 (LENGTH('b) - k) w)
[PROOF STEP]
by (simp add: rev_slice1)
|
{"llama_tokens": 246, "file": null, "length": 2}
|
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall O E Eprime A B C Oprime Aprime Bprime Cprime Eprimeprime C2 B0 : Universe, ((wd_ O E /\ (wd_ Oprime Eprime /\ (wd_ A O /\ (wd_ B O /\ (wd_ C O /\ (wd_ A E /\ (wd_ Eprimeprime O /\ (wd_ O Oprime /\ (wd_ E Eprimeprime /\ (wd_ Eprimeprime A /\ (wd_ E Eprime /\ (wd_ O Eprime /\ (wd_ Oprime Eprimeprime /\ (wd_ E Oprime /\ (wd_ Eprime C2 /\ (wd_ Aprime C2 /\ (wd_ Oprime Aprime /\ (wd_ A Aprime /\ (wd_ C Cprime /\ (wd_ B Bprime /\ (col_ O E A /\ (col_ O E B /\ (col_ O E C /\ (col_ Oprime Eprime Aprime /\ (col_ Oprime Eprime Bprime /\ (col_ Oprime Eprime Cprime /\ (col_ O Eprimeprime O /\ (col_ O Eprimeprime Oprime /\ (col_ O Eprimeprime C2 /\ (col_ Eprimeprime B O /\ col_ O E B0)))))))))))))))))))))))))))))) -> col_ O E Eprimeprime)).
Proof.
time tac.
Qed.
End FOFProblem.
|
{"author": "janicicpredrag", "repo": "Larus", "sha": "a095ca588fbb0e4a64a26d92946485bbf85e1e08", "save_path": "github-repos/coq/janicicpredrag-Larus", "path": "github-repos/coq/janicicpredrag-Larus/Larus-a095ca588fbb0e4a64a26d92946485bbf85e1e08/benchmarks/coq-problems/col-trans/col_trans_1281.v"}
|
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from abc import ABC, abstractmethod
# assumed import: sample_param/eval_param_pdf below look up samplers in
# baobab.distributions, which the file as given never imports
import baobab.distributions
class BaseCosmoBNNPrior(ABC):
"""Abstract base class for a cosmology-aware BNN prior
"""
def __init__(self, bnn_omega):
self._check_cosmology_config_validity(bnn_omega)
self._define_cosmology(bnn_omega.cosmology)
for cosmo_comp in ['cosmology', 'redshift', 'kinematics']:
setattr(self, cosmo_comp, bnn_omega[cosmo_comp])
self.sample_redshifts = getattr(self, 'sample_redshifts_from_{:s}'.format(self.redshift.model))
def _raise_config_error(self, missing_key, parent_config_key, bnn_prior_class):
"""Convenience function for raising errors related to config values
"""
raise ValueError("{:s} must be specified in the config inside {:s} for {:s}".format(missing_key, parent_config_key, bnn_prior_class))
def _check_cosmology_config_validity(self, bnn_omega):
"""Check whether the config file specified the hyperparameters for all the fields
required for cosmology-aware BNN priors, e.g. cosmology, redshift, galaxy kinematics
"""
required_keys = ['cosmology', 'redshift', 'kinematics']
for possible_missing_key in required_keys:
if possible_missing_key not in bnn_omega:
                self._raise_config_error(possible_missing_key, 'bnn_omega', self.__class__.__name__)
def _define_cosmology(self, cosmology_cfg):
"""Set the cosmology, with which to generate all the training samples, based on the config
Parameters
----------
cosmology_cfg : dict
Copy of `cfg.bnn_omega.cosmology`
"""
self.cosmo = FlatLambdaCDM(**cosmology_cfg)
def sample_param(self, hyperparams):
"""Assigns a sampling distribution
"""
dist = hyperparams.pop('dist')
return getattr(baobab.distributions, 'sample_{:s}'.format(dist))(**hyperparams)
def eval_param_pdf(self, eval_at, hyperparams):
"""Assigns and evaluates the PDF
"""
dist = hyperparams.pop('dist')
return getattr(baobab.distributions, 'eval_{:s}_pdf'.format(dist))(**hyperparams)
def sample_redshifts_from_differential_comoving_volume(self, redshifts_cfg):
"""Sample redshifts from the differential comoving volume,
on a grid with the range and resolution specified in the config
Parameters
----------
redshifts_cfg : dict
Copy of `cfg.bnn_omega.redshift`
Returns
-------
tuple
the tuple of floats that are the realized z_lens, z_src
"""
z_grid = np.arange(**redshifts_cfg.grid)
dVol_dz = self.cosmo.differential_comoving_volume(z_grid).value
dVol_dz_normed = dVol_dz/np.sum(dVol_dz)
sampled_z = np.random.choice(z_grid, 2, replace=False, p=dVol_dz_normed)
z_lens = np.min(sampled_z)
z_src = np.max(sampled_z)
while z_src < z_lens + redshifts_cfg.min_diff:
sampled_z = np.random.choice(z_grid, 2, replace=False, p=dVol_dz_normed)
z_lens = np.min(sampled_z)
z_src = np.max(sampled_z)
return z_lens, z_src
def sample_redshifts_from_independent_dist(self, redshifts_cfg):
"""Sample lens and source redshifts from independent distributions, while enforcing that the lens redshift is smaller than source redshift
Parameters
----------
redshifts_cfg : dict
Copy of `cfg.bnn_omega.redshift`
Returns
-------
tuple
the tuple of floats that are the realized z_lens, z_src
"""
z_lens = self.sample_param(redshifts_cfg.z_lens.copy())
z_src = self.sample_param(redshifts_cfg.z_src.copy())
while z_src < z_lens + redshifts_cfg.min_diff:
z_lens = self.sample_param(redshifts_cfg.z_lens.copy())
z_src = self.sample_param(redshifts_cfg.z_src.copy())
return z_lens, z_src
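# Minimal standalone sketch (not part of the class above): it mirrors the differential
# comoving volume sampling done in `sample_redshifts_from_differential_comoving_volume`,
# using only numpy and astropy. The grid range/step and cosmology parameters are
# illustrative assumptions, not values taken from any baobab config.
if __name__ == '__main__':
    demo_cosmo = FlatLambdaCDM(H0=70.0, Om0=0.3)
    demo_z_grid = np.arange(0.01, 5.0, 0.01)
    demo_dVol_dz = demo_cosmo.differential_comoving_volume(demo_z_grid).value
    demo_p = demo_dVol_dz/np.sum(demo_dVol_dz)
    demo_z_lens, demo_z_src = np.sort(np.random.choice(demo_z_grid, 2, replace=False, p=demo_p))
    print("z_lens = {:.3f}, z_src = {:.3f}".format(demo_z_lens, demo_z_src))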
|
{"hexsha": "6d0c76f0219f399160746fc95c5ad73085530fd8", "size": 4021, "ext": "py", "lang": "Python", "max_stars_repo_path": "baobab/bnn_priors/base_cosmo_bnn_prior.py", "max_stars_repo_name": "jiwoncpark/baobab", "max_stars_repo_head_hexsha": "2a9a1b3eafbafef925bedab4b3137a3505a9b750", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-09-11T15:11:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T08:24:52.000Z", "max_issues_repo_path": "baobab/bnn_priors/base_cosmo_bnn_prior.py", "max_issues_repo_name": "jiwoncpark/baobab", "max_issues_repo_head_hexsha": "2a9a1b3eafbafef925bedab4b3137a3505a9b750", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 52, "max_issues_repo_issues_event_min_datetime": "2019-08-29T00:39:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-02T22:49:41.000Z", "max_forks_repo_path": "baobab/bnn_priors/base_cosmo_bnn_prior.py", "max_forks_repo_name": "jiwoncpark/baobab", "max_forks_repo_head_hexsha": "2a9a1b3eafbafef925bedab4b3137a3505a9b750", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-26T23:38:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-18T10:07:04.000Z", "avg_line_length": 38.6634615385, "max_line_length": 146, "alphanum_fraction": 0.6590400398, "include": true, "reason": "import numpy,from astropy", "num_tokens": 963}
|
#include <string.h>
#include <stdlib.h>
#include <registryFunction.h>
#include <aSubRecord.h>
#include <menuFtype.h>
#include <errlog.h>
#include <epicsString.h>
#include <epicsExport.h>
#include "epicsTypes.h"
#include <string>
#include <iostream>
#include <map>
#include <iterator>
#include <algorithm>
#include "buffer_parsing.h"
#include "buffer_parsing_utils.h"
#include <boost/function.hpp>
/**
* Base function used to parse readings.
* Called from an aSub record.
*
* Args:
* prec: Pointer to an aSub record.
* Parser: Parsing function to parse the readings.
* Returns:
 * long: 0 on success, a non-zero error code otherwise.
*/
static long aSubParser(aSubRecord *prec,
boost::function<void (std::map<int, std::string>::iterator, std::map<int, aSubOutputParameters>)> Parser) {
if (prec->fta != menuFtypeSTRING)
{
errlogSevPrintf(errlogMajor, "%s incorrect input argument type A", prec->name);
return 1;
}
try {
// Create channel lookup maps
std::map<int, std::string> channel_readings = parse_input(prec->a, prec->noa);
if (channel_readings.empty()) {
errlogSevPrintf(errlogInfo, "Buffer readings contain no channel information.");
return 2;
}
std::map<int, aSubOutputParameters> channel_outputs = asub_channel_output(prec);
		// Iterate over the channel readings map
for (std::map<int, std::string>::iterator it = channel_readings.begin();
it != channel_readings.end();
++it) {
Parser(it, channel_outputs);
}
}
catch (std::invalid_argument& e) {
errlogSevPrintf(errlogMajor, "%s Invalid argument exception: %s", prec->name, e.what());
return 3;
}
catch (std::logic_error& e) {
errlogSevPrintf(errlogMajor, "%s Logic Error Exception: %s", prec->name, e.what());
return 4;
}
catch (std::exception& e) {
errlogSevPrintf(errlogMajor, "%s Exception: %s", prec->name, e.what());
return 5;
}
catch (...) {
errlogSevPrintf(errlogMajor, "%s unknown exception", prec->name);
return 6;
}
return 0;
}
// Parses the values from readings and sets the
// value to the respective channels.
//
// Called from an aSub record.
long ParseReadingsForValue(aSubRecord *prec) {
return aSubParser(prec, set_double_value);
}
// Parses the units from readings and sets the
// unit to the respective channels.
//
// Called from an aSub record.
long ParseReadingsForUnit(aSubRecord *prec) {
return aSubParser(prec, set_unit_value);
}
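// Registration sketch (an assumption, not shown in this file): aSub SNAM routines are
// normally made visible to the IOC by registering them and declaring them in a .dbd
// file, e.g. "function(ParseReadingsForValue)". A typical registration would be:
epicsRegisterFunction(ParseReadingsForValue);
epicsRegisterFunction(ParseReadingsForUnit);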
|
{"hexsha": "8087a066d4232000d3c699604630d48cb303f8af", "size": 2559, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Keithley2001Sup/src/buffer_parsing.cpp", "max_stars_repo_name": "ISISComputingGroup/EPICS-Keithley_2001", "max_stars_repo_head_hexsha": "5f8edf73001d6e8a4fba82683c282fa241ab34ee", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Keithley2001Sup/src/buffer_parsing.cpp", "max_issues_repo_name": "ISISComputingGroup/EPICS-Keithley_2001", "max_issues_repo_head_hexsha": "5f8edf73001d6e8a4fba82683c282fa241ab34ee", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Keithley2001Sup/src/buffer_parsing.cpp", "max_forks_repo_name": "ISISComputingGroup/EPICS-Keithley_2001", "max_forks_repo_head_hexsha": "5f8edf73001d6e8a4fba82683c282fa241ab34ee", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0795454545, "max_line_length": 111, "alphanum_fraction": 0.6486908949, "num_tokens": 654}
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from math import ceil, floor
import numpy as np
from yaml import safe_load
from maro.backends.frame import FrameBase, SnapshotList
from maro.data_lib.cim import CimDataContainerWrapper, Order, Stop
from maro.event_buffer import AtomEvent, CascadeEvent, EventBuffer, MaroEvents
from maro.simulator.scenarios import AbsBusinessEngine
from maro.simulator.scenarios.helpers import DocableDict
from maro.simulator.scenarios.matrix_accessor import MatrixAttributeAccessor
from maro.streamit import streamit
from .common import Action, ActionScope, ActionType, DecisionEvent
from .event_payload import EmptyReturnPayload, LadenReturnPayload, VesselDischargePayload, VesselStatePayload
from .events import Events
from .frame_builder import gen_cim_frame
from .ports_order_export import PortOrderExporter
metrics_desc = """
CIM metrics provide statistics accumulated up to now (which may be in the middle of the current tick).
It contains following keys:
order_requirements (int): Accumulative orders until now.
container_shortage (int): Accumulative shortage until now.
operation_number (int): Total empty operation (both load and discharge) cost,
the cost factors can be configured through 'load_cost_factor' and 'dsch_cost_factor' in configuration file.
"""
class CimBusinessEngine(AbsBusinessEngine):
"""Cim business engine, used simulate CIM related problem."""
def __init__(
self, event_buffer: EventBuffer, topology: str, start_tick: int, max_tick: int,
snapshot_resolution: int, max_snapshots: int, additional_options: dict = None
):
super().__init__(
"cim", event_buffer, topology, start_tick, max_tick,
snapshot_resolution, max_snapshots, additional_options
)
# Update self._config_path with current file path.
self.update_config_root_path(__file__)
# Load data from wrapper.
self._data_cntr: CimDataContainerWrapper = CimDataContainerWrapper(
self._config_path, max_tick, self._topology
)
# Create a copy of config object to expose to others, and not affect generator.
self._config = {}
config_path = os.path.join(self._config_path, "config.yml")
if os.path.exists(config_path):
with open(config_path) as fp:
self._config = safe_load(fp)
self._vessels = []
self._ports = []
self._frame = None
self._full_on_ports: MatrixAttributeAccessor = None
self._full_on_vessels: MatrixAttributeAccessor = None
self._vessel_plans: MatrixAttributeAccessor = None
self._port_orders_exporter = PortOrderExporter("enable-dump-snapshot" in additional_options)
self._load_cost_factor: float = self._data_cntr.load_cost_factor
self._dsch_cost_factor: float = self._data_cntr.dsch_cost_factor
        # Used to collect the total cost, to avoid too much snapshot querying.
self._total_operate_num: float = 0
self._init_frame()
# Snapshot list should be initialized after frame.
self._snapshots = self._frame.snapshots
self._register_events()
        # As we have already unpacked the route up to the max tick, we can insert all departure events at the beginning.
self._load_departure_events()
        # Since there is no Arrival Event at the very beginning, init the vessel states manually.
self._init_vessel_plans()
self._stream_base_info()
@property
def configs(self):
"""dict: Configurations of CIM business engine."""
return self._config
@property
def frame(self) -> FrameBase:
"""FrameBase: Frame of current business engine."""
return self._frame
@property
def snapshots(self) -> SnapshotList:
"""SnapshotList: Snapshot list of current frame."""
return self._snapshots
def step(self, tick: int):
"""Called at each tick to generate orders and arrival events.
Args:
tick (int): Tick to generate orders.
"""
# At each tick:
# 1. Generate orders for this tick.
# 2. Transfer orders into events (ORDER).
# 3. Check and add vessel arrival event (atom and cascade).
total_empty_number = sum(
[node.empty for node in self._ports + self._vessels])
for order in self._data_cntr.get_orders(tick, total_empty_number):
# Use cascade event to support insert sub events.
order_evt = self._event_buffer.gen_cascade_event(tick, Events.ORDER, order)
self._event_buffer.insert_event(order_evt)
self._port_orders_exporter.add(order)
        # Used to hold the decision events of this tick; we append them at the end
        # to make sure all the other logic has finished.
# TODO: Remove it after event priority is supported.
decision_evt_list = []
for vessel in self._vessels:
vessel_idx: int = vessel.idx
loc_idx: int = vessel.next_loc_idx
stop: Stop = self._data_cntr.vessel_stops[vessel_idx, loc_idx]
port_idx: int = stop.port_idx
            # At the beginning the vessel is parking at a port, so it will not trigger an arrival event.
if loc_idx > 0:
# Check if there is any arrive event.
if stop.arrival_tick == tick:
arrival_payload = VesselStatePayload(port_idx, vessel_idx)
# This vessel will arrive at current tick.
arrival_event = self._event_buffer.gen_atom_event(
tick, Events.VESSEL_ARRIVAL, arrival_payload)
# Then it will load full.
load_event = self._event_buffer.gen_atom_event(
tick, Events.LOAD_FULL, arrival_payload)
self._event_buffer.insert_event(arrival_event)
self._event_buffer.insert_event(load_event)
# Generate cascade event and payload.
decision_payload = DecisionEvent(
tick, port_idx, vessel_idx, self.snapshots, self.action_scope, self.early_discharge
)
decision_event: CascadeEvent = self._event_buffer.gen_decision_event(tick, decision_payload)
decision_evt_list.append(decision_event)
if loc_idx > 0 and stop.arrival_tick == tick:
self._vessel_plans[vessel_idx, port_idx] = stop.arrival_tick
# Insert the cascade events at the end.
for event in decision_evt_list:
self._event_buffer.insert_event(event)
def post_step(self, tick: int):
"""Post-process after each step.
Args:
tick (int): Tick to process.
"""
self._stream_data()
if (tick + 1) % self._snapshot_resolution == 0:
            # Update acc_fulfillment before taking the snapshot.
for port in self._ports:
port.acc_fulfillment = port.acc_booking - port.acc_shortage
            # Before going to the next tick, we will take a snapshot first.
self._frame.take_snapshot(self.frame_index(tick))
# Reset port statistics (by tick) fields.
for port in self._ports:
port.shortage = 0
port.booking = 0
port.fulfillment = 0
port.transfer_cost = 0
return tick + 1 == self._max_tick
def reset(self, keep_seed: bool = False):
"""Reset the business engine, it will reset frame value."""
self._snapshots.reset()
self._frame.reset()
self._reset_nodes()
self._data_cntr.reset(keep_seed)
# Insert departure event again.
self._load_departure_events()
self._init_vessel_plans()
self._total_operate_num = 0
def action_scope(self, port_idx: int, vessel_idx: int) -> ActionScope:
"""Get the action scope of specified agent.
Args:
port_idx (int): Index of specified agent.
vessel_idx (int): Index of specified vessel to take the action.
Returns:
ActionScope: Contains load and discharge scope.
"""
port = self._ports[port_idx]
vessel = self._vessels[vessel_idx]
return ActionScope(load=min(port.empty, vessel.remaining_space), discharge=vessel.empty)
def early_discharge(self, vessel_idx: int) -> int:
"""Get the early discharge number of specified vessel.
Args:
vessel_idx (int): Index of specified vessel.
"""
return self._vessels[vessel_idx].early_discharge
def get_metrics(self) -> DocableDict:
"""Get metrics information for cim scenario.
Args:
dict: A dict that contains "perf", "total_shortage" and "total_cost",
and can use help method to show help docs.
"""
total_shortage = sum([p.acc_shortage for p in self._ports])
total_booking = sum([p.acc_booking for p in self._ports])
return DocableDict(
metrics_desc,
order_requirements=total_booking,
container_shortage=total_shortage,
operation_number=self._total_operate_num
)
def get_node_mapping(self) -> dict:
"""Get node name mappings related with this environment.
Returns:
dict: Node name to index mapping dictionary.
"""
return {
"ports": self._data_cntr.port_mapping,
"vessels": self._data_cntr.vessel_mapping
}
def get_event_payload_detail(self) -> dict:
"""dict: Event payload details of current scenario."""
return {
Events.ORDER.name: Order.summary_key,
Events.RETURN_FULL.name: LadenReturnPayload.summary_key,
Events.VESSEL_ARRIVAL.name: VesselStatePayload.summary_key,
Events.LOAD_FULL.name: VesselStatePayload.summary_key,
Events.DISCHARGE_FULL.name: VesselDischargePayload.summary_key,
Events.PENDING_DECISION.name: DecisionEvent.summary_key,
Events.LOAD_EMPTY.name: Action.summary_key,
Events.DISCHARGE_EMPTY.name: Action.summary_key,
Events.VESSEL_DEPARTURE.name: VesselStatePayload.summary_key,
Events.RETURN_EMPTY.name: EmptyReturnPayload.summary_key
}
def get_agent_idx_list(self) -> list:
"""Get port index list related with this environment.
Returns:
list: A list of port index.
"""
return [i for i in range(self._data_cntr.port_number)]
def dump(self, folder: str):
self._port_orders_exporter.dump(folder)
def _init_nodes(self):
# Init ports.
for port_settings in self._data_cntr.ports:
port = self._ports[port_settings.index]
port.set_init_state(port_settings.name,
port_settings.capacity, port_settings.empty)
# Init vessels.
for vessel_setting in self._data_cntr.vessels:
vessel = self._vessels[vessel_setting.index]
vessel.set_init_state(
vessel_setting.name,
self._data_cntr.container_volume,
vessel_setting.capacity,
self._data_cntr.route_mapping[vessel_setting.route_name],
vessel_setting.empty
)
# Init vessel plans.
self._vessel_plans[:] = -1
def _reset_nodes(self):
# Reset both vessels and ports.
# NOTE: This should be called after frame.reset.
for port in self._ports:
port.reset()
for vessel in self._vessels:
vessel.reset()
# Reset vessel plans.
self._vessel_plans[:] = -1
def _register_events(self):
"""Register events."""
register_handler = self._event_buffer.register_event_handler
register_handler(Events.RETURN_FULL, self._on_full_return)
register_handler(Events.RETURN_EMPTY, self._on_empty_return)
register_handler(Events.ORDER, self._on_order_generated)
register_handler(Events.LOAD_FULL, self._on_full_load)
register_handler(Events.VESSEL_ARRIVAL, self._on_arrival)
register_handler(Events.VESSEL_DEPARTURE, self._on_departure)
register_handler(Events.DISCHARGE_FULL, self._on_discharge)
register_handler(MaroEvents.TAKE_ACTION, self._on_action_received)
def _load_departure_events(self):
"""Insert leaving event at the beginning as we already unpack the root to a loop at the beginning."""
for vessel_idx, stops in enumerate(self._data_cntr.vessel_stops[:]):
for stop in stops:
payload = VesselStatePayload(stop.port_idx, vessel_idx)
dep_evt = self._event_buffer.gen_atom_event(stop.leave_tick, Events.VESSEL_DEPARTURE, payload)
self._event_buffer.insert_event(dep_evt)
def _init_vessel_plans(self):
for vessel in self._vessels:
vessel.is_parking = 1 if vessel.last_loc_idx == vessel.next_loc_idx else 0
stop: Stop = self._data_cntr.vessel_stops[vessel.idx, vessel.last_loc_idx]
vessel.loc_port_idx = stop.port_idx
# Initialize the past and future stop list.
past_stops = self._data_cntr.vessel_past_stops[vessel.idx, vessel.last_loc_idx, vessel.next_loc_idx]
future_stops = self._data_cntr.vessel_future_stops[vessel.idx, vessel.last_loc_idx, vessel.next_loc_idx]
vessel.set_stop_list(past_stops, future_stops)
# Update the vessel plans.
for plan_port_idx, plan_tick in self._data_cntr.vessel_planned_stops[
vessel.idx, vessel.route_idx, vessel.last_loc_idx
]:
self._vessel_plans[vessel.idx, plan_port_idx] = plan_tick
def _init_frame(self):
"""Initialize the frame based on data generator."""
port_num = self._data_cntr.port_number
vessel_num = self._data_cntr.vessel_number
stop_num = (self._data_cntr.past_stop_number,
self._data_cntr.future_stop_number)
self._frame = gen_cim_frame(
port_num, vessel_num, stop_num, self.calc_max_snapshots())
self._ports = self._frame.ports
self._vessels = self._frame.vessels
self._full_on_ports = self._frame.matrix[0]["full_on_ports"]
self._full_on_vessels = self._frame.matrix[0]["full_on_vessels"]
self._vessel_plans = self._frame.matrix[0]["vessel_plans"]
self._init_nodes()
def _get_reachable_ports(self, vessel_idx: int):
"""Get ports that specified vessel can reach (for order), return a list of tuple (port_id, arrival_tick).
Args:
vessel_idx (int): Index of specified vessel.
Returns:
Reachable port index list of specified vessel.
"""
vessel = self._vessels[vessel_idx]
return self._data_cntr.reachable_stops[vessel_idx, vessel.route_idx, vessel.next_loc_idx]
def _get_pending_full(self, src_port_idx: int, dest_port_idx: int):
"""Get pending full number from src_port_idx to dest_port_idx."""
return self._full_on_ports[src_port_idx, dest_port_idx]
def _set_pending_full(self, src_port_idx: int, dest_port_idx: int, value):
"""Set the full number from src_port_idx to dest_port_idx."""
assert value >= 0
self._full_on_ports[src_port_idx, dest_port_idx] = value
def _on_order_generated(self, event: CascadeEvent):
"""When there is an order generated, we should do:
1. Generate a LADEN_RETURN event by configured buffer time: \
The event will be inserted to the immediate_event_list ASAP if the configured buffer time is 0, \
else the event will be inserted to the event buffer directly.
2. Update port state: on_shipper +, empty -.
Args:
event (CascadeEvent): Order event object.
"""
order: Order = event.payload
src_port = self._ports[order.src_port_idx]
execute_qty = order.quantity
src_empty = src_port.empty
src_port.booking += execute_qty
src_port.acc_booking += execute_qty
# Check if there is any shortage.
if src_empty < order.quantity:
# Booking & shortage.
shortage_qty = order.quantity - src_empty
src_port.shortage += shortage_qty
src_port.acc_shortage += shortage_qty
execute_qty = src_empty
# Update port state.
src_port.empty -= execute_qty
        # Full containers that are pending to return.
src_port.on_shipper += execute_qty
buffer_ticks = self._data_cntr.full_return_buffers[src_port.idx]
payload = LadenReturnPayload(
src_port_idx=order.src_port_idx, dest_port_idx=order.dest_port_idx, quantity=execute_qty
)
laden_return_evt = self._event_buffer.gen_atom_event(
tick=event.tick + buffer_ticks, event_type=Events.RETURN_FULL, payload=payload
)
        # If buffer_ticks is 0, we should execute it at this tick.
if buffer_ticks == 0:
event.add_immediate_event(laden_return_evt)
else:
self._event_buffer.insert_event(laden_return_evt)
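        # Worked example with illustrative numbers (not from any topology config):
        # order.quantity=50 but src_port.empty=30 -> shortage_qty=20 and execute_qty=30,
        # so the port books 50, records a shortage of 20, consumes 30 empties, and a
        # RETURN_FULL event for 30 containers is scheduled buffer_ticks later.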
def _on_full_return(self, event: AtomEvent):
"""Handler for processing the event that full containers are returned from shipper.
Once the full containers are returned, the containers are ready to be loaded. The workflow is:
1. First move the container from on_shipper to full (update state: on_shipper -> full).
2. Then append the container to the port pending list.
"""
payload: LadenReturnPayload = event.payload
src_port = self._ports[payload.src_port_idx]
src_port.on_shipper -= payload.quantity
src_port.full += payload.quantity
pending_full_number = self._get_pending_full(
payload.src_port_idx, payload.dest_port_idx)
self._set_pending_full(
payload.src_port_idx, payload.dest_port_idx, pending_full_number + payload.quantity)
def _on_full_load(self, event: AtomEvent):
"""Handler for processing event that a vessel need to load full containers from current port.
When there is a vessel arrive at a port:
1. Discharge full (we ignore this action here, as we will generate a discharge event \
after a vessel have loaded any full).
2. Load full by destination id, and generate discharge event.
3. Update vessel.state to PARKING.
4. Fill future stop list.
5. Early discharge.
Args:
event (AtomEvent): Arrival event object.
"""
arrival_obj: VesselStatePayload = event.payload
vessel_idx: int = arrival_obj.vessel_idx
port_idx: int = arrival_obj.port_idx
vessel = self._vessels[vessel_idx]
port = self._ports[port_idx]
container_volume = self._data_cntr.container_volume
vessel_capacity = vessel.capacity
# Update vessel state.
vessel.last_loc_idx = vessel.next_loc_idx
        # NOTE: This remaining space does not count empty containers, as we can early-discharge them if there is not enough space.
remaining_space = vessel_capacity - vessel.full * container_volume
# How many containers we can load.
acceptable_number = floor(remaining_space / container_volume)
total_load_qty = 0
for next_port_idx, arrival_tick in self._get_reachable_ports(vessel_idx):
full_number_to_next_port = self._get_pending_full(
port_idx, next_port_idx)
if acceptable_number > 0 and full_number_to_next_port > 0:
# We can load some full.
loaded_qty = min(full_number_to_next_port, acceptable_number)
total_load_qty += loaded_qty
# Update port state.
self._set_pending_full(
port_idx, next_port_idx, full_number_to_next_port - loaded_qty)
port.full -= loaded_qty
vessel.full += loaded_qty
# Update state.
self._full_on_vessels[vessel_idx, next_port_idx] += loaded_qty
acceptable_number -= loaded_qty
# Generate a discharge event, as we know when the vessel will arrive at destination.
payload = VesselDischargePayload(vessel_idx, port_idx, next_port_idx, loaded_qty)
dsch_event = self._event_buffer.gen_cascade_event(arrival_tick, Events.DISCHARGE_FULL, payload)
self._event_buffer.insert_event(dsch_event)
# Early discharge.
total_container = vessel.full + vessel.empty
vessel.early_discharge = 0
if total_container * container_volume > vessel.capacity:
early_discharge_number = \
total_container - ceil(vessel.capacity / container_volume)
vessel.empty -= early_discharge_number
port.empty += early_discharge_number
vessel.early_discharge = early_discharge_number
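        # Worked example with illustrative numbers (not from any topology config):
        # capacity=100, container_volume=1, full=60, empty=45 -> total_container=105 > 100,
        # so early_discharge_number = 105 - ceil(100 / 1) = 5 and five empties are moved
        # back to the port before departure.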
def _on_arrival(self, event: AtomEvent):
"""Handler for processing event when there is a vessel arriving at the port.
When the vessel arriving at the port:
1. Update the location index.
2. Update the future stops information of this vessel.
3. Update the vessel plan.
Args:
event (AtomEvent): Arrival event object.
"""
arrival_payload: VesselStatePayload = event.payload
vessel_idx = arrival_payload.vessel_idx
vessel = self._vessels[vessel_idx]
# Update vessel location so that later logic will get correct value.
vessel.last_loc_idx = vessel.next_loc_idx
vessel.is_parking = 1
stop: Stop = self._data_cntr.vessel_stops[vessel.idx, vessel.next_loc_idx]
vessel.loc_port_idx = stop.port_idx
# We should update the future stop list once the vessel arrives.
future_stops = self._data_cntr.vessel_future_stops[vessel.idx, vessel.last_loc_idx, vessel.next_loc_idx]
vessel.set_stop_list(None, future_stops)
# Update vessel plans.
for plan_port_idx, plan_tick in self._data_cntr.vessel_planned_stops[
vessel_idx, vessel.route_idx, vessel.last_loc_idx
]:
self._vessel_plans[vessel_idx, plan_port_idx] = plan_tick
def _on_departure(self, event: AtomEvent):
"""Handler for processing event when there is a vessel leaving from port.
When the vessel departing from port:
1. Update location to next stop.
2. Update the past stops information of this vessel.
Args:
event (AtomEvent): Departure event object.
"""
departure_payload: VesselStatePayload = event.payload
vessel_idx = departure_payload.vessel_idx
vessel = self._vessels[vessel_idx]
        # As we have unfolded all the route stops, we can simply increment the location index.
vessel.next_loc_idx += 1
vessel.is_parking = 0
vessel.loc_port_idx = -1
# We should update the past stop list once the vessel departs.
past_stops = self._data_cntr.vessel_past_stops[vessel.idx, vessel.last_loc_idx, vessel.next_loc_idx]
vessel.set_stop_list(past_stops, None)
def _on_discharge(self, event: CascadeEvent):
"""Handler for processing event the there are some full need to be discharged.
1. Discharge specified qty of full from vessel into port.on_consignee.
2. Generate a empty_return event by configured buffer time:
a. If buffer time is 0, then insert into immediate_event_list to process it ASAP.
b. Or insert into event buffer.
Args:
event (AtomEvent): Discharge event object.
"""
discharge_payload: VesselDischargePayload = event.payload
vessel_idx = discharge_payload.vessel_idx
port_idx = discharge_payload.port_idx
vessel = self._vessels[vessel_idx]
port = self._ports[port_idx]
discharge_qty: int = discharge_payload.quantity
vessel.full -= discharge_qty
port.on_consignee += discharge_qty
self._full_on_vessels[vessel_idx, port_idx] -= discharge_qty
buffer_ticks = self._data_cntr.empty_return_buffers[port.idx]
payload = EmptyReturnPayload(port_idx=port.idx, quantity=discharge_qty)
mt_return_evt = self._event_buffer.gen_atom_event(
tick=event.tick + buffer_ticks, event_type=Events.RETURN_EMPTY, payload=payload
)
if buffer_ticks == 0:
event.add_immediate_event(mt_return_evt)
else:
self._event_buffer.insert_event(mt_return_evt)
def _on_empty_return(self, event: AtomEvent):
"""Handler for processing event when there are some empty container return to port.
Args:
event (AtomEvent): Empty-return event object.
"""
payload: EmptyReturnPayload = event.payload
port = self._ports[payload.port_idx]
port.on_consignee -= payload.quantity
port.empty += payload.quantity
def _on_action_received(self, event: CascadeEvent):
"""Handler for processing actions from agent.
Args:
            event (CascadeEvent): Action event object whose payload is an Action or a list of Actions.
"""
actions = event.payload
if actions:
if type(actions) is not list:
actions = [actions]
for action in actions:
vessel_idx = action.vessel_idx
port_idx = action.port_idx
move_num = action.quantity
vessel = self._vessels[vessel_idx]
port = self._ports[port_idx]
port_empty = port.empty
vessel_empty = vessel.empty
action_type: ActionType = getattr(action, "action_type", None)
# Make it compatible with previous action.
if action_type is None:
action_type = ActionType.DISCHARGE if move_num > 0 else ActionType.LOAD
# Make sure the move number is positive, as we have the action type.
move_num = abs(move_num)
if action_type == ActionType.DISCHARGE:
assert(move_num <= vessel_empty)
port.empty = port_empty + move_num
vessel.empty = vessel_empty - move_num
else:
assert(move_num <= min(port_empty, vessel.remaining_space))
port.empty = port_empty - move_num
vessel.empty = vessel_empty + move_num
# Align the event type to make the output readable.
event.event_type = Events.DISCHARGE_EMPTY if action_type == ActionType.DISCHARGE else Events.LOAD_EMPTY
# Update transfer cost for port and metrics.
self._total_operate_num += move_num
port.transfer_cost += move_num
self._vessel_plans[vessel_idx, port_idx] += self._data_cntr.vessel_period[vessel_idx]
def _stream_base_info(self):
if streamit:
streamit.info(self._scenario_name, self._topology, self._max_tick)
streamit.complex("config", self._config)
def _stream_data(self):
if streamit:
port_number = len(self._ports)
vessel_number = len(self._vessels)
for port in self._ports:
streamit.data(
"port_details", index=port.index, capacity=port.capacity, empty=port.empty, full=port.full,
on_shipper=port.on_shipper, on_consignee=port.on_consignee, shortage=port.shortage,
acc_shortage=port.acc_shortage, booking=port.booking, acc_booking=port.acc_booking,
fulfillment=port.fulfillment, acc_fulfillment=port.acc_fulfillment, transfer_cost=port.transfer_cost
)
for vessel in self._vessels:
streamit.data(
"vessel_details", index=vessel.index, capacity=vessel.capacity, empty=vessel.empty,
full=vessel.full, remaining_space=vessel.remaining_space, early_discharge=vessel.early_discharge,
route_idx=vessel.route_idx, last_loc_idx=vessel.last_loc_idx, next_loc_idx=vessel.next_loc_idx,
past_stop_list=vessel.past_stop_list[:], past_stop_tick_list=vessel.past_stop_tick_list[:],
future_stop_list=vessel.future_stop_list[:], future_stop_tick_list=vessel.future_stop_tick_list[:]
)
vessel_plans = np.array(self._vessel_plans[:]).reshape(vessel_number, port_number)
a, b = np.where(vessel_plans > -1)
for vessel_index, port_index in list(zip(a, b)):
streamit.data(
"vessel_plans", vessel_index=vessel_index,
port_index=port_index, planed_arrival_tick=vessel_plans[vessel_index, port_index]
)
full_on_ports = np.array(self._full_on_ports[:]).reshape(port_number, port_number)
a, b = np.where(full_on_ports > 0)
for from_port_index, to_port_index in list(zip(a, b)):
streamit.data(
"full_on_ports", from_port_index=from_port_index,
dest_port_index=to_port_index, quantity=full_on_ports[from_port_index, to_port_index]
)
full_on_vessels = np.array(self._full_on_vessels[:]).reshape(vessel_number, port_number)
a, b = np.where(full_on_vessels > 0)
for vessel_index, port_index in list(zip(a, b)):
streamit.data(
"full_on_vessels", vessel_index=vessel_index, port_index=port_index,
quantity=full_on_vessels[vessel_index, port_index]
)
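# Minimal driving sketch (not part of the engine): CimBusinessEngine is normally created
# indirectly through maro.simulator.Env rather than constructed by hand. The topology
# name and duration below are illustrative assumptions; stepping with a None action
# simply skips each decision event.
if __name__ == "__main__":
    from maro.simulator import Env
    env = Env(scenario="cim", topology="toy.4p_ssdd_l0.0", start_tick=0, durations=100)
    _, decision_event, is_done = env.step(None)
    while not is_done:
        _, decision_event, is_done = env.step(None)
    print(env.metrics)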
|
{"hexsha": "d3e9ed364f01ca9d15b3a19df892bc05cb919027", "size": 30184, "ext": "py", "lang": "Python", "max_stars_repo_path": "maro/simulator/scenarios/cim/business_engine.py", "max_stars_repo_name": "anukaal/maro", "max_stars_repo_head_hexsha": "21c88f4ef93729d51fc1a5b1a957150c51af2574", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maro/simulator/scenarios/cim/business_engine.py", "max_issues_repo_name": "anukaal/maro", "max_issues_repo_head_hexsha": "21c88f4ef93729d51fc1a5b1a957150c51af2574", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "maro/simulator/scenarios/cim/business_engine.py", "max_forks_repo_name": "anukaal/maro", "max_forks_repo_head_hexsha": "21c88f4ef93729d51fc1a5b1a957150c51af2574", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8731836196, "max_line_length": 120, "alphanum_fraction": 0.6497813411, "include": true, "reason": "import numpy", "num_tokens": 6456}
|
/***************************************************************************
* Copyright (C) 2008 by Mikhail Zaslavskiy *
* mikhail.zaslavskiy@ensmp.fr *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#ifndef ALGORITHM_H
#define ALGORITHM_H
#define EPSILON 1e-100
#include "rpc.h"
#include "graph.h"
#include <math.h>
#include "hungarian.h"
#include <gsl/gsl_blas.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_eigen.h>
#include <vector>
#include <iostream>
/**
Class of graph matching results
*/
class match_result
{
public:
match_result(){gm_P=NULL;gm_P_exact=NULL;salgo="";}
std::vector<double> vd_trace;
int inum_iteration;
double dres;
gsl_matrix* gm_P;
gsl_matrix* gm_P_exact;
double dtime;
double dfvalue;
double dfvalue_exact;
std::string salgo;
~match_result(){};
};
/**
Parent class for all graph matching algorithms
@author Mikhail Zaslavskiy <mikhail.zaslavskiy@ensmp.fr>
*/
class algorithm : public rpc
{
public:
algorithm(std::string );
algorithm();
match_result gmatch(graph& g, graph& h,gsl_matrix* gm_P_i=NULL, gsl_matrix* gm_ldh=NULL,double dalpha_ldh=-1);//common stuff,
virtual match_result match(graph& g, graph& h, gsl_matrix* gm_P_i=NULL, gsl_matrix* gm_ldh=NULL,double dalpha_ldh=-1)=0;//particular method implementation
double graph_dist(graph &g,graph &h,gsl_matrix* gm_P,char cscore_matrix);
double graph_dist(graph &g, graph &h,char cscore_matrix);
~algorithm();
const gsl_matrix* get_ldhmatrix(){return gm_ldh;};
void set_ldhmatrix(const gsl_matrix* _gm_A);
protected:
gsl_matrix *gm_ldh;
double dalpha_ldh;
void update_C_hungarian(gsl_matrix* gm_C,int isign=1, bool bback=false);
double f_qcv(gsl_matrix *gm_Ag_d,gsl_matrix *gm_Ah_d,gsl_matrix* gm_P,gsl_matrix * gm_temp,bool bqcv=false);
char cdesc_matrix,cscore_matrix;
parameter pdebug,pdebug_f;
bool bverbose;
std::string sverbfile;
std::ofstream fverbose;
long long N;
double df_norm;
bool bnosymm;
};
#endif
|
{"hexsha": "e866f1779ad99a76531b02b271358ff9d3266df7", "size": 3326, "ext": "h", "lang": "C", "max_stars_repo_path": "code/clustered_setup/fgm-master/LSGMcode-master/algorithms/graphm-0.52/algorithm.h", "max_stars_repo_name": "mk2510/jointGraphMatchingAndClustering", "max_stars_repo_head_hexsha": "52f579a07d106cb241d21dbc29a2ec9e9c77b254", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2015-08-27T14:10:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-08T21:38:55.000Z", "max_issues_repo_path": "code/clustered_setup/fgm-master/LSGMcode-master/algorithms/graphm-0.52/algorithm.h", "max_issues_repo_name": "mk2510/jointGraphMatchingAndClustering", "max_issues_repo_head_hexsha": "52f579a07d106cb241d21dbc29a2ec9e9c77b254", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2015-02-20T01:53:58.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-24T11:14:00.000Z", "max_forks_repo_path": "code/clustered_setup/fgm-master/LSGMcode-master/algorithms/graphm-0.52/algorithm.h", "max_forks_repo_name": "mk2510/jointGraphMatchingAndClustering", "max_forks_repo_head_hexsha": "52f579a07d106cb241d21dbc29a2ec9e9c77b254", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2016-08-23T11:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-06T01:41:25.000Z", "avg_line_length": 34.2886597938, "max_line_length": 159, "alphanum_fraction": 0.6013229104, "num_tokens": 780}
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import numpy as np
from kglib.utils.graph.iterate import multidigraph_data_iterator, multidigraph_node_data_iterator, \
multidigraph_edge_data_iterator
def encode_types(graph, node_types, edge_types):
node_iterator = multidigraph_node_data_iterator(graph)
encode_categorically(node_iterator, node_types, 'type', 'categorical_type')
edge_iterator = multidigraph_edge_data_iterator(graph)
encode_categorically(edge_iterator, edge_types, 'type', 'categorical_type')
return graph
def create_input_graph(graph, features_field="features"):
input_graph = graph.copy()
augment_data_fields(multidigraph_data_iterator(input_graph),
("input", "categorical_type", "encoded_value"),
features_field)
input_graph.graph[features_field] = np.array([0.0] * 5, dtype=np.float32)
return input_graph
def create_target_graph(graph, features_field="features"):
target_graph = graph.copy()
target_graph = encode_solutions(target_graph, solution_field="solution", encoded_solution_field="encoded_solution",
encodings=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]))
augment_data_fields(multidigraph_data_iterator(target_graph),
("encoded_solution",),
features_field)
target_graph.graph[features_field] = np.array([0.0] * 5, dtype=np.float32)
return target_graph
def augment_data_fields(graph_data_iterator, fields_to_augment, augmented_field):
"""
    Builds a combined feature vector for each graph element by concatenating (augmenting) the given data fields
Args:
graph_data_iterator: iterator over the data for elements in a graph
fields_to_augment: the fields of the data dictionaries to augment together
augmented_field: the field in which to store the augmented fields
Returns:
None, updates the graph in-place
"""
for data in graph_data_iterator:
data[augmented_field] = np.hstack([np.array(data[field], dtype=float) for field in fields_to_augment])
def encode_solutions(graph, solution_field="solution", encoded_solution_field="encoded_solution",
encodings=np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])):
"""
Determines the encoding to use for a solution category
Args:
graph: Graph to update
solution_field: The property in the graph that holds the value of the solution
encoded_solution_field: The property in the graph to use to hold the new solution value
encodings: An array, a row from which will be picked as the new solution based on using the current solution
as a row index
Returns: Graph with updated `encoded_solution_field`
"""
for data in multidigraph_data_iterator(graph):
solution = data[solution_field]
data[encoded_solution_field] = encodings[solution]
return graph
def encode_categorically(graph_data_iterator, all_categories, category_field, encoding_field):
"""
    Encodes the category found in graph data as an integer according to its index in `all_categories`
Args:
graph_data_iterator: An iterator of data in the graph (node data, edge data or combined node and edge data)
all_categories: The full list of categories to be encoded in this order
category_field: The data field containing the category to encode
encoding_field: The data field to use to store the encoding
    Returns:
        None, updates the data dictionaries in-place
    """
for data in graph_data_iterator:
data[encoding_field] = all_categories.index(data[category_field])
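# Minimal usage sketch with hypothetical data: both helpers only need an iterator of
# data dictionaries, so plain dicts are enough to show categorical encoding followed
# by feature augmentation.
if __name__ == "__main__":
    demo_data = [{"type": "person", "value": 1.0}, {"type": "company", "value": 0.0}]
    encode_categorically(iter(demo_data), ["person", "company"], "type", "categorical_type")
    augment_data_fields(iter(demo_data), ("categorical_type", "value"), "features")
    # Each dict now has categorical_type (0 or 1) and features = [categorical_type, value]
    print(demo_data)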
|
{"hexsha": "dddce8726fa95307c3d66c4b854eaf9f07afbcd2", "size": 4430, "ext": "py", "lang": "Python", "max_stars_repo_path": "kglib/kgcn/pipeline/encode.py", "max_stars_repo_name": "lolski/kglib", "max_stars_repo_head_hexsha": "2265009bc066454accb88cdaad8769b920d5df39", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kglib/kgcn/pipeline/encode.py", "max_issues_repo_name": "lolski/kglib", "max_issues_repo_head_hexsha": "2265009bc066454accb88cdaad8769b920d5df39", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kglib/kgcn/pipeline/encode.py", "max_forks_repo_name": "lolski/kglib", "max_forks_repo_head_hexsha": "2265009bc066454accb88cdaad8769b920d5df39", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6422018349, "max_line_length": 119, "alphanum_fraction": 0.7139954853, "include": true, "reason": "import numpy", "num_tokens": 970}
|
import unittest
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
import os
class TestingActivity6_01(unittest.TestCase):
def setUp(self) -> None:
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
self.data = pd.read_csv(os.path.join(ROOT_DIR, '..', 'Datasets', 'boston_house_prices.csv'))
def test_dataset_shape(self):
self.assertEqual(self.data.shape, (506, 14))
def test_decision_tree_scores(self):
data_final = self.data.fillna(-1)
train, val = train_test_split(data_final, test_size=0.2, random_state=11)
x_train = train.drop(columns=['PRICE'])
y_train = train['PRICE'].values
x_val = val.drop(columns=['PRICE'])
y_val = val['PRICE'].values
train_mae_values, val_mae_values = {}, {}
# Decision Tree
dt_params = {
'criterion': 'mae',
'min_samples_leaf': 15,
'random_state': 11
}
dt = DecisionTreeRegressor(**dt_params)
dt.fit(x_train, y_train)
dt_preds_train = dt.predict(x_train)
dt_preds_val = dt.predict(x_val)
train_mae_values['dt'] = mean_absolute_error(y_true=y_train, y_pred=dt_preds_train)
val_mae_values['dt'] = mean_absolute_error(y_true=y_val, y_pred=dt_preds_val)
mae_scores = pd.concat([pd.Series(train_mae_values, name='train'),
pd.Series(val_mae_values, name='val')],
axis=1)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'dt']['train'].values[0], 2.38440594, places=4)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'dt']['val'].values[0], 3.28235294, places=4)
def test_knn_scores(self):
data_final = self.data.fillna(-1)
train, val = train_test_split(data_final, test_size=0.2, random_state=11)
x_train = train.drop(columns=['PRICE'])
y_train = train['PRICE'].values
x_val = val.drop(columns=['PRICE'])
y_val = val['PRICE'].values
train_mae_values, val_mae_values = {}, {}
# k-Nearest Neighbours
knn_params = {
'n_neighbors': 5
}
knn = KNeighborsRegressor(**knn_params)
knn.fit(x_train, y_train)
knn_preds_train = knn.predict(x_train)
knn_preds_val = knn.predict(x_val)
train_mae_values['knn'] = mean_absolute_error(y_true=y_train, y_pred=knn_preds_train)
val_mae_values['knn'] = mean_absolute_error(y_true=y_val, y_pred=knn_preds_val)
mae_scores = pd.concat([pd.Series(train_mae_values, name='train'),
pd.Series(val_mae_values, name='val')],
axis=1)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'knn']['train'].values[0], 3.45554455, places=4)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'knn']['val'].values[0], 3.97803922, places=4)
def test_random_forest_scores(self):
data_final = self.data.fillna(-1)
train, val = train_test_split(data_final, test_size=0.2, random_state=11)
x_train = train.drop(columns=['PRICE'])
y_train = train['PRICE'].values
x_val = val.drop(columns=['PRICE'])
y_val = val['PRICE'].values
train_mae_values, val_mae_values = {}, {}
# Random Forest
rf_params = {
'n_estimators': 20,
'criterion': 'mae',
'max_features': 'sqrt',
'min_samples_leaf': 10,
'random_state': 11,
'n_jobs': -1
}
rf = RandomForestRegressor(**rf_params)
rf.fit(x_train, y_train)
rf_preds_train = rf.predict(x_train)
rf_preds_val = rf.predict(x_val)
train_mae_values['rf'] = mean_absolute_error(y_true=y_train, y_pred=rf_preds_train)
val_mae_values['rf'] = mean_absolute_error(y_true=y_val, y_pred=rf_preds_val)
mae_scores = pd.concat([pd.Series(train_mae_values, name='train'),
pd.Series(val_mae_values, name='val')],
axis=1)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'rf']['train'].values[0], 2.31612005, places=4)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'rf']['val'].values[0], 3.029828431, places=4)
def test_gradient_boosting_scores(self):
data_final = self.data.fillna(-1)
train, val = train_test_split(data_final, test_size=0.2, random_state=11)
x_train = train.drop(columns=['PRICE'])
y_train = train['PRICE'].values
x_val = val.drop(columns=['PRICE'])
y_val = val['PRICE'].values
train_mae_values, val_mae_values = {}, {}
# Gradient Boosting
gbr_params = {
'n_estimators': 20,
'criterion': 'mae',
'max_features': 'sqrt',
'max_depth': 3,
'min_samples_leaf': 10,
'random_state': 11
}
gbr = GradientBoostingRegressor(**gbr_params)
gbr.fit(x_train, y_train)
gbr_preds_train = gbr.predict(x_train)
gbr_preds_val = gbr.predict(x_val)
train_mae_values['gbr'] = mean_absolute_error(y_true=y_train, y_pred=gbr_preds_train)
val_mae_values['gbr'] = mean_absolute_error(y_true=y_val, y_pred=gbr_preds_val)
mae_scores = pd.concat([pd.Series(train_mae_values, name='train'),
pd.Series(val_mae_values, name='val')],
axis=1)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'gbr']['train'].values[0], 2.46343592, places=4)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'gbr']['val'].values[0], 3.058634, places=4)
def test_stacking_model_scores(self):
data_final = self.data.fillna(-1)
train, val = train_test_split(data_final, test_size=0.2, random_state=11)
x_train = train.drop(columns=['PRICE'])
y_train = train['PRICE'].values
x_val = val.drop(columns=['PRICE'])
y_val = val['PRICE'].values
train_mae_values, val_mae_values = {}, {}
# Decision Tree
dt_params = {
'criterion': 'mae',
'min_samples_leaf': 15,
'random_state': 11
}
# k-Nearest Neighbours
knn_params = {
'n_neighbors': 5
}
# Random Forest
rf_params = {
'n_estimators': 20,
'criterion': 'mae',
'max_features': 'sqrt',
'min_samples_leaf': 10,
'random_state': 11,
'n_jobs': -1
}
# Gradient Boosting
gbr_params = {
'n_estimators': 20,
'criterion': 'mae',
'max_features': 'sqrt',
'max_depth': 3,
'min_samples_leaf': 10,
'random_state': 11
}
# stacking model
num_base_predictors = 4
x_train_with_metapreds = np.zeros((x_train.shape[0], x_train.shape[1] + num_base_predictors))
x_train_with_metapreds[:, :-num_base_predictors] = x_train
x_train_with_metapreds[:, -num_base_predictors:] = -1
kf = KFold(n_splits=5, random_state=11)
for train_indices, val_indices in kf.split(x_train):
kfold_x_train, kfold_x_val = x_train.iloc[train_indices], x_train.iloc[val_indices]
kfold_y_train, kfold_y_val = y_train[train_indices], y_train[val_indices]
predictions = []
dt = DecisionTreeRegressor(**dt_params)
dt.fit(kfold_x_train, kfold_y_train)
predictions.append(dt.predict(kfold_x_val))
knn = KNeighborsRegressor(**knn_params)
knn.fit(kfold_x_train, kfold_y_train)
predictions.append(knn.predict(kfold_x_val))
gbr = GradientBoostingRegressor(**gbr_params)
gbr.fit(kfold_x_train, kfold_y_train)
predictions.append(gbr.predict(kfold_x_val))
for i, preds in enumerate(predictions):
x_train_with_metapreds[val_indices, -(i + 1)] = preds
x_val_with_metapreds = np.zeros((x_val.shape[0], x_val.shape[1] + num_base_predictors))
x_val_with_metapreds[:, :-num_base_predictors] = x_val
x_val_with_metapreds[:, -num_base_predictors:] = -1
predictions = []
dt = DecisionTreeRegressor(**dt_params)
dt.fit(x_train, y_train)
predictions.append(dt.predict(x_val))
knn = KNeighborsRegressor(**knn_params)
knn.fit(x_train, y_train)
predictions.append(knn.predict(x_val))
gbr = GradientBoostingRegressor(**gbr_params)
gbr.fit(x_train, y_train)
predictions.append(gbr.predict(x_val))
for i, preds in enumerate(predictions):
x_val_with_metapreds[:, -(i + 1)] = preds
lr = LinearRegression(normalize=True)
lr.fit(x_train_with_metapreds, y_train)
lr_preds_train = lr.predict(x_train_with_metapreds)
lr_preds_val = lr.predict(x_val_with_metapreds)
train_mae_values['lr'] = mean_absolute_error(y_true=y_train, y_pred=lr_preds_train)
val_mae_values['lr'] = mean_absolute_error(y_true=y_val, y_pred=lr_preds_val)
mae_scores = pd.concat([pd.Series(train_mae_values, name='train'),
pd.Series(val_mae_values, name='val')],
axis=1)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'lr']['train'].values[0], 2.24627884, places=2)
self.assertAlmostEqual(mae_scores[mae_scores.index == 'lr']['val'].values[0], 2.87408434, places=2)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "3570b65682ebb2ceada5322d64b6df226c37a769", "size": 10066, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter06/unittests/testActivity6_01.py", "max_stars_repo_name": "nijinjose/The-Supervised-Learning-Workshop", "max_stars_repo_head_hexsha": "33a2fec1e202dc1394116ed7a194bd8cabb61d49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-03-24T20:35:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T19:19:48.000Z", "max_issues_repo_path": "Chapter06/unittests/testActivity6_01.py", "max_issues_repo_name": "thisabhijit/The-Supervised-Learning-Workshop", "max_issues_repo_head_hexsha": "33a2fec1e202dc1394116ed7a194bd8cabb61d49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter06/unittests/testActivity6_01.py", "max_forks_repo_name": "thisabhijit/The-Supervised-Learning-Workshop", "max_forks_repo_head_hexsha": "33a2fec1e202dc1394116ed7a194bd8cabb61d49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 50, "max_forks_repo_forks_event_min_datetime": "2020-01-03T10:22:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-15T07:54:26.000Z", "avg_line_length": 34.830449827, "max_line_length": 110, "alphanum_fraction": 0.6146433539, "include": true, "reason": "import numpy", "num_tokens": 2465}
|
import numpy as np
import random, itertools
from torchio import Transform, DATA
'''
Data augmentation on-the-fly
For Brats data, use affine transformation. Ref: https://www.frontiersin.org/articles/10.3389/fncom.2019.00083/full
Example code https://github.com/pytorch/vision/blob/master/torchvision/transforms/transforms.py
'''
# class Compose(object):
# """Composes several transforms together.
#
# Args:
# transforms (List[Transform]): list of transforms to compose.
#
# Example:
# >>> transforms.Compose([
# >>> transforms.CenterCrop(10),
# >>> transforms.ToTensor(),
# >>> ])
# """
# def __init__(self, transforms, aug_seed=0):
# self.transforms = transforms
# self.set_random_state(aug_seed)
#
# def __call__(self, data):
# for t in self.transforms:
# data = t(data)
# return data
#
# def __repr__(self):
# format_string = self.__class__.__name__ + '('
# for t in self.transforms:
# format_string += '\n'
# format_string += ' {0}'.format(t)
# format_string += '\n)'
# return format_string
#
# def set_random_state(self, seed=None):
# for i, t in enumerate(self.transforms):
# t.set_random_state(seed=(seed+i))
#
# class Transform(object):
# """basse class for all transformation"""
# def set_random_state(self, seed=None):
# self.rng = np.random.RandomState(seed)
#
####################################
# Customized Transformations
####################################
# class ToTensor(Transform):
# """
# Converts a numpy.ndarray (W x H x (D x C)) to a torch.FloatTensor of shape (C x D x W x H).
# """
# def __init__(self, dim=3):
# self.dim = dim
#
# def __call__(self, img):
# if isinstance(img, np.ndarray):
# H, W, _ = img.shape
# # handle numpy array
# img = torch.from_numpy(img.reshape((H,W,-1,self.dim)).transpose((3, 2, 0, 1)))
# # backward compatibility
# return img.float()
class Normalize(Transform):
'''
    Normalize a torch.FloatTensor of shape (C, D, H, W) in place; the raw intensities are unscaled (means/stds in the hundreds).
Given mean and std of size (C, ),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
'''
def __init__(self, mean, std):
self.mean = mean
self.std = std
self.probability = 1.0
def apply_transform(self, sample):
for image_dict, mn, std in zip(sample.get_images_dict().values(), self.mean, self.std):
# max: 1488.2520751953125, min: -0.3330691158771515, mean: 29.41769790649414, std: 94.70357513427734
normalize = image_dict[DATA].sub_(mn).div_(std)
# max: 4.941013813018799, min: -0.4125528335571289, mean: -0.30555957555770874, std: 0.340593159198761
image_dict[DATA] = normalize
return sample
|
{"hexsha": "f69ce960c424fe660cd971000d0e288bccf0c122", "size": 3090, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/brats_transforms.py", "max_stars_repo_name": "weinajin/GloRe_brain", "max_stars_repo_head_hexsha": "ba291206504a4e754fa19811f73aee11d3734b16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/brats_transforms.py", "max_issues_repo_name": "weinajin/GloRe_brain", "max_issues_repo_head_hexsha": "ba291206504a4e754fa19811f73aee11d3734b16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/brats_transforms.py", "max_forks_repo_name": "weinajin/GloRe_brain", "max_forks_repo_head_hexsha": "ba291206504a4e754fa19811f73aee11d3734b16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1136363636, "max_line_length": 115, "alphanum_fraction": 0.572815534, "include": true, "reason": "import numpy", "num_tokens": 777}
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mixerNew.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from pyqtgraph import ImageView
import cv2
import numpy as np
# class Ui_MainWindow(object):
class Ui_MainWindow(QtGui.QMainWindow):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
#MainWindow.resize(1131, 923)
MainWindow.resize(1400, 800)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.gridLayout_2 = QtWidgets.QGridLayout()
self.gridLayout_2.setObjectName("gridLayout_2")
self.img1_combo = QtWidgets.QComboBox(self.centralwidget)
self.img1_combo.setObjectName("comboBox")
self.img1_combo.addItem("")
self.img1_combo.addItem("")
self.img1_combo.addItem("")
self.img1_combo.addItem("")
self.img1_combo.addItem("")
self.gridLayout_2.addWidget(self.img1_combo, 0, 1, 1, 1)
self.gridLayout_3 = QtWidgets.QGridLayout()
self.gridLayout_3.setObjectName("gridLayout_3")
self.img2_label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.img2_label.setFont(font)
self.img2_label.setObjectName("label_2")
self.gridLayout_3.addWidget(self.img2_label, 0, 0, 1, 1)
self.img2_combo = QtWidgets.QComboBox(self.centralwidget)
self.img2_combo.setObjectName("comboBox_2")
self.img2_combo.addItem("")
self.img2_combo.addItem("")
self.img2_combo.addItem("")
self.img2_combo.addItem("")
self.img2_combo.addItem("")
self.gridLayout_3.addWidget(self.img2_combo, 0, 1, 1, 1)
self.img2 = ImageView(self.centralwidget)
self.img2.setObjectName("img2")
self.gridLayout_3.addWidget(self.img2, 1, 0, 1, 1)
self.img2_component = ImageView(self.centralwidget)
self.img2_component.setObjectName("img2_component")
self.gridLayout_3.addWidget(self.img2_component, 1, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout_3, 2, 0, 1, 2)
self.img1 = ImageView(self.centralwidget)
# self.img1 = QtWidgets.QLabel(self.centralwidget)
self.img1.setObjectName("img1")
# self.img1.setScaledContents(True)
# self.img1.setScaledContents(True)
self.gridLayout_2.addWidget(self.img1, 1, 0, 1, 1)
self.img1_component = ImageView(self.centralwidget)
self.img1_component.setObjectName("img1_component")
self.gridLayout_2.addWidget(self.img1_component, 1, 1, 1, 1)
self.img1_label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(20)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.img1_label.setFont(font)
self.img1_label.setObjectName("label")
self.gridLayout_2.addWidget(self.img1_label, 0, 0, 1, 1)
self.verticalLayout.addLayout(self.gridLayout_2)
self.horizontalLayout.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
# self.verticalLayout_2.setEnabled(False)
self.gridLayout_5 = QtWidgets.QGridLayout()
self.gridLayout_5.setObjectName("gridLayout_5")
self.output_channel = QtWidgets.QComboBox(self.centralwidget)
self.output_channel.setObjectName("output_channel")
self.output_channel.addItem("")
self.output_channel.addItem("")
self.output_channel.addItem("")
self.gridLayout_5.addWidget(self.output_channel, 0, 1, 1, 2)
self.Component1 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(17)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.Component1.setFont(font)
self.Component1.setTextFormat(QtCore.Qt.AutoText)
self.Component1.setAlignment(QtCore.Qt.AlignCenter)
self.Component1.setObjectName("Component1")
self.gridLayout_5.addWidget(self.Component1, 2, 0, 1, 1)
self.component1_slider = QtWidgets.QSlider(self.centralwidget)
self.component1_slider.setMaximum(100)
self.component1_slider.setSingleStep(10)
self.component1_slider.setOrientation(QtCore.Qt.Horizontal)
self.component1_slider.setObjectName("component1_slider")
self.component1_slider.setValue(100)
# self.component1_slider.setEnabled(False)
self.gridLayout_5.addWidget(self.component1_slider, 3, 1, 1, 2)
self.component1_img = QtWidgets.QComboBox(self.centralwidget)
self.component1_img.setObjectName("component1_img")
self.component1_img.addItem("")
self.component1_img.addItem("")
self.component1_img.addItem("")
self.gridLayout_5.addWidget(self.component1_img, 2, 1, 1, 1)
self.component2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(17)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.component2.setFont(font)
self.component2.setTextFormat(QtCore.Qt.AutoText)
self.component2.setAlignment(QtCore.Qt.AlignCenter)
self.component2.setObjectName("component2")
self.gridLayout_5.addWidget(self.component2, 5, 0, 1, 1)
self.component2_img = QtWidgets.QComboBox(self.centralwidget)
self.component2_img.setObjectName("component2_img")
self.component2_img.addItem("")
self.component2_img.addItem("")
self.component2_img.addItem("")
self.gridLayout_5.addWidget(self.component2_img, 5, 1, 1, 1)
self.mixer_output = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(17)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.mixer_output.setFont(font)
self.mixer_output.setTextFormat(QtCore.Qt.AutoText)
self.mixer_output.setAlignment(QtCore.Qt.AlignCenter)
self.mixer_output.setObjectName("mixer_output")
self.gridLayout_5.addWidget(self.mixer_output, 0, 0, 1, 1)
self.component1_type = QtWidgets.QComboBox(self.centralwidget)
self.component1_type.setObjectName("component1_type")
self.component1_type.addItem("")
self.component1_type.addItem("")
self.component1_type.addItem("")
self.component1_type.addItem("")
self.component1_type.addItem("")
self.gridLayout_5.addWidget(self.component1_type, 2, 2, 1, 1)
self.component2_type = QtWidgets.QComboBox(self.centralwidget)
self.component2_type.setObjectName("component2_type")
self.component2_type.addItem("")
self.component2_type.addItem("")
self.component2_type.addItem("")
self.component2_type.addItem("")
self.component2_type.addItem("")
self.gridLayout_5.addWidget(self.component2_type, 5, 2, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.gridLayout_5.addItem(spacerItem, 4, 0, 1, 3)
self.component2_slider = QtWidgets.QSlider(self.centralwidget)
self.component2_slider.setMaximum(100)
self.component2_slider.setSingleStep(10)
self.component2_slider.setOrientation(QtCore.Qt.Horizontal)
self.component2_slider.setObjectName("component2_slider")
self.gridLayout_5.addWidget(self.component2_slider, 6, 1, 1, 2)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.gridLayout_5.addItem(spacerItem1, 1, 0, 1, 3)
self.gridLayout_4 = QtWidgets.QGridLayout()
self.gridLayout_4.setObjectName("gridLayout_4")
self.output2 = ImageView(self.centralwidget)
self.output2.setObjectName("output2")
self.gridLayout_4.addWidget(self.output2, 2, 1, 1, 1)
self.output1 = ImageView(self.centralwidget)
self.output1.setObjectName("output1")
self.gridLayout_4.addWidget(self.output1, 2, 0, 1, 1)
self.horizontalLayout_8 = QtWidgets.QHBoxLayout()
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.output1_label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(17)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.output1_label.setFont(font)
self.output1_label.setTextFormat(QtCore.Qt.AutoText)
self.output1_label.setAlignment(QtCore.Qt.AlignCenter)
self.output1_label.setObjectName("output1_label")
self.horizontalLayout_8.addWidget(self.output1_label)
self.output2_label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Ubuntu Mono")
font.setPointSize(17)
font.setBold(True)
font.setItalic(True)
font.setWeight(75)
self.output2_label.setFont(font)
self.output2_label.setTextFormat(QtCore.Qt.AutoText)
self.output2_label.setAlignment(QtCore.Qt.AlignCenter)
self.output2_label.setObjectName("output2_label")
self.horizontalLayout_8.addWidget(self.output2_label)
self.gridLayout_4.addLayout(self.horizontalLayout_8, 1, 0, 1, 2)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
self.gridLayout_4.addItem(spacerItem2, 0, 0, 1, 2)
self.gridLayout_5.addLayout(self.gridLayout_4, 7, 0, 1, 3)
self.verticalLayout_2.addLayout(self.gridLayout_5)
self.horizontalLayout.addLayout(self.verticalLayout_2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1131, 22))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.menuFile.menuAction())
self.actionOpen1 = QtWidgets.QAction(MainWindow)
self.actionOpen1.setObjectName("actionOpen1")
self.actionOpen1.setText("Image 1")
self.menuFile.addAction(self.actionOpen1)
self.actionOpen2 = QtWidgets.QAction(MainWindow)
self.actionOpen2.setObjectName("actionOpen2")
self.actionOpen2.setText("Image 2")
self.menuFile.addAction(self.actionOpen2)
# self.pause = QtWidgets.QPushButton(self.centralwidget)
# self.pause.setGeometry(QtCore.QRect(315, 1, 35, 35))
# self.pause.setText("open")
# self.pause.setObjectName("pause")
# self.pause.setShortcut("Ctrl+o")
# self.output2.close()
# self.img1_component.close()
# self.output1.close()
# self.img1.close()
# self.img2_component.close()
# self.img2.close()
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# self.pause.clicked.connect(lambda:self.opensignal())
#self.actionOpen.triggered.connect(lambda:self.Components())
self.counter=-1
self.images=[self.img1,self.img2,self.img1_component,self.img2_component,self.output1,self.output2]
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.img1_combo.setItemText(0, _translate("MainWindow", "Choose a Component"))
self.img1_combo.setItemText(1, _translate("MainWindow", "Magnitude"))
self.img1_combo.setItemText(2, _translate("MainWindow", "Phase"))
self.img1_combo.setItemText(3, _translate("MainWindow", "Real"))
self.img1_combo.setItemText(4, _translate("MainWindow", "Imaginary"))
self.img2_label.setText(_translate("MainWindow", "Image 2"))
self.img2_combo.setItemText(0, _translate("MainWindow", "Choose a Component"))
self.img2_combo.setItemText(1, _translate("MainWindow", "Magnitude"))
self.img2_combo.setItemText(2, _translate("MainWindow", "Phase"))
self.img2_combo.setItemText(3, _translate("MainWindow", "Real"))
self.img2_combo.setItemText(4, _translate("MainWindow", "Imaginary"))
self.img1_label.setText(_translate("MainWindow", "Image 1"))
self.output_channel.setItemText(0, _translate("MainWindow", "Choose Output Channel"))
self.output_channel.setItemText(1, _translate("MainWindow", "Output 1"))
self.output_channel.setItemText(2, _translate("MainWindow", "Output 2"))
self.Component1.setText(_translate("MainWindow", "Component 1"))
self.component1_img.setItemText(0, _translate("MainWindow", "Choose Image"))
self.component1_img.setItemText(1, _translate("MainWindow", "Image 1"))
self.component1_img.setItemText(2, _translate("MainWindow", "Image 2"))
self.component2.setText(_translate("MainWindow", "Component 2"))
self.component2_img.setItemText(0, _translate("MainWindow", "Choose Image"))
self.component2_img.setItemText(1, _translate("MainWindow", "Image 1"))
self.component2_img.setItemText(2, _translate("MainWindow", "Image 2"))
self.mixer_output.setText(_translate("MainWindow", "Mixer Output"))
self.component1_type.setItemText(0, _translate("MainWindow", "Choose a Component to mix"))
self.component1_type.setItemText(1, _translate("MainWindow", "Magnitude"))
self.component1_type.setItemText(2, _translate("MainWindow", "Phase"))
self.component1_type.setItemText(3, _translate("MainWindow", "Real"))
self.component1_type.setItemText(4, _translate("MainWindow", "Imaginary"))
self.component2_type.setItemText(0, _translate("MainWindow", "Choose a Component to mix"))
self.component2_type.setItemText(1, _translate("MainWindow", "Magnitude"))
self.component2_type.setItemText(2, _translate("MainWindow", "Phase"))
self.component2_type.setItemText(3, _translate("MainWindow", "Real"))
self.component2_type.setItemText(4, _translate("MainWindow", "Imaginary"))
        self.output1_label.setText(_translate("MainWindow", "Output 1"))
self.output2_label.setText(_translate("MainWindow", "Output 2"))
self.menuFile.setTitle(_translate("MainWindow", "Open"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
{"hexsha": "80850b236c85d7c47f7800eac4f0d4e81d7b2138", "size": 15719, "ext": "py", "lang": "Python", "max_stars_repo_path": "FFT-ImageMixer/Part-A/mixer.py", "max_stars_repo_name": "Radwa-Saeed/Didital-Signal-Processing-PyQt-GUI", "max_stars_repo_head_hexsha": "8d97e2925a20dd6a74d4bc0613bfceea668f2731", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FFT-ImageMixer/Part-A/mixer.py", "max_issues_repo_name": "Radwa-Saeed/Didital-Signal-Processing-PyQt-GUI", "max_issues_repo_head_hexsha": "8d97e2925a20dd6a74d4bc0613bfceea668f2731", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FFT-ImageMixer/Part-A/mixer.py", "max_forks_repo_name": "Radwa-Saeed/Didital-Signal-Processing-PyQt-GUI", "max_forks_repo_head_hexsha": "8d97e2925a20dd6a74d4bc0613bfceea668f2731", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.537704918, "max_line_length": 113, "alphanum_fraction": 0.6923468414, "include": true, "reason": "import numpy", "num_tokens": 3525}
|
# Imports
import random
import numpy as np
# Snake classes
class snake_obj:
def __init__(self, head):
self.head = head
@property
def length(self):
length = 0
part = self.head
while part:
part = part.next
length += 1
return length
class snake_part:
def __init__(self, pos, direction) -> None:
self.pos = pos
self.direction = direction
self.previous = None
self.next = None
#draw(snake, apple, game_size, score)
class game:
def __init__(self, game_size=20):
self.game_size = game_size
self.rewards = 0
head = snake_part([random.randint(0, self.game_size-1), random.randint(0, self.game_size-1)], [0, 0])
self.snake = snake_obj(head)
self.apple = [random.randint(0, self.game_size-2), random.randint(0, self.game_size-2)]
self.eaten = False
def draw(self, score=0):
for y in range(self.game_size, -1, -1):
for x in range(0, self.game_size+1):
item = "-"
if self.apple[0] == x and self.apple[1] == y:
item = "@"
# Check snake positions
part = self.snake.head
while part:
if part.pos[0] == x and part.pos[1] == y:
item = "*"
break
part = part.next
print (item, end="")
print ("")
print (f"\n {score}")
def move_snake(self, snake, direction):
# Move to last part
part = snake.head
while part.next:
part = part.next
# Change directions of all the parts
while part.previous:
part.direction = part.previous.direction
part = part.previous
# Change direction of snake head
snake.head.direction = direction
# Change part positions
part = snake.head
while part:
x, y = part.direction
part.pos[0] += x
part.pos[1] += y
part = part.next
def check_collisions(self, snake, game_size):
"""Returns whether the snake has hit the wall or itself"""
head = snake.head
# Wall collisions
if head.pos[0] > game_size or head.pos[0] < 0 or head.pos[1] > game_size or head.pos[1] < 0:
return True
head_x, head_y = head.pos
# Check for snake collisions
part = head.next
while part:
x, y = part.pos
if head_x == x and head_y == y and snake.length > 2:
return True
part = part.next
return False
def run_cycle(self, move):
eaten = False
        # Get initial distance to apple
x1 = self.snake.head.pos[0]
y1 = self.snake.head.pos[1]
distance_to_apple1 = np.sqrt((self.apple[0] - x1)**2 + (self.apple[1] - y1)**2)
if self.eaten:
self.apple = [random.randint(0, self.game_size-2), random.randint(0, self.game_size-2)]
self.eaten = False
if move == "u":
direction = [0, 1]
elif move == "d":
direction = [0, -1]
elif move == "r":
direction = [1, 0]
else:
direction = [-1, 0]
self.move_snake(self.snake, direction)
# Check if apple has been eaten
if self.snake.head.pos[0] == self.apple[0] and self.snake.head.pos[1] == self.apple[1]:
self.eaten = True
eaten = True
# Get to last part of snake
part = self.snake.head
while part.next:
part = part.next
# Add new snake part
part.next = snake_part([part.pos[0] - part.direction[0], part.pos[1] - part.direction[1]], [0, 0])
part.next.previous = part
end = self.check_collisions(self.snake, self.game_size)
# Get final distance to apple
x2 = self.snake.head.pos[0]
y2 = self.snake.head.pos[1]
distance_to_apple2 = np.sqrt((self.apple[0] - x2)**2 + (self.apple[1] - y2)**2)
# Calculate reward
if eaten:
reward = 0.7
elif end:
reward = -0.5
else:
if distance_to_apple2 < distance_to_apple1:
reward = 0.1
else:
reward = -0.1
# Return rewards, game state, and if the game is over
return reward, self, end
print ('done')
if __name__ == '__main__':
a = game()
while True:
a.draw()
move = input()
end = a.run_cycle(move)
print (end)
|
{"hexsha": "b83bfffa1b22761232639df882f214d8b9f0b710", "size": 4668, "ext": "py", "lang": "Python", "max_stars_repo_path": "game.py", "max_stars_repo_name": "NoahBlack012/snake_game_ml", "max_stars_repo_head_hexsha": "0bc77f199e2759f9b43dd7c4bb07994dc121a521", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "game.py", "max_issues_repo_name": "NoahBlack012/snake_game_ml", "max_issues_repo_head_hexsha": "0bc77f199e2759f9b43dd7c4bb07994dc121a521", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "game.py", "max_forks_repo_name": "NoahBlack012/snake_game_ml", "max_forks_repo_head_hexsha": "0bc77f199e2759f9b43dd7c4bb07994dc121a521", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7324840764, "max_line_length": 110, "alphanum_fraction": 0.5158526135, "include": true, "reason": "import numpy", "num_tokens": 1173}
|
// @@@LICENSE
//
// Copyright (c) 2009-2013 LG Electronics, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// LICENSE@@@
#include <iostream>
#include <boost/regex.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include "commands/PopProtocolCommand.h"
#include "commands/PopProtocolCommandErrorResponse.h"
#include "stream/BaseOutputStream.h"
#include "PopConfig.h"
using namespace boost;
using namespace std;
const char* const PopProtocolCommand::STATUS_STRING_OK = "+OK";
const char* const PopProtocolCommand::STATUS_STRING_ERR = "-ERR";
const char* const PopProtocolCommand::CRLF = "\r\n";
PopProtocolCommand::PopProtocolCommand(PopSession& session, Priority priority)
: PopSessionCommand(session, priority),
m_handleResponseSlot(this, &PopProtocolCommand::ReceiveResponse),
m_includesCRLF(false),
m_status(Status_Err),
m_errorCode(MailError::NONE)
{
}
PopProtocolCommand::~PopProtocolCommand()
{
}
void PopProtocolCommand::SendCommand(const std::string& request)
{
try
{
m_requestStr = request; // 'm_requestStr' will be used to report error
std::string reqStr = request + CRLF;
OutputStreamPtr outputStreamPtr = m_session.GetOutputStream();
outputStreamPtr->Write(reqStr.c_str());
MojLogDebug(m_log, "Sent command: '%s'", request.c_str());
m_session.GetLineReader()->WaitForLine(m_handleResponseSlot, PopConfig::READ_TIMEOUT_IN_SECONDS);
} catch (const MailNetworkDisconnectionException& nex) {
NetworkFailure(MailError::NO_NETWORK, nex);
} catch (const std::exception& ex) {
MojLogError(m_log, "Exception in send command: '%s'", ex.what());
m_errorCode = MailError::CONNECTION_FAILED;
NetworkFailure(m_errorCode, ex);
} catch (...) {
MojLogError(m_log, "Unknown exception in sending command");
m_errorCode = MailError::CONNECTION_FAILED;
NetworkFailure(m_errorCode, MailException("Unknown exception in sending command", __FILE__, __LINE__));
}
}
MojErr PopProtocolCommand::ReceiveResponse()
{
try{
m_responseFirstLine = m_session.GetLineReader()->ReadLine(m_includesCRLF);
} catch (const MailNetworkTimeoutException& nex) {
m_errorCode = MailError::CONNECTION_TIMED_OUT;
NetworkFailure(m_errorCode, nex);
} catch (const MailNetworkDisconnectionException& nex) {
m_errorCode = MailError::NO_NETWORK;
NetworkFailure(m_errorCode, nex);
} catch (const std::exception& ex) {
MojLogError(m_log, "Exception in receiving pop response: '%s'", ex.what());
m_errorCode = MailError::CONNECTION_FAILED;
NetworkFailure(m_errorCode, ex);
} catch (...) {
MojLogError(m_log, "Unknown exception in receiving pop response");
m_errorCode = MailError::CONNECTION_FAILED;
NetworkFailure(m_errorCode, MailException("Unknown exception in receiving pop response", __FILE__, __LINE__));
}
if (m_errorCode == MailError::CONNECTION_FAILED
|| m_errorCode == MailError::CONNECTION_TIMED_OUT
|| m_errorCode == MailError::NO_NETWORK) {
return MojErrInternal;
}
try {
MojLogDebug(m_log, "Response %s", m_responseFirstLine.c_str());
ParseResponseFirstLine();
if (m_errorCode == MailError::BAD_USERNAME_OR_PASSWORD
|| m_errorCode == MailError::ACCOUNT_LOCKED
|| m_errorCode == MailError::ACCOUNT_UNAVAILABLE
|| m_errorCode == MailError::ACCOUNT_UNKNOWN_AUTH_ERROR
|| m_errorCode == MailError::ACCOUNT_WEB_LOGIN_REQUIRED) {
m_session.LoginFailure(m_errorCode, m_serverMessage);
Failure(MailException("Login failure", __FILE__, __LINE__));
return MojErrInternal;
} else {
HandleResponse(m_responseFirstLine);
}
} catch (const std::exception& e) {
MojLogError(m_log, "Exception in processing pop response: '%s'", e.what());
Failure(e);
} catch (...) {
MojLogError(m_log, "Unknown exception in processing pop response");
Failure(MailException("Unknown exception in processing pop response", __FILE__, __LINE__));
}
return MojErrNone;
}
void PopProtocolCommand::ParseResponseFirstLine()
{
try{
// initialize status string with the response first line
if (m_responseFirstLine.find(STATUS_STRING_OK) != string::npos) {
m_status = Status_Ok;
m_serverMessage = m_responseFirstLine.substr(strlen(STATUS_STRING_OK));
} else if (m_responseFirstLine.find(STATUS_STRING_ERR) != string::npos) {
m_status = Status_Err;
m_serverMessage = m_responseFirstLine.substr(strlen(STATUS_STRING_ERR));
AnalyzeCommandResponse(m_serverMessage);
} else {
m_status = Status_Err;
m_serverMessage = m_responseFirstLine;
}
} catch (const std::exception& e) {
MojLogError(m_log, "Exception in parsing the first line: '%s'", e.what());
Failure(e);
} catch (...) {
MojLogError(m_log, "Unknown exception parsing the first line");
Failure(MailException("Unknown exception parsing the first line", __FILE__, __LINE__));
}
//MojLogInfo(m_log, "Status: %i, server message: %s", m_status, m_serverMessage.c_str());
}
void PopProtocolCommand::AnalyzeCommandResponse(const std::string& serverMessage)
{
if (!serverMessage.empty() && IsHotmail()) {
AnalyzeHotmailResponse(serverMessage);
}
}
bool PopProtocolCommand::IsHotmail() {
std::string host = m_session.GetAccount()->GetHostName();
return host.find("live.com") != std::string::npos;
}
void PopProtocolCommand::AnalyzeHotmailResponse(const std::string& serverMessage)
{
	// Don't want to catch the exception in the following code. Just let it be thrown and
	// caught as an app error so that we can be notified in the RDX report and fix it accordingly.
MojLogError(m_log, "server msg:%s", serverMessage.data());
if (boost::icontains(serverMessage, PopProtocolCommandErrorResponse::HOTMAIL_LOGIN_TOO_FREQUENT)||
boost::icontains(serverMessage, PopProtocolCommandErrorResponse::HOTMAIL_LOGIN_LIMIT_EXCEED)||
boost::icontains(serverMessage, PopProtocolCommandErrorResponse::HOTMAIL_MAILBOX_NOT_OPENED)) {
MojLogError(m_log, "ACCOUNT_UNAVAILABLE");
m_errorCode = MailError::ACCOUNT_UNAVAILABLE;
return;
}
if (boost::icontains(serverMessage, PopProtocolCommandErrorResponse::HOTMAIL_MAILBOX_NOT_OPENED)) {
m_errorCode = MailError::ACCOUNT_UNAVAILABLE;
return;
}
if (boost::icontains(serverMessage, PopProtocolCommandErrorResponse::HOTMAIL_WEB_LOGIN_REQUIRED)) {
m_errorCode = MailError::ACCOUNT_WEB_LOGIN_REQUIRED;
return;
}
}
void PopProtocolCommand::AnalyzeMailException(const MailException& ex)
{
MojRefCountedPtr<SocketConnection> connection = m_session.GetConnection();
if (connection.get() && connection->GetErrorInfo().errorCode != MailError::NONE) {
NetworkFailure(MailError::CONNECTION_FAILED, ex);
} else {
MojLogError(m_log, "Mail exception in receiving multilines response: '%s'", ex.what());
Failure(ex);
}
}
|
{"hexsha": "ee4cc408f26513b43671e48e26d700fb5ff44f4b", "size": 7159, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "pop/src/commands/PopProtocolCommand.cpp", "max_stars_repo_name": "webOS-ports/mojomail", "max_stars_repo_head_hexsha": "49358ac2878e010f5c6e3bd962f047c476c11fc3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2015-01-09T02:20:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-02T08:14:23.000Z", "max_issues_repo_path": "mojomail/pop/src/commands/PopProtocolCommand.cpp", "max_issues_repo_name": "openwebos/app-services", "max_issues_repo_head_hexsha": "021d509d609fce0cb41a0e562650bdd1f3bf4e32", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2019-05-11T19:17:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T16:04:36.000Z", "max_forks_repo_path": "mojomail/pop/src/commands/PopProtocolCommand.cpp", "max_forks_repo_name": "openwebos/app-services", "max_forks_repo_head_hexsha": "021d509d609fce0cb41a0e562650bdd1f3bf4e32", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2015-01-09T02:21:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-02T02:37:10.000Z", "avg_line_length": 36.5255102041, "max_line_length": 112, "alphanum_fraction": 0.7551333985, "num_tokens": 1801}
|
import numpy as np
import sqlite3
from utils import image_ids_to_pair_id, pair_id_to_image_ids, blob_to_array, array_to_blob
from database import COLMAPDatabase
class MatchesList:
def __init__(self, num_images, database=None):
        # `[[]] * N` creates a list containing the same list object N times!!!
# self.matches_list = [[]] * num_images
self.matches_list = [[] for _ in range(num_images)]
if database is not None:
matches_results = database.execute("select * FROM matches")
for matches_result in matches_results:
pair_id, rows, cols, matches = matches_result
image_id1, image_id2 = pair_id_to_image_ids(pair_id)
# if image_id1 == 1:
# print(image_id1, image_id2)
matches = blob_to_array(matches, np.uint32, (rows, cols))
# print(image_id1, image_id2, rows)
if rows > 0:
# print(image_id1, image_id2)
matches_1 = self._sort_matches(matches)
matches_2 = matches[:, [1,0]]
# image_id is 1-based
# we keep this convention and only subtract it by 1 when we use it as index
# we make this adjacency list symmetric for easier construction of tracks
# at the expense of memory and computation(sorting)
self.matches_list[image_id1-1].append((image_id2, matches_1))
self.matches_list[image_id2-1].append((image_id1, matches_2))
# self.matches_list[image_id1-1].append(image_id2)
# self.matches_list[image_id2-1].append(image_id1)
# print(self.matches_list[0])
# print(self.matches_list[1])
# print(self.matches_list[2])
# print(self.matches_list[3])
# print([nbr[0] for nbr in self.matches_list[2]])
# print(self.matches_list[1][:3])
def __getitem__(self, idx):
return self.matches_list[idx]
def _sort_matches(self, matches):
# matches (n, 2) array.
# sort according to first col
order = np.argsort(matches[:, 0])
return matches[order, :]
# class Tracks:
# def __init__(self):
# self.track_list = []
# pass
def test_matchlist():
db = COLMAPDatabase.connect("data/yan2017/colmap_street/match.db")
image_results = db.execute("SELECT * FROM images")
num_images = len(list(image_results))
# print(num_images)
matches_list = MatchesList(num_images, database=db)
if __name__ == "__main__":
test_matchlist()
|
{"hexsha": "a69e396120288ecde3d34a6fece99dbc64b68767", "size": 2656, "ext": "py", "lang": "Python", "max_stars_repo_path": "yan2017/matches_list.py", "max_stars_repo_name": "lxxue/colmap", "max_stars_repo_head_hexsha": "1e2cbf61160205fd53d9fe7a1a8df359b870cae3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "yan2017/matches_list.py", "max_issues_repo_name": "lxxue/colmap", "max_issues_repo_head_hexsha": "1e2cbf61160205fd53d9fe7a1a8df359b870cae3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "yan2017/matches_list.py", "max_forks_repo_name": "lxxue/colmap", "max_forks_repo_head_hexsha": "1e2cbf61160205fd53d9fe7a1a8df359b870cae3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6417910448, "max_line_length": 95, "alphanum_fraction": 0.6005271084, "include": true, "reason": "import numpy", "num_tokens": 612}
|
import numpy as np
import numba as nb
import mcmc.util as util
import mcmc.util_2D as u2
spec = [
('basis_number', nb.int64),
('extended_basis_number', nb.int64),
('t_end', nb.float64),
('t_start', nb.float64),
('dt', nb.float64),
('t', nb.float64[::1]),
('index', nb.int64[:,::1]),
('Dmatrix', nb.float64[:,::1]),
('Imatrix', nb.float64[:,::1]),
('cosFun', nb.float64[:,::1]),
('sinFun', nb.float64[:,::1]),
('eigenFun', nb.complex128[:,::1]),
('prepared',nb.boolean)
]
@nb.jitclass(spec)
class FourierAnalysis:
def __init__(self, basis_number,extended_basis_number,t_start = 0,t_end=1):
self.basis_number = basis_number
self.extended_basis_number = extended_basis_number
self.t_end = t_end
self.t_start = t_start
self.Dmatrix = -(2*np.pi)**2*np.diag(np.arange(-(self.basis_number-1),self.basis_number)**2)
self.Imatrix = np.eye(2*self.basis_number-1)
self.prepare()
def prepare(self):
self.t = np.linspace(self.t_start,self.t_end,self.extended_basis_number)
self.dt = self.t[1] - self.t[0]
self.eigenFun = np.empty((self.basis_number,self.extended_basis_number),dtype=np.complex128)
self.cosFun = np.empty((self.basis_number,self.extended_basis_number),dtype=np.float64)
self.sinFun = np.empty((self.basis_number,self.extended_basis_number),dtype=np.float64)
for i in range(self.basis_number):
self.eigenFun[i,:] = util.eigenFunction1D(-i,self.t)
self.cosFun[i,:] = np.cos(2*np.pi*self.t*i)
self.sinFun[i,:] = np.sin(2*np.pi*self.t*i)
self.index = self.createUindex()
self.prepared = True
def inverseFourierLimited(self,uHalf):
y = np.zeros(self.sinFun.shape[1],dtype=np.float64)
for i in range(1,len(uHalf)):
for j in range(self.sinFun.shape[1]):
y[j] += 2*(uHalf[i].real*self.cosFun[i,j] - uHalf[i].imag*self.sinFun[i,j])
# y += 2*(u[i].real*cosFun[i,:] - u[i].imag*sinFun[i,:])
for j in range(self.sinFun.shape[1]):
y[j] += uHalf[0].real
# y += u[0].real
return y
def fourierTransformHalf(self,ut):
uHalf = np.zeros(self.eigenFun.shape[0],np.complex128)
for i in range(self.eigenFun.shape[0]):
# uHalf[i] = inner(ut,eigenFun[i,:])*dt
uHalf[i] = util.inner(ut,self.eigenFun[i,:])
return uHalf*self.dt
def constructU(self,uHalf):
"""
Construct Toeplitz Matrix
"""
        #the native scipy toeplitz function does not work in nopython mode
# uFull = np.concatenate((uHalf,np.zeros(len(uHalf)-1)), axis=None)
# U = sla.toeplitz(uFull).conj()
# LU = len(uHalf)
        #np.zeros is only supported with two arguments, but somehow if I include dtype in np.zeros
        #it is not working, so this is the solution
Ushape = (2*self.basis_number-1,2*self.basis_number-1)
U = np.zeros(Ushape,dtype=np.complex128)
# U = U.astype(complex)
for i in nb.prange(2*self.basis_number-1):
for j in nb.prange(2*self.basis_number-1):
index = i-j #(j-i)#
if 0<= index <self.basis_number :
U[i,j] = uHalf[index]
continue
if 0< -index < self.basis_number:
U[i,j] = uHalf[-index].conjugate()
# continue
return U
def constructMatexplicit(self,uHalf,fun):
temp = fun(self.inverseFourierLimited(uHalf))
temp2 = self.fourierTransformHalf(temp)
return self.constructU(temp2)
def createUindex(self):
shape = (2*self.basis_number-1,2*self.basis_number-1)
index = np.zeros(shape,dtype=np.int64)
for i in nb.prange(2*self.basis_number-1):
for j in nb.prange(2*self.basis_number-1):
index[i,j] = (i-j)+(2*self.basis_number-1)
return index
def constructU_with_Index(self,uHalf):
uprepared = util.extend(util.symmetrize(uHalf),2*self.basis_number)
# with nb.objmode(U='complex128[:,:]'):
# res = u.extend2D(symmetrize_2D(uHalf),2*n-1)[index]
U = uprepared[self.index]
return U
# spec2D = [
# ('basis_number', nb.int64),
# ('extended_basis_number', nb.int64),
# ('basis_number_2D', nb.int64),
# ('basis_number_2D_sym', nb.int64),
# ('extended_basis_number_2D', nb.int64),
# ('extended_basis_number_2D_sym', nb.int64),
# ('t_end', nb.float64),
# ('t_start', nb.float64),
# ('dt', nb.float64),
# ('t', nb.float64[::1]),
# ('Dmatrix', nb.float64[:,::1]),
# ('Imatrix', nb.float64[:,::1]),
# ('ix', nb.int64[:,::1]),
# ('iy', nb.int64[:,::1]),
# ('Index',nb.typeof(u2.createUindex(2)))
# ]
# ORDER = 'C'
# @nb.jitclass(spec2D)
# class FourierAnalysis_2D:
# def __init__(self,basis_number,extended_basis_number,t_start = 0,t_end=1):
# self.basis_number = basis_number
# self.extended_basis_number = extended_basis_number
# self.basis_number_2D = (2*basis_number-1)*basis_number
# self.basis_number_2D_sym = (2*basis_number-1)*(2*basis_number-1)
# self.extended_basis_number_2D = (2*extended_basis_number-1)*extended_basis_number
# self.extended_basis_number_2D_sym = (2*extended_basis_number-1)*(2*extended_basis_number-1)
# self.t_end = t_end
# self.t_start = t_start
# self.ix = np.zeros(2*self.basis_number-1,2*self.basis_number-1,dtype=np.int64)
# self.iy = np.zeros(2*self.basis_number-1,2*self.basis_number-1,dtype=np.int64)
# temp = np.arange(-(self.basis_number-1),self.basis_number)
# for i in range(2*self.basis_number-1)
# self.ix[i,:] = temp
# self.iy[:,i] = temp
# # d_diag = np.zeros((2*self.basis_number-1)**2)
# # for i in range(2*self.basis_number-1):
# # for j in range(2*self.basis_number-1):
# # d_diag[i*10+j] = (i**2+j**2)
# # self.Dmatrix = -(2*np.pi)**2*np.diag(d_diag)
# self.Imatrix = np.eye((2*self.basis_number-1)**2)
# Index = u2.createUindex(self.basis_number)
# self.Index = Index
# def inverseFourierLimited(self,uHalf):
# return u2.irfft2(uHalf,self.extended_basis_number)
# def fourierTransformHalf(self,z):
# return u2.rfft2(z,self.basis_number)
# def constructU(self,uHalf):
# """
# Construct Toeplitz Matrix
# """
# return u2.constructU(uHalf,self.Index)
# def constructMatexplicit(self,uHalf,fun):
# temp = fun(self.inverseFourierLimited(uHalf))
# temp2 = self.fourierTransformHalf(temp)
# return self.constructU(temp2)
|
{"hexsha": "537a0e2b8d51e529fdf130c54fcc352a41f65c70", "size": 7043, "ext": "py", "lang": "Python", "max_stars_repo_path": "Legacy/mcmc/fourier.py", "max_stars_repo_name": "puat133/MCMC-MultiSPDE", "max_stars_repo_head_hexsha": "2beca39f32c0cdd7664baeacd495b193850d8e7d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-23T09:32:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-23T09:32:43.000Z", "max_issues_repo_path": "Legacy/mcmc/fourier.py", "max_issues_repo_name": "puat133/MCMC-MultiSPDE", "max_issues_repo_head_hexsha": "2beca39f32c0cdd7664baeacd495b193850d8e7d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Legacy/mcmc/fourier.py", "max_forks_repo_name": "puat133/MCMC-MultiSPDE", "max_forks_repo_head_hexsha": "2beca39f32c0cdd7664baeacd495b193850d8e7d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.790960452, "max_line_length": 101, "alphanum_fraction": 0.5788726395, "include": true, "reason": "import numpy,import numba", "num_tokens": 1957}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
data = pd.read_csv('201213177_data.csv', engine='python')
# Remove the column named 'municipio' because it's not part of the data analysis.
data_var = data.drop(['municipio'], axis=1)
del data_var['2020-11-11']
# Normalize the data
data_norm = (data_var-data_var.min())/(data_var.max() - data_var.min())
# Search for the optimal number of clusters
# Calculating how similar the individuals are within the clusters.
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i, max_iter=300)
# Apply KMeans to the database
kmeans.fit(data_norm)
wcss.append(kmeans.inertia_)
# Plotting WCSS Results to Form the Jambu Elbow
# WCSS. It is an indicator of how similar individuals are within the clusters.
# Uncomment the following lines if you want to see how Jambu Elbow looks like.
# plt.plot(range(1,11), wcss)
# plt.title('Jambu Elbow')
# plt.xlabel('# Clusters')
# plt.ylabel('WCSS')
# plt.show()
# Applying the k-means method to the database
# Create the model
clustering = KMeans(n_clusters=3, max_iter=300)
# Apply the model to the database
clustering.fit(data_norm)
# Adding the classification to the original file
# The results of clustering are saved in labels_ inside the model
data['KMeans_Clusters'] = clustering.labels_
# print(data)
# Visualizing the clusters that were formed
# We will apply principal component analysis to get an idea of how the clusters were formed
pca = PCA(n_components=2)
pca_covid = pca.fit_transform(data_norm)
pca_covid_df = pd.DataFrame(data=pca_covid, columns=['Component_1', 'Component_2'])
pca_covid_towns = pd.concat([pca_covid_df, data[['KMeans_Clusters']]], axis=1)
# print(pca_covid_towns)
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Component 1', fontsize=15)
ax.set_ylabel('Component 2', fontsize=15)
ax.set_title('Principal Components', fontsize=20)
color_theme = np.array(['blue', 'green', 'orange'])
ax.scatter(x=pca_covid_towns.Component_1, y=pca_covid_towns.Component_2, c=color_theme[pca_covid_towns.KMeans_Clusters], s=50)
plt.show()
|
{"hexsha": "31e060dadfbdbddaaae0cad157687dab55fa4bb4", "size": 2191, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/201213177.py", "max_stars_repo_name": "luisferliza/CoronavirusML", "max_stars_repo_head_hexsha": "5298b6f000ee5597de55f850a963fa15004b44d6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-26T03:21:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-02T15:03:59.000Z", "max_issues_repo_path": "src/201213177.py", "max_issues_repo_name": "luisferliza/CoronavirusML", "max_issues_repo_head_hexsha": "5298b6f000ee5597de55f850a963fa15004b44d6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2020-11-10T02:52:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-16T07:32:26.000Z", "max_forks_repo_path": "src/201213177.py", "max_forks_repo_name": "luisferliza/CoronavirusML", "max_forks_repo_head_hexsha": "5298b6f000ee5597de55f850a963fa15004b44d6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 50, "max_forks_repo_forks_event_min_datetime": "2020-11-10T02:08:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-16T06:33:34.000Z", "avg_line_length": 34.7777777778, "max_line_length": 126, "alphanum_fraction": 0.7562756732, "include": true, "reason": "import numpy", "num_tokens": 589}
|
from scipy.stats import norm
import numpy as np
alpha = 0.05
z = norm().ppf( 1 - alpha / 2)
p = 0.85
N = 100
Cn = (p - z * np.sqrt(p * (1 - p) / N), p + z * np.sqrt(p * (1 - p) / N))
_ = ["%0.4f" % x for x in Cn]
print(_)
# bootstrap
X = np.zeros(N)
X[:85] = 1
B = []
for _ in range(500):
s = np.random.randint(X.shape[0], size=X.shape[0])
B.append(X[s].mean())
se = np.sqrt(np.var(B))
Cn = (p - z * se, p + z * se)
_ = ["%0.4f" % x for x in Cn]
print(_)
Cn = (np.percentile(B, alpha * 100), np.percentile(B, (1 - alpha) * 100))
_ = ["%0.4f" % x for x in Cn]
print(_)
from sklearn.datasets import load_iris
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split, StratifiedKFold
X, y = load_iris(return_X_y=True)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)
model = GaussianNB().fit(Xtrain, ytrain)
hy = model.predict(Xtest)
X = np.where(ytest == hy, 1, 0)
p = X.mean()
N = X.shape[0]
Cn = (p - z * np.sqrt(p * (1 - p) / N), p + z * np.sqrt(p * (1 - p) / N))
_ = ["%0.4f" % x for x in Cn]
print(_)
# B = []
# for _ in range(5000):
# s = np.random.randint(X.shape[0], size=X.shape[0])
# B.append(X[s].mean())
#
# Cn = (np.percentile(B, alpha * 100), np.percentile(B, (1 - alpha) * 100))
# _ = ["%0.4f" % x for x in Cn]
# print(_)
#
# se = np.sqrt(np.var(B))
# Cn = (p - z * se, p + z * se)
# _ = ["%0.4f" % x for x in Cn]
# print(_)
X, y = load_iris(return_X_y=True)
kf = StratifiedKFold(n_splits=10, shuffle=True)
hy = np.empty_like(y)
for tr, ts in kf.split(X, y):
model = GaussianNB().fit(X[tr], y[tr])
hy[ts] = model.predict(X[ts])
X = np.where(y == hy, 1, 0)
p = X.mean()
N = X.shape[0]
Cn = (p - z * np.sqrt(p * (1 - p) / N), p + z * np.sqrt(p * (1 - p) / N))
_ = ["%0.4f" % x for x in Cn]
print(_)
B = []
for _ in range(500):
s = np.random.randint(X.shape[0], size=X.shape[0])
B.append(X[s].mean())
se = np.sqrt(np.var(B))
Cn = (p - z * se, p + z * se)
_ = ["%0.4f" % x for x in Cn]
print(_)
Cn = (np.percentile(B, alpha * 100), np.percentile(B, (1 - alpha) * 100))
_ = ["%0.4f" % x for x in Cn]
print(_)
from mlxtend.evaluate import bootstrap_point632_score
X, y = load_iris(return_X_y=True)
cl = GaussianNB()
B = bootstrap_point632_score(cl, X, y, n_splits=500)
Cn = (np.percentile(B, alpha * 100), np.percentile(B, (1 - alpha) * 100))
_ = ["%0.4f" % x for x in Cn]
print(_)
### macro Recall
from scipy.stats import norm
import numpy as np
from sklearn.datasets import load_iris
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import recall_score
alpha = 0.05
z = norm().ppf( 1 - alpha / 2)
X, y = datasets.load_breast_cancer(return_X_y=True)
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
hy = np.empty_like(y)
for tr, ts in kf.split(X, y):
# model = RandomForestClassifier().fit(X[tr], y[tr])
model = GaussianNB().fit(X[tr], y[tr])
hy[ts] = model.predict(X[ts])
B = []
for _ in range(500):
s = np.random.randint(X.shape[0], size=X.shape[0])
_ = recall_score(y[s], hy[s], average="macro")
B.append(_)
p = np.mean(B)
se = np.sqrt(np.var(B))
Cn = (p - z * se, p + z * se)
_ = ["%0.4f" % x for x in Cn]
print(_)
Cn = (np.percentile(B, alpha * 100), np.percentile(B, (1 - alpha) * 100))
_ = ["%0.4f" % x for x in Cn]
print(_)
from mlxtend.evaluate import bootstrap_point632_score
X, y = datasets.load_breast_cancer(return_X_y=True)
cl = GaussianNB()
cl = RandomForestClassifier()
B = bootstrap_point632_score(cl, X, y, n_splits=500,
scoring_func=lambda y, hy: recall_score(y, hy, average="macro"))
Cn = (np.percentile(B, alpha * 100), np.percentile(B, (1 - alpha) * 100))
_ = ["%0.4f" % x for x in Cn]
print(_)
## Wilcoxon
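# Compare two classifiers by computing the macro-recall of each on the same stratified
# folds and applying the Wilcoxon signed-rank test to the paired per-fold scores.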
import numpy as np
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import recall_score, f1_score
from scipy.stats import wilcoxon
K = 30
kf = StratifiedKFold(n_splits=K, shuffle=True, random_state=0)
X, y = datasets.load_breast_cancer(return_X_y=True)
P = []
for tr, ts in kf.split(X, y):
forest = RandomForestClassifier().fit(X[tr], y[tr]).predict(X[ts])
# naive = ExtraTreesClassifier().fit(X[tr], y[tr]).predict(X[ts])
naive = GaussianNB().fit(X[tr], y[tr]).predict(X[ts])
P.append([recall_score(y[ts], hy, average="macro") for hy in [forest, naive]])
P = np.array(P)
print(wilcoxon(P[:, 0], P[:, 1]))
p = P[:, 0] - P[:, 1]
_ = np.sqrt(K) * np.mean(p) / np.std(p)
print("%0.4f" % _)
forest = np.empty_like(y)
naive = np.empty_like(y)
alpha=0.05
for tr, ts in kf.split(X, y):
forest[ts] = RandomForestClassifier().fit(X[tr], y[tr]).predict(X[ts])
naive[ts] = GaussianNB().fit(X[tr], y[tr]).predict(X[ts])
B = []
for _ in range(500):
s = np.random.randint(X.shape[0], size=X.shape[0])
f = recall_score(y[s], forest[s], average="macro")
n = recall_score(y[s], naive[s], average="macro")
B.append([f, n])
Cn = (np.percentile(B, alpha * 100, axis=0), np.percentile(B, (1 - alpha) * 100, axis=0))
_ = ["%0.4f" % x for x in Cn[0]]
print(_)
_ = ["%0.4f" % x for x in Cn[1]]
print(_)
alpha = 0.0125
z = norm().ppf( 1 - alpha / 2)
p = 0.87
N = 1000
Cn = (p - z * np.sqrt(p * (1 - p) / N), p + z * np.sqrt(p * (1 - p) / N))
_ = ["%0.4f" % x for x in Cn]
print(_)
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
X, y = datasets.load_boston(return_X_y=True)
K = 10
kf = KFold(n_splits=K, shuffle=True, random_state=0)
hy = np.empty_like(y)
for tr, ts in kf.split(X, y):
hy[ts] = LinearRegression().fit(X[tr], y[tr]).predict(X[ts])
B = []
for _ in range(500):
s = np.random.randint(X.shape[0], size=X.shape[0])
f = r2_score(y[s], hy[s])
B.append(f)
alpha = 0.05
Cn = (np.percentile(B, alpha * 100, axis=0), np.percentile(B, (1 - alpha) * 100, axis=0))
["%0.2f" % x for x in Cn]
|
{"hexsha": "a2e443cf2d8523d2e7f91a6b51c3a7436d3beda7", "size": 6305, "ext": "py", "lang": "Python", "max_stars_repo_path": "codigo/comparacion.py", "max_stars_repo_name": "INGEOTEC/AprendizajeComputacional", "max_stars_repo_head_hexsha": "96d2bab8911313d2655cfc05965393c01c4efac9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-05T16:53:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-01T05:36:52.000Z", "max_issues_repo_path": "codigo/comparacion.py", "max_issues_repo_name": "INGEOTEC/AprendizajeComputacional", "max_issues_repo_head_hexsha": "96d2bab8911313d2655cfc05965393c01c4efac9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codigo/comparacion.py", "max_forks_repo_name": "INGEOTEC/AprendizajeComputacional", "max_forks_repo_head_hexsha": "96d2bab8911313d2655cfc05965393c01c4efac9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-05-17T19:06:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-06T16:58:07.000Z", "avg_line_length": 27.1767241379, "max_line_length": 93, "alphanum_fraction": 0.6285487708, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2212}
|
/**
* @file stabrk3.cc
* @brief NPDE homework StabRK3 code
* @author Unknown, Oliver Rietmann, Philippe Peter
* @date 13.04.2021
* @copyright Developed at ETH Zurich
*/
#include "stabrk3.h"
#include <Eigen/Core>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <vector>
namespace StabRK3 {
/* SAM_LISTING_BEGIN_0 */
Eigen::Vector2d PredPrey(Eigen::Vector2d y0, double T, unsigned int M) {
double h = T / M;
Eigen::Vector2d y = y0;
#if SOLUTION
  // Define right-hand-side function for the Lotka-Volterra ODE
auto f = [](Eigen::Vector2d y) -> Eigen::Vector2d {
return {(1 - y(1)) * y(0), (y(0) - 1) * y(1)};
};
  // Main timestepping loop: uniform stepsize
for (int j = 0; j < M; ++j) {
// Compute increments and updates according to \lref{def:rk} for the method
// described by the Butcher scheme \prbeqref{eq:rkesv}
Eigen::Vector2d k1 = f(y);
Eigen::Vector2d k2 = f(y + h * k1);
Eigen::Vector2d k3 = f(y + (h / 4.) * k1 + (h / 4.) * k2);
y = y + (h / 6.) * k1 + (h / 6.) * k2 + (2. * h / 3.) * k3;
}
#else
//====================
// Your code goes here
//====================
#endif
return y;
}
/* SAM_LISTING_END_0 */
/* SAM_LISTING_BEGIN_1 */
void SimulatePredPrey() {
#if SOLUTION
// Parameters
double T = 1.0;
Eigen::Vector2d y0(100.0, 1.0);
// (Approximate) reference solution
Eigen::Vector2d y_ref = PredPrey(y0, T, std::pow(2, 14));
Eigen::ArrayXd error(12);
Eigen::ArrayXd M(12);
// Studying the error for geometrically increasing numbers of equidistant
// timesteps is the most appropriate approach to empirically exploring
// algebraic convergence.
M << 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192;
// Compute errors
for (int i = 0; i < M.size(); ++i) {
Eigen::Vector2d y = PredPrey(y0, T, M(i));
error(i) = (y - y_ref).norm();
}
// Print error table
PrintErrorTable(M, error);
#else
//====================
// Your code goes here
//====================
#endif
}
void PrintErrorTable(const Eigen::ArrayXd& M, const Eigen::ArrayXd& error) {
std::cout << std::setw(15) << "N" << std::setw(15) << "error" << std::setw(15)
<< "rate" << std::endl;
// Formatted output in C++
for (unsigned int i = 0; i < M.size(); ++i) {
std::cout << std::setw(15) << M(i) << std::setw(15) << error(i);
if (i > 0) {
std::cout << std::setw(15) << std::log2(error(i - 1) / error(i));
}
std::cout << std::endl;
}
}
/* SAM_LISTING_END_1 */
} // namespace StabRK3
|
{"hexsha": "df2df6fa1b8461e2346812fec719a43e30e4c25d", "size": 2525, "ext": "cc", "lang": "C++", "max_stars_repo_path": "developers/StabRK3/mastersolution/stabrk3.cc", "max_stars_repo_name": "yiluchen1066/NPDECODES", "max_stars_repo_head_hexsha": "f7b1d96555bace59aba2b65f3ef1e95fa7a9017c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15.0, "max_stars_repo_stars_event_min_datetime": "2019-04-29T11:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T05:10:58.000Z", "max_issues_repo_path": "developers/StabRK3/mastersolution/stabrk3.cc", "max_issues_repo_name": "yiluchen1066/NPDECODES", "max_issues_repo_head_hexsha": "f7b1d96555bace59aba2b65f3ef1e95fa7a9017c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12.0, "max_issues_repo_issues_event_min_datetime": "2020-02-29T15:05:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T13:51:07.000Z", "max_forks_repo_path": "developers/StabRK3/mastersolution/stabrk3.cc", "max_forks_repo_name": "yiluchen1066/NPDECODES", "max_forks_repo_head_hexsha": "f7b1d96555bace59aba2b65f3ef1e95fa7a9017c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26.0, "max_forks_repo_forks_event_min_datetime": "2020-01-09T15:59:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T16:27:33.000Z", "avg_line_length": 26.8617021277, "max_line_length": 80, "alphanum_fraction": 0.5805940594, "num_tokens": 867}
|
/*
* Copyright (c) 2013-2014, Filippo Basso <bassofil@dei.unipd.it>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder(s) nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef UNIPD_CALIBRATION_IMPL_CALIBRATION_PCL_UTILITIES_POINT_PLANE_EXTRACTION_HPP_
#define UNIPD_CALIBRATION_IMPL_CALIBRATION_PCL_UTILITIES_POINT_PLANE_EXTRACTION_HPP_
#include <Eigen/Geometry>
#include <Eigen/StdVector>
#include <pcl/impl/point_types.hpp>
#include <pcl/PCLHeader.h>
#include <pcl/PointIndices.h>
#include <pcl/segmentation/extract_clusters.h>
#include <pcl/visualization/keyboard_event.h>
#include <vector>
#include <calibration_pcl/utilities/point_plane_extraction.h>
namespace unipd
{
namespace calib
{
template <typename PCLPointT_>
void PointPlaneExtraction<PCLPointT_>::setInputCloud(const PointCloudConstPtr & cloud)
{
cloud_ = cloud;
cloud_tree_->setInputCloud(cloud_);
NormalEstimator normal_estimator;
normal_estimator.setInputCloud(cloud_);
normal_estimator.setSearchMethod(cloud_tree_);
normal_estimator.setKSearch(100); // TODO setRadiusSearch
//normal_estimator.setRadiusSearch(0.05f);
normal_estimator.compute(*cloud_normals_);
}
template <typename PCLPointT_>
bool PointPlaneExtraction<PCLPointT_>::extract(PlaneInfo & plane_info) const
{
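  // Strategy: (1) gather the points within radius_ of the seed point_, (2) fit a plane to them with
  // a normal-distance-weighted model, (3) re-select inliers within 5 standard deviations of the fit
  // and refine the coefficients, (4) keep only the largest Euclidean cluster of those inliers, and
  // (5) return its plane coefficients in plane_info.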
if (not plane_info.indices)
plane_info.indices = boost::make_shared<Indices1>();
else
plane_info.indices->clear();
pcl::PointIndices init_indices;
std::vector<float> sqr_distances;
cloud_tree_->radiusSearch(point_, radius_, init_indices.indices, sqr_distances);
if (init_indices.indices.size() < 50)
{
//ROS_ERROR_STREAM("Not enough points found near (" << point_.x << ", " << point_.y << ", " << point_.z << ")");
return false;
}
Eigen::VectorXf coefficients(Eigen::VectorXf::Zero(4));
ModelNormalPlane model(cloud_);
model.setNormalDistanceWeight(0.2);
model.setInputNormals(cloud_normals_);
Eigen::VectorXf new_coefficients(4);
model.optimizeModelCoefficients(init_indices.indices, coefficients, new_coefficients);
if (coefficients == new_coefficients)
return false;
coefficients = new_coefficients;
boost::shared_ptr<std::vector<int> > all_cloud_indices = model.getIndices();
model.setIndices(init_indices.indices);
std::vector<Scalar> distances;
model.getDistancesToModel(coefficients, distances);
Eigen::VectorXd v = Eigen::VectorXd::Map(&distances[0], distances.size());
plane_info.std_dev = 0.0;
if (v.size() > 0)
plane_info.std_dev = std::sqrt(v.cwiseAbs2().mean() - std::pow(v.mean(), 2));
model.setIndices(all_cloud_indices);
model.selectWithinDistance(coefficients, 5 * plane_info.std_dev, *plane_info.indices);
model.optimizeModelCoefficients(*plane_info.indices, coefficients, new_coefficients);
if (coefficients == new_coefficients)
return false;
coefficients = new_coefficients;
model.setNormalDistanceWeight(0.05);
model.selectWithinDistance(coefficients, 5 * plane_info.std_dev, *plane_info.indices);
std::vector<pcl::PointIndices> cluster_indices;
pcl::EuclideanClusterExtraction<PCLPointT_> ec;
ec.setClusterTolerance(radius_ / 8);
ec.setMinClusterSize(plane_info.indices->size() / 8);
ec.setMaxClusterSize(plane_info.indices->size());
//ec.setSearchMethod(cloud_tree_);
ec.setInputCloud(cloud_);
ec.setIndices(plane_info.indices);
ec.extract(cluster_indices);
if (cluster_indices.empty())
return false;
Size1 max_index = 0;
Size1 max_size = cluster_indices[0].indices.size();
for (Size1 i = 1; i < cluster_indices.size(); ++i)
{
if (cluster_indices[i].indices.size() > max_size)
{
max_size = cluster_indices[i].indices.size();
max_index = i;
}
}
*plane_info.indices = cluster_indices[max_index].indices;
plane_info.plane.normal()[0] = coefficients[0];
plane_info.plane.normal()[1] = coefficients[1];
plane_info.plane.normal()[2] = coefficients[2];
plane_info.plane.offset() = coefficients[3];
return true;
}
} // namespace calib
} // namespace unipd
#endif // UNIPD_CALIBRATION_IMPL_CALIBRATION_PCL_UTILITIES_POINT_PLANE_EXTRACTION_HPP_
|
{"hexsha": "06c1022e9387971e64673729580dc8e17f909e61", "size": 5724, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "calibration_pcl/include/impl/calibration_pcl/utilities/point_plane_extraction.hpp", "max_stars_repo_name": "pionex/calibration_toolkit", "max_stars_repo_head_hexsha": "ef9a6f7c6e83ac8c1a052f6f4f4887d60f001212", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 23.0, "max_stars_repo_stars_event_min_datetime": "2016-08-02T04:40:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T22:08:55.000Z", "max_issues_repo_path": "calibration_pcl/include/impl/calibration_pcl/utilities/point_plane_extraction.hpp", "max_issues_repo_name": "pionex/calibration_toolkit", "max_issues_repo_head_hexsha": "ef9a6f7c6e83ac8c1a052f6f4f4887d60f001212", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2015-03-19T07:53:09.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-06T21:55:47.000Z", "max_forks_repo_path": "calibration_pcl/include/impl/calibration_pcl/utilities/point_plane_extraction.hpp", "max_forks_repo_name": "pionex/calibration_toolkit", "max_forks_repo_head_hexsha": "ef9a6f7c6e83ac8c1a052f6f4f4887d60f001212", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2017-06-15T00:18:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T21:16:09.000Z", "avg_line_length": 37.9072847682, "max_line_length": 118, "alphanum_fraction": 0.7321802935, "num_tokens": 1357}
|
# Density Fitting
Density fitting is an extremely useful tool to reduce the computational scaling of many quantum chemical methods.
Density fitting works by approximating the four-index electron repulsion integral (ERI) tensors from Hartree-Fock
theory, $g_{\mu\nu\lambda\sigma} = (\mu\nu|\lambda\sigma)$, by
$$(\mu\nu|\lambda\sigma) \approx \widetilde{(\mu\nu|P)}[J^{-1}]_{PQ}\widetilde{(Q|\lambda\sigma)}$$
where the Coulomb metric $[J]_{PQ}$ and the three-index integral $\widetilde{(Q|\lambda\sigma)}$ are defined as
\begin{align}
[J]_{PQ} &= \int P(r_1)\frac{1}{r_{12}}Q(r_2){\rm d}^3r_1{\rm d}^3r_2\\
\widetilde{(Q|\lambda\sigma)} &= \int Q(r_1)\frac{1}{r_{12}}\lambda(r_2)\sigma(r_2){\rm d}^3r_1{\rm d}^3r_2
\end{align}
To simplify the density fitting notation, the inverse Coulomb metric is typically folded into the three-index tensor:
\begin{align}
(P|\lambda\sigma) &= [J^{-\frac{1}{2}}]_{PQ}\widetilde{(Q|\lambda\sigma)}\\
g_{\mu\nu\lambda\sigma} &\approx (\mu\nu|P)(P|\lambda\sigma)
\end{align}
These transformed three-index tensors can then be used to compute various quantities, including the four-index ERIs,
as well as Coulomb (J) and exchange (K) matrices, and therefore the Fock matrix (F). Before we go any further, let's
see how to generate these transformed tensors using <span style='font-variant: small-caps'> Psi4</span>.
First, let's import <span style='font-variant: small-caps'> Psi4</span> and set up some global options, as well as
define a molecule and initial wavefunction:
```python
# ==> Psi4 & NumPy options, Geometry Definition <==
import numpy as np
import psi4
# Set numpy defaults
np.set_printoptions(precision=5, linewidth=200, suppress=True)
# Set Psi4 memory & output options
psi4.set_memory(int(2e9))
psi4.core.set_output_file('output.dat', False)
# Geometry specification
mol = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
symmetry c1
""")
# Psi4 options
psi4.set_options({'basis': 'aug-cc-pvdz',
'scf_type': 'df',
'e_convergence': 1e-10,
'd_convergence': 1e-10})
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option('basis'))
```
## Building the Auxiliary Basis Set
One feature of our density-fitted tensors $g_{\mu\nu\lambda\sigma} \approx (\mu\nu|P)(P|\lambda\sigma)$ that distinguishes them
from their exact, canonical counterparts $(\mu\nu|\lambda\sigma)$ is the additional "auxiliary" index, $P$.
This index corresponds to inserting a resolution of the identity, which is expanded in an auxiliary basis set $\{P\}$.
In order to build our density-fitted integrals, we first need to generate this auxiliary basis set. Fortunately,
we can do this with the `psi4.core.BasisSet` object:
~~~python
# Build auxiliary basis set
aux = psi4.core.BasisSet.build(mol, "DF_BASIS_SCF", "", "JKFIT", "aug-cc-pVDZ")
~~~
There are special fitting basis sets that are optimal for a given orbital basis. As we will be building J and K
objects, we want the `JKFIT` basis associated with the orbital `aug-cc-pVDZ` basis. This basis is straightforwardly
named `aug-cc-pVDZ-jkfit`.
```python
# Build auxiliary basis set
aux = psi4.core.BasisSet.build(mol, "DF_BASIS_SCF", "", "JKFIT", "aug-cc-pVDZ")
```
## Building Density-Fitted ERIs
Now, we can use our orbital and auxiliary basis sets to construct the `Qpq` object with the inverted metric. Since these
tensors are very similar to the full ERIs, we can use the same integral machinery for both with the aid of a "zero basis". If we
look carefully at $\widetilde{(Q|\lambda\sigma)}$ and $(\mu\nu|\lambda\sigma)$, we note that in the four-index integral both the
left- and right-hand sides contract two Gaussian basis functions into a single density.
For $\widetilde{(Q|\lambda\sigma)}$, in contrast, one side involves a single basis function that is not
multiplied by another, so we can "trick" the MintsHelper object into computing these quantities if we supply a "basis
set" that effectively does not act on another. This is, effectively, what a "zero basis" does.
The $[J^{-\frac{1}{2}}]_{PQ}$ object can be built in a similar way, using the Psi4 Matrix's built-in `power`
function to raise the metric to the $-\frac{1}{2}$ power. The call `Matrix.power(-0.5, 1.e-14)` performs this
while guarding against values smaller than 1.e-14. Recall that machine epsilon is ~1.e-16; when
such small values are raised to a negative fractional power they can become very large and dominate the resulting
matrix, even though they are effectively noise before the inversion.
~~~python
orb = wfn.basisset()
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
# Build MintsHelper Instance
mints = psi4.core.MintsHelper(orb)
# Build (P|pq) raw 3-index ERIs, dimension (1, Naux, nbf, nbf)
Ppq = mints.ao_eri(aux, zero_bas, orb, orb)
# Build and invert the metric
metric = mints.ao_eri(aux, zero_bas, aux, zero_bas)
metric.power(-0.5, 1.e-14)
# Remove the excess dimensions of Ppq & metric
Ppq = np.squeeze(Ppq)
metric = np.squeeze(metric)
# Contract Ppq & Metric to build Qso
Qso = np.einsum('QP,Ppq->Qpq', metric, Ppq, optimize=True)
~~~
```python
# ==> Build Density-Fitted Integrals <==
# Get orbital basis & build zero basis
orb = wfn.basisset()
zero_bas = psi4.core.BasisSet.zero_ao_basis_set()
# Build instance of MintsHelper
mints = psi4.core.MintsHelper(orb)
# Build (P|pq) raw 3-index ERIs, dimension (1, Naux, nbf, nbf)
Ppq = mints.ao_eri(aux, zero_bas, orb, orb)
# Build & invert Coulomb metric, dimension (1, Naux, 1, Naux)
metric = mints.ao_eri(aux, zero_bas, aux, zero_bas)
metric.power(-0.5, 1.e-14)
# Remove excess dimensions of Ppq, & metric
Ppq = np.squeeze(Ppq)
metric = np.squeeze(metric)
# Contract Ppq & metric to build the Qpq object
Qpq = np.einsum('QP,Ppq->Qpq', metric, Ppq, optimize=True)
```
## Example: Building a Density-Fitted Fock Matrix
Now that we've obtained our `Qpq` tensors, we may use them to build the Fock matrix. To do so, since we aren't
implementing a fully density-fitted RHF program, we'll first need to get a density matrix and one-electron Hamiltonian
from somewhere. Let's get them from a converged HF wavefunction, so we can check our work later:
```python
# ==> Compute SCF Wavefunction, Density Matrix, & 1-electron H <==
scf_e, scf_wfn = psi4.energy('scf', return_wfn=True)
D = scf_wfn.Da()
H = scf_wfn.H()
```
Now that we have our density-fitted integrals and a density matrix, we can build a Fock matrix. There are several
different algorithms which we can successfully use to do so; for now, we'll use a simple algorithm and `np.einsum()`
to illustrate how to perform contractions with these density fitted tensors and leave a detailed discussion of those
algorithms/different tensor contraction methods elsewhere. Recall that the Fock matrix, $F$, is given by
$$F = H + 2J - K,$$
where $H$ is the one-electron Hamiltonian matrix, $J$ is the Coulomb matrix, and $K$ is the exchange matrix. The
Coulomb and exchange matrices have elements given by
\begin{align}
J[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\nu|\lambda\sigma)D_{\lambda\sigma}\\
K[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\lambda|\nu\sigma)D_{\lambda\sigma}.
\end{align}
When employing conventional 4-index ERI tensors, computing both $J$ and $K$ involves contracting over four unique
indices, i.e., four nested loops -- one over each unique index in the contraction. Therefore, the
scaling of this procedure is $\mathcal{O}(N^4)$, where $N$ is the number of basis functions. The above
expressions can be coded using `np.einsum()` to handle the tensor contractions:
~~~python
J = np.einsum('pqrs,rs->pq', I_pqrs, D, optimize=True)
K = np.einsum('prqs,rs->pq', I_pqrs, D, optimize=True)
~~~
for exact ERIs `I_pqrs`. If we employ density fitting, however, we can reduce this scaling by reducing the number
of unique indices involved in the contractions. Substituting in the density-fitted $(P|\lambda\sigma)$ tensors into
the above expressions, we obtain the following:
\begin{align}
J[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\nu|P)(P|\lambda\sigma)D_{\lambda\sigma}\\
K[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\lambda|P)(P|\nu\sigma)D_{\lambda\sigma}.
\end{align}
Naively, it seems as if we have actually *increased* the scaling of our algorithm, because we have added the $P$
index to the expression, bringing the total to five unique indices, so a single naive contraction would scale as
$\mathcal{O}(N^4N_{\rm aux})$. We've actually
made our lives easier, however: with three different tensors to contract, we can perform one contraction at a time!
For $J$, this works out to the following two-step procedure:
\begin{align}
\chi_P &= (P|\lambda\sigma)D_{\lambda\sigma} \\
J[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\nu|P)\chi_P
\end{align}
In the cell below, using `np.einsum()` and our `Qpq` tensor, try to construct `J`:
```python
# Two-step build of J with Qpq and D
X_Q = np.einsum('Qpq,pq->Q', Qpq, D, optimize=True)
J = np.einsum('Qpq,Q->pq', Qpq, X_Q, optimize=True)
```
Each of the above contractions, first constructing the `X_Q` intermediate and finally the full Coulomb matrix `J`, only involve three unique indices. Therefore, the Coulomb matrix build above scales as $\mathcal{O}(N_{\rm aux}N^2)$. Notice that we have distinguished the number of auxiliary ($N_{\rm aux}$) and orbital ($N$) basis functions; this is because auxiliary basis sets are usually around double the size of their corresponding orbital counterparts.
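As an optional sanity check, we can compare our density-fitted `J` against a conventional Coulomb build from the exact four-index ERIs. This sketch constructs the full `I_pqrs` tensor in memory, so it is only feasible for small systems:
```python
# ==> Optional check: DF J vs. conventional J (small systems only) <==
I_pqrs = np.asarray(mints.ao_eri(orb, orb, orb, orb))
J_exact = np.einsum('pqrs,rs->pq', I_pqrs, D, optimize=True)
print("Max |J_DF - J_exact| = %.2e" % np.max(np.abs(J - J_exact)))
```
The residual reflects the fitting error of the auxiliary basis; it is small, but it does not vanish to machine precision.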
We can play the same intermediate trick for building the Exchange matrix $K$:
\begin{align}
\zeta_{P\nu\lambda} &= (P|\nu\sigma)D_{\lambda\sigma} \\
K[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\lambda|P)\zeta_{P\nu\lambda}
\end{align}
Just like with $J$, try building $K$ in the cell below:
```python
# Two-step build of K with Qpq and D
Z_Qqr = np.einsum('Qrs,sq->Qrq', Qpq, D, optimize=True)
K = np.einsum('Qpq,Qrq->pr', Qpq, Z_Qqr, optimize=True)
```
Unfortunately, our two-step $K$ build does not incur a reduction in the overall scaling of the algorithm, with each contraction above scaling as $\mathcal{O}(N^3N_{\rm aux})$. The major benefit of density fitting for $K$ builds comes in the form of the small storage overhead of the three-index `Qpq` tensors compared to the full four-index `I_pqrs` tensors. Even when exploiting the full eight-fold symmetry of the $(\mu\nu|\lambda\sigma)$ integrals, storing `I_pqrs` for a system with 3000 AO basis functions will require 81 TB of space, compared to a mere 216 GB to store the full `Qpq` object when exploiting the twofold symmetry of $(P|\lambda\sigma)$.
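These figures follow from a quick back-of-the-envelope estimate (double precision, 8 bytes per element, and the rough $N_{\rm aux} \approx 2N$ rule of thumb mentioned above):
```python
# Rough storage estimate for the numbers quoted above
nbf = 3000
naux = 2 * nbf                                  # N_aux ~ 2N
full_eri = nbf**4 / 8 * 8                       # eight-fold symmetry of (mu nu | lambda sigma)
df_tensor = naux * nbf * (nbf + 1) / 2 * 8      # two-fold symmetry of (P | lambda sigma)
print("Exact ERIs: %.0f TB,  DF tensor: %.0f GB" % (full_eri / 1e12, df_tensor / 1e9))
```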
Now that we've built density-fitted versions of the $J$ and $K$ matrices, let's check our work by comparing a Fock matrix built using our $J$ and $K$ with the fully converged Fock matrix from our original SCF/aug-cc-pVDZ computation.
Below, build F using the one-electron Hamiltonian from the converged SCF wavefuntion and our $J$ and $K$ matrices. Then, get the converged $F$ from the SCF wavefunction:
```python
# Build F from SCF 1 e- Hamiltonian and our density-fitted J & K
F = H + 2 * J - K
# Get converged Fock matrix from converged SCF wavefunction
scf_F = scf_wfn.Fa()
```
Feeling lucky? Execute the next cell to see if you've computed $J$, $K$, and $F$ correctly:
```python
if np.allclose(F, scf_F):
print("Nicely done!! Your density-fitted Fock matrix matches Psi4!")
else:
print("Whoops...something went wrong. Try again!")
```
Nicely done!! Your density-fitted Fock matrix matches Psi4!
Finally, we can remember the identity of the $D$ matrix for SCF which will be $D_{\lambda\sigma} = C_{\lambda i} C_{\sigma i}$, where $i$ is the occupied index. We can factor our $K$ build once more:
\begin{align}
D_{\lambda\sigma} &= C_{\lambda i} C_{\sigma i} \\
\zeta_{P\nu i} &= (P|\nu\sigma)C_{\sigma i} \\
K[D_{\lambda\sigma}]_{\mu\nu} &= \zeta_{P\nu i}\zeta_{P\mu i}
\end{align}
Consider the ratio between the number of basis functions and the size of the occupied index. Why would the above be beneficial?
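Below is a minimal sketch of this occupied-index build, assuming the occupied MO coefficients are taken from the converged wavefunction via `scf_wfn.Ca_subset("AO", "OCC")`. Because the number of occupied orbitals is much smaller than the number of basis functions, the $\zeta_{P\nu i}$ intermediate is considerably cheaper to form and store than its density-based counterpart:
```python
# Sketch: occupied-orbital K build (C_occ assumed available from the SCF wavefunction)
C_occ = np.asarray(scf_wfn.Ca_subset("AO", "OCC"))
zeta = np.einsum('Qrs,si->Qri', Qpq, C_occ, optimize=True)   # (P|nu sigma) C_{sigma i}
K_occ = np.einsum('Qpi,Qqi->pq', zeta, zeta, optimize=True)  # zeta_{P mu i} zeta_{P nu i}
print("Occupied-index K matches D-based K:", np.allclose(K, K_occ))
```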
## References
1. F. Weigend, Phys. Chem. Chem. Phys. 4, 4285 (2002).
2. O. Vahtras, J. Almlöf, and M. W. Feyereisen, Chem. Phys. Lett. 213, 514 (1993).
3. B. I. Dunlap, J. W. D. Connolly, and J. R. Sabin, J. Chem. Phys. 71, 3396 (1979).
4. J. L. Whitten, J. Chem. Phys. 58, 4496 (1973).
|
{"hexsha": "2ab4ddbd66c0090b79cec1d5dd8ba8a25d801103", "size": 16973, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Example/Psi4Numpy/03-HatreeFock/density-fitting.ipynb", "max_stars_repo_name": "yychuang/109-2-compchem-lite", "max_stars_repo_head_hexsha": "cbf17e542f9447e89fb48de1b28759419ffff956", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 214, "max_stars_repo_stars_event_min_datetime": "2017-03-01T08:04:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T08:52:04.000Z", "max_issues_repo_path": "Example/Psi4Numpy/03-HatreeFock/density-fitting.ipynb", "max_issues_repo_name": "yychuang/109-2-compchem-lite", "max_issues_repo_head_hexsha": "cbf17e542f9447e89fb48de1b28759419ffff956", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 100, "max_issues_repo_issues_event_min_datetime": "2017-03-03T13:20:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-05T18:20:27.000Z", "max_forks_repo_path": "Example/Psi4Numpy/03-HatreeFock/density-fitting.ipynb", "max_forks_repo_name": "yychuang/109-2-compchem-lite", "max_forks_repo_head_hexsha": "cbf17e542f9447e89fb48de1b28759419ffff956", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 150, "max_forks_repo_forks_event_min_datetime": "2017-02-17T19:44:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T05:52:43.000Z", "avg_line_length": 42.4325, "max_line_length": 677, "alphanum_fraction": 0.6040181465, "converted": true, "num_tokens": 3668}
|
[STATEMENT]
lemma suminf_eq_zero_iff:
assumes "summable f" and pos: "\<And>n. 0 \<le> f n"
shows "suminf f = 0 \<longleftrightarrow> (\<forall>n. f n = 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (suminf f = (0::'a)) = (\<forall>n. f n = (0::'a))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. suminf f = (0::'a) \<Longrightarrow> \<forall>n. f n = (0::'a)
2. \<forall>n. f n = (0::'a) \<Longrightarrow> suminf f = (0::'a)
[PROOF STEP]
assume L: "suminf f = 0"
[PROOF STATE]
proof (state)
this:
suminf f = (0::'a)
goal (2 subgoals):
1. suminf f = (0::'a) \<Longrightarrow> \<forall>n. f n = (0::'a)
2. \<forall>n. f n = (0::'a) \<Longrightarrow> suminf f = (0::'a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
suminf f = (0::'a)
[PROOF STEP]
have f: "(\<lambda>n. \<Sum>i<n. f i) \<longlonglongrightarrow> 0"
[PROOF STATE]
proof (prove)
using this:
suminf f = (0::'a)
goal (1 subgoal):
1. (\<lambda>n. sum f {..<n}) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
using summable_LIMSEQ[of f] assms
[PROOF STATE]
proof (prove)
using this:
suminf f = (0::'a)
summable f \<Longrightarrow> (\<lambda>n. sum f {..<n}) \<longlonglongrightarrow> suminf f
summable f
(0::'a) \<le> f ?n
goal (1 subgoal):
1. (\<lambda>n. sum f {..<n}) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(\<lambda>n. sum f {..<n}) \<longlonglongrightarrow> (0::'a)
goal (2 subgoals):
1. suminf f = (0::'a) \<Longrightarrow> \<forall>n. f n = (0::'a)
2. \<forall>n. f n = (0::'a) \<Longrightarrow> suminf f = (0::'a)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<lambda>n. sum f {..<n}) \<longlonglongrightarrow> (0::'a)
[PROOF STEP]
have "\<And>i. (\<Sum>n\<in>{i}. f n) \<le> 0"
[PROOF STATE]
proof (prove)
using this:
(\<lambda>n. sum f {..<n}) \<longlonglongrightarrow> (0::'a)
goal (1 subgoal):
1. \<And>i. sum f {i} \<le> (0::'a)
[PROOF STEP]
by (metis L \<open>summable f\<close> order_refl pos sum.infinite sum_le_suminf)
[PROOF STATE]
proof (state)
this:
sum f {?i} \<le> (0::'a)
goal (2 subgoals):
1. suminf f = (0::'a) \<Longrightarrow> \<forall>n. f n = (0::'a)
2. \<forall>n. f n = (0::'a) \<Longrightarrow> suminf f = (0::'a)
[PROOF STEP]
with pos
[PROOF STATE]
proof (chain)
picking this:
(0::'a) \<le> f ?n
sum f {?i} \<le> (0::'a)
[PROOF STEP]
show "\<forall>n. f n = 0"
[PROOF STATE]
proof (prove)
using this:
(0::'a) \<le> f ?n
sum f {?i} \<le> (0::'a)
goal (1 subgoal):
1. \<forall>n. f n = (0::'a)
[PROOF STEP]
by (simp add: order.antisym)
[PROOF STATE]
proof (state)
this:
\<forall>n. f n = (0::'a)
goal (1 subgoal):
1. \<forall>n. f n = (0::'a) \<Longrightarrow> suminf f = (0::'a)
[PROOF STEP]
qed (metis suminf_zero fun_eq_iff)
|
{"llama_tokens": 1282, "file": null, "length": 13}
|
module PrivateMultiplicativeWeights
using
Distributions: Laplace, wsample
using
Printf,
Hadamard,
LinearAlgebra,
Random,
IterTools,
Statistics,
    Distributed
export
mwem,
MWParameters,
Tabular,
Histogram,
HistogramQueries,
SeriesRangeQueries,
RangeQueries,
RangeQuery,
Parities,
FactorParities,
Interval,
gosper,
BinaryItr,
maximum_error,
kl_divergence_error,
mean_squared_error,
queriesMatrix,
evaluate,
get,
kl_divergence,
complete_way_marginals_indices,
fourierCoefficients,
calculateMarginal,
Marginals,
get_query_vector_from_interval,
hadamard_basis_vector
import
Base: eltype, length, iterate
include("interface.jl")
include("histogram.jl")
include("rangequeries.jl")
include("factors.jl")
include("gosper.jl")
include("parities.jl")
include("error.jl")
include("mwem.jl")
"""
PrivateMultiplicativeWeights
A simple and practical algorithm for differentially private data analysis.
"""
PrivateMultiplicativeWeights
end # module
|
{"hexsha": "1ffd4d572ceb94a14c4fea3de62809d707d77020", "size": 1091, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PrivateMultiplicativeWeights.jl", "max_stars_repo_name": "giladroyz/MWEM-project", "max_stars_repo_head_hexsha": "e587b6bae85a2bf47b09e33e12dfac0adfa7e2ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/PrivateMultiplicativeWeights.jl", "max_issues_repo_name": "giladroyz/MWEM-project", "max_issues_repo_head_hexsha": "e587b6bae85a2bf47b09e33e12dfac0adfa7e2ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PrivateMultiplicativeWeights.jl", "max_forks_repo_name": "giladroyz/MWEM-project", "max_forks_repo_head_hexsha": "e587b6bae85a2bf47b09e33e12dfac0adfa7e2ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.5303030303, "max_line_length": 74, "alphanum_fraction": 0.7112740605, "num_tokens": 267}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[b5paper, 11pt, openany, titlepage]{book}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage[pdftex]{graphicx,color}
%\usepackage[T1,plmath]{polski}
\usepackage[cp1250]{inputenc}
\usepackage{indentfirst}
\usepackage[numbers,sort&compress]{natbib}
\usepackage{geometry}
\newgeometry{tmargin=3.6cm, bmargin=3.6cm, lmargin=3.2cm, rmargin=3.2cm}
\usepackage{multirow}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{pdflscape}
\usepackage{adjustbox}
\usepackage{etoolbox}
\usepackage{chapterbib}
% fix long section titles in toc
\usepackage{booktabs}
\usepackage{suffix}
\usepackage{textcomp}
\usepackage{hyperref}
\usepackage{xspace}
\usepackage{csquotes}
\usepackage{subcaption}
\renewcommand{\figurename}{Fig.}
\renewcommand{\tablename}{Tab.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{Data-driven based approach for damage detection}
\author{Abdalraheem A. Ijjeh}
\maketitle
\tableofcontents
\chapter[Data-driven approach]{Data-driven based approach for damage detection}
%\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
Engineering structures such as buildings, roads, tunnels, power generation systems, rotating machinery, and aircrafts are considered very important in our modern life.
However, such structures are prone to various types of damage, therefore it is essential to maintain them and keep them safe during their operational lifetime.
Health monitoring presents an essential tool in management activities as it allows identifying early and progressive structural damage~\cite{farrar2007introduction}.
Obtained data from monitoring structures are large and need to be transformed into valuable information to assist the development and design of maintenance activities, improve safety, reduce uncertainties and extend our knowledge regarding the monitored structure.
Structural health monitoring (SHM) is one of the most robust
tools for managing infrastructure.
Traditionally, the procedure of performing autonomous damage identification for engineering structures, whether civil, mechanical, or aerospace, is referred to as SHM~\cite{farrar2001vibration}.
SHM aims to provide a real-time evaluation of a structure during its life-cycle~\cite{Balageas2010}.
Moreover, SHM assists in detecting and characterizing damage in a structure as a whole or its parts.
Damage detection in a structure is crucial since it may reduce safety and performance during its operational lifetime~\cite{Yuan2016}.
Furthermore, the SHM approach involves continuously monitoring a structure through an array of sensors that periodically measure its response, extracting damage-sensitive features from these measurements, and performing statistical analysis on those features to assess the condition of the structure.
Generally, there are two approaches to SHM: physics-based and
data-based.
In the physics-based approach, the inverse problem method is applied in which numerical models such as finite element models are implemented.
Furthermore, damage identification results from comparing the registered readings from the structure with the data estimated by the models.
On the other hand, the data-based approach is related to the artificial intelligence domain (machine learning and deep learning), in which artificial models are developed to learn the behavior of the structure based on earlier registered data that leads to performing pattern recognition for the damage identification.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The data-based approach can be applied in both supervised and unsupervised learning~\cite{worden2007application}.
Supervised learning can be utilised in the field of SHM when data from both damaged and undamaged conditions are available for training the detection models~\cite{figueiredo2018machine}.
On the other hand, unsupervised learning is applied when only data from undamaged cases are available; therefore, the detection models train solely on such data~\cite{figueiredo2018machine}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
In this chapter, several machine learning (ML) and deep learning (DL) techniques for damage detection are presented.
Furthermore, ML techniques such as principal component analysis (PCA), Gaussian mixture models (GMMs), Mahalanobis squared distance (MSD), and bio-inspired algorithms will be illustrated.
For the DL approach, techniques such as artificial neural networks (ANN), convolutional neural networks (ConvNet/CNN), and recurrent neural networks (RNN) will be illustrated.
Furthermore, data acquired based on the guided waves approach and vibration-based approach will be presented.
%%%%%%%%%%%%%%%% Section 2 %%%%%%%%%%%%%%%%%%%%%%%
\input{Machine_learning_approach}
%%%%%%%%%%%%%%%% Section 3 %%%%%%%%%%%%%%%%%%%%%%%
\input{Deep_learning_approach}
%%%%%%%%%%%%%%%% Section 4 %%%%%%%%%%%%%%%%%%%%%%%
\input{Wave_propagation_based_methods}
%%%%%%%%%%%%%%%% Section 5 %%%%%%%%%%%%%%%%%%%%%%%
\input{vibratraion_based_approach.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Summary}
In this chapter, the author discussed problems with conventional damage detection techniques for SHM and the importance of the artificial intelligence approach.
Furthermore, in the second section of the chapter, the author introduced the ML approach in the SHM field.
Moreover, several techniques for feature extraction such as PCA, MSD, and GMMs were described.
Further, several classification models such as SVM, KNN, and decision trees were introduced.
In the third section, the author introduced the deep learning approach, covering techniques such as CNN and RNN.
Finally, the author presented several deep learning techniques for damage detection applied in the SHM field, based on guided waves and vibration approaches.
\section*{Acknowledgments}
This work was funded by the Polish National Science Center under grant agreement no 2018/31/B/ST8/00454.
Also, I would like to thank my supervisor Professor Pawel Kudela for his consistent support and guidance during the running of this project.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliography{biblography.bib}
\bibliographystyle{unsrt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
{"hexsha": "7826d4cb98c69cb86136cada4863cdb085b9adb2", "size": 6577, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "reports/monograph/Ijjeh/ChapterTwo/monograph_template.tex", "max_stars_repo_name": "IFFM-PAS-MISD/aidd", "max_stars_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-03T05:36:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T05:36:07.000Z", "max_issues_repo_path": "reports/monograph/Ijjeh/ChapterTwo/monograph_template.tex", "max_issues_repo_name": "IFFM-PAS-MISD/aidd", "max_issues_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reports/monograph/Ijjeh/ChapterTwo/monograph_template.tex", "max_forks_repo_name": "IFFM-PAS-MISD/aidd", "max_forks_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 69.2315789474, "max_line_length": 322, "alphanum_fraction": 0.7217576403, "num_tokens": 1325}
|
import sys
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
from fhi_lib.geometry import Point
class ImgCoord():
def __init__(self, info):
self.mask = info[0].astype(np.uint8)
self.roi = info[1]
self.class_id= info[2]
def draw_point_of_interest(self, img):
img = cv2.circle(img, (self.x_interest, self.y_interest), 0, (0, 0, 255), 5)
img = cv2.circle(img, (self.x_interest, self.y_interest), 15, (100,255,100), 3)
return img
def get_point_of_interest(self):
raise NotImplementedError()
def update_interest_pt(self, pt):
self.x_interest = pt[0]
self.y_interest = pt[1]
class Type1_2Coord(ImgCoord):
def __init__(self, info):
super().__init__(info)
def get_point_of_interest(self):
epsilon = 2
contours, _ = cv2.findContours(image=self.mask,
mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_NONE)
contour = contours[0][:,0,:]
pts_x = contour[:,0]
pts_y = contour[:,1]
pts_ux = np.mean(pts_x)
### Select the points near x mean ###
selected_pts_mask = (pts_x < pts_ux + epsilon) & (pts_x > pts_ux - epsilon)
selected_pts_x = pts_x[selected_pts_mask]
selected_pts_y = pts_y[selected_pts_mask]
selected_pts_uy = np.mean(selected_pts_y)
### Find min y that is greater than y_mean ###
conditioned_min_y = 99999
for i, y in enumerate(selected_pts_y):
if y < conditioned_min_y and y > selected_pts_uy:
conditioned_min_y = y
### Take the average of x coordinates of the points with same y coordinates ###
selected_pts_y_mask = selected_pts_y == conditioned_min_y
interested_pts_x = selected_pts_x[selected_pts_y_mask]
self.x_interest = int(np.mean(interested_pts_x))
self.y_interest = conditioned_min_y
return Point((self.x_interest,self.y_interest))
class Type3_4Coord(ImgCoord):
def __init__(self, info):
super().__init__(info)
def get_point_of_interest(self):
approx_y_selection_range = 20
contours, _ = cv2.findContours(image=self.mask,
mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_NONE)
approx = cv2.approxPolyDP(contours[0], 20, True)
approx = approx[:,0,:]
approx_y = approx[:,1]
approx_y_max = np.max(approx_y)
selected_pt_mask_max = approx_y > (approx_y_max-approx_y_selection_range)
approx_max_pts = approx[selected_pt_mask_max]
approx_left_corner = approx_max_pts[0]
for pt in approx_max_pts:
if pt[0] < approx_left_corner[0]:
approx_left_corner = pt
self.x_interest = approx_left_corner[0]
self.y_interest = approx_left_corner[1]
return Point(approx_left_corner)
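# Example usage (sketch; `info` is assumed to be a (binary_mask, roi, class_id) tuple,
# matching the unpacking in ImgCoord.__init__):
#
#   coord = Type1_2Coord((mask, roi, class_id))
#   interest_pt = coord.get_point_of_interest()
#   annotated = coord.draw_point_of_interest(img.copy())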
|
{"hexsha": "d68ccc40d7708621f78ee425dd8f36dad1145387", "size": 2565, "ext": "py", "lang": "Python", "max_stars_repo_path": "fhi_lib/img_coordinate.py", "max_stars_repo_name": "yhsueh/FHI_RCNN", "max_stars_repo_head_hexsha": "f12df17049d5c72d1a7cec89e3943013150177a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fhi_lib/img_coordinate.py", "max_issues_repo_name": "yhsueh/FHI_RCNN", "max_issues_repo_head_hexsha": "f12df17049d5c72d1a7cec89e3943013150177a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fhi_lib/img_coordinate.py", "max_forks_repo_name": "yhsueh/FHI_RCNN", "max_forks_repo_head_hexsha": "f12df17049d5c72d1a7cec89e3943013150177a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9036144578, "max_line_length": 81, "alphanum_fraction": 0.7263157895, "include": true, "reason": "import numpy", "num_tokens": 730}
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
COLORS = {'train': 'b', 'test': 'r'}
def plot_validation_curve(train_scores, test_scores, train_sizes, expected_score=None, ax=None, stat_error=True):
ax = _plot_generic_curve(
train_scores=train_scores,
test_scores=test_scores,
x_pars=train_sizes,
expected_score=expected_score,
ax=ax,
stat_error=stat_error
)
ax.set_ylabel('score')
ax.set_xlabel('training sample size')
def plot_learning_curve(train_scores, test_scores, param_range, expected_score=None, ax=None, stat_error=True):
ax = _plot_generic_curve(
train_scores=train_scores,
test_scores=test_scores,
x_pars=param_range,
expected_score=expected_score,
ax=ax,
stat_error=stat_error
)
ax.set_ylabel('score')
ax.set_xlabel('parameter value')
def _plot_generic_curve(train_scores, test_scores, x_pars, expected_score=None, ax=None, stat_error=True):
train_score_mean, train_score_error, test_score_mean, test_score_error = \
get_score_means_and_errors(train_scores, test_scores, stat_error=stat_error)
if ax is None:
ax = plt.gca()
ax.plot(
x_pars,
train_score_mean,
color=COLORS['train'],
linestyle='none',
marker='o',
label='train'
)
ax.fill_between(
x_pars,
train_score_mean + train_score_error,
train_score_mean - train_score_error,
alpha=0.15,
color=COLORS['train']
)
ax.plot(
x_pars,
test_score_mean,
color=COLORS['test'],
linestyle='none',
marker='s',
label='test'
)
ax.fill_between(
x_pars,
test_score_mean + test_score_error,
test_score_mean - test_score_error,
alpha=0.15,
color=COLORS['test']
)
if expected_score is not None:
ax.plot(
(x_pars[0], x_pars[-1]),
(expected_score, expected_score),
linestyle='--',
color='k'
)
ax.legend()
return ax
def get_score_means_and_errors(train_scores, test_scores, stat_error=True):
train_score_mean = np.mean(train_scores, axis=1)
test_score_mean = np.mean(test_scores, axis=1)
if stat_error:
train_score_error = np.std(train_scores, axis=1) / np.sqrt(len(train_scores[0]))
test_score_error = np.std(test_scores, axis=1) / np.sqrt(len(test_scores[0]))
else:
train_score_error = np.std(train_scores, axis=1)
test_score_error = np.std(test_scores, axis=1)
return train_score_mean, train_score_error, test_score_mean, test_score_error
def plot_probability_distributions(clf, X, y):
prob = clf.predict_proba(X)
plt.hist([p for p, y in zip(prob[:,0], y) if y == 0], bins=np.linspace(0, 1, 25), alpha=0.6, color='r')
plt.hist([p for p, y in zip(prob[:,0], y) if y == 1], bins=np.linspace(0, 1, 25), alpha=0.6, color='b')
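# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): pairs these helpers with
# scikit-learn's learning_curve. Assumes scikit-learn is installed; note that
# in this module the helper keyed on training-set size is plot_validation_curve
# (it takes `train_sizes`).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import learning_curve

    X, y = make_classification(n_samples=500, n_features=20, random_state=0)
    sizes, train_scores, test_scores = learning_curve(
        LogisticRegression(max_iter=1000), X, y,
        train_sizes=np.linspace(0.1, 1.0, 5), cv=5
    )
    plot_validation_curve(train_scores, test_scores, sizes)
    plt.show()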
|
{"hexsha": "a4fabe54657fe1ca9150e2b004d354311f08fae8", "size": 3019, "ext": "py", "lang": "Python", "max_stars_repo_path": "SmallSampleClassification/my_lib/plotting.py", "max_stars_repo_name": "RaulRPrado/LearningDataScience", "max_stars_repo_head_hexsha": "0a7d6cfffadd7cdd657be0609b7f088a19f9deb5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-05T13:49:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T13:49:27.000Z", "max_issues_repo_path": "SmallSampleClassification/my_lib/plotting.py", "max_issues_repo_name": "RaulRPrado/LearningDataScience", "max_issues_repo_head_hexsha": "0a7d6cfffadd7cdd657be0609b7f088a19f9deb5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SmallSampleClassification/my_lib/plotting.py", "max_forks_repo_name": "RaulRPrado/LearningDataScience", "max_forks_repo_head_hexsha": "0a7d6cfffadd7cdd657be0609b7f088a19f9deb5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.214953271, "max_line_length": 113, "alphanum_fraction": 0.6412719444, "include": true, "reason": "import numpy", "num_tokens": 747}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an implementation of LiteGEM:
"""
import math
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.distributed as dist
import pgl
import pgl.nn as gnn
from pgl.utils.logger import log
from pahelix.networks.compound_encoder import AtomEmbedding, AtomFloatEmbedding, BondEmbedding
def batch_norm_1d(num_channels):
"""tbd"""
if dist.get_world_size() > 1:
return nn.SyncBatchNorm.convert_sync_batchnorm(nn.BatchNorm1D(num_channels))
else:
return nn.BatchNorm1D(num_channels)
def norm_layer(norm_type, nc):
"""tbd"""
# normalization layer 1d
norm = norm_type.lower()
if norm == 'batch':
layer = batch_norm_1d(nc)
elif norm == 'layer':
layer = nn.LayerNorm(nc)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm)
return layer
def act_layer(act_type, inplace=False, neg_slope=0.2, n_prelu=1):
"""tbd"""
# activation layer
act = act_type.lower()
if act == 'relu':
layer = nn.ReLU()
elif act == 'leakyrelu':
layer = nn.LeakyReLU(neg_slope, inplace)
elif act == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
elif act == 'swish':
layer = nn.Swish()
else:
raise NotImplementedError('activation layer [%s] is not found' % act)
return layer
def Linear(input_size, hidden_size, with_bias=True):
"""tbd"""
fan_in = input_size
bias_bound = 1.0 / math.sqrt(fan_in)
fc_bias_attr = paddle.ParamAttr(initializer=nn.initializer.Uniform(
low=-bias_bound, high=bias_bound))
negative_slope = math.sqrt(5)
gain = math.sqrt(2.0 / (1 + negative_slope ** 2))
std = gain / math.sqrt(fan_in)
weight_bound = math.sqrt(3.0) * std
fc_w_attr = paddle.ParamAttr(initializer=nn.initializer.Uniform(
low=-weight_bound, high=weight_bound))
if not with_bias:
fc_bias_attr = False
return nn.Linear(
input_size, hidden_size, weight_attr=fc_w_attr, bias_attr=fc_bias_attr)
class MLP(paddle.nn.Sequential):
"""tbd"""
def __init__(self, channels, act='swish', norm=None, bias=True, drop=0., last_lin=False):
m = []
for i in range(1, len(channels)):
m.append(Linear(channels[i - 1], channels[i], bias))
if (i == len(channels) - 1) and last_lin:
pass
else:
if norm is not None and norm.lower() != 'none':
m.append(norm_layer(norm, channels[i]))
if act is not None and act.lower() != 'none':
m.append(act_layer(act))
if drop > 0:
m.append(nn.Dropout(drop))
self.m = m
super(MLP, self).__init__(*self.m)
class LiteGEMConv(paddle.nn.Layer):
"""tbd"""
def __init__(self, config, with_efeat=True):
super(LiteGEMConv, self).__init__()
log.info("layer_type is %s" % self.__class__.__name__)
self.config = config
self.with_efeat = with_efeat
self.aggr = self.config["aggr"]
self.learn_t = self.config["learn_t"]
self.learn_p = self.config["learn_p"]
self.init_t = self.config["init_t"]
self.init_p = self.config["init_p"]
self.eps = 1e-7
self.emb_dim = self.config["emb_dim"]
if self.with_efeat:
self.bond_encoder = BondEmbedding(self.config["bond_names"], self.emb_dim)
self.concat = config["concat"]
if self.concat:
self.fc_concat = Linear(self.emb_dim * 3, self.emb_dim)
assert self.aggr in ['softmax_sg', 'softmax', 'power']
channels_list = [self.emb_dim]
for i in range(1, self.config["mlp_layers"]):
channels_list.append(self.emb_dim * 2)
channels_list.append(self.emb_dim)
self.mlp = MLP(channels_list,
norm=self.config["norm"],
last_lin=True)
if self.learn_t and self.aggr == "softmax":
self.t = self.create_parameter(
shape=[1, ],
dtype='float32',
default_initializer=nn.initializer.Constant(value=self.init_t))
else:
self.t = self.init_t
if self.learn_p:
self.p = self.create_parameter(
shape=[1, ],
dtype='float32',
default_initializer=nn.initializer.Constant(value=self.init_p))
def send_func(self, src_feat, dst_feat, edge_feat):
"""tbd"""
if self.with_efeat:
if self.concat:
h = paddle.concat([dst_feat['h'], src_feat['h'], edge_feat['e']], axis=1)
h = self.fc_concat(h)
else:
h = src_feat["h"] + edge_feat["e"]
else:
h = src_feat["h"]
msg = {"h": F.swish(h) + self.eps}
return msg
def recv_func(self, msg):
"""tbd"""
if self.aggr == "softmax":
alpha = msg.reduce_softmax(msg["h"] * self.t)
out = msg['h'] * alpha
out = msg.reduce_sum(out)
return out
elif self.aggr == "power":
raise NotImplementedError
def forward(self, graph, nfeat, efeat=None):
"""tbd"""
if efeat is not None:
if self.with_efeat:
efeat = self.bond_encoder(efeat)
msg = graph.send(src_feat={"h": nfeat},
dst_feat={"h": nfeat},
edge_feat={"e": efeat},
message_func=self.send_func)
else:
msg = graph.send(src_feat={"h": nfeat},
dst_feat={"h": nfeat},
message_func=self.send_func)
out = graph.recv(msg=msg, reduce_func=self.recv_func)
out = nfeat + out
out = self.mlp(out)
return out
class LiteGEM(paddle.nn.Layer):
"""tbd"""
def __init__(self, config, with_efeat=True):
super(LiteGEM, self).__init__()
log.info("gnn_type is %s" % self.__class__.__name__)
self.config = config
self.with_efeat = with_efeat
self.num_layers = config["num_layers"]
self.drop_ratio = config["dropout_rate"]
self.virtual_node = config["virtual_node"]
self.emb_dim = config["emb_dim"]
self.norm = config["norm"]
self.num_tasks = config["num_tasks"]
self.atom_names = config["atom_names"]
self.atom_float_names = config["atom_float_names"]
self.bond_names = config["bond_names"]
self.gnns = paddle.nn.LayerList()
self.norms = paddle.nn.LayerList()
if self.virtual_node:
log.info("using virtual node in %s" % self.__class__.__name__)
self.mlp_virtualnode_list = paddle.nn.LayerList()
self.virtualnode_embedding = self.create_parameter(
shape=[1, self.emb_dim],
dtype='float32',
default_initializer=nn.initializer.Constant(value=0.0))
for layer in range(self.num_layers - 1):
self.mlp_virtualnode_list.append(MLP([self.emb_dim] * 3,
norm=self.norm))
for layer in range(self.num_layers):
self.gnns.append(LiteGEMConv(config, with_efeat=not self.with_efeat))
self.norms.append(norm_layer(self.norm, self.emb_dim))
self.atom_embedding = AtomEmbedding(self.atom_names, self.emb_dim)
self.atom_float_embedding = AtomFloatEmbedding(self.atom_float_names, self.emb_dim)
if self.with_efeat:
self.init_bond_embedding = BondEmbedding(self.config["bond_names"], self.emb_dim)
self.pool = gnn.GraphPool(pool_type="sum")
if not self.config["graphnorm"]:
self.gn = gnn.GraphNorm()
hidden_size = self.emb_dim
if self.config["clf_layers"] == 3:
log.info("clf_layers is 3")
self.graph_pred_linear = nn.Sequential(
Linear(hidden_size, hidden_size // 2),
batch_norm_1d(hidden_size // 2),
nn.Swish(),
Linear(hidden_size // 2, hidden_size // 4),
batch_norm_1d(hidden_size // 4),
nn.Swish(),
Linear(hidden_size // 4, self.num_tasks)
)
elif self.config["clf_layers"] == 2:
log.info("clf_layers is 2")
self.graph_pred_linear = nn.Sequential(
Linear(hidden_size, hidden_size // 2),
batch_norm_1d(hidden_size // 2),
nn.Swish(),
Linear(hidden_size // 2, self.num_tasks)
)
else:
self.graph_pred_linear = Linear(hidden_size, self.num_tasks)
def forward(self, g):
"""tbd"""
h = self.atom_embedding(g.node_feat)
h += self.atom_float_embedding(g.node_feat)
if self.virtual_node:
virtualnode_embedding = self.virtualnode_embedding.expand(
[g.num_graph, self.virtualnode_embedding.shape[-1]])
h = h + paddle.gather(virtualnode_embedding, g.graph_node_id)
# print("virt0: ", np.sum(h.numpy()))
if self.with_efeat:
edge_emb = self.init_bond_embedding(g.edge_feat)
else:
edge_emb = g.edge_feat
h = self.gnns[0](g, h, edge_emb)
if self.config["graphnorm"]:
h = self.gn(g, h)
# print("h0: ", np.sum(h.numpy()))
for layer in range(1, self.num_layers):
h1 = self.norms[layer - 1](h)
h2 = F.swish(h1)
h2 = F.dropout(h2, p=self.drop_ratio, training=self.training)
if self.virtual_node:
virtualnode_embedding_temp = self.pool(g, h2) + virtualnode_embedding
virtualnode_embedding = self.mlp_virtualnode_list[layer - 1](virtualnode_embedding_temp)
virtualnode_embedding = F.dropout(
virtualnode_embedding,
self.drop_ratio,
training=self.training)
h2 = h2 + paddle.gather(virtualnode_embedding, g.graph_node_id)
# print("virt_h%s: " % (layer), np.sum(h2.numpy()))
h = self.gnns[layer](g, h2, edge_emb) + h
if self.config["graphnorm"]:
h = self.gn(g, h)
# print("h%s: " % (layer), np.sum(h.numpy()))
h = self.norms[self.num_layers - 1](h)
h = F.dropout(h, p=self.drop_ratio, training=self.training)
h_graph = self.pool(g, h)
# return graph, node, edge representation
return h_graph, h, edge_emb
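# ---------------------------------------------------------------------------
# Usage sketch (assumed config values; in practice the config comes from an
# external configuration file):
#
#   config = {
#       "num_layers": 5, "emb_dim": 256, "dropout_rate": 0.2, "norm": "batch",
#       "virtual_node": True, "num_tasks": 1, "graphnorm": False, "clf_layers": 2,
#       "aggr": "softmax", "learn_t": True, "learn_p": False,
#       "init_t": 1.0, "init_p": 1.0, "mlp_layers": 2, "concat": False,
#       "atom_names": [...], "atom_float_names": [...], "bond_names": [...],
#   }
#   model = LiteGEM(config)
#   graph_repr, node_repr, edge_repr = model(batched_pgl_graph)
# ---------------------------------------------------------------------------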
|
{"hexsha": "66f7b2ff23d1bef698e7da165ea39509a7262d1a", "size": 11435, "ext": "py", "lang": "Python", "max_stars_repo_path": "pahelix/model_zoo/light_gem_model.py", "max_stars_repo_name": "agave233/PaddleHelix", "max_stars_repo_head_hexsha": "e5578f72c2a203a27d9df7da111f1ced826c1429", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 454, "max_stars_repo_stars_event_min_datetime": "2020-11-21T01:02:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T12:53:40.000Z", "max_issues_repo_path": "pahelix/model_zoo/light_gem_model.py", "max_issues_repo_name": "chupvl/PaddleHelix", "max_issues_repo_head_hexsha": "6e082f89b8090c3c360593d40a08bffc884165dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 161, "max_issues_repo_issues_event_min_datetime": "2020-12-12T06:35:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T11:31:13.000Z", "max_forks_repo_path": "pahelix/model_zoo/light_gem_model.py", "max_forks_repo_name": "chupvl/PaddleHelix", "max_forks_repo_head_hexsha": "6e082f89b8090c3c360593d40a08bffc884165dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 108, "max_forks_repo_forks_event_min_datetime": "2020-12-07T09:01:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T14:42:29.000Z", "avg_line_length": 34.7568389058, "max_line_length": 104, "alphanum_fraction": 0.5770878881, "include": true, "reason": "import numpy", "num_tokens": 2690}
|
import torch
import tensorflow as tf
import os
import numpy as np
import cv2
class EpisodeScalerSummary(object):
"""docstring for EpisodeScalerSummary."""
def __init__(self):
self.episode_scalers = {}
self.final_scalers = {}
self.reset()
def at_step(self, step_scalers={}):
'''
call this at episode step
'''
for key in step_scalers.keys():
if key in self.episode_scalers.keys():
self.episode_scalers[key] += step_scalers[key]
else:
self.episode_scalers[key] = step_scalers[key]
def at_done(self, done_scalers={}):
'''
call this at episode done
'''
for key in self.episode_scalers.keys():
if key in self.final_scalers.keys():
self.final_scalers[key] += [self.episode_scalers[key]]
else:
self.final_scalers[key] = [self.episode_scalers[key]]
self.episode_scalers[key] = 0.0
for key in done_scalers.keys():
if key in self.final_scalers.keys():
self.final_scalers[key] += [done_scalers[key]]
else:
self.final_scalers[key] = [done_scalers[key]]
def to_print_str(self):
'''
get a print string of final_scalers
'''
print_str = ''
for key in self.final_scalers.keys():
if len(self.final_scalers[key]) > 0:
print_str += '[{} - {:.2f}|{:.2f}|{:.2f}|{} (Min|Mean|Max|Num)]'.format(
key,
np.min(self.final_scalers[key]),
np.mean(self.final_scalers[key]),
np.max(self.final_scalers[key]),
len(self.final_scalers[key]),
)
else:
print_str += '[{}-still summarizing the first episode]'.format(
key,
)
return print_str
def get_final_scalers_len(self):
'''
get the length of summaried final_scalers
'''
if (len(self.final_scalers.keys()) > 0):
return len(self.final_scalers[list(self.final_scalers.keys())[0]])
else:
return 0
def summary(self, mode='mean'):
'''
get final_scalers summary
mode: min, mean, max, recent
'''
final_scalers_mean = {}
for key in self.final_scalers.keys():
if mode in ['min']:
final_scalers_mean[key] = np.min(self.final_scalers[key])
elif mode in ['mean']:
final_scalers_mean[key] = np.mean(self.final_scalers[key])
elif mode in ['max']:
final_scalers_mean[key] = np.max(self.final_scalers[key])
elif mode in ['recent']:
final_scalers_mean[key] = self.final_scalers[key][-1]
return final_scalers_mean
def reset(self):
'''
reset summary
'''
for key in self.final_scalers.keys():
self.final_scalers[key] = []
for key in self.episode_scalers.keys():
self.episode_scalers[key] = 0.0
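# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): accumulate per-step scalars,
# close each episode with at_done(), then print and aggregate the totals.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    summary = EpisodeScalerSummary()
    for episode in range(3):
        for step in range(10):
            summary.at_step({'reward': np.random.rand(), 'steps': 1})
        summary.at_done({'final_score': np.random.rand()})
    print(summary.to_print_str())
    print(summary.summary(mode='mean'))
    summary.reset()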
|
{"hexsha": "2cf5c6e0373b1c2db8ac4b97b707450568aee7ae", "size": 3194, "ext": "py", "lang": "Python", "max_stars_repo_path": "assets/logger.py", "max_stars_repo_name": "YuhangSong/Arena-Baselines-Depreciated", "max_stars_repo_head_hexsha": "78c33994e67aede7565dda3f68f5cebe0d5ee6e6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assets/logger.py", "max_issues_repo_name": "YuhangSong/Arena-Baselines-Depreciated", "max_issues_repo_head_hexsha": "78c33994e67aede7565dda3f68f5cebe0d5ee6e6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assets/logger.py", "max_forks_repo_name": "YuhangSong/Arena-Baselines-Depreciated", "max_forks_repo_head_hexsha": "78c33994e67aede7565dda3f68f5cebe0d5ee6e6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.419047619, "max_line_length": 88, "alphanum_fraction": 0.5256731371, "include": true, "reason": "import numpy", "num_tokens": 695}
|
from functools import cmp_to_key
import numpy as np
from matplotlib.ticker import FuncFormatter
from scipy import signal
from pydynamo_brain.files import TraceCache
from pydynamo_brain.ui.baseMatplotlibCanvas import BaseMatplotlibCanvas
from pydynamo_brain.ui.common import createAndShowInfo
import pydynamo_brain.util as util
_TRACE_CACHE = TraceCache()
# Utility to pad (min, max) range by a small amount and return the new ones
def _addPad(lo, hi, pad):
mid = (lo + hi) / 2
haf = hi - mid
return mid - (1 + pad) * haf, mid + (1 + pad) * haf
# Combine similar traces, by sorting by hz (first) and samples (second)
def _traceCompare(idAndTraceA, idAndTraceB):
idA, traceA = idAndTraceA
idB, traceB = idAndTraceB
if traceA.rate != traceB.rate:
return traceA.rate - traceB.rate
if len(traceA.data) != len(traceB.data):
return len(traceA.data) - len(traceB.data)
return 1 if idA < idB else -1
_TRACE_COMPARE_KEY = cmp_to_key(_traceCompare)
# Draws all traces from one timepoint as an intensity plot
class AllTracesCanvas(BaseMatplotlibCanvas):
def __init__(self, parent, fullState, treeIdx, *args, **kwargs):
tracePaths = []
if treeIdx < len(fullState.traces):
tracePaths = fullState.traces[treeIdx]
self.allTraces = _TRACE_CACHE.getAllTraces(tracePaths)
self.allTraceWindow = parent
self.intensity = None # Hacky, but this gets set in the figure creation
self.colorbar = None
super(AllTracesCanvas, self).__init__(parent, *args, in3D=False, **kwargs)
self.updateColorbarHack()
def compute_initial_figure(self):
infoBox = createAndShowInfo("Applying filters...", self.allTraceWindow)
# Initial figure, also used on update.
ax = self.axes[0]
everyIdAndTrace = list(self.allTraces.items())
everyIdAndTrace.sort(key=_TRACE_COMPARE_KEY)
nTraces = len(everyIdAndTrace)
if nTraces == 0:
print ("No traces :( Skipping intensity plot")
return
maxSec, hzAtMax, lenAtMax = None, None, None
for (id, trace) in everyIdAndTrace:
traceSec = len(trace.data) / trace.rate
if maxSec is None or maxSec < traceSec:
maxSec, hzAtMax, lenAtMax = traceSec, trace.rate, len(trace.data)
resultPlot = np.zeros((nTraces, lenAtMax))
for idx, (id, trace) in enumerate(everyIdAndTrace):
samples = self.allTraceWindow.applyFilters(trace.data, trace.rate)
if trace.rate != hzAtMax:
traceSec = len(samples) / trace.rate
targetLen = int(traceSec * hzAtMax)
samples = signal.resample(samples, targetLen)
resultPlot[idx, :len(samples)] = samples
self.intensity = ax.imshow(resultPlot, cmap='hot')
ax.set_title(self.allTraceWindow.getTitle())
# X axis is time
ax.get_xaxis().set_major_formatter(FuncFormatter(lambda x, pos: "%.2fs" % (x / hzAtMax)))
ax.set_xlabel("Time (sec)")
# Y axis is POI:
ax.set_ylabel("POI")
infoBox.hide()
def needToUpdate(self):
for ax in self.axes:
ax.cla()
self.compute_initial_figure()
self.updateColorbarHack()
self.draw()
def updateColorbarHack(self):
# Note: matplotlib doesn't like it when you edit the colorbar under it.
# Currently it keeps shrinking...I need to fix this properly.
if self.colorbar is not None:
self.fig.delaxes(self.fig.axes[1])
vPad, hPad = 0.1, 0.15
self.fig.subplots_adjust(top=1.0-vPad, bottom=vPad, right=1.0, left=hPad)
self.colorbar = self.fig.colorbar(self.intensity, ax=self.axes[0], shrink=0.5)
|
{"hexsha": "6dced6e6fe4966bade06b8a0be01dd040e71e2b8", "size": 3792, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydynamo_brain/pydynamo_brain/ui/traces/allTracesCanvas.py", "max_stars_repo_name": "ubcbraincircuits/pyDynamo", "max_stars_repo_head_hexsha": "006eb6edb5e54670574dbfdf7d249e9037f01ffc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-12-16T22:32:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-03T05:42:12.000Z", "max_issues_repo_path": "pydynamo_brain/pydynamo_brain/ui/traces/allTracesCanvas.py", "max_issues_repo_name": "padster/pyDynamo", "max_issues_repo_head_hexsha": "006eb6edb5e54670574dbfdf7d249e9037f01ffc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-15T18:14:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-15T18:14:36.000Z", "max_forks_repo_path": "pydynamo_brain/pydynamo_brain/ui/traces/allTracesCanvas.py", "max_forks_repo_name": "padster/pyDynamo", "max_forks_repo_head_hexsha": "006eb6edb5e54670574dbfdf7d249e9037f01ffc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-21T23:03:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T23:03:24.000Z", "avg_line_length": 34.1621621622, "max_line_length": 97, "alphanum_fraction": 0.6503164557, "include": true, "reason": "import numpy,from scipy", "num_tokens": 973}
|
# Copyright 2021 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mapper module."""
import numpy as np
from mindconverter.graph_based_converter.mapper.base import ONNXToMindSporeMapper
from mindconverter.graph_based_converter.constant import ExchangeMessageKeywords, TemplateKeywords, \
WeightType
class SelectMapper(ONNXToMindSporeMapper):
"""Select mapper."""
@staticmethod
def _operation_name_in_ms(*args, **kwargs):
return "P.Select"
@staticmethod
def _convert_params(**kwargs):
return dict()
@staticmethod
def _convert_trained_weights(**kwargs):
weights = kwargs.get('weight', list())
onnx_location_tensors = [(SelectMapper._find_onnx_name_by_index(i, weights),
SelectMapper._find_location_by_index(i, weights),
SelectMapper._find_val_by_index(i, weights)) for i, _ in enumerate(weights)]
input_shape = kwargs.get('params').get('input_shape')
trainable_weights = dict()
for onnx_location_tensor in onnx_location_tensors:
if isinstance(onnx_location_tensor[2], np.ndarray) and onnx_location_tensor[2].shape:
value = onnx_location_tensor[2]
if input_shape != value.shape:
idx_diffs = [idx for idx, v in enumerate(input_shape) if v != value.shape[idx]]
if len(idx_diffs) == 1:
idx_diff = idx_diffs[0]
data = value.repeat(input_shape[idx_diff], idx_diff)
else:
raise AttributeError('Unsupported attributes in SelectMapper.')
else:
data = value
location = onnx_location_tensor[1]
trainable_weights[f"input_{location}"] = {'data': data,
'type': WeightType.PARAMETER.value,
'onnx_name': onnx_location_tensor[0]}
return trainable_weights
@staticmethod
def _generate_snippet_template(**kwargs):
op = kwargs.get('operation')
args = kwargs.get('converted_params', dict())
weights = kwargs.get('weights', list())
trainable_params = kwargs.get('trainable_params', dict())
if not op:
raise ValueError("Can not get MindSpore operation name.")
onnx_location_tensors = [(SelectMapper._find_onnx_name_by_index(i, weights),
SelectMapper._find_location_by_index(i, weights),
SelectMapper._find_val_by_index(i, weights)) for i, _ in enumerate(weights)]
input_shape = kwargs.get('raw_params').get('input_shape')
variable_slot = 'var_0'
init_template = f"self.{{{variable_slot}}} = {op}()"
inputs_in_construct = [f"{{{ExchangeMessageKeywords.VariableScope.value.INPUTS.value}}}"]
init_template_list, construct_template = SelectMapper._generate_init_construct(variable_slot, args, input_shape,
init_template,
inputs_in_construct,
onnx_location_tensors)
template = {
variable_slot: {
TemplateKeywords.INIT.value: init_template_list,
TemplateKeywords.CONSTRUCT.value: [construct_template]
}
}
exchange_msg = SelectMapper._generate_exchange_msg_select(variable_slot, op, args, weights, trainable_params,
onnx_location_tensors)
outputs_list = [f"opt_{{{variable_slot}}}"]
outputs_mapping = ((0, 0),)
return template, exchange_msg, outputs_list, outputs_mapping
@staticmethod
def _generate_init_construct(variable_slot, args, input_shape, init_template, inputs_in_construct,
onnx_location_tensors):
"""Generate init template and construct template."""
init_template_list = [init_template]
for onnx_location_tensor in onnx_location_tensors:
location = onnx_location_tensor[1]
inputs_in_construct.insert(location, f"self.{{{variable_slot}}}_input_{location}")
value = onnx_location_tensor[2]
if value.shape:
variable_slot_param_name = f"{variable_slot}/input_{location}"
init_input = f"self.{{{variable_slot}}}_input_{location} = {{{variable_slot_param_name}}}"
else:
args[f"input_{location}"] = value.tolist()
init_input = f"self.{{{variable_slot}}}_input_{location} = " \
f"Tensor({{input_{location}}} * np.ones({tuple(input_shape)}).astype(np.{value.dtype}))"
init_template_list.append(init_input)
construct_template = f"opt_{{{variable_slot}}} = self.{{{variable_slot}}}({', '.join(inputs_in_construct)})"
return init_template_list, construct_template
@staticmethod
def _generate_exchange_msg_select(variable_slot, op, args, weights, trainable_params, onnx_location_tensors):
"""Generate exchange_msg for select mapper."""
exchange_msg = SelectMapper._generate_exchange_msg(variable_slot=variable_slot, op=op, args=args,
weights=weights, trainable_params=trainable_params)
declared_key = ExchangeMessageKeywords.VariableScope.value.PARAMETERS_DECLARED.value
for onnx_location_tensor in onnx_location_tensors:
value = onnx_location_tensor[2]
if value.shape:
if not exchange_msg[variable_slot].get(declared_key):
exchange_msg[variable_slot][declared_key] = {f"input_{onnx_location_tensor[1]}": ""}
else:
exchange_msg[variable_slot][declared_key][f"input_{onnx_location_tensor[1]}"] = ""
return exchange_msg
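if __name__ == "__main__":
    # Minimal sketch (not part of the original mapper): _convert_trained_weights above relies on
    # numpy's ndarray.repeat to broadcast a weight whose shape differs from input_shape along
    # exactly one axis; this prints (2, 3) for a (2, 1) array repeated three times on axis 1.
    _value = np.zeros((2, 1))
    print(_value.repeat(3, 1).shape)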
|
{"hexsha": "2b7e6badc80aeb6871af3228e2c624f47a4a53ff", "size": 6808, "ext": "py", "lang": "Python", "max_stars_repo_path": "ecosystem_tools/mindconverter/mindconverter/graph_based_converter/mapper/onnx/ops/select_mapper.py", "max_stars_repo_name": "mindspore-ai/mindinsight", "max_stars_repo_head_hexsha": "8c57fdd62eb7f8653662be2208633386ac82e8d7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 216, "max_stars_repo_stars_event_min_datetime": "2020-03-28T02:11:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:20:09.000Z", "max_issues_repo_path": "ecosystem_tools/mindconverter/mindconverter/graph_based_converter/mapper/onnx/ops/select_mapper.py", "max_issues_repo_name": "mindspore-ai/mindinsight", "max_issues_repo_head_hexsha": "8c57fdd62eb7f8653662be2208633386ac82e8d7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2020-03-31T03:00:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-03T13:01:06.000Z", "max_forks_repo_path": "ecosystem_tools/mindconverter/mindconverter/graph_based_converter/mapper/onnx/ops/select_mapper.py", "max_forks_repo_name": "mindspore-ai/mindinsight", "max_forks_repo_head_hexsha": "8c57fdd62eb7f8653662be2208633386ac82e8d7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2020-03-28T02:41:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T12:20:25.000Z", "avg_line_length": 52.3692307692, "max_line_length": 120, "alphanum_fraction": 0.6019388954, "include": true, "reason": "import numpy", "num_tokens": 1290}
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('runprefixes', type=str, nargs='+',
help='Prefixes of the ordinate and time txt files to compare.')
parser.add_argument('--ordinate', type=str, help='Name of the ordinate variable to plot vs time (in the txt file name)',
default='denerdt')
parser.add_argument('--logtime', action='store_true', help='If --logtime, plot Log10(time).')
parser.add_argument('--tlo', type=float, help='Time lower limit')
parser.add_argument('--thi', type=float, help='Time upper limit')
args = parser.parse_args()
ofiles = []
tfiles = []
for prefix in args.runprefixes:
ofiles += glob.glob('{}_{}.txt'.format(prefix, args.ordinate))
tfiles += glob.glob('{}_time.txt'.format(prefix))
if len(ofiles) == 0 or len(tfiles) == 0:
exit()
ordinate_data = [np.genfromtxt(of) for of in ofiles]
time_data = [np.genfromtxt(tf) for tf in tfiles]
## Define RGBA to HEX
def rgba_to_hex(rgba):
r = int(rgba[0]*255.0)
g = int(rgba[1]*255.0)
b = int(rgba[2]*255.0)
return '#{:02X}{:02X}{:02X}'.format(r,g,b)
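# Worked example (for reference only): rgba_to_hex((1.0, 0.5, 0.0, 1.0)) returns '#FF7F00',
# since int(0.5 * 255.0) == 127 == 0x7F.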
# Figure out time axis limits
if args.tlo and args.thi:
ltlim = [args.tlo, args.thi]
elif args.tlo:
ltlim = [args.tlo, time_data[0][-1]]
elif args.thi:
ltlim = [time_data[0][0], args.thi]
else:
ltlim = [time_data[0][0], time_data[0][-1]]
if args.logtime:
time_data = [np.log10(td) for td in time_data]
ltlim = np.log10(ltlim)
xlim = ltlim
if args.logtime:
xlabel = '$\\mathrm{Log_{10}~Time~(s)}$'
else:
xlabel = '$\\mathrm{Time~(s)}$'
# Get set of colors to use for abundances
cm = plt.get_cmap('nipy_spectral')
clist = [cm(1.0*i/len(time_data)) for i in range(len(time_data))]
hexclist = [rgba_to_hex(ci) for ci in clist]
# Plot ordinate vs. time
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_prop_cycle(cycler('color', hexclist))
for i, (od, td) in enumerate(zip(ordinate_data, time_data)):
ax.plot(td, np.log10(od), label='$\\mathrm{' + '{}'.format(args.runprefixes[i].replace('_', '\_') + '}$'))
lgd = ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)
ax.set_xlim(xlim)
plt.xlabel(xlabel)
plt.ylabel('$\\mathrm{' + '{}'.format(args.ordinate) + '}$')
plt.savefig('cf_{}_v_time.eps'.format(args.ordinate),
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.savefig('cf_{}_v_time.png'.format(args.ordinate), dpi=300,
bbox_extra_artists=(lgd,), bbox_inches='tight')
|
{"hexsha": "090e2bbb3b133a859e76c101e9469fa83f295552", "size": 2616, "ext": "py", "lang": "Python", "max_stars_repo_path": "unit_test/burn_cell/burn_cell_compare_ordinate.py", "max_stars_repo_name": "doreenfan/Microphysics", "max_stars_repo_head_hexsha": "bbfabaae0a98af32dbf353a7747a8ca787710ac6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "unit_test/burn_cell/burn_cell_compare_ordinate.py", "max_issues_repo_name": "doreenfan/Microphysics", "max_issues_repo_head_hexsha": "bbfabaae0a98af32dbf353a7747a8ca787710ac6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unit_test/burn_cell/burn_cell_compare_ordinate.py", "max_forks_repo_name": "doreenfan/Microphysics", "max_forks_repo_head_hexsha": "bbfabaae0a98af32dbf353a7747a8ca787710ac6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.974025974, "max_line_length": 120, "alphanum_fraction": 0.6655198777, "include": true, "reason": "import numpy", "num_tokens": 777}
|
import os
import time
import playsound
import speech_recognition as sr
from gtts import gTTS
import cv2
import numpy as np
import webbrowser
print("How can i help you")
def speak(text):
tts=gTTS(text=text,lang="en")
filename="voice.mp3"
tts.save(filename)
playsound.playsound(filename)
def get_audio():
r=sr.Recognizer()
with sr.Microphone() as source:
audio=r.listen(source)
said=""
        try:
said=r.recognize_google(audio)
print(said)
except Exception as e:
print("Exception"+str(e))
return said
text=get_audio()
if "open Google" in text:
speak("Opening google for you")
webbrowser.open_new("https://www.google.com")
elif "open YouTube" in text:
speak("Opening youtube for you")
webbrowser.open_new("https://www.youtube.com")
elif "open text" in text:
speak("Opening your text file")
open("WiFi_ESP8266.txt","r")
else:
f=open("newtext.txt","w")
f.write(text)
|
{"hexsha": "d517b1a9bff0631f01bdd8fa4e1668440a88fd38", "size": 1049, "ext": "py", "lang": "Python", "max_stars_repo_path": "Machine Learning Projects/Speech To Text/code5.py", "max_stars_repo_name": "anshu1905/IoT-and-ML", "max_stars_repo_head_hexsha": "78e8fdb83201564867ac4f07beb922265f516237", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Machine Learning Projects/Speech To Text/code5.py", "max_issues_repo_name": "anshu1905/IoT-and-ML", "max_issues_repo_head_hexsha": "78e8fdb83201564867ac4f07beb922265f516237", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Machine Learning Projects/Speech To Text/code5.py", "max_forks_repo_name": "anshu1905/IoT-and-ML", "max_forks_repo_head_hexsha": "78e8fdb83201564867ac4f07beb922265f516237", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.98, "max_line_length": 51, "alphanum_fraction": 0.6167778837, "include": true, "reason": "import numpy", "num_tokens": 255}
|
import pandas as pd
import numpy as np
import pickle
from sklearn.multioutput import MultiOutputClassifier, ClassifierChain
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import os
def load_data(difficulty):
"""Loads DataFrames saved in pickle files based on their difficulty level."""
filelist = [f for f in os.listdir('level_df')]
full = pd.DataFrame()
for f in filelist:
if f.endswith(f"{difficulty}.pkl"):
with open(f"./level_df/{f}", 'rb') as d:
df = pickle.load(d)
full = pd.concat([full, df], axis = 0, ignore_index = True, sort = True)
else:
continue
full.dropna(subset = list(full.iloc[:, 0:13].columns), axis = 0, inplace = True)
full.fillna(999, inplace = True)
return full
def train_model(model, df):
"""This function trains the model specified in 'model', either a sklearn multilabel classifier implemented
with a Random Forest classifier, or a Chain Classifier implemented with a Random Forest classifier.
model = 'multi' or 'chain'
"""
X = df.iloc[:, 0: 13]
y = df[list(filter(lambda x: str(x).startswith('notes'), df.columns))]
if model == 'multi':
multi = MultiOutputClassifier(RandomForestClassifier()).fit(X, y)
return (multi, list(y.columns))
elif model == 'chain':
columns = {}
for index, value in enumerate(y.columns):
columns.update({value: index})
constant = ['notes_type_0', 'notes_lineIndex_0', 'notes_lineLayer_0',
'notes_cutDirection_0', 'notes_type_1', 'notes_lineIndex_1', 'notes_lineLayer_1',
'notes_cutDirection_1', 'notes_type_3', 'notes_lineIndex_3',
'notes_lineLayer_3', 'notes_cutDirection_3']
order = [columns[x] for x in constant]
chain = ClassifierChain(RandomForestClassifier(), order = order).fit(X, y)
return (chain, constant)
def load_and_train():
"""This function loads the data from pickle files and trains models, saving them as pickle files in the
models directory."""
difficulties = ['expert', 'expertPlus']
models = ['chain']
for difficulty in difficulties:
df = load_data(difficulty)
print(f"Loaded {difficulty} data successfully.")
for model in models:
print(f"Training {difficulty} {model} model.")
RF = train_model(model, df)
with open(f"./models/{model}_{difficulty}.pkl", 'wb') as f:
pickle.dump(RF, f)
print(f"Succesfully trained and saved {difficulty} {model} model.")
|
{"hexsha": "d8a03e56d77e1446b5a82247499abfd5a9aa061b", "size": 2672, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/RFClassifier.py", "max_stars_repo_name": "wvsharber/BeatMapSynthesizer", "max_stars_repo_head_hexsha": "9497ad8dcb0567eaab6b5102121c982dd0e74d54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2020-04-20T13:52:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T20:36:50.000Z", "max_issues_repo_path": "src/RFClassifier.py", "max_issues_repo_name": "wvsharber/BeatMapSynthesizer", "max_issues_repo_head_hexsha": "9497ad8dcb0567eaab6b5102121c982dd0e74d54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-04-19T13:35:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T20:48:30.000Z", "max_forks_repo_path": "src/RFClassifier.py", "max_forks_repo_name": "wvsharber/BeatMapSynthesizer", "max_forks_repo_head_hexsha": "9497ad8dcb0567eaab6b5102121c982dd0e74d54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-04-19T13:40:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T20:21:27.000Z", "avg_line_length": 44.5333333333, "max_line_length": 110, "alphanum_fraction": 0.6410928144, "include": true, "reason": "import numpy", "num_tokens": 622}
|
# an example of a greta function
# pull out the necessary internal functions from greta
op <- .internals$nodes$constructors$op
#' @importFrom stats var median quantile
#' @import greta
#'
#' @title compute the Bayesian R square for a greta regression model
#' @export
#'
#' @description Compute a Bayesian version of R-square for regression models (GLMs ...)
#'
#' @usage ## S3 method for class 'mcmc.list'
#' bayes_R2(y, pred, draws, summary = TRUE, probs = c(0.1, 0.9))
#'
#' @param y a greta array, the response variables
#' @param pred a greta array, the linear predictor
#' @param draws a greta_mcmc_list, posterior draws as returned from calling the greta sampling algorithm (i.e. mcmc)
#' @param summary a logical, if TRUE (default) the function outputs summary statistics (median, mad, credible intervals) for the R2; if FALSE the raw values are returned
#' @param probs a numeric vector of length two specifying the lower and upper limits of the credible intervals (defaults to 0.1 and 0.9), only used if summary=TRUE
#'
#' @return If summary=TRUE a 1 x C matrix is returned (C = length(probs) + 2) containing summary statistics of Bayesian R-squared values. If summary = FALSE the posterior samples of the R-squared values are returned as a numeric vector of length S (S is the number of samples)
#'
#' @details See https://github.com/jgabry/bayes_R2/blob/master/bayes_R2.pdf for a description of the computation. Note that only univariate models are supported. This function is largely inspired by brms::bayes_R2.
#'
#'
#' @examples
#' \dontrun{
#' intercept <- normal(0, 1)
#' slope <- normal(0, 1)
#' sd_resid <- cauchy(0, 1, truncation = c(0, 100))
#'
#' x <- runif(100)
#' y <- as_data(rnorm(100, 1 + 2 * x, 1))
#'
#' pred <- intercept + slope * x
#' distribution(y) <- normal(pred, sd_resid)
#'
#' m <- model(intercept, slope, sd_resid)
#' drr <- mcmc(m)
#'
#' bayes_R2(y, pred, drr)
#' }
bayes_R2 <- function(y, pred, draws, summary = TRUE, probs = c(0.1, 0.9)){
# test that correct objects are being passed
## to dev
# get the posterior draws for the linear predictor
ypred <- calculate(pred, values = draws)
# posterior residuals
e <- -1 * sweep(as.matrix(ypred), 2, as.matrix(y))
# variance in linear predictors
var_ypred <- apply(as.matrix(ypred), 1, var)
# variance in residuals
var_e <- apply(e, 1, var)
# R2 values
post_r2 <- var_ypred / (var_ypred + var_e)
# if summary=FALSE return these values
if(summary){
# get summary stats
med <- median(post_r2)
mad <- median(abs(post_r2 - med))
ci <- quantile(post_r2, probs = probs)
out <- matrix(c(med, mad, ci),ncol = 4,
dimnames = list("",
c("median", "mad", "low-ci", "high-ci")))
return(out)
} else{
return(post_r2)
}
}
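# For reference (illustrative, standard notation rather than code used above): for each
# posterior draw s the statistic is
#   R2_s = var(ypred_s) / (var(ypred_s) + var(e_s)),  with e_s = y - ypred_s,
# i.e. the modelled variance as a proportion of modelled plus residual variance.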
|
{"hexsha": "74bc225949f5e9ec8c73ab5232d15756e4290c02", "size": 2835, "ext": "r", "lang": "R", "max_stars_repo_path": "R/bayes_R2.r", "max_stars_repo_name": "lionel68/greta.checks", "max_stars_repo_head_hexsha": "bb5cb6b4fca11a79367f1370409a5d17d4a0ccac", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-14T02:48:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-14T02:48:30.000Z", "max_issues_repo_path": "R/bayes_R2.r", "max_issues_repo_name": "lionel68/greta.checks", "max_issues_repo_head_hexsha": "bb5cb6b4fca11a79367f1370409a5d17d4a0ccac", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/bayes_R2.r", "max_forks_repo_name": "lionel68/greta.checks", "max_forks_repo_head_hexsha": "bb5cb6b4fca11a79367f1370409a5d17d4a0ccac", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0, "max_line_length": 276, "alphanum_fraction": 0.6638447972, "num_tokens": 823}
|
# https://en.wikipedia.org/wiki/Tak_(function)
module BenchTarai
using BenchmarkTools
module SeqTarai
tarai(x, y, z) =
if y < x
tarai(tarai(x - 1, y, z), tarai(y - 1, z, x), tarai(z - 1, x, y))
else
y
end
end # module SeqTarai
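# Reference check (illustrative, not part of the original benchmark): tarai has the closed
# form `y` if x <= y, else `z` if y <= z, else `x`, so e.g. tarai(3, 1, 10) == 10.
@assert SeqTarai.tarai(3, 1, 10) == 10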
module BaseTarai
tarai(x, y, z) =
if y < x
a = Threads.@spawn tarai(x - 1, y, z)
b = Threads.@spawn tarai(y - 1, z, x)
c = tarai(z - 1, x, y)
tarai(fetch(a)::Int, fetch(b)::Int, c)
else
y
end
end # module BaseTarai
module TapirTarai
using Base.Experimental: Tapir
tarai(x, y, z) =
if y < x
Tapir.@output a b c
Tapir.@sync begin
Tapir.@spawn a = tarai(x - 1, y, z)
Tapir.@spawn b = tarai(y - 1, z, x)
c = tarai(z - 1, x, y)
end
tarai(a, b, c)
else
y
end
end # module TapirTarai
module WSTarai
using Base.Experimental: Tapir
using TapirSchedulers
tarai(x, y, z) =
if y < x
Tapir.@output a b c
Tapir.@sync WorkStealingTaskGroup() begin
Tapir.@spawn a = tarai(x - 1, y, z)
Tapir.@spawn b = tarai(y - 1, z, x)
c = tarai(z - 1, x, y)
end
tarai(a, b, c)
else
y
end
end # module WSTarai
function setup(xyz = [(3, 1, 10)])
suite = BenchmarkGroup()
for (x::Int, y::Int, z::Int) in xyz
s0 = suite["x=$x, y=$y, z=$z"] = BenchmarkGroup()
s0["seq"] = @benchmarkable SeqTarai.tarai($x, $y, $z)
s0["base"] = @benchmarkable BaseTarai.tarai($x, $y, $z)
s0["tapir"] = @benchmarkable TapirTarai.tarai($x, $y, $z)
s0["ws"] = @benchmarkable WSTarai.tarai($x, $y, $z)
end
return suite
end
function clear() end
end # module BenchTarai
|
{"hexsha": "a00a3bc9c0ddcee995834ebda34127fe7db72abd", "size": 1775, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "benchmark/TapirSchedulersBenchmarks/src/bench_tarai.jl", "max_stars_repo_name": "cesmix-mit/TapirSchedulers.jl", "max_stars_repo_head_hexsha": "fd80ca0e78bfacacf6b337cae5f586f45994d945", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-15T01:40:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-15T01:40:06.000Z", "max_issues_repo_path": "benchmark/TapirSchedulersBenchmarks/src/bench_tarai.jl", "max_issues_repo_name": "cesmix-mit/TapirSchedulers.jl", "max_issues_repo_head_hexsha": "fd80ca0e78bfacacf6b337cae5f586f45994d945", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark/TapirSchedulersBenchmarks/src/bench_tarai.jl", "max_forks_repo_name": "cesmix-mit/TapirSchedulers.jl", "max_forks_repo_head_hexsha": "fd80ca0e78bfacacf6b337cae5f586f45994d945", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.4022988506, "max_line_length": 73, "alphanum_fraction": 0.5278873239, "num_tokens": 635}
|
import sys
import copy
import math
import numpy as np
"""This file contains various gradient optimisers"""
# class for simple gradient descent
class SimpleGradientDescent:
def __init__(self, eta, layers, weight_decay=0.0):
# learning rate
self.eta = eta
# number of layers
self.layers = layers
# number of calls
self.calls = 1
# learning rate controller
self.lrc = 1.0
# weight decay
self.weight_decay = weight_decay
# function for gradient descending
def descent(self, network, gradient):
for i in range(self.layers):
network[i]['weight'] = network[i]['weight'] - ((self.eta / self.lrc) * gradient[i][
'weight']) - (self.eta * self.weight_decay * network[i]['weight'])
network[i]['bias'] -= ((self.eta / self.lrc) * gradient[i]['bias'])
self.calls += 1
if self.calls % 10 == 0:
self.lrc += 1.0
# class for Momentum gradient descent
class MomentumGradientDescent:
def __init__(self, eta, layers, gamma, weight_decay=0.0):
# learning rate
self.eta = eta
self.gamma = gamma
# number of layers
self.layers = layers
# number of calls
self.calls = 1
        # learning rate controller
self.lrc = 1
# historical momentum
self.momentum = None
# weight decay
self.weight_decay = weight_decay
# function for gradient descending
def descent(self, network, gradient):
"""http://cse.iitm.ac.in/~miteshk/CS7015/Slides/Teaching/pdf/Lecture5.pdf , Slide 70"""
gamma = min(1 - 2 ** (-1 - math.log((self.calls / 250.0) + 1, 2)), self.gamma)
if self.momentum is None:
# copy the structure
self.momentum = copy.deepcopy(gradient)
            # initialize momentum - refer to the above lecture, slide 36
for i in range(self.layers):
self.momentum[i]['weight'] = (self.eta / self.lrc) * gradient[i]['weight']
self.momentum[i]['bias'] = (self.eta / self.lrc) * gradient[i]['bias']
else:
# update momentum
for i in range(self.layers):
self.momentum[i]['weight'] = gamma * self.momentum[i]['weight'] + (self.eta / self.lrc) * gradient[i][
'weight']
self.momentum[i]['bias'] = gamma * self.momentum[i]['bias'] + (self.eta / self.lrc) * gradient[i][
'bias']
# the descent
for i in range(self.layers):
network[i]['weight'] = network[i]['weight'] - self.momentum[i]['weight'] - (
(self.eta / self.lrc) * self.weight_decay * network[i][
'weight'])
network[i]['bias'] -= self.momentum[i]['bias']
self.calls += 1
if self.calls % 10 == 0:
self.lrc += 1.0
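# For reference (illustrative, standard notation): the classical momentum update implemented
# per layer above is
#   u_t = gamma * u_{t-1} + (eta / lrc) * grad_t ;   theta <- theta - u_t
# with an optional weight-decay term subtracted from the weights.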
# class for NAG
class NAG:
def __init__(self, eta, layers, gamma, weight_decay=0.0):
# learning rate
self.eta = eta
self.gamma = gamma
# number of layers
self.layers = layers
# number of calls
self.calls = 1
# historical momentum
self.momentum = None
# learning rate controller
self.lrc = 1.0
# weight decay
self.weight_decay = weight_decay
# function for lookahead. Call this before forward propagation.
def lookahead(self, network):
# case when no momentum has been generated yet.
if self.momentum is None:
pass
else:
# update the gradient using momentum
for i in range(self.layers):
network[i]['weight'] -= self.gamma * self.momentum[i]['weight']
network[i]['bias'] -= self.gamma * self.momentum[i]['bias']
# function for gradient descending
def descent(self, network, gradient):
# the descent
for i in range(self.layers):
network[i]['weight'] = network[i]['weight'] - ((self.eta / self.lrc) * gradient[i][
'weight']) - ((self.eta / self.lrc) * self.weight_decay * network[i]['weight'])
network[i]['bias'] -= self.eta * gradient[i]['bias']
gamma = min(1 - 2 ** (-1 - math.log((self.calls / 250.0) + 1, 2)), self.gamma)
# generate momentum for the next time step next
if self.momentum is None:
# copy the structure
self.momentum = copy.deepcopy(gradient)
# initialize momentum
for i in range(self.layers):
self.momentum[i]['weight'] = (self.eta / self.lrc) * gradient[i]['weight']
self.momentum[i]['bias'] = (self.eta / self.lrc) * gradient[i]['bias']
else:
# update momentum: http://cse.iitm.ac.in/~miteshk/CS7015/Slides/Teaching/pdf/Lecture5.pdf , slide: 46
for i in range(self.layers):
self.momentum[i]['weight'] = gamma * self.momentum[i]['weight'] + ((self.eta / self.lrc) * gradient[i][
'weight'])
self.momentum[i]['bias'] = gamma * self.momentum[i]['bias'] + (
(self.eta / self.lrc) * gradient[i]['bias'])
self.calls += 1
if self.calls % 10 == 0:
self.lrc += 1.0
"""As mentioned in this paper: https://arxiv.org/pdf/1609.04747.pdf
RMSProp, ADAM and NADAM have adaptive learning rates, so they do not need an lrc."""
class RMSProp:
def __init__(self, eta, layers, beta, weight_decay=0.0):
# learning rate
self.eta = eta
# decay parameter for denominator
self.beta = beta
# number of layers
self.layers = layers
# number of calls
self.calls = 1
# epsilon
self.epsilon = 0.001
# to implement update rule for RMSProp
self.update = None
# weight decay
self.weight_decay = weight_decay
# function for gradient descending
def descent(self, network, gradient):
# generate update for the next time step
if self.update is None:
# copy the structure
self.update = copy.deepcopy(gradient)
# initialize update at time step 1 assuming that update at time step 0 is 0
for i in range(self.layers):
self.update[i]['weight'] = (1 - self.beta) * (gradient[i]['weight']) ** 2
self.update[i]['bias'] = (1 - self.beta) * (gradient[i]['bias']) ** 2
else:
for i in range(self.layers):
self.update[i]['weight'] = self.beta * self.update[i]['weight'] + (1 - self.beta) * (gradient[i][
'weight']) ** 2
self.update[i]['bias'] = self.beta * self.update[i]['bias'] + (1 - self.beta) * (
gradient[i]['bias']) ** 2
# Now we use the update rule for RMSProp
for i in range(self.layers):
network[i]['weight'] = network[i]['weight'] - np.multiply(
(self.eta / np.sqrt(self.update[i]['weight'] + self.epsilon)),
gradient[i]['weight']) - self.weight_decay * network[i]['weight']
network[i]['bias'] = network[i]['bias'] - np.multiply(
(self.eta / np.sqrt(self.update[i]['bias'] + self.epsilon)), gradient[i]['bias'])
self.calls += 1
# class for ADAM: Reference: https://arxiv.org/pdf/1412.6980.pdf?source=post_page---------------------------
"""Using the previous gradients instead of the previous updates allows the algorithm to continue changing
direction even when the learning rate has annealed significantly toward the end of training, resulting
in more precise fine-grained convergence"""
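# For reference (illustrative, standard ADAM notation): at step t the bias-corrected moments
# are m_hat = m_t / (1 - beta1 ** t) and v_hat = v_t / (1 - beta2 ** t), and the update is
#   theta <- theta - eta * m_hat / (sqrt(v_hat) + eps)
# which is what the descent method below implements per layer, plus optional weight decay.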
class ADAM:
def __init__(self, eta, layers, weight_decay=0.0, beta1=0.9, beta2=0.999, eps=1e-8):
# learning rate
self.eta = eta
self.beta1 = beta1
self.beta2 = beta2
# number of layers
self.layers = layers
# number of calls
self.calls = 1
# first moment vector m_t: defined as a decaying mean over the previous gradients
self.momentum = None
self.t_momentum = None
# second moment vector v_t
self.second_momentum = None
self.t_second_momentum = None
# epsilon
self.eps = eps
# weight decay
self.weight_decay = weight_decay
# function for gradient descending
def descent(self, network, gradient):
if self.momentum is None:
# copy the structure
self.momentum = copy.deepcopy(gradient)
self.second_momentum = copy.deepcopy(gradient)
for i in range(self.layers):
# first momentum initialization
self.momentum[i]['weight'][:] = np.zeros_like(gradient[i]['weight'])
self.momentum[i]['bias'][:] = np.zeros_like(gradient[i]['bias'])
# second momentum initialization
self.second_momentum[i]['weight'][:] = np.zeros_like(gradient[i]['weight'])
self.second_momentum[i]['bias'][:] = np.zeros_like(gradient[i]['bias'])
self.t_momentum = copy.deepcopy(self.momentum)
self.t_second_momentum = copy.deepcopy(self.second_momentum)
for i in range(self.layers):
# Update biased first moment estimate: Moving average of gradients
self.momentum[i]['weight'] = self.beta1 * self.momentum[i]['weight'] + (1 - self.beta1) * gradient[i][
'weight']
self.momentum[i]['bias'] = self.beta1 * self.momentum[i]['bias'] + (1 - self.beta1) * gradient[i]['bias'
]
# Update biased second raw moment estimate: rate adjusting parameter update similar to RMSProp
self.second_momentum[i]['weight'] = self.beta2 * self.second_momentum[i]['weight'] + (
1 - self.beta2) * np.power(gradient[i][
'weight'], 2)
self.second_momentum[i]['bias'] = self.beta2 * self.second_momentum[i]['bias'] + (
1 - self.beta2) * np.power(gradient[i]['bias'
], 2)
# bias correction
for i in range(self.layers):
self.t_momentum[i]['weight'][:] = (1 / (1 - (self.beta1 ** self.calls))) * self.momentum[i]['weight']
self.t_momentum[i]['bias'][:] = (1 / (1 - (self.beta1 ** self.calls))) * self.momentum[i]['bias']
self.t_second_momentum[i]['weight'][:] = (1 / (1 - (self.beta2 ** self.calls))) * self.second_momentum[i][
'weight']
self.t_second_momentum[i]['bias'][:] = (1 / (1 - (self.beta2 ** self.calls))) * self.second_momentum[i][
'bias']
# the descent
for i in range(self.layers):
# temporary variable for calculation
temp = np.sqrt(self.t_second_momentum[i]['weight'])
# add epsilon to square root of temp
temp_eps = temp + self.eps
# inverse everything
temp_inv = 1 / temp_eps
# perform descent: Update rule for weight along with l2 regularisation
network[i]['weight'] = network[i]['weight'] - self.eta * (
np.multiply(temp_inv, self.t_momentum[i]['weight'])) - (
self.eta * self.weight_decay * network[i]['weight'])
# now we do the same for bias
# temporary variable for calculation
temp = np.sqrt(self.t_second_momentum[i]['bias'])
# add epsilon to square root of temp
temp_eps = temp + self.eps
# inverse everything
temp_inv = 1 / temp_eps
            # perform descent for bias
network[i]['bias'] -= self.eta * np.multiply(temp_inv, self.t_momentum[i]['bias'])
self.calls += 1
# Reference: https://openreview.net/pdf?id=OM0jvwB8jIp57ZJjtNEZ
class NADAM:
def __init__(self, eta, layers, weight_decay=0.0, beta1=0.9, beta2=0.999, eps=1e-8):
# learning rate
self.eta = eta
self.beta1 = beta1
self.beta2 = beta2
# number of layers
self.layers = layers
# number of calls
self.calls = 1
# first moment vector m_t: defined as a decaying mean over the previous gradients
self.momentum = None
# second moment vector v_t
self.second_momentum = None
# epsilon
self.eps = eps
# weight decay
self.weight_decay = weight_decay
# function for gradient descending: Algorithm 2 Page 3
def descent(self, network, gradient):
if self.momentum is None:
# copy the structure
self.momentum = copy.deepcopy(gradient)
self.second_momentum = copy.deepcopy(gradient)
# initialize momentums
for i in range(self.layers):
# first momentum initialization
self.momentum[i]['weight'] = (1 - self.beta1) * gradient[i]['weight']
self.momentum[i]['bias'] = (1 - self.beta1) * gradient[i]['bias']
# second momentum initialization
self.second_momentum[i]['weight'] = (1 - self.beta2) * np.power(gradient[i]['weight'], 2)
self.second_momentum[i]['bias'] = (1 - self.beta2) * np.power(gradient[i]['bias'], 2)
else:
for i in range(self.layers):
# Update biased first moment estimate: Moving average of gradients
self.momentum[i]['weight'] = self.beta1 * self.momentum[i]['weight'] + (1 - self.beta1) * \
gradient[i][
'weight']
self.momentum[i]['bias'] = self.beta1 * self.momentum[i]['bias'] + (1 - self.beta1) * gradient[i][
'bias'
]
# Update biased second raw moment estimate: rate adjusting parameter update similar to RMSProp
self.second_momentum[i]['weight'] = self.beta2 * self.second_momentum[i]['weight'] + (
1 - self.beta2) * np.power(gradient[i][
'weight'], 2)
self.second_momentum[i]['bias'] = self.beta2 * self.second_momentum[i]['bias'] + (
1 - self.beta2) * np.power(gradient[i]['bias'
], 2)
# bias correction
m_t_hat = copy.deepcopy(self.momentum)
v_t_hat = copy.deepcopy(self.second_momentum)
for i in range(self.layers):
m_t_hat[i]['weight'] = (self.beta1 / (1 - (self.beta1 ** self.calls))) * self.momentum[i][
'weight'] + ((1 - self.beta1) / (1 - (self.beta1 ** self.calls))) * gradient[i]['weight']
m_t_hat[i]['bias'] = (self.beta1 / (1 - (self.beta1 ** self.calls))) * self.momentum[i]['bias'] + (
(1 - self.beta1) / (1 - (self.beta1 ** self.calls))) * gradient[i]['bias']
v_t_hat[i]['weight'] = (self.beta2 / (1 - (self.beta2 ** self.calls))) * \
self.second_momentum[i][
'weight']
v_t_hat[i]['bias'] = (self.beta2 / (1 - (self.beta2 ** self.calls))) * self.second_momentum[i][
'bias']
# the descent
for i in range(self.layers):
# temporary variable for calculation
temp = np.sqrt(self.second_momentum[i]['weight'] + self.eps)
# inverse everything
temp_inv = 1 / temp
# perform descent for weight
network[i]['weight'] = network[i]['weight'] - self.eta * (
np.multiply(temp_inv, m_t_hat[i]['weight'])) - (self.eta * self.weight_decay * network[i]['weight'])
# now we do the same for bias
# temporary variable for calculation
temp = np.sqrt(self.second_momentum[i]['bias']) + self.eps
# inverse everything
temp_inv = 1 / temp
            # perform descent for bias using the bias-corrected first moment
            network[i]['bias'] -= self.eta * np.multiply(temp_inv, m_t_hat[i]['bias'])
self.calls += 1
|
{"hexsha": "566ff10ef3ddd4840a59ddade7269e5cb7c43026", "size": 16268, "ext": "py", "lang": "Python", "max_stars_repo_path": "Assignment1/optimiser.py", "max_stars_repo_name": "utsavdey/Fundamentals_Of_Deep_Learning_Assignments", "max_stars_repo_head_hexsha": "c1b2fc49e929ab09760f083aa8b052845afad48f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Assignment1/optimiser.py", "max_issues_repo_name": "utsavdey/Fundamentals_Of_Deep_Learning_Assignments", "max_issues_repo_head_hexsha": "c1b2fc49e929ab09760f083aa8b052845afad48f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assignment1/optimiser.py", "max_forks_repo_name": "utsavdey/Fundamentals_Of_Deep_Learning_Assignments", "max_forks_repo_head_hexsha": "c1b2fc49e929ab09760f083aa8b052845afad48f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9675675676, "max_line_length": 119, "alphanum_fraction": 0.5453036636, "include": true, "reason": "import numpy", "num_tokens": 3822}
|
using DirectDependents
using Test
@testset "DirectDependents.jl" begin
@test !isempty(direct_dependents("RecipesBase"))
end
|
{"hexsha": "46a16d337638b45283e00bfc2fd7f2101f519c95", "size": 129, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "daschw/DirectDependents.jl", "max_stars_repo_head_hexsha": "67b730f852488aedb3544cb461275ece8507ccb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-03-31T10:43:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T05:57:42.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "daschw/DirectDependents.jl", "max_issues_repo_head_hexsha": "67b730f852488aedb3544cb461275ece8507ccb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-13T09:39:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-15T12:49:16.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "daschw/DirectDependents.jl", "max_forks_repo_head_hexsha": "67b730f852488aedb3544cb461275ece8507ccb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.4285714286, "max_line_length": 52, "alphanum_fraction": 0.7906976744, "num_tokens": 36}
|
export PermutationLayer
import Base: eltype
@doc raw"""
The permutation layer specifies an invertible mapping ``{\bf{y}} = g({\bf{x}}) = P{\bf{x}}`` where ``P`` is a permutation matrix.
"""
struct PermutationLayer{ T } <: AbstractLayer
dim :: Int
P :: PermutationMatrix{T}
end
function PermutationLayer(dim::T) where { T <: Integer }
# create random permutation matrix
P = PermutationMatrix(dim)
# return layer
return PermutationLayer(dim, P)
end
function PermutationLayer(P::PermutationMatrix)
# create random permutation matrix
@assert size(P,1) == size(P,2) "The passed permutation matrix is not square."
# return layer
return PermutationLayer(size(P,1), P)
end
struct PermutationLayerPlaceholder <: AbstractLayerPlaceholder end
@doc raw"""
`PermutationLayer()` creates a layer that randomly shuffles its input values. The corresponding permutation matrix and its dimensionality are (randomly) generated during model creation.
"""
PermutationLayer() = PermutationLayerPlaceholder() # the function creates a placeholder, of which the dimensionality is set later on.
# prepare placeholder
_prepare(dim::Int, layer::PermutationLayerPlaceholder) = (PermutationLayer(dim), )
function _prepare(dim::Int, layer::PermutationLayer)
@assert dim == size(getP(layer),1) == size(getP(layer),2) "The size of the passed permutation matrix does not comply with the dimensionality of the input."
return (layer, )
end
# compile layer
compile(layer::PermutationLayer, params) = throw(ArgumentError("The permutation matrix does not have any parameters."))
compile(layer::PermutationLayer) = layer
# fetch number of parameters of layer
nr_params(layer::PermutationLayer) = 0
# get-functions for the PermutationLayer structure
getP(layer::PermutationLayer) = layer.P
getmat(layer::PermutationLayer) = layer.P
getdim(layer::PermutationLayer) = layer.dim
# custom Base function for the PermutationLayer structure
eltype(layer::PermutationLayer{T}) where { T } = eltype(T)
eltype(::Type{PermutationLayer{T}}) where { T } = eltype(T)
# forward pass through the permutation layer
function _forward(layer::PermutationLayer, input::AbstractVector{ <: Real })
# fetch variables
P = getP(layer)
# determine result
result = P*input
# return result
return result
end
forward(layer::PermutationLayer, input::AbstractVector{ <: Real }) = _forward(layer, input)
Broadcast.broadcasted(::typeof(forward), layer::PermutationLayer, input::AbstractVector{ <: AbstractVector{ <: Real } }) = broadcast(_forward, Ref(layer), input)
# inplace forward pass through the permutation layer
function forward!(output::AbstractVector{ <: Real }, layer::PermutationLayer, input::AbstractVector{ <: Real })
# fetch variables
P = getP(layer)
# determine result
mul!(output, P, input)
end
# backward pass through the permutation layer
function _backward(layer::PermutationLayer, output::AbstractVector{ <: Real })
# fetch variables
P = getP(layer)
# determine result
result = P'*output
# return result
return result
end
backward(layer::PermutationLayer, output::AbstractVector{ <: Real }) = _backward(layer, output)
Broadcast.broadcasted(::typeof(backward), layer::PermutationLayer, output::AbstractVector{ <: AbstractVector{ <: Real } }) = broadcast(_backward, Ref(layer), output)
# inplace backward pass through the permutation layer
function backward!(input::AbstractVector{ <: Real }, layer::PermutationLayer, output::AbstractVector{ <: Real })
# fetch variables
P = getP(layer)
# determine result
mul!(input, P', output)
end
# jacobian of the permutation layer
function _jacobian(layer::PermutationLayer, input::AbstractVector{ <: Real })
# return result
return getP(layer)
end
jacobian(layer::PermutationLayer, input::AbstractVector{ <: Real }) = _jacobian(layer, input)
Broadcast.broadcasted(::typeof(jacobian), layer::PermutationLayer, input::AbstractVector{ <: AbstractVector{ <: Real } }) = broadcast(_jacobian, Ref(layer), input)
# inverse jacobian of the permutation layer
function _inv_jacobian(layer::PermutationLayer, output::AbstractVector{ <: Real })
# return result
return getP(layer)'
end
inv_jacobian(layer::PermutationLayer, output::AbstractVector{ <: Real }) = _inv_jacobian(layer, output)
Broadcast.broadcasted(::typeof(inv_jacobian), layer::PermutationLayer, output::AbstractVector{ <: AbstractVector{ <: Real } }) = broadcast(_inv_jacobian, Ref(layer), output)
# extra utility functions
det_jacobian(layer::PermutationLayer, input::AbstractVector{ <: Real }) = det(getP(layer))
det_jacobian(layer::PermutationLayer) = det(getP(layer))
absdet_jacobian(layer::PermutationLayer, input::AbstractVector{ <: Real }) = 1.0
absdet_jacobian(layer::PermutationLayer) = 1.0
logdet_jacobian(layer::PermutationLayer, input::AbstractVector{ <: Real }) = 0.0
logdet_jacobian(layer::PermutationLayer) = 0.0
logabsdet_jacobian(layer::PermutationLayer, input::AbstractVector{ <: Real }) = 0.0
logabsdet_jacobian(layer::PermutationLayer) = 0.0
detinv_jacobian(layer::PermutationLayer, output::AbstractVector{ <: Real }) = det(getP(layer)')
detinv_jacobian(layer::PermutationLayer) = det(getP(layer)')
absdetinv_jacobian(layer::PermutationLayer, output::AbstractVector{ <: Real }) = 1.0
absdetinv_jacobian(layer::PermutationLayer) = 1.0
logdetinv_jacobian(layer::PermutationLayer, output::AbstractVector{ <: Real }) = 0.0
logdetinv_jacobian(layer::PermutationLayer) = 0.0
logabsdetinv_jacobian(layer::PermutationLayer, output::AbstractVector{ <: Real }) = 0.0
logabsdetinv_jacobian(layer::PermutationLayer) = 0.0
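# Illustrative usage (a sketch; assumes the surrounding ReactiveMP flow machinery is loaded):
#   layer = PermutationLayer(3)             # layer with a random 3×3 permutation matrix
#   y = forward(layer, [1.0, 2.0, 3.0])     # y = P * x
#   backward(layer, y)                      # P' * y recovers the original input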
|
{"hexsha": "90c79acafbec2feea5f8dd231278bb41dc945b37", "size": 6094, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/nodes/flow/layers/permutation_layer.jl", "max_stars_repo_name": "HoangMHNguyen/ReactiveMP.jl", "max_stars_repo_head_hexsha": "f3e848ab171e0786e3d8eb6a0843dbf6dacc7415", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2021-03-28T13:18:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:52:52.000Z", "max_issues_repo_path": "src/nodes/flow/layers/permutation_layer.jl", "max_issues_repo_name": "HoangMHNguyen/ReactiveMP.jl", "max_issues_repo_head_hexsha": "f3e848ab171e0786e3d8eb6a0843dbf6dacc7415", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2021-03-17T16:07:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T08:50:44.000Z", "max_forks_repo_path": "src/nodes/flow/layers/permutation_layer.jl", "max_forks_repo_name": "HoangMHNguyen/ReactiveMP.jl", "max_forks_repo_head_hexsha": "f3e848ab171e0786e3d8eb6a0843dbf6dacc7415", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-07-12T18:48:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T17:19:20.000Z", "avg_line_length": 40.357615894, "max_line_length": 185, "alphanum_fraction": 0.6924844109, "num_tokens": 1427}
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Train a residual policy on top of a learned agent.
Usage:
Use case --> flags to set
1) Use base agent
a) Use feats from base agent --> network && bc_ckpt_to_load
b) Learn new feats --> network && bc_ckpt_to_load && rl_observation_network
c) Init feats from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network
&& init_feats_from_bc && predict_residual
2) Use RL only
a) Learn new feats --> rl_observation_network (if input type is visual)
b) Init feats & policy from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network && init_from_bc
c) Init feats from base agent but finetune
--> network && bc_ckpt_to_load && rl_observation_network
&& init_feats_from_bc
3) Use base controller + rl observation net from scratch
--> base_controller && rl_observation_network
"""
import os
from absl import app
from absl import flags
from acme import specs
import numpy as np
import tensorflow as tf
from rrlfd.residual import agents
from rrlfd.residual import eval_utils
from rrlfd.residual import setup
from tensorflow.io import gfile
flags.DEFINE_string('domain', None, 'Domain from which to load task.')
flags.DEFINE_string('task', None, 'Task to solve.')
flags.DEFINE_enum('input_type', 'depth', ['depth', 'rgb', 'rgbd', 'full_state'],
'Input modality.')
flags.DEFINE_integer('num_episodes', 10000, 'Number of episodes to run for.')
flags.DEFINE_integer('seed', 2, 'Experiment seed.')
flags.DEFINE_integer('eval_seed', 1, 'Environtment seed for evaluation.')
flags.DEFINE_boolean('increment_eval_seed', False,
'If True, increment eval seed after each eval episode.')
flags.DEFINE_integer('num_eval_episodes', 100,
'Number of episodes to evaluate.')
flags.DEFINE_boolean('collapse_in_eval', True,
'If True, collapse RL policy to its mean in evaluation.')
flags.DEFINE_boolean('stop_if_stuck', False,
'If True, end episode if observations and actions are '
'stuck.')
flags.DEFINE_boolean('end_on_success', False,
'If True, end episode early if success criteria is met.')
flags.DEFINE_integer('eval_freq', 100_000,
'Frequency (in environment training steps) with which to '
'evaluate policy.')
flags.DEFINE_boolean('eval_only', False,
'If True, evaluate policy ckpts of trained policy.')
# Flags for BC agent.
flags.DEFINE_boolean('binary_grip_action', True,
'If True, use open/close action space for gripper. Else '
'use gripper velocity.')
flags.DEFINE_enum('action_norm', 'unit', ['unit', 'zeromean_unitvar'],
'Which normalization to apply to actions.')
flags.DEFINE_enum('residual_action_norm', 'unit',
['none', 'unit', 'zeromean_unitvar', 'centered'],
'Which normalization to apply to residual actions.')
flags.DEFINE_float('residual_action_norm_scale', 1.0,
'Factor by which to scale residual actions. Applied to raw '
'predictions in none, unit and centered normalisation, and '
'to standard deviation in the case of zeromean_unitvar.')
flags.DEFINE_enum('signals_norm', 'none', ['none', 'unit', 'zeromean_unitvar'],
'Which normalization to apply to scalar observations.')
flags.DEFINE_string('original_demos_file', None,
'Dataset used to compute stats for action normalization.')
flags.DEFINE_integer('max_demos_to_load', None,
'Maximum number of demos from demos_file (in order) to '
'use to compute action stats.')
flags.DEFINE_integer('max_demo_length', None,
'If set, trim demonstrations to this length.')
flags.DEFINE_float('val_size', 0.05,
                   'Amount of data to exclude from action normalisation stats. '
'If < 1, the fraction of total loaded data points. Else the '
'number of data points.')
flags.DEFINE_boolean('val_full_episodes', True,
'If True, split data into train and validation on an '
'episode basis. Else split by individual time steps.')
flags.DEFINE_string('last_activation', None,
'Activation function to apply to network output, if any.')
flags.DEFINE_list('fc_layer_sizes', [],
'Sizes of fully connected layers to add on top of bottleneck '
'layer, if any.')
flags.DEFINE_integer('num_input_frames', 3,
'Number of frames to condition base policy on.')
flags.DEFINE_integer('image_size', None, 'Size of rendered images.')
flags.DEFINE_integer('crop_margin_size', 16,
'If crop_frames is True, the number of pixels to crop '
'from each dimension.')
flags.DEFINE_boolean('crop_frames', True,
'If True, crop input frames by 16 pixels in H and W.')
flags.DEFINE_list('target_offsets', [0, 10, 20, 30],
'Offsets in time for actions to predict in behavioral '
'cloning.')
flags.DEFINE_enum('network', None,
['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
'hand_vil'],
'Policy network of base policy.')
flags.DEFINE_boolean('bn_before_concat', False,
'If True, add a batch norm layer before concatenating '
                     'scalar features to visual features.')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight decay for training.')
flags.DEFINE_boolean('predict_residual', True,
'If True, train a residual agent. Else train RL from '
'scratch without base agent.')
flags.DEFINE_enum('rl_observation_network', None,
['resnet18', 'resnet18_narrow32', 'resnet50', 'simple_cnn',
'hand_vil'],
'Observation network of residual policy. If None, '
'observation network of base agent is reused.')
flags.DEFINE_boolean('late_fusion', False,
'If True, fuse stacked frames after convolutional layers. '
'If False, fuse at network input.')
flags.DEFINE_string('policy_init_path', None,
'If set, initialize network weights from a pickle file at '
'this path.')
flags.DEFINE_string('rl_observation_network_ckpt', None,
'If set, checkpoint from which to load observation network '
'weights.')
flags.DEFINE_string('base_controller', None,
'If set, a black-box controller to use for base actions.')
flags.DEFINE_string('bc_ckpt_to_load', None,
'If set, checkpoint from which to load base policy.')
flags.DEFINE_string('rl_ckpt_to_load', None,
'If set, checkpoint from which to load residual policy.')
flags.DEFINE_string('original_demos_path', None,
'If set, path to the original demonstration dataset (to '
'restore normalization statistics). If not set, inferred '
'from BC checkpoint path.')
flags.DEFINE_boolean('init_from_bc', False,
'If True, use BC agent loaded from bc_ckpt_to_load as '
'initialization for RL observation and policy nets.')
flags.DEFINE_boolean('init_feats_from_bc', False,
'If True, initialize RL observation network with BC.')
flags.DEFINE_string('logdir', None, 'Location to log results to.')
flags.DEFINE_boolean('load_saved', False,
'If True, load saved model from checkpoint. Else train '
'from scratch.')
flags.DEFINE_string('base_visible_state', 'robot',
'State features on which to condition the base policy.')
flags.DEFINE_string('residual_visible_state', 'robot',
'State features on which to condition the residual policy. '
'If using full state, the BC net features are replaced '
'with these true state features in input to RL policy.')
flags.DEFINE_float('bernoulli_rate', 0.,
'Fraction of time to use bernoulli exploration for gripper '
'action.')
flags.DEFINE_float('sticky_rate', 0.,
'Stickiness rate of bernoulli exploration for gripper '
'action.')
flags.DEFINE_string('job_id', None,
'Subdirectory to add to logdir to identify run. Set '
'automatically to XM id or datetime if None.')
flags.DEFINE_integer('base_policy_success', None,
'No-op flag used to identify base policy.')
flags.DEFINE_boolean('freeze_rl_observation_network', False,
'If True, do not update acme observation network weights. '
'Else train critic and observation net jointly.')
FLAGS = flags.FLAGS
def train_residual(
env_loop, num_episodes, logdir, eval_freq, num_eval_episodes,
collapse_in_eval, eval_seed, increment_eval_seed, stop_if_stuck):
"""Train residual for num_episodes episodes."""
# TODO(minttu): Should bernoulli rate and sticky rate be defined here instead?
total_steps = env_loop.run(
num_episodes=num_episodes,
out_dir=logdir,
ckpt_freq=min(50_000, eval_freq),
eval_freq=eval_freq,
num_eval_episodes=num_eval_episodes,
collapse_in_eval=collapse_in_eval,
eval_seed=eval_seed,
increment_eval_seed=increment_eval_seed,
stop_if_stuck=stop_if_stuck)
if logdir is not None:
setup.save_acme_agent(env_loop.actor, logdir)
return total_steps
def main(_):
np.random.seed(FLAGS.seed)
tf.random.set_seed(FLAGS.seed)
counter = setup.setup_counting()
logdir, env_logger, agent_logger, summary_writer, _ = setup.setup_logging(
FLAGS.logdir)
base_state = setup.set_visible_features(
FLAGS.domain, FLAGS.task, FLAGS.base_visible_state)
residual_state = setup.set_visible_features(
FLAGS.domain, FLAGS.task, FLAGS.residual_visible_state)
print('Base policy state features', base_state)
print('Residual policy state features', residual_state)
image_size = FLAGS.image_size
if image_size is None:
# Default sizes.
image_size = {
'adroit': 128,
'mime': 240,
}[FLAGS.domain]
# Whether BCAgent's network is used for visual features (expects frames in a
# certain shape).
use_base_agent_image_shape = (
FLAGS.predict_residual or FLAGS.freeze_rl_observation_network)
visible_state = (
list(set(base_state + residual_state)) if FLAGS.predict_residual
else residual_state)
env_loop = setup.make_environment_loop(
domain=FLAGS.domain,
task=FLAGS.task,
seed=FLAGS.seed,
input_type=FLAGS.input_type,
num_input_frames=FLAGS.num_input_frames,
visible_state=visible_state,
image_size=image_size,
use_base_agent_image_shape=use_base_agent_image_shape,
late_fusion=FLAGS.late_fusion,
max_train_episode_steps=FLAGS.max_episode_steps,
agent=None,
counter=counter,
env_logger=env_logger,
summary_writer=summary_writer)
env = env_loop._environment # pylint: disable=protected-access
environment_spec = specs.make_environment_spec(env)
print('Environment spec', environment_spec)
base_agent = None
# Create BC agent. In residual RL, it is used as the base agent, and in
# standalone RL it may be used for action and observation space normalization.
if FLAGS.bc_ckpt_to_load or FLAGS.original_demos_file:
base_agent = setup.load_saved_bc_agent(
ckpt_to_load=FLAGS.bc_ckpt_to_load,
network_type=FLAGS.network,
late_fusion=FLAGS.late_fusion,
input_type=FLAGS.input_type,
domain=FLAGS.domain,
binary_grip_action=FLAGS.binary_grip_action,
num_input_frames=FLAGS.num_input_frames,
crop_frames=FLAGS.crop_frames,
full_image_size=image_size,
crop_margin_size=FLAGS.crop_margin_size,
target_offsets=[int(t) for t in FLAGS.target_offsets],
visible_state_features=base_state,
action_norm=FLAGS.action_norm,
signals_norm=FLAGS.signals_norm,
last_activation=FLAGS.last_activation,
fc_layer_sizes=[int(i) for i in FLAGS.fc_layer_sizes],
weight_decay=FLAGS.weight_decay,
max_demos_to_load=FLAGS.max_demos_to_load,
max_demo_length=FLAGS.max_demo_length,
val_size=FLAGS.val_size,
val_full_episodes=FLAGS.val_full_episodes,
split_seed=FLAGS.split_seed,
env=env,
task=FLAGS.task)
print('action normalization mean\n', base_agent.action_space.mean)
print('action normalization std\n', base_agent.action_space.std)
obs_network_type = None
include_base_feats = True
if ((FLAGS.bc_ckpt_to_load is None and FLAGS.policy_init_path is None)
or (FLAGS.init_from_bc and not FLAGS.freeze_rl_observation_network)
or FLAGS.init_feats_from_bc):
obs_network_type = FLAGS.rl_observation_network
include_base_feats = False
if FLAGS.residual_visible_state == 'full':
include_base_feats = False
include_base_action = FLAGS.predict_residual
residual_spec = setup.define_residual_spec(
residual_state, env, base_agent,
action_norm=FLAGS.residual_action_norm,
action_norm_scale=FLAGS.residual_action_norm_scale,
include_base_action=include_base_action,
include_base_feats=include_base_feats,
base_network=FLAGS.network)
binary_grip_action = FLAGS.init_from_bc and FLAGS.binary_grip_action
residual_agent, eval_policy = setup.make_acme_agent(
environment_spec=environment_spec,
residual_spec=residual_spec,
obs_network_type=obs_network_type,
crop_frames=FLAGS.crop_frames,
full_image_size=image_size,
crop_margin_size=FLAGS.crop_margin_size,
late_fusion=FLAGS.late_fusion,
binary_grip_action=binary_grip_action,
input_type=FLAGS.input_type,
counter=counter,
logdir=logdir,
agent_logger=agent_logger)
if FLAGS.init_from_bc:
setup.init_policy_networks(base_agent.network, residual_agent)
if not FLAGS.freeze_rl_observation_network:
setup.init_observation_networks(base_agent.network, residual_agent)
if FLAGS.init_feats_from_bc:
setup.init_observation_networks(base_agent.network, residual_agent)
# agent_class = (
# agents.ResidualAgent if FLAGS.predict_residual else agents.RLAgent)
if FLAGS.predict_residual:
agent_class = agents.ResidualAgent
else:
if FLAGS.freeze_rl_observation_network:
agent_class = agents.FixedObservationAgent
else:
agent_class = agents.RLAgent
agent = agent_class(
base_agent=base_agent,
rl_agent=residual_agent,
action_space='tool_lin' if FLAGS.domain == 'mime' else FLAGS.task,
action_norm=FLAGS.residual_action_norm,
action_norm_scale=FLAGS.residual_action_norm_scale,
signals_norm=FLAGS.signals_norm,
rl_eval_policy=eval_policy,
feats_spec=residual_spec.observations,
state_keys=residual_state,
bernoulli_rate=FLAGS.bernoulli_rate,
sticky_rate=FLAGS.sticky_rate,
rl_observation_network_type=FLAGS.rl_observation_network,
rl_input_type=FLAGS.input_type,
rl_num_input_frames=FLAGS.num_input_frames,
base_controller=FLAGS.base_controller,
env=env)
env_loop.actor = agent
if FLAGS.eval_only:
ckpts = gfile.Glob(os.path.join(logdir, 'policy_*.index'))
print(os.path.join(logdir, 'policy_*.index'))
print(ckpts)
for ckpt in ckpts:
ckpt = ckpt.replace('.index', '')
loaded_steps = setup.load_agent(agent, ckpt)
total_steps = loaded_steps
eval_utils.eval_agent(
env_loop, FLAGS.task, FLAGS.eval_seed, FLAGS.increment_eval_seed,
FLAGS.num_eval_episodes, loaded_steps, FLAGS.collapse_in_eval,
FLAGS.stop_if_stuck, FLAGS.num_episodes, total_steps, logdir,
summary_writer=None, eval_id='late')
else:
if FLAGS.rl_ckpt_to_load is None:
total_steps = train_residual(
env_loop, FLAGS.num_episodes, logdir, FLAGS.eval_freq,
FLAGS.num_eval_episodes, FLAGS.collapse_in_eval, FLAGS.eval_seed,
FLAGS.increment_eval_seed, FLAGS.stop_if_stuck)
loaded_steps = 'final'
else:
loaded_steps = setup.load_agent(agent, FLAGS.rl_ckpt_to_load)
total_steps = loaded_steps
logdir = os.path.dirname(FLAGS.rl_ckpt_to_load)
eval_utils.eval_agent(
env_loop, FLAGS.task, FLAGS.eval_seed, FLAGS.increment_eval_seed,
FLAGS.num_eval_episodes, loaded_steps, FLAGS.collapse_in_eval,
FLAGS.stop_if_stuck, FLAGS.num_episodes, total_steps, logdir,
summary_writer)
if __name__ == '__main__':
app.run(main)
|
{"hexsha": "634b0fccab27c3552b1794373b64b0eabd3f0b1b", "size": 17621, "ext": "py", "lang": "Python", "max_stars_repo_path": "rrlfd/residual/train.py", "max_stars_repo_name": "shaun95/google-research", "max_stars_repo_head_hexsha": "d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-13T21:48:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T21:48:52.000Z", "max_issues_repo_path": "rrlfd/residual/train.py", "max_issues_repo_name": "shaun95/google-research", "max_issues_repo_head_hexsha": "d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rrlfd/residual/train.py", "max_forks_repo_name": "shaun95/google-research", "max_forks_repo_head_hexsha": "d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-30T07:20:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:20:29.000Z", "avg_line_length": 43.6163366337, "max_line_length": 80, "alphanum_fraction": 0.6845241473, "include": true, "reason": "import numpy", "num_tokens": 3810}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 2018
@author: Fei Yan
"""
import numpy as np
from scipy.linalg import eig
from qutip import *
import logging
log = logging.getLogger('LabberDriver')
# import scipy.constants as const
# #Constants.
# h = const.h #planck constant
# h_bar = const.hbar #h_bar
# e = const.e #electron charge
# phi0= h/(2*e) #flux quantum
# RQ = h/(2*e)**2 #quantum resistance
def Ej_SQUID(flux,Ej_sum,d):
# effective Ej of a SQUID
return Ej_sum * np.abs(np.cos(np.pi*flux)) * np.sqrt(1+d**2*np.tan(np.pi*flux)**2) #[GHz]
def freq_SQUID(Ej, Ec):
    # approximate qubit frequency from Ej and Ec [GHz]
    return np.sqrt(8 * Ej *Ec) - Ec
def freq_LC(L,C):
# frequency of LC oscillator
# L [nH]
    # C [fF]
return 1/(2*np.pi)/np.sqrt(L*C)/1e-3 #[GHz]
def Z_LC(L,C):
    # impedance of LC oscillator
return np.sqrt(L/C)*1e3 #[Ohm]
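# Quick sanity checks for the helpers above (illustrative numbers, not device values):
#   freq_LC(1.0, 1000.0)      # ~5.03 GHz for L = 1 nH, C = 1000 fF
#   Z_LC(1.0, 1000.0)         # ~31.6 Ohm
#   Ej_SQUID(0.25, 20.0, 0.0) # ~14.1 GHz at a quarter flux quantum with Ej_sum = 20 GHz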
def U(H,t):
# unitary propagator generated by H over time t
H = Qobj(H)
return Qobj(-1j * H * t).expm()
def T(A, U, *args, **kwargs):
    # transform operator A by the unitary U: returns U * A * U^dagger
    A = Qobj(A)
U = Qobj(U)
return U * A * U.dag()
def Qflatten(Q):
    # flatten the tensor-product dimension structure of a Qobj
    return Qobj(Q.full())
def eigensolve(H):
# find eigensolution of H
H = H.full()
vals, vecs = eig(H)
#idx = vals.argsort()[::-1] #Descending Order
idx = vals.argsort() #Ascending Order
vals = vals[idx]
vecs = vecs[:,idx]
return np.real(vals), vecs
def level_identify(vals, vecs, list_table, list_select):
# identify and sort eigen solutions according to "list_select"
v_idx = []
for k, str_level in enumerate(list_select):
idx_sort = np.argsort(np.abs(vecs[list_table.index(str_level),:]))
count = 1
while True:
if idx_sort[-count] in v_idx:
count += 1
else:
v_idx.append(idx_sort[-count])
break
return vals[v_idx], vecs[:,v_idx]
class MultiQubitHamiltonian():
def __init__(self):
# init with some default settings
self.nQubit = 3
self.nTrunc = 4
# self.nShow = 4
self.bDesignParam_Q1 = False
self.bDesignParam_Q2 = False
self.bDesignParam_Q3 = False
self.sQubitType_Q1 = '2-JJ'
self.sQubitType_Q2 = '2-JJ'
self.sQubitType_Q3 = '2-JJ'
# frequencies [GHz]
self.dFreq_Q1 = 4.0
self.dFreq_Q2 = 4.0
self.dFreq_Q3 = 4.0
self.dAnh_Q1 = -0.3
self.dAnh_Q2 = -0.3
self.dAnh_Q3 = -0.3
# capacitances [fF]
self.dC1 = 80.0
self.dC2 = 80.0
self.dC3 = 80.0
self.dC12 = 1.0
self.dC23 = 1.0
self.dC13 = 0.02
# inductances [nH]
#
# designer parameter set
# josephson energy [GHz]
self.dEj_Q1 = 10.0
self.dEj_Q2 = 10.0
self.dEj_Q3 = 10.0
# charging energy [GHz]
self.dEc_Q1 = 0.2
self.dEc_Q2 = 0.2
self.dEc_Q3 = 0.2
# SQUID asymmetry |A1-A2|/(A1+A2)
self.dAsym_Q1 = 0.0
self.dAsym_Q2 = 0.0
self.dAsym_Q3 = 0.0
# flux bias [Phi0]
self.dFlux_Q1 = 0.0
self.dFlux_Q2 = 0.0
self.dFlux_Q3 = 0.0
# #
# # calculate partial coupling coefficients (approximate)
# self.c12 = self.dC12 / np.sqrt(self.dC1 * self.dC2)
# self.c23 = self.dC23 / np.sqrt(self.dC2 * self.dC3)
# self.c13 = self.dC13 / np.sqrt(self.dC1 * self.dC3)
# #
# if simCfg is not None:
# # update simulation options
# self.updateSimCfg(simCfg)
def updateSimCfg(self, simCfg):
# update simulation options
for key, value in simCfg.items():
if hasattr(self, key):
setattr(self, key, value)
# update capacitance coupling coefficient
self.c12 = self.dC12 / np.sqrt(self.dC1 * self.dC2)
self.c23 = self.dC23 / np.sqrt(self.dC2 * self.dC3)
self.c13 = self.dC13 / np.sqrt(self.dC1 * self.dC3)
# update frequencies if using designer parameter set
if self.bDesignParam_Q1:
if self.sQubitType_Q1 == '2-JJ':
setattr(self, 'dFreq_Q1', freq_SQUID(Ej_SQUID(self.dFlux_Q1,self.dEj_Q1,self.dAsym_Q1), self.dEc_Q1))
setattr(self, 'dAnh_Q1', -self.dEc_Q1)
if self.bDesignParam_Q2:
if self.sQubitType_Q2 == '2-JJ':
setattr(self, 'dFreq_Q2', freq_SQUID(Ej_SQUID(self.dFlux_Q2,self.dEj_Q2,self.dAsym_Q2), self.dEc_Q2))
setattr(self, 'dAnh_Q2', -self.dEc_Q2)
if self.bDesignParam_Q3:
if self.sQubitType_Q3 == '2-JJ':
setattr(self, 'dFreq_Q3', freq_SQUID(Ej_SQUID(self.dFlux_Q3,self.dEj_Q3,self.dAsym_Q3), self.dEc_Q3))
setattr(self, 'dAnh_Q3', -self.dEc_Q3)
def generateOperators(self):
# generate basic operators. matrix truncated at nTrunc
I = qeye(self.nTrunc)
a = destroy(self.nTrunc)
x = a + a.dag()
p = -1j*(a - a.dag())
aa = a.dag() * a
aaaa = a.dag() * a.dag() * a * a
return {'I':I, 'a':a, 'x':x, 'p':p, 'aa':aa, 'aaaa':aaaa}
def generateSubHamiltonian_1Q(self):
        # generate partial Hamiltonian for the 1-qubit system
OP = self.generateOperators()
# self Hamiltonian operators
self.H_Q1_aa = Qflatten(tensor(OP['aa']))
self.H_Q1_aaaa = Qflatten(tensor(OP['aaaa']))
# drive Hamiltonian operators
self.H_dr_Q1_x = Qflatten(tensor(OP['x']))
self.H_dr_Q1_p = Qflatten(tensor(OP['p']))
def generateHamiltonian_1Q_cap(self):
        # construct 1-qubit Hamiltonian
        self.generateSubHamiltonian_1Q()
        # self Hamiltonian
        self.H_Q1 = self.dFreq_Q1 * self.H_Q1_aa + self.dAnh_Q1/2 * self.H_Q1_aaaa
        # system Hamiltonian (single qubit, no coupling terms)
        self.H_sys = self.H_Q1
def generateLabel_1Q(self):
        # generate 1-qubit number state label list
list_label_gen = ["0","1","2","3","4","5","6","7"]
self.list_label_table = []
for k1 in np.arange(self.nTrunc):
self.list_label_table.append(list_label_gen[k1])
def generateSubHamiltonian_2Q(self):
        # generate partial Hamiltonian for the 2-qubit system
OP = self.generateOperators()
# self Hamiltonian operators
self.H_Q1_aa = Qflatten(tensor(OP['aa'], OP['I']))
self.H_Q1_aaaa = Qflatten(tensor(OP['aaaa'], OP['I']))
self.H_Q2_aa = Qflatten(tensor(OP['I'], OP['aa']))
self.H_Q2_aaaa = Qflatten(tensor(OP['I'], OP['aaaa']))
# coupling Hamiltonian operators
        self.H_12_xx = Qflatten(tensor(OP['x'], OP['x']))
self.H_12_pp = Qflatten(tensor(OP['p'], OP['p']))
# drive Hamiltonian operators
self.H_dr_Q1_x = Qflatten(tensor(OP['x'], OP['I']))
self.H_dr_Q2_x = Qflatten(tensor(OP['I'], OP['x']))
self.H_dr_Q1_p = Qflatten(tensor(OP['p'], OP['I']))
self.H_dr_Q2_p = Qflatten(tensor(OP['I'], OP['p']))
def generateHamiltonian_2Q_cap(self):
        # construct 2-qubit Hamiltonian
self.generateSubHamiltonian_2Q()
# self Hamiltonian
self.H_Q1 = self.dFreq_Q1 * self.H_Q1_aa + self.dAnh_Q1/2 * self.H_Q1_aaaa
self.H_Q2 = self.dFreq_Q2 * self.H_Q2_aa + self.dAnh_Q2/2 * self.H_Q2_aaaa
# coupling Hamiltonian
self.g_12 = 0.5 * self.c12 * np.sqrt(self.dFreq_Q1 * self.dFreq_Q2)
self.H_12 = self.g_12 * self.H_12_pp
# system Hamiltonian
self.H_sys = self.H_Q1 + self.H_Q2 + self.H_12
def generateLabel_2Q(self):
        # generate 2-qubit number state label list
list_label_gen = ["0","1","2","3","4","5","6","7"]
self.list_label_table = []
for k1 in np.arange(self.nTrunc):
for k2 in np.arange(self.nTrunc):
self.list_label_table.append(list_label_gen[k1] + list_label_gen[k2])
def generateSubHamiltonian_3Q(self):
# generate partial Hamiltonian in 3-qubit system
OP = self.generateOperators()
# self Hamiltonian operators
self.H_Q1_aa = Qflatten(tensor(OP['aa'], OP['I'], OP['I']))
self.H_Q1_aaaa = Qflatten(tensor(OP['aaaa'], OP['I'], OP['I']))
self.H_Q2_aa = Qflatten(tensor(OP['I'], OP['aa'], OP['I']))
self.H_Q2_aaaa = Qflatten(tensor(OP['I'], OP['aaaa'], OP['I']))
self.H_Q3_aa = Qflatten(tensor(OP['I'], OP['I'], OP['aa']))
self.H_Q3_aaaa = Qflatten(tensor(OP['I'], OP['I'], OP['aaaa']))
# coupling Hamiltonian operators
self.H_12_xx = Qflatten(tensor(OP['x'], OP['x'], OP['I']))
self.H_23_xx = Qflatten(tensor(OP['I'], OP['x'], OP['x']))
        self.H_13_xx = Qflatten(tensor(OP['x'], OP['I'], OP['x']))
self.H_12_pp = Qflatten(tensor(OP['p'], OP['p'], OP['I']))
self.H_23_pp = Qflatten(tensor(OP['I'], OP['p'], OP['p']))
self.H_13_pp = Qflatten(tensor(OP['p'], OP['I'], OP['p']))
# drive Hamiltonian operators
self.H_dr_Q1_x = Qflatten(tensor(OP['x'], OP['I'], OP['I']))
self.H_dr_Q2_x = Qflatten(tensor(OP['I'], OP['x'], OP['I']))
self.H_dr_Q3_x = Qflatten(tensor(OP['I'], OP['I'], OP['x']))
self.H_dr_Q1_p = Qflatten(tensor(OP['p'], OP['I'], OP['I']))
self.H_dr_Q2_p = Qflatten(tensor(OP['I'], OP['p'], OP['I']))
self.H_dr_Q3_p = Qflatten(tensor(OP['I'], OP['I'], OP['p']))
def generateHamiltonian_3Q_cap(self):
# construct 3-qubit Hamiltonian
self.generateSubHamiltonian_3Q()
# self Hamiltonian
self.H_Q1 = self.dFreq_Q1 * self.H_Q1_aa + self.dAnh_Q1/2 * self.H_Q1_aaaa
self.H_Q2 = self.dFreq_Q2 * self.H_Q2_aa + self.dAnh_Q2/2 * self.H_Q2_aaaa
self.H_Q3 = self.dFreq_Q3 * self.H_Q3_aa + self.dAnh_Q3/2 * self.H_Q3_aaaa
# coupling Hamiltonian
self.g_12 = 0.5 * self.c12 * np.sqrt(self.dFreq_Q1 * self.dFreq_Q2)
self.H_12 = self.g_12 * self.H_12_pp
self.g_23 = 0.5 * self.c23 * np.sqrt(self.dFreq_Q2 * self.dFreq_Q3)
self.H_23 = self.g_23 * self.H_23_pp
self.g_13 = 0.5 * (self.c12 * self.c23 + self.c13) * np.sqrt(self.dFreq_Q1 * self.dFreq_Q3)
self.H_13 = self.g_13 * self.H_13_pp
# system Hamiltonian
self.H_sys = self.H_Q1 + self.H_Q2 + self.H_Q3 + self.H_12 + self.H_23 + self.H_13
def generateLabel_3Q(self):
# generate 3-qubit number state label list
list_label_gen = ["0","1","2","3","4","5","6","7"]
self.list_label_table = []
for k1 in np.arange(self.nTrunc):
for k2 in np.arange(self.nTrunc):
for k3 in np.arange(self.nTrunc):
self.list_label_table.append(list_label_gen[k1] + list_label_gen[k2] + list_label_gen[k3])
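# Minimal usage sketch (not part of the driver; the parameter values below are
# illustrative assumptions, not calibrated device numbers):
#
#   qsys = MultiQubitHamiltonian()
#   qsys.updateSimCfg({'dFreq_Q1': 4.2, 'dFreq_Q2': 4.8, 'dC12': 2.0})
#   qsys.generateHamiltonian_2Q_cap()
#   qsys.generateLabel_2Q()
#   vals, vecs = eigensolve(qsys.H_sys)
#   # pick out the two-qubit computational levels
#   vals_sel, vecs_sel = level_identify(vals, vecs, qsys.list_label_table,
#                                       ['00', '01', '10', '11'])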
|
{"hexsha": "0f1e97534f28b86287ed219219c037154981ae7f", "size": 9426, "ext": "py", "lang": "Python", "max_stars_repo_path": "QSolver/QSolver_ForDriver.py", "max_stars_repo_name": "roniwinik/Drivers", "max_stars_repo_head_hexsha": "ba473bc21d1b5321da1e6caadec5b4d624282edc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2015-11-16T13:35:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T11:02:14.000Z", "max_issues_repo_path": "QSolver/QSolver_ForDriver.py", "max_issues_repo_name": "roniwinik/Drivers", "max_issues_repo_head_hexsha": "ba473bc21d1b5321da1e6caadec5b4d624282edc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 30, "max_issues_repo_issues_event_min_datetime": "2015-11-16T14:37:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-22T19:39:34.000Z", "max_forks_repo_path": "QSolver/QSolver_ForDriver.py", "max_forks_repo_name": "roniwinik/Drivers", "max_forks_repo_head_hexsha": "ba473bc21d1b5321da1e6caadec5b4d624282edc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2015-11-12T18:31:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T12:59:35.000Z", "avg_line_length": 32.0612244898, "max_line_length": 105, "alphanum_fraction": 0.6644387863, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3404}
|
\clearpage
\subsection{C Parameter Declaration (with Arrays)} % (fold)
\label{sub:c_parameter_declaration_with_arrays_}
\csyntax{csynt:type-decl-parameter-decl}{Parameter Declarations (with Arrays)}{type-decl/parameter-decl-with-types}
% subsection c_parameter_declaration_with_arrays_ (end)
|
{"hexsha": "6e70c1c82ede0c8756a786106c3f19b35a4d29ab", "size": 293, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "topics/type-decl/pascal/pas-parameter-decl-with-types.tex", "max_stars_repo_name": "thoth-tech/programming-arcana", "max_stars_repo_head_hexsha": "bb5c0d45355bf710eff01947e67b666122901b07", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-10T04:50:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-10T04:50:54.000Z", "max_issues_repo_path": "topics/type-decl/pascal/pas-parameter-decl-with-types.tex", "max_issues_repo_name": "thoth-tech/programming-arcana", "max_issues_repo_head_hexsha": "bb5c0d45355bf710eff01947e67b666122901b07", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-29T19:45:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-29T19:45:10.000Z", "max_forks_repo_path": "topics/type-decl/pascal/pas-parameter-decl-with-types.tex", "max_forks_repo_name": "macite/programming-arcana", "max_forks_repo_head_hexsha": "8f3040983d420129f90bcc4bd69a96d8743c412c", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-06-02T03:18:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T07:42:53.000Z", "avg_line_length": 41.8571428571, "max_line_length": 115, "alphanum_fraction": 0.8225255973, "num_tokens": 71}
|
%%Ex. 8 Extracting an individual element of an array
a = [3 6 7];
b = [1 9 4 5];
c = a(2) + b(4)
%Output: c = 11
|
{"author": "TheAlgorithms", "repo": "MATLAB-Octave", "sha": "e150b77ad256de46c1ce3815c3d7945ac4fc28dc", "save_path": "github-repos/MATLAB/TheAlgorithms-MATLAB-Octave", "path": "github-repos/MATLAB/TheAlgorithms-MATLAB-Octave/MATLAB-Octave-e150b77ad256de46c1ce3815c3d7945ac4fc28dc/matlab_for_beginners/part_1(learn_basic_programing)/individual_eL_add.m"}
|
# Using Android IP Webcam video .jpg stream (tested) in Python2 OpenCV3
import urllib
import cv2
import numpy as np
import time
import subprocess
import cam_find
import socket
import bluetooth
# Replace the URL with your own IPwebcam shot.jpg IP:port
url='http://192.168.43.1:8080/shot.jpg'
def searchitem():
    # Use urllib to grab the current frame from the IP camera
    imgResp = urllib.urlopen(url)
    # Use numpy to convert the raw bytes into an array
imgNp = np.array(bytearray(imgResp.read()),dtype=np.uint8)
# Finally decode the array to OpenCV usable format ;)
img = cv2.imdecode(imgNp,-1)
# put the image on screen
cv2.imshow('IPWebcam',img)
cv2.imwrite('input.jpg',img)
    # To reduce load on the processor
    #time.sleep(0.1)
    # Identify the object in the captured frame and speak its name
obj = cam_find.find_object()
subprocess.call('echo '+obj+'|festival --tts', shell=True)
time.sleep(2)
return obj
while True:
    # Wait for a voice command from the paired Bluetooth device
print("waiting for voice command")
server_socket=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
port=1
server_socket.bind(("",port))
server_socket.listen(1)
client_socket,address=server_socket.accept()
print("connection accepted",address)
data=client_socket.recv(1024)
print("received:%s"%data)
if(data=='search'):
print("item to search")
time.sleep(1)
server_socket=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
port=1
server_socket.bind(("",port))
server_socket.listen(1)
client_socket,address=server_socket.accept()
print("connection accepted",address)
data1=client_socket.recv(1024)
print("received:%s"%data1)
obj=searchitem()
print (obj)
if(obj==data1):
print("item found")
subprocess.call(["sudo","espeak","item found"])
else:
print("item not found")
subprocess.call(["sudo","espeak","item not found"])
|
{"hexsha": "5e5d9fc4f65093a894acd291aad83ef0e23730b9", "size": 2038, "ext": "py", "lang": "Python", "max_stars_repo_path": "programs-pi/rd.py", "max_stars_repo_name": "AshwinRaikar88/rhok", "max_stars_repo_head_hexsha": "5be70c78605e81e1af191a006460fa48e849c62b", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-08T14:20:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-08T14:20:47.000Z", "max_issues_repo_path": "programs-pi/rd.py", "max_issues_repo_name": "AshwinRaikar88/rhok", "max_issues_repo_head_hexsha": "5be70c78605e81e1af191a006460fa48e849c62b", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "programs-pi/rd.py", "max_forks_repo_name": "AshwinRaikar88/rhok", "max_forks_repo_head_hexsha": "5be70c78605e81e1af191a006460fa48e849c62b", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1142857143, "max_line_length": 72, "alphanum_fraction": 0.6270853778, "include": true, "reason": "import numpy", "num_tokens": 498}
|
"""Routines and classes related to RPyC package"""
from . import module as module_utils, net, py3, strpack
import numpy as np
import importlib
rpyc=importlib.import_module("rpyc") # Python 2 compatibility (importing module from a module with the same name)
import pickle
import warnings
import socket
_default_packers={"numpy":np.ndarray.tostring,"pickle":pickle.dumps}
_default_unpackers={"pickle":pickle.loads}
def _is_tunnel_service(serv):
return hasattr(serv,"tunnel_socket")
def _obtain_single(proxy, serv):
if _is_tunnel_service(serv):
loc_serv=serv.peer
async_send=rpyc.async_(serv.tunnel_send)
async_send(proxy,packer="pickle")
data=pickle.loads(loc_serv.tunnel_recv())
return data
else:
return rpyc.classic.obtain(proxy)
_numpy_block_size=int(2**20)
def obtain(proxy, serv=None):
"""
Obtain a remote netfref object by value (i.e., copy it to the local Python instance).
Wrapper around :func:`rpyc.utils.classic.obtain` with some special cases handling.
`serv` specifies the current remote service. If it is of type :class:`SocketTunnelService`, use its socket tunnel for faster transfer.
"""
if not isinstance(proxy,rpyc.BaseNetref):
return proxy
if isinstance(proxy, np.ndarray):
elsize=np.prod(proxy.shape,dtype="u8")
bytesize=proxy.dtype.itemsize*elsize
if bytesize>_numpy_block_size:
if _is_tunnel_service(serv):
loc_serv=serv.peer
async_send=rpyc.async_(serv.tunnel_send)
async_send(proxy,packer="numpy")
data=loc_serv.tunnel_recv()
return np.frombuffer(data,dtype=proxy.dtype.str).reshape(proxy.shape)
else:
fproxy=proxy.flatten()
loc=np.zeros(elsize,dtype=proxy.dtype.str)
block_size=_numpy_block_size//proxy.dtype.itemsize
for pos in range(0,elsize,block_size):
loc[pos:pos+block_size]=rpyc.classic.obtain(fproxy[pos:pos+block_size])
return loc.reshape(proxy.shape)
return rpyc.classic.obtain(proxy)
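# Minimal usage sketch (assumes `proxy` is a netref returned by some remote call,
# e.g. a large numpy array produced on the device PC; names are illustrative):
#   local_copy = obtain(proxy)        # classic obtain, chunked for large arrays
#   local_copy = obtain(proxy, serv)  # with `serv` a local SocketTunnelService, use its raw socket tunnel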
def transfer(obj, serv):
"""
Send a local object to the remote PC by value (i.e., copy it to the remote Python instance).
A 'reversed' version of :func:`obtain`.
"""
return serv.transfer(obj)
class SocketTunnelService(rpyc.SlaveService):
"""
Extension of the standard :class:`rpyc.core.service.SlaveService` with built-in network socket tunnel for faster data transfer.
In order for the tunnel to work, services on both ends need to be subclasses of :class:`SocketTunnelService`.
Because of the initial setup protocol, the two services are asymmetric: one should be 'server' (corresponding to the listening server),
and one should be 'client' (external connection). The roles are decided by the `server` constructor parameter.
"""
_tunnel_block_size=int(2**20)
def __init__(self, server=False):
rpyc.SlaveService.__init__(self)
self.server=server
_default_tunnel_timeout=10.
def _recv_socket(self, addr):
"""Set up a listener to receive a socket connection from the other service."""
def listen(s):
s.set_timeout(self._default_tunnel_timeout)
self.tunnel_socket=s
remote_call=rpyc.async_(self._conn.root._send_socket)
def port_func(port):
remote_call(addr,port)
net.listen(None,0,listen,port_func=port_func,timeout=self._default_tunnel_timeout,connections_number=1,socket_args={"nodelay":True})
def _send_socket(self, dst_addr, dst_port):
"""Set up a client socket to connect to the other service."""
self.tunnel_socket=net.ClientSocket(timeout=self._default_tunnel_timeout,nodelay=True)
self.tunnel_socket.connect(dst_addr,dst_port)
def tunnel_send(self, obj, packer=None):
"""
Send data through the socket tunnel.
If `packer` is not ``None``, it defines a function to convert `obj` to a bytes string.
"""
packer=_default_packers.get(packer,packer)
if packer:
obj=packer(obj)
nchunks=(len(obj)-1)//self._tunnel_block_size+1
self.tunnel_socket.send_fixedlen(strpack.pack_uint(nchunks,4,">"))
for pos in range(0,len(obj),self._tunnel_block_size):
self.tunnel_socket.send_decllen(obj[pos:pos+self._tunnel_block_size])
def tunnel_recv(self, unpacker=None):
"""
Receive data sent through the socket tunnel.
If `unpacker` is not ``None``, it defines a function to convert the received bytes string into an object.
"""
nchunks=strpack.unpack_uint(self.tunnel_socket.recv_fixedlen(4),">")
chunks=[]
for _ in range(nchunks):
chunks.append(self.tunnel_socket.recv_decllen())
obj=b"".join(chunks)
unpacker=_default_unpackers.get(unpacker,unpacker)
return unpacker(obj) if unpacker else obj
def obtain(self, proxy):
"""Execute :func:`obtain` on the local instance"""
return obtain(proxy,self)
def transfer(self, obj):
"""Execute :func:`transfer` on the local instance"""
return self.peer.obtain(obj)
def on_connect(self, conn):
rpyc.SlaveService.on_connect(self,conn)
self.peer=conn.root
if not self.server:
s=socket.fromfd(conn.fileno(),socket.AF_INET,socket.SOCK_STREAM)
src_addr=s.getsockname()[0]
s.close()
self._recv_socket(src_addr)
def on_disconnect(self, conn):
try:
self.tunnel_socket.close()
except AttributeError:
pass
rpyc.SlaveService.on_disconnect(self,conn)
class DeviceService(SocketTunnelService):
"""
Device RPyC service.
Expands on :class:`SocketTunnelService` by adding :meth:`get_device` method,
which opens local devices, tracks them, and closes them automatically on disconnect.
"""
def __init__(self, verbose=False):
SocketTunnelService.__init__(self,server=True)
self.verbose=verbose
def on_connect(self, conn):
SocketTunnelService.on_connect(self,conn)
self.devices=[]
if self.verbose:
print("Connected client {}".format(self._conn))
def on_disconnect(self, conn):
for dev in self.devices:
try:
dev.close()
except:
pass
self.devices=[]
if self.verbose:
print("Disconnected client {}".format(self._conn))
SocketTunnelService.on_disconnect(self,conn)
def get_device(self, module, cls, *args, **kwargs):
"""
Connect to a device.
`cls` and `module` are names of the device class and the containing module
(for module name the ``"pylablib.aux_libs.devices"`` prefix can be omitted)
"""
try:
module=importlib.import_module(module)
except ModuleNotFoundError:
module=importlib.import_module(module_utils.get_library_name()+".aux_libs.devices."+module)
module._rpyc=True
cls=module.__dict__[cls]
dev=cls(*args,**kwargs)
self.devices.append(dev)
return dev
def run_device_service(port=18812, verbose=False):
"""Start :class:`DeviceService` at the given port"""
rpyc.ThreadedServer(rpyc.utils.helpers.classpartial(DeviceService,verbose=verbose),port=port).start()
def connect_device_service(addr, port=18812, timeout=3, attempts=2):
"""
    Connect to the :class:`DeviceService` running at the given address and port.
    `timeout` and `attempts` define, respectively, the timeout of a single connection attempt and the number of attempts
    (RPyC default is 3 seconds timeout and 6 attempts).
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
s=rpyc.SocketStream.connect(addr,port,timeout=timeout,attempts=attempts)
return rpyc.connect_stream(s,SocketTunnelService).root
except net.socket.timeout:
return None
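# Minimal end-to-end sketch (host address, port, module/class names and the
# queried method below are placeholders, not part of the library API):
#
#   # on the instrument PC
#   run_device_service(port=18812, verbose=True)
#
#   # on the control PC
#   serv = connect_device_service("192.168.0.10", port=18812)
#   if serv is not None:
#       dev = serv.get_device("SomeVendor", "SomeDeviceClass")  # hypothetical module/class names
#       data = obtain(dev.read_data())  # hypothetical method; copy the result by value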
|
{"hexsha": "62706294c183137aa3216da43365aed110ce0467", "size": 8140, "ext": "py", "lang": "Python", "max_stars_repo_path": "pylablib/core/utils/rpyc.py", "max_stars_repo_name": "AlexShkarin/pyLabLib-v0", "max_stars_repo_head_hexsha": "1c3c59d4bcbea4a16eee916033972ee13a7d1af6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-03-06T08:31:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T15:02:06.000Z", "max_issues_repo_path": "pylablib/core/utils/rpyc.py", "max_issues_repo_name": "AlexShkarin/pyLabLib-v0", "max_issues_repo_head_hexsha": "1c3c59d4bcbea4a16eee916033972ee13a7d1af6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-09-10T17:25:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-07T20:49:22.000Z", "max_forks_repo_path": "pylablib/core/utils/rpyc.py", "max_forks_repo_name": "AlexShkarin/pyLabLib-v0", "max_forks_repo_head_hexsha": "1c3c59d4bcbea4a16eee916033972ee13a7d1af6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-08-16T09:02:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T10:58:53.000Z", "avg_line_length": 40.7, "max_line_length": 140, "alphanum_fraction": 0.6648648649, "include": true, "reason": "import numpy", "num_tokens": 1858}
|
# %%
import warnings
from functools import partial
from itertools import product
import numpy as np
import pandas as pd
from graspologic.utils import symmetrize
from hyppo.ksample import KSample
from joblib import Parallel, delayed
from scipy.stats import ks_2samp, mannwhitneyu, ttest_ind
from tqdm import tqdm
from src import compute_pr_at_k, generate_truncnorm_sbms
warnings.filterwarnings("ignore")
# %%
def compute_statistic(test, pop1, pop2):
if test.__name__ == "ttest_ind":
test_statistics, pvals = ttest_ind(pop1, pop2, axis=0)
np.nan_to_num(test_statistics, copy=False)
np.nan_to_num(pvals, copy=False)
else: # for other tests, do by edge
n = pop1.shape[-1]
test_statistics = np.zeros((n, n))
pvals = np.zeros((n, n))
for i in range(n):
for j in range(i + 1, n):
x_ij = pop1[:, i, j]
y_ij = pop2[:, i, j]
if test.__name__ == "multiscale_graphcorr":
tmp, pval, _ = test(x_ij, y_ij, is_twosamp=True, reps=1)
elif test.__name__ == "test":
tmp, pval = KSample("Dcorr").test(x_ij, y_ij)
else:
# print(test.__name__, x_ij, y_ij)
tmp, pval = test(x_ij, y_ij)
test_statistics[i, j] = tmp
pvals[i, j] = pval
test_statistics = symmetrize(test_statistics, method="triu")
pvals = symmetrize(pvals, method="triu")
return test_statistics, pvals
def run_experiment(tests, m, block_1, block_2, mean_1, mean_2, var_1, var_2, ks, reps):
precisions = []
recalls = []
for _ in range(reps):
tmp_precisions = []
tmp_recalls = []
pop1, pop2, true_labels = generate_truncnorm_sbms(
m=m,
block_1=block_1,
block_2=block_2,
mean_1=mean_1,
mean_2=mean_2,
var_1=var_1,
var_2=var_2,
)
for test in tests:
test_statistics, pvalues = compute_statistic(test, pop1, pop2)
if test.__name__ == "multiscale_graphcorr":
precision, recall = compute_pr_at_k(
k=ks, true_labels=true_labels, test_statistics=test_statistics
)
else:
precision, recall = compute_pr_at_k(
k=ks, true_labels=true_labels, pvalues=pvalues
)
tmp_precisions.append(precision)
tmp_recalls.append(recall)
precisions.append(tmp_precisions)
recalls.append(tmp_recalls)
precisions = np.array(precisions).mean(axis=0)
recalls = np.array(recalls).mean(axis=0)
to_append = [m, mean_1, mean_2, var_1, var_2, *precisions, *recalls]
return to_append
# %%
tests = [ttest_ind, mannwhitneyu, ks_2samp, KSample("Dcorr").test]
spacing = 100
block_1 = 5
block_2 = 15
mean_1 = 0
mean_2s = np.linspace(0, 1, spacing + 1)
var_1 = 1 / 2
var_2 = 1 / 2
ms = np.linspace(0, 500, spacing + 1).astype(int)[1:]
ks = [10]
reps = 100
args = [dict(m=m, mean_2=mean_2) for (m, mean_2) in product(ms, mean_2s)]
partial_func = partial(
run_experiment,
tests=tests,
block_1=block_1,
block_2=block_2,
mean_1=mean_1,
var_1=var_1,
var_2=var_2,
ks=ks,
reps=reps,
)
res = Parallel(n_jobs=-1, verbose=5)(delayed(partial_func)(**arg) for arg in tqdm(args))
# %%
new_res = []
for r in res:
constants = r[:5]
results = [b for a in r[5:] for b in a]
new_res.append(constants + results)
# %%
cols = [
"m",
"mean1",
"mean2",
"var_1",
"var_2",
*[f"{test.__name__}_precision_at_{k}" for test in tests for k in ks],
*[f"{test.__name__}_recall_at_{k}" for test in tests for k in ks],
]
res_df = pd.DataFrame(new_res, columns=cols)
res_df.to_csv("../../results/edge_simulation_changing_variance.csv", index=False)
|
{"hexsha": "022b923393737a486b3b0c5d2751f2cce924981c", "size": 3930, "ext": "py", "lang": "Python", "max_stars_repo_path": "supplement/S1-edge-simulation/edge_simulation_changing_means.py", "max_stars_repo_name": "neurodata/MCC", "max_stars_repo_head_hexsha": "42c6b5fab09edf10bce2143c2366199531b2626b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-01-15T02:11:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T04:06:58.000Z", "max_issues_repo_path": "supplement/S1-edge-simulation/edge_simulation_changing_means.py", "max_issues_repo_name": "neurodata/MCC", "max_issues_repo_head_hexsha": "42c6b5fab09edf10bce2143c2366199531b2626b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-04-05T16:09:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-29T21:55:27.000Z", "max_forks_repo_path": "supplement/S1-edge-simulation/edge_simulation_changing_means.py", "max_forks_repo_name": "neurodata/MCC", "max_forks_repo_head_hexsha": "42c6b5fab09edf10bce2143c2366199531b2626b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-28T14:41:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-28T14:41:22.000Z", "avg_line_length": 27.2916666667, "max_line_length": 88, "alphanum_fraction": 0.6, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1143}
|
using MarketData
facts("Data quality checks") do
cl_incomplete = TimeArray(cl.timestamp[1:5], [cl.values[1:4]; NaN], cl.colnames)
context("Determine uniformity of observations") do
@fact cl[1:5] --> is_equally_spaced
@fact cl[1:10] --> not(is_equally_spaced)
@fact require_equally_spaced(cl[1:5]) --> nothing
@fact_throws require_equally_spaced(cl[1:10])
end
context("Determine completeness of data") do
@fact cl[1:5] --> is_equally_spaced
@fact cl[1:10] --> not(is_equally_spaced)
@fact require_equally_spaced(cl[1:5]) --> nothing
@fact_throws require_equally_spaced(cl[1:10])
end
context("Determine cleanliness of data") do
@fact cl[1:5] --> is_clean
@fact cl[1:10] --> not(is_clean)
@fact cl_incomplete --> not(is_clean)
@fact require_clean(cl[1:5]) --> nothing
@fact_throws require_clean(cl[1:10])
@fact_throws require_clean(cl_incomplete)
end
end
|
{"hexsha": "2cc38c6f0c370901a36d28bb174fb79322d4eaad", "size": 1143, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/data_quality.jl", "max_stars_repo_name": "GordStephen/TimeSeriesTools.jl", "max_stars_repo_head_hexsha": "3b10959392a723e570b99ce4053134781a4d3556", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-11T17:26:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-11T17:26:19.000Z", "max_issues_repo_path": "test/data_quality.jl", "max_issues_repo_name": "GordStephen/TimeSeriesTools.jl", "max_issues_repo_head_hexsha": "3b10959392a723e570b99ce4053134781a4d3556", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-09-05T18:42:09.000Z", "max_issues_repo_issues_event_max_datetime": "2015-09-05T18:42:09.000Z", "max_forks_repo_path": "test/data_quality.jl", "max_forks_repo_name": "GordStephen/TimeSeriesTools.jl", "max_forks_repo_head_hexsha": "3b10959392a723e570b99ce4053134781a4d3556", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6363636364, "max_line_length": 84, "alphanum_fraction": 0.5581802275, "num_tokens": 301}
|
// Copyright 2018-2019 Henry Schreiner and Hans Dembinski
//
// Distributed under the 3-Clause BSD License. See accompanying
// file LICENSE or https://github.com/scikit-hep/boost-histogram for details.
#include <bh_python/pybind11.hpp>
#include <bh_python/axis.hpp>
#include <bh_python/kwargs.hpp>
#include <bh_python/register_axis.hpp>
#include <bh_python/regular_numpy.hpp>
#include <boost/mp11.hpp>
#include <vector>
template <class... Ts, class Func>
void register_axis_each(py::module& mod, Func&& function) {
using namespace boost::mp11;
using types = mp_list<Ts...>;
mp_for_each<mp_iota_c<sizeof...(Ts)>>([&](auto I) {
using T = mp_at_c<types, I>;
auto ax = register_axis<T>(mod);
function(ax);
});
}
void register_axes(py::module& mod) {
register_axis_each<axis::regular_none,
axis::regular_uflow,
axis::regular_oflow,
axis::regular_uoflow,
axis::regular_uoflow_growth,
axis::regular_circular,
axis::regular_numpy>(mod, [](auto ax) {
ax.def(py::init<unsigned, double, double>(), "bins"_a, "start"_a, "stop"_a);
});
register_axis<axis::regular_pow>(mod)
.def(py::init([](unsigned n, double start, double stop, double pow) {
return new axis::regular_pow(
bh::axis::transform::pow{pow}, n, start, stop);
}),
"bins"_a,
"start"_a,
"stop"_a,
"power"_a)
.def_property_readonly("transform", [](const axis::regular_pow& self) {
return self.transform();
});
register_axis<axis::regular_trans>(mod)
.def(py::init([](unsigned n, double start, double stop, func_transform& trans) {
return new axis::regular_trans(trans, n, start, stop);
}),
"bins"_a,
"start"_a,
"stop"_a,
"transform"_a)
.def_property_readonly("transform", [](const axis::regular_trans& self) {
return self.transform();
});
register_axis_each<axis::variable_none,
axis::variable_uflow,
axis::variable_oflow,
axis::variable_uoflow,
axis::variable_uoflow_growth,
axis::variable_circular>(
mod, [](auto ax) { ax.def(py::init<std::vector<double>>(), "edges"_a); });
register_axis_each<axis::integer_none,
axis::integer_uflow,
axis::integer_oflow,
axis::integer_uoflow,
axis::integer_growth,
axis::integer_circular>(
mod, [](auto ax) { ax.def(py::init<int, int>(), "start"_a, "stop"_a); });
register_axis_each<axis::category_int, axis::category_int_growth>(
mod, [](auto ax) { ax.def(py::init<std::vector<int>>(), "categories"_a); });
register_axis_each<axis::category_str, axis::category_str_growth>(mod, [](auto ax) {
ax.def(py::init<std::vector<std::string>>(), "categories"_a);
});
register_axis<axis::boolean>(mod, "boolean").def(py::init<>());
}
|
{"hexsha": "3268708741b771890a960d1686e07ac210ff10ee", "size": 3274, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/register_axis.cpp", "max_stars_repo_name": "andrzejnovak/boost-histogram", "max_stars_repo_head_hexsha": "cdbfabb1c22f5545bf3900be01f2025411e699f1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 105.0, "max_stars_repo_stars_event_min_datetime": "2019-03-08T14:59:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T12:46:17.000Z", "max_issues_repo_path": "src/register_axis.cpp", "max_issues_repo_name": "andrzejnovak/boost-histogram", "max_issues_repo_head_hexsha": "cdbfabb1c22f5545bf3900be01f2025411e699f1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 400.0, "max_issues_repo_issues_event_min_datetime": "2019-03-11T23:10:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T14:02:06.000Z", "max_forks_repo_path": "src/register_axis.cpp", "max_forks_repo_name": "andrzejnovak/boost-histogram", "max_forks_repo_head_hexsha": "cdbfabb1c22f5545bf3900be01f2025411e699f1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 25.0, "max_forks_repo_forks_event_min_datetime": "2019-03-11T18:02:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T20:14:22.000Z", "avg_line_length": 36.3777777778, "max_line_length": 88, "alphanum_fraction": 0.5568112401, "num_tokens": 761}
|
#pdm.py
#
#Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
#The Universal Permissive License (UPL), Version 1.0
#
#by Joe Hahn, joe.hahn@oracle.come, 11 September 2018
#this executes the pdm demo
#get commandline argument
try:
import sys
inputs_path = sys.argv[1]
except:
inputs_path = 'inputs_rtf.py'
#start time
import time as tm
clock_start = tm.time()
#read input parameters
import numpy as np
execfile(inputs_path)
print 'inputs_path = ', inputs_path
print 'debug = ', debug
print 'N_devices = ', N_devices
print 'sensor_sigma = ', sensor_sigma
print 'N_timesteps = ', N_timesteps
print 'time_start = ', time_start
print 'output_interval = ', output_interval
print 'strategy = ', strategy
print 'pdm_threshold_time = ', pdm_threshold_time
print 'pdm_threshold_probability = ', pdm_threshold_probability
print 'pdm_skip_time = ', pdm_skip_time
print 'N_technicians = ', N_technicians
print 'repair_duration = ', repair_duration
print 'maintenance_duration = ', maintenance_duration
print 'rn_seed = ', rn_seed
print 'issues = ', issues
#imports
print 'setting up...'
import pandas as pd
#initialize values of each sensor on each device
names = ['temperature', 'pressure', 'load']
N_sensors = len(names)
IDs = np.arange(N_sensors)
values = np.zeros((N_devices, N_sensors))
sensors = {'names':names, 'IDs':IDs, 'values':values}
#initialize time of each sensor's next output
np.random.seed(rn_seed)
output_times = np.random.uniform(low=0, high=output_interval, size=values.shape).astype(int)
sensors['output_times'] = output_times
#initialize devices
IDs = np.arange(N_devices)
devices = {'IDs':IDs}
devices['sensors'] = sensors
for deviceID in devices['IDs']:
d = {'state':'operating', 'issue':'none', 'technicianID':-1, 'fail_time':-1, 'repair_start_time':-1, 'repair_complete_time':-1,
'production_rate':0.0, 'production_rate_fail_time':0.0}
for issue in issues.keys():
if (issues[issue]['fatal'] == True):
d[issue + '_repair_time'] = time_start - 1
devices[deviceID] = d
#initialize damage due to issues
N_issues = len(issues)
damage = np.zeros((N_issues, N_devices))
devices['damage'] = damage
#initialize technicians
IDs = np.arange(N_technicians)
technicians = {'IDs':IDs}
for ID in IDs:
technicians[ID] = {'location':-1}
#load pdm models as needed
models = {}
if (strategy == 'pdm'):
fatal_issues = [issue_name for issue_name, d in issues.iteritems() if (d['fatal'] == True)]
#model_folder = '/u01/bdcsce/tmp/'
model_folder = './'
for issue in fatal_issues:
y_col = issue + '_in_' + str(pdm_threshold_time)
model_file = model_folder + y_col + '_model.pkl'
print 'loading ' + model_file
with open(model_file, 'rb') as file:
import pickle as pkl
models[y_col] = pkl.load(file)
#loop over all times
repair_data = []
telemetry_data= []
from helper_fns import *
times = range(time_start, time_start + N_timesteps)
print 'operating devices...'
for time in times:
#update operating devices' sensors
update_sensors(devices, sensor_sigma)
#update damage due to issues
crud_damage = update_damage(devices, issues)
#update devices' production_rate
compute_production(devices, issues, crud_damage)
#perform predictive maintenance if desired
if (strategy == 'pdm'):
if (time%pdm_skip_time == 0):
repair_data += pdm_check(devices, issues, time, technicians, models, maintenance_duration,
pdm_threshold_time, pdm_threshold_probability, debug)
#flag any failed devices
check_devices(devices, issues, time, debug)
#send first available technicians to repair failed deviceIDs
repair_data += service_failed_devices(devices, technicians, time, repair_duration, debug)
#release devices and technicians when maintenance is complete
complete_maintenance(devices, issues, technicians, time, debug)
    #generate sensor and telemetry updates at the scheduled output times
telemetry_data += generate_telemetry(devices, technicians, time, output_interval)
#increment time
time += 1
#convert repairs log to dataframe
import os
if (len(repair_data) > 0):
cols = ['time', 'deviceID', 'issue', 'technicianID'] + names + ['production_rate']
repairs = pd.DataFrame(data=repair_data)[cols]
print 'repairs.shape = ', repairs.shape
file = 'data/repairs_' + strategy + '.csv.gz'
repairs.to_csv(file, header=False, index=False, sep='|', compression='gzip')
print file + ' size (KB) = ', os.path.getsize(file)/(1024)
#convert telemetry to dataframe
if (len(telemetry_data) > 0):
cols = ['time', 'deviceID', 'sensor', 'value']
telemetry = pd.DataFrame(data=telemetry_data)[cols]
print 'telemetry.shape = ', telemetry.shape
file = 'data/telemetry_' + strategy + '.csv.gz'
telemetry.to_csv(file, header=False, index=False, sep='|', compression='gzip')
print file + ' size (MB) = ', os.path.getsize(file)/(1024**2)
#done
print 'execution time (min) = ', (tm.time() - clock_start)/60.0
|
{"hexsha": "826e91a9d867446b45a60317a4f82496e748fac3", "size": 5115, "ext": "py", "lang": "Python", "max_stars_repo_path": "pdm.py", "max_stars_repo_name": "JosephSe/predictive-maintenance-sim", "max_stars_repo_head_hexsha": "49c28f75b30fe7c7668f94388fc48acf247e5f4e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-07-18T16:17:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T08:59:55.000Z", "max_issues_repo_path": "pdm.py", "max_issues_repo_name": "JosephSe/predictive-maintenance-sim", "max_issues_repo_head_hexsha": "49c28f75b30fe7c7668f94388fc48acf247e5f4e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-01T06:04:17.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-01T06:04:17.000Z", "max_forks_repo_path": "pdm.py", "max_forks_repo_name": "JosephSe/predictive-maintenance-sim", "max_forks_repo_head_hexsha": "49c28f75b30fe7c7668f94388fc48acf247e5f4e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2019-05-13T11:16:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-09T11:27:41.000Z", "avg_line_length": 33.6513157895, "max_line_length": 132, "alphanum_fraction": 0.6981427175, "include": true, "reason": "import numpy", "num_tokens": 1326}
|
#!/usr/bin/python3
# number of output figures = 129
import multiprocessing
import matplotlib.patches
import numpy as np
import helper.basis
from helper.figure import Figure
import helper.plot
def drawImage(imageNumber):
fig = Figure.create(figsize=(5, 5 * aspect), scale=0.5)
ax = fig.gca()
xUnits = imageNumber * xUnitsPerImage
if startsInXUnits[pMin] < xUnits:
p = max([p for p in range(pMin, pMax + 1) if startsInXUnits[p] < xUnits])
else:
p = pMin
xUnits -= startsInXUnits[p]
for pCur in range(pMax, -1, -1):
if pCur == p:
xx = np.linspace(min(max(xUnits, 0), pCur + 1), pCur + 1, 200)
color = colorLight
else:
xx = np.linspace(0, pCur + 1, 200)
color = (colorDark if pCur < p else colorLight)
b = helper.basis.CardinalBSpline(pCur)
yy = b.evaluate(xx)
if pCur == 0: yy[-1] = 1
ax.plot(xx, yy, "-", color=color, clip_on=False)
b = helper.basis.CardinalBSpline(p)
xx = np.linspace(0, min(max(xUnits, 0), p + 1), 200)
yy = b.evaluate(xx)
ax.plot(xx, yy, "-", color=colorDark, clip_on=False)
if (0 < xUnits < p + 1) and (p > 0):
bPrev = helper.basis.CardinalBSpline(p - 1)
xx = np.linspace(max(xUnits - 1, 0), xUnits, 1000)
yy = bPrev.evaluate(xx)
xxyy = np.column_stack((np.hstack((xx, xx[::-1])),
np.hstack((np.zeros_like(yy), yy[::-1]))))
ax.add_patch(matplotlib.patches.Polygon(
xxyy, ec="none", fc=colorFace, clip_on=False))
x = np.array([xUnits])
y = b.evaluate(x)
ax.plot(x, y, "o", color=colorDark, markersize=3, clip_on=False)
ax.set_xlim(*xLim)
ax.set_ylim(*yLim)
ax.set_aspect("equal")
ax.set_axis_off()
fig.save(graphicsNumber=imageNumber+1, crop=False,
tightLayout={"pad" : 0, "h_pad" : 0, "w_pad" : 0})
numberOfImages = 129
pauseStartInXUnits = 0.2
pauseBetweenInXUnits = 0.2
pauseEndInXUnits = 0.2
pMin = 1
pMax = 3
colorBase = "anthrazit"
colorDark = helper.plot.mixColors(colorBase, 0.7)
colorLight = helper.plot.mixColors(colorBase, 0.4)
colorFace = helper.plot.mixColors(colorBase, 0.2)
startsInXUnits = {pMin : pauseStartInXUnits}
for p in range(pMin + 1, pMax + 1):
startsInXUnits[p] = startsInXUnits[p - 1] + p + pauseBetweenInXUnits
lengthInXUnits = startsInXUnits[pMax] + (pMax + 1) + pauseEndInXUnits
xUnitsPerImage = lengthInXUnits / (numberOfImages - 1)
xLim = [0, pMax + 1]
yLim = [0, 1]
aspect = (yLim[1] - yLim[0]) / (xLim[1] - xLim[0])
with multiprocessing.Pool() as pool:
pool.map(drawImage, range(numberOfImages))
|
{"hexsha": "42039adad89ca0335f4ab29072579b31da888a1f", "size": 2556, "ext": "py", "lang": "Python", "max_stars_repo_path": "gfx/py/flipBookBSpline.py", "max_stars_repo_name": "valentjn/thesis", "max_stars_repo_head_hexsha": "65a0eb7d5f7488aac93882959e81ac6b115a9ea8", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-01-15T19:50:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T20:16:10.000Z", "max_issues_repo_path": "gfx/py/flipBookBSpline.py", "max_issues_repo_name": "valentjn/thesis", "max_issues_repo_head_hexsha": "65a0eb7d5f7488aac93882959e81ac6b115a9ea8", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gfx/py/flipBookBSpline.py", "max_forks_repo_name": "valentjn/thesis", "max_forks_repo_head_hexsha": "65a0eb7d5f7488aac93882959e81ac6b115a9ea8", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7191011236, "max_line_length": 77, "alphanum_fraction": 0.6459311424, "include": true, "reason": "import numpy", "num_tokens": 865}
|
! RUN: %S/test_errors.sh %s %t %flang_fc1
! REQUIRES: shell
! Error tests for recursive use of derived types.
! C744 If neither the POINTER nor the ALLOCATABLE attribute is specified, the
! declaration-type-spec in the component-def-stmt shall specify an intrinsic
! type or a previously defined derived type.
program main
type :: recursive1
!ERROR: Recursive use of the derived type requires POINTER or ALLOCATABLE
type(recursive1) :: bad1
type(recursive1), pointer :: ok1
type(recursive1), allocatable :: ok2
!ERROR: Recursive use of the derived type requires POINTER or ALLOCATABLE
!ERROR: CLASS entity 'bad2' must be a dummy argument or have ALLOCATABLE or POINTER attribute
class(recursive1) :: bad2
class(recursive1), pointer :: ok3
class(recursive1), allocatable :: ok4
end type recursive1
type :: recursive2(kind,len)
integer, kind :: kind
integer, len :: len
!ERROR: Recursive use of the derived type requires POINTER or ALLOCATABLE
type(recursive2(kind,len)) :: bad1
type(recursive2(kind,len)), pointer :: ok1
type(recursive2(kind,len)), allocatable :: ok2
!ERROR: Recursive use of the derived type requires POINTER or ALLOCATABLE
!ERROR: CLASS entity 'bad2' must be a dummy argument or have ALLOCATABLE or POINTER attribute
class(recursive2(kind,len)) :: bad2
class(recursive2(kind,len)), pointer :: ok3
class(recursive2(kind,len)), allocatable :: ok4
end type recursive2
type :: recursive3(kind,len)
integer, kind :: kind = 1
integer, len :: len = 2
!ERROR: Recursive use of the derived type requires POINTER or ALLOCATABLE
type(recursive3) :: bad1
type(recursive3), pointer :: ok1
type(recursive3), allocatable :: ok2
!ERROR: Recursive use of the derived type requires POINTER or ALLOCATABLE
!ERROR: CLASS entity 'bad2' must be a dummy argument or have ALLOCATABLE or POINTER attribute
class(recursive3) :: bad2
class(recursive3), pointer :: ok3
class(recursive3), allocatable :: ok4
end type recursive3
!ERROR: Derived type 'recursive4' cannot extend itself
type, extends(recursive4) :: recursive4
end type recursive4
end program main
|
{"hexsha": "34c4d815cbcdff1bf8942e9c8074744a04d967da", "size": 2192, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "flang/test/Semantics/resolve44.f90", "max_stars_repo_name": "acidburn0zzz/llvm-project", "max_stars_repo_head_hexsha": "7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2019-04-12T18:49:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T22:23:16.000Z", "max_issues_repo_path": "flang/test/Semantics/resolve44.f90", "max_issues_repo_name": "acidburn0zzz/llvm-project", "max_issues_repo_head_hexsha": "7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 127, "max_issues_repo_issues_event_min_datetime": "2019-04-09T00:55:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T15:35:41.000Z", "max_forks_repo_path": "flang/test/Semantics/resolve44.f90", "max_forks_repo_name": "acidburn0zzz/llvm-project", "max_forks_repo_head_hexsha": "7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-04-02T18:25:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T07:11:37.000Z", "avg_line_length": 43.84, "max_line_length": 97, "alphanum_fraction": 0.7230839416, "num_tokens": 583}
|
#!/usr/bin/env python
""" Calculates numbers for use in writeup. """
from collections import defaultdict, Counter
import csv
import json
from math import log
import os
import pathlib
import random
import sys
import matplotlib.pyplot as plt
import requests
from scipy import stats
from statsmodels.stats.proportion import proportion_confint
ROOT_DIR = str(pathlib.Path(__file__).parent.parent.absolute())
from utils.models import Match, Rating, User, MatchReport
import utils.download
import utils.lookup
def likelihood_of_win_if_higher_rank(data_set_type):
matches = MatchReport.all(data_set_type)
all_match_count = len(matches)
wins = 0
total = 0
for report in matches:
# win defined as victory for higher-rated player
if report.score > 0:
wins += 1
# Do not add if no difference in rating
if report.score:
total += 1
cl, _ = proportion_confint(wins, total)
pct_win = wins/float(total)
print(data_set_type)
print('{:>7} records out of {:>7} total, win pct: {:.3f}, ci: {:.3f}'.format(total, all_match_count, pct_win, pct_win - cl))
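# A quick hedged illustration of the confidence-interval call above (numbers are
# approximate; the values are made up for this note):
#   lo, hi = proportion_confint(60, 100)  # defaults: alpha=0.05, method='normal'
#   # lo ~ 0.504, hi ~ 0.696, so the reported half-width (pct_win - lo) is ~0.096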
def correlation_and_regression(data_set_type):
matches = MatchReport.by_rating(data_set_type, 0, 10000)
print(len(matches))
score_results_dict = defaultdict(lambda: [])
for report in MatchReport.all('model'):
score_results_dict[abs(report.score)].append(report.score)
    # For determining linear regression. Note: the commented-out initialization
    # below would anchor the intercept at .5
    x = []  # [0 for _ in range(100*len(matches))]
    y = []  # [.5 for _ in range(100*len(matches))]
for score in sorted(score_results_dict):
values = score_results_dict[score]
total = len(values)
# Do not calculate if no difference in rating
if score:
# win defined as victory for higher-rated player
wins = len([i for i in values if i > 0])
pct_win = wins / float(total)
            # Append the rating difference and win percentage once per record
            # with that difference, so that the regression is weighted
            # proportionally to the number of observations
for _ in range(total):
x.append(score)
y.append(pct_win)
slope, intercept, r_value, p_value, stderr = stats.linregress(x, y)
print(data_set_type)
print(' Intercept: {:.3f}, Slope: {:.4f}, R value: {:.3f}, p value: {}, Std Err: {}'.format(intercept, slope, r_value, p_value, stderr))
def ratings_correlation_and_regression(data_set_type):
ctr = Counter()
for report in MatchReport.all('model'):
ctr[abs(report.score)] += 1
xs = []
log_ys = []
for x in sorted(ctr)[1:]:
if ctr[x] < 10:
continue
xs.append(x)
log_ys.append(log(ctr[x]))
slope, intercept, r_value, _, _ = stats.linregress(xs, log_ys)
print(data_set_type)
print(' Intercept: {:.3f}, Slope: {:.4f}, R value: {:.3f}'.format(intercept, slope, r_value))
if __name__ == '__main__':
for data_set_type in ('model', 'verification',):
correlation_and_regression(data_set_type)
|
{"hexsha": "193ad37d15be8b8e5e40694cffa0c7cb9869e5e3", "size": 3139, "ext": "py", "lang": "Python", "max_stars_repo_path": "elo/calculations.py", "max_stars_repo_name": "porcpine1967/aoe2_comparisons", "max_stars_repo_head_hexsha": "7c5e5e5223127325269b558d59ea4bdb5949345e", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "elo/calculations.py", "max_issues_repo_name": "porcpine1967/aoe2_comparisons", "max_issues_repo_head_hexsha": "7c5e5e5223127325269b558d59ea4bdb5949345e", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "elo/calculations.py", "max_forks_repo_name": "porcpine1967/aoe2_comparisons", "max_forks_repo_head_hexsha": "7c5e5e5223127325269b558d59ea4bdb5949345e", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0804597701, "max_line_length": 141, "alphanum_fraction": 0.6572156738, "include": true, "reason": "from scipy,from statsmodels", "num_tokens": 766}
|
import os.path as osp
import os
import numpy as np
import json
def get_index_by_label(used_labels, label):
return list(used_labels.keys())[list(used_labels.values()).index(label)]
def cache_label_name(labels_dict, label_name):
cached_keys = sorted(list(labels_dict.keys()))
if len(cached_keys) == 0:
labels_dict[0] = label_name
else:
if label_name not in list(labels_dict.values()):
last_key = max(cached_keys)
fore_key = last_key + 1
labels_dict[fore_key] = label_name
return 0
def gen_labels(ds_root):
used_labels = {}
# ds_root = '/FairMOT/data/SLY_MOT/'
for folder_type in ['train', 'test']:
seq_root = osp.join(ds_root, f'images/{folder_type}')
label_root = osp.join(ds_root, f'labels_with_ids/{folder_type}')
os.makedirs(label_root, exist_ok=True)
seqs = [s for s in os.listdir(seq_root)]
tid_curr = 0
tid_last = -1
for seq in seqs:
seq_info = open(osp.join(seq_root, seq, 'seqinfo.ini')).read()
seq_width = int(seq_info[seq_info.find('imWidth=') + 8:seq_info.find('\nimHeight')])
seq_height = int(seq_info[seq_info.find('imHeight=') + 9:seq_info.find('\nimExt')])
gt_names = sorted(os.listdir(osp.join(seq_root, seq, 'gt')))
for label_index_curr, gt_name in enumerate(gt_names):
label_name = gt_name.split('_')[1].split('.')[0]
cache_label_name(used_labels, label_name)
gt_txt = osp.join(seq_root, seq, 'gt', f'{gt_name}')
gt = np.loadtxt(gt_txt, dtype=np.float64, delimiter=',')
idx = np.lexsort(gt.T[:2, :])
gt = gt[idx, :]
seq_label_root = osp.join(label_root, seq, 'img1')
os.makedirs(seq_label_root, exist_ok=True)
for fid, tid, x, y, w, h, mark, _, _, _ in gt:
if mark == 0:
continue
fid = int(fid)
tid = int(tid)
if not tid == tid_last:
tid_curr += 1
tid_last = tid
x += w / 2
y += h / 2
label_fpath = osp.join(seq_label_root, '{:06d}.txt'.format(fid))
label_str = '{:d} {:d} {:.6f} {:.6f} {:.6f} {:.6f}\n'.format(
get_index_by_label(used_labels, label_name),
tid_curr, x / seq_width, y / seq_height, w / seq_width, h / seq_height)
with open(label_fpath, 'a') as f:
f.write(label_str)
with open(f'{osp.join(ds_root, "classes_mapping.json")}', 'w') as fp:
json.dump(used_labels, fp)
print('done generating for classes:\n'
f'{used_labels}')
|
{"hexsha": "154818c7049c0865d87a5ec8135c5b1e5ab3a4b1", "size": 2883, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/gen_labels.py", "max_stars_repo_name": "supervisely-ecosystem/FairMOT", "max_stars_repo_head_hexsha": "493aa8b626e0eac1a71d27af8eefb680210b8a11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/gen_labels.py", "max_issues_repo_name": "supervisely-ecosystem/FairMOT", "max_issues_repo_head_hexsha": "493aa8b626e0eac1a71d27af8eefb680210b8a11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gen_labels.py", "max_forks_repo_name": "supervisely-ecosystem/FairMOT", "max_forks_repo_head_hexsha": "493aa8b626e0eac1a71d27af8eefb680210b8a11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5925925926, "max_line_length": 96, "alphanum_fraction": 0.5331252168, "include": true, "reason": "import numpy", "num_tokens": 703}
|
export opt_linear_fit!
"""
opt_linear_fit!(
graph,
objfun,
discr,
linear_cref;
input = :A,
errtype = :abserr,
linlsqr = :backslash,
droptol = 0,
)
Linear fitting of a `graph` of the form
c_1 g_1(x) + c_2 g_2(x) + … + c_n g_n(x)
to the values of `objfun`, in the points `discr`. A reference to the coefficients
`c_1,…,c_n` should be given in `linear_cref`.
The variable `graph` is modified during the iterations and the function has no
return value.
See [`opt_gauss_newton!`](@ref) for a description of the kwargs `errtype` and
`input`, and [`solve_linlsqr!`](@ref) for the kwargs `linlsqr` and `droptol`.
"""
function opt_linear_fit!(
graph,
objfun,
discr,
linear_cref;
input = :A,
errtype = :abserr,
linlsqr = :backslash,
droptol = 0,
)
objfun_vals = objfun.(discr)
vals = init_vals_eval_graph!(graph, discr, nothing, input)
eval_graph(graph, discr, vals = vals, input = input)
n = length(linear_cref)
T = eltype(valtype(vals))
A = ones(T, size(discr, 1), n)
for k = 1:n
parent = graph.parents[linear_cref[k][1]][linear_cref[k][2]]
A[:, k] = vals[parent]
end
adjust_for_errtype!(A, objfun_vals, objfun_vals, errtype)
c = solve_linlsqr!(A, objfun_vals, linlsqr, droptol)
set_coeffs!(graph, c, linear_cref)
return nothing
end
|
{"hexsha": "c2e836f88842f92ac841388ed6998eafb4e317e5", "size": 1398, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/optimization/linear_fit.jl", "max_stars_repo_name": "matrixfunctions/GraphMatFun.jl", "max_stars_repo_head_hexsha": "1fac14aa849e7f050ae5281bf6414b4356807199", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-07-09T07:33:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T22:57:51.000Z", "max_issues_repo_path": "src/optimization/linear_fit.jl", "max_issues_repo_name": "matrixfunctions/GraphMatFun.jl", "max_issues_repo_head_hexsha": "1fac14aa849e7f050ae5281bf6414b4356807199", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-07-09T17:53:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T14:48:01.000Z", "max_forks_repo_path": "src/optimization/linear_fit.jl", "max_forks_repo_name": "matrixfunctions/GraphMatFun.jl", "max_forks_repo_head_hexsha": "1fac14aa849e7f050ae5281bf6414b4356807199", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1034482759, "max_line_length": 79, "alphanum_fraction": 0.6266094421, "num_tokens": 436}
|
# Truncated and folded distributions
This tutorial will cover how to work with truncated and folded
distributions in NumPyro.
It is assumed that you're already familiar with the basics of NumPyro.
To get the most out of this tutorial you'll need some background in probability.
### Table of contents
* [0. Setup](#0)
* [1. What is a truncated distribution?](#1)
* [2. What is a folded distribution?](#2)
* [3. Sampling from truncated and folded distributions](#3)
* [4. Ready-to-use truncated and folded distributions](#4)
* [5. Building your own truncated distributions](#5)
* [5.1 Recap of NumPyro distributions](#5.1)
* [5.2 Right-truncated normal](#5.2)
* [5.3 Left-truncated Poisson](#5.3)
* [6. References and related material](#references)
### Setup <a class="anchor" id="0"></a>
To run this notebook, we are going to need the following imports
```python
!pip install -q git+https://github.com/pyro-ppl/numpyro.git
```
```python
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import numpyro
import numpyro.distributions as dist
from jax import lax, random
from jax.scipy.special import ndtr, ndtri
from jax.scipy.stats import poisson, norm
from numpyro.distributions import (
constraints,
Distribution,
FoldedDistribution,
SoftLaplace,
StudentT,
TruncatedDistribution,
TruncatedNormal,
)
from numpyro.distributions.util import promote_shapes
from numpyro.infer import DiscreteHMCGibbs, MCMC, NUTS, Predictive
from scipy.stats import poisson as sp_poisson
numpyro.enable_x64()
RNG = random.PRNGKey(0)
PRIOR_RNG, MCMC_RNG, PRED_RNG = random.split(RNG, 3)
MCMC_KWARGS = dict(
num_warmup=2000,
num_samples=2000,
num_chains=4,
chain_method="sequential",
)
```
### 1. What are truncated distributions?
<a class="anchor" id="1"></a>
The **support** of a probability distribution is the set of values
in the domain with **non-zero probability**. For example, the
support of the normal distribution is the whole real line (even if
the density gets very small as we move away from the mean, technically
speaking, it is never quite zero). The support of the uniform distribution,
as coded in `jax.random.uniform` with the default arguments, is the interval $[0, 1)$, because any
value outside of that interval has zero probability. The support of the Poisson distribution is the set of non-negative integers, etc.
**Truncating** a distribution makes its support smaller
so that any value outside our desired domain has zero probability. In practice, this can be useful
for modelling situations in which certain biases are introduced during data collection.
For example, some physical detectors only get triggered when the signal is above some
minimum threshold, or sometimes the detectors fail if the signal exceeds a certain value.
As a result, the **observed values are constrained to be within a limited range of values**,
even though the true signal does not have the same constraints.
See, for example, section 3.1 of _Information Theory, Inference, and Learning Algorithms_ by David MacKay.
Naively, if $S$ is the support of the original density $p_Y(y)$, then by truncating to a new support
$T\subset S$ we are effectively defining a new random variable $Z$ for which the density is
$$
\begin{align}
p_Z(z) \propto
\begin{cases}
p_Y(z) & \text{if $z$ is in $T$}\\
0 & \text{if $z$ is outside $T$}\\
\end{cases}
\end{align}
$$
The reason for writing a $\propto$ (proportional to) sign instead of a strict equation is that,
defined in the above way, the resulting function does not integrate to $1$ and so it cannot be strictly considered a probability density. To make it into a probability density **we need to re-distribute the truncated mass**
among the part of the distribution that remains. To do this, we simply re-weight every point by the same constant:
$$
\begin{align}
p_Z(z) =
\begin{cases}
\frac{1}{M}p_Y(z) & \text{if $z$ is in $T$}\\
0 & \text{if $z$ is outside $T$}\\
\end{cases}
\end{align}
$$
where $M = \int_T p_Y(y)\mathrm{d}y$.
In practice, the truncation is often one-sided. This means that if, for example, the support before truncation is the interval $(a, b)$, then the support after truncation is of the form $(a, c)$ or $(c, b)$, with $a < c < b$. The figure below illustrates a left-sided truncation at zero of a normal distribution $N(1, 1)$.
*(figure not included: left panel shows the original N(1, 1) density with the truncated mass highlighted; right panel shows the re-weighted, truncated density)*
The original distribution (left side) is truncated at the vertical dotted line. The truncated mass (orange region) is redistributed in the new support (right side image) so that the total area under the curve remains equal to 1 even after truncation. This method of re-weighting ensures that the density ratio between any two points, $p(a)/p(b)$ remains the same before and after the reweighting is done (as long as the points are inside the new support, of course).
**Note**: Truncated data is different from _censored_ data. Censoring also hides values that are outside some desired support but, contrary to truncated data, we know when a value has been censored. The typical example is the household scale which does not report values above 300 pounds. Censored data will not be covered in this tutorial.
### 2. What is a folded distribution? <a class="anchor" id="2"></a>
**Folding** is achieved by taking the absolute value of a random variable, $Z = \lvert Y \rvert$. This obviously modifies the support of the original distribution since negative values now have zero
probability:
$$
\begin{align}
p_Z(z) =
\begin{cases}
p_Y(z) + p_Y(-z) & \text{if $z\ge 0$}\\
0 & \text{if $z\lt 0$}\\
\end{cases}
\end{align}
$$
The figure below illustrates a folded normal distribution $N(1, 1)$.
*(figure not included: the folded N(1, 1) density)*
As you can see, the resulting distribution is different from the truncated case. In particular, the density ratio between points, $p(a)/p(b)$, is in general not the same after folding. For some examples in which folding is relevant see [references 3 and 4](#references)
If the original distribution is symmetric around zero, then folding and truncating at zero have the same effect.
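A quick, numpy-only sanity check of this statement (not part of the workflow below, just an empirical sketch):
```python
import numpy as np

rng = np.random.default_rng(0)
samples = rng.normal(size=100_000)        # N(0, 1) is symmetric around zero
folded = np.abs(samples)                  # folding at zero
truncated = samples[samples >= 0]         # truncating at zero (by rejection)
# the two empirical distributions agree up to Monte Carlo error
print(folded.mean(), truncated.mean())
print(np.quantile(folded, 0.9), np.quantile(truncated, 0.9))
```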
### 3. Sampling from truncated and folded distributions <a class="anchor" id="3"></a>
**Truncated distributions**
Usually, we already have a sampler for the pre-truncated distribution (e.g. `np.random.normal`).
So, a seemingly simple way of generating samples from the truncated distribution would be to
sample from the original distribution, and then discard the samples that are outside the
desired support. For example, if we wanted samples from a normal distribution truncated to the
support $(-\infty, 1)$, we'd simply do:
```python
upper = 1
samples = np.random.normal(size=1000)
truncated_samples = samples[samples < upper]
```
This is called _rejection sampling_, but it is **not very efficient**.
If the region we truncated had a sufficiently high probability mass, then we'd be discarding a lot of samples and it might be a while before we accumulate sufficient samples for the truncated distribution. For example, the above snippet would only result in approximately 840 truncated samples even though we initially drew 1000. This can easily get a lot worse for other combinations of parameters.
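For reference, the expected acceptance rate of this naive scheme is just the untruncated CDF evaluated at the truncation point, which a quick check with `scipy.stats.norm` confirms:
```python
from scipy.stats import norm

# probability that a standard normal draw falls below the truncation point at 1
print(norm.cdf(1))  # ≈ 0.841, i.e. roughly 841 of 1000 draws survive on average
```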
A **more efficient** approach is to use a method known as [inverse transform sampling](https://en.wikipedia.org/wiki/Inverse_transform_sampling).
In this method, we first sample from a uniform distribution in (0, 1) and then transform those samples with the inverse cumulative distribution of our truncated distribution.
This method ensures that no samples are wasted in the process, though it does have the slight complication that
**we need to calculate the inverse CDF (ICDF)** of our truncated distribution. This might sound too complicated at first but, with a bit of algebra, we can often calculate the truncated ICDF in terms of the untruncated ICDF. The untruncated ICDF for many distributions is already available.
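As a concrete sketch of the idea (using numpy and scipy here rather than NumPyro, and the same truncation point as above), the truncated ICDF can be written in terms of the untruncated one:
```python
import numpy as np
from scipy.stats import norm

upper = 1
M = norm.cdf(upper)                    # probability mass kept after truncation
u = np.random.uniform(size=1000)       # uniform draws in [0, 1); none are discarded
truncated_samples = norm.ppf(u * M)    # untruncated ICDF applied to the rescaled draws
# every sample lands below `upper` by construction
```
This is the relation $F_Z^{-1}(u) = F_Y^{-1}(Mu)$ that is derived more carefully in section 5.2 below.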
**Folded distributions**
This case is a lot simpler. Since we already have a sampler for the pre-folded distribution, all we need to do is to take the absolute value of those samples:
```python
samples = np.random.normal(size=1000)
folded_samples = np.abs(samples)
```
### 4. Ready-to-use truncated and folded distributions <a class="anchor" id="4"></a>
The later sections in this tutorial will show you how to construct your own truncated and folded distributions, but you don't have to reinvent the wheel. NumPyro has [a bunch of truncated distributions](https://num.pyro.ai/en/stable/distributions.html#truncated-distributions) already implemented.
Suppose, for example, that you want a normal distribution truncated on the right.
For that purpose, we use the [TruncatedNormal](https://num.pyro.ai/en/stable/distributions.html#truncatednormal) distribution. The parameters of this distribution are `loc` and `scale`, corresponding to the `loc` and `scale` of the _untruncated_ normal, and `low` and/or `high` corresponding to the truncation points. Importantly, the `low` and `high` are **keyword only** arguments, only `loc` and `scale` are valid as positional arguments.
This is how you can use this class in a model:
```python
def truncated_normal_model(num_observations, high, x=None):
loc = numpyro.sample("loc", dist.Normal())
scale = numpyro.sample("scale", dist.LogNormal())
with numpyro.plate("observations", num_observations):
numpyro.sample("x", TruncatedNormal(loc, scale, high=high), obs=x)
```
Let's now check that we can use this model in a typical MCMC workflow.
**Prior simulation**
```python
high = 1.2
num_observations = 250
num_prior_samples = 100
prior = Predictive(truncated_normal_model, num_samples=num_prior_samples)
prior_samples = prior(PRIOR_RNG, num_observations, high)
```
**Inference**
To test our model, we run mcmc against some synthetic data.
The synthetic data can be any arbitrary sample from the prior simulation.
```python
# -- select an arbitrary prior sample as true data
true_idx = 0
true_loc = prior_samples["loc"][true_idx]
true_scale = prior_samples["scale"][true_idx]
true_x = prior_samples["x"][true_idx]
```
```python
plt.hist(true_x.copy(), bins=20)
plt.axvline(high, linestyle=":", color="k")
plt.xlabel("x")
plt.show()
```
```python
# --- Run MCMC and check estimates and diagnostics
mcmc = MCMC(NUTS(truncated_normal_model), **MCMC_KWARGS)
mcmc.run(MCMC_RNG, num_observations, high, true_x)
mcmc.print_summary()
# --- Compare to ground truth
print(f"True loc : {true_loc:3.2}")
print(f"True scale: {true_scale:3.2}")
```
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:02<00:00, 1909.24it/s, 1 steps of size 5.65e-01. acc. prob=0.93]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 10214.14it/s, 3 steps of size 5.16e-01. acc. prob=0.95]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 15102.95it/s, 1 steps of size 6.42e-01. acc. prob=0.90]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 16522.03it/s, 3 steps of size 6.39e-01. acc. prob=0.90]
mean std median 5.0% 95.0% n_eff r_hat
loc -0.58 0.15 -0.59 -0.82 -0.35 2883.69 1.00
scale 1.49 0.11 1.48 1.32 1.66 3037.78 1.00
Number of divergences: 0
True loc : -0.56
True scale: 1.4
**Removing the truncation**
Once we have inferred the parameters of our model, a common task is to understand what the data would look like _without_ the truncation. In this example, this is easily done by simply "pushing" the value of `high` to infinity.
```python
pred = Predictive(truncated_normal_model, posterior_samples=mcmc.get_samples())
pred_samples = pred(PRED_RNG, num_observations, high=float("inf"))
```
Let's finally plot these samples and compare them to the original, observed data.
```python
# thin the samples to not saturate matplotlib
samples_thinned = pred_samples["x"].ravel()[::1000]
```
```python
f, axes = plt.subplots(1, 2, figsize=(15, 5), sharex=True)
axes[0].hist(
samples_thinned.copy(), label="Untruncated posterior", bins=20, density=True
)
axes[0].set_title("Untruncated posterior")
vals, bins, _ = axes[1].hist(
samples_thinned[samples_thinned < high].copy(),
label="Tail of untruncated posterior",
bins=10,
density=True,
)
axes[1].hist(
true_x.copy(), bins=bins, label="Observed, truncated data", density=True, alpha=0.5
)
axes[1].set_title("Comparison to observed data")
for ax in axes:
ax.axvline(high, linestyle=":", color="k", label="Truncation point")
ax.legend()
plt.show()
```
The plot on the left shows data simulated from the posterior distribution with the truncation removed, so we are able to see how the data would look like if it were not truncated. To sense check this, we discard the simulated samples that are above the truncation point and make histogram of those and compare it to a histogram of the true data (right plot).
**The TruncatedDistribution class**
The source code for the [TruncatedNormal](https://num.pyro.ai/en/stable/distributions.html#truncatednormal) in NumPyro uses a class called [TruncatedDistribution](https://num.pyro.ai/en/stable/distributions.html#truncateddistribution) which abstracts away the logic for `sample` and `log_prob` that
we will discuss in the next sections. At the moment, though, this logic only works for continuous, symmetric distributions with _real_ support.
We can use this class to quickly construct other truncated distributions. For example, if we need a truncated [SoftLaplace](https://num.pyro.ai/en/stable/distributions.html#softlaplace) we can use the following pattern:
```python
def TruncatedSoftLaplace(
loc=0.0, scale=1.0, *, low=None, high=None, validate_args=None
):
return TruncatedDistribution(
base_dist=SoftLaplace(loc, scale),
low=low,
high=high,
validate_args=validate_args,
)
```
```python
def truncated_soft_laplace_model(num_observations, high, x=None):
loc = numpyro.sample("loc", dist.Normal())
scale = numpyro.sample("scale", dist.LogNormal())
with numpyro.plate("obs", num_observations):
numpyro.sample("x", TruncatedSoftLaplace(loc, scale, high=high), obs=x)
```
And, as before, we check that we can use this model in the steps of a typical workflow:
```python
high = 2.3
num_observations = 200
num_prior_samples = 100
prior = Predictive(truncated_soft_laplace_model, num_samples=num_prior_samples)
prior_samples = prior(PRIOR_RNG, num_observations, high)
true_idx = 0
true_x = prior_samples["x"][true_idx]
true_loc = prior_samples["loc"][true_idx]
true_scale = prior_samples["scale"][true_idx]
mcmc = MCMC(
NUTS(truncated_soft_laplace_model),
**MCMC_KWARGS,
)
mcmc.run(
MCMC_RNG,
num_observations,
high,
true_x,
)
mcmc.print_summary()
print(f"True loc : {true_loc:3.2}")
print(f"True scale: {true_scale:3.2}")
```
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:02<00:00, 1745.70it/s, 1 steps of size 6.78e-01. acc. prob=0.93]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 9294.56it/s, 1 steps of size 7.02e-01. acc. prob=0.93]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 10412.30it/s, 1 steps of size 7.20e-01. acc. prob=0.92]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 10583.85it/s, 3 steps of size 7.01e-01. acc. prob=0.93]
mean std median 5.0% 95.0% n_eff r_hat
loc -0.37 0.17 -0.38 -0.65 -0.10 4034.96 1.00
scale 1.46 0.12 1.45 1.27 1.65 3618.77 1.00
Number of divergences: 0
True loc : -0.56
True scale: 1.4
**Important**
The `sample` method of the [TruncatedDistribution](https://num.pyro.ai/en/stable/distributions.html#truncateddistribution) class relies on inverse-transform sampling.
This has the implicit requirement that the base distribution should have an `icdf` method already available.
If this is not the case, we will not be able to call the `sample` method on any instances of our distribution, nor use it with the `Predictive` class.
However, the `log_prob` method only depends on the `cdf` method (which is more frequently available than the `icdf`). If the `log_prob` method is available, then we _can_ use our distribution as prior/likelihood in a model.
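A hypothetical sketch of that last point: below, `BaseWithoutICDF` is a stand-in for any distribution that implements `cdf` but not `icdf`. The resulting truncated distribution can still serve as a likelihood, because MCMC only ever calls `log_prob`; calling `.sample()` on it (for instance through `Predictive`) would fail.
```python
# `BaseWithoutICDF` is hypothetical, not a real NumPyro class
def likelihood_only_model(x):
    loc = numpyro.sample("loc", dist.Normal())
    scale = numpyro.sample("scale", dist.LogNormal())
    with numpyro.plate("obs", x.shape[0]):
        numpyro.sample(
            "x", TruncatedDistribution(BaseWithoutICDF(loc, scale), high=1.0), obs=x
        )
```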
**The FoldedDistribution class**
Similar to truncated distributions, NumPyro has the [FoldedDistribution](https://num.pyro.ai/en/stable/distributions.html#foldeddistribution) class to help you quickly construct folded distributions. Popular examples of folded distributions are the so-called "half-normal", "half-student" or "half-cauchy". As the name suggests, these distributions keep only (the positive) _half_ of the distribution. Implicit in the name of these "half" distributions is that they are centered at zero before folding. But, of course, you can fold a distribution even if it's not centered at zero. For instance, this is how you would define a folded student-t distribution.
```python
def FoldedStudentT(df, loc=0.0, scale=1.0):
return FoldedDistribution(StudentT(df, loc=loc, scale=scale))
```
```python
def folded_student_model(num_observations, x=None):
df = numpyro.sample("df", dist.Gamma(6, 2))
loc = numpyro.sample("loc", dist.Normal())
scale = numpyro.sample("scale", dist.LogNormal())
with numpyro.plate("obs", num_observations):
numpyro.sample("x", FoldedStudentT(df, loc, scale), obs=x)
```
And we check that we can use our distribution in a typical workflow:
```python
# --- prior sampling
num_observations = 500
num_prior_samples = 100
prior = Predictive(folded_student_model, num_samples=num_prior_samples)
prior_samples = prior(PRIOR_RNG, num_observations)
# --- choose any prior sample as the ground truth
true_idx = 0
true_df = prior_samples["df"][true_idx]
true_loc = prior_samples["loc"][true_idx]
true_scale = prior_samples["scale"][true_idx]
true_x = prior_samples["x"][true_idx]
# --- do inference with MCMC
mcmc = MCMC(
NUTS(folded_student_model),
**MCMC_KWARGS,
)
mcmc.run(MCMC_RNG, num_observations, true_x)
# --- Check diagostics
mcmc.print_summary()
# --- Compare to ground truth:
print(f"True df : {true_df:3.2f}")
print(f"True loc : {true_loc:3.2f}")
print(f"True scale: {true_scale:3.2f}")
```
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:02<00:00, 1343.54it/s, 7 steps of size 3.51e-01. acc. prob=0.75]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:01<00:00, 3644.99it/s, 7 steps of size 3.56e-01. acc. prob=0.73]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:01<00:00, 3137.13it/s, 7 steps of size 2.62e-01. acc. prob=0.91]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:01<00:00, 3028.93it/s, 7 steps of size 1.85e-01. acc. prob=0.96]
mean std median 5.0% 95.0% n_eff r_hat
df 3.12 0.52 3.07 2.30 3.97 2057.60 1.00
loc -0.02 0.88 -0.03 -1.28 1.34 925.84 1.01
scale 2.23 0.21 2.25 1.89 2.57 1677.38 1.00
Number of divergences: 33
True df : 3.01
True loc : 0.37
True scale: 2.41
### 5. Building your own truncated distribution <a class="anchor" id="5"></a>
If the
[TruncatedDistribution](https://num.pyro.ai/en/stable/distributions.html#truncateddistribution) and
[FoldedDistribution](https://num.pyro.ai/en/stable/distributions.html#foldeddistribution)
classes are not sufficient to solve your problem,
you might want to look into writing your own truncated distribution from the ground up.
This can be a tedious process, so this section will give you some guidance and examples to help you with it.
#### 5.1 Recap of NumPyro distributions <a class="anchor" id="5.1"></a>
A NumPyro distribution should subclass [Distribution](https://num.pyro.ai/en/stable/distributions.html#distribution) and implement a few basic ingredients:
**Class attributes**
The class attributes serve a few different purposes. Here we will mainly care about two:
1. `arg_constraints`: Impose some requirements on the parameters of the distribution. Errors are raised at instantiation time if the parameters passed do not satisfy the constraints (see the short sketch after this list).
2. `support`: It is used in some inference algorithms like MCMC and SVI with auto-guides, where we need to perform the algorithm in the unconstrained space. Knowing the support, we can automatically reparametrize things under the hood.
We'll explain other class attributes as we go.
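For instance (a small sketch, relying only on the imports from the Setup section), a constraint violation surfaces as soon as the distribution is instantiated when validation is enabled:
```python
try:
    dist.Normal(loc=0.0, scale=-1.0, validate_args=True)  # scale must be positive
except ValueError as err:
    print(err)
```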
**The** `__init__` **method**
This is where we define the parameters of the distribution.
We also use `jax` and `lax` to promote the parameters to shapes that are valid for broadcasting.
The `__init__` method of the parent class is also required because that's where the validation of our parameters is done.
**The** `log_prob` **method**
Implementing the `log_prob` method ensures that we can do inference. As the name suggests, this method returns the logarithm of the density evaluated at the argument.
**The** `sample` **method**
This method is used for drawing independent samples from our distribution. It is particularly useful for doing prior and posterior predictive checks. Note, in particular, that this method is not needed if you only need to use your distribution as prior in a model - the `log_prob` method will suffice.
The place-holder code for any of our implementations can be written as
```python
class MyDistribution(Distribution):
# class attributes
arg_constraints = {}
support = None
def __init__(self):
pass
def log_prob(self, value):
pass
def sample(self, key, sample_shape=()):
pass
```
#### 5.2 Example: Right-truncated normal <a class="anchor" id="5.2"></a>
We are going to modify a normal distribution so that its new support is
of the form `(-inf, high)`, with `high` a real number. This could be done with the `TruncatedNormal` distribution but, for the sake of illustration, we are not going to rely on it.
We'll call our distribution `RightTruncatedNormal`. Let's write the skeleton code and then proceed to fill in the blanks.
```python
class RightTruncatedNormal(Distribution):
# <class attributes>
def __init__(self):
pass
def log_prob(self, value):
pass
def sample(self, key, sample_shape=()):
pass
```
**Class attributes**
Remember that a non-truncated normal distribution is specified in NumPyro by two parameters, `loc` and `scale`,
which correspond to the mean and standard deviation.
Looking at the [source code](https://github.com/pyro-ppl/numpyro/blob/0664c2d2dd1eb5f41ea6a0bcef91e5fa2a417ce5/numpyro/distributions/continuous.py#L1337) for the `Normal` distribution we see the following lines:
```python
arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
support = constraints.real
reparametrized_params = ["loc", "scale"]
```
The `reparametrized_params` attribute is used by variational inference algorithms when constructing gradient estimators. The parameters of many common distributions with continuous support (e.g. the Normal distribution) are reparameterizable, while the parameters of discrete distributions are not. Note that `reparametrized_params` is irrelevant for MCMC algorithms like HMC. See [SVI Part III](https://pyro.ai/examples/svi_part_iii.html#Tricky-Case:-Non-reparameterizable-Random-Variables) for more details.
We must adapt these attributes to our case by including the `"high"` parameter, but there are two issues we need to deal with:
1. `constraints.real` is a bit too restrictive. We'd like `jnp.inf` to be a valid value for `high` (equivalent to no truncation), but at the moment infinity is not a valid real number. We deal with this situation by defining our own constraint. The source code for `constraints.real` is easy to imitate:
```python
class _RightExtendedReal(constraints.Constraint):
"""
Any number in the interval (-inf, inf].
"""
def __call__(self, x):
return (x == x) & (x != float("-inf"))
def feasible_like(self, prototype):
return jnp.zeros_like(prototype)
right_extended_real = _RightExtendedReal()
```
2. `support` can no longer be a class attribute as it will depend on the value of `high`. So instead we implement it as a dependent property.
Our distribution then looks as follows:
```python
class RightTruncatedNormal(Distribution):
arg_constraints = {
"loc": constraints.real,
"scale": constraints.positive,
"high": right_extended_real,
}
reparametrized_params = ["loc", "scale", "high"]
# ...
@constraints.dependent_property
def support(self):
        return constraints.less_than(self.high)
```
**The** `__init__` **method**
Once again we take inspiration from the [source code](https://github.com/pyro-ppl/numpyro/blob/0664c2d2dd1eb5f41ea6a0bcef91e5fa2a417ce5/numpyro/distributions/continuous.py#L1342) for the normal distribution. The key point is the use of `lax` and `jax` to check the shapes of the arguments passed and make sure that such shapes are consistent for broadcasting. We follow the same pattern for our use case -- all we need to do is include the `high` parameter.
In the source implementation of `Normal`, both parameters `loc` and `scale` are given defaults so that one recovers a standard normal distribution if no arguments are specified. In the same spirit, we choose `float("inf")` as a default for `high` which would be equivalent to no truncation.
```python
# ...
def __init__(self, loc=0.0, scale=1.0, high=float("inf"), validate_args=None):
batch_shape = lax.broadcast_shapes(
jnp.shape(loc),
jnp.shape(scale),
jnp.shape(high),
)
self.loc, self.scale, self.high = promote_shapes(loc, scale, high)
super().__init__(batch_shape, validate_args=validate_args)
# ...
```
**The** `log_prob` **method**
For a truncated distribution, the log density is given by
$$
\begin{align}
\log p_Z(z) =
\begin{cases}
\log p_Y(z) - \log M & \text{if $z$ is in $T$}\\
-\infty & \text{if $z$ is outside $T$}\\
\end{cases}
\end{align}
$$
where, again, $p_Z$ is the density of the truncated distribution, $p_Y$ is the density before truncation, and $M = \int_T p_Y(y)\mathrm{d}y$. For the specific case of truncating the normal distribution to the interval `(-inf, high)`, the constant $M$ is equal to the cumulative density evaluated at the truncation point. We can easily implement this log-density method because `jax.scipy.stats` already has a `norm` module that we can use.
```python
# ...
def log_prob(self, value):
log_m = norm.logcdf(self.high, self.loc, self.scale)
log_p = norm.logpdf(value, self.loc, self.scale)
return jnp.where(value < self.high, log_p - log_m, -jnp.inf)
# ...
```
**The** `sample` **method**
To implement the sample method using inverse-transform sampling, we need to also implement the inverse cumulative distribution function. For this, we can use the `ndtri` function that lives inside `jax.scipy.special`. This function returns the inverse cdf for the standard normal distribution. We can do a bit of algebra to obtain the inverse cdf of the truncated, non-standard normal. First recall that if $X\sim Normal(0, 1)$ and $Y = \mu + \sigma X$, then $Y\sim Normal(\mu, \sigma)$. Then if $Z$ is the truncated $Y$, its cumulative density is given by:
$$
\begin{align}
F_Z(y) &= \int_{-\infty}^{y}p_Z(r)dr\newline
&= \frac{1}{M}\int_{-\infty}^{y}p_Y(s)ds \quad\text{if $y < high$} \newline
&= \frac{1}{M}F_Y(y)
\end{align}
$$
And so its inverse is
$$
\begin{align}
F_Z^{-1}(u) = \left(\frac{1}{M}F_Y\right)^{-1}(u)
= F_Y^{-1}(M u)
= F_{\mu + \sigma X}^{-1}(Mu)
= \mu + \sigma F_X^{-1}(Mu)
\end{align}
$$
The translation of the above math into code is
```python
# ...
def sample(self, key, sample_shape=()):
shape = sample_shape + self.batch_shape
minval = jnp.finfo(jnp.result_type(float)).tiny
u = random.uniform(key, shape, minval=minval)
return self.icdf(u)
def icdf(self, u):
m = norm.cdf(self.high, self.loc, self.scale)
return self.loc + self.scale * ndtri(m * u)
```
With everything in place, the final implementation is as below.
```python
class _RightExtendedReal(constraints.Constraint):
"""
Any number in the interval (-inf, inf].
"""
def __call__(self, x):
return (x == x) & (x != float("-inf"))
def feasible_like(self, prototype):
return jnp.zeros_like(prototype)
right_extended_real = _RightExtendedReal()
class RightTruncatedNormal(Distribution):
"""
A truncated Normal distribution.
:param numpy.ndarray loc: location parameter of the untruncated normal
:param numpy.ndarray scale: scale parameter of the untruncated normal
:param numpy.ndarray high: point at which the truncation happens
"""
arg_constraints = {
"loc": constraints.real,
"scale": constraints.positive,
"high": right_extended_real,
}
reparametrized_params = ["loc", "scale", "high"]
def __init__(self, loc=0.0, scale=1.0, high=float("inf"), validate_args=True):
batch_shape = lax.broadcast_shapes(
jnp.shape(loc),
jnp.shape(scale),
jnp.shape(high),
)
self.loc, self.scale, self.high = promote_shapes(loc, scale, high)
super().__init__(batch_shape, validate_args=validate_args)
def log_prob(self, value):
log_m = norm.logcdf(self.high, self.loc, self.scale)
log_p = norm.logpdf(value, self.loc, self.scale)
return jnp.where(value < self.high, log_p - log_m, -jnp.inf)
def sample(self, key, sample_shape=()):
shape = sample_shape + self.batch_shape
minval = jnp.finfo(jnp.result_type(float)).tiny
u = random.uniform(key, shape, minval=minval)
return self.icdf(u)
def icdf(self, u):
m = norm.cdf(self.high, self.loc, self.scale)
return self.loc + self.scale * ndtri(m * u)
@constraints.dependent_property
def support(self):
return constraints.less_than(self.high)
```
Let's try it out!
```python
def truncated_normal_model(num_observations, x=None):
loc = numpyro.sample("loc", dist.Normal())
scale = numpyro.sample("scale", dist.LogNormal())
high = numpyro.sample("high", dist.Normal())
with numpyro.plate("observations", num_observations):
numpyro.sample("x", RightTruncatedNormal(loc, scale, high), obs=x)
```
```python
num_observations = 1000
num_prior_samples = 100
prior = Predictive(truncated_normal_model, num_samples=num_prior_samples)
prior_samples = prior(PRIOR_RNG, num_observations)
```
As before, we run mcmc against some synthetic data.
We select any random sample from the prior as the ground truth:
```python
true_idx = 0
true_loc = prior_samples["loc"][true_idx]
true_scale = prior_samples["scale"][true_idx]
true_high = prior_samples["high"][true_idx]
true_x = prior_samples["x"][true_idx]
```
```python
plt.hist(true_x.copy())
plt.axvline(true_high, linestyle=":", color="k")
plt.xlabel("x")
plt.show()
```
Run MCMC and check the estimates:
```python
mcmc = MCMC(NUTS(truncated_normal_model), **MCMC_KWARGS)
mcmc.run(MCMC_RNG, num_observations, true_x)
mcmc.print_summary()
```
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:02<00:00, 1850.91it/s, 15 steps of size 8.88e-02. acc. prob=0.88]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 7434.51it/s, 5 steps of size 1.56e-01. acc. prob=0.78]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 7792.94it/s, 54 steps of size 5.41e-02. acc. prob=0.91]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 7404.07it/s, 9 steps of size 1.77e-01. acc. prob=0.78]
mean std median 5.0% 95.0% n_eff r_hat
high 0.88 0.01 0.88 0.88 0.89 590.13 1.01
loc -0.58 0.07 -0.58 -0.70 -0.46 671.04 1.01
scale 1.40 0.05 1.40 1.32 1.48 678.30 1.01
Number of divergences: 6310
Compare estimates against the ground truth:
```python
print(f"True high : {true_high:3.2f}")
print(f"True loc : {true_loc:3.2f}")
print(f"True scale: {true_scale:3.2f}")
```
True high : 0.88
True loc : -0.56
True scale: 1.45
Note that, even though we can recover good estimates for the true values,
we had a very high number of divergences. These divergences happen because
the data can be outside of the support that we are allowing with our priors.
To fix this, we can change the prior on `high` so that it depends on the observations:
```python
def truncated_normal_model_2(num_observations, x=None):
loc = numpyro.sample("loc", dist.Normal())
scale = numpyro.sample("scale", dist.LogNormal())
if x is None:
high = numpyro.sample("high", dist.Normal())
else:
# high is greater or equal to the max value in x:
delta = numpyro.sample("delta", dist.HalfNormal())
high = numpyro.deterministic("high", delta + x.max())
with numpyro.plate("observations", num_observations):
numpyro.sample("x", RightTruncatedNormal(loc, scale, high), obs=x)
```
```python
mcmc = MCMC(NUTS(truncated_normal_model_2), **MCMC_KWARGS)
mcmc.run(MCMC_RNG, num_observations, true_x)
mcmc.print_summary(exclude_deterministic=False)
```
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:03<00:00, 1089.76it/s, 15 steps of size 4.85e-01. acc. prob=0.93]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 8802.95it/s, 7 steps of size 5.19e-01. acc. prob=0.92]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 8975.35it/s, 3 steps of size 5.72e-01. acc. prob=0.89]
sample: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 8471.94it/s, 15 steps of size 3.76e-01. acc. prob=0.96]
mean std median 5.0% 95.0% n_eff r_hat
delta 0.01 0.01 0.00 0.00 0.01 6104.22 1.00
high 0.88 0.01 0.88 0.88 0.89 6104.22 1.00
loc -0.58 0.08 -0.58 -0.71 -0.46 3319.65 1.00
scale 1.40 0.06 1.40 1.31 1.49 3377.38 1.00
Number of divergences: 0
And the divergences are gone.
In practice, we usually want to understand how the data
would look like without the truncation. To do that in NumPyro,
there is no need of writing a separate model, we can simply
rely on the `condition` handler to push the truncation point to infinity:
```python
model_without_truncation = numpyro.handlers.condition(
truncated_normal_model,
{"high": float("inf")},
)
estimates = mcmc.get_samples().copy()
estimates.pop("high") # Drop to make sure these are not used
pred = Predictive(
model_without_truncation,
posterior_samples=estimates,
)
pred_samples = pred(PRED_RNG, num_observations=1000)
```
```python
# thin the samples for a faster histogram
samples_thinned = pred_samples["x"].ravel()[::1000]
```
```python
f, axes = plt.subplots(1, 2, figsize=(15, 5))
axes[0].hist(
samples_thinned.copy(), label="Untruncated posterior", bins=20, density=True
)
axes[0].axvline(true_high, linestyle=":", color="k", label="Truncation point")
axes[0].set_title("Untruncated posterior")
axes[0].legend()
axes[1].hist(
samples_thinned[samples_thinned < true_high].copy(),
label="Tail of untruncated posterior",
bins=20,
density=True,
)
axes[1].hist(true_x.copy(), label="Observed, truncated data", density=True, alpha=0.5)
axes[1].axvline(true_high, linestyle=":", color="k", label="Truncation point")
axes[1].set_title("Comparison to observed data")
axes[1].legend()
plt.show()
```
#### 5.3 Example: Left-truncated Poisson <a class="anchor" id="5.3"></a>
As a final example, we now implement a left-truncated Poisson distribution.
Note that a right-truncated Poisson could be reformulated as a particular
case of a categorical distribution, so we focus on the less trivial case.
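For completeness, here is a small sketch of that remark (again assuming the imports from the Setup section): a Poisson right-truncated to $\{0, \dots, K\}$ is just a categorical distribution over those $K + 1$ values, with probabilities proportional to the Poisson pmf.
```python
rate, K = 3.0, 10
logits = poisson.logpmf(jnp.arange(K + 1), rate)            # unnormalized log-probabilities
right_truncated_poisson = dist.Categorical(logits=logits)   # Categorical renormalizes internally
```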
**Class attributes**
For a truncated Poisson we need two parameters, the `rate` of the original Poisson
distribution and a `low` parameter to indicate the truncation point.
As this is a discrete distribution, we need to clarify whether or not the truncation point is included
in the support. In this tutorial, we'll take the convention that the truncation point `low`
_is_ part of the support.
The `low` parameter has to be given a 'non-negative integer' constraint. As it is a discrete parameter, it will not be possible to do inference for this parameter using [NUTS](https://num.pyro.ai/en/stable/mcmc.html#nuts). This is likely not a problem since the truncation point is often known in advance. However, if we really must infer the `low` parameter, it is possible to do so with [DiscreteHMCGibbs](https://num.pyro.ai/en/stable/mcmc.html#discretehmcgibbs) though one is limited to using priors with enumerate support.
Like in the case of the truncated normal, the support of this distribution will be defined as a property and not as a class attribute because it depends on the specific value of the `low` parameter.
```python
class LeftTruncatedPoisson(Distribution):
arg_constraints = {
"low": constraints.nonnegative_integer,
"rate": constraints.positive,
}
# ...
@constraints.dependent_property(is_discrete=True)
def support(self):
return constraints.integer_greater_than(self.low - 1)
```
The `is_discrete` argument passed in the `dependent_property` decorator is used to tell the inference algorithms which variables are discrete latent variables.
**The** `__init__` **method**
Here we just follow the same pattern as in the previous example.
```python
# ...
def __init__(self, rate=1.0, low=0, validate_args=None):
batch_shape = lax.broadcast_shapes(
jnp.shape(low), jnp.shape(rate)
)
self.low, self.rate = promote_shapes(low, rate)
super().__init__(batch_shape, validate_args=validate_args)
# ...
```
**The** `log_prob` **method**
The logic is very similar to the truncated normal case. But this time we are truncating on the left, so the correct normalization is the complementary cumulative density:
$$
\begin{align}
M = \sum_{n=L}^{\infty} p_Y(n) = 1 - \sum_{n=0}^{L - 1} p_Y(n) = 1 - F_Y(L - 1)
\end{align}
$$
For the code, we can rely on the `poisson` module that lives inside `jax.scipy.stats`.
```python
# ...
def log_prob(self, value):
m = 1 - poisson.cdf(self.low - 1, self.rate)
log_p = poisson.logpmf(value, self.rate)
return jnp.where(value >= self.low, log_p - jnp.log(m), -jnp.inf)
# ...
```
**The** `sample` **method**
Inverse-transform sampling also works for discrete distributions. The "inverse" cdf of a discrete distribution being defined as:
$$
\begin{align}
F^{-1}(u) = \min\left\{n\in \mathbb{N} \mid F(n) \ge u\right\}
\end{align}
$$
Or, in plain English, $F^{-1}(u)$ is the smallest number for which the cumulative density is at least $u$ (this is what the brute-force search below computes).
However, there's currently no implementation of $F^{-1}$ for the Poisson distribution in Jax (at least, at the moment of writing this tutorial). We have to rely on our own implementation. Fortunately, we can take advantage of the discrete nature of the distribution and easily implement a "brute-force" version that will work for most cases. The brute force approach consists of simply scanning all non-negative integers in order, one by one, until the value of the cumulative density exceeds the argument $u$. The implicit requirement is that we need a way to evaluate the cumulative density for the truncated distribution, but we can calculate that:
$$
\begin{align}
F_Z(z) &= \sum_{n=0}^z p_z(n)\newline
&= \frac{1}{M}\sum_{n=L}^z p_Y(n)\quad \text{assuming } z \ge L\newline
&= \frac{1}{M}\left(\sum_{n=0}^z p_Y(n) - \sum_{n=0}^{L-1}p_Y(n)\right)\newline
&= \frac{1}{M}\left(F_Y(z) - F_Y (L-1)\right)
\end{align}
$$
And, of course, the value of $F_Z(z)$ is equal to zero if $z < L$.
(As in the previous example, we are using $Y$ to denote the original, un-truncated variable, and we are using $Z$ to denote the truncated variable)
```python
# ...
def sample(self, key, sample_shape=()):
shape = sample_shape + self.batch_shape
minval = jnp.finfo(jnp.result_type(float)).tiny
u = random.uniform(key, shape, minval=minval)
return self.icdf(u)
def icdf(self, u):
def cond_fn(val):
n, cdf = val
return jnp.any(cdf < u)
def body_fn(val):
n, cdf = val
n_new = jnp.where(cdf < u, n + 1, n)
return n_new, self.cdf(n_new)
low = self.low * jnp.ones_like(u)
cdf = self.cdf(low)
n, _ = lax.while_loop(cond_fn, body_fn, (low, cdf))
return n.astype(jnp.result_type(int))
def cdf(self, value):
m = 1 - poisson.cdf(self.low - 1, self.rate)
f = poisson.cdf(value, self.rate) - poisson.cdf(self.low - 1, self.rate)
        return jnp.where(value >= self.low, f / m, 0)
```
A few comments with respect to the above implementation:
* Even with double precision, if `rate` is much less than `low`, the above code will not work. Due to numerical limitations, one obtains that `poisson.cdf(low - 1, rate)` is equal (or very close) to `1.0`. This makes it impossible to re-weight the distribution accurately because the normalization constant would be `0.0`.
* The brute-force `icdf` is of course very slow, particularly when `rate` is high. If you need faster sampling, one option would be to rely on a faster search algorithm. For example:
```python
def icdf_faster(self, u):
num_bins = 200 # Choose a reasonably large value
bins = jnp.arange(num_bins)
cdf = self.cdf(bins)
indices = jnp.searchsorted(cdf, u)
return bins[indices]
```
The obvious limitation here is that the number of bins has to be fixed a priori (jax does not allow for dynamically sized arrays). Another option would be to rely on an _approximate_ implementation, as proposed in [this article](https://people.maths.ox.ac.uk/gilesm/codes/poissinv/paper.pdf).
* Yet another alternative for the `icdf` is to rely on `scipy`'s implementation and make use of Jax's `host_callback` module. This feature allows you to use Python functions without having to code them in `Jax`. This means that we can simply make use of `scipy`'s implementation of the Poisson ICDF! From the last equation, we can write the _truncated_ icdf as:
$$
\begin{align}
F_Z^{-1}(u) = F_Y^{-1}(Mu + F_Y(L-1))
\end{align}
$$
And in python:
```python
def scipy_truncated_poisson_icdf(args): # Note: all arguments are passed inside a tuple
rate, low, u = args
rate = np.asarray(rate)
low = np.asarray(low)
u = np.asarray(u)
density = sp_poisson(rate)
low_cdf = density.cdf(low - 1)
normalizer = 1.0 - low_cdf
x = normalizer * u + low_cdf
return density.ppf(x)
```
In principle, it wouldn't be possible to use the above function in our NumPyro distribution because it is not coded in Jax. The `jax.experimental.host_callback.call` function solves precisely that problem. The code below shows you how to use it, but keep in mind that this is currently an experimental feature so you should expect changes to the module. See the `host_callback` [docs](https://jax.readthedocs.io/en/latest/jax.experimental.host_callback.html) for more details.
```python
# ...
def icdf_scipy(self, u):
result_shape = jax.ShapeDtypeStruct(
u.shape,
jnp.result_type(float) # int type not currently supported
)
result = jax.experimental.host_callback.call(
scipy_truncated_poisson_icdf,
(self.rate, self.low, u),
result_shape=result_shape
)
return result.astype(jnp.result_type(int))
# ...
```
Putting it all together, the implementation is as below:
```python
def scipy_truncated_poisson_icdf(args): # Note: all arguments are passed inside a tuple
rate, low, u = args
rate = np.asarray(rate)
low = np.asarray(low)
u = np.asarray(u)
density = sp_poisson(rate)
low_cdf = density.cdf(low - 1)
normalizer = 1.0 - low_cdf
x = normalizer * u + low_cdf
return density.ppf(x)
class LeftTruncatedPoisson(Distribution):
"""
A truncated Poisson distribution.
:param numpy.ndarray low: lower bound at which truncation happens
:param numpy.ndarray rate: rate of the Poisson distribution.
"""
arg_constraints = {
"low": constraints.nonnegative_integer,
"rate": constraints.positive,
}
def __init__(self, rate=1.0, low=0, validate_args=None):
batch_shape = lax.broadcast_shapes(jnp.shape(low), jnp.shape(rate))
self.low, self.rate = promote_shapes(low, rate)
super().__init__(batch_shape, validate_args=validate_args)
def log_prob(self, value):
m = 1 - poisson.cdf(self.low - 1, self.rate)
log_p = poisson.logpmf(value, self.rate)
return jnp.where(value >= self.low, log_p - jnp.log(m), -jnp.inf)
def sample(self, key, sample_shape=()):
shape = sample_shape + self.batch_shape
float_type = jnp.result_type(float)
minval = jnp.finfo(float_type).tiny
u = random.uniform(key, shape, minval=minval)
# return self.icdf(u) # Brute force
# return self.icdf_faster(u) # For faster sampling.
return self.icdf_scipy(u) # Using `host_callback`
def icdf(self, u):
def cond_fn(val):
n, cdf = val
return jnp.any(cdf < u)
def body_fn(val):
n, cdf = val
n_new = jnp.where(cdf < u, n + 1, n)
return n_new, self.cdf(n_new)
low = self.low * jnp.ones_like(u)
cdf = self.cdf(low)
n, _ = lax.while_loop(cond_fn, body_fn, (low, cdf))
return n.astype(jnp.result_type(int))
def icdf_faster(self, u):
num_bins = 200 # Choose a reasonably large value
bins = jnp.arange(num_bins)
cdf = self.cdf(bins)
indices = jnp.searchsorted(cdf, u)
return bins[indices]
def icdf_scipy(self, u):
result_shape = jax.ShapeDtypeStruct(u.shape, jnp.result_type(float))
result = jax.experimental.host_callback.call(
scipy_truncated_poisson_icdf,
(self.rate, self.low, u),
result_shape=result_shape,
)
return result.astype(jnp.result_type(int))
def cdf(self, value):
m = 1 - poisson.cdf(self.low - 1, self.rate)
f = poisson.cdf(value, self.rate) - poisson.cdf(self.low - 1, self.rate)
return jnp.where(value >= self.low, f / m, 0)
@constraints.dependent_property(is_discrete=True)
def support(self):
return constraints.integer_greater_than(self.low - 1)
```
Let's try it out!
```python
def discrete_distplot(samples, ax=None, **kwargs):
"""
Utility function for plotting the samples as a barplot.
"""
x, y = np.unique(samples, return_counts=True)
y = y / sum(y)
if ax is None:
ax = plt.gca()
ax.bar(x, y, **kwargs)
return ax
```
```python
def truncated_poisson_model(num_observations, x=None):
low = numpyro.sample("low", dist.Categorical(0.2 * jnp.ones((5,))))
rate = numpyro.sample("rate", dist.LogNormal(1, 1))
with numpyro.plate("observations", num_observations):
numpyro.sample("x", LeftTruncatedPoisson(rate, low), obs=x)
```
**Prior samples**
```python
# -- prior samples
num_observations = 1000
num_prior_samples = 100
prior = Predictive(truncated_poisson_model, num_samples=num_prior_samples)
prior_samples = prior(PRIOR_RNG, num_observations)
```
**Inference**
As in the case for the truncated normal, here it is better to replace
the prior on the `low` parameter so that it is consistent with the observed data.
We'd like to have a categorical prior on `low` (so that we can use [DiscreteHMCGibbs](https://num.pyro.ai/en/stable/mcmc.html#discretehmcgibbs))
whose highest category is equal to the minimum value of `x` (so that prior and data are consistent).
However, we have to be careful in the way we write such a model because Jax does not allow for dynamically sized arrays. A simple way of coding this model is to specify the number of categories as an argument:
```python
def truncated_poisson_model(num_observations, x=None, k=5):
zeros = jnp.zeros((k,))
low = numpyro.sample("low", dist.Categorical(logits=zeros))
rate = numpyro.sample("rate", dist.LogNormal(1, 1))
with numpyro.plate("observations", num_observations):
numpyro.sample("x", LeftTruncatedPoisson(rate, low), obs=x)
```
```python
# Take any prior sample as the true process.
true_idx = 6
true_low = prior_samples["low"][true_idx]
true_rate = prior_samples["rate"][true_idx]
true_x = prior_samples["x"][true_idx]
discrete_distplot(true_x.copy());
```
To do inference, we set `k = x.min() + 1`. Note also the use of [DiscreteHMCGibbs](https://num.pyro.ai/en/stable/mcmc.html#discretehmcgibbs):
```python
mcmc = MCMC(DiscreteHMCGibbs(NUTS(truncated_poisson_model)), **MCMC_KWARGS)
mcmc.run(MCMC_RNG, num_observations, true_x, k=true_x.min() + 1)
mcmc.print_summary()
```
sample: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:04<00:00, 808.70it/s, 3 steps of size 9.58e-01. acc. prob=0.93]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 5916.30it/s, 3 steps of size 9.14e-01. acc. prob=0.93]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 5082.16it/s, 3 steps of size 9.91e-01. acc. prob=0.92]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 6511.68it/s, 3 steps of size 8.66e-01. acc. prob=0.94]
mean std median 5.0% 95.0% n_eff r_hat
low 4.13 2.43 4.00 0.00 7.00 7433.79 1.00
rate 18.16 0.14 18.16 17.96 18.40 3074.46 1.00
```python
true_rate
```
DeviceArray(18.2091848, dtype=float64)
As before, one needs to be extra careful when estimating the truncation point.
If the truncation point is known, it is best to provide it.
```python
model_with_known_low = numpyro.handlers.condition(
truncated_poisson_model, {"low": true_low}
)
```
And note we can use [NUTS](https://num.pyro.ai/en/stable/mcmc.html#nuts) directly because there's no need to infer any discrete parameters.
```python
mcmc = MCMC(
NUTS(model_with_known_low),
**MCMC_KWARGS,
)
```
```python
mcmc.run(MCMC_RNG, num_observations, true_x)
mcmc.print_summary()
```
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:03<00:00, 1185.13it/s, 1 steps of size 9.18e-01. acc. prob=0.93]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 5786.32it/s, 3 steps of size 1.00e+00. acc. prob=0.92]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 5919.13it/s, 1 steps of size 8.62e-01. acc. prob=0.94]
sample: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4000/4000 [00:00<00:00, 7562.36it/s, 3 steps of size 9.01e-01. acc. prob=0.93]
mean std median 5.0% 95.0% n_eff r_hat
rate 18.17 0.13 18.17 17.95 18.39 3406.81 1.00
Number of divergences: 0
**Removing the truncation**
```python
model_without_truncation = numpyro.handlers.condition(
truncated_poisson_model,
{"low": 0},
)
pred = Predictive(model_without_truncation, posterior_samples=mcmc.get_samples())
pred_samples = pred(PRED_RNG, num_observations)
thinned_samples = pred_samples["x"][::500]
```
```python
discrete_distplot(thinned_samples.copy());
```
### References and related material <a class="anchor" id="references"></a>
1. [Wikipedia page on inverse transform sampling](https://en.wikipedia.org/wiki/Inverse_transform_sampling)
2. [David MacKay's book on information theory](http://www.inference.org.uk/itprnn/book.pdf)
3. <a class="anchor" id="ref3"></a>[Composite models with underlying folded distributions](https://www.sciencedirect.com/science/article/pii/S0377042720306427)
4. <a class="anchor" id="ref4"></a>[Application of the generalized folded-normal distribution to the process capability measures](https://link.springer.com/article/10.1007/s00170-003-2043-x)
5. [Pyro SVI tutorial part 3](https://pyro.ai/examples/svi_part_iii.html)
6. [Approximation of the inverse Poisson cumulative distribution function](https://people.maths.ox.ac.uk/gilesm/codes/poissinv/paper.pdf)
|
{"hexsha": "27cc248fb1a13d61d8c9cb619f6ee8619447d493", "size": 162336, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "notebooks/source/truncated_distributions.ipynb", "max_stars_repo_name": "karm-patel/numpyro", "max_stars_repo_head_hexsha": "34e0cdf4fa0ab9a0300a0d894d6758419fb46f40", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/source/truncated_distributions.ipynb", "max_issues_repo_name": "karm-patel/numpyro", "max_issues_repo_head_hexsha": "34e0cdf4fa0ab9a0300a0d894d6758419fb46f40", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/source/truncated_distributions.ipynb", "max_forks_repo_name": "karm-patel/numpyro", "max_forks_repo_head_hexsha": "34e0cdf4fa0ab9a0300a0d894d6758419fb46f40", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 82.9514563107, "max_line_length": 29360, "alphanum_fraction": 0.7649627932, "converted": true, "num_tokens": 15464}
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import division
import sys
import networkx as nx
import numpy as np
reload(sys)
sys.setdefaultencoding('utf8')
path4 = '/Users/amy/Desktop/rls_14/0729/'
nodes = np.loadtxt(path4 + 'rls14_nodes0729.csv', skiprows=1, delimiter=",", dtype=str)
edges = np.loadtxt(path4 + 'rls14_edges0729.csv', skiprows=1, delimiter=",", dtype=str)
print 'nodes', nodes
print 'edges', edges
# Build a gexf-format file: nodes are (company, repo); an edge means the company has
# committed to the repo, weighted by the number of commits
G = nx.Graph()
for j in xrange(len(nodes)):
G.add_node(nodes[j][0])
G.nodes[nodes[j][0]]['group'] = int(nodes[j][1])
G.nodes[nodes[j][0]]['cluster_ID'] = int(nodes[j][2])
print 'j', j
for i in xrange(len(edges)):
if edges[i][0] in G and edges[i][1] in G:
G.add_edge(edges[i][0], edges[i][1], weight= int(edges[i][2]))
print 'i', i
nx.write_gexf(G, path4 + 'coms_repos14.gexf')
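# Optional sanity check (a hedged sketch, not part of the original workflow): reload the
# exported file with networkx and confirm the node/edge counts match the graph just written.
H = nx.read_gexf(path4 + 'coms_repos14.gexf')
print 'reloaded nodes:', H.number_of_nodes(), 'edges:', H.number_of_edges()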
|
{"hexsha": "8391cd40867b9bffd1340c7e466f61795a1a7db6", "size": 910, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/form_gexf_data.py", "max_stars_repo_name": "yuxia-zhang/company_collaboration_in_OSS", "max_stars_repo_head_hexsha": "2106d45df75d89873c0364eb70fded8541c157b6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/form_gexf_data.py", "max_issues_repo_name": "yuxia-zhang/company_collaboration_in_OSS", "max_issues_repo_head_hexsha": "2106d45df75d89873c0364eb70fded8541c157b6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/form_gexf_data.py", "max_forks_repo_name": "yuxia-zhang/company_collaboration_in_OSS", "max_forks_repo_head_hexsha": "2106d45df75d89873c0364eb70fded8541c157b6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-29T08:57:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-29T08:57:18.000Z", "avg_line_length": 26.0, "max_line_length": 87, "alphanum_fraction": 0.6615384615, "include": true, "reason": "import numpy,import networkx", "num_tokens": 307}
|
# single coil reconstruction class
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from training import misc
class Reconstructor:
def __init__(self):
# inference settings
self.num_steps = 1000 # number of optimization / inference steps
self.dlatent_avg_samples = 10000 # number of latent samples to be used to calculate average latent
self.initial_learning_rate = 0.1 # initial learning rate to reach with ramp up
self.lr_rampdown_length = 0.25 # learning rate ramp down length ( 0.25 * 1000 = last 250 steps)
        self.lr_rampup_length = 0.05         # learning rate ramp up length (0.05 * 1000 = first 50 steps)
# main settings
self.verbose = False # enable prints & reports for user
self.clone_net = True # clone network (beneficial for weight optimization)
self._cur_step = None # current step of inference
#************************************************************************************************************
# fourier operations defined for numpy arrays
# single-array centered fft
def fft2c_np(self,im):
return np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(im)))
# single-array centered ifft
def ifft2c_np(self,d):
return np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(d)))
#************************************************************************************************************
# fourier operations defined for TensorFlow tensors
# single-array centered fft
def fft2c(self, im):
return tf.signal.fftshift(tf.signal.fft2d(tf.signal.ifftshift(im)))
# single-array centered ifft
def ifft2c(self, d):
return tf.signal.fftshift(tf.signal.ifft2d(tf.signal.ifftshift(d)))
#************************************************************************************************************
def _info(self, *args):
if self.verbose:
print("Reconstructor: ", *args)
#************************************************************************************************************
# configure network and optimization environment including loss and variables
def set_network(self, Gs, minibatch_size = 1):
assert minibatch_size == 1
#Gs.reset_vars()
self._Gs = Gs
self.initial_Gs = Gs.clone()
print(Gs)
self._minibatch_size = minibatch_size
if self._Gs is None:
return
if self.clone_net:
self._Gs = self._Gs.clone()
# find average latent vector to be starting point of the optimization
self._info("Initializing average latent using %d samples..." % self.dlatent_avg_samples)
latent_samples = np.random.RandomState(123).randn(self.dlatent_avg_samples, *self._Gs.input_shapes[0][1:])
# latent positional encoding (not important at this step)
latent_pos = np.ones([16,32])
dlatent_samples = self._Gs.components.mapping.run(latent_samples, None,latent_pos,None,is_training=False)[:, :, :1, :] # [N, 1, 512]
# average latent vectors
self._dlatent_avg = np.mean(dlatent_samples, axis = 0, keepdims = True) # [1, 1, 512]
#************************************************************************************************************
# construct noise variables and initializer ops.
self._noise_vars = []
noise_init_ops = []
noise_normalize_ops = []
while True:
n = "G_synthesis/noise%d" % len(self._noise_vars)
if n not in self._Gs.vars:
break
v = self._Gs.vars[n]
self._noise_vars.append(v)
noise_init_ops.append(tf.assign(v, tf.random_normal(tf.shape(v), dtype = tf.float32)))
noise_mean = tf.reduce_mean(v)
noise_std = tf.reduce_mean((v - noise_mean)**2)**0.5
noise_normalize_ops.append(tf.assign(v, (v - noise_mean) / noise_std))
self._info(n, v)
self._noise_init_op = tf.group(*noise_init_ops)
self._noise_normalize_op = tf.group(*noise_normalize_ops)
#************************************************************************************************************
# construct weight tensors and initializer ops.
self._weight_vars = []
weight_init_ops = []
self.weights_ops = []
self.initial_weights = []
for w in self._Gs.vars:
# find convolutional layer weights from TensorFlow graph to optimize
if 'Conv1/weight' in w:
# print target weights to be used in inference
print(w)
m = self._Gs.vars[w]
# save a copy of each weight to initialize at the next image
m_copy = self.initial_Gs.vars[w]
self.initial_weights.append(m_copy)
self.weights_ops.append(m)
weight_init_ops.append(tf.assign(m, m_copy))
self._weight_init_op = tf.group(*weight_init_ops)
#************************************************************************************************************
# necessary settings and image output graph
self.mask = tf.Variable(tf.zeros([256,256], dtype=tf.complex64),dtype=tf.complex64)
# hold intermediate latent vectors in a single TensorFlow variable ( 1 global + k local components)
self._dlatents_var = tf.Variable(tf.zeros([1,17,15,32]), name = "dlatents_var")
# latent positional embeddings (not important at this step)
self.latent_pos = tf.Variable(tf.zeros([16,32]))
# get generated images from synthesizer to graph
self._images_expr, self.attention_maps = self._Gs.components.synthesis.get_output_for(self._dlatents_var, self.latent_pos,None,randomize_noise = False, use_pos=False)
# convert generated magnitude images to [0, 1] range
proc_images_expr = (self._images_expr + 1) / 2
#************************************************************************************************************
# build loss graph
self._target_images_var = tf.Variable(tf.zeros(proc_images_expr.shape), name = "target_images_var")
# convert target images to complex tensors
self.target_images_var_complex = tf.cast(self._target_images_var, dtype=tf.complex64)
# take centered 2d fft of target images
self.full_kspace_org_image = self.fft2c(self.target_images_var_complex[0,0,:,:])
# undersample target images
self.undersampled_kspace_org_image = tf.math.multiply(self.full_kspace_org_image, self.mask)
# same operations as above for generated images
self.proc_images_expr_complex = tf.cast(proc_images_expr, dtype=tf.complex64)
self.full_kspace_gen_image = self.fft2c(self.proc_images_expr_complex[0,0,:,:])
self.undersampled_kspace_gen_image = tf.math.multiply(self.full_kspace_gen_image,self.mask)
self._loss = tf.reduce_mean(tf.abs(self.undersampled_kspace_org_image - self.undersampled_kspace_gen_image))
#************************************************************************************************************
# set up the optimizer
self._lrate_in = tf.placeholder(tf.float32, [], name='lrate_in') # adjust learning rate variable to be able to change in every step
self._opt = dnnlib.tflib.Optimizer(learning_rate=self._lrate_in) # initalize optimizer
self._opt.register_gradients(self._loss, [self._dlatents_var]
+ self.weights_ops + self._noise_vars) # draw gradient descent way by registering gradients
self._opt_step = self._opt.apply_updates() # define a single optimization step
#************************************************************************************************************
def start(self, target_imgs, mask):
assert self._Gs is not None
# convert target images' range to [0, 1]
self.target_images_initial = target_imgs.copy()
target_imgs = np.asarray(target_imgs, dtype = "float32")
target_imgs = (target_imgs + 1) / 2
print(target_imgs.shape)
# initialize optimization state.
tflib.set_vars({self._target_images_var: target_imgs,
self._dlatents_var: np.tile(self._dlatent_avg, (self._minibatch_size, 1, 15, 1)), self.mask: mask, self.latent_pos:np.random.normal(0,1,[16,32])})
tflib.run(self._noise_init_op)
tflib.run(self._weight_init_op)
self._opt.reset_optimizer_state()
self._cur_step = 0
#************************************************************************************************************
def step(self):
assert self._cur_step is not None
if self._cur_step >= self.num_steps:
return
if self._cur_step == 0:
self._info("Running...")
# learning schedule hyperparameters.
t = self._cur_step / self.num_steps
lr_ramp = min(1.0, (1.0 - t) / self.lr_rampdown_length)
lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
lr_ramp = lr_ramp * min(1.0, t / self.lr_rampup_length)
learning_rate = self.initial_learning_rate * lr_ramp
# run a single step (normalize noises back to normal)
feed_dict = {self._lrate_in: learning_rate}
_, loss_value = tflib.run([self._opt_step, self._loss], feed_dict)
tflib.run(self._noise_normalize_op)
self._cur_step += 1
#************************************************************************************************************
def get_cur_step(self):
return self._cur_step
def get_mask(self):
return tflib.run(self.mask)
def get_dlatents(self):
        return tflib.run(self._dlatents_var)
def get_noises(self):
return tflib.run(self._noise_vars)
def untouched_images(self):
return tflib.run(self._images_expr)
def get_attention_maps(self):
return tflib.run(self.attention_maps)
# perform data consistency and return images
def get_images(self):
gen_im = tflib.run(self._images_expr)
# get current mask
mask = self.get_mask()
# adjust range of images to [0,1] before data-consistency
image_adjusted = misc.adjust_dynamic_range(gen_im[0,0,:,:], [np.min(gen_im[0,0,:,:]), np.max(gen_im[0,0,:,:])], [0,1])
target_images_ = self.target_images_initial.copy()
target_images_ = (target_images_ + 1) / 2
kspace__ = self.fft2c_np(image_adjusted)
target_images_var_complex = np.complex64(target_images_[0,0,:,:])
full_kspace_org_image = self.fft2c_np(target_images_var_complex)
# apply data-consistency
kspace__[mask>0] = full_kspace_org_image[mask>0]
images_ = np.float32(np.abs(self.ifft2c_np(kspace__)))
images_[images_>1]=1
# make non-brain regions zero (not necessary)
images_[:,0:56] = 0
images_[:,200:256] = 0
return images_[np.newaxis][np.newaxis]
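#************************************************************************************************************
# Hedged usage sketch (not part of the original training/inference scripts): illustrates the intended call
# order of the Reconstructor class above. How the pretrained generator `Gs` is loaded, and how the target
# image and the binary k-space mask are obtained, are assumptions of this sketch.
def _example_reconstruction_loop(Gs, target_images, mask):
    # target_images: float array shaped like the generator output, e.g. [1, 1, 256, 256] in [-1, 1]
    # mask: complex64 array of shape [256, 256] with ones at sampled k-space locations
    rec = Reconstructor()
    rec.set_network(Gs)                        # clone network, build loss graph and optimizer
    rec.start(target_images, mask)             # initialize latents, noises, weights and the mask
    while rec.get_cur_step() < rec.num_steps:
        rec.step()                             # one optimization step on latents / weights / noises
    return rec.get_images()                    # data-consistent reconstruction, shape [1, 1, 256, 256]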
|
{"hexsha": "e57e046a70235128910704cebf19fb722e4b4469", "size": 11465, "ext": "py", "lang": "Python", "max_stars_repo_path": "reconstruction_single_coil.py", "max_stars_repo_name": "icon-lab/SLATER", "max_stars_repo_head_hexsha": "4ec15a16f9cc204150c2cc48c4c3e60de2b7cbfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-08-11T18:27:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T07:48:46.000Z", "max_issues_repo_path": "reconstruction_single_coil.py", "max_issues_repo_name": "icon-lab/SLATER", "max_issues_repo_head_hexsha": "4ec15a16f9cc204150c2cc48c4c3e60de2b7cbfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reconstruction_single_coil.py", "max_forks_repo_name": "icon-lab/SLATER", "max_forks_repo_head_hexsha": "4ec15a16f9cc204150c2cc48c4c3e60de2b7cbfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2022-02-17T03:08:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T09:42:54.000Z", "avg_line_length": 46.0441767068, "max_line_length": 174, "alphanum_fraction": 0.5558656782, "include": true, "reason": "import numpy", "num_tokens": 2473}
|
import time
from pathlib import Path
from collections import deque
from typing import Optional
import numpy as np
from lib.opengl.core.base import *
from lib.opengl import *
from lib.opengl.postproc import PostProcNode
from lib.gen.automaton import ClassicAutomaton
ASSET_PATH = Path(__file__).resolve().parent.parent.parent / "assets"
def create_render_settings() -> RenderSettings:
return RenderSettings(
640, 400,
#mag_filter=gl.GL_NEAREST,
)
class WangEdge2:
TOP = 1
RIGHT = 2
BOTTOM = 4
LEFT = 8
STRINGS = {
TOP: "T",
RIGHT: "R",
BOTTOM: "B",
LEFT: "L",
}
# (y, x)
OFFSET = {
TOP: (-1, 0),
RIGHT: (0, 1),
BOTTOM: (1, 0),
LEFT: (0, -1),
}
IDX_TO_TILE = {
0: 0,
TOP: 4,
RIGHT: 1,
BOTTOM: 12,
LEFT: 3,
TOP | RIGHT: 5,
TOP | LEFT: 7,
BOTTOM | RIGHT: 13,
BOTTOM | LEFT: 15,
TOP | BOTTOM: 8,
LEFT | RIGHT: 2,
LEFT | BOTTOM | RIGHT: 14,
LEFT | TOP | RIGHT: 6,
TOP | BOTTOM | RIGHT: 9,
TOP | BOTTOM | LEFT: 11,
TOP | RIGHT | BOTTOM | LEFT: 10,
}
@classmethod
def tile_idx_to_string(cls, tile: int) -> str:
s = []
for key, name in cls.STRINGS.items():
if tile & key:
s.append(name)
return ",".join(s)
@classmethod
def get_tile_map(cls, map: np.ndarray) -> np.ndarray:
h, w = map.shape
tmap = np.ndarray(map.shape, dtype="int32")
tmap.fill(cls.IDX_TO_TILE[0])
for y in range(h):
for x in range(w):
if map[y][x]:
tile_idx = cls.TOP | cls.RIGHT | cls.BOTTOM | cls.LEFT
else:
tile_idx = 0
for key, offset in cls.OFFSET.items():
my = y + offset[0]
mx = x + offset[1]
if my >= h:
my = h - my
if mx >= w:
mx = w - mx
tile_idx |= key * int(map[my][mx])
# print(x, y, cls.tile_idx_to_string(tile_idx))
if tile_idx in cls.IDX_TO_TILE:
tmap[y][x] = cls.IDX_TO_TILE[tile_idx]
return tmap
class Map:
def __init__(
self,
width: int,
height: int,
preset: Optional[dict] = None,
):
preset = dict() if preset is None else preset
self.width = width
self.height = height
self._preset = preset
self.automaton = ClassicAutomaton(
width=self.width,
height=self.height,
born=preset.get("born") or {1, 2, 3},
survive=preset.get("survive") or {6},
)
#self.binary_map: np.ndarray = np.zeros([self.height, self.width])
@property
def binary_map(self) -> np.ndarray:
return self.automaton.cells
def tile_map(self) -> np.ndarray:
return WangEdge2.get_tile_map(self.binary_map)
def init_random(self):
self.automaton.init_random(
probability=self._preset.get("probability") or .3,
seed=self._preset.get("seed") or 23,
)
def step(self, count: int = 1):
for i in range(count):
self.automaton.step()
        if False:
            # optional smoothing of the map (currently disabled): run a few extra steps
            # with different born/survive rules to round off jagged edges
            self.automaton.born = set()
            self.automaton.survive = {3, 4, 5, 6, 7, 8}
            for i in range(5):
                self.automaton.step()
class TiledMapNode(PostProcNode):
def __init__(self, map: Map, name: str = "tiled"):
super().__init__(name)
self.map = map
self.map_texture = Texture2D()
self.last_step_time = 0
self.queue = deque()
#self.map_thread = Thread(target=self._map_thread_loop)
#self.map_thread.start()
def get_code(self):
return """
#line 160
const ivec2 tile_size = ivec2(32, 32);
const ivec2 tile_map_size = ivec2(4, 4);
vec2 rotate(in vec2 v, in float degree) {
float sa = sin(degree), ca = cos(degree);
return vec2(
v.x * ca - v.y * sa,
v.x * sa + v.y * ca
);
}
//vec4 tile_texture(int tile_idx,
void mainImage(out vec4 fragColor, in vec2 fragCoord, in vec2 texCoord) {
vec2 uv = (fragCoord / u_resolution.y);
uv.x -= .5 * u_resolution.y / u_resolution.x;
vec2 map_pos_f = uv;
map_pos_f = rotate(map_pos_f - .5, sin(u_time)*0.02) + .5;
map_pos_f *= 10. + 5. * sin(u_time/3.);
map_pos_f.y -= u_time * .9;
ivec2 map_pos = ivec2(map_pos_f);
map_pos.y = 20 - map_pos.y;
ivec4 map = ivec4(texelFetch(u_tex4, map_pos, 0));
vec2 tile_pos = fract(map_pos_f);
// when using bilinear mag filter, this is needed
//tile_pos = tile_pos * (float(tile_size - 1.) + .5) / float(tile_size);
//int tile_idx = int(map_pos.y + map_pos.x) % (tile_map_size.x * tile_map_size.y);
int tile_idx = map.x;
tile_pos += vec2(tile_idx % tile_map_size.x, (tile_idx / tile_map_size.x));
fragColor = texture(u_tex1, tile_pos / tile_map_size);
//fragColor = texture(u_tex2, uv);
if (uv.x < 0. || uv.x > 1. || uv.y < 0. || uv.y > 1.)
fragColor.xyz *= 0.1;
}
"""
def num_multi_sample(self) -> int:
return 32
def has_depth_output(self) -> bool:
return False
def create(self, render_settings: RenderSettings):
super().create(render_settings)
self.map.step(100)
self.map_texture.create()
self.map_texture.bind()
self._upload_map_tex()
def release(self):
super().release()
self.map_texture.release()
def render(self, rs: RenderSettings, pass_num: int):
self.map_texture.set_active_texture(3)
self.map_texture.bind()
if self.queue:
self._upload_map_tex(self.queue.pop())
#if rs.time - self.last_step_time > 1.:
# self.last_step_time = rs.time
# self.map.step(2)
# self._upload_map_tex()
self.map_texture.set_parameter(gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
super().render(rs, pass_num)
def _upload_map_tex(self, float_array: Optional[np.ndarray] = None):
if float_array is None:
float_array = self.map.tile_map().astype("float32")
self.map_texture.upload_numpy(
float_array,
width=self.map.width, input_format=gl.GL_RED, gpu_format=gl.GL_R32F,
)
def _map_thread_loop(self):
while True:
time.sleep(1)
self.map.step(2)
#self._upload_map_tex()
self.queue.append(self.map.tile_map().astype("float32"))
def create_render_graph():
graph = RenderGraph()
tile_tex = graph.add_node(Texture2DNode(
ASSET_PATH /
"w2e_curvy.png"
#"cr31" / "wang2e.png"
#"cr31" / "border.png"
#"cr31" / "quad.png"
#"cr31" / "octal.png"
#"cr31" / "pipe1.png"
#"cr31" / "mininicular.png"
))
map = Map(32, 32)
map.init_random()
print(map.tile_map())
renderer = graph.add_node(TiledMapNode(map))
graph.connect(tile_tex, 0, renderer, mag_filter=gl.GL_NEAREST)
return graph
if __name__ == "__main__":
map = np.array([
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
], dtype=int)
print(map, "\n")
print(WangEdge2.get_tile_map(map))
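    # Hedged extra check of the bitmask helpers above: a cell whose top and left neighbours
    # are filled maps through IDX_TO_TILE to tile index 7.
    print(WangEdge2.tile_idx_to_string(WangEdge2.TOP | WangEdge2.LEFT), "->",
          WangEdge2.IDX_TO_TILE[WangEdge2.TOP | WangEdge2.LEFT])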
#print(np.convolve(map.map.flatten(), conv_mask.flatten()).reshape([5, 5]))
|
{"hexsha": "196dfe73051a92968f5d3394c4ee2d5dacc655cd", "size": 8105, "ext": "py", "lang": "Python", "max_stars_repo_path": "sketches/graphs/tiled.py", "max_stars_repo_name": "defgsus/thegame", "max_stars_repo_head_hexsha": "38a627d9108f1418b94b08831fd640dd87fbba83", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-05T11:49:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-05T11:49:26.000Z", "max_issues_repo_path": "sketches/graphs/tiled.py", "max_issues_repo_name": "defgsus/thegame", "max_issues_repo_head_hexsha": "38a627d9108f1418b94b08831fd640dd87fbba83", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sketches/graphs/tiled.py", "max_forks_repo_name": "defgsus/thegame", "max_forks_repo_head_hexsha": "38a627d9108f1418b94b08831fd640dd87fbba83", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6395759717, "max_line_length": 94, "alphanum_fraction": 0.5143738433, "include": true, "reason": "import numpy", "num_tokens": 2162}
|
from numpy.random import uniform, seed
from sys import argv, stdout, stderr
# Process arguments
if len(argv) < 3:
stderr.write("Usage: {} Q_LOW Q_HIGH [SEED_FILE]\n"
"Sample from a uniform distribution with "
"lower bound Q_LOW and upper bound Q_HIGH\n"
"If given SEED_FILE, read seed from file".format(argv[0]));
exit(1)
q_low = float(argv[1])
q_high = float(argv[2])
if len(argv) == 4:
# Open seed file
try:
seed_file = open(argv[3])
current_seed = int(seed_file.read())
seed_file.close()
    except (IOError, ValueError):
stderr.write("Error: could not read seed from file {}\n".format(argv[3]))
exit(1)
# Seed random number generator
seed(current_seed)
# Increment seed in seed file
seed_file = open(argv[3], 'w')
seed_file.write(str(current_seed + 1))
seed_file.close()
# Sample from uniform distribution
q_sampled = uniform(low=q_low, high=q_high)
# Print sampled parameter
stdout.write("{}\n".format(q_sampled))
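# Example invocation (a hedged sketch; the seed file name is hypothetical):
#   python stateful-prior-sampler.py 0.0 1.0 seed.txt
# prints a single draw from U(0.0, 1.0) and increments the integer stored in seed.txt,
# so repeated calls produce a reproducible but non-identical sequence of samples.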
|
{"hexsha": "5595ac75a65cf3fda17be80a3ecd79206f599ce3", "size": 1022, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/seed/stateful-prior-sampler.py", "max_stars_repo_name": "ThomasPak/pakman", "max_stars_repo_head_hexsha": "8d74f3b7162d04a1c72fbda9517ed19063f1a246", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-11-09T15:07:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-06T14:46:20.000Z", "max_issues_repo_path": "tests/seed/stateful-prior-sampler.py", "max_issues_repo_name": "ThomasPak/pakman", "max_issues_repo_head_hexsha": "8d74f3b7162d04a1c72fbda9517ed19063f1a246", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-16T18:12:52.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-31T19:00:09.000Z", "max_forks_repo_path": "tests/seed/stateful-prior-sampler.py", "max_forks_repo_name": "ThomasPak/pakman", "max_forks_repo_head_hexsha": "8d74f3b7162d04a1c72fbda9517ed19063f1a246", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-25T06:11:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-12T19:02:32.000Z", "avg_line_length": 25.55, "max_line_length": 81, "alphanum_fraction": 0.6438356164, "include": true, "reason": "from numpy", "num_tokens": 259}
|
'''
Module with functions for converting audio files across several formats
'''
# imports of built-in packages
import os
import sys
import csv
import re
# imports from package modules
from .common_file_ops import path_splitter, run_exec, img_fmt_converter
from .config import read_config
## get paths to required executables from config.json
exec_paths = read_config()
TIMIDITY_PATH = exec_paths["TIMIDITY_PATH"]
WAON_PATH = exec_paths["WAON_PATH"]
ARSS_PATH = exec_paths["ARSS_PATH"]
PRETTY_MIDI_EXAMPLES_PATH = exec_paths["PRETTY_MIDI_EXAMPLES_PATH"]
sys.path.append(PRETTY_MIDI_EXAMPLES_PATH)
# imports of external packages
## imports for array manipulation, image processing, audio output
import numpy as np
from PIL import Image
import scipy.io.wavfile
## imports for handling midi files
import pretty_midi
from pretty_midi_examples import reverse_pianoroll,chiptunes
import py_midicsv # package with functions used to convert between midi & csv
# ------------------------------------------------------------------------------------------------- #
# Functions for encoding/decoding integer values [used for transcribing piano roll note & velocity info to text files]
# For compatability with the functions performing the transcription, custom encoding/decoding functions should have a signature like hex2 & operate on two modes as seen in hex2
def hex2(n,direction):
"""Converts an integer to its 2-digit hexadecimal form (string) or vice versa.
Parameters
----------
n : int/str
a positive integer or 2-digit hexadecimal string.
direction : str
direction of conversion: int to str, 'reverse': str to int.
Returns
-------
out : str/int
hexadecimal string of input int (or) base-10 int of input hexadecimal string.
"""
if direction == 'forward':
out = format(n,'02x').upper()
elif direction == 'reverse':
out = int(n,16)
return out
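# Hedged round-trip example of the encoder/decoder contract described above: values in
# [0,127] map to fixed-width hexadecimal strings and back (e.g. 60 <-> '3C').
def _hex2_roundtrip_ok(n=60):
    return hex2(hex2(n, 'forward'), 'reverse') == n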
# ------------------------------------------------------------------------------------------------- #
# Utility functions
def instrument_to_insCode(instrument):
"""Given a pretty_midi.Instrument object, returns a concise string code representing its program number & its is_drum parameter.
Parameters
----------
instrument : pretty_midi.Instrument
object of pretty_midi.Instrument class.
Returns
-------
code : str
concatenation of MIDI program code (as decimal) and ("d" if instrument.is_drum is True else "n").
"""
code = str(instrument.program)
if instrument.is_drum:
code += "d"
else:
code += "n"
return code
def check_insCode(insCode,print_res=False):
"""Checks if a given instrument code string is valid, optionally prints the result of the check.
Parameters
----------
insCode : str
string to check if it represents a valid instrument code.
print_res : bool, optional
prints the result of checking the input string, by default False.
Returns
-------
res : bool
whether or not the input insCode was valid.
"""
prog,drum = int(insCode[:-1]),insCode[-1:]
insName = ''
try:
# last character must be 'd' or 'n', if so, attempt to get the program name
if drum == 'n':
insName = pretty_midi.program_to_instrument_name(prog)
elif drum == 'd':
insName = pretty_midi.note_number_to_drum_name(prog+35)
except ValueError:
insName = '' # if the above fails, it means insCode is invalid.
res_msg = "insCode is invalid and doesn't refer to any instrument."
res = False
if insName:
res = True
res_msg = "insCode {} refers to the instrument {}.".format(insCode,insName)
if print_res:
print(res_msg)
return res
def insCode_to_instrument(insCode):
"""Given the string code for an instrument, returns the corresponding pretty_midi.Instrument object.
Parameters
----------
insCode : str
string which encodes the program number and is_drum bool of the Instrument.
Returns
-------
instrument : pretty_midi.Instrument
pretty_midi.Instrument object corresponding to the input insCode.
"""
prog,drum = int(insCode[:-1]),insCode[-1:]
name_str = 'ins_{}'.format(prog)
is_drum = False
if drum == 'd':
is_drum = True
name_str += '-drum'
instrument = pretty_midi.Instrument(prog,is_drum=is_drum,name=name_str)
return instrument
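# Hedged round-trip sketch for the two helpers above: only the program number and the
# is_drum flag survive the encoding; the Instrument name is regenerated as 'ins_<prog>'.
def _insCode_roundtrip_ok(program=40, is_drum=False):
    ins = pretty_midi.Instrument(program=program, is_drum=is_drum, name='demo')
    code = instrument_to_insCode(ins)            # e.g. '40n'
    restored = insCode_to_instrument(code)
    return restored.program == ins.program and restored.is_drum == ins.is_drum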
def drum_ins_to_roll(drum_ins,fs=25):
"""Given a drum Instrument (pretty_midi.Instrument object), return a 2D array: a drum roll containing velocities of each note for all timesteps. Does not process pitch bends and control changes.
Parameters
----------
drum_ins : pretty_midi.Instrument
Instrument object with is_drum attribute set to True (represents MIDI drum instrument).
fs : int, optional
Sampling frequency for drum roll columns (each column is separated by 1/fs seconds), by default 25.
Returns
-------
drum_roll : np.ndarray, shape=(128,timesteps)
2D piano roll of MIDI data for the input drum instrument.
"""
if drum_ins.notes == []:
return np.array([[]]*128)
end_time = drum_ins.get_end_time()
drum_roll = np.zeros((128, int(fs*end_time)))
# Add up drum roll matrix, note-by-note
for note in drum_ins.notes:
# Should interpolate
drum_roll[note.pitch,int(note.start*fs):int(note.end*fs)] += note.velocity
return drum_roll
# converting drum_roll back to instrument is covered in rollArr3D_to_PrettyMIDI
def rollArr2D_to_Img(roll_array,brighten,compress_colors):
"""Converts a 2D piano roll of MIDI data to a PIL Image object.
Parameters
----------
roll_array : np.ndarray, shape=(notes,timesteps)
2D Piano roll of MIDI data.
brighten : bool
whether or not to multiply pixel brightnesses by 2, i.e., bring them from the range (0,127) to (0,255).
compress_colors : bool
whether or not to compress the raw 2D np.ndarray into the 3 color channels of the output image:
True = 3 columns of the piano roll are represented by 1 column of pixels using the value of each column for the corresponding R,G,B channels.
False = 3 columns of the piano roll are represented by 3 columns of pixels using the same value for the R,G,B channels.
Returns
-------
roll_img : PIL.Image
An Image object of the input roll_array converted to RGB format.
"""
if brighten:
roll_array *= 2
if compress_colors:
# pad the roll_array with empty columns (timesteps).
# the resulting columns can then be evenly divided into groups of 3, allowing them to fit in the R,G,B channels of the output image
if (roll_array.shape[1] % 3) != 0:
pad_cols = 3 - (roll_array.shape[1] % 3)
padding = np.zeros((roll_array.shape[0],pad_cols))
roll_array = np.hstack((roll_array,padding))
roll_array = roll_array.reshape((roll_array.shape[0],roll_array.shape[1]//3,3)) # reshape into 3D array
roll_array = roll_array.astype(np.uint8)
roll_array = np.ascontiguousarray(roll_array)
roll_img = Image.fromarray(roll_array,mode='RGB').convert('RGB')
else:
roll_img = Image.fromarray(roll_array).convert('RGB')
return roll_img
def rollArr3D_to_PrettyMIDI(roll_array,ins_codes,fs=25):
"""Converts a 3D piano roll of MIDI data (combination of 2D piano rolls of multiple instruments) to a PrettyMIDI object using input instrument codes.
Expands on the function `piano_roll_to_pretty_midi` from https://github.com/craffel/pretty-midi/blob/master/examples/reverse_pianoroll.py
Parameters
----------
roll_array : np.ndarray, shape=(notes,timesteps,instruments)
3D piano roll of MIDI data (2D piano rolls of multiple instruments padded to same shape and stacked along 'timesteps' axis).
ins_codes : tuple/list
MIDI program codes for the instruments to be used to generate the .mid file. See the function instrument_to_insCode for details.
Length of ins_codes must match roll_array.shape[2].
fs : int, optional
Sampling frequency for piano roll columns (each column is separated by 1/fs seconds), by default 25.
Returns
-------
pm : pretty_midi.PrettyMIDI
PrettyMIDI object with all instruments and corresponding notes from the input roll_array.
"""
notes,frames,num_ins = roll_array.shape
pm = pretty_midi.PrettyMIDI()
instruments_used = [insCode_to_instrument(c) for c in ins_codes] # create list of Instrument objects from ins_codes
# pad 1 column of zeros for every instrument so we can acknowledge inital and ending events
roll_array = np.pad(roll_array, [(0, 0), (1, 1), (0, 0)], 'constant')
# use changes in velocities to find note on / note off events
velocity_changes = np.nonzero(np.diff(roll_array,axis=1))
# keep track on velocities and note on times for each instrument
prev_velocities = [np.zeros(notes, dtype=int) for i in range(num_ins)]
note_on_time = [np.zeros(notes) for i in range(num_ins)]
for note,time,ins in zip(*velocity_changes):
velocity = roll_array[note,time+1,ins]
time = time / fs
if velocity > 0:
if prev_velocities[ins][note] == 0:
note_on_time[ins][note] = time
prev_velocities[ins][note] = velocity
else:
pm_note = pretty_midi.Note(
velocity=prev_velocities[ins][note],
pitch=note,
start=note_on_time[ins][note],
end=time)
instruments_used[ins].notes.append(pm_note)
prev_velocities[ins][note] = 0
pm.instruments = instruments_used
return pm
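# Hedged miniature example of the function above: a single held C4 note (pitch 60) at
# velocity 100 for one second (fs=25 -> 25 timesteps), rendered with instrument code '0n'
# (Acoustic Grand Piano, the module default).
def _rollArr3D_demo():
    roll = np.zeros((128, 25, 1))
    roll[60, :, 0] = 100
    pm = rollArr3D_to_PrettyMIDI(roll, ('0n',), fs=25)
    return pm.instruments[0].notes   # expect one Note spanning roughly 0.0s to 1.0s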
# ------------------------------------------------------------------------------------------------- #
## Functions to perform conversions on individual files
def wav_to_midi(source_path,dest_path):
"""Converts a .wav file to .mid file using WaoN - http://waon.sourceforge.net
Parameters
----------
source_path :
path to input .wav file.
dest_path : str/os.path
path to output .mid file or to directory (if directory: use name of source .wav file for output .mid file).
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .mid file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".mid")
# prep cmd list for subprocess.Popen()
waon_options = ['-i', source_path, '-o', dest_path]
res = run_exec(WAON_PATH,waon_options)
timidity_options = '-Ow -o'
def midi_to_wav(source_path,dest_path,options=timidity_options):
"""Converts a .mid file to a .wav file using Timidity++ (timidity.exe) - https://sourceforge.net/projects/timidity/
Parameters
----------
source_path : str/os.path
path to input .mid file.
dest_path : str/os.path
path to .wav file or to directory (if directory: use name of source .mid file for output .wav file).
options : str, optional
space-separated string of command line options passed to timidity: meant for the output file, by default '-Ow -o'.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .wav file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".wav")
# prep cmd list for subprocess.Popen()
full_options = [source_path]+options.split()+[dest_path]
run_exec(TIMIDITY_PATH,full_options)
def midi_to_wav_prettyMIDI(source_path,dest_path,fs=44100,drum_vol=0.25,non_drum_vol=1.0):
"""Converts a .mid file to a .wav file using the synthesize functions in the pretty_midi package.
Drum tracks are synthesized using the function `synthesize_drum_instrument` in pretty_midi/examples/chiptunes.py
Parameters
----------
source_path : str/os.path
path to input .mid file.
dest_path : str/os.path
path to output .wav file or to directory (if directory: use name of source .mid file for output .wav file).
fs : int, optional
Sample rate for .wav file, by default 44100.
drum_vol : float, optional
factor by which to multiply the drum track waveform amplitudes (a kind of `volume` control), by default 0.25.
non_drum_vol: float, optional
factor by which to multiply the NON-drum track waveform amplitudes (a kind of `volume` control), by default 1.0.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .wav file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".wav")
midi_container = pretty_midi.PrettyMIDI(source_path)
waveforms = []
# get the waveforms of all instruments in the midi file
for ins in midi_container.instruments:
if ins.is_drum:
# if instrument is a drum, get its waveform from chiptunes
wave_f = chiptunes.synthesize_drum_instrument(ins,fs=fs)
# adjust the amplitude of its waveform by drum_vol
wave_f *= drum_vol
else:
# otherwise, get its waveform by calling its synthesize function (which will return an empty waveform if it is a drum instrument)
wave_f = ins.synthesize(fs=fs)
# adjust the amplitude of its waveform by non_drum_vol
wave_f *= non_drum_vol
waveforms.append(wave_f)
# Allocate output waveform, with #sample = max length of all waveforms
synthesized = np.zeros(np.max([w.shape[0] for w in waveforms]))
# Sum all waveforms in
for waveform in waveforms:
synthesized[:waveform.shape[0]] += waveform
# Normalize
synthesized /= np.abs(synthesized).max()
synthesized = synthesized.astype(np.float32)
# Finally write to .wav file
scipy.io.wavfile.write(dest_path,fs,synthesized)
def midi_to_roll(source_path,dest_path,out_type,fs=25,brighten=True,compress_colors=True):
"""Converts a .mid file to piano roll(s) using pretty_midi.get_piano_roll (https://craffel.github.io/pretty-midi/) and/or drum_ins_to_roll functions.
Piano roll(s) are then saved to image files or returned as arrays.
Parameters
----------
source_path : str/os.path
path to input .mid file.
dest_path : str/os.path
used when returning images: path to .png file [suffixed with instrument code] or to directory (if directory: use name of source .mid file as prefix for output .png files).
out_type : str
controls how the piano roll(s) is returned, one of {'array_r3', 'array_r2', 'sep_roll, 'one_roll'}:
'array_r3' = return as 1 raw 3D np.ndarray (instrument piano rolls merged into one) along with a tuple containing instrument codes to calling function [dest_path unused].
'array_r2' = return as 1 raw 2D np.ndarray made by using pretty_midi.get_piano_roll on the .mid file [dest_path unused].
            (transcribes all tracks in the .mid file (except drum tracks) using the default instrument: Acoustic Grand Piano - insCode="0n").
'sep_roll' = process raw 2D np.ndarrays for each instrument into separate image files using rollArr2D_to_Img and write them to dest_path.
'one_roll' = process the raw 2D np.ndarray from 'array_r2' mode into one image file using rollArr2D_to_Img and write it to dest_path.
fs : int, optional
Sampling frequency: number of columns in piano roll/image per second of audio (each column is separated by 1/fs seconds), by default 25.
brighten : bool, optional
used when returning images: see rollArr2D_to_Img for details, by default True.
compress_colors : bool, optional
used when returning images: see rollArr2D_to_Img for details, by default True.
Returns
-------
'array_r3' mode:
(stacked_roll_array,ins_codes) : (np.ndarray of shape=(notes,timesteps,instruments), tuple)
tuple of: 3D piano roll made by padding & stacking 2D Instrument piano rolls from .mid file along timesteps axis and corresponding instrument code strings.
'array_r2' mode:
one_ins_roll: np.ndarray, shape=(notes,timesteps)
2D piano roll made by using pretty_midi.get_piano_roll on the .mid file.
'sep_roll'/'one_roll' mode:
num_rolls : int
Number of piano rolls created and saved to disk.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .png file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".png")
try:
midi_container = pretty_midi.PrettyMIDI(source_path)
except ValueError:
# print("Unable to parse \""+source_path+"\" into .png roll")
return -1
ins_codes = []
ins_rolls = []
# get the instrument code and the piano roll for all instruments in the midi file
for ins in midi_container.instruments:
code = instrument_to_insCode(ins)
roll = []
if ins.is_drum:
# if instrument is a drum, use custom function to get the piano roll
roll = drum_ins_to_roll(ins,fs=fs)
else:
# otherwise use the pretty_midi function on the instrument
roll = ins.get_piano_roll(fs=fs)
ins_codes.append(code)
ins_rolls.append(roll)
# get a separate roll using pretty_midi function on the entire midi file; flattening across instruments while ignoring drums
one_ins_roll = midi_container.get_piano_roll(fs=fs)
if out_type == 'array_r3':
max_len = max([r.shape[1] for r in ins_rolls])
padded_rolls = []
for i in range(len(ins_rolls)):
r = ins_rolls[i]
padded_r = np.pad(r,[(0,0),(0,max_len-r.shape[1])]) # pad all instrument rolls to the same shape (timesteps)
padded_rolls.append(padded_r)
stacked_roll_array = np.stack(padded_rolls,axis=-1) # stack padded rolls into a single 3D piano roll
return (stacked_roll_array,ins_codes)
elif out_type == 'sep_roll':
dest_splits = path_splitter(dest_path)
for i in range(len(ins_rolls)):
roll_array,roll_code = ins_rolls[i],ins_codes[i]
save_path = os.path.join(dest_splits['directory'],dest_splits['name']+"-ins_"+roll_code+".png")
roll_img = rollArr2D_to_Img(roll_array,brighten=brighten,compress_colors=compress_colors) # convert each roll to a PIL Image..
roll_img.save(save_path) # and save it to a separate file (name suffixed with the instrument code)
return len(ins_rolls)
else:
# if not creating separate rolls for instruments, create a single roll for entire midi track with instrument prog=0 (Piano)
if out_type == 'one_roll':
roll_img = rollArr2D_to_Img(one_ins_roll,brighten=brighten,compress_colors=compress_colors)
roll_img.save(dest_path)
return 1
elif out_type == 'array_r2':
return one_ins_roll
def rollPic_slicer(source_path,dest_folder,fs=25,compress_colors=True,slice_dur=5,slice_suffix="-sp{:03d}"):
"""Cuts up a piano roll image into slices (images) [vertical cuts] that represent a fixed duration of .mid file audio.
Parameters
----------
source_path : str/os.path
path to piano roll image.
dest_folder : str/os.path
path to directory where roll image slices are to be stored.
fs : int, optional
Sampling frequency used to generate the original roll image, by default 25.
compress_colors : bool, optional
whether compression across color channels was performed to generate the original roll image, by default True.
slice_dur : int, optional
maximum duration (in seconds) of each slice, by default 5.
slice_suffix : str, optional
formatting string used to name the slice image files, by default "-sp{:03d}".
Returns
-------
im_counter : int
Number of image slices created and saved to disk.
"""
src_splits = path_splitter(source_path)
roll_img = Image.open(source_path)
roll_img.load() # load the image from source_path as a PIL Image
W,H = roll_img.size
sl = slice_dur*fs # number of pixels that represent `slice_dur` seconds of audio
if compress_colors:
sl //= 3 # if color compression was used, 1/3-rd of the pixels represent `slice_dur` seconds of audio
x = 0
im_counter = 0
while x < W:
if x+sl > W:
break
dest_path = os.path.join(dest_folder,src_splits['name']+slice_suffix.format(im_counter)+".png")
crop_area = (x,0,x+sl,H) # tuple of coordinates for crop area
chunk = roll_img.crop(crop_area) # crop the image
chunk.load()
chunk.save(dest_path) # save the cropped image to disk
im_counter += 1
x += sl
return im_counter
def roll_to_midi(source_path,dest_path,input_array=None,ins_codes=('0n',),fs=25,compress_colors=True):
"""Converts a piano roll (image/raw array) into a .mid file. Internally transforms 2D np.ndarrays & image files to 3D np.ndarrays and converts them using rollArr3D_to_PrettyMIDI function.
Parameters
----------
source_path : str/os.path
used for conversion from image: path to piano roll image.
dest_path : str/os.path
path to .mid file or to directory (if directory: use name of source .png file appended with '-resynth' for output .mid file).
input_array : np.ndarray of shape=(notes,timesteps) or (notes,timesteps,instruments) or None, optional
if not None, will use as roll array instead of reading it from image at source_path, by default None.
ins_codes : tuple/list, optional
String codes for the instruments used to generate the .mid file (see instrument_to_insCode for details), by default ('0n',) (setting for 2D np.ndarray & image rolls).
fs : int, optional
Sampling frequency: number of columns in input roll image/array per second of audio (each column is separated by 1/fs seconds), by default 25.
compress_colors : bool, optional
used for conversion from image: whether input roll image should be treated as if compression across color channels was performed to generate it, by default True.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .mid file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+"-resynth.mid")
roll_raw, roll_to_conv = [],[]
if input_array is not None:
roll_raw = np.array(input_array,dtype='float32')
compress_colors=None
else:
roll_img = Image.open(source_path)
roll_img.load()
roll_raw = np.asarray(roll_img,dtype='float32')
# piano roll image is for only one instrument so it is first converted to 2D array of shape=(notes,timesteps)
if compress_colors is True:
roll_to_conv = roll_raw.reshape((roll_raw.shape[0],int(roll_raw.shape[1]*3))) # unpack the color channel values into the 2nd dimension if image treated as compressed
elif compress_colors is False:
roll_to_conv = np.mean(roll_raw,axis=2) # take the mean color across the 3 color channels if image treated as un-compressed
elif compress_colors is None:
roll_to_conv = roll_raw # use input_array as-is if it was input
# for the conversion back to MIDI, roll_to_conv must be 3D array of shape=(notes,timesteps,instruments)
if len(roll_to_conv.shape) < 3:
roll_to_conv = roll_to_conv.reshape(roll_to_conv.shape+(1,)) # if originally 2D, number of instruments = 1
    def lim_val(val):
        # clip out-of-range values so notes/velocities stay within the valid MIDI range [0,127]
        return min(int(val), 127)
    roll_to_conv = np.vectorize(lim_val)(roll_to_conv) # values in the piano roll are limited to the range [0,127]
roll_midi = rollArr3D_to_PrettyMIDI(roll_to_conv, ins_codes, fs=fs) # pass the 3D piano roll and the input ins_codes to function to get pretty_midi.PrettyMIDI object
roll_midi.write(dest_path) # write the contents of the PrettyMIDI object to the .mid file specified in dest_path
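# Hedged round-trip sketch tying midi_to_roll and roll_to_midi together; midi_path and
# out_dir are caller-supplied paths, nothing here is hard-coded by the module.
def _roll_roundtrip_sketch(midi_path, out_dir, fs=25):
    roll, codes = midi_to_roll(midi_path, "", 'array_r3', fs=fs)   # 3D roll + instrument codes
    roll_to_midi(midi_path, out_dir, input_array=roll, ins_codes=codes, fs=fs)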
def midi_to_rollTxt(source_path,dest_path,all_ins=True,fs=25,enc_fn=hex2):
"""Converts a .mid file to a .txt file containing an encoded form of its piano roll: notes+velocities to be played for each instrument per timestep.
Internally uses midi_to_roll function.
If there are no notes to be played for an instrument for the timestep, a single null note is shown: 00-00.
Parameters
----------
source_path : str/os.path
path to input .mid file.
dest_path : str/os.path
path to .txt file or to directory (if directory: use name of source .mid file for output .txt file).
all_ins : bool, optional, by default True
True- transcribe notes using their corresponding instruments specified in the .mid file:
Template for each line of output .txt:
<Instrument_0_code>: <Note>-<Velo> <Note>-<Velo> ...<TAB><Instrument_1_code>: <Note>-<Velo> <Note>-<Velo> ...<TAB><NEWLINE>
False: transcribe notes in the .mid file without instrument information, the output txt file can only be read back with 1 instrument, set by default to Acoustic Grand Piano - instrument program 0.
Template for each line of output .txt:
<Note>-<Velo> <Note>-<Velo> <Note>-<Velo> <Note>-<Velo> ...<NEWLINE>
fs : int, optional
Sampling frequency: Number of lines of encoded text to generate per second of audio (each line is separated by 1/fs seconds), by default 25.
enc_fn : function, optional
specifies how notes, velocities, and instrument codes are to be encoded [see definition of hex2 function for an example], by default hex2.
Returns
-------
num_timesteps : int
Number of timesteps (seconds*fs) written to the .txt file.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .txt file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".txt")
print("Opened .mid file: ",source_path)
txt_out = open(dest_path,'a+')
print("Writing to .txt file at: "+dest_path)
# first get roll_array from midi
num_timesteps, num_instruments = 0, 1
try:
if not all_ins:
## single instrument mode - roll to be transcribed omitting instrument information
roll_array = midi_to_roll(source_path,"",'array_r2',fs=fs)
roll_array = np.reshape(roll_array,roll_array.shape+(1,))
else:
## all instruments mode
roll_array,ins_codes = midi_to_roll(source_path,"",'array_r3',fs=fs)
except Exception:
print("failed to get roll_array")
return 0
num_timesteps = roll_array.shape[1] # get number of timesteps/columns from 2nd axis of roll_array
num_instruments = roll_array.shape[2] # get number of instruments from 3rd axis of roll_array
if all_ins:
# ins_code is of the form: <number from 0-127><n/d based on is_drum>
# encode the number part of the ins_codes using the encoding function enc_fn
encoded_ins_codes = [enc_fn(int(c[:-1]),"forward")+c[-1] for c in ins_codes]
for t in range(num_timesteps):
timestep = roll_array[:,t,:]
timestep_str = ""
for i in range(num_instruments): # will loop only once if all_ins is False
instrument = timestep[:,i]
non_zero_notes = list(np.nonzero(instrument)[0]) #indices of notes with non-zero velocity values
ins_str = encoded_ins_codes[i]+": " if all_ins else "" # include instrument code only if all_ins is True
if len(non_zero_notes) == 0:
# if no notes for instrument, add a null note: 00-00
ins_str += enc_fn(int(0),'forward')+'-'+enc_fn(int(0),'forward')+" "
else:
# otherwise, add note-velo strings separated by spaces
for n in non_zero_notes:
ins_str += enc_fn(int(n),'forward')+'-'+enc_fn(int(instrument[n]),'forward')+" "
ins_str += '\t' if all_ins else "" # tabs used to separate instruments only if all_ins is True
timestep_str += ins_str
timestep_str += '\n' # add newline after processing the timestep (column of piano roll)
txt_out.write(timestep_str)
# after writing all events to the file, write a newline and close the text object.
txt_out.write("\n")
txt_out.close()
return num_timesteps
def rollTxt_to_midi(source_path,dest_path,fs=25,dec_fn=hex2,logging=True):
"""Converts a .txt file containing an encoded form of a MIDI piano roll (notes+velocities, optionally instrument codes) to a .mid file.
Internally uses roll_to_midi function. Expects formatting as described in the function midi_to_rollTxt.
Parameters
----------
source_path : str/os.path
path to input .txt file.
dest_path : str/os.path
path to output .mid file or to directory (if directory: use name of source .txt file for output .mid file).
fs : int, optional
Sampling frequency: number of lines of encoded text to convert for producing 1 second of audio (each line is separated by 1/fs seconds), by default 25.
dec_fn : function, optional, by default hex2
function that specifies how the notes, velocities and instrument codes are to be decoded [see definition of hex2 for an example].
logging : bool, optional
whether or not to output statistics on the conversion process through print and to a log file, by default True.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .mid file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".mid")
dest_splits = path_splitter(dest_path)
# try to read the text file
try:
txt_in = open(source_path,'r')
except IOError:
print("File at {} does not exist... Exiting program".format(source_path))
return None
# create regexes which will be used later to find instrument codes, notes & velocities from the lines of the .txt file
inst_regex, note_velo_regex = "", ""
if dec_fn == hex2:
# if decoding with hex2: notes, velocities, instrument codes (values in the range [0,127]) will be exactly 2 characters long
inst_regex = r".{2,2}[dn]:"
note_velo_regex = r".{2,2}-.{2,2}"
elif dec_fn == None:
# if no decoding required, the above values can be 1-3 characters long
inst_regex = r".{1,3}[dn]:"
note_velo_regex = r".{1,3}-.{1,3}"
inst_regex, note_velo_regex = re.compile(inst_regex), re.compile(note_velo_regex)
raw_lines = txt_in.readlines() # read the .txt file
instrument_rolls = {} # to store the piano rolls of the instruments identified in the .txt file
# variables used to log details of the conversion process
instrument_event_counts = {}
events_processed = 0
invalid_events = 0
for t in range(len(raw_lines)):
line = raw_lines[t]
inst_splits = [s for s in line.split('\t') if s != '\n' and s != ''] # expects events for each instrument to be separated by tabs
for inst_line in inst_splits:
inst_str = inst_regex.findall(inst_line)
note_velo_strs = note_velo_regex.findall(inst_line)
if not note_velo_strs: # if no valid notes are found, skip this line.
continue
inst_code = ""
try:
# if there are matches to the regex, take the first match as the inst_code (encoded) after excluding the ":" character
inst_code = inst_str[0][:-1]
# then decode the program number from the inst_code as needed
if dec_fn:
inst_code = str(dec_fn(inst_code[:-1],"reverse"))+inst_code[-1]
if not check_insCode(inst_code):
raise Exception
except Exception:
# If no regex matches, or can't decode matched inst_code or invalid inst_code:
# use a default instrument -> this will occur for text files generated with midi_to_rollTxt with the setting all_ins=False
inst_code = "0n"
# if the inst_code hasn't been seen before, create a 2D piano roll for it
if inst_code not in instrument_rolls.keys():
instrument_rolls[inst_code] = np.zeros((128,len(raw_lines)))
instrument_event_counts[inst_code] = 0
for n_v in note_velo_strs: # strs matching the regex are of the form: <Note>-<Velocity>
try:
n,v = n_v.split('-') # first separate note and velocity strings
# then decode the strings back to integers
if dec_fn:
# if decoding function is given, decode using it.
n,v = dec_fn(n,"reverse"), dec_fn(v,"reverse")
else:
# otherwise, attempt to directly convert the strings to base-10 integers
n,v = int(n), int(v)
if (not 0 <= n <= 127) or (not 0 <= v <= 127):
raise ValueError
except ValueError:
invalid_events += 1
else:
instrument_rolls[inst_code][n,t] = v # update the 2D piano roll for the instrument
# increment the logging variables
instrument_event_counts[inst_code] += 1
events_processed += 1
# collate instrument codes and their corresponding piano rolls into separate lists
instrument_codes = []
list_of_rolls = []
for code, roll in instrument_rolls.items():
instrument_codes.append(code)
list_of_rolls.append(roll)
# stack the 2D piano rolls of the instruments into a 3D piano roll
stacked_roll_array = np.stack(list_of_rolls,axis=-1)
# convert the 3D piano roll to a midi file through roll_to_midi
roll_to_midi(source_path,dest_path,input_array=stacked_roll_array,ins_codes=instrument_codes,fs=fs)
if logging: # Print logs and write them to a log file if necessary
note_count_logs = ["Instrument {}: {} events".format(code, event_count) for code,event_count in instrument_event_counts.items()]
log_strings = [
'-------------------------------------------------',
"Converted rollTxt {} to midi!".format(source_path),
'*************************************************',
"Transcribed {} events for {} instruments".format(events_processed,len(instrument_codes)),
"Ignored {} invalid events".format(invalid_events),
'-------------------------------------------------',
]
log_strings += note_count_logs
log_path = os.path.join(dest_splits['directory'],src_splits['name']+"-rollTxt_conv_log.txt")
log_file = open(log_path,'a+')
for line in log_strings:
print(line)
log_file.writelines(line+"\n" for line in log_strings) # write to log file in the same directory as the output midi file
log_file.close()
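# Hedged round-trip sketch for the text transcription pair above; the .txt file and the decoded
# .mid file are both written into work_dir, with names derived from midi_path by the helpers.
# Choose a work_dir different from the folder containing midi_path to avoid overwriting the source.
def _rollTxt_roundtrip_sketch(midi_path, work_dir, fs=25):
    num_steps = midi_to_rollTxt(midi_path, work_dir, all_ins=True, fs=fs)
    txt_path = os.path.join(work_dir, path_splitter(midi_path)['name'] + ".txt")
    rollTxt_to_midi(txt_path, work_dir, fs=fs)
    return num_steps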
def midi_to_csv(source_path,dest_path,ret_csv_strings=False):
"""Converts a .mid file to a .csv file using py-midicsv - https://pypi.org/project/py-midicsv/
Parameters
----------
source_path : str/os.path
path to input .mid file.
dest_path : str/os.path
path to .csv file or to directory (if directory: use name of source .mid file for output .csv file).
ret_csv_strings : bool, optional
True: return list of csv formatted strings to calling function, False: write the list to a .csv file specified by dest_path, by default False.
Returns
-------
csv_str_list : list
list of csv formatted strings containing the instructions of the MIDI file; only returned if input ret_csv_strings is True.
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .csv file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".csv")
csv_str_list = py_midicsv.midi_to_csv(source_path)
if ret_csv_strings:
return csv_str_list
with open(dest_path, "w", newline="") as f:
writer = csv.writer(f)
writer.writerows(csv_str_list)
def csv_to_midi(source_path,dest_path):
"""Converts a .csv file to a .mid file using py-midicsv - https://pypi.org/project/py-midicsv/
Parameters
----------
source_path : str/os.path
path to input .csv file.
dest_path : str/os.path
path to .mid file or to directory (if directory: use name of source .csv file for output .mid file).
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .mid file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".mid")
midi_obj = py_midicsv.csv_to_midi(source_path)
with open(dest_path,"wb") as out_midi:
midi_writer = py_midicsv.FileWriter(out_midi)
midi_writer.write(midi_obj)
spec_analysis_options = '--quiet --analysis -min 27.5 -max 19912.127 --bpo 12 --pps 25 --brightness 1'
wav_sine_synth_options = '--quiet --sine -min 27.5 -max 19912.127 --pps 25 -r 44100 -f 16'
wav_noise_synth_options = '--quiet --noise -min 27.5 -max 19912.127 --pps 25 -r 44100 -f 16'
def wav_to_spectro(source_path,dest_path,options=spec_analysis_options,encode=True):
"""Converts a .wav file to a spectrogram (.png file) using ARSS - http://arss.sourceforge.net
Parameters
----------
source_path : str/os.path
path to input .wav file.
dest_path : str/os.path
path to output spectrogram (.png file) or to directory (if directory: use name of source .wav file for output .png file).
options : str, optional
space-separated string of command line options passed to ARSS: selects analysis mode, frequency range, bands per octave, pixels/sec, etc.,
by default spec_analysis_options = '--quiet --analysis -min 27.5 -max 19912.127 --bpo 12 --pps 25 --brightness 1'
encode : bool, optional
whether or not to re-encode the generated .png spectrogram as rgba - appends '-enc' to output file name, by default True
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path) # original path splits
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .png file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".png")
arss_options = ['-i', source_path, '-o', dest_path] + options.split()
res = run_exec(ARSS_PATH,arss_options)
if encode:
# if required to re-encode the png, will attempt to do so using ffmpeg.
dest_splits = path_splitter(dest_path) # take path splits again in case the path was modified earlier
enc_path = os.path.join(dest_splits['directory'],dest_splits['name']+"-enc.png")
try:
img_fmt_converter(dest_path,enc_path,"rgba")
except Exception:
print("Unable to re-encode the .png file...")
finally:
os.remove(dest_path)
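# Usage sketch (hypothetical paths; assumes ARSS_PATH points to a working arss binary and ffmpeg is available
# for the optional rgba re-encoding step):
#   wav_to_spectro("audio/clip.wav", "spectrograms/")  # writes spectrograms/clip-enc.png
#   wav_to_spectro("audio/clip.wav", "spectrograms/clip.png", encode=False)  # keep the raw ARSS output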
def spectro_to_wav(source_path,dest_path,options=wav_noise_synth_options):
"""Converts a spectrogram/image (.png file) to a .wav file using ARSS - http://arss.sourceforge.net
Parameters
----------
source_path : str/os.path
path to input spectrogram/image (.png file)
dest_path : str/os.path
path to output .wav file or to directory (if directory: use name of source .png file for output .wav file)
options : str, optional
space-separated string of command line options passed to ARSS: selects synthesis mode (noise/sine), frequency range, bands per octave, pixels/sec, etc.,
by default wav_noise_synth_options = '--quiet --noise -min 27.5 -max 19912.127 --pps 25 -r 44100 -f 16'
"""
src_splits = path_splitter(source_path)
dest_splits = path_splitter(dest_path)
# convert .png to bitmap internally
inter_path = os.path.join(src_splits['directory'],src_splits['name']+"-24-bitmap.bmp")
img_fmt_converter(source_path,inter_path,"bgr24")
if dest_splits['extension'] == '':
# if dest_path points to a directory and not a .wav file
# default to using source file name for output file
dest_path = os.path.join(dest_path,src_splits['name']+".wav")
arss_options = ['-i', inter_path, '-o', dest_path] + options.split()
res = run_exec(ARSS_PATH,arss_options)
os.remove(inter_path)
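# Usage sketch (hypothetical paths; assumes ARSS and ffmpeg are available):
#   spectro_to_wav("spectrograms/clip-enc.png", "resynth/")  # noise synthesis with the default options
#   spectro_to_wav("spectrograms/clip-enc.png", "resynth/clip-sine.wav", options=wav_sine_synth_options)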
|
{"hexsha": "100e1fffa9f80482d02d912a4ceaa7e259bf9d1b", "size": 42286, "ext": "py", "lang": "Python", "max_stars_repo_path": "file_manipulators/audio_conv_ops.py", "max_stars_repo_name": "Retr0Metal98/file_manipulators", "max_stars_repo_head_hexsha": "fb9992c5e34910a3ddbb97ea88c3fe2e86477818", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "file_manipulators/audio_conv_ops.py", "max_issues_repo_name": "Retr0Metal98/file_manipulators", "max_issues_repo_head_hexsha": "fb9992c5e34910a3ddbb97ea88c3fe2e86477818", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-08-06T11:57:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-14T15:45:33.000Z", "max_forks_repo_path": "file_manipulators/audio_conv_ops.py", "max_forks_repo_name": "Retr0Metal98/file_manipulators", "max_forks_repo_head_hexsha": "fb9992c5e34910a3ddbb97ea88c3fe2e86477818", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4171240395, "max_line_length": 204, "alphanum_fraction": 0.660360403, "include": true, "reason": "import numpy,import scipy", "num_tokens": 10094}
|
"""
Integration operations.
union_poi_bank,
union_poi_bus_station,
union_poi_bar_restaurant,
union_poi_parks,
union_poi_police,
join_collective_areas,
join_with_pois,
join_with_pois_by_category,
join_with_events,
join_with_event_by_dist_and_time,
join_with_home_by_id,
merge_home_with_poi
"""
from __future__ import annotations
from collections import namedtuple
import numpy as np
from numpy import ndarray
from pandas import DataFrame, Timedelta
from pandas.core.series import Series
from pymove.preprocessing import filters
from pymove.utils.constants import (
ADDRESS,
CITY,
DATETIME,
DIST_EVENT,
DIST_HOME,
DIST_POI,
EVENT_ID,
EVENT_TYPE,
GEOMETRY,
HOME,
ID_POI,
LATITUDE,
LONGITUDE,
NAME_POI,
TRAJ_ID,
TYPE_POI,
VIOLATING,
)
from pymove.utils.distances import haversine
from pymove.utils.log import logger, progress_bar
def union_poi_bank(
data: DataFrame,
label_poi: str = TYPE_POI,
banks: list[str] | None = None,
inplace: bool = False
) -> DataFrame | None:
"""
Performs the union between the different bank categories.
For Points of Interest in a single category named 'banks'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
banks : list of str, optional
Names of pois referring to banks, by default
banks = [
'bancos_filiais',
'bancos_agencias',
'bancos_postos',
'bancos_PAE',
'bank',
]
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
data with poi or None
Examples
--------
>>> from pymove.utils.integration import union_poi_bank
>>> pois_df
lat lon id type_poi
0 39.984094 116.319236 1 bank
1 39.984198 116.319322 2 randomvalue
2 39.984224 116.319402 3 bancos_postos
3 39.984211 116.319389 4 randomvalue
4 39.984217 116.319422 5 bancos_PAE
5 39.984710 116.319865 6 bancos_postos
6 39.984674 116.319810 7 bancos_agencias
7 39.984623 116.319773 8 bancos_filiais
8 39.984606 116.319732 9 banks
9 39.984555 116.319728 10 banks
>>> union_poi_bank(pois_df)
lat lon id type_poi
0 39.984094 116.319236 1 banks
1 39.984198 116.319322 2 randomvalue
2 39.984224 116.319402 3 banks
3 39.984211 116.319389 4 randomvalue
4 39.984217 116.319422 5 banks
5 39.984710 116.319865 6 banks
6 39.984674 116.319810 7 banks
7 39.984623 116.319773 8 banks
8 39.984606 116.319732 9 banks
9 39.984555 116.319728 10 banks
"""
if not inplace:
data = data.copy()
logger.debug('union bank categories to one category')
logger.debug(f'... There are {data[label_poi].nunique()} -- {label_poi}')
if banks is None:
banks = [
'bancos_filiais',
'bancos_agencias',
'bancos_postos',
'bancos_PAE',
'bank',
]
filter_bank = data[label_poi].isin(banks)
data.at[data[filter_bank].index, label_poi] = 'banks'
if not inplace:
return data
def union_poi_bus_station(
data: DataFrame,
label_poi: str = TYPE_POI,
bus_stations: list[str] | None = None,
inplace: bool = False
) -> DataFrame | None:
"""
Performs the union between the different bus station categories.
For Points of Interest in a single category named 'bus_station'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
bus_stations : list of str, optional
Names of pois referring to bus_stations, by default
bus_stations = [
'transit_station',
'pontos_de_onibus'
]
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
data with poi or None
Examples
--------
>>> from pymove.utils.integration import union_poi_bus_station
>>> pois_df
lat lon id type_poi
0 39.984094 116.319236 1 transit_station
1 39.984198 116.319322 2 randomvalue
2 39.984224 116.319402 3 transit_station
3 39.984211 116.319389 4 pontos_de_onibus
4 39.984217 116.319422 5 transit_station
5 39.984710 116.319865 6 randomvalue
6 39.984674 116.319810 7 bus_station
7 39.984623 116.319773 8 bus_station
>>> union_poi_bus_station(pois_df)
lat lon id type_poi
0 39.984094 116.319236 1 bus_station
1 39.984198 116.319322 2 randomvalue
2 39.984224 116.319402 3 bus_station
3 39.984211 116.319389 4 bus_station
4 39.984217 116.319422 5 bus_station
5 39.984710 116.319865 6 randomvalue
6 39.984674 116.319810 7 bus_station
7 39.984623 116.319773 8 bus_station
"""
if not inplace:
data = data.copy()
logger.debug('union bus station categories to one category')
if bus_stations is None:
bus_stations = [
'transit_station',
'pontos_de_onibus'
]
filter_bus_station = data[label_poi].isin(
bus_stations
)
data.at[data[filter_bus_station].index, label_poi] = 'bus_station'
if not inplace:
return data
def union_poi_bar_restaurant(
data: DataFrame,
label_poi: str = TYPE_POI,
bar_restaurant: list[str] | None = None,
inplace: bool = False
) -> DataFrame | None:
"""
Performs the union between bar and restaurant categories.
For Points of Interest in a single category named 'bar-restaurant'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
bar_restaurant : list of str, optional
Names of pois referring to bars or restaurants, by default
bar_restaurant = [
'restaurant',
'bar'
]
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
data with poi or None
Examples
--------
>>> from pymove.utils.integration import union_poi_bar_restaurant
>>> pois_df
lat lon id type_poi
0 39.984094 116.319236 1 restaurant
1 39.984198 116.319322 2 restaurant
2 39.984224 116.319402 3 randomvalue
3 39.984211 116.319389 4 bar
4 39.984217 116.319422 5 bar
5 39.984710 116.319865 6 bar-restaurant
6 39.984674 116.319810 7 random123
7 39.984623 116.319773 8 123
>>> union_poi_bar_restaurant(pois_df)
lat lon id type_poi
0 39.984094 116.319236 1 bar-restaurant
1 39.984198 116.319322 2 bar-restaurant
2 39.984224 116.319402 3 randomvalue
3 39.984211 116.319389 4 bar-restaurant
4 39.984217 116.319422 5 bar-restaurant
5 39.984710 116.319865 6 bar-restaurant
6 39.984674 116.319810 7 random123
7 39.984623 116.319773 8 123
"""
if not inplace:
data = data.copy()
logger.debug('union restaurant and bar categories to one category')
if bar_restaurant is None:
bar_restaurant = ['restaurant', 'bar']
filter_bar_restaurant = data[label_poi].isin(bar_restaurant)
data.at[data[filter_bar_restaurant].index, label_poi] = 'bar-restaurant'
if not inplace:
return data
def union_poi_parks(
data: DataFrame,
label_poi: str = TYPE_POI,
parks: list[str] | None = None,
inplace: bool = False
) -> DataFrame | None:
"""
Performs the union between park categories.
For Points of Interest in a single category named 'parks'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
parks : list of str, optional
Names of pois referring to parks, by default
parks = [
'pracas_e_parques',
'park'
]
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
data with poi or None
Examples
--------
>>> from pymove.utils.integration import union_poi_parks
>>> pois_df
lat lon id type_poi
0 39.984094 116.319236 1 pracas_e_parques
1 39.984198 116.319322 2 park
2 39.984224 116.319402 3 parks
3 39.984211 116.319389 4 random
4 39.984217 116.319422 5 123
5 39.984710 116.319865 6 park
6 39.984674 116.319810 7 parks
7 39.984623 116.319773 8 pracas_e_parques
>>> union_poi_parks(pois_df)
lat lon id type_poi
0 39.984094 116.319236 1 parks
1 39.984198 116.319322 2 parks
2 39.984224 116.319402 3 parks
3 39.984211 116.319389 4 random
4 39.984217 116.319422 5 123
5 39.984710 116.319865 6 parks
6 39.984674 116.319810 7 parks
7 39.984623 116.319773 8 parks
"""
if not inplace:
data = data.copy()
logger.debug('union parks categories to one category')
if parks is None:
parks = ['pracas_e_parques', 'park']
filter_parks = data[label_poi].isin(parks)
data.at[data[filter_parks].index, label_poi] = 'parks'
if not inplace:
return data
def union_poi_police(
data: DataFrame,
label_poi: str = TYPE_POI,
police: list[str] | None = None,
inplace: bool = False
) -> DataFrame | None:
"""
Performs the union between police categories.
For Points of Interest in a single category named 'police'.
Parameters
----------
data : DataFrame
Input points of interest data
label_poi : str, optional
Label referring to the Point of Interest category, by default TYPE_POI
police : list of str, optional
Names of pois referring to police stations, by default
police = [
'distritos_policiais',
'delegacia'
]
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
data with poi or None
Examples
--------
>>> from pymove.utils.integration import union_poi_police
>>> pois_df
lat lon id type_poi
0 39.984094 116.319236 1 distritos_policiais
1 39.984198 116.319322 2 police
2 39.984224 116.319402 3 police
3 39.984211 116.319389 4 distritos_policiais
4 39.984217 116.319422 5 random
5 39.984710 116.319865 6 randomvalue
6 39.984674 116.319810 7 123
7 39.984623 116.319773 8 bus_station
>>> union_poi_police(pois_df)
lat lon id type_poi
0 39.984094 116.319236 1 police
1 39.984198 116.319322 2 police
2 39.984224 116.319402 3 police
3 39.984211 116.319389 4 police
4 39.984217 116.319422 5 random
5 39.984710 116.319865 6 randomvalue
6 39.984674 116.319810 7 123
7 39.984623 116.319773 8 bus_station
"""
if not inplace:
data = data.copy()
logger.debug('union distritos policies and police categories')
if police is None:
police = ['distritos_policiais', 'delegacia']
filter_police = data[label_poi].isin(police)
data.at[data[filter_police].index, label_poi] = 'police'
if not inplace:
return data
def join_collective_areas(
data: DataFrame,
areas: DataFrame,
label_geometry: str = GEOMETRY,
inplace: bool = False
) -> DataFrame | None:
"""
Performs the integration between trajectories and collective areas.
Generating a new column that informs if the point of the
trajectory is inserted in a collective area.
Parameters
----------
data : geopandas.GeoDataFrame
The input trajectory data
areas : geopandas.GeoDataFrame
The input collective areas data
label_geometry : str, optional
Label referring to the Point of Interest category, by default GEOMETRY
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
data with joined geometries or None
Examples
--------
>>> from pymove.utils.integration import join_collective_areas
>>> data
lat lon datetime id geometry
0 39.984094 116.319236 2008-10-23 05:53:05 1 POINT (116.31924 39.98409)
1 39.984198 116.319322 2008-10-23 05:53:06 1 POINT (116.31932 39.98420)
2 39.984224 116.319402 2008-10-23 05:53:11 1 POINT (116.31940 39.98422)
3 39.984211 116.319389 2008-10-23 05:53:16 1 POINT (116.31939 39.98421)
4 39.984217 116.319422 2008-10-23 05:53:21 1 POINT (116.31942 39.98422)
>>> area_c
lat lon datetime id geometry
0 39.984094 116.319236 2008-10-23 05:53:05 1 POINT (116.319236 39.984094)
1 40.006436 116.317701 2008-10-23 10:53:31 1 POINT (116.317701 40.006436)
2 40.014125 116.306159 2008-10-23 23:43:56 1 POINT (116.306159 40.014125)
3 39.984211 116.319389 2008-10-23 05:53:16 1 POINT (116.319389 39.984211)
POINT (116.32687 39.97901)
>>> join_collective_areas(gdf, area_c)
>>> gdf.head()
lat lon datetime id \
geometry violating
0 39.984094 116.319236 2008-10-23 05:53:05 1 \
POINT (116.319236 39.984094) True
1 39.984198 116.319322 2008-10-23 05:53:06 1 \
POINT (116.319322 39.984198) False
2 39.984224 116.319402 2008-10-23 05:53:11 1 \
POINT (116.319402 39.984224) False
3 39.984211 116.319389 2008-10-23 05:53:16 1 \
POINT (116.319389 39.984211) True
4 39.984217 116.319422 2008-10-23 05:53:21 1 \
POINT (116.319422 39.984217) False
"""
if not inplace:
data = data.copy()
logger.debug('Integration between trajectories and collectives areas')
Geometry = namedtuple('Geometry', 'geom coordinates')
polygons = areas[label_geometry].apply(
lambda g: Geometry(g.__class__, g.__geo_interface__.get('coordinates'))
).unique()
polygons = [p.geom(p.coordinates) for p in polygons]
data[VIOLATING] = False
for p in progress_bar(polygons, desc='Joining trajectories and areas'):
intersects = data[label_geometry].apply(lambda x: x.intersects(p))
index = data[intersects].index
data.at[index, VIOLATING] = True
if not inplace:
return data
def _reset_and_creates_id_and_lat_lon(
data: DataFrame,
df_pois: DataFrame,
lat_lon_poi: bool = True,
reset_index: bool = True
) -> tuple[ndarray, ndarray, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes.
Also creates default-valued arrays for the join operation:
minimum distances, ids, tags, latitude and longitude.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
lat_lon_poi : bool, optional
Flag to determine whether the lat and lon arrays are sized to df_pois (True)
or to data (False), by default True
reset_index : bool, optional
Flag for reset index of the df_pois and data dataframes before the join,
by default True
Returns
-------
distances, ids, tags, lat, lon: arrays with default values for join operation
Examples
--------
>>> from pymove.utils.integration import _reset_and_creates_id_and_lat_lon
>>> move_df.head()
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
>>> pois.head()
lat lon id type_poi name_poi
0 39.984094 116.319236 1 policia distrito_pol_1
>>> _reset_and_creates_id_and_lat_lon(move_df, pois)
(
array([inf]),
array([''], dtype=object),
array([''], dtype=object),
array([inf]),
array([inf])
)
"""
if reset_index:
logger.debug('... Resetting index to operation...')
data.reset_index(drop=True, inplace=True)
df_pois.reset_index(drop=True, inplace=True)
# create numpy array to store new column to DataFrame of movement objects
distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
ids = np.full(data.shape[0], '', dtype='object_')
tags = np.full(data.shape[0], '', dtype='object_')
# creating lat and lon array to operation
if lat_lon_poi:
lat = np.full(df_pois.shape[0], np.Infinity, dtype=np.float64)
lon = np.full(df_pois.shape[0], np.Infinity, dtype=np.float64)
else:
lat = np.full(data.shape[0], np.Infinity, dtype=np.float64)
lon = np.full(data.shape[0], np.Infinity, dtype=np.float64)
return distances, ids, tags, lat, lon
def _reset_set_window__and_creates_event_id_type(
data: DataFrame, df_events: DataFrame, time_window: float, label_date: str = DATETIME
) -> tuple[Series, Series, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes.
Sets the time window and returns default-valued arrays for the current
distances and the event variables (event_id, event_type).
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input event point of interest data.
time_window : float
Number of seconds of the time window.
label_date : str, optional
Label of data referring to the datetime, by default DATETIME
Returns
-------
window_starts, window_ends, current_distances, event_id, event_type
Examples
--------
>>> from pymove.utils.integration import
_reset_set_window__and_creates_event_id_type
>>> move_df.head()
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
>>> pois_df
lat lon event_id datetime event_type
0 39.984094 116.319236 1 2008-10-24 01:57:57 show do tropykalia
>>> _reset_set_window__and_creates_event_id_type(move_df, pois, 600)
(
0 2008-10-23 05:43:05
Name: datetime, dtype: datetime64[ns],
0 2008-10-23 06:03:05
Name: datetime, dtype: datetime64[ns],
array([inf]),
array([''], dtype=object),
array([''], dtype=object)
)
"""
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_events.reset_index(drop=True, inplace=True)
# compute windows time
window_starts = data[label_date] - Timedelta(seconds=time_window)
window_ends = data[label_date] + Timedelta(seconds=time_window)
# create vector to store distances
current_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
event_type = np.full(data.shape[0], '', dtype='object_')
event_id = np.full(data.shape[0], '', dtype='object_')
return window_starts, window_ends, current_distances, event_id, event_type
def _reset_set_window_and_creates_event_id_type_all(
data: DataFrame, df_events: DataFrame, time_window: float, label_date: str = DATETIME
) -> tuple[Series, Series, ndarray, ndarray, ndarray]:
"""
Resets the indexes of the dataframes.
Sets the time window and returns default-valued arrays for the current
distances and the event variables (event_id, event_type).
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input event point of interest data.
time_window : float
Number of seconds of the time window.
label_date : str
Label of data referring to the datetime.
Returns
-------
window_starts, window_ends, current_distances, event_id, event_type
arrays with default values for join operation
Examples
--------
>>> from pymove.utils.integration import _reset_set_window_and_creates_event_id_type_all # noqa
>>> move_df.head()
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
>>> pois_df
lat lon event_id datetime event_type
0 39.984094 116.319236 1 2008-10-24 01:57:57 show do tropykalia
>>> _reset_set_window_and_creates_event_id_type_all(move_df, pois, 600)
(
0 2008-10-23 05:43:05
Name: datetime, dtype: datetime64[ns],
0 2008-10-23 06:03:05
Name: datetime, dtype: datetime64[ns],
array([None], dtype=object),
array([None], dtype=object),
array([None], dtype=object)
)
"""
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_events.reset_index(drop=True, inplace=True)
# compute windows time
window_starts = data[label_date] - Timedelta(seconds=time_window)
window_ends = data[label_date] + Timedelta(seconds=time_window)
# create vector to store distances
current_distances = np.full(
data.shape[0], None, dtype=np.ndarray
)
event_type = np.full(data.shape[0], None, dtype=np.ndarray)
event_id = np.full(data.shape[0], None, dtype=np.ndarray)
return window_starts, window_ends, current_distances, event_id, event_type
def join_with_pois(
data: DataFrame,
df_pois: DataFrame,
label_id: str = TRAJ_ID,
label_poi_name: str = NAME_POI,
reset_index: bool = True,
inplace: bool = False
):
"""
Performs the integration between trajectories and the closest point of interest.
Generating three new columns referring to the id, name and distance
of the point of interest closest to each point of the trajectory.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
label_id : str, optional
Label of df_pois referring to the Point of Interest id, by default TRAJ_ID
label_poi_name : str, optional
Label of df_pois referring to the Point of Interest name, by default NAME_POI
reset_index : bool, optional
Flag for reset index of the df_pois and data dataframes before the join,
by default True
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Examples
--------
>>> from pymove.utils.integration import join_with_pois
>>> move_df
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984559 116.326696 2008-10-23 10:37:26 1
2 40.002899 116.321520 2008-10-23 10:50:16 1
3 40.016238 116.307691 2008-10-23 11:03:06 1
4 40.013814 116.306525 2008-10-23 11:58:33 2
5 40.009735 116.315069 2008-10-23 23:50:45 2
>>> pois
lat lon id type_poi name_poi
0 39.984094 116.319236 1 policia distrito_pol_1
1 39.991013 116.326384 2 policia policia_federal
2 40.010000 116.312615 3 comercio supermercado_aroldo
>>> join_with_pois(move_df, pois)
lat lon datetime id id_poi \
dist_poi name_poi
0 39.984094 116.319236 2008-10-23 05:53:05 1 1 \
0.000000 distrito_pol_1
1 39.984559 116.326696 2008-10-23 10:37:26 1 1 \
637.690216 distrito_pol_1
2 40.002899 116.321520 2008-10-23 10:50:16 1 3 \
1094.860663 supermercado_aroldo
3 40.016238 116.307691 2008-10-23 11:03:06 1 3 \
810.542998 supermercado_aroldo
4 40.013814 116.306525 2008-10-23 11:58:33 2 3 \
669.973155 supermercado_aroldo
5 40.009735 116.315069 2008-10-23 23:50:45 2 3 \
211.069129 supermercado_aroldo
"""
if not inplace:
data = data.copy()
df_pois = df_pois.copy()
values = _reset_and_creates_id_and_lat_lon(data, df_pois, False, reset_index)
minimum_distances, ids_pois, tag_pois, lat_poi, lon_poi = values
for idx, row in progress_bar(
df_pois.iterrows(), total=len(df_pois), desc='Optimized integration with POIs'
):
# update lat and lon of current index
lat_poi.fill(row[LATITUDE])
lon_poi.fill(row[LONGITUDE])
# First iteration is minimum distances
if idx == 0:
minimum_distances = np.array(
haversine(
lat_poi,
lon_poi,
data[LATITUDE].values,
data[LONGITUDE].values
)
)
ids_pois.fill(row[label_id])
tag_pois.fill(row[label_poi_name])
else:
# compute dist between a POI and ALL
current_distances = np.float64(
haversine(
lat_poi,
lon_poi,
data[LATITUDE].values,
data[LONGITUDE].values
)
)
compare = current_distances < minimum_distances
minimum_distances = np.minimum(
current_distances, minimum_distances, dtype=np.float64
)
ids_pois[compare] = row[label_id]
tag_pois[compare] = row[label_poi_name]
data[ID_POI] = ids_pois
data[DIST_POI] = minimum_distances
data[NAME_POI] = tag_pois
logger.debug('Integration with POI was finalized')
if not inplace:
return data
def join_with_pois_by_category(
data: DataFrame,
df_pois: DataFrame,
label_category: str = TYPE_POI,
label_id: str = TRAJ_ID,
inplace: bool = False
):
"""
Performs the integration between trajectories and each type of points of interest.
Generating new columns referring to the
category and distance from the nearest point of interest
that has this category at each point of the trajectory.
Parameters
----------
data : DataFrame
The input trajectory data.
df_pois : DataFrame
The input point of interest data.
label_category : str, optional
Label of df_pois referring to the point of interest category, by default TYPE_POI
label_id : str, optional
Label of df_pois referring to the point of interest id, by default TRAJ_ID
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Examples
--------
>>> from pymove.utils.integration import join_with_pois_by_category
>>> move_df
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984559 116.326696 2008-10-23 10:37:26 1
2 40.002899 116.321520 2008-10-23 10:50:16 1
3 40.016238 116.307691 2008-10-23 11:03:06 1
4 40.013814 116.306525 2008-10-23 11:58:33 2
5 40.009735 116.315069 2008-10-23 23:50:45 2
>>> pois
lat lon id type_poi name_poi
0 39.984094 116.319236 1 policia distrito_pol_1
1 39.991013 116.326384 2 policia policia_federal
2 40.010000 116.312615 3 comercio supermercado_aroldo
>>> join_with_pois_by_category(move_df, pois)
lat lon datetime id \
id_policia dist_policia id_comercio dist_comercio
0 39.984094 116.319236 2008-10-23 05:53:05 1 \
1 0.000000 3 2935.310277
1 39.984559 116.326696 2008-10-23 10:37:26 1 \
1 637.690216 3 3072.696379
2 40.002899 116.321520 2008-10-23 10:50:16 1 \
2 1385.087181 3 1094.860663
3 40.016238 116.307691 2008-10-23 11:03:06 1 \
2 3225.288831 3 810.542998
4 40.013814 116.306525 2008-10-23 11:58:33 2 \
2 3047.838222 3 669.973155
5 40.009735 116.315069 2008-10-23 23:50:45 2 \
2 2294.075820 3 211.069129
"""
if not inplace:
data = data.copy()
df_pois = df_pois.copy()
logger.debug('Integration with POIs...')
# get a vector with windows time to each point
data.reset_index(drop=True, inplace=True)
df_pois.reset_index(drop=True, inplace=True)
# create numpy array to store new column to DataFrame of movement objects
current_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
ids_pois = np.full(data.shape[0], np.NAN, dtype='object_')
unique_categories = df_pois[label_category].unique()
size_categories = len(unique_categories)
logger.debug('There are %s categories' % size_categories)
for i, c in enumerate(unique_categories, start=1):
# creating lat and lon array to operation
df_category = df_pois[df_pois[label_category] == c]
df_category.reset_index(drop=True, inplace=True)
desc = f'computing dist to {c} category ({i}/{size_categories})'
for idx, row in progress_bar(data.iterrows(), total=len(data), desc=desc):
lat_user = np.full(
df_category.shape[0], row[LATITUDE], dtype=np.float64
)
lon_user = np.full(
df_category.shape[0], row[LONGITUDE], dtype=np.float64
)
# computing distances from the current trajectory point to all POIs of this category
distances = haversine(
lat_user,
lon_user,
df_category[LATITUDE].values,
df_category[LONGITUDE].values,
)
# get index to arg_min and min distance
index_min = np.argmin(distances)
# setting data for a single object movement
current_distances[idx] = np.min(distances)
ids_pois[idx] = df_category.at[index_min, label_id]
data['id_%s' % c] = ids_pois
data['dist_%s' % c] = current_distances
logger.debug('Integration with POI was finalized')
if not inplace:
return data
def join_with_events(
data: DataFrame,
df_events: DataFrame,
label_date: str = DATETIME,
time_window: int = 900,
label_event_id: str = EVENT_ID,
label_event_type: str = EVENT_TYPE,
inplace: bool = False
):
"""
Performs the integration between trajectories and the closest event in time window.
Generating new columns referring to the id, type and distance of the
nearest event within the given time window of each point of the trajectories.
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input events points of interest data.
label_date : str, optional
Label of data referring to the datetime of the input trajectory data,
by default DATETIME
time_window : float, optional
tolerable length of time range in `seconds` for assigning the event's
point of interest to the trajectory point, by default 900
label_event_id : str, optional
Label of df_events referring to the id of the event, by default EVENT_ID
label_event_type : str, optional
Label of df_events referring to the type of the event, by default EVENT_TYPE
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Examples
--------
>>> from pymove.utils.integration import join_with_events
>>> move_df
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984559 116.326696 2008-10-23 10:37:26 1
2 39.993527 116.326483 2008-10-24 00:02:14 2
3 39.978575 116.326975 2008-10-24 00:22:01 3
4 39.981668 116.310769 2008-10-24 01:57:57 3
>>> events
lat lon id datetime event_type event_id
0 39.984094 116.319236 1 2008-10-23 05:53:05 show forro_tropykalia
1 39.991013 116.326384 2 2008-10-23 10:37:26 show dia_do_municipio
2 40.010000 116.312615 3 2008-10-24 01:57:57 feira adocao_de_animais
>>> join_with_events(move_df, events)
lat lon datetime id \
event_type dist_event event_id
0 39.984094 116.319236 2008-10-23 05:53:05 1 \
show 0.000000 forro_tropykalia
1 39.984559 116.326696 2008-10-23 10:37:26 1 \
show 718.144152 dia_do_municipio
2 39.993527 116.326483 2008-10-24 00:02:14 2 \
inf
3 39.978575 116.326975 2008-10-24 00:22:01 3 \
inf
4 39.981668 116.310769 2008-10-24 01:57:57 3 \
feira 3154.296880 adocao_de_animais
Raises
------
ValueError
If feature generation fails
"""
if not inplace:
data = data.copy()
df_events = df_events.copy()
values = _reset_set_window__and_creates_event_id_type(
data, df_events, time_window, label_date
)
*_, current_distances, event_id, event_type = values
window_starts, window_ends, *_ = _reset_set_window__and_creates_event_id_type(
df_events, data, time_window, label_date
)
minimum_distances = np.full(
data.shape[0], np.Infinity, dtype=np.float64
)
for idx, row in progress_bar(
df_events.iterrows(), total=len(df_events), desc='Integration with Events'
):
df_filtered = filters.by_datetime(
data, window_starts[idx], window_ends[idx]
)
if df_filtered is None:
raise ValueError('Filtering datetime failed!')
size_filter = df_filtered.shape[0]
if size_filter > 0:
indexes = df_filtered.index
lat_event = np.full(
df_filtered.shape[0], row[LATITUDE], dtype=np.float64
)
lon_event = np.full(
df_filtered.shape[0], row[LONGITUDE], dtype=np.float64
)
# First iteration is minimum distances
if idx == 0:
minimum_distances[indexes] = haversine(
lat_event,
lon_event,
df_filtered[LATITUDE].values,
df_filtered[LONGITUDE].values,
)
event_id[indexes] = row[label_event_id]
event_type[indexes] = row[label_event_type]
else:
current_distances[indexes] = haversine(
lat_event,
lon_event,
df_filtered[LATITUDE].values,
df_filtered[LONGITUDE].values,
)
compare = current_distances < minimum_distances
minimum_distances = np.minimum(
current_distances, minimum_distances
)
event_id[compare] = row[label_event_id]
event_type[compare] = row[label_event_type]
data[label_event_id] = event_id
data[DIST_EVENT] = minimum_distances
data[label_event_type] = event_type
logger.debug('Integration with events was completed')
if not inplace:
return data
def join_with_event_by_dist_and_time(
data: DataFrame,
df_events: DataFrame,
label_date: str = DATETIME,
label_event_id: str = EVENT_ID,
label_event_type: str = EVENT_TYPE,
time_window: float = 3600,
radius: float = 1000,
inplace: bool = False
):
"""
Performs the integration between trajectories and events on windows.
Generating new columns referring to the category of the point of interest,
the distance between the location of the user and location of the poi
based on the distance and on time of each point of the trajectories.
Parameters
----------
data : DataFrame
The input trajectory data.
df_events : DataFrame
The input events points of interest data.
label_date : str, optional
Label of data referring to the datetime of the input trajectory data,
by default DATETIME
label_event_id : str, optional
Label of df_events referring to the id of the event, by default EVENT_ID
label_event_type : str, optional
Label of df_events referring to the type of the event, by default EVENT_TYPE
time_window : float, optional
tolerable length of time range in `seconds` for assigning the event's
point of interest to the trajectory point, by default 3600
radius: float, optional
maximum radius of pois in `meters`, by default 1000
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Examples
--------
>>> from pymove.utils.integration import join_with_event_by_dist_and_time
>>> move_df
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984559 116.326696 2008-10-23 10:37:26 1
2 39.993527 116.326483 2008-10-24 00:02:14 2
3 39.978575 116.326975 2008-10-24 00:22:01 3
4 39.981668 116.310769 2008-10-24 01:57:57 3
>>> events
lat lon id datetime type_poi name_poi
0 39.984094 116.319236 1 2008-10-23 05:53:05 show forro_tropykalia
1 39.991013 116.326384 2 2008-10-23 10:27:26 corrida racha_de_jumento
2 39.990013 116.316384 2 2008-10-23 10:37:26 show dia_do_municipio
3 40.010000 116.312615 3 2008-10-24 01:57:57 feira adocao_de_animais
>>> join_with_event_by_dist_and_time(move_df, events)
>>> move_df
lat lon datetime id \
type_poi dist_event name_poi
0 39.984094 116.319236 2008-10-23 05:53:05 1 \
[show] [0.0] [forro_tropykalia]
1 39.984559 116.326696 2008-10-23 10:37:26 1 \
[corrida, show] [718.144, 1067.53] [racha_de_jumento, dia_do_municipio]
2 39.993527 116.326483 2008-10-24 00:02:14 2 \
None None None
3 39.978575 116.326975 2008-10-24 00:22:01 3 \
None None None
4 39.981668 116.310769 2008-10-24 01:57:57 3 \
None None None
Raises
------
ValueError
If feature generation fails
"""
if label_date not in df_events:
raise KeyError("POI's DataFrame must contain a %s column" % label_date)
if not inplace:
data = data.copy()
df_events = df_events.copy()
values = _reset_set_window_and_creates_event_id_type_all(
data, df_events, time_window, label_date
)
window_start, window_end, current_distances, event_id, event_type = values
for idx, row in progress_bar(
data.iterrows(), total=len(data), desc='Integration with Events'
):
# set min and max of coordinates by radius
bbox = filters.get_bbox_by_radius(
(row[LATITUDE], row[LONGITUDE]), radius
)
# filter event by radius
df_filtered = filters.by_bbox(
df_events, bbox, inplace=False
)
if df_filtered is None:
raise ValueError('Filtering bbox failed')
# filter event by datetime
filters.by_datetime(
df_filtered,
start_datetime=window_start[idx],
end_datetime=window_end[idx],
inplace=True
)
# get df_filtered size
size_filter = df_filtered.shape[0]
if size_filter > 0:
# resetting index of the filtered data frame
df_filtered.reset_index(drop=True, inplace=True)
# create lat and lon array to operation
lat_user = np.full(
size_filter, row[LATITUDE], dtype=np.float64
)
lon_user = np.full(
size_filter, row[LONGITUDE], dtype=np.float64
)
# calculate distances between points
distances = haversine(
lat_user,
lon_user,
df_filtered[LATITUDE].to_numpy(),
df_filtered[LONGITUDE].to_numpy()
)
current_distances[idx] = distances
event_type[idx] = df_filtered[label_event_type].to_numpy(dtype=np.ndarray)
event_id[idx] = df_filtered[label_event_id].to_numpy(dtype=np.ndarray)
data[label_event_id] = event_id
data[DIST_EVENT] = current_distances
data[label_event_type] = event_type
logger.debug('Integration with event was completed')
if not inplace:
return data
def join_with_home_by_id(
data: DataFrame,
df_home: DataFrame,
label_id: str = TRAJ_ID,
label_address: str = ADDRESS,
label_city: str = CITY,
drop_id_without_home: bool = False,
inplace: bool = False
):
"""
Performs the integration between trajectories and home points.
Generating new columns referring to the distance of the nearest
home point, address and city of each trajectory point.
Parameters
----------
data : DataFrame
The input trajectory data.
df_home : DataFrame
The input home points data.
label_id : str, optional
Label of df_home referring to the home point id, by default TRAJ_ID
label_address : str, optional
Label of df_home referring to the home point address, by default ADDRESS
label_city : str, optional
Label of df_home referring to the point city, by default CITY
drop_id_without_home : bool, optional
flag as an option to drop id's that don't have houses, by default False
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Examples
--------
>>> from pymove.utils.integration import join_with_home_by_id
>>> move_df
lat lon datetime id
0 39.984094 116.319236 2008-10-23 05:53:05 1
1 39.984559 116.326696 2008-10-23 10:37:26 1
2 40.002899 116.321520 2008-10-23 10:50:16 1
3 40.016238 116.307691 2008-10-23 11:03:06 1
4 40.013814 116.306525 2008-10-23 11:58:33 2
5 40.009735 116.315069 2008-10-23 23:50:45 2
>>> home_df
lat lon id formatted_address city
0 39.984094 116.319236 1 rua da mae quixiling
1 40.013821 116.306531 2 rua da familia quixeramoling
>>> join_with_home_by_id(move_df, home_df)
>>> move_df
id lat lon datetime dist_home \
home city
0 1 39.984094 116.319236 2008-10-23 05:53:05 0.000000 \
rua da mae quixiling
1 1 39.984559 116.326696 2008-10-23 10:37:26 637.690216 \
rua da mae quixiling
2 1 40.002899 116.321520 2008-10-23 10:50:16 2100.053501 \
rua da mae quixiling
3 1 40.016238 116.307691 2008-10-23 11:03:06 3707.066732 \
rua da mae quixiling
4 2 40.013814 116.306525 2008-10-23 11:58:33 0.931101 \
rua da familia quixeramoling
5 2 40.009735 116.315069 2008-10-23 23:50:45 857.417540 \
rua da familia quixeramoling
"""
if not inplace:
data = data.copy()
df_home = df_home.copy()
ids_without_home = []
if data.index.name is None:
logger.debug(f'...setting {label_id} as index')
data.set_index(label_id, inplace=True)
for idx in progress_bar(
data.index.unique(), total=len(data.index.unique()), desc='Integration with Home'
):
filter_home = df_home[label_id] == idx
if df_home[filter_home].shape[0] == 0:
logger.debug(f'...id: {idx} has not HOME')
ids_without_home.append(idx)
else:
home = df_home[filter_home].iloc[0]
lat_user = data.at[idx, LATITUDE].values
lon_user = data.at[idx, LONGITUDE].values
# if user has a single tuple
if not isinstance(lat_user, np.ndarray):
lat_home = home[LATITUDE].values
lon_home = home[LONGITUDE].values
data.at[idx, DIST_HOME] = haversine(
lat_user, lon_user, lat_home, lon_home
)
data.at[idx, HOME] = home[label_address]
data.at[idx, label_city] = home[label_city]
else:
lat_home = np.full(
data.loc[idx].shape[0], home[LATITUDE], dtype=np.float64
)
lon_home = np.full(
data.loc[idx].shape[0], home[LONGITUDE], dtype=np.float64
)
data.at[idx, DIST_HOME] = haversine(
lat_user, lon_user, lat_home, lon_home
)
data.at[idx, HOME] = np.array(home[label_address])
data.at[idx, label_city] = np.array(home[label_city])
data.reset_index(inplace=True)
logger.debug('... Resetting index')
if drop_id_without_home:
data.drop(data.loc[data[TRAJ_ID].isin(ids_without_home)].index, inplace=True)
if not inplace:
return data
def merge_home_with_poi(
data: DataFrame,
label_dist_poi: str = DIST_POI,
label_name_poi: str = NAME_POI,
label_id_poi: str = ID_POI,
label_home: str = HOME,
label_dist_home: str = DIST_HOME,
drop_columns: bool = True,
inplace: bool = False
):
"""
Performs or merges the points of interest and the trajectories.
Considering the starting points as other points of interest,
generating a new DataFrame.
Parameters
----------
data : DataFrame
The input trajectory data, with join_with_pois and join_with_home_by_id applied.
label_dist_poi : str, optional
Label of data referring to the distance from the nearest point of interest,
by default DIST_POI
label_name_poi : str, optional
Label of data referring to the name from the nearest point of interest,
by default NAME_POI
label_id_poi : str, optional
Label of data referring to the id from the nearest point of interest,
by default ID_POI
label_home : str, optional
Label of df_home referring to the home point, by default HOME
label_dist_home: str, optional
Label of df_home referring to the distance to the home point,
by default DIST_HOME
drop_columns : bool, optional
Flag that controls the deletion of the columns referring to the
id and the distance from the home point, by default True
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Examples
--------
>>> from pymove.utils.integration import (
>>> merge_home_with_poi,
>>> join_with_home_by_id
>>> )
>>> move_df
lat lon datetime id \
id_poi dist_poi name_poi
0 39.984094 116.319236 2008-10-23 05:53:05 1 \
1 0.000000 distrito_pol_1
1 39.984559 116.326696 2008-10-23 10:37:26 1 \
1 637.690216 distrito_pol_1
2 40.002899 116.321520 2008-10-23 10:50:16 1 \
2 1385.087181 policia_federal
3 40.016238 116.307691 2008-10-23 11:03:06 1 \
2 3225.288831 policia_federal
4 40.013814 116.306525 2008-10-23 11:58:33 2 \
2 3047.838222 policia_federal
5 40.009735 116.315069 2008-10-23 23:50:45 2 \
2 2294.075820 policia_federal
>>> home_df
lat lon id formatted_address city
0 39.984094 116.319236 1 rua da mae quixiling
1 40.013821 116.306531 2 rua da familia quixeramoling
>>> join_with_home_by_id(move, home_df, inplace=True)
>>> move_df
id lat lon datetime id_poi dist_poi \
name_poi dist_home home city
0 1 39.984094 116.319236 2008-10-23 05:53:05 1 0.000000 \
distrito_pol_1 0.000000 rua da mae quixiling
1 1 39.984559 116.326696 2008-10-23 10:37:26 1 637.690216 \
distrito_pol_1 637.690216 rua da mae quixiling
2 1 40.002899 116.321520 2008-10-23 10:50:16 2 1385.087181 \
policia_federal 2100.053501 rua da mae quixiling
3 1 40.016238 16.307691 2008-10-23 11:03:06 2 3225.288831 \
policia_federal 3707.066732 rua da mae quixiling
4 2 40.013814 116.306525 2008-10-23 11:58:33 2 3047.838222 \
policia_federal 0.931101 rua da familia quixeramoling
5 2 40.009735 116.315069 2008-10-23 23:50:45 2 2294.075820 \
policia_federal 857.417540 rua da familia quixeramoling
>>> merge_home_with_poi(move_df)
id lat lon datetime id_poi \
dist_poi name_poi city
0 1 39.984094 116.319236 2008-10-23 05:53:05 rua da mae \
0.000000 home quixiling
1 1 39.984559 116.326696 2008-10-23 10:37:26 rua da mae \
637.690216 home quixiling
2 1 40.002899 116.321520 2008-10-23 10:50:16 2 \
1385.087181 policia_federal quixiling
3 1 40.016238 116.307691 2008-10-23 11:03:06 2 \
3225.288831 policia_federal quixiling
4 2 40.013814 116.306525 2008-10-23 11:58:33 rua da familia \
0.931101 home quixeramoling
5 2 40.009735 116.315069 2008-10-23 23:50:45 rua da familia \
857.417540 home quixeramoling
"""
if not inplace:
data = data.copy()
logger.debug('merge home with POI using shortest distance')
idx = data[data[label_dist_home] <= data[label_dist_poi]].index
data.loc[idx, label_name_poi] = label_home
data.loc[idx, label_dist_poi] = data.loc[idx, label_dist_home]
data.loc[idx, label_id_poi] = data.loc[idx, label_home]
if drop_columns:
data.drop(columns=[label_dist_home, label_home], inplace=True)
if not inplace:
return data
|
{"hexsha": "90e240765c74d31b1b1f00293fc8c190218314c0", "size": 54509, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymove/utils/integration.py", "max_stars_repo_name": "JuniorNunes15/PyMove", "max_stars_repo_head_hexsha": "ee5b68282502bfcb9cf38b52dcdefed5bd927a90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2019-08-06T14:24:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T11:11:03.000Z", "max_issues_repo_path": "pymove/utils/integration.py", "max_issues_repo_name": "JuniorNunes15/PyMove", "max_issues_repo_head_hexsha": "ee5b68282502bfcb9cf38b52dcdefed5bd927a90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 49, "max_issues_repo_issues_event_min_datetime": "2019-09-20T14:06:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T22:13:43.000Z", "max_forks_repo_path": "pymove/utils/integration.py", "max_forks_repo_name": "JuniorNunes15/PyMove", "max_forks_repo_head_hexsha": "ee5b68282502bfcb9cf38b52dcdefed5bd927a90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-08-15T18:13:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-30T16:26:19.000Z", "avg_line_length": 37.827203331, "max_line_length": 100, "alphanum_fraction": 0.5911317397, "include": true, "reason": "import numpy,from numpy", "num_tokens": 15377}
|
#!/usr/bin/env python
# /****************************************************************************
# * Copyright (c) 2019 Parker Lusk and Jesus Tordesillas Torres. All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# *
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in
# * the documentation and/or other materials provided with the
# * distribution.
# * 3. Neither the name of this repo nor the names of its contributors may
# * be used to endorse or promote products derived from this software
# * without specific prior written permission.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# *
# ****************************************************************************/
import rospy
import numpy as np
import csv
from itertools import chain
from std_srvs.srv import Trigger, TriggerResponse
#from acl_msgs.msg import QuadGoal, ViconState
from geometry_msgs.msg import PoseStamped
from visualization_msgs.msg import MarkerArray
from solver import *
from utils import *
TAKEOFF_ALT = 2.5
class Flipper:
"""Flipper"""
def __init__(self):
#self.sub_state = rospy.Subscriber('vicon', ViconState, self.state_cb)
self.srv_flip = rospy.Service('window', Trigger, self.window_cb)
self.srv_flip = rospy.Service('line', Trigger, self.line_cb)
self.srv_flip = rospy.Service('flip_pitch', Trigger, self.flip_pitch_cb)
self.srv_flip = rospy.Service('flip', Trigger, self.flip_cb)
self.srv_flip = rospy.Service('flip_trans', Trigger, self.flip_trans_cb)
#self.srv_takeoff = rospy.Service('takeoff', Trigger, self.takeoff_cb)
# outer loop setpoints
#self.pub_goal = rospy.Publisher('goal', QuadGoal, queue_size=1)
self.pub_drone_markers=rospy.Publisher('snapshots', MarkerArray, queue_size=10)
self.pub_window=rospy.Publisher('window', Marker, queue_size=10)
# initialize members
#self.state_msg = ViconState()
# desired control rate
self.dc = 0.01
# motor spinup time, seconds
self.spinup_secs = 0
def window_cb(self, req):
success = self.generate_trajectory(WINDOW)
return TriggerResponse(success=success, message='')
def line_cb(self, req):
success = self.generate_trajectory(LINE)
return TriggerResponse(success=success, message='')
def flip_trans_cb(self, req):
success = self.generate_trajectory(FLIP_TRANS)
return TriggerResponse(success=success, message='')
def flip_pitch_cb(self, req):
success = self.generate_trajectory(FLIP_PITCH)
return TriggerResponse(success=success, message='')
def flip_cb(self, req):
success = self.generate_trajectory(FLIP)
return TriggerResponse(success=success, message='')
# def takeoff_cb(self, req):
# ts = rospy.get_time()
# # Wait for motors to spin up
# rospy.sleep(self.spinup_secs)
# goal = QuadGoal()
# goal.header.stamp = rospy.Time.now()
# goal.pos.x, goal.pos.y, goal.pos.z = (self.state_msg.pose.position.x, self.state_msg.pose.position.y, TAKEOFF_ALT)
# goal.vel.x, goal.vel.y, goal.vel.z = (0., 0., 0.)
# goal.accel.x, goal.accel.y, goal.accel.z = (0., 0., 0.)
# goal.jerk.x, goal.jerk.y, goal.jerk.z = (0., 0., 0.)
# goal.xy_mode = goal.z_mode = QuadGoal.MODE_POS
# T=0.02
# increment=0.5
# goal.pos.z=self.state_msg.pose.position.z
# for i in range(100):
# goal.pos.z=min(goal.pos.z+increment,TAKEOFF_ALT);
# rospy.sleep(T)
# self.pub_goal.publish(goal)
# return TriggerResponse(success=True, message='')
# def state_cb(self, msg):
# self.state_msg = msg
def generate_trajectory(self,type):
# start optimiz at current pose
x0 = np.zeros((40,))
x0[0] = 0#self.state_msg.pose.position.x
x0[1] = 0#self.state_msg.pose.position.y
x0[2] = 0#self.state_msg.pose.position.z
xf = np.zeros((40,))
xf = np.copy(x0)
if type == FLIP_TRANS or type == WINDOW:
xf[0] = x0[0] + 5
if type == LINE:
xf[0] = x0[0] + 10
s = Solver(JERK)
s.setTypeTrajectory(type)
s.setInitialState(x0.tolist())
s.setFinalState(xf.tolist())
if type == FLIP or type == FLIP_TRANS or type == FLIP_PITCH or type == WINDOW:
x = x0[0]
y = x0[1]
z = 5.5
r, p, yaw = 0, 0, 0
if type == FLIP_TRANS or type == FLIP_PITCH:
yaw = 3.14/2.0
x = (x0[0]+xf[0])/2.0
if type == WINDOW:
y = x0[1]+2
yaw = 3.14/2.0
p = 3.14/2.0
z = 3.5
x = (x0[0]+xf[0])/2.0
s.setGate(x,y,z,r,p,yaw)
#spawnWindowInGazebo(x,y,z,r,p,yaw)
self.pub_window.publish(getMarkerWindow(x,y,z,r,p,yaw))
s.setMaxValues([10,90,200,5000,1000000]) #Vel, accel, jerk, snap,...
s.setRadius(1)
solved=False
for dt in np.linspace(0.1, 4.0, num=50): #Line search on dt
print "Trying with dt= ",dt
s.setN(20,dt)
solved=s.solve()
if solved:
break
if not solved:
print("No solution found after doing line search on dt")
return False
# Visualize Markers in RVIZ
K = int(s.dt/self.dc)
n = 0
allPositions = []
allAccelerations = []
while not rospy.is_shutdown() and n<s.N:
# publish each step at a uniform rate
rate = rospy.Rate(1.0/self.dc)
k = 0
while not rospy.is_shutdown() and k<K:
p, v, a, j = self.getHighRateGoal(s, n, k)
allPositions.append(p)
allAccelerations.append(a)
k += 1
# increment the segment number we are working with
n += 1
self.pub_drone_markers.publish(getMarkerArray(GREEN,allPositions,allAccelerations))
#
# Optimization Results
#
csvdata = []
# how many steps are in an optimization segment
K = int(s.dt/self.dc)
n = 0
while not rospy.is_shutdown() and n<s.N:
# publish each step at a uniform rate
rate = rospy.Rate(1.0/self.dc)
k = 0
while not rospy.is_shutdown() and k<K:
p, v, a, j = self.getHighRateGoal(s, n, k)
# goal = QuadGoal()
# goal.header.stamp = rospy.Time.now()
# goal.pos.x, goal.pos.y, goal.pos.z = p
# goal.vel.x, goal.vel.y, goal.vel.z = v
# goal.accel.x, goal.accel.y, goal.accel.z = a
# goal.jerk.x, goal.jerk.y, goal.jerk.z = j
# goal.xy_mode = goal.z_mode = QuadGoal.MODE_POS
# self.pub_goal.publish(goal)
csvdata.append(','.join(map(str,chain.from_iterable((p,v,a,j)))))
# maintain uniform timing (with period dc) for each intra-segment step
rate.sleep()
k += 1
# increment the segment number we are working with
n += 1
# Publish final state goal
# goal = QuadGoal()
# goal.header.stamp = rospy.Time.now()
# goal.pos.x, goal.pos.y, goal.pos.z = xf[0:3]
# goal.vel.x, goal.vel.y, goal.vel.z = xf[3:6]
# goal.accel.x, goal.accel.y, goal.accel.z = xf[6:9]
# goal.jerk.x, goal.jerk.y, goal.jerk.z = xf[9:12]
# self.pub_goal.publish(goal)
with open("fliptraj.csv", "wb") as file:
for line in csvdata:
file.write(line)
file.write('\n')
return True
def getHighRateGoal(self, s, n, k):
# tau \in [0, dt]. Convert step index k to a time tau.
tau = k*self.dc
p = tuple(s.getPos(n, tau, ii).getValue() for ii in range(3))
v = tuple(s.getVel(n, tau, ii).getValue() for ii in range(3))
a = tuple(s.getAccel(n, tau, ii).getValue() for ii in range(3))
j = tuple(s.getJerk(n, tau, ii).getValue() for ii in range(3))
return p, v, a, j
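# Layout of each row written to fliptraj.csv by generate_trajectory (12 comma-separated floats per control step):
#   px, py, pz, vx, vy, vz, ax, ay, az, jx, jy, jz
# i.e. position, velocity, acceleration and jerk sampled every self.dc seconds. This is a reading of the code
# above, not an authoritative file-format specification.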
if __name__ == '__main__':
rospy.init_node('flipper', anonymous=False)
try:
obj = Flipper()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
{"hexsha": "fd617c56f88f56202f0ae0b6808f14eb8da889f7", "size": 9708, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/flipper.py", "max_stars_repo_name": "jtorde/uav_trajectory_optimizer_gurobi", "max_stars_repo_head_hexsha": "ed50a357ee6e6bea40d6e54656f8c223fb551b3c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2019-07-28T21:03:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T03:24:48.000Z", "max_issues_repo_path": "scripts/flipper.py", "max_issues_repo_name": "duobin/uav_trajectory_optimizer_gurobi", "max_issues_repo_head_hexsha": "ed50a357ee6e6bea40d6e54656f8c223fb551b3c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/flipper.py", "max_forks_repo_name": "duobin/uav_trajectory_optimizer_gurobi", "max_forks_repo_head_hexsha": "ed50a357ee6e6bea40d6e54656f8c223fb551b3c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2019-06-22T10:10:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T03:27:48.000Z", "avg_line_length": 32.4682274247, "max_line_length": 130, "alphanum_fraction": 0.5774618871, "include": true, "reason": "import numpy", "num_tokens": 2480}
|
import numpy as np
def stoi(reference, estimation, sample_rate):
"""Wrapper to allow independent axis for STOI.
Args:
reference: Shape [..., num_samples]
estimation: Shape [..., num_samples]
sample_rate: Sampling rate of the signals in Hz.
Returns:
STOI score(s); a scalar for 1-D inputs, otherwise an array with one score per leading entry.
"""
from pystoi.stoi import stoi as pystoi_stoi
estimation, reference = np.broadcast_arrays(estimation, reference)
if reference.ndim >= 2:
return np.array([
stoi(x_entry, y_entry, sample_rate=sample_rate)
for x_entry, y_entry in zip(reference, estimation)
])
else:
return pystoi_stoi(reference, estimation, fs_sig=sample_rate)
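# Usage sketch (requires the pystoi package; the random signals below are purely illustrative):
#   import numpy as np
#   ref = np.random.randn(2, 16000)                 # two reference signals, 1 s at 16 kHz
#   est = ref + 0.1 * np.random.randn(2, 16000)     # noisy estimates
#   scores = stoi(ref, est, sample_rate=16000)      # -> array with one STOI score per signal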
|
{"hexsha": "ca6126cd4d7deb25bf6297fa7cb373aadc866499", "size": 651, "ext": "py", "lang": "Python", "max_stars_repo_path": "pb_bss/evaluation/module_stoi.py", "max_stars_repo_name": "mdeegen/pb_bss", "max_stars_repo_head_hexsha": "e8c380e27d82707e8d2b2d83c5c918d47ea5d89f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 171, "max_stars_repo_stars_event_min_datetime": "2018-10-22T09:34:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T16:09:20.000Z", "max_issues_repo_path": "pb_bss/evaluation/module_stoi.py", "max_issues_repo_name": "mdeegen/pb_bss", "max_issues_repo_head_hexsha": "e8c380e27d82707e8d2b2d83c5c918d47ea5d89f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2019-03-14T09:42:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-03T07:13:03.000Z", "max_forks_repo_path": "pb_bss/evaluation/module_stoi.py", "max_forks_repo_name": "mdeegen/pb_bss", "max_forks_repo_head_hexsha": "e8c380e27d82707e8d2b2d83c5c918d47ea5d89f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 40, "max_forks_repo_forks_event_min_datetime": "2018-10-11T08:01:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T13:26:15.000Z", "avg_line_length": 25.0384615385, "max_line_length": 70, "alphanum_fraction": 0.6466973886, "include": true, "reason": "import numpy", "num_tokens": 146}
|
import numpy as np
"""
This script was used to check whether every word from the gold standard
is in the ukwac_100m corpus. The check was successful.
"""
wacfile = "../ukwac_100m/ukwac_100m_oneline.txt"
menfile = "data/MEN_dataset_natural_form_full"
wordlist_WAC = []
checklist = []
unshared_words = []
with open(wacfile, "r") as f:
print("reading",wacfile,"...")
for line in f:
line = line.rstrip()
wordlist_WAC.extend(line.split())
print("Finished reading. Tokens in the corpus:",len(wordlist_WAC))
with open(menfile, "r") as f:
for line in f:
words = line.rstrip().split()[:2]
checklist.extend(words)
wordset_WAC = set(wordlist_WAC)
checkset = set(checklist)
print("Types in the corpus: ", len(wordset_WAC))
print("Types in the checklist:", len(checkset))
unshared_words = list(checkset.difference(wordset_WAC))
if len(unshared_words) == 0:
print("All required words are in the corpus.")
else:
print(unshared_words[:min(int(np.ceil(len(unshared_words)/10)), 25)])
print("These are some of the",len(unshared_words),"words are not in the corpus.")
|
{"hexsha": "41f92766b1f1c3ce16204bbf31a819ec27ab870e", "size": 1147, "ext": "py", "lang": "Python", "max_stars_repo_path": "checker.py", "max_stars_repo_name": "SimonPreissner/fruitfly", "max_stars_repo_head_hexsha": "99dffa7c1ed31da39513eda1ddacc4f9b968a6df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-12-10T09:44:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-06T04:47:00.000Z", "max_issues_repo_path": "checker.py", "max_issues_repo_name": "SimonPreissner/fruitfly", "max_issues_repo_head_hexsha": "99dffa7c1ed31da39513eda1ddacc4f9b968a6df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "checker.py", "max_forks_repo_name": "SimonPreissner/fruitfly", "max_forks_repo_head_hexsha": "99dffa7c1ed31da39513eda1ddacc4f9b968a6df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3095238095, "max_line_length": 85, "alphanum_fraction": 0.6774193548, "include": true, "reason": "import numpy", "num_tokens": 306}
|
import argparse
import sys
import torch
import yaml
import numpy as np
import wandb
from train import load_model
from text import text_to_sequence
# Needed to unpickle waveglow model
sys.path.append("./waveglow/")
# Needed to avoid warnings
torch.nn.Module.dump_patches = True
with open("hparams.yaml") as yamlfile:
HPARAMS = yaml.safe_load(yamlfile)
def main(tacotron_artifact, waveglow_artifact, sentence_file, table_artifact):
# Initialize inference table
inference_table = wandb.Table(
columns=["predicted_caption" if table_artifact else "sentence", "audio"]
)
# Start wandb tracking and get artifacts
# run = wandb.init(job_type="inference", entity="faux", project="image-to-speech")
wandb.init(job_type="inference")
if sentence_file:
wandb.config.update({"source": sentence_file})
taco_artifact = wandb.use_artifact(tacotron_artifact)
wave_artifact = wandb.use_artifact(waveglow_artifact)
# Load sentences for inference
if sentence_file:
with open(sentence_file) as infile:
sentences = [
sentence.strip() for sentence in infile.readlines() if sentence.strip()
]
elif table_artifact:
table_artifact = wandb.use_artifact(table_artifact)
table = table_artifact.get("eval_table")
sentences = table.get_column("predicted_caption")
sentences = [(s, s.replace("<end>", "")) for s in sentences][:20]
# Load tacotron model
checkpoint_path = taco_artifact.get_path("model.pt").download()
model = load_model(HPARAMS)
model.load_state_dict(torch.load(checkpoint_path))
model.cuda().eval()
# Load waveglow model
waveglow_path = wave_artifact.get_path("pretrained-model.pt").download()
waveglow = torch.load(waveglow_path)["model"]
waveglow.cuda().eval()
for k in waveglow.convinv:
k.float()
# Tokenize sentences
sequences = [
torch.autograd.Variable(
torch.from_numpy(
                np.array(text_to_sequence(text[1] if table_artifact else text, ["english_cleaners"]))[None, :]
)
)
.cuda()
.long()
for text in sentences
]
# Run inference for each sequence
for sequence, sentence in zip(sequences, sentences):
_, mel_outputs_postnet, _, _ = model.inference(sequence)
with torch.no_grad():
audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)
audio = audio.cpu().numpy().astype(np.float32)
inference_table.add_data(
sentence[0] if table_artifact else sentence,
wandb.Audio(audio[0], sample_rate=HPARAMS["sampling_rate"]),
)
if table_artifact:
inference_table = wandb.JoinedTable(table, inference_table, "predicted_caption")
wandb.log({"eval_table": inference_table})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("tacotron", help="tacotron model artifact to load", type=str)
parser.add_argument("waveglow", help="waveglow model artifact to load", type=str)
parser.add_argument(
"-s",
"--sentences",
help="text file containing sentences to be converted to audio",
type=str,
metavar="sentences",
)
parser.add_argument(
"-t",
"--table",
help="Artifact containing table with sentences to predict on.",
)
args = parser.parse_args()
if bool(args.sentences) == bool(args.table):
print("Only one of --sentences and --table may be specified.")
exit(1)
main(args.tacotron, args.waveglow, args.sentences, args.table)
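# Example invocation (artifact names are hypothetical; exactly one of -s/--sentences or
# -t/--table must be given, matching the check above):
#   python inference.py entity/project/tacotron-model:v3 entity/project/waveglow-model:v1 -s sentences.txt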
|
{"hexsha": "520ded8782057bccb26df1887102bb7a18dddc3d", "size": 3644, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference.py", "max_stars_repo_name": "bcsherma/tacotron2", "max_stars_repo_head_hexsha": "827e4d59c14da3d41cabd54864bc08fd71eef3fa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-02T01:29:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T01:29:29.000Z", "max_issues_repo_path": "inference.py", "max_issues_repo_name": "bcsherma/tacotron2", "max_issues_repo_head_hexsha": "827e4d59c14da3d41cabd54864bc08fd71eef3fa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference.py", "max_forks_repo_name": "bcsherma/tacotron2", "max_forks_repo_head_hexsha": "827e4d59c14da3d41cabd54864bc08fd71eef3fa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9649122807, "max_line_length": 88, "alphanum_fraction": 0.6580680571, "include": true, "reason": "import numpy", "num_tokens": 844}
|
import math
import numpy as np
import numpy.random as random
import torch
import torch.nn as nn
import copy
import torch.nn.functional as F
import torch.optim as optim
from scipy.stats import levy_stable
# This script is only for propagating randomly initialized networks with square connectivity matrices (NOT FOR TRAINING!)
# derivative for tanh, only for scalar output
def dtanh(x):
m = nn.Tanh()
x = torch.autograd.Variable(x, requires_grad=True)
y = m(x)
y.backward( torch.ones_like(x) )
return x.grad
# resample a weight matrix from a Gaussian with the given (or the matrix's own) mean and std
def randomize_weight(w, mu=None, sigma=None):
if mu is None:
mu = w.mean()
if sigma is None:
sigma = w.std()
return sigma * np.random.randn(*w.shape) + mu
# class randnet(object):
class randnet(nn.Module):
# add input_layer=None
def __init__(self, input_dim , width, depth, num_classes, w_alpha, w_mult, **kwargs):
super(randnet, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.w_alpha = w_alpha
self.w_mult = w_mult
if num_classes is None:
num_classes = width
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=False),
nn.Tanh(),
*layers,
nn.Linear(self.width, self.num_classes, bias=False),
nn.Tanh(),
)
if 'w_seed' in kwargs:
# set seed for weights
w_seed = kwargs.get('w_seed')
#random.seed(seed=w_seed)
# load weights w.r.t. w_alpha and w_mult (method 1)
if False:
for i, layer_id in enumerate(list(self.state_dict().keys())):
layer_dim = self.state_dict()[layer_id].shape
# simulate weight
alpha = w_alpha
beta = 0
loc = 0
scale_multiplier = w_mult
if 'w_seed' in kwargs:
random.seed(seed=w_seed + i * 100000)
scale = (1/(2*np.sqrt(layer_dim[0] * layer_dim[1])))**(1/alpha) # this is our standard unit of the scale for stable init
new_weights = levy_stable.rvs(alpha, beta, loc, scale * scale_multiplier, size=layer_dim)
self.state_dict()[layer_id].data.copy_(torch.tensor(new_weights))
# load weights w.r.t. w_alpha and w_mult (method 2)
with torch.no_grad():
ii = 0
for param in self.parameters():
param_dim = param.shape
# simulate weight
alpha = w_alpha
beta = 0
loc = 0
scale_multiplier = w_mult
if 'w_seed' in kwargs:
random.seed(seed=w_seed + ii * 100000)
scale = (1/(2*np.sqrt( int(np.prod(param_dim)) )))**(1/alpha) # this is our standard unit of the scale for stable init
# print('prenew_weights')
new_weights = levy_stable.rvs(alpha, beta, loc, scale * scale_multiplier, size=param_dim)
# print('postnew_weights')
#new_weights = torch.ones(param_dim).to(torch.double)
#param.copy_(new_weights)
param.copy_(torch.from_numpy(new_weights))
ii += 1
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.Tanh())
return layers
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
# preactivation outputs
"""
def preact_layer(self, x):
# number of hidden layers
hidden = [None] * (self.depth - 1)
x = x.view(x.size(0), -1)
ell = 2
for idx in range(self.depth - 1):
if idx == 0:
hidden[idx] = self.fc[idx](x)
else:
hidden[idx] = self.fc[ell * idx - 1: ell * idx + 1](hidden[idx - 1])
return hidden + [self.fc[-2:](hidden[-1])]
"""
def preact_layer(self, x):
x = x.view(x.size(0), self.input_dim)
hidden_all = torch.empty((self.depth+1, x.shape[0], x.shape[1])) # includes input layer
#hidden_all = torch.empty((self.depth+1, x.shape[0], x.shape[1])).type(torch.DoubleTensor)
hidden_all[0,:,:] = x # input layers
ell = 2
for idx in range(self.depth):
if idx == 0:
hidden_all[idx + 1,:,:] = self.fc[idx](x)
else:
hidden_all[idx + 1,:,:] = self.fc[ell * idx - 1: ell * idx + ell - 1](hidden_all[idx,:,:].clone())
#hidden_all[self.depth,:,:] = self.fc[-2:](hidden_all[self.depth-1,:,:].clone())
return hidden_all
# postactivation outputs (key is to return everything altogether as just one big tensor)
def postact_layer(self, x):
x = x.view(x.size(0), self.input_dim)
hidden_all = torch.empty((self.depth+1, x.shape[0], x.shape[1])) # includes input layer
#hidden_all = torch.empty((self.depth+1, x.shape[0], x.shape[1])).to(torch.double) # includes input layer
hidden_all[0,:,:] = x # input layers
ell = 2
for idx in range(self.depth - 1):
if idx == 0:
hidden_all[idx + 1,:,:] = self.fc[ell * idx: ell * idx + ell](x)
else:
hidden_all[idx + 1,:,:] = self.fc[ell * idx: ell * idx + ell](hidden_all[idx,:,:].clone())
#hidden_all[self.depth,:,:] = self.sequential[-2:](hidden_all[self.depth-1,:,:])
hidden_all[self.depth,:,:] = self.fc[-2:](hidden_all[self.depth-1,:,:].clone())
#torch.autograd.set_detect_anomaly(True)
return hidden_all
# xs is the input
#def get_acts_and_derivatives(self, xs, include_hessian=False):
# the problem for this is that the last weight matrix in all these settings are not symmetrical, i.e. for mnist W_L is 784 by 10 (might need to adjust this in the future)
"""
def jacob_ls(self, x):
# check "The Emergence of Spectral Universality in Deep Networks"
m = nn.Tanh()
preact_h = self.preact_layer(x)
# get weights
weights = [p for p in self.parameters()]
dphi_h = dtanh(preact_h[0][0])
DW_l = torch.matmul(torch.diag( dphi_h ), weights[0])
#DW_l = torch.matmul(torch.diag( m(preact_h[0][0]) ), weights[0])
DW_ls = [DW_l]
# due to the last matrix being non-square, the case l = L is not included
for i in range(1, len(preact_h) - 1):
dphi_h = dtanh(preact_h[i][0])
DW_l = torch.matmul(torch.diag( dphi_h ), weights[i])
#DW_l = torch.matmul(torch.diag( m(preact_h[i][0]) ), weights[i])
DW_ls.append(DW_l)
return DW_ls
"""
# great circle
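# NOTE: the class below looks like leftover Keras-style code: it references a Keras backend `K`
# and calls super().__init__(**kwargs) as if subclassing a Keras Layer, neither of which is
# imported or defined in this file.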
class GreatCircle():
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
U, _, _ = np.linalg.svd(np.random.randn(output_dim, 2), full_matrices=False)
self.U = K.variable(U.T)
self.scale = K.variable(1.0)
kwargs['input_shape'] = (1, )
super(GreatCircle, self).__init__(**kwargs)
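# Minimal usage sketch (sizes are arbitrary; input_dim, width and num_classes are kept equal so
# the connectivity matrices stay square, as the header comment of this script requires):
if __name__ == "__main__":
    net = randnet(input_dim=50, width=50, depth=4, num_classes=None,
                  w_alpha=1.5, w_mult=1.0, w_seed=42)
    x = torch.randn(8, 50)
    out = net(x)                  # post-activation network output, shape (8, 50)
    pre = net.preact_layer(x)     # pre-activations of every layer, including the input
    print(out.shape, pre.shape)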
|
{"hexsha": "78cd7198785d4a0dfaa74846b8335d2d7440295d", "size": 7443, "ext": "py", "lang": "Python", "max_stars_repo_path": "nporch/Randnet.py", "max_stars_repo_name": "CKQu1/extended-criticality-dnn", "max_stars_repo_head_hexsha": "e19efd34f84dc684b31b3ba0b1e41432dfc1bb59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nporch/Randnet.py", "max_issues_repo_name": "CKQu1/extended-criticality-dnn", "max_issues_repo_head_hexsha": "e19efd34f84dc684b31b3ba0b1e41432dfc1bb59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nporch/Randnet.py", "max_forks_repo_name": "CKQu1/extended-criticality-dnn", "max_forks_repo_head_hexsha": "e19efd34f84dc684b31b3ba0b1e41432dfc1bb59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6186046512, "max_line_length": 174, "alphanum_fraction": 0.5552868467, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1850}
|
#!/usr/bin/env python3
###################################################################################
# Copyright 2021 National Technology & Engineering Solutions of Sandia, #
# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the #
# U.S. Government retains certain rights in this software. #
# If you want to use this code, please refer to the README.rst and LICENSE files. #
###################################################################################
import numpy as np
from PyNucleus.base import driver, solverFactory, INDEX
from PyNucleus.base.linear_operators import Dense_LinearOperator
from PyNucleus.fem import (simplexXiaoGimbutas, plotManager,
P0_DoFMap, NO_BOUNDARY)
from PyNucleus.multilevelSolver import hierarchyManager
from PyNucleus.nl import (multilevelDirichletCondition,
paramsForFractionalHierarchy,
nonlocalProblem)
from PyNucleus.nl.nonlocalProblems import (DIRICHLET, HOMOGENEOUS_DIRICHLET,
NEUMANN, HOMOGENEOUS_NEUMANN)
d = driver()
p = nonlocalProblem(d)
d.add('solver', acceptedValues=['lu', 'mg', 'cg-mg'])
d.add('dense', False)
d.add('forceRebuild', True)
d.add('genKernel', False)
d.add('maxiter', 100)
d.add('tol', 1e-6)
d.add('plotRHS', False)
d.add('plotOnFullDomain', True)
d.declareFigure('solution')
d.declareFigure('analyticSolution')
params = d.process()
if d.kernel != 'fractional':
# Hierarchical matrices are only implemented for fractional kernels
d.dense = True
with d.timer('hierarchy'):
params['domain'] = p.mesh
params['keepMeshes'] = 'all'
params['keepAllDoFMaps'] = True
params['assemble'] = 'ALL' if params['solver'].find('mg') >= 0 else 'last'
params['dense'] = d.dense
params['logging'] = True
params['genKernel'] = d.genKernel
hierarchies, connectors = paramsForFractionalHierarchy(p.noRef, params)
hM = hierarchyManager(hierarchies, connectors, params)
hM.setup()
mesh = hM['fine'].meshLevels[-1].mesh
assert 2*mesh.h < p.horizon.value, "h = {}, horizon = {}".format(mesh.h, p.horizon.value)
if not p.boundaryCondition == HOMOGENEOUS_DIRICHLET:
bc = multilevelDirichletCondition(hM.getLevelList(), p.domainIndicator, p.fluxIndicator)
fullDoFMap = bc.fullDoFMap
naturalDoFMap = bc.naturalDoFMap
b = naturalDoFMap.assembleRHS(p.rhs, qr=simplexXiaoGimbutas(3, mesh.dim))
bc.setDirichletData(p.dirichletData)
bc.applyRHScorrection(b)
hierarchy = bc.naturalLevels
else:
hierarchy = hM.getLevelList()
naturalDoFMap = hierarchy[-1]['DoFMap']
b = naturalDoFMap.assembleRHS(p.rhs, qr=simplexXiaoGimbutas(3, mesh.dim))
# pure Neumann condition -> project out nullspace
if p.boundaryCondition in (NEUMANN, HOMOGENEOUS_NEUMANN):
assert bc.dirichletDoFMap.num_dofs == 0, bc.dirichletDoFMap
if params['solver'].find('mg') >= 0:
bc.naturalLevels[0]['A'] = bc.naturalLevels[0]['A'] + Dense_LinearOperator.ones(*bc.naturalLevels[0]['A'].shape)
const = bc.naturalDoFMap.ones()
b -= b.inner(const)/const.inner(const)*const
u = naturalDoFMap.zeros()
with d.timer('solve'):
if params['solver'].find('mg') >= 0:
ml = solverFactory.build('mg', hierarchy=hierarchy, setup=True, tolerance=params['tol'], maxIter=params['maxiter'])
d.logger.info('\n'+str(ml))
if d.solver == 'mg':
its = ml(b, u)
res = ml.residuals
elif d.solver == 'cg-mg':
cg = solverFactory.build('cg', A=hierarchy[-1]['A'], setup=True, tolerance=params['tol'], maxIter=params['maxiter'])
cg.setPreconditioner(ml.asPreconditioner())
its = cg(b, u)
res = cg.residuals
elif d.solver == 'lu':
lu = solverFactory.build(d.solver, A=hierarchy[-1]['A'], setup=True)
its = lu(b, u)
else:
raise NotImplementedError(d.solver)
# pure Neumann condition -> add nullspace components to match analytic solution
if p.boundaryCondition in (NEUMANN, HOMOGENEOUS_NEUMANN) and p.analyticSolution is not None:
uEx = bc.naturalDoFMap.interpolate(p.analyticSolution)
u += (const.inner(uEx)-const.inner(u))/const.inner(const) * const
vectors = d.addOutputGroup('vectors')
vectors.add('u', u)
meshes = d.addOutputGroup('meshes')
meshes.add('fullMesh', mesh)
results = d.addOutputGroup('results')
results.add('full h', mesh.h)
results.add('natural DoFs', naturalDoFMap.num_dofs)
results.add('iterations', its)
if p.boundaryCondition in (DIRICHLET, ):
results.add('full DoFs', bc.fullDoFMap.num_dofs)
u_full = bc.augmentDirichlet(u)
vectors.add('u_full', u_full)
else:
u_full = bc.naturalP*u
errors = d.addOutputGroup('errors', tested=True)
resNorm = (b-hierarchy[-1]['A']*u).norm(False)
errors.add('residual norm', resNorm)
if p.analyticSolution is not None:
uEx = bc.naturalDoFMap.interpolate(p.analyticSolution)
M_natural = naturalDoFMap.assembleMass()
L2err_natural = np.sqrt(abs((u-uEx).inner(M_natural*(u-uEx))))
relL2err_natural = L2err_natural/np.sqrt(abs(uEx.inner(M_natural*uEx)))
uEx_domain = bc.domainDoFMap.interpolate(p.analyticSolution)
M_domain = bc.domainDoFMap.assembleMass()
u_domain = bc.domainDoFMap.fromArray(bc.domainR*u_full)
L2err_domain = np.sqrt(abs((u_domain-uEx_domain).inner(M_domain*(u_domain-uEx_domain))))
relL2err_domain = L2err_domain/np.sqrt(abs(uEx_domain.inner(M_domain*uEx_domain)))
Linferr_natural = np.abs((u-uEx)).max()
relLinferr_natural = Linferr_natural/np.abs(uEx).max()
vectors.add('uEx', uEx)
errors.add('L2 error natural', L2err_natural, rTol=3e-2)
errors.add('rel L2 error natural', relL2err_natural, rTol=3e-2)
errors.add('L2 error domain', L2err_domain, rTol=3e-2)
errors.add('rel L2 error domain', relL2err_domain, rTol=3e-2)
errors.add('Linf error natural', Linferr_natural, rTol=3e-2)
errors.add('rel Linf error natural', relLinferr_natural, rTol=3e-2)
if p.boundaryCondition in (DIRICHLET, NEUMANN):
uEx_full = bc.fullDoFMap.interpolate(p.analyticSolution)
M_full = bc.fullDoFMap.assembleMass()
L2err_full = np.sqrt(abs((uEx_full-u_full).inner(M_full*(uEx_full-u_full))))
vectors.add('uEx_full', uEx_full)
errors.add('L2 error including Dirichlet domain', L2err_full, rTol=3e-2)
d.logger.info('\n'+str(results+errors))
if d.startPlot('solution'):
import matplotlib.pyplot as plt
plotDefaults = {}
if p.dim == 2:
plotDefaults['flat'] = True
if p.element != 'P0':
plotDefaults['shading'] = 'gouraud'
if p.boundaryCondition in (DIRICHLET, NEUMANN):
pM = plotManager(bc.fullDoFMap.mesh, bc.fullDoFMap, defaults=plotDefaults)
if p.dim == 1:
pMerr = plotManager(bc.fullDoFMap.mesh, bc.fullDoFMap, defaults=plotDefaults)
else:
pMerr = pM
pM.add(u_full, label='solution')
if d.plotRHS:
pM.add(bc.augmentDirichlet(b), label='rhs')
if p.analyticSolution is not None:
pM.add(uEx_full, label='analytic solution')
pMerr.add(u_full-uEx_full, label='error')
else:
if d.plotOnFullDomain:
pM = plotManager(naturalDoFMap.mesh, naturalDoFMap, defaults=plotDefaults)
if p.dim == 1:
pMerr = plotManager(naturalDoFMap.mesh, naturalDoFMap, defaults=plotDefaults)
else:
pMerr = pM
else:
indicator = P0_DoFMap(naturalDoFMap.mesh, NO_BOUNDARY).interpolate(p.domainIndicator)
selectedCells = np.flatnonzero(indicator.toarray() >= 1e-9).astype(INDEX)
reducedDM = naturalDoFMap.getReducedMeshDoFMap(selectedCells)
pM = plotManager(reducedDM.mesh, reducedDM, defaults=plotDefaults)
if p.dim == 1:
pMerr = plotManager(reducedDM.mesh, reducedDM, defaults=plotDefaults)
else:
pMerr = pM
pM.add(u, label='solution')
if d.plotRHS:
pM.add(b, label='rhs')
if p.analyticSolution is not None:
pM.add(uEx, label='analytic solution')
if p.dim == 1:
pMerr.add(u-uEx, label='error')
if p.dim == 1 and p.analyticSolution is not None:
plt.subplot(1, 2, 1)
pM.plot()
plt.subplot(1, 2, 2)
pMerr.plot()
else:
pM.plot()
d.finish()
|
{"hexsha": "9e4e4bb414aad9b43d098730b765b1b99c84c4d4", "size": 8448, "ext": "py", "lang": "Python", "max_stars_repo_path": "drivers/runNonlocal.py", "max_stars_repo_name": "sandialabs/PyNucleus", "max_stars_repo_head_hexsha": "98b87cf779c2c1853ce16d47998b692f594a55a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "drivers/runNonlocal.py", "max_issues_repo_name": "sandialabs/PyNucleus", "max_issues_repo_head_hexsha": "98b87cf779c2c1853ce16d47998b692f594a55a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "drivers/runNonlocal.py", "max_forks_repo_name": "sandialabs/PyNucleus", "max_forks_repo_head_hexsha": "98b87cf779c2c1853ce16d47998b692f594a55a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6153846154, "max_line_length": 124, "alphanum_fraction": 0.6516335227, "include": true, "reason": "import numpy", "num_tokens": 2403}
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from typing import Callable
import numpy as np
class Signature:
def __init__(self, args, ret):
self.args = args
self.ret = ret
class OpRegistry:
signatures = {}
def __init__(self):
pass
@staticmethod
def register(f: Callable, args, ret):
OpRegistry.signatures[f] = Signature(args, ret)
@staticmethod
def get_signature(f: Callable) -> Signature:
return OpRegistry.signatures[f]
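# Minimal usage sketch (the `scale` function and its signature below are hypothetical):
if __name__ == "__main__":
    def scale(x: np.ndarray, factor: float) -> np.ndarray:
        return x * factor
    OpRegistry.register(scale, args=[np.ndarray, float], ret=np.ndarray)
    sig = OpRegistry.get_signature(scale)
    print(sig.args, sig.ret)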
|
{"hexsha": "0a8df0f82580d9c09e6be0ed40eb65a2aa97c017", "size": 553, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/pyis-onnx/pyis/onnx/transpiler/ops/op_registry.py", "max_stars_repo_name": "microsoft/python-inference-script", "max_stars_repo_head_hexsha": "cbbbe9d16be0839e4df357b1bd9e8274ca44f1f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-29T01:49:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T10:26:46.000Z", "max_issues_repo_path": "py/pyis-onnx/pyis/onnx/transpiler/ops/op_registry.py", "max_issues_repo_name": "microsoft/python-inference-script", "max_issues_repo_head_hexsha": "cbbbe9d16be0839e4df357b1bd9e8274ca44f1f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-01T02:22:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-01T02:22:32.000Z", "max_forks_repo_path": "py/pyis-onnx/pyis/onnx/transpiler/ops/op_registry.py", "max_forks_repo_name": "microsoft/python-inference-script", "max_forks_repo_head_hexsha": "cbbbe9d16be0839e4df357b1bd9e8274ca44f1f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.12, "max_line_length": 59, "alphanum_fraction": 0.6564195298, "include": true, "reason": "import numpy", "num_tokens": 125}
|
import numpy as np
def simulation(data, params):
'''
Cema-Neige snow model
Input:
1. Data - pandas dataframe with correspondent timeseries:
    'Temp'- mean daily temperature (Celsius degrees)
'Prec'- mean daily precipitation (mm/day)
2. Params - list of model parameters:
'CTG' - dimensionless weighting coefficient of the snow pack thermal state
[0, 1]
    'Kf' - day-degree rate of melting (mm/(day*Celsius degree))
[1, 10]
Output:
Total amount of liquid and melting precipitation daily timeseries
(for coupling with hydrological model)
'''
### reading the data ###
Temp = data['Temp']
Prec = data['Prec']
FraqSolidPrecip = np.where(Temp < -0.2, 1, 0)
CTG, Kf = params
### initialization ###
## constants ##
# melting temperature
Tmelt = 0
# Threshold for solid precip
# function for Mean Annual Solid Precipitation
def MeanAnnualSolidPrecip(data):
        annual_vals = [data.Prec[(data.Prec.index.year == i) & (data.Temp < -0.2)].sum()
                       for i in np.unique(data.index.year)]
return np.mean(annual_vals)
MASP = MeanAnnualSolidPrecip(data)
Gthreshold = 0.9*MASP
MinSpeed = 0.1
## model states ##
G = 0
eTG = 0
PliqAndMelt = 0
    ### output of the snow model
PliqAndMelt = np.zeros(len(Temp))
for t in range(len(Temp)):
### solid and liquid precipitation accounting
# liquid precipitation
Pliq = (1 - FraqSolidPrecip[t]) * Prec[t]
# solid precipitation
Psol = FraqSolidPrecip[t] * Prec[t]
### Snow pack volume before melt
G = G + Psol
### Snow pack thermal state before melt
eTG = CTG * eTG + (1 - CTG) * Temp[t]
# control eTG
if eTG > 0: eTG = 0
### potential melt
if (int(eTG) == 0) & (Temp[t] > Tmelt):
PotMelt = Kf * (Temp[t] - Tmelt)
if PotMelt > G: PotMelt = G
else:
PotMelt = 0
### ratio of snow pack cover (Gratio)
if G < Gthreshold:
Gratio = G/Gthreshold
else:
Gratio = 1
### actual melt
Melt = ((1 - MinSpeed) * Gratio + MinSpeed) * PotMelt
### snow pack volume update
G = G - Melt
### Gratio update
if G < Gthreshold:
Gratio = G/Gthreshold
else:
Gratio = 1
### Water volume to pass to the hydrological model
PliqAndMelt[t] = Pliq + Melt
return PliqAndMelt
def bounds():
'''
'CTG' - dimensionless weighting coefficient of the snow pack thermal state
[0, 1]
    'Kf' - day-degree rate of melting (mm/(day*Celsius degree))
[1, 10]
'''
bnds = ((0, 1), (1, 10))
return bnds
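# Minimal usage sketch (synthetic daily forcing; real runs would use observed Temp/Prec series):
if __name__ == "__main__":
    import pandas as pd
    idx = pd.date_range("2000-01-01", periods=730, freq="D")
    data = pd.DataFrame({"Temp": 10 * np.sin(np.arange(730) * 2 * np.pi / 365.25),
                         "Prec": np.random.gamma(1.0, 2.0, 730)},
                        index=idx)
    PliqAndMelt = simulation(data, params=[0.5, 4.0])   # CTG=0.5, Kf=4.0, within bounds()
    print(PliqAndMelt[:5])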
|
{"hexsha": "ce58ad875e450f01b3d4d15762f6449550bfff43", "size": 2836, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/cema_neige.py", "max_stars_repo_name": "hydrogo/2018_VinoRead_wrkshp", "max_stars_repo_head_hexsha": "1ebcaacedc9eb96e0ed749ae93225b8cf3246d2b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-04-07T19:29:53.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-20T13:07:17.000Z", "max_issues_repo_path": "models/cema_neige.py", "max_issues_repo_name": "hydrogo/2018_VinoRead_wrkshp", "max_issues_repo_head_hexsha": "1ebcaacedc9eb96e0ed749ae93225b8cf3246d2b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/cema_neige.py", "max_forks_repo_name": "hydrogo/2018_VinoRead_wrkshp", "max_forks_repo_head_hexsha": "1ebcaacedc9eb96e0ed749ae93225b8cf3246d2b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-16T16:57:32.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-16T16:57:32.000Z", "avg_line_length": 28.9387755102, "max_line_length": 87, "alphanum_fraction": 0.5553596615, "include": true, "reason": "import numpy", "num_tokens": 798}
|
import numpy
from audionmf.transforms.nmf import NMF
def nmf_matrix(matrix, max_iter=100, rank=30):
    # shift the matrix so that all entries are non-negative (NMF needs a non-negative input)
matrix_inc, min_val = increment_by_min(matrix)
# TODO save
# use Kullback-Leibler divergence
# nmf = nimfa.Nmf(matrix_inc, max_iter=max_iter, rank=rank, objective='div', update='divergence')()
# W = nmf.basis()
# H = nmf.coef()
# calculate NMF
nmf = NMF(matrix_inc, max_iter=max_iter, rank=rank)
W, H = nmf.factorize()
return W, H, min_val
def nmf_matrix_original(W, H, min_val):
# get the original matrix
matrix = numpy.matmul(W, H) - min_val
return matrix
def increment_by_min(matrix):
    # shift the matrix by the absolute value of its minimum and return the shifted matrix together with that shift
min_val = abs(numpy.amin(matrix))
return matrix + min_val, min_val
|
{"hexsha": "c4e884d6a217216411210ac48f2cb0f0b305fe63", "size": 873, "ext": "py", "lang": "Python", "max_stars_repo_path": "audionmf/util/nmf_util.py", "max_stars_repo_name": "argoneuscze/AudioNMF", "max_stars_repo_head_hexsha": "04f360653bb0df3d5ed19bf2eb1459bff16a944c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "audionmf/util/nmf_util.py", "max_issues_repo_name": "argoneuscze/AudioNMF", "max_issues_repo_head_hexsha": "04f360653bb0df3d5ed19bf2eb1459bff16a944c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "audionmf/util/nmf_util.py", "max_forks_repo_name": "argoneuscze/AudioNMF", "max_forks_repo_head_hexsha": "04f360653bb0df3d5ed19bf2eb1459bff16a944c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6764705882, "max_line_length": 103, "alphanum_fraction": 0.6918671249, "include": true, "reason": "import numpy", "num_tokens": 241}
|
import os
import torch
import elf
import numpy as np
import wandb
from elf.segmentation.features import compute_rag
from torch.nn import functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from collections import namedtuple
import matplotlib.pyplot as plt
from matplotlib import cm
from multiprocessing import Process, Lock
import threading
from environments.multicut_obj_lvl_rew import MulticutEmbeddingsEnv, State
from data.spg_dset import SpgDset
from models.agent_model_obj_lvl import Agent
from models.feature_extractor import FeExtractor
from utils.exploration_functions import RunningAverage
from utils.general import soft_update_params, set_seed_everywhere, cluster_embeddings, pca_project, random_label_cmap
from utils.replay_memory import TransitionData_ts
from utils.distances import CosineDistance, L2Distance
from utils.matching import matching
from utils.yaml_conv_parser import dict_to_attrdict
from utils.training_helpers import update_env_data, supervised_policy_pretraining, state_to_cpu, Forwarder
# from timeit import default_timer as timer
class AgentSacTrainerObjLvlReward(object):
def __init__(self, cfg, global_count):
super(AgentSacTrainerObjLvlReward, self).__init__()
assert torch.cuda.device_count() == 1
self.device = torch.device("cuda:0")
torch.cuda.set_device(self.device)
torch.set_default_tensor_type(torch.FloatTensor)
self.cfg = cfg
self.global_count = global_count
self.memory = TransitionData_ts(capacity = self.cfg.mem_size)
self.best_val_reward = -np.inf
if self.cfg.distance == 'cosine':
self.distance = CosineDistance()
else:
self.distance = L2Distance()
self.fe_ext = FeExtractor(dict_to_attrdict(self.cfg.backbone), self.distance, cfg.fe_delta_dist, self.device)
self.fe_ext.embed_model.load_state_dict(torch.load(self.cfg.fe_model_name))
self.fe_ext.cuda(self.device)
self.model = Agent(self.cfg, State, self.distance, self.device)
wandb.watch(self.model)
self.model.cuda(self.device)
self.model_mtx = Lock()
MovSumLosses = namedtuple('mov_avg_losses', ('actor', 'critic', 'temperature'))
Scalers = namedtuple('Scalers', ('critic', 'actor'))
OptimizerContainer = namedtuple('OptimizerContainer',
('actor', 'critic', 'temperature', 'actor_shed', 'critic_shed', 'temp_shed'))
actor_optimizer = torch.optim.Adam(self.model.actor.parameters(), lr=self.cfg.actor_lr)
critic_optimizer = torch.optim.Adam(self.model.critic.parameters(), lr=self.cfg.critic_lr)
temp_optimizer = torch.optim.Adam([self.model.log_alpha], lr=self.cfg.alpha_lr)
lr_sched_cfg = dict_to_attrdict(self.cfg.lr_sched)
bw = lr_sched_cfg.mov_avg_bandwidth
off = lr_sched_cfg.mov_avg_offset
weights = np.linspace(lr_sched_cfg.weight_range[0], lr_sched_cfg.weight_range[1], bw)
weights = weights / weights.sum() # make them sum up to one
shed = lr_sched_cfg.torch_sched
self.mov_sum_losses = MovSumLosses(RunningAverage(weights, band_width=bw, offset=off),
RunningAverage(weights, band_width=bw, offset=off),
RunningAverage(weights, band_width=bw, offset=off))
self.optimizers = OptimizerContainer(actor_optimizer, critic_optimizer, temp_optimizer,
*[ReduceLROnPlateau(opt, patience=shed.patience,
threshold=shed.threshold, min_lr=shed.min_lr,
factor=shed.factor) for opt in
(actor_optimizer, critic_optimizer, temp_optimizer)])
self.scalers = Scalers(torch.cuda.amp.GradScaler(), torch.cuda.amp.GradScaler())
self.forwarder = Forwarder()
if self.cfg.agent_model_name != "":
self.model.load_state_dict(torch.load(self.cfg.agent_model_name))
# if "policy_warmup" in self.cfg and self.cfg.agent_model_name == "":
# supervised_policy_pretraining(self.model, self.env, self.cfg, device=self.device)
# torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, "sv_pretrained_policy_agent.pth"))
# finished with prepping
for param in self.fe_ext.parameters():
param.requires_grad = False
self.train_dset = SpgDset(self.cfg.data_dir, dict_to_attrdict(self.cfg.patch_manager), dict_to_attrdict(self.cfg.data_keys))
self.val_dset = SpgDset(self.cfg.val_data_dir, dict_to_attrdict(self.cfg.patch_manager), dict_to_attrdict(self.cfg.data_keys))
def validate(self):
"""validates the prediction against the method of clustering the embedding space"""
env = MulticutEmbeddingsEnv(self.fe_ext, self.cfg, self.device)
if self.cfg.verbose:
print("\n\n###### start validate ######", end='')
self.model.eval()
n_examples = len(self.val_dset)
taus = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
rl_scores, keys = [], None
ex_raws, ex_sps, ex_gts, ex_mc_gts, ex_embeds, ex_rl = [], [], [], [], [], []
dloader = iter(DataLoader(self.val_dset, batch_size=1, shuffle=True, pin_memory=True, num_workers=0))
acc_reward = 0
for it in range(len(self.val_dset)):
update_env_data(env, dloader, self.val_dset, self.device, with_gt_edges="sub_graph_dice" in self.cfg.reward_function)
env.reset()
state = env.get_state()
self.model_mtx.acquire()
try:
distr, _ = self.forwarder.forward(self.model, state, State, self.device, grad=False, post_data=False)
finally:
self.model_mtx.release()
action = torch.sigmoid(distr.loc)
reward, state = env.execute_action(action, None, post_images=True, tau=0.0, train=False)
acc_reward += reward[-1].item()
if self.cfg.verbose:
print(f"\nstep: {it}; mean_loc: {round(distr.loc.mean().item(), 5)}; mean reward: {round(reward[-1].item(), 5)}", end='')
embeddings = env.embeddings[0].cpu().numpy()
gt_seg = env.gt_seg[0].cpu().numpy()
gt_mc = cm.prism(env.gt_soln[0].cpu()/env.gt_soln[0].max().item()) if env.gt_edge_weights is not None else torch.zeros(env.raw.shape[-2:])
rl_labels = env.current_soln.cpu().numpy()[0]
if it < n_examples:
ex_embeds.append(pca_project(embeddings, n_comps=3))
ex_raws.append(env.raw[0].cpu().permute(1, 2, 0).squeeze())
ex_sps.append(env.init_sp_seg[0].cpu())
ex_mc_gts.append(gt_mc)
ex_gts.append(gt_seg)
ex_rl.append(rl_labels)
_rl_scores = matching(gt_seg, rl_labels, thresh=taus, criterion='iou', report_matches=False)
if it == 0:
for tau_it in range(len(_rl_scores)):
rl_scores.append(np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:]))))
keys = list(_rl_scores[0]._asdict().keys())[1:]
else:
for tau_it in range(len(_rl_scores)):
rl_scores[tau_it] += np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:])))
div = np.ones_like(rl_scores[0])
for i, key in enumerate(keys):
if key not in ('fp', 'tp', 'fn'):
div[i] = 10
for tau_it in range(len(rl_scores)):
rl_scores[tau_it] = dict(zip(keys, rl_scores[tau_it] / div))
fig, axs = plt.subplots(1, 2, figsize=(10, 10))
plt.subplots_adjust(hspace=.5)
for m in ('precision', 'recall', 'accuracy', 'f1'):
y = [s[m] for s in rl_scores]
data = [[x, y] for (x, y) in zip(taus, y)]
table = wandb.Table(data=data, columns=["IoU_threshold", m])
wandb.log({"validation/" + m: wandb.plot.line(table, "IoU_threshold", m, stroke=None, title=m)})
axs[0].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)
axs[0].set_ylabel('Metric value')
axs[0].grid()
axs[0].legend(bbox_to_anchor=(.8, 1.65), loc='upper left', fontsize='xx-small')
axs[0].set_title('RL method')
axs[0].set_xlabel(r'IoU threshold $\tau$')
for m in ('fp', 'tp', 'fn'):
y = [s[m] for s in rl_scores]
data = [[x, y] for (x, y) in zip(taus, y)]
table = wandb.Table(data=data, columns=["IoU_threshold", m])
wandb.log({"validation/" + m: wandb.plot.line(table, "IoU_threshold", m, stroke=None, title=m)})
axs[1].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)
axs[1].set_ylabel('Number #')
axs[1].grid()
axs[1].legend(bbox_to_anchor=(.87, 1.6), loc='upper left', fontsize='xx-small');
axs[1].set_title('RL method')
axs[1].set_xlabel(r'IoU threshold $\tau$')
wandb.log({"validation/metrics": [wandb.Image(fig, caption="metrics")]})
wandb.log({"validation_reward": acc_reward})
plt.close('all')
if acc_reward > self.best_val_reward:
self.best_val_reward = acc_reward
wandb.run.summary["validation_reward"] = acc_reward
torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, "best_checkpoint_agent.pth"))
if self.cfg.verbose:
print("\n###### finish validate ######\n", end='')
for i in range(n_examples):
fig, axs = plt.subplots(2, 3, sharex='col', sharey='row', gridspec_kw={'hspace': 0, 'wspace': 0})
axs[0, 0].imshow(ex_gts[i], cmap=random_label_cmap(), interpolation="none")
axs[0, 0].set_title('gt')
axs[0, 0].axis('off')
if ex_raws[i].ndim == 3:
axs[0, 1].imshow(ex_raws[i][..., 0])
else:
axs[0, 1].imshow(ex_raws[i])
axs[0, 1].set_title('raw image')
axs[0, 1].axis('off')
axs[0, 2].imshow(ex_sps[i], cmap=random_label_cmap(), interpolation="none")
axs[0, 2].set_title('superpixels')
axs[0, 2].axis('off')
axs[1, 0].imshow(ex_embeds[i])
axs[1, 0].set_title('pc proj 1-3', y=-0.15)
axs[1, 0].axis('off')
if ex_raws[i].ndim == 3:
if ex_raws[i].shape[-1] > 1:
axs[1, 1].imshow(ex_raws[i][..., 1])
else:
axs[1, 1].imshow(ex_raws[i][..., 0])
else:
axs[1, 1].imshow(ex_raws[i])
axs[1, 1].set_title('sp edge', y=-0.15)
axs[1, 1].axis('off')
axs[1, 2].imshow(ex_rl[i], cmap=random_label_cmap(), interpolation="none")
axs[1, 2].set_title('prediction', y=-0.15)
axs[1, 2].axis('off')
wandb.log({"validation/samples": [wandb.Image(fig, caption="sample images")]})
plt.close('all')
def update_critic(self, obs, action, reward):
self.optimizers.critic.zero_grad()
with torch.cuda.amp.autocast(enabled=True):
current_Q1, current_Q2 = self.forwarder.forward(self.model, obs, State, self.device, actions=action)
target_Q = reward[0]
target_Q = target_Q.detach()
critic_loss = F.mse_loss(current_Q1.squeeze(1), target_Q) + F.mse_loss(current_Q2.squeeze(1), target_Q)
self.scalers.critic.scale(critic_loss).backward()
self.scalers.critic.step(self.optimizers.critic)
self.scalers.critic.update()
return critic_loss.item(), reward[0].mean()
def update_actor_and_alpha(self, obs, reward, expl_action):
self.optimizers.actor.zero_grad()
self.optimizers.temperature.zero_grad()
obj_edge_mask_actor = obs.obj_edge_mask_actor.to(self.device)
with torch.cuda.amp.autocast(enabled=True):
distribution, actor_Q1, actor_Q2, action, side_loss = self.forwarder.forward(self.model, obs, State, self.device,
expl_action=expl_action, policy_opt=True)
obj_n_edges = obj_edge_mask_actor.sum(1)
log_prob = distribution.log_prob(action)
actor_loss = torch.tensor([0.0], device=actor_Q1[0].device)
alpha_loss = torch.tensor([0.0], device=actor_Q1[0].device)
actor_Q = torch.min(actor_Q1, actor_Q2)
obj_log_prob = (log_prob[None] * obj_edge_mask_actor[..., None]).sum(1)
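            # closed-form differential entropy of a diagonal Gaussian, 0.5*(1 + log(2*pi*sigma^2))
            # per edge, summed over the edges of each object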
obj_entropy = ((1 / 2 * (1 + (2 * np.pi * distribution.scale ** 2).log()))[None] * obj_edge_mask_actor[..., None]).sum(1).squeeze(1)
loss = (self.model.alpha.detach() * obj_log_prob - actor_Q).mean()
actor_loss = actor_loss + loss
actor_loss = actor_loss + self.cfg.side_loss_weight * side_loss
min_entropy = (self.cfg.entropy_range[1] - self.cfg.entropy_range[0]) * ((1.5 - reward[0]) / 1.5) + self.cfg.entropy_range[0]
min_entropy = min_entropy.to(self.model.alpha.device).squeeze()
entropy = obj_entropy.detach() if self.cfg.use_closed_form_entropy else -obj_log_prob.detach()
alpha_loss = alpha_loss + (self.model.alpha * (entropy - (obj_n_edges * min_entropy))).mean()
self.scalers.actor.scale(actor_loss).backward()
self.scalers.actor.scale(alpha_loss).backward()
self.scalers.actor.step(self.optimizers.actor)
self.scalers.actor.step(self.optimizers.temperature)
self.scalers.actor.update()
return actor_loss.item(), alpha_loss.item(), min_entropy.mean().item(), distribution.loc.mean().item()
def _step(self, step):
actor_loss, alpha_loss, min_entropy, loc_mean = None, None, None, None
(obs, action, reward), sample_idx = self.memory.sample()
action = action.to(self.device)
for i in range(len(reward)):
reward[i] = reward[i].to(self.device)
critic_loss, mean_reward = self.update_critic(obs, action, reward)
self.memory.report_sample_loss(critic_loss + mean_reward, sample_idx)
self.mov_sum_losses.critic.apply(critic_loss)
# self.optimizers.critic_shed.step(self.mov_sum_losses.critic.avg)
wandb.log({"loss/critic": critic_loss})
if self.cfg.actor_update_after < step and step % self.cfg.actor_update_frequency == 0:
actor_loss, alpha_loss, min_entropy, loc_mean = self.update_actor_and_alpha(obs, reward, action)
self.mov_sum_losses.actor.apply(actor_loss)
self.mov_sum_losses.temperature.apply(alpha_loss)
# self.optimizers.actor_shed.step(self.mov_sum_losses.actor.avg)
# self.optimizers.temp_shed.step(self.mov_sum_losses.actor.avg)
wandb.log({"loss/actor": actor_loss})
wandb.log({"loss/alpha": alpha_loss})
if step % self.cfg.post_stats_frequency == 0:
if min_entropy != "nl":
wandb.log({"min_entropy": min_entropy})
wandb.log({"mov_avg/critic": self.mov_sum_losses.critic.avg})
wandb.log({"mov_avg/actor": self.mov_sum_losses.actor.avg})
wandb.log({"mov_avg/temperature": self.mov_sum_losses.temperature.avg})
wandb.log({"lr/critic": self.optimizers.critic_shed.optimizer.param_groups[0]['lr']})
wandb.log({"lr/actor": self.optimizers.actor_shed.optimizer.param_groups[0]['lr']})
wandb.log({"lr/temperature": self.optimizers.temp_shed.optimizer.param_groups[0]['lr']})
if step % self.cfg.critic_target_update_frequency == 0:
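            # Polyak-average the online critic into the target critic with rate critic_tau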
soft_update_params(self.model.critic, self.model.critic_tgt, self.cfg.critic_tau)
return [critic_loss, actor_loss, alpha_loss, loc_mean]
def train_until_finished(self):
while self.global_count.value() <= self.cfg.T_max + self.cfg.mem_size:
self.model_mtx.acquire()
try:
stats = [[], [], [], []]
for i in range(self.cfg.n_updates_per_step):
_stats = self._step(self.global_count.value())
[s.append(_s) for s, _s in zip(stats, _stats)]
for j in range(len(stats)):
if any([_s is None for _s in stats[j]]):
stats[j] = "nl"
else:
stats[j] = round(sum(stats[j])/self.cfg.n_updates_per_step, 5)
if self.cfg.verbose:
print(f"step: {self.global_count.value()}; mean_loc: {stats[-1]}; n_explorer_steps {self.memory.push_count}", end="")
print(f"; cl: {stats[0]}; acl: {stats[1]}; al: {stats[3]}")
finally:
self.model_mtx.release()
self.global_count.increment()
self.memory.reset_push_count()
if self.global_count.value() % self.cfg.validatoin_freq == 0:
self.validate()
# Acts and trains model
def train_and_explore(self, rn):
self.global_count.reset()
set_seed_everywhere(rn)
wandb.config.random_seed = rn
if self.cfg.verbose:
print('###### start training ######')
print('Running on device: ', self.device)
print('found ', self.train_dset.length, " training data patches")
print('found ', self.val_dset.length, "validation data patches")
print('training with seed: ' + str(rn))
explorers = []
for i in range(self.cfg.n_explorers):
explorers.append(threading.Thread(target=self.explore))
[explorer.start() for explorer in explorers]
self.memory.is_full_event.wait()
trainer = threading.Thread(target=self.train_until_finished)
trainer.start()
trainer.join()
self.global_count.set(self.cfg.T_max + self.cfg.mem_size + 4)
[explorer.join() for explorer in explorers]
self.memory.clear()
del self.memory
torch.save(self.model.state_dict(), os.path.join(wandb.run.dir, "last_checkpoint_agent.pth"))
if self.cfg.verbose:
print('\n\n###### training finished ######')
return
def explore(self):
env = MulticutEmbeddingsEnv(self.fe_ext, self.cfg, self.device)
tau = 1
while self.global_count.value() <= self.cfg.T_max + self.cfg.mem_size:
dloader = iter(DataLoader(self.train_dset, batch_size=self.cfg.batch_size, shuffle=True, pin_memory=True, num_workers=0))
for iteration in range((len(self.train_dset) // self.cfg.batch_size) * self.cfg.data_update_frequency):
if iteration % self.cfg.data_update_frequency == 0:
update_env_data(env, dloader, self.train_dset, self.device, with_gt_edges="sub_graph_dice" in self.cfg.reward_function)
env.reset()
state = env.get_state()
if not self.memory.is_full():
action = torch.rand((env.edge_ids.shape[-1], 1), device=self.device)
else:
self.model_mtx.acquire()
try:
distr, action = self.forwarder.forward(self.model, state, State, self.device, grad=False)
finally:
self.model_mtx.release()
reward, state = env.execute_action(action, tau=max(0, tau))
for i in range(len(reward)):
reward[i] = reward[i].cpu()
self.memory.push(state_to_cpu(state, State), action.cpu(), reward)
if self.global_count.value() > self.cfg.T_max + self.cfg.mem_size:
break
return
|
{"hexsha": "035be3535f18b8a922684f7a01e6917e86723756", "size": 20020, "ext": "py", "lang": "Python", "max_stars_repo_path": "agents/sac_obj_lvl_rew.py", "max_stars_repo_name": "edosedgar/RLForSeg", "max_stars_repo_head_hexsha": "fc748d8e7d2f2a1e7ac0dddb3f268ec3025d40ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-07T23:28:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T16:52:49.000Z", "max_issues_repo_path": "agents/sac_obj_lvl_rew.py", "max_issues_repo_name": "edosedgar/RLForSeg", "max_issues_repo_head_hexsha": "fc748d8e7d2f2a1e7ac0dddb3f268ec3025d40ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-05T11:12:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-05T11:12:03.000Z", "max_forks_repo_path": "agents/sac_obj_lvl_rew.py", "max_forks_repo_name": "edosedgar/RLForSeg", "max_forks_repo_head_hexsha": "fc748d8e7d2f2a1e7ac0dddb3f268ec3025d40ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-08T14:17:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-04T18:40:48.000Z", "avg_line_length": 50.6835443038, "max_line_length": 150, "alphanum_fraction": 0.6058941059, "include": true, "reason": "import numpy", "num_tokens": 4713}
|
# This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from collections import OrderedDict
import numpy as np
from spot_motion_monitor.camera import BaseCamera
from ..config import GaussianCameraConfig
__all__ = ['GaussianCamera']
class GaussianCamera(BaseCamera):
"""This class creates a camera that produces a frame with random Poisson
noise and a Gaussian spot placed at random within the frame.
Attributes
----------
config : `config.GaussianCameraConfig`
The instance containing the camera configuration.
counter : int
The progress of the oscillation in time.
fpsFullFrame : int
The Frames per Second rate in full frame mode.
fpsRoiFrame : int
The Frames per Second rate in ROI frame mode.
height : int
The pixel height of the CCD.
postageStamp : numpy.array
The array containing the Gaussian postage stamp.
roiSize : int
The size of a (square) ROI region in pixels.
seed : int
The seed for the random number generator.
spotSize : int
The box size in pixels for the Gaussian spot.
width : int
The pixel width of the CCD.
xAmp : int
The amplitude of the x-axis oscillation.
xFreq : float
The frequency of the x-axis oscillation.
xPoint : int
The x-coordinate of the Gaussian postage stamp insertion point.
xPointOriginal : int
The x-coordinate of the original postage stamp insertion point.
yAmp : int
The amplitude of the y-axis oscillation.
yFreq : float
The frequency of the y-axis oscillation.
yPoint : int
The y-coordinate of the Gaussian postage stamp insertion point.
yPointOriginal : int
The y-coordinate of the original postage stamp insertion point.
"""
TWO_PI = 2.0 * np.pi
seed = None
def __init__(self):
"""Initalize the class.
"""
super().__init__()
self.spotSize = 20
self.height = 480
self.width = 640
self.fpsFullFrame = 24
self.fpsRoiFrame = 40
self.roiSize = 50
self.postageStamp = None
self.xPoint = None
self.yPoint = None
# Parameters for spot oscillation.
self.doSpotOscillation = True
self.counter = 0
self.xFreq = 1.0
self.xAmp = 10
self.yFreq = 2.0
self.yAmp = 5
self.xPointOriginal = None
self.yPointOriginal = None
self.config = GaussianCameraConfig()
self.modelName = self.name
def findInsertionPoint(self):
"""Determine the Gaussian spot insertion point.
"""
percentage = 0.2
xRange = percentage * self.width
yRange = percentage * self.height
# Pick lower left corner for insertion
xHalfwidth = self.width / 2
yHalfwidth = self.height / 2
self.xPoint = np.random.randint(xHalfwidth - xRange, xHalfwidth + xRange + 1)
self.yPoint = np.random.randint(yHalfwidth - yRange, yHalfwidth + yRange + 1)
self.xPointOriginal = self.xPoint
self.yPointOriginal = self.yPoint
def getCameraInformation(self):
"""Return the current camera related information.
Returns
-------
OrderedDict
The set of camera information.
"""
info = OrderedDict()
info['Model'] = self.name
info['CCD Width (pixels)'] = self.width
info['CCD Height (pixels)'] = self.height
return info
def getConfiguration(self):
"""Get the current camera configuration.
Returns
-------
`config.GaussianCameraConfig`
The set of current configuration parameters.
"""
self.config.roiSize = self.roiSize
self.config.fpsRoiFrame = self.fpsRoiFrame
self.config.fpsFullFrame = self.fpsFullFrame
self.config.doSpotOscillation = self.doSpotOscillation
self.config.xAmplitude = self.xAmp
self.config.xFrequency = self.xFreq
self.config.yAmplitude = self.yAmp
self.config.yFrequency = self.yFreq
return self.config
def getFullFrame(self):
"""Get the full frame from the CCD.
Returns
-------
numpy.array
The current full CCD frame.
"""
# Create base CCD frame
ccd = np.random.poisson(20.0, (self.height, self.width))
if self.doSpotOscillation:
self.oscillateSpot()
# Merge CCD frame and postage stamp
ccd[self.yPoint:self.yPoint + self.postageStamp.shape[1],
self.xPoint:self.xPoint + self.postageStamp.shape[0]] += self.postageStamp
return ccd
def getOffset(self):
"""Get the offset for ROI mode.
Returns
-------
(float, float)
The x, y pixel positions of the offset for ROI mode.
"""
# Offset is same for both axes since spot and ROI are square.
offset = (self.roiSize - self.spotSize) // 2
xStart = self.xPointOriginal - offset
yStart = self.yPointOriginal - offset
return (xStart, yStart)
def getRoiFrame(self):
"""Get the ROI frame from the CCD.
Returns
-------
numpy.array
The current ROI CCD frame.
"""
ccd = self.getFullFrame()
xOffset, yOffset = self.getOffset()
roi = ccd[yOffset:yOffset + self.roiSize, xOffset:xOffset + self.roiSize]
return roi
def makePostageStamp(self):
"""Create the Gaussian spot.
"""
linear_space = np.linspace(-2, 2, self.spotSize)
x, y = np.meshgrid(linear_space, linear_space)
d = np.sqrt(x * x + y * y)
sigma, mu = 0.5, 0.0
a = 200.0 / (sigma * np.sqrt(2.0 * np.pi))
self.postageStamp = a * np.exp(-((d - mu)**2 / (2.0 * sigma**2)))
self.postageStamp = self.postageStamp.astype(np.int64)
def oscillateSpot(self):
"""Calculate the oscillation of the spot.
"""
self.xPoint = int(self.xPointOriginal +
self.xAmp * np.sin(self.TWO_PI * self.xFreq * (self.counter / self.fpsRoiFrame)))
self.yPoint = int(self.yPointOriginal +
self.yAmp * np.sin(self.TWO_PI * self.yFreq * (self.counter / self.fpsRoiFrame)))
self.counter += 1
def resetOffset(self):
"""Reset the camera offsets back to zero.
For the Gaussian camera, this is a no-op.
"""
pass
def setConfiguration(self, config):
"""Set the comfiguration on the camera.
Parameters
----------
config : `config.GaussianCameraConfig`
The current configuration.
"""
self.roiSize = config.roiSize
self.fpsRoiFrame = config.fpsRoiFrame
self.fpsFullFrame = config.fpsFullFrame
self.doSpotOscillation = config.doSpotOscillation
self.xFreq = config.xFrequency
self.xAmp = config.xAmplitude
self.yFreq = config.yFrequency
self.yAmp = config.yAmplitude
def showFrameStatus(self):
"""Show frame status from the camera.
The Gaussian camera does not use this function since all frames are
good.
"""
pass
def shutdown(self):
"""Handle the shutdown of the camera.
"""
pass
def startup(self):
"""Handle the startup of the camera.
"""
np.random.seed(self.seed)
self.makePostageStamp()
self.findInsertionPoint()
def updateOffset(self, centroidX, centroidY):
"""Update the camera's internal offset values from the provided
centroid.
For the Gaussian camera, this is a no-op, but helps test the mechanism.
Parameters
----------
centroidX : float
The x component of the centroid for offset update.
centroidY : float
The y component of the centroid for offset update.
"""
pass
def waitOnRoi(self):
"""Wait on information to be updated for ROI mode use.
The Gaussian camera does not make use of this currently.
"""
pass
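# Minimal usage sketch (assumes the spot_motion_monitor package is installed; sizes follow the defaults above):
if __name__ == "__main__":
    camera = GaussianCamera()
    camera.seed = 42
    camera.startup()                   # seeds numpy, builds the spot, picks an insertion point
    frame = camera.getFullFrame()      # 480 x 640 frame: Poisson noise plus a Gaussian spot
    roi = camera.getRoiFrame()         # 50 x 50 ROI around the spot
    print(frame.shape, roi.shape)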
|
{"hexsha": "a6169d9bc4002058b12328278a319c47f7906338", "size": 8516, "ext": "py", "lang": "Python", "max_stars_repo_path": "spot_motion_monitor/camera/gaussian_camera.py", "max_stars_repo_name": "lsst-sitcom/spot_motion_monitor", "max_stars_repo_head_hexsha": "3d0242276198126240667ba13e95b7bdf901d053", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spot_motion_monitor/camera/gaussian_camera.py", "max_issues_repo_name": "lsst-sitcom/spot_motion_monitor", "max_issues_repo_head_hexsha": "3d0242276198126240667ba13e95b7bdf901d053", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-01-08T23:50:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-14T18:15:20.000Z", "max_forks_repo_path": "spot_motion_monitor/camera/gaussian_camera.py", "max_forks_repo_name": "lsst-com/spot_motion_monitor", "max_forks_repo_head_hexsha": "3d0242276198126240667ba13e95b7bdf901d053", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1941391941, "max_line_length": 107, "alphanum_fraction": 0.6067402536, "include": true, "reason": "import numpy", "num_tokens": 1989}
|
/**
* Testival.cpp
*
* Test interval class
*
* Created by Yinan Li on July 20, 2016.
*
* Hybrid Systems Group, University of Waterloo.
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE IntervalClass
#include <boost/test/unit_test.hpp>
//#include <boost/test/unit_test_log.hpp>
#include <cmath>
#include <iomanip>
#include "src/interval.h"
/*
test case 1: initialization
*/
BOOST_AUTO_TEST_CASE(test_init_basics)
{
rocs::interval a(2,1);
rocs::interval b(0.2, 1.5);
rocs::interval c(b);
rocs::interval d = b;
rocs::interval e(rocs::PINF, rocs::PINF);
/* test isempty() */
BOOST_CHECK(a.isempty());
BOOST_CHECK(!b.isempty());
BOOST_CHECK(!c.isempty());
BOOST_CHECK(!e.isempty());
/* test equivalence function */
BOOST_CHECK(c == b);
BOOST_CHECK(d == b);
BOOST_CHECK(a != b);
/* test set operations */
BOOST_CHECK(b.isin(0.2));
/* test print function */
// std::cout << b<< std::endl;
}
/*
test case 2: + -
*/
BOOST_AUTO_TEST_CASE(test_plus_minus)
{
using namespace rocs;
interval e(2,1); // empty interval
interval a(0.2, 1.5);
interval b(-0.3, -0.0001);
interval c = interval(-2.33, 1.09);
/* test plus */
BOOST_CHECK(interval(0.4, 3) == a + a);
BOOST_CHECK(interval(-0.1, 1.4999) == b + a);
BOOST_CHECK(interval(-0.1, 1.4999) == a + b);
BOOST_CHECK(interval(NAN, NAN) == a + e);
BOOST_CHECK(interval(7.2, 8.5) == a + 7);
BOOST_CHECK(interval(-102.33, -98.91) == -100 + c);
BOOST_CHECK(interval(NINF, PINF) == c + interval(NINF, PINF)); // support [-oo,+oo]
/* test minus */
BOOST_CHECK(interval(0.2001, 1.8) == a - b);
BOOST_CHECK(interval(-1.8, -0.2001) == b - a);
BOOST_CHECK(interval(-1.39, 2.3299) == b - c);
BOOST_CHECK(interval(NAN, NAN) == a - e); // support NAN
BOOST_CHECK(interval(-11.95, -11.6501) == b - 11.65);
BOOST_CHECK(interval(-104.13, -100.71) == -103.04 - c);
BOOST_CHECK(interval(NINF, PINF) == c - interval(NINF, PINF)); // support [-oo, +oo]
//std::cout << PINF - PINF << "\n";
//std::cout << (PINF == PINF);
}
/*
test case 3: * /
*/
BOOST_AUTO_TEST_CASE(test_mul_div)
{
using namespace rocs;
interval e(2,1); // empty interval
interval a(0.2, 1.5);
interval b(-0.3, -0.0001);
interval c(-2.33, 1.09);
interval d(0, 1);
interval g(-1.15, 0);
interval p1(-1, PINF);
interval p2(0,PINF);
interval p3(3, PINF);
interval np(NINF, PINF);
interval n1(NINF, 5);
interval n2(NINF,0);
interval n3(NINF, -0.02);
/* mul */
BOOST_CHECK(interval(0.04, 2.25) == a * a);
BOOST_CHECK(interval(-2.33*1.5, 1.09*1.5) == a * c);
BOOST_CHECK(interval(-0.45, -0.00002) == a * b);
BOOST_CHECK(interval(-2.33*1.5, 1.09*1.5) == c * a);
BOOST_CHECK(interval(-2.5397, 5.4289) == c * c);
BOOST_CHECK(interval(-0.327, 0.699) == c * b);
BOOST_CHECK(interval(-0.45, -0.00002) == b * a);
BOOST_CHECK(interval(-0.327, 0.699) == b * c);
BOOST_CHECK(interval(0.00000001, 0.09) == b * b);
BOOST_CHECK(interval(-0.3, 0) == d * b); //contain 0
BOOST_CHECK(interval(-2.33, 1.09) == c * d);
BOOST_CHECK(interval(-1.15*1.5, 0) == g * a);
BOOST_CHECK(interval(0, 1.15*0.3) == g * b);
BOOST_CHECK(interval(-1.15*1.09, 2.33*1.15) == c * g);
BOOST_CHECK(interval(-1.15*1.09, 2.33*1.15) == g * c);
BOOST_CHECK(interval(NAN, NAN) == e * a); //empty
BOOST_CHECK(interval(-1.5, PINF) == p1 * a); //infinity
BOOST_CHECK(interval(0, PINF) == p2 * a);
BOOST_CHECK(interval(NINF, -0.0003) == b * p3);
BOOST_CHECK(interval(NINF, 7.5) == a * n1);
BOOST_CHECK(interval(0, PINF) == n2 * b);
BOOST_CHECK(interval(NINF, PINF) == c * n3);
BOOST_CHECK(interval(NINF, PINF) == p1 * n1);
BOOST_CHECK(interval(NINF, PINF) == n1 * n2);
BOOST_CHECK(interval(NINF, PINF) == n1 * n3);
BOOST_CHECK(interval(0, PINF) == n2 * n3);
BOOST_CHECK(interval(NINF, PINF) == np * np);
BOOST_CHECK(interval(0, 0) == 0 * a);
BOOST_CHECK(interval(0, 0) == a * 0);
BOOST_CHECK(interval(0, 0) == 0 * p1);
BOOST_CHECK(interval(0, 0) == 0 * np);
BOOST_CHECK(interval(-1, PINF) == 1 * p1);
BOOST_CHECK(interval(0.00005, 0.15) == -0.5 * b);
BOOST_CHECK(interval(0, PINF) == -0.5 * n2);
/* div */
BOOST_CHECK(interval(0.2/1.5, 1.5/0.2) == a / a);
BOOST_CHECK(interval(-15000, -0.2/0.3) == a / b);
BOOST_CHECK(interval(-0.3/0.2, -0.0001/1.5) == b / a);
BOOST_CHECK(interval(NINF, PINF) == a / c);
BOOST_CHECK(interval(-2.33/0.2, 1.09/0.2) == c / a);
BOOST_CHECK(interval(-10900, 23300) == c / b);
BOOST_CHECK(interval(0, 5) == d / a);
BOOST_CHECK(interval(0.2, PINF) == a / d);
BOOST_CHECK(interval(-5.75, 0) == g / a);
BOOST_CHECK(interval(NINF, -0.2/1.15) == a / g);
BOOST_CHECK(interval(0, 11500) == g / b);
BOOST_CHECK(interval(NINF, PINF) == d / g);
BOOST_CHECK(interval(NAN, NAN) == a / interval(0, 0));
BOOST_CHECK(interval(0, 0) == 0 / a);
BOOST_CHECK(interval(0, 0) == 0 / b);
BOOST_CHECK(interval(NAN, NAN) == 0 / c); // num = 0, 0\in den
BOOST_CHECK(interval(NAN, NAN) == 0 / g);
BOOST_CHECK(interval(NAN, NAN) == 0 / d);
BOOST_CHECK(interval(1/1.5, 5) == 1 / a); // num > 0
BOOST_CHECK(interval(-10000, -1/0.3) == 1 / b);
BOOST_CHECK(interval(NINF, PINF) == 1 / c);
BOOST_CHECK(interval(1, PINF) == 1 / d);
BOOST_CHECK(interval(NINF, -1/1.15) == 1 / g);
BOOST_CHECK(interval(-5, -1/1.5) == -1 / a);
BOOST_CHECK(interval(1/0.3, 10000) == -1 / b);
BOOST_CHECK(interval(NINF, PINF) == -1 / c);
BOOST_CHECK(interval(NINF, -1) == -1 / d);
BOOST_CHECK(interval(1/1.15, PINF) == -1 / g);
}
/*
test case 4: sin(), cos(), tan(), atan()
*/
BOOST_AUTO_TEST_CASE(test_trigonometric_fcns)
{
using namespace rocs;
/* test sin() [cos() is implemented by sin()] */
BOOST_CHECK(interval(-1, sin(-1.1)) == sin(interval(-1.6, -1.1))); //wid = 0.5
BOOST_CHECK(interval(sin(-1), sin(-0.5)) == sin(interval(-1, -0.5)));
BOOST_CHECK(interval(sin(-0.2), sin(0.3)) == sin(interval(-0.2, 0.3))); //0
std::cout << sin(interval(-0.2, 0.3)) << '\n';
BOOST_CHECK(interval(-1, sin(5)) == sin(interval(4.5, 5))); //3pi/2
BOOST_CHECK(interval(sin(-1), sin(0.57)) == sin(interval(-1, 0.57))); //wid = 1.57
BOOST_CHECK(interval(0, 1) == sin(interval(0, 1.571))); //pi/2
BOOST_CHECK(interval(-1, sin(3.14)) == sin(interval(3.14, 4.72))); //3pi/2
BOOST_CHECK(interval(-1, sin(-3.2)) == sin(interval(-3.2, -3.2+PIIVAL))); //wid = pi
std::cout << sin(interval(-3.2, -3.2+PIIVAL)) << '\n';
BOOST_CHECK(interval(-1, 0) == sin(interval(-PIIVAL, 0)));
BOOST_CHECK(interval(-1, 1) == sin(interval(-PIIVAL/2, PIIVAL/2)));
BOOST_CHECK(interval(sin(-1), 1) == sin(interval(-1, -1+PIIVAL)));
BOOST_CHECK(interval(sin(-1), 1) == sin(interval(-1, 3.4))); //wid = 4.3
BOOST_CHECK(interval(-1, 1) == sin(interval(1.5, 5.8)));
BOOST_CHECK(interval(-1, sin(7.4)) == sin(interval(3.1, 7.4)));
BOOST_CHECK(interval(-1, 1) == sin(interval(7, 11.3)));
BOOST_CHECK(interval(-1, 1) == sin(interval(0, PI2IVAL))); //wid = 2pi
BOOST_CHECK(interval(-1, 1) == sin(interval(PI2IVAL, 2*PI2IVAL)));
BOOST_CHECK(interval(-1, 1) == sin(interval(-0.5, -0.5+PI2IVAL)));
BOOST_CHECK(interval(-1, 1) == sin(interval(PIIVAL, 5*PIIVAL))); //wid = 4pi
BOOST_CHECK(interval(-1, 1) == sin(interval(-6, 1))); //wid = 7
/* tan() */
BOOST_CHECK(interval(NINF, PINF) == tan(interval(-1.6, 2.54))); //wid = 4.14 > pi
// wid = 3.14 < pi, pi/2 \in [inf, sup]
BOOST_CHECK(interval(NINF, PINF) == tan(interval(-5.14, -2)));
BOOST_CHECK(interval(tan(4.3), tan(4.5)) == tan(interval(4.3, 4.5))); // k+=1 branch
std::cout << tan(interval(4.3, 4.5)) << '\n';
BOOST_CHECK(interval(NINF, PINF) == tan(interval(4.3, 4.8))); // k+=1 branch
BOOST_CHECK(interval(tan(-2), tan(-1.7)) == tan(interval(-2, -1.7))); // k+=1 branch
std::cout << tan(interval(-2, -1.7)) << '\n';
}
/*
test case 5: sqr(), sqrt(), power()
*/
BOOST_AUTO_TEST_CASE(test_power_root)
{
using namespace rocs;
BOOST_CHECK(interval(0.01, 25) == sqr(interval(-5, -0.1))); //sqr
BOOST_CHECK(interval(0.01, 25) == sqr(interval(0.1, 5)));
BOOST_CHECK(interval(0, 25) == sqr(interval(-5, 0.1)));
BOOST_CHECK(interval(0, 0.25) == sqr(interval(0, 0.5)));
BOOST_CHECK(interval(0, 0.25) == sqr(interval(-0.5, 0)));
BOOST_CHECK(pow(interval(0, 0.5), 2) == sqr(interval(0, 0.5))); //pow
BOOST_CHECK(pow(interval(0, 0.5), 2) == sqr(interval(0, 0.5)));
BOOST_CHECK(interval(-1, 8) == pow(interval(-1, 2), 3));
BOOST_CHECK(interval(0, 1) == pow(interval(-1, 1), 6));
BOOST_CHECK(interval(-1, 2) == pow(interval(-1, 2), 1));
BOOST_CHECK(interval(1, 2) == sqrt(interval(1, 4)));//sqrt
BOOST_CHECK(interval(0, 2) == sqrt(interval(-1, 4)));
BOOST_CHECK(interval(0, 0) == sqrt(interval(-1, 0)));
}
/*
test case 6: intersect, hull
*/
BOOST_AUTO_TEST_CASE(test_inter_hull)
{
using namespace rocs;
interval a(-0.7, 0.2);
interval b(0, 0.5);
interval c(-0.5, 0);
interval d(-3, -0.6);
interval e(NINF, -2);
interval f(0.3, PINF);
interval g(NINF, PINF);
BOOST_CHECK(interval(NAN, NAN) == intersect(b, d)); //intersect
BOOST_CHECK(c == intersect(a, c));
BOOST_CHECK(interval(0, 0.2) == intersect(a, b));
BOOST_CHECK(interval(0, 0) == intersect(b, c));
BOOST_CHECK(interval(-3, -2) == intersect(e, d));
BOOST_CHECK(interval(NAN, NAN) == intersect(e, a));
BOOST_CHECK(interval(0.3, 0.5) == intersect(b, f));
BOOST_CHECK(a == intersect(a, g));
BOOST_CHECK(interval(-3, 0.5) == hull(b, d)); //hull
BOOST_CHECK(interval(-0.7, 0.5) == hull(a, b));
BOOST_CHECK(interval(-0.5, 0.5) == hull(b, c));
BOOST_CHECK(interval(NINF, -0.6) == hull(e, d));
BOOST_CHECK(interval(NINF, 0.2) == hull(e, a));
BOOST_CHECK(a == hull(a, c));
BOOST_CHECK(g == hull(a, g));
}
/*
test case 7: bisections
*/
BOOST_AUTO_TEST_CASE(test_bisection)
{
rocs::interval a(-0.7, 0.2);
rocs::interval left = lowerhalf(a);
rocs::interval right = upperhalf(a);
BOOST_CHECK(rocs::interval(-0.7, -0.25) == left);
BOOST_CHECK(rocs::interval(-0.25, 0.2) == right);
}
|
{"hexsha": "ff739ed4ee3dbb8c35893b5436b63aa07b5b465b", "size": 10436, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/testIval.cpp", "max_stars_repo_name": "yinanl/rocs", "max_stars_repo_head_hexsha": "bf2483903e39f4c0ea254a9ef56720a1259955ad", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/testIval.cpp", "max_issues_repo_name": "yinanl/rocs", "max_issues_repo_head_hexsha": "bf2483903e39f4c0ea254a9ef56720a1259955ad", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/testIval.cpp", "max_forks_repo_name": "yinanl/rocs", "max_forks_repo_head_hexsha": "bf2483903e39f4c0ea254a9ef56720a1259955ad", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4422442244, "max_line_length": 89, "alphanum_fraction": 0.5787658107, "num_tokens": 3698}
|
using Test
td = pwd()
for dir in ["basic","hamiltonian_zoo"]
print("Including Test Dir:",dir,"\n")
for file in readdir(td*"/"*dir,join=true)
print("\tIncluding Test File:",file,"\n")
include(file)
end
end
|
{"hexsha": "90e79db04e7200dc9e6414a8c792a730cc54b9cd", "size": 233, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "qiyang-ustc/EasyHamiltonian.jl", "max_stars_repo_head_hexsha": "2a56e7de20d93714848ed90ae0e0cb6287070df8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-10-06T19:15:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T13:47:46.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "qiyang-ustc/EasyHamiltonian.jl", "max_issues_repo_head_hexsha": "2a56e7de20d93714848ed90ae0e0cb6287070df8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "qiyang-ustc/EasyHamiltonian.jl", "max_forks_repo_head_hexsha": "2a56e7de20d93714848ed90ae0e0cb6287070df8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3, "max_line_length": 49, "alphanum_fraction": 0.6051502146, "num_tokens": 64}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class AtariNetwork(nn.Module):
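    """Q-network with one convolutional tower and one output head per game,
    sharing a single fully-connected layer (_h4) across games; forward routes
    each sample through the tower selected by idx and gathers the Q-values for
    the requested action and game.
    """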
n_features = 512
def __init__(self, input_shape, _, n_actions_per_head, use_cuda, n_games,
features, dropout):
super().__init__()
self._n_input = input_shape
self._n_games = n_games
self._max_actions = max(n_actions_per_head)[0]
self._features = features
self._use_cuda = use_cuda
self._n_shared = 2
self._h1 = nn.ModuleList(
[nn.Conv2d(self._n_input[0], 32, kernel_size=8, stride=4) for _ in range(
self._n_games)]
)
self._h2 = nn.ModuleList(
[nn.Conv2d(32, 64, kernel_size=4, stride=2) for _ in range(
self._n_games)]
)
self._h3 = nn.ModuleList(
[nn.Conv2d(64, 64, kernel_size=3, stride=1) for _ in range(
self._n_games)]
)
self._h4 = nn.Linear(3136, self.n_features)
self._h5 = nn.ModuleList(
[nn.Linear(self.n_features, self._max_actions) for _ in range(
self._n_games)]
)
nn.init.xavier_uniform_(self._h4.weight,
gain=nn.init.calculate_gain('relu'))
for i in range(self._n_games):
nn.init.xavier_uniform_(self._h1[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h2[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h5[i].weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, action=None, idx=None):
state = state.float() / 255.
h = list()
for i in np.unique(idx):
idxs = np.argwhere(idx == i).ravel()
h_f = F.relu(
self._h1[i](state[idxs, :self._n_input[0]])
)
h_f = F.relu(self._h2[i](h_f))
h.append(F.relu(self._h3[i](h_f)))
cat_h3 = torch.cat(h)
if self._features == 'relu':
h_f = F.relu(self._h4(cat_h3.view(-1, 3136)))
elif self._features == 'sigmoid':
h_f = torch.sigmoid(self._h4(cat_h3.view(-1, 3136)))
else:
raise ValueError
q = [self._h5[i](h_f) for i in range(self._n_games)]
q = torch.stack(q, dim=1)
if action is not None:
action = action.long()
q_acted = torch.squeeze(
q.gather(2, action.repeat(1, self._n_games).unsqueeze(-1)), -1)
q = q_acted
if idx is not None:
idx = torch.from_numpy(idx)
if self._use_cuda:
idx = idx.cuda()
if q.dim() == 2:
q_idx = q.gather(1, idx.unsqueeze(-1))
else:
q_idx = q.gather(1, idx.view(-1, 1).repeat(
1, self._max_actions).unsqueeze(1))
q = torch.squeeze(q_idx, 1)
return q
def get_shared_weights(self):
p1 = list()
for p in self._h4.parameters():
p1.append(p.data.detach().cpu().numpy())
return p1
def set_shared_weights(self, weights):
w1 = weights
for p, w in zip(self._h4.parameters(), w1):
w_tensor = torch.from_numpy(w).type(p.data.dtype)
if self._use_cuda:
w_tensor = w_tensor.cuda()
p.data = w_tensor
def freeze_shared_weights(self):
for p in self._h4.parameters():
p.requires_grad = False
def unfreeze_shared_weights(self):
for p in self._h4.parameters():
p.requires_grad = True
class GymNetwork(nn.Module):
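    """Fully-connected Q-network with a per-game input layer and output head,
    sharing the two hidden layers _h2 and _h3 across games.
    """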
def __init__(self, input_shape, _, n_actions_per_head, use_cuda, features,
dropout, n_features=80):
super().__init__()
self._n_input = input_shape
self._n_games = len(n_actions_per_head)
self._max_actions = max(n_actions_per_head)[0]
self._use_cuda = use_cuda
self._n_shared = 4
self._features = features
self._h1 = nn.ModuleList(
[nn.Linear(self._n_input[i][0], n_features) for i in range(
len(input_shape))]
)
self._h2 = nn.Linear(n_features, n_features)
self._h3 = nn.Linear(n_features, n_features)
self._h4 = nn.ModuleList(
[nn.Linear(n_features, self._max_actions) for _ in range(
self._n_games)]
)
nn.init.xavier_uniform_(self._h2.weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h3.weight,
gain=nn.init.calculate_gain('relu'))
for i in range(self._n_games):
nn.init.xavier_uniform_(self._h1[i].weight,
gain=nn.init.calculate_gain('relu'))
nn.init.xavier_uniform_(self._h4[i].weight,
gain=nn.init.calculate_gain('linear'))
def forward(self, state, action=None, idx=None):
state = state.float()
h1 = list()
for i in np.unique(idx):
idxs = np.argwhere(idx == i).ravel()
h1.append(F.relu(self._h1[i](state[idxs, :self._n_input[i][0]])))
cat_h1 = torch.cat(h1)
h_f = F.relu(self._h2(cat_h1))
if self._features == 'relu':
h_f = F.relu(self._h3(h_f))
elif self._features == 'sigmoid':
h_f = torch.sigmoid(self._h3(h_f))
else:
raise ValueError
q = [self._h4[i](h_f) for i in range(self._n_games)]
q = torch.stack(q, dim=1)
if action is not None:
action = action.long()
q_acted = torch.squeeze(
q.gather(2, action.repeat(1, self._n_games).unsqueeze(-1)), -1)
q = q_acted
if idx is not None:
idx = torch.from_numpy(idx)
if self._use_cuda:
idx = idx.cuda()
if q.dim() == 2:
q_idx = q.gather(1, idx.unsqueeze(-1))
else:
q_idx = q.gather(1, idx.view(-1, 1).repeat(
1, self._max_actions).unsqueeze(1))
q = torch.squeeze(q_idx, 1)
return q
def get_shared_weights(self):
p2 = list()
p3 = list()
for p in self._h2.parameters():
p2.append(p.data.detach().cpu().numpy())
for p in self._h3.parameters():
p3.append(p.data.detach().cpu().numpy())
return p2, p3
def set_shared_weights(self, weights):
w2, w3 = weights
for p, w in zip(self._h2.parameters(), w2):
w_tensor = torch.from_numpy(w).type(p.data.dtype)
if self._use_cuda:
w_tensor = w_tensor.cuda()
p.data = w_tensor
for p, w in zip(self._h3.parameters(), w3):
w_tensor = torch.from_numpy(w).type(p.data.dtype)
if self._use_cuda:
w_tensor = w_tensor.cuda()
p.data = w_tensor
def freeze_shared_weights(self):
for p in self._h2.parameters():
p.requires_grad = False
for p in self._h3.parameters():
p.requires_grad = False
def unfreeze_shared_weights(self):
for p in self._h2.parameters():
p.requires_grad = True
for p in self._h3.parameters():
p.requires_grad = True
|
{"hexsha": "57de75decc2524ccfcf879c9b0e7cfcbd68a761b", "size": 7684, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/shared/dqn/networks.py", "max_stars_repo_name": "MushroomRL/mushroom-rl-meta", "max_stars_repo_head_hexsha": "08c13bd8115c81aba083ec62672956026e7ddd8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/shared/dqn/networks.py", "max_issues_repo_name": "MushroomRL/mushroom-rl-meta", "max_issues_repo_head_hexsha": "08c13bd8115c81aba083ec62672956026e7ddd8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/shared/dqn/networks.py", "max_forks_repo_name": "MushroomRL/mushroom-rl-meta", "max_forks_repo_head_hexsha": "08c13bd8115c81aba083ec62672956026e7ddd8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6978723404, "max_line_length": 85, "alphanum_fraction": 0.5307131702, "include": true, "reason": "import numpy", "num_tokens": 1889}
|
# Author: Andrey Boytsov <andrey.boytsov@uni.lu> <andrey.m.boytsov@gmail.com>
# License: BSD 3 clause (C) 2017
# Fitting the iris dataset (from sklearn), then embedding the same data using the transform function to see how close
# the new Ys are to the original data. Feel free to play with the transformation parameters.
# The transformation is done with lightweight LION-tSNE - the transformer-only variant.
import sys
import os
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
# Importing from parent directory
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import lion_tsne_lightweight
import numpy as np
from sklearn.manifold import TSNE
if __name__ == "__main__":
data = load_iris()
X = data.data
labels = data.target
norm_TSNE = TSNE(perplexity=20)
y = norm_TSNE.fit_transform(X)
lwTSNE = lion_tsne_lightweight.LionTSNELightweight(X,y)
embedder = lwTSNE.generate_lion_tsne_embedder(verbose=2, random_state=0, function_kwargs=
{'y_safety_margin':0, 'radius_x_percentile':99, 'radius_y_percentile':100})
y2 = embedder(X)
print("Mean square error between y1 and y2: ", np.mean(np.sum((y-y2)**2, axis=1)))
color_list = ['blue','orange','green']
plt.gcf().set_size_inches(10, 10)
legend_list = list()
for l in set(sorted(labels)):
plt.scatter(y[labels == l, 0], y[labels == l, 1], c=color_list[l])
legend_list.append(str(data.target_names[l]))
for l in set(sorted(labels)):
plt.scatter(y2[labels == l, 0], y2[labels == l, 1], c=color_list[l], marker='v')
legend_list.append(str(data.target_names[l])+" embedded")
# plt.xlim([-800, 1300])
# plt.ylim([-200, 200])
plt.legend(legend_list)
plt.show()
|
{"hexsha": "8ee6af5a35d593660fa434c8604e47641d60fc17", "size": 1749, "ext": "py", "lang": "Python", "max_stars_repo_path": "Experiments/TransformationLightweight/iris_double_transform.py", "max_stars_repo_name": "andreyboytsov/DynamicTSNE", "max_stars_repo_head_hexsha": "9ccd18d80c7a0bae31defde2f2336f9ea3d1965a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2017-07-17T10:30:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-24T08:31:30.000Z", "max_issues_repo_path": "Experiments/TransformationLightweight/iris_double_transform.py", "max_issues_repo_name": "andreyboytsov/DynamicTSNE", "max_issues_repo_head_hexsha": "9ccd18d80c7a0bae31defde2f2336f9ea3d1965a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Experiments/TransformationLightweight/iris_double_transform.py", "max_forks_repo_name": "andreyboytsov/DynamicTSNE", "max_forks_repo_head_hexsha": "9ccd18d80c7a0bae31defde2f2336f9ea3d1965a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0217391304, "max_line_length": 112, "alphanum_fraction": 0.700971984, "include": true, "reason": "import numpy", "num_tokens": 480}
|
#include <Access/DiskAccessStorage.h>
#include <IO/WriteHelpers.h>
#include <IO/ReadHelpers.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <Access/User.h>
#include <Access/Role.h>
#include <Access/RowPolicy.h>
#include <Access/Quota.h>
#include <Access/SettingsProfile.h>
#include <Parsers/ASTCreateUserQuery.h>
#include <Parsers/ASTCreateRoleQuery.h>
#include <Parsers/ASTCreateRowPolicyQuery.h>
#include <Parsers/ASTCreateQuotaQuery.h>
#include <Parsers/ASTCreateSettingsProfileQuery.h>
#include <Parsers/ASTGrantQuery.h>
#include <Parsers/ParserCreateUserQuery.h>
#include <Parsers/ParserCreateRoleQuery.h>
#include <Parsers/ParserCreateRowPolicyQuery.h>
#include <Parsers/ParserCreateQuotaQuery.h>
#include <Parsers/ParserCreateSettingsProfileQuery.h>
#include <Parsers/ParserGrantQuery.h>
#include <Parsers/formatAST.h>
#include <Parsers/parseQuery.h>
#include <Interpreters/InterpreterCreateUserQuery.h>
#include <Interpreters/InterpreterCreateRoleQuery.h>
#include <Interpreters/InterpreterCreateRowPolicyQuery.h>
#include <Interpreters/InterpreterCreateQuotaQuery.h>
#include <Interpreters/InterpreterCreateSettingsProfileQuery.h>
#include <Interpreters/InterpreterGrantQuery.h>
#include <Interpreters/InterpreterShowCreateAccessEntityQuery.h>
#include <Interpreters/InterpreterShowGrantsQuery.h>
#include <Common/quoteString.h>
#include <Core/Defines.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
#include <filesystem>
#include <fstream>
namespace DB
{
namespace ErrorCodes
{
extern const int DIRECTORY_DOESNT_EXIST;
extern const int FILE_DOESNT_EXIST;
extern const int INCORRECT_ACCESS_ENTITY_DEFINITION;
extern const int LOGICAL_ERROR;
}
namespace
{
using EntityType = IAccessStorage::EntityType;
using EntityTypeInfo = IAccessStorage::EntityTypeInfo;
/// Special parser for the 'ATTACH access entity' queries.
class ParserAttachAccessEntity : public IParserBase
{
protected:
const char * getName() const override { return "ATTACH access entity query"; }
bool parseImpl(Pos & pos, ASTPtr & node, Expected & expected) override
{
ParserCreateUserQuery create_user_p;
ParserCreateRoleQuery create_role_p;
ParserCreateRowPolicyQuery create_policy_p;
ParserCreateQuotaQuery create_quota_p;
ParserCreateSettingsProfileQuery create_profile_p;
ParserGrantQuery grant_p;
create_user_p.useAttachMode();
create_role_p.useAttachMode();
create_policy_p.useAttachMode();
create_quota_p.useAttachMode();
create_profile_p.useAttachMode();
grant_p.useAttachMode();
return create_user_p.parse(pos, node, expected) || create_role_p.parse(pos, node, expected)
|| create_policy_p.parse(pos, node, expected) || create_quota_p.parse(pos, node, expected)
|| create_profile_p.parse(pos, node, expected) || grant_p.parse(pos, node, expected);
}
};
/// Reads a file containing ATTACH queries and then parses it to build an access entity.
AccessEntityPtr readEntityFile(const std::filesystem::path & file_path)
{
/// Read the file.
ReadBufferFromFile in{file_path};
String file_contents;
readStringUntilEOF(file_contents, in);
/// Parse the file contents.
ASTs queries;
ParserAttachAccessEntity parser;
const char * begin = file_contents.data(); /// begin of current query
const char * pos = begin; /// parser moves pos from begin to the end of current query
const char * end = begin + file_contents.size();
while (pos < end)
{
queries.emplace_back(parseQueryAndMovePosition(parser, pos, end, "", true, 0, DBMS_DEFAULT_MAX_PARSER_DEPTH));
while (isWhitespaceASCII(*pos) || *pos == ';')
++pos;
}
/// Interpret the AST to build an access entity.
std::shared_ptr<User> user;
std::shared_ptr<Role> role;
std::shared_ptr<RowPolicy> policy;
std::shared_ptr<Quota> quota;
std::shared_ptr<SettingsProfile> profile;
AccessEntityPtr res;
for (const auto & query : queries)
{
if (auto * create_user_query = query->as<ASTCreateUserQuery>())
{
if (res)
throw Exception("Two access entities in one file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
res = user = std::make_unique<User>();
InterpreterCreateUserQuery::updateUserFromQuery(*user, *create_user_query);
}
else if (auto * create_role_query = query->as<ASTCreateRoleQuery>())
{
if (res)
throw Exception("Two access entities in one file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
res = role = std::make_unique<Role>();
InterpreterCreateRoleQuery::updateRoleFromQuery(*role, *create_role_query);
}
else if (auto * create_policy_query = query->as<ASTCreateRowPolicyQuery>())
{
if (res)
throw Exception("Two access entities in one file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
res = policy = std::make_unique<RowPolicy>();
InterpreterCreateRowPolicyQuery::updateRowPolicyFromQuery(*policy, *create_policy_query);
}
else if (auto * create_quota_query = query->as<ASTCreateQuotaQuery>())
{
if (res)
throw Exception("Two access entities are attached in the same file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
res = quota = std::make_unique<Quota>();
InterpreterCreateQuotaQuery::updateQuotaFromQuery(*quota, *create_quota_query);
}
else if (auto * create_profile_query = query->as<ASTCreateSettingsProfileQuery>())
{
if (res)
throw Exception("Two access entities are attached in the same file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
res = profile = std::make_unique<SettingsProfile>();
InterpreterCreateSettingsProfileQuery::updateSettingsProfileFromQuery(*profile, *create_profile_query);
}
else if (auto * grant_query = query->as<ASTGrantQuery>())
{
if (!user && !role)
throw Exception("A user or role should be attached before grant in file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
if (user)
InterpreterGrantQuery::updateUserFromQuery(*user, *grant_query);
else
InterpreterGrantQuery::updateRoleFromQuery(*role, *grant_query);
}
else
throw Exception("No interpreter found for query " + query->getID(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
}
if (!res)
throw Exception("No access entities attached in file " + file_path.string(), ErrorCodes::INCORRECT_ACCESS_ENTITY_DEFINITION);
return res;
}
AccessEntityPtr tryReadEntityFile(const std::filesystem::path & file_path, Poco::Logger & log)
{
try
{
return readEntityFile(file_path);
}
catch (...)
{
tryLogCurrentException(&log, "Could not parse " + file_path.string());
return nullptr;
}
}
/// Writes ATTACH queries for building a specified access entity to a file.
void writeEntityFile(const std::filesystem::path & file_path, const IAccessEntity & entity)
{
/// Build list of ATTACH queries.
ASTs queries;
queries.push_back(InterpreterShowCreateAccessEntityQuery::getAttachQuery(entity));
if ((entity.getType() == EntityType::USER) || (entity.getType() == EntityType::ROLE))
boost::range::push_back(queries, InterpreterShowGrantsQuery::getAttachGrantQueries(entity));
/// Serialize the list of ATTACH queries to a string.
std::stringstream ss;
for (const ASTPtr & query : queries)
ss << *query << ";\n";
String file_contents = std::move(ss).str();
/// First we save *.tmp file and then we rename if everything's ok.
auto tmp_file_path = std::filesystem::path{file_path}.replace_extension(".tmp");
bool succeeded = false;
SCOPE_EXIT(
{
if (!succeeded)
std::filesystem::remove(tmp_file_path);
});
/// Write the file.
WriteBufferFromFile out{tmp_file_path.string()};
out.write(file_contents.data(), file_contents.size());
/// Rename.
std::filesystem::rename(tmp_file_path, file_path);
succeeded = true;
}
/// Calculates the path to a file named <id>.sql for saving an access entity.
std::filesystem::path getEntityFilePath(const String & directory_path, const UUID & id)
{
return std::filesystem::path(directory_path).append(toString(id)).replace_extension(".sql");
}
/// Reads a map of name of access entity to UUID for access entities of some type from a file.
std::vector<std::pair<UUID, String>> readListFile(const std::filesystem::path & file_path)
{
ReadBufferFromFile in(file_path);
size_t num;
readVarUInt(num, in);
std::vector<std::pair<UUID, String>> id_name_pairs;
id_name_pairs.reserve(num);
for (size_t i = 0; i != num; ++i)
{
String name;
readStringBinary(name, in);
UUID id;
readUUIDText(id, in);
id_name_pairs.emplace_back(id, std::move(name));
}
return id_name_pairs;
}
/// Writes a map of name of access entity to UUID for access entities of some type to a file.
void writeListFile(const std::filesystem::path & file_path, const std::vector<std::pair<UUID, std::string_view>> & id_name_pairs)
{
WriteBufferFromFile out(file_path);
writeVarUInt(id_name_pairs.size(), out);
for (const auto & [id, name] : id_name_pairs)
{
writeStringBinary(name, out);
writeUUIDText(id, out);
}
}
/// Calculates the path for storing a map of name of access entity to UUID for access entities of some type.
std::filesystem::path getListFilePath(const String & directory_path, EntityType type)
{
String file_name = EntityTypeInfo::get(type).plural_raw_name;
boost::to_lower(file_name);
file_name += ".list";
return std::filesystem::path(directory_path).append(file_name);
}
    /// Calculates the path to a temporary file whose existence means that the list files are corrupted
    /// and need to be rebuilt.
std::filesystem::path getNeedRebuildListsMarkFilePath(const String & directory_path)
{
return std::filesystem::path(directory_path).append("need_rebuild_lists.mark");
}
bool tryParseUUID(const String & str, UUID & id)
{
try
{
id = parseFromString<UUID>(str);
return true;
}
catch (...)
{
return false;
}
}
}
DiskAccessStorage::DiskAccessStorage()
: IAccessStorage("disk")
{
}
DiskAccessStorage::~DiskAccessStorage()
{
stopListsWritingThread();
writeLists();
}
void DiskAccessStorage::setDirectory(const String & directory_path_)
{
Notifications notifications;
SCOPE_EXIT({ notify(notifications); });
std::lock_guard lock{mutex};
initialize(directory_path_, notifications);
}
void DiskAccessStorage::initialize(const String & directory_path_, Notifications & notifications)
{
auto canonical_directory_path = std::filesystem::weakly_canonical(directory_path_);
if (initialized)
{
if (directory_path == canonical_directory_path)
return;
throw Exception("Storage " + getStorageName() + " already initialized with another directory", ErrorCodes::LOGICAL_ERROR);
}
std::filesystem::create_directories(canonical_directory_path);
if (!std::filesystem::exists(canonical_directory_path) || !std::filesystem::is_directory(canonical_directory_path))
throw Exception("Couldn't create directory " + canonical_directory_path.string(), ErrorCodes::DIRECTORY_DOESNT_EXIST);
directory_path = canonical_directory_path;
initialized = true;
bool should_rebuild_lists = std::filesystem::exists(getNeedRebuildListsMarkFilePath(directory_path));
if (!should_rebuild_lists)
{
if (!readLists())
should_rebuild_lists = true;
}
if (should_rebuild_lists)
{
rebuildLists();
writeLists();
}
for (const auto & [id, entry] : entries_by_id)
prepareNotifications(id, entry, false, notifications);
}
void DiskAccessStorage::clear()
{
entries_by_id.clear();
for (auto type : ext::range(EntityType::MAX))
entries_by_name_and_type[static_cast<size_t>(type)].clear();
}
bool DiskAccessStorage::readLists()
{
clear();
bool ok = true;
for (auto type : ext::range(EntityType::MAX))
{
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
auto file_path = getListFilePath(directory_path, type);
if (!std::filesystem::exists(file_path))
{
LOG_WARNING(getLogger(), "File {} doesn't exist", file_path.string());
ok = false;
break;
}
try
{
for (const auto & [id, name] : readListFile(file_path))
{
auto & entry = entries_by_id[id];
entry.id = id;
entry.type = type;
entry.name = name;
entries_by_name[entry.name] = &entry;
}
}
catch (...)
{
tryLogCurrentException(getLogger(), "Could not read " + file_path.string());
ok = false;
break;
}
}
if (!ok)
clear();
return ok;
}
bool DiskAccessStorage::writeLists()
{
if (failed_to_write_lists)
return false; /// We don't try to write list files after the first fail.
/// The next restart of the server will invoke rebuilding of the list files.
if (types_of_lists_to_write.empty())
return true;
for (const auto & type : types_of_lists_to_write)
{
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
auto file_path = getListFilePath(directory_path, type);
try
{
std::vector<std::pair<UUID, std::string_view>> id_name_pairs;
id_name_pairs.reserve(entries_by_name.size());
for (const auto * entry : entries_by_name | boost::adaptors::map_values)
id_name_pairs.emplace_back(entry->id, entry->name);
writeListFile(file_path, id_name_pairs);
}
catch (...)
{
tryLogCurrentException(getLogger(), "Could not write " + file_path.string());
failed_to_write_lists = true;
types_of_lists_to_write.clear();
return false;
}
}
    /// The list files were successfully written, so we don't need the 'need_rebuild_lists.mark' file any longer.
std::filesystem::remove(getNeedRebuildListsMarkFilePath(directory_path));
types_of_lists_to_write.clear();
return true;
}
void DiskAccessStorage::scheduleWriteLists(EntityType type)
{
if (failed_to_write_lists)
return;
bool already_scheduled = !types_of_lists_to_write.empty();
types_of_lists_to_write.insert(type);
if (already_scheduled)
return;
/// Create the 'need_rebuild_lists.mark' file.
/// This file will be used later to find out if writing lists is successful or not.
std::ofstream{getNeedRebuildListsMarkFilePath(directory_path)};
startListsWritingThread();
}
void DiskAccessStorage::startListsWritingThread()
{
if (lists_writing_thread.joinable())
{
if (!lists_writing_thread_exited)
return;
lists_writing_thread.detach();
}
lists_writing_thread_exited = false;
lists_writing_thread = ThreadFromGlobalPool{&DiskAccessStorage::listsWritingThreadFunc, this};
}
void DiskAccessStorage::stopListsWritingThread()
{
if (lists_writing_thread.joinable())
{
lists_writing_thread_should_exit.notify_one();
lists_writing_thread.join();
}
}
void DiskAccessStorage::listsWritingThreadFunc()
{
std::unique_lock lock{mutex};
SCOPE_EXIT({ lists_writing_thread_exited = true; });
    /// It's better not to write the list files too often, which is why we need
    /// the following timeout.
const auto timeout = std::chrono::minutes(1);
if (lists_writing_thread_should_exit.wait_for(lock, timeout) != std::cv_status::timeout)
return; /// The destructor requires us to exit.
writeLists();
}
/// Reads and parses all the "<id>.sql" files from a specified directory
/// and then saves the files "users.list", "roles.list", etc. to the same directory.
bool DiskAccessStorage::rebuildLists()
{
LOG_WARNING(getLogger(), "Recovering lists in directory {}", directory_path);
clear();
for (const auto & directory_entry : std::filesystem::directory_iterator(directory_path))
{
if (!directory_entry.is_regular_file())
continue;
const auto & path = directory_entry.path();
if (path.extension() != ".sql")
continue;
UUID id;
if (!tryParseUUID(path.stem(), id))
continue;
const auto access_entity_file_path = getEntityFilePath(directory_path, id);
auto entity = tryReadEntityFile(access_entity_file_path, *getLogger());
if (!entity)
continue;
const String & name = entity->getName();
auto type = entity->getType();
auto & entry = entries_by_id[id];
entry.id = id;
entry.type = type;
entry.name = name;
entry.entity = entity;
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
entries_by_name[entry.name] = &entry;
}
for (auto type : ext::range(EntityType::MAX))
types_of_lists_to_write.insert(type);
return true;
}
std::optional<UUID> DiskAccessStorage::findImpl(EntityType type, const String & name) const
{
std::lock_guard lock{mutex};
const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
auto it = entries_by_name.find(name);
if (it == entries_by_name.end())
return {};
return it->second->id;
}
std::vector<UUID> DiskAccessStorage::findAllImpl(EntityType type) const
{
std::lock_guard lock{mutex};
const auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
std::vector<UUID> res;
res.reserve(entries_by_name.size());
for (const auto * entry : entries_by_name | boost::adaptors::map_values)
res.emplace_back(entry->id);
return res;
}
bool DiskAccessStorage::existsImpl(const UUID & id) const
{
std::lock_guard lock{mutex};
return entries_by_id.count(id);
}
AccessEntityPtr DiskAccessStorage::readImpl(const UUID & id) const
{
std::lock_guard lock{mutex};
auto it = entries_by_id.find(id);
if (it == entries_by_id.end())
throwNotFound(id);
const auto & entry = it->second;
if (!entry.entity)
entry.entity = readAccessEntityFromDisk(id);
return entry.entity;
}
String DiskAccessStorage::readNameImpl(const UUID & id) const
{
std::lock_guard lock{mutex};
auto it = entries_by_id.find(id);
if (it == entries_by_id.end())
throwNotFound(id);
return String{it->second.name};
}
bool DiskAccessStorage::canInsertImpl(const AccessEntityPtr &) const
{
return initialized;
}
UUID DiskAccessStorage::insertImpl(const AccessEntityPtr & new_entity, bool replace_if_exists)
{
Notifications notifications;
SCOPE_EXIT({ notify(notifications); });
UUID id = generateRandomID();
std::lock_guard lock{mutex};
    insertNoLock(id, new_entity, replace_if_exists, notifications);
return id;
}
void DiskAccessStorage::insertNoLock(const UUID & id, const AccessEntityPtr & new_entity, bool replace_if_exists, Notifications & notifications)
{
const String & name = new_entity->getName();
EntityType type = new_entity->getType();
if (!initialized)
throw Exception(
"Cannot insert " + new_entity->outputTypeAndName() + " to storage [" + getStorageName()
+ "] because the output directory is not set",
ErrorCodes::LOGICAL_ERROR);
/// Check that we can insert.
auto it_by_id = entries_by_id.find(id);
if (it_by_id != entries_by_id.end())
{
const auto & existing_entry = it_by_id->second;
throwIDCollisionCannotInsert(id, type, name, existing_entry.entity->getType(), existing_entry.entity->getName());
}
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
auto it_by_name = entries_by_name.find(name);
bool name_collision = (it_by_name != entries_by_name.end());
if (name_collision && !replace_if_exists)
throwNameCollisionCannotInsert(type, name);
scheduleWriteLists(type);
writeAccessEntityToDisk(id, *new_entity);
if (name_collision && replace_if_exists)
removeNoLock(it_by_name->second->id, notifications);
/// Do insertion.
auto & entry = entries_by_id[id];
entry.id = id;
entry.type = type;
entry.name = name;
entry.entity = new_entity;
entries_by_name[entry.name] = &entry;
prepareNotifications(id, entry, false, notifications);
}
void DiskAccessStorage::removeImpl(const UUID & id)
{
Notifications notifications;
SCOPE_EXIT({ notify(notifications); });
std::lock_guard lock{mutex};
removeNoLock(id, notifications);
}
void DiskAccessStorage::removeNoLock(const UUID & id, Notifications & notifications)
{
auto it = entries_by_id.find(id);
if (it == entries_by_id.end())
throwNotFound(id);
Entry & entry = it->second;
EntityType type = entry.type;
scheduleWriteLists(type);
deleteAccessEntityOnDisk(id);
    /// Do the removal.
prepareNotifications(id, entry, true, notifications);
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
entries_by_name.erase(entry.name);
entries_by_id.erase(it);
}
void DiskAccessStorage::updateImpl(const UUID & id, const UpdateFunc & update_func)
{
Notifications notifications;
SCOPE_EXIT({ notify(notifications); });
std::lock_guard lock{mutex};
updateNoLock(id, update_func, notifications);
}
void DiskAccessStorage::updateNoLock(const UUID & id, const UpdateFunc & update_func, Notifications & notifications)
{
auto it = entries_by_id.find(id);
if (it == entries_by_id.end())
throwNotFound(id);
Entry & entry = it->second;
if (!entry.entity)
entry.entity = readAccessEntityFromDisk(id);
auto old_entity = entry.entity;
auto new_entity = update_func(old_entity);
if (!new_entity->isTypeOf(old_entity->getType()))
throwBadCast(id, new_entity->getType(), new_entity->getName(), old_entity->getType());
if (*new_entity == *old_entity)
return;
const String & new_name = new_entity->getName();
const String & old_name = old_entity->getName();
const EntityType type = entry.type;
auto & entries_by_name = entries_by_name_and_type[static_cast<size_t>(type)];
bool name_changed = (new_name != old_name);
if (name_changed)
{
if (entries_by_name.count(new_name))
throwNameCollisionCannotRename(type, old_name, new_name);
scheduleWriteLists(type);
}
writeAccessEntityToDisk(id, *new_entity);
entry.entity = new_entity;
if (name_changed)
{
entries_by_name.erase(entry.name);
entry.name = new_name;
entries_by_name[entry.name] = &entry;
}
prepareNotifications(id, entry, false, notifications);
}
AccessEntityPtr DiskAccessStorage::readAccessEntityFromDisk(const UUID & id) const
{
return readEntityFile(getEntityFilePath(directory_path, id));
}
void DiskAccessStorage::writeAccessEntityToDisk(const UUID & id, const IAccessEntity & entity) const
{
writeEntityFile(getEntityFilePath(directory_path, id), entity);
}
void DiskAccessStorage::deleteAccessEntityOnDisk(const UUID & id) const
{
auto file_path = getEntityFilePath(directory_path, id);
if (!std::filesystem::remove(file_path))
throw Exception("Couldn't delete " + file_path.string(), ErrorCodes::FILE_DOESNT_EXIST);
}
void DiskAccessStorage::prepareNotifications(const UUID & id, const Entry & entry, bool remove, Notifications & notifications) const
{
if (!remove && !entry.entity)
return;
const AccessEntityPtr entity = remove ? nullptr : entry.entity;
for (const auto & handler : entry.handlers_by_id)
notifications.push_back({handler, id, entity});
for (const auto & handler : handlers_by_type[static_cast<size_t>(entry.type)])
notifications.push_back({handler, id, entity});
}
ext::scope_guard DiskAccessStorage::subscribeForChangesImpl(const UUID & id, const OnChangedHandler & handler) const
{
std::lock_guard lock{mutex};
auto it = entries_by_id.find(id);
if (it == entries_by_id.end())
return {};
const Entry & entry = it->second;
auto handler_it = entry.handlers_by_id.insert(entry.handlers_by_id.end(), handler);
return [this, id, handler_it]
{
std::lock_guard lock2{mutex};
auto it2 = entries_by_id.find(id);
if (it2 != entries_by_id.end())
{
const Entry & entry2 = it2->second;
entry2.handlers_by_id.erase(handler_it);
}
};
}
ext::scope_guard DiskAccessStorage::subscribeForChangesImpl(EntityType type, const OnChangedHandler & handler) const
{
std::lock_guard lock{mutex};
auto & handlers = handlers_by_type[static_cast<size_t>(type)];
handlers.push_back(handler);
auto handler_it = std::prev(handlers.end());
return [this, type, handler_it]
{
std::lock_guard lock2{mutex};
auto & handlers2 = handlers_by_type[static_cast<size_t>(type)];
handlers2.erase(handler_it);
};
}
bool DiskAccessStorage::hasSubscriptionImpl(const UUID & id) const
{
std::lock_guard lock{mutex};
auto it = entries_by_id.find(id);
if (it != entries_by_id.end())
{
const Entry & entry = it->second;
return !entry.handlers_by_id.empty();
}
return false;
}
bool DiskAccessStorage::hasSubscriptionImpl(EntityType type) const
{
std::lock_guard lock{mutex};
const auto & handlers = handlers_by_type[static_cast<size_t>(type)];
return !handlers.empty();
}
}
|
{"hexsha": "8b249813f7cfac6945257d9b194a3bdd05533c0e", "size": 27404, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Access/DiskAccessStorage.cpp", "max_stars_repo_name": "chengy8934/ClickHouse", "max_stars_repo_head_hexsha": "c1d2d2d7f759536cdac991077110ea40023030c2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-06-08T04:11:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-24T12:27:37.000Z", "max_issues_repo_path": "src/Access/DiskAccessStorage.cpp", "max_issues_repo_name": "chengy8934/ClickHouse", "max_issues_repo_head_hexsha": "c1d2d2d7f759536cdac991077110ea40023030c2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2020-02-18T14:59:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-19T10:42:18.000Z", "max_forks_repo_path": "src/Access/DiskAccessStorage.cpp", "max_forks_repo_name": "chengy8934/ClickHouse", "max_forks_repo_head_hexsha": "c1d2d2d7f759536cdac991077110ea40023030c2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-01-04T06:43:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-14T03:17:13.000Z", "avg_line_length": 32.9375, "max_line_length": 164, "alphanum_fraction": 0.6598306817, "num_tokens": 6038}
|
import argparse
import logging
import imageio
import numpy as np
import yaml
from IPython import embed
from common import get_image_array, get_probability_for_class, get_perturbed_images
from differential_evolution import init_population, gen_children
from models.base import get_model_from_name
CONFIG = None
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
def fitness_function(prediction, target_class):
"""
For targeted attacks, the fitness function is the probability of target class
"""
return get_probability_for_class(prediction, target_class)
def get_fit_population(fathers, children, fathers_predictions, children_predictions, target_class):
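    """
    Greedy selection between each parent and its child: keep whichever candidate
    has the higher target-class probability (the fitness for targeted attacks).
    """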
final_population = list()
for i in range(len(fathers_predictions)):
father_fitness = fitness_function(fathers_predictions[i], target_class)
child_fitness = fitness_function(children_predictions[i], target_class)
if father_fitness < child_fitness:
final_population.append(children[i])
else:
final_population.append(fathers[i])
return np.array(final_population)
def find_adversary_image(image, model, target_label):
original_predictions = model.predict(np.copy(image))
true_label = original_predictions[0][0][1]
true_label_probability = original_predictions[0][0][2]
logging.info("True label: {}, Probability: {}".format(true_label, true_label_probability))
target_label_probability = get_probability_for_class(original_predictions[0], target_label)
logging.info("Target label: {}, Probability: {}".format(target_label, target_label_probability))
imageio.imwrite('output/original.jpg', image[0])
population = init_population(CONFIG)
for i in range(CONFIG["num_iterations"]):
logging.info("Iteration: {}".format(i))
perturbed_images = get_perturbed_images(image, population)
perturbed_predictions = model.predict(np.copy(perturbed_images), top=model.num_classes)
        target_class_probabilities = [get_probability_for_class(p, target_label) for p in perturbed_predictions]
logging.info("Probabilites for target class: Min={}, Max={}".format(min(target_class_probabilities),
max(target_class_probabilities)))
if i % 10 == 0:
imageio.imwrite('output/{}.jpg'.format(i),
perturbed_images[target_class_probabilities.index(max(target_class_probabilities))])
population_children = gen_children(population, CONFIG)
perturbed_images_children = get_perturbed_images(image, population_children)
perturbed_predictions_children = model.predict(np.copy(perturbed_images_children), top=model.num_classes)
population = get_fit_population(population, population_children,
perturbed_predictions,
perturbed_predictions_children,
target_class=target_label)
embed()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config', '-c', dest='config_file', help='config file')
parser.add_argument('--input', '-i', dest='input_image', help='input image file')
parser.add_argument('--target', '-t', dest='target_class', help='target class name')
args = parser.parse_args()
CONFIG = yaml.safe_load(open(args.config_file))
model = get_model_from_name(CONFIG["model"])
target_label = args.target_class
CONFIG["img_x"], CONFIG["img_y"], CONFIG["img_channels"] = model.input_size
image_arr = get_image_array(args.input_image, config=CONFIG)
find_adversary_image(image_arr, model, target_label)
|
{"hexsha": "cddf82db8bc8bedf25a1d7e7867a9c1d25e29c23", "size": 3829, "ext": "py", "lang": "Python", "max_stars_repo_path": "targeted.py", "max_stars_repo_name": "StefanieStoppel/one-pixel-attack", "max_stars_repo_head_hexsha": "e1a984b508453f6a20adbdb2e522b4b33cc6ed90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-03-09T15:46:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T11:39:09.000Z", "max_issues_repo_path": "targeted.py", "max_issues_repo_name": "StefanieStoppel/one-pixel-attack", "max_issues_repo_head_hexsha": "e1a984b508453f6a20adbdb2e522b4b33cc6ed90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "targeted.py", "max_forks_repo_name": "StefanieStoppel/one-pixel-attack", "max_forks_repo_head_hexsha": "e1a984b508453f6a20adbdb2e522b4b33cc6ed90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-19T13:52:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-19T13:52:52.000Z", "avg_line_length": 44.0114942529, "max_line_length": 117, "alphanum_fraction": 0.7022721337, "include": true, "reason": "import numpy", "num_tokens": 751}
|
import pandas as pd
import numpy as np
import talib as tb
from indicators.indicator_utils import *
def reg_envelopes(rates, price = 'Close', deviation = 0.008, reg_window=250, reg_mean=75):
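    """
    Price envelopes built from a rolling regression: the midline blends a
    smoothed rolling-regression fit with an SMA(200) and the Bollinger midline;
    the upper/lower bands mix +/- `deviation` envelopes with the Bollinger bands.
    Adds the 'new_pol', 'new_pol_upper' and 'new_pol_lower' columns to `rates`
    and returns their keys.
    """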
rates["new_pol"] = (rates["Close"].rolling(reg_window).apply(regression(rates,price), raw=False)).rolling(reg_mean).mean()
rates["new_pol_upper"] = rates["new_pol"].values + rates["new_pol"].values * deviation
rates["new_pol_lower"] = rates["new_pol"].values - rates["new_pol"].values * deviation
env = tb.SMA(rates['Close'],200)
env_upper = env + env * deviation
env_lower = env - env * deviation
upper, middle, lower = tb.BBANDS(rates['Close'], 200, 2, 2)
ind_upper = (env_upper + upper) / 2
ind_lower = (env_lower + lower) / 2
ind_mid = (env + middle) / 2
rates['new_pol'] = (ind_mid+ rates['new_pol'].values)/2
rates['new_pol_upper'] = (ind_upper+ rates['new_pol_upper'].values)/2
rates['new_pol_lower'] = (ind_lower + rates['new_pol_lower'].values)/2
keys = ['new_pol','new_pol_upper','new_pol_lower']
return keys
def create_zigzag(rates, pct=0.35):
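    """
    ZigZag indicator: record a pivot whenever price reverses by more than `pct`
    percent from the last swing high/low, extrapolate the current trend to the
    final bar, then reindex the pivots onto an hourly weekday grid and linearly
    interpolate between them. Adds a 'ZigZag' column to `rates` and returns its key.
    """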
ut = 1 + pct / 100
dt = 1 - pct / 100
ld = rates.index[0]
lp = rates.Close[ld]
tr = None
zzd, zzp = [ld], [lp]
for ix, ch, cl in zip(rates.index, rates.High, rates.Low):
# No initial trend
if tr is None:
if ch / lp > ut:
tr = 1
elif cl / lp < dt:
tr = -1
# Trend is up
elif tr == 1:
# New H
if ch > lp:
ld, lp = ix, ch
# Reversal
elif cl / lp < dt:
zzd.append(ld)
zzp.append(lp)
tr, ld, lp = -1, ix, cl
# Trend is down
else:
# New L
if cl < lp:
ld, lp = ix, cl
# Reversal
elif ch / lp > ut:
zzd.append(ld)
zzp.append(lp)
tr, ld, lp = 1, ix, ch
# Extrapolate the current trend
if zzd[-1] != rates.index[-1]:
zzd.append(rates.index[-1])
if tr is None:
zzp.append(rates.Close[zzd[-1]])
elif tr == 1:
zzp.append(rates.High[zzd[-1]])
else:
zzp.append(rates.Low[zzd[-1]])
x = pd.Series(zzp, index=zzd)
x = x.reindex(pd.date_range(start=rates.index.min(),
end=rates.index.max(),
freq='H'))
x = x[x.index.dayofweek < 5]
rates ['ZigZag'] = x
rates['ZigZag'] = rates['ZigZag'].interpolate()
keys = ['ZigZag']
return keys
|
{"hexsha": "df9d95d159fb721a8ed922e2fcecc82d29eea520", "size": 2703, "ext": "py", "lang": "Python", "max_stars_repo_path": "indicators/custom_indicators.py", "max_stars_repo_name": "zqngetsu96/PyForex", "max_stars_repo_head_hexsha": "09783c7c9bc4bf0cfefea1ebca8c0328a58b176c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "indicators/custom_indicators.py", "max_issues_repo_name": "zqngetsu96/PyForex", "max_issues_repo_head_hexsha": "09783c7c9bc4bf0cfefea1ebca8c0328a58b176c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "indicators/custom_indicators.py", "max_forks_repo_name": "zqngetsu96/PyForex", "max_forks_repo_head_hexsha": "09783c7c9bc4bf0cfefea1ebca8c0328a58b176c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-08T10:41:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T10:41:52.000Z", "avg_line_length": 30.7159090909, "max_line_length": 126, "alphanum_fraction": 0.512763596, "include": true, "reason": "import numpy", "num_tokens": 772}
|
import numpy as np
import tqdm
import pickle
from tfc.utils import MakePlot
from matplotlib.ticker import PercentFormatter
## TEST PARAMETERS: ***************************************************
tfc = pickle.load(open('data/EOL_TFC.pickle','rb'))
spe = pickle.load(open('data/EOL_Spec.pickle','rb'))
## Plot: **************************************************************
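# Each histogram weights every sample by 1/N so bar heights are fractions of runs;
# PercentFormatter then renders the y-axis ticks as percentages.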
MS = 12
# import pdb; pdb.set_trace()
## Plot 1: Accuracy
bin1 = np.logspace(-17, -11, num=50, endpoint=True, base=10.0, dtype=None, axis=0)
p1 = MakePlot('Accuracy ($|L_2|$)',r'Frequency')
p1.ax[0].hist(tfc['loss'][np.where(tfc['loss'] < 1.)], bin1, edgecolor='black', linewidth=1.2, label = 'TFC',alpha = 0.75, \
weights=np.ones(len(tfc['loss'][np.where(tfc['loss'] < 1.)])) / len(tfc['loss'][np.where(tfc['loss'] < 1.)]))
p1.ax[0].yaxis.set_major_formatter(PercentFormatter(1, decimals=0, symbol='%'))
p1.ax[0].hist(spe['loss'][np.where(spe['loss'] < 1.)], bin1, edgecolor='black', linewidth=1.2, label = 'Spectral Method', alpha = 0.75, \
weights=np.ones(len(spe['loss'][np.where(spe['loss'] < 1.)])) / len(spe['loss'][np.where(spe['loss'] < 1.)]))
p1.ax[0].yaxis.set_major_formatter(PercentFormatter(1, decimals=0, symbol='%'))
p1.ax[0].set_xscale('log')
p1.ax[0].set_xlim(5e-17, 5e-15)
p1.fig.subplots_adjust(wspace=0.35, hspace=0.25)
p1.ax[0].legend()
p1.PartScreen(9.,6.)
p1.show()
# p1.save('figures/EOL_hist_L2_outerLoop')
## Plot 2: Computation time
bin2 = np.linspace(0,300,30)
p2 = MakePlot('Computation time [ms]',r'Frequency')
p2.ax[0].hist(tfc['time'][np.where(tfc['time'] < 1.)]*1000, bin2, edgecolor='black', linewidth=1.2,
label = 'TFC', alpha = 0.75, \
weights=np.ones(len(tfc['time'][np.where(tfc['time'] < 1.)])) / len(tfc['time'][np.where(tfc['time'] < 1.)]))
p2.ax[0].yaxis.set_major_formatter(PercentFormatter(1, decimals=0, symbol='%'))
p2.ax[0].hist(spe['time'][np.where(spe['time'] < 1.)]*1000, bin2, edgecolor='black', linewidth=1.2, label = 'Spectral Method', alpha = 0.75, \
weights=np.ones(len(spe['time'][np.where(spe['time'] < 1.)])) / len(spe['time'][np.where(spe['time'] < 1.)]))
p2.ax[0].yaxis.set_major_formatter(PercentFormatter(1, decimals=0, symbol='%'))
p2.ax[0].set_xlim(100, 300)
p2.fig.subplots_adjust(wspace=0.35, hspace=0.25)
p2.ax[0].legend()
p2.PartScreen(9.,6.)
p2.show()
# p2.save('figures/EOL_hist_time_outerLoop')
## Plot 3: Number of iterations
bin3 = np.arange(0,50,1)
p3 = MakePlot(r'Iterations',r'Frequency')
p3.ax[0].hist(tfc['it'][np.where(tfc['time'] < 1.)], bin3, edgecolor='black', linewidth=1.2,
label = 'TFC',alpha = 0.75, \
weights=np.ones(len(tfc['it'][np.where(tfc['time'] < 1.)])) / len(tfc['it'][np.where(tfc['time'] < 1.)]))
p3.ax[0].yaxis.set_major_formatter(PercentFormatter(1, decimals=0, symbol='%'))
p3.ax[0].hist(spe['it'][np.where(spe['time'] < 1.)], bin3, edgecolor='black', linewidth=1.2, label = 'Spectral Method', alpha = 0.75, \
weights=np.ones(len(spe['it'][np.where(spe['time'] < 1.)])) / len(spe['it'][np.where(spe['time'] < 1.)]))
p3.ax[0].yaxis.set_major_formatter(PercentFormatter(1, decimals=0, symbol='%'))
p3.ax[0].set_xlim(15, 30)
p3.fig.subplots_adjust(wspace=0.35, hspace=0.25)
p3.ax[0].legend()
p3.PartScreen(9.,6.)
p3.show()
# p3.save('figures/EOL_hist_it_outerLoop')
|
{"hexsha": "c6be9fa9aed366640725d0369ddac3418c14042a", "size": 3259, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/Hunter_Johnston_Dissertation/Chapter_6/Example_6_2/plotData.py", "max_stars_repo_name": "leakec/tfc", "max_stars_repo_head_hexsha": "f814be4643270498a68bb0859720191ff7216012", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-01-04T16:30:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T22:12:45.000Z", "max_issues_repo_path": "examples/Hunter_Johnston_Dissertation/Chapter_6/Example_6_2/plotData.py", "max_issues_repo_name": "leakec/tfc", "max_issues_repo_head_hexsha": "f814be4643270498a68bb0859720191ff7216012", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-12-10T23:17:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T18:39:18.000Z", "max_forks_repo_path": "examples/Hunter_Johnston_Dissertation/Chapter_6/Example_6_2/plotData.py", "max_forks_repo_name": "leakec/tfc", "max_forks_repo_head_hexsha": "f814be4643270498a68bb0859720191ff7216012", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-27T10:34:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T13:02:49.000Z", "avg_line_length": 38.3411764706, "max_line_length": 142, "alphanum_fraction": 0.6385394293, "include": true, "reason": "import numpy", "num_tokens": 1138}
|
[STATEMENT]
lemma inf_co_total:
"co_total x \<Longrightarrow> co_total y \<Longrightarrow> co_total (x \<sqinter> y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>co_total x; co_total y\<rbrakk> \<Longrightarrow> co_total (x \<sqinter> y)
[PROOF STEP]
by (metis co_total_def order.antisym bot_least mult_right_sub_dist_inf_right)
|
{"llama_tokens": 136, "file": "Correctness_Algebras_Lattice_Ordered_Semirings", "length": 1}
|
import sys
import os
import multiprocessing
import subprocess
import scipy.stats as sci
from scipy.stats.mstats import mquantiles
from methylpy.utilities import print_checkpoint, print_error, print_warning
from methylpy.utilities import split_fastq_file
from methylpy.utilities import split_fastq_file_pbat
from methylpy.utilities import open_allc_file,index_allc_file
from methylpy.utilities import read_allc_index,bgzip_allc_file
from methylpy.utilities import check_call_mc_dependencies
import pysam
import pdb
import shlex
import itertools
import re
import glob
import io as cStr
import bisect
import gzip
import math
def run_methylation_pipeline(read_files, sample,
forward_reference, reverse_reference, reference_fasta,
libraries = None,
unmethylated_control=None,
path_to_output="", sig_cutoff=0.01,
num_procs=1, sort_mem="500M",
num_upstr_bases=0, num_downstr_bases=2,
generate_allc_file=True,
generate_mpileup_file=True,
compress_output=True,
bgzip=False,
path_to_bgzip="",
path_to_tabix="",
binom_test=False, min_cov=2,
trim_reads=True, path_to_cutadapt="",
pbat=False,check_dependency=True,
path_to_aligner="",
aligner="bowtie2",aligner_options=None,
merge_by_max_mapq=False,min_mapq=30,
remove_clonal=False,keep_clonal_stats=True,
path_to_picard="",java_options="-Xmx20g",
path_to_samtools="",
remove_chr_prefix=True,
add_snp_info=False,
adapter_seq="AGATCGGAAGAGCACACGTCTG",
max_adapter_removal=None,
overlap_length=None, zero_cap=None,
error_rate=None, min_qual_score=10,
min_read_len=30,
keep_temp_files=False,
min_base_quality=1):
"""
read_files is a list of all the fastq files you'd like to run through the pipeline.
Note that globbing is supported here (i.e., you can use * in your paths)
    libraries is a list of library IDs (in the same order as the files list) indicating which
libraries each set of fastq files belong to. If you use a glob, you only need to indicate
the library ID for those fastqs once (i.e., the length of files and libraries should be
the same)
sample is a string indicating the name of the sample you're processing. It will be included
in the output files.
forward_reference is a string indicating the path to the forward strand reference created by
build_ref
reverse_reference is a string indicating the path to the reverse strand reference created by
build_ref
reference_fasta is a string indicating the path to a fasta file containing the sequences
you used for mapping
input is the path to a bam file that contains mapped bisulfite sequencing reads
unmethylated_control is the name of the chromosome/region that you want to use to estimate
the non-conversion rate of your sample, or the non-conversion rate you'd like to use.
Consequently, control is either a string, or a decimal.
If control is a string then it should be in the following format: "chrom:start-end".
If you'd like to specify an entire chromosome simply use "chrom:"
remove_clonal is a boolean indicating that you want to remove clonal reads (PCR duplicates).
If true, picard.jar should be available in folder specified in path_to_picard.
path_to_picard is a string of the path to "picard.jar". "picard.jar" is assumed to be
in your path if this option isn't used.
path_to_samtools is a string indicating the path to the directory containing your
installation of samtools. Samtools is assumed to be in your path if this is not
provided.
path_to_aligner is a string indicating the path to the folder in which bowtie resides. Bowtie
is assumed to be in your path if this option isn't used.
aligner_options is a list of strings indicating options you'd like passed to the aligner.
num_procs is an integer indicating how many num_procs you'd like to run this function over.
trim_reads is a boolean indicating that you want to have reads trimmed by cutadapt.
    path_to_cutadapt is the path to the cutadapt executable. Otherwise this is assumed to be in
your path.
adapter_seq is the sequence of an adapter that was ligated to the 3' end. The adapter itself and
anything that follows is trimmed.
max_adapter_removal indicates the maximum number of times to try to remove adapters. Useful when
an adapter gets appended multiple times.
overlap_length is the minimum overlap length. If the overlap between the read and the adapter is
shorter than LENGTH, the read is not modified. This reduces the no. of bases trimmed purely
due to short random adapter matches.
zero_cap causes negative quality values to be set to zero (workaround to avoid
segmentation faults in BWA).
error_rate is the maximum allowed error rate (no. of errors divided by the length of the
matching region). Default: 0.1
min_qual_score allows you to trim low-quality ends from reads before adapter removal. The
algorithm is the same as the one used by BWA (Subtract CUTOFF from all qualities; compute
partial sums from all indices to the end of the sequence; cut sequence at the index at
which the sum is minimal).
min_read_len indicates the minimum length a read must be to be kept. Reads that are too short even
before adapter removal are also discarded. In colorspace, an initial primer is not counted.
sig_cutoff is a float indicating the adjusted p-value cutoff you wish to use for determining
whether or not a site is methylated
min_cov is an integer indicating the minimum number of reads for a site to be tested.
    binom_test indicates that you'd like to use a binomial test, rather than the alternative method
outlined here: https://bitbucket.org/schultzmattd/methylpy/wiki/Methylation%20Calling
keep_temp_files is a boolean indicating that you'd like to keep the intermediate files generated
by this function. This can be useful for debugging, but in general should be left False.
    aligner specifies which supported aligner to use ('bowtie', 'bowtie2' or 'minimap2')
sort_mem is the parameter to pass to unix sort with -S/--buffer-size command
path_to_output is the path to a directory where you would like the output to be stored.
The default is the same directory as the input fastqs.
min_base_quality is an integer indicating the minimum PHRED quality score for a base to be
included in the mpileup file (and subsequently to be considered for methylation calling).
"""
if check_dependency:
check_call_mc_dependencies(path_to_samtools=path_to_samtools,
trim_reads=trim_reads,
path_to_cutadapt=path_to_cutadapt,
aligner=aligner,
path_to_aligner=path_to_aligner,
remove_clonal=remove_clonal,
path_to_picard=path_to_picard)
if libraries is None:
libraries = ["libA"]
if not isinstance(libraries, list):
if isinstance(libraries, str):
libraries = [libraries]
else:
exit("libraries must be a list of string(s)")
if len(libraries) == 1 and len(read_files) > 1:
uniq_library = libraries[0]
libraries = [uniq_library for ind in range(len(read_files))]
#Default bowtie option
if aligner_options is None:
if aligner.lower() == "minimap2":
aligner_options = ["-ax","sr","--secondary=no"]
elif aligner.lower() == "bowtie":
aligner_options = ["-S", "-k 1", "-m 1", "--chunkmbs 3072",
"--best", "--strata", "-o 4", "-e 80",
"-l 20", "-n 0"]
aligner_options.append("--phred33-quals")
else: # bowtie2
aligner_options = []
aligner_options.append("--phred33-quals")
# CASAVA >= 1.8
quality_base = 33
if len(path_to_samtools) != 0:
path_to_samtools += "/"
if len(path_to_aligner) != 0:
path_to_aligner += "/"
# output path
if len(path_to_output) != 0:
path_to_output += "/"
if not os.path.exists(path_to_output):
try:
os.makedirs(path_to_output)
except:
print_error(" Failed to create output folder!")
expanded_file_list = []
expanded_library_list = []
total_input = 0
total_unique = 0
total_clonal = 0
for path, library in zip(read_files, libraries):
glob_list = glob.glob(path)
for filen in glob_list:
expanded_file_list.append(filen)
expanded_library_list.append(library)
for current_library in set(libraries):
library_files = [filen for filen, library
in zip(expanded_file_list, expanded_library_list)
if library == current_library]
#deal with actual filename rather than path to file
lib_input, lib_unique = run_mapping(current_library, library_files, sample,
forward_reference, reverse_reference, reference_fasta,
path_to_output=path_to_output,
path_to_samtools=path_to_samtools,
path_to_aligner=path_to_aligner,
aligner=aligner,
aligner_options=aligner_options,
merge_by_max_mapq=merge_by_max_mapq,
pbat=pbat,
min_mapq=min_mapq,
num_procs=num_procs,
trim_reads=trim_reads,
path_to_cutadapt=path_to_cutadapt,
adapter_seq=adapter_seq,
max_adapter_removal=max_adapter_removal,
overlap_length=overlap_length, zero_cap=zero_cap,
quality_base=quality_base,
error_rate=error_rate,
min_qual_score=min_qual_score,
min_read_len=min_read_len,
keep_temp_files=keep_temp_files,
sort_mem=sort_mem)
total_input += lib_input
total_unique += lib_unique
## Remove clonal reads
if remove_clonal == True:
lib_clonal = remove_clonal_bam(input_bam=path_to_output+sample+"_"+
str(current_library)+"_processed_reads.bam",
output_bam=path_to_output+sample+"_"+
str(current_library)+"_processed_reads_no_clonal.bam",
metric=path_to_output+sample+"_"+
str(current_library)+".metric",
is_pe=False,
path_to_picard=path_to_picard,
java_options=java_options)
subprocess.check_call(shlex.split("rm "+path_to_output+sample+"_"+
str(current_library)+"_processed_reads.bam"))
if not keep_clonal_stats:
subprocess.check_call(shlex.split("rm "+" "+path_to_output+sample+"_"+
str(current_library)+".metric"))
total_clonal += lib_clonal
print_checkpoint("There are "+str(total_input)+" total input reads")
print_checkpoint("There are "+str(total_unique)+" uniquely mapping reads, " +
str(float(total_unique) / total_input*100)+" percent remaining")
if remove_clonal == True:
total_non_clonal = total_unique - total_clonal
print_checkpoint("There are " + str(total_non_clonal) + " non-clonal reads, " +
str(float(total_non_clonal) / total_input*100) + " percent remaining")
## Merge bam files to get final bam file
library_files = [path_to_output+sample+"_"+str(library)+"_processed_reads_no_clonal.bam"
for library in set(libraries)]
if len(library_files) > 1:
merge_bam_files(library_files, path_to_output+sample+"_processed_reads_no_clonal.bam", path_to_samtools)
subprocess.check_call(shlex.split("rm "+" ".join(library_files)))
else:
subprocess.check_call(shlex.split("mv "+library_files[0]+" "+
path_to_output+sample+"_processed_reads_no_clonal.bam"))
## If not removing clonal reads
else:
library_files = [path_to_output+sample+"_"+str(library)+
"_processed_reads.bam" for library in set(libraries)]
if len(library_files) > 1:
merge_bam_files(library_files, path_to_output+sample+"_processed_reads.bam", path_to_samtools)
subprocess.check_call(shlex.split("rm "+" ".join(library_files)))
else:
subprocess.check_call(shlex.split("mv "+library_files[0]+" "+path_to_output+sample+"_processed_reads.bam"))
if generate_allc_file:
print_checkpoint("Begin calling mCs")
if remove_clonal == True:
output_bam_file = path_to_output+sample+"_processed_reads_no_clonal.bam"
else:
output_bam_file = path_to_output+sample+"_processed_reads.bam"
call_methylated_sites(output_bam_file,
sample,
reference_fasta,
unmethylated_control=unmethylated_control,
sig_cutoff=sig_cutoff,
num_procs=num_procs,
num_upstr_bases=num_upstr_bases,
num_downstr_bases=num_downstr_bases,
generate_mpileup_file=generate_mpileup_file,
compress_output=compress_output,
bgzip=bgzip,
path_to_bgzip=path_to_bgzip,
path_to_tabix=path_to_tabix,
min_mapq=min_mapq,
min_cov=min_cov,
binom_test=binom_test,
remove_chr_prefix=remove_chr_prefix,
sort_mem=sort_mem,
path_to_files=path_to_output,
path_to_samtools=path_to_samtools,
add_snp_info=add_snp_info,
min_base_quality=min_base_quality,
keep_temp_files=keep_temp_files)
print_checkpoint("Done")
def run_mapping(current_library, library_files, sample,
forward_reference, reverse_reference, reference_fasta,
path_to_output="",
path_to_samtools="", path_to_aligner="",
aligner="bowtie2",
aligner_options=None,merge_by_max_mapq=False,
min_mapq=30,pbat=False,
num_procs=1, trim_reads=True, path_to_cutadapt="",
adapter_seq="AGATCGGAAGAGCACACGTCTG",
max_adapter_removal=None, overlap_length=None, zero_cap=None,
quality_base=None, error_rate=None,
min_qual_score=10, min_read_len=30,
keep_temp_files=False,
sort_mem="500M"):
"""
This function runs the mapping portion of the methylation calling pipeline.
current_library is the ID that you'd like to run mapping on.
    libraries is a list of library IDs (in the same order as the files list) indicating which
libraries each set of fastq files belong to. If you use a glob, you only need to indicate
the library ID for those fastqs once (i.e., the length of files and libraries should be
the same)
sample is a string indicating the name of the sample you're processing. It will be included
in the output files.
forward_reference is a string indicating the path to the forward strand reference created by
build_ref
reverse_reference is a string indicating the path to the reverse strand reference created by
build_ref
reference_fasta is a string indicating the path to a fasta file containing the sequences
you used for mapping
path_to_samtools is a string indicating the path to the directory containing your
installation of samtools. Samtools is assumed to be in your path if this is not
provided.
path_to_aligner is a string indicating the path to the folder in which bowtie resides. Bowtie
is assumed to be in your path if this option isn't used.
aligner_options is a list of strings indicating options you'd like passed to bowtie
num_procs is an integer indicating how many num_procs you'd like to run this function over
trim_reads is a boolean indicating that you want to have reads trimmed by cutadapt.
    path_to_cutadapt is the path to the cutadapt executable. Otherwise this is assumed to be in your
path.
adapter_seq is the sequence of an adapter that was ligated to the 3' end. The adapter itself and
anything that follows is trimmed.
max_adapter_removal indicates the maximum number of times to try to remove adapters. Useful when
an adapter gets appended multiple times.
overlap_length is the minimum overlap length. If the overlap between the read and the adapter is
shorter than LENGTH, the read is not modified. This reduces the no. of bases trimmed purely
due to short random adapter matches.
zero_cap causes negative quality values to be set to zero (workaround to avoid segmentation faults
in BWA).
quality_base is the offset for quality scores. In other words, assume that quality values are
encoded as ascii(quality + QUALITY_BASE). The default (33) is usually correct, except for
reads produced by some versions of the Illumina pipeline, where this should be set to 64.
error_rate is the maximum allowed error rate (no. of errors divided by the length of the matching
region). Default: 0.1
min_qual_score allows you to trim low-quality ends from reads before adapter removal. The algorithm
is the same as the one used by BWA (Subtract CUTOFF from all qualities; compute partial sums
from all indices to the end of the sequence; cut sequence at the index at which the sum is minimal).
min_read_len indicates the minimum length a read must be to be kept. Reads that are too short even
before adapter removal are also discarded. In colorspace, an initial primer is not counted.
keep_temp_files is a boolean indicating that you'd like to keep the intermediate files generated
by this function. This can be useful for debugging, but in general should be left False.
    aligner specifies which supported aligner to use ('bowtie', 'bowtie2' or 'minimap2')
sort_mem is the parameter to pass to unix sort with -S/--buffer-size command
"""
#Default bowtie option
if aligner_options is None:
if aligner.lower() == "minimap2":
aligner_options = ["-ax", "sr","--secondary=no"]
elif aligner.lower() == "bowtie":
aligner_options = ["-S", "-k 1", "-m 1", "--chunkmbs 3072",
"--best", "--strata", "-o 4", "-e 80",
"-l 20", "-n 0"]
aligner_options.append("--phred33-quals")
else: # bowtie2
aligner_options = []
aligner_options.append("--phred33-quals")
# CASAVA >= 1.8
quality_base = 33
if len(path_to_output) != 0:
path_to_output += "/"
total_unique = 0
file_name = sample+"_"+str(current_library)
file_path = path_to_output+file_name
print_checkpoint("Begin splitting reads for "+file_name)
if pbat:
total_input = split_fastq_file_pbat(num_procs, library_files, file_path+"_split_")
else:
total_input = split_fastq_file(num_procs, library_files, file_path+"_split_")
if trim_reads:
print_checkpoint("Begin trimming reads for "+file_name)
quality_trim([file_path+"_split_"+str(i) for i in range(0, num_procs)],
output=[file_path+"_split_trimmed_"+str(i)
for i in range(0, num_procs)],
adapter_seq=adapter_seq,
error_rate=error_rate,
quality_base = quality_base,
min_qual_score=min_qual_score,
min_read_len=min_read_len,
input_format="fastq",
num_procs=num_procs,
max_adapter_removal=max_adapter_removal,
overlap_length=overlap_length,
zero_cap=zero_cap,
path_to_cutadapt=path_to_cutadapt)
subprocess.check_call(shlex.split("rm "+" ".join([file_path+"_split_"+str(i)
for i in range(0,num_procs)])))
print_checkpoint("Begin converting reads for "+file_name)
if num_procs > 1:
pool = multiprocessing.Pool(num_procs)
for inputf, output in zip([file_path+"_split_trimmed_"+str(i) for i in range(0, num_procs)],
[file_path+"_split_trimmed_converted_"+str(i)
for i in range(0, num_procs)]):
pool.apply_async(convert_reads,(inputf,output))
pool.close()
pool.join()
else:
for inputf, output in zip([file_path+"_split_trimmed_"+str(i) for i in range(0, num_procs)],
[file_path+"_split_trimmed_converted_"+str(i)
for i in range(0, num_procs)]):
convert_reads(inputf,output)
subprocess.check_call(shlex.split("rm "+
" ".join([file_path+"_split_trimmed_"+str(i)
for i in range(0,num_procs)])))
input_fastq = [file_path+"_split_trimmed_converted_"+str(i) for i in range(0, num_procs)]
else:
print_checkpoint("No trimming on reads")
print_checkpoint("Begin converting reads for "+file_name)
if num_procs > 1:
pool = multiprocessing.Pool(num_procs)
for inputf, output in zip([file_path+"_split_"+str(i) for i in range(0, num_procs)],
[file_path+"_split_converted_"+str(i) for i in range(0, num_procs)]):
pool.apply_async(convert_reads, (inputf, output))
pool.close()
pool.join()
else:
for inputf, output in zip([file_path+"_split_"+str(i) for i in range(0, num_procs)],
[file_path+"_split_converted_"+str(i) for i in range(0, num_procs)]):
convert_reads(inputf, output)
subprocess.check_call(shlex.split("rm "+" ".join([file_path+"_split_"+str(i)
for i in range(0, num_procs)])))
input_fastq = [file_path+"_split_converted_"+str(i) for i in range(0, num_procs)]
#Run bowtie
if aligner.lower() == "minimap2":
print_checkpoint("Begin Running minimap2 for "+current_library)
    elif aligner.lower() == "bowtie":
print_checkpoint("Begin Running Bowtie for "+current_library)
else:
print_checkpoint("Begin Running Bowtie2 for "+current_library)
total_unique = run_alignment(current_library,
input_fastq,
sample,
forward_reference, reverse_reference, reference_fasta,
path_to_output=path_to_output,
aligner=aligner,
aligner_options=aligner_options,
merge_by_max_mapq=merge_by_max_mapq,
min_mapq=min_mapq,
path_to_aligner=path_to_aligner, num_procs=num_procs,
keep_temp_files=keep_temp_files,
sort_mem=sort_mem)
#subprocess.check_call(shlex.split("rm " + " ".join(input_fastq)))
return total_input, total_unique
def merge_bam_files(input_files,output,path_to_samtools=""):
"""
This function will merge several bam files and create the correct header.
input_files is a list of files produced by collapse_clonal reads. In other words, they're assumed
to be named like <sample>_<processed_reads>_<lib_id>_no_clonal.bam
output is the name of the merged bam file
path_to_samtools is a string indicating the path to the directory containing your
installation of samtools. Samtools is assumed to be in your path if this is not
provided.
"""
f=open("header.sam",'w')
subprocess.check_call(shlex.split(path_to_samtools+"samtools view -H "+input_files[0]),stdout=f)
for filen in input_files:
f.write("@RG\tID:" + filen[:filen.rindex(".bam")] + "\tLB:" +
filen +
"\tSM:NA" + "\n")
f.close()
subprocess.check_call(shlex.split(path_to_samtools+"samtools merge -r -h header.sam "+ output +" "+" ".join(input_files)))
subprocess.check_call(["rm", "header.sam"])
def build_ref(input_files,
output,
buffsize=100,
aligner="bowtie2",
path_to_aligner="",
num_procs=1):
"""
Creates 2 reference files: one with all C's converted to T's, and one with all G's converted to A's
input_files is a list of files to build a reference from
output is the prefix of the two output reference files that will be created
buffsize is the number of bytes that will be read in from the reference at once
"""
if len(path_to_aligner) !=0:
path_to_aligner+="/"
if not isinstance(input_files, list):
if isinstance(input_files, str):
input_files = [input_files]
else:
sys.exit("input_files must be a list of strings")
#outf = Convert all C to T. outr = convert all G to A
with open(output+"_f.fasta", 'w') as outf, open(output+"_r.fasta", 'w') as outr:
for filen in input_files:
f = open(filen, 'r')
line = f.read(buffsize)
header = False #indicates when you are currently reading in a header
while line:
for base in line:
if header==True:
if base=="\n": #when you encounter a newline, you are no longer in a header
header=False
outf.write(base)
outr.write(base)
else:
if base==">": #all headers begin with >
header=True
outf.write(base)
outr.write(base)
elif base=="C":
outf.write("T")
outr.write(base)
elif base=="c":
outf.write("t")
outr.write(base)
elif base=="G":
outf.write(base)
outr.write("A")
elif base=="g":
outf.write(base)
outr.write("a")
else:
outf.write(base)
outr.write(base)
line = f.read(buffsize)
f.close()
# minimap2
if aligner.lower() == "minimap2":
subprocess.check_call([path_to_aligner+"minimap2",
"-t",str(num_procs),
"-d",output+"_f.mmi",
output + "_f.fasta"])
subprocess.check_call([path_to_aligner+"minimap2",
"-t",str(num_procs),
"-d",output+"_r.mmi",
output + "_r.fasta"])
return 0
# bowtie2
base_cmd = path_to_aligner+"bowtie2-build -f "
# bowtie
if aligner.lower() == "bowtie":
base_cmd = path_to_aligner+"bowtie-build -f "
if num_procs > 1:
pool = multiprocessing.Pool(2)
pool.apply_async(subprocess.check_call,(shlex.split(base_cmd + output + "_f.fasta " + output +"_f"),))
pool.apply_async(subprocess.check_call,(shlex.split(base_cmd + output + "_r.fasta " + output+ "_r"),))
pool.close()
pool.join()
else:
subprocess.check_call(shlex.split(base_cmd + output + "_f.fasta " + output +"_f"))
subprocess.check_call(shlex.split(base_cmd + output + "_r.fasta " + output+ "_r"))
subprocess.check_call(["rm",output + "_f.fasta",output + "_r.fasta"])
return 0
def convert_reads(inputf,output,buffer_line_number=100000):
"""
This function takes a fastq file as input and converts all the cytosines in reads to thymines for
mapping to bisulfite converted genomes. This function also stores an encoding of where the cytosines
were located in the header of each fastq read. See encode_c_positions for more detail.
    inputf is a fastq file for conversion
output is the name of the file you'd like to put the converted reads in
"""
f = open(inputf,'r')
g = open(output,'w')
header = f.readline().rstrip()
header = header.replace(" ","!")
seq = f.readline()
header2 = f.readline()
qual = f.readline()
encoding = encode_c_positions(seq)
line_counts = 0
out = ""
while header:
out += header+"!"+encoding+"\n"
converted_seq = seq.replace("C","T")
out += converted_seq
out += header2
out += qual
line_counts += 4
# output
if line_counts > buffer_line_number:
g.write(out)
line_counts = 0
out = ""
# update
header = f.readline().rstrip()
header = header.replace(" ","!")
seq = f.readline()
header2 = f.readline()
qual = f.readline()
encoding = encode_c_positions(seq)
# output
if line_counts > 0:
g.write(out)
line_counts = 0
out = ""
f.close()
g.close()
def encode_c_positions(seq,is_read2=False):
"""
This function creates an encoding of where cytosine nucleotides are located in a converted read.
The encoding uses ascii characters (minus an offset) to indicate an offset into the read.
    For example, the ascii character $ has an integer value of 36 and indicates that a C is located
2 bases from the previous position (36 - 34). The offsets build off of one another so if the first
offset is 2 and the second offset is 5 the second C is located in the 9th position (since python indexing
starts at 0). In other words, next_c_index = prev_c_index + offset + 1.
seq is a string of nucleotides you'd like to encode.
"""
indexes = ""
prev_index = 0
if is_read2==False:
index = seq.find("C",prev_index)
offset = index + 34
while True:
if index < 0:
break
while offset >= 255:
indexes += chr(255)
offset -= 255
if offset < 34:
offset += 34
indexes += chr(offset)
prev_index = index + 1
index = seq.find("C",prev_index)
offset = index - prev_index + 34
else:
index = seq.find("G",prev_index)
offset = index + 34
while True:
if index < 0:
break
while offset >= 255:
indexes += chr(255)
offset -= 255
if offset < 34:
offset += 34
indexes += chr(offset)
prev_index = index + 1
index = seq.find("G",prev_index)
offset = index - prev_index + 34
return indexes
def decode_c_positions(seq,indexes,strand,is_read2=False):
"""
This function takes the encodings generated by encode_c_position and replaces the appropriate
positions with C nucleotides.
seq is a string of nucleotides to have Cs or Gs replaced.
indexes is a string of characters indicating the offsets for the positions of the Cs or Gs.
strand is the DNA strand (+ or -) that seq mapped to. This is important because
sequences in sam files are always represented on the forward strand
is_read2 indicates whether the read to be deconverted is a read2.
"""
prev_index = 0
new_seq=""
index = 0
saturated = False
if is_read2 == False:
if strand == "-":
seq = seq[::-1]
for char in indexes:
offset = ord(char)
if offset == 255:
index += 255
saturated = True
continue
if saturated and (offset - 34) < 34:
offset -= 68
else:
offset -= 34
index += offset
if strand == "+":
new_seq += seq[prev_index:index]+"C"
elif strand == "-":
new_seq += seq[prev_index:index]+"G"
prev_index = index + 1
index = prev_index
saturated = False
else:
if strand == "-":
seq = seq[::-1]
for char in indexes:
offset = ord(char)
if offset == 255:
index += 255
saturated = True
continue
if saturated and (offset - 34) < 34:
offset -= 68
else:
offset -= 34
index += offset
if strand == "+":
new_seq += seq[prev_index:index]+"G"
elif strand == "-":
new_seq += seq[prev_index:index]+"C"
prev_index = index + 1
index = prev_index
saturated = False
new_seq += seq[prev_index:]
if strand == "-":
new_seq = new_seq[::-1]
return new_seq
def run_alignment(current_library,library_read_files,
sample,
forward_reference,reverse_reference,reference_fasta,
path_to_output="",
path_to_samtools="",
aligner="bowtie2",
path_to_aligner="",aligner_options=None,
merge_by_max_mapq=False,min_mapq=30,
num_procs=1,keep_temp_files=False,sort_mem="500M"):
"""
This function runs bowtie on the forward and reverse converted bisulfite references
(generated by build_ref). It removes any read that maps to both the forward and reverse
strands.
files is a list of file paths to be mapped
forward_reference is a string indicating the path to the forward strand reference created by
build_ref
reverse_reference is a string indicating the path to the reverse strand reference created by
build_ref
prefix is a string that you would like prepended to the output files (e.g., the sample name)
    options is a list of strings indicating options you'd like passed to the aligner
        (e.g., ["-k 1","-l 2"])
path_to_aligner is a string indicating the path to the folder in which bowtie resides. Bowtie
is assumed to be in your path if this option isn't used
num_procs is an integer indicating the number of processors you'd like used for removing multi
mapping reads and for bowtie mapping
keep_temp_files is a boolean indicating that you'd like to keep the intermediate files generated
by this function. This can be useful for debugging, but in general should be left False.
    aligner specifies which supported aligner to use ('bowtie', 'bowtie2' or 'minimap2')
sort_mem is the parameter to pass to unix sort with -S/--buffer-size command
"""
if not sort_mem:
sort_option = ""
else:
sort_option = " -S "+sort_mem
if len(path_to_aligner) !=0:
path_to_aligner+="/"
if len(path_to_output) !=0:
path_to_output+="/"
prefix = path_to_output+sample+"_"+str(current_library)
#Default bowtie option
if aligner_options is None:
if aligner.lower() == "minimap2":
aligner_options = ["-ax", "sr","--secondary=no"]
elif aligner.lower() == "bowtie":
aligner_options = ["-S", "-k 1", "-m 1", "--chunkmbs 3072",
"--best", "--strata", "-o 4", "-e 80",
"-l 20", "-n 0"]
else: #bowtie 2
aligner_options = []
options = aligner_options
input_read_file = ",".join(library_read_files)
input_read_file = prefix+"_converted_reads.fastq"
subprocess.check_call(["mv",library_read_files[0],input_read_file])
if len(library_read_files) > 1:
with open(input_read_file,'a') as g:
for library_read_file in library_read_files[1:]:
with open(library_read_file,'r') as f:
g.write(f.read())
subprocess.check_call(["rm",library_read_file])
if aligner != "minimap2":
if " ".join(options).find(" -p ") == -1:
options.append("-p "+str(num_procs))
else:
if " ".join(options).find(" -t ") == -1:
options.append("-t "+str(num_procs))
if aligner.lower() == "minimap2":
args = [path_to_aligner+"minimap2"]
args.extend(options)
args.append("--for-only")
args.append(forward_reference)
args.append(input_read_file)
elif aligner.lower() == "bowtie":
args = [path_to_aligner+"bowtie"]
args.extend(options)
args.append("--norc")
args.append(forward_reference)
args.append(input_read_file)
else: # bowtie2
args = [path_to_aligner+"bowtie2"]
args.extend(options)
args.append("--norc")
args.append("-x "+forward_reference)
args.append("-U "+input_read_file)
## run
with open(prefix+"_forward_strand_hits.sam","w") as f:
subprocess.check_call(shlex.split(" ".join(args)),stdout=f)
print_checkpoint("Processing forward strand hits")
find_multi_mappers(prefix+"_forward_strand_hits.sam",
prefix,
num_procs=num_procs,
min_mapq=min_mapq,
append=False,
keep_temp_files=keep_temp_files)
if aligner.lower() == "minimap2":
args = [path_to_aligner+"minimap2"]
args.extend(options)
args.append("--rev-only")
args.append(reverse_reference)
args.append(input_read_file)
elif aligner.lower() == "bowtie":
args = [path_to_aligner+"bowtie"]
args.extend(options)
args.append("--nofw")
args.append(reverse_reference)
args.append(input_read_file)
else:
args = [path_to_aligner+"bowtie2"]
args.extend(options)
args.append("--nofw")
args.append("-x "+reverse_reference)
args.append("-U "+input_read_file)
## run
with open(prefix+"_reverse_strand_hits.sam","w") as f:
subprocess.check_call(shlex.split(" ".join(args)),stdout=f)
subprocess.check_call(["rm",input_read_file])
print_checkpoint("Processing reverse strand hits")
sam_header = find_multi_mappers(prefix+"_reverse_strand_hits.sam",
prefix,
num_procs=num_procs,
min_mapq=min_mapq,
append=True,
keep_temp_files=keep_temp_files)
## Clear temporary files
if num_procs > 1:
pool = multiprocessing.Pool(num_procs)
for file_num in range(0,num_procs):
pool.apply_async(subprocess.check_call,
(shlex.split(
"env LC_COLLATE=C sort"+sort_option+ \
" -t '\t' -k 1 -o "+prefix+"_sorted_"+str(file_num)+ \
" "+prefix+"_sorted_"+str(file_num)),))
pool.close()
pool.join()
else:
for file_num in range(0,num_procs):
subprocess.check_call(shlex.split(
"env LC_COLLATE=C sort"+sort_option + " -t '\t' -k 1 -o "+ \
prefix+"_sorted_"+str(file_num)+" "+prefix+"_sorted_"+str(file_num)))
print_checkpoint("Finding multimappers")
if merge_by_max_mapq:
total_unique = merge_sorted_multimap_max_mapq(
current_library,
[prefix+"_sorted_"+str(file_num) for file_num in range(0,num_procs)],
prefix,
reference_fasta,
path_to_samtools="")
else:
total_unique = merge_sorted_multimap(
current_library,
[prefix+"_sorted_"+str(file_num) for file_num in range(0,num_procs)],
prefix,
reference_fasta,
path_to_samtools="")
subprocess.check_call(shlex.split("rm "+" ".join([prefix+"_sorted_"+str(file_num)
for file_num in range(0,num_procs)])))
output_bam_file = prefix+"_processed_reads.bam"
if not sort_mem:
sort_option = ""
else:
sort_option = " -m "+sort_mem
try:
subprocess.check_call(shlex.split(path_to_samtools+"samtools sort "+
" -@ " + str(num_procs) +
sort_option + " " +
" -o "+output_bam_file + " " +
output_bam_file ))
except:
subprocess.check_call(shlex.split(path_to_samtools+"samtools sort "+
" -o "+output_bam_file + " " +
output_bam_file ))
return total_unique
def find_multi_mappers(inputf,output,num_procs=1,min_mapq=30,
keep_temp_files=False,append=False):
"""
This function takes a sam file generated by bowtie and pulls out any mapped reads.
It splits these mapped reads into num_procs number of files.
inputf is a string of the path to a sam file from bowtie
output is a string of the prefix you'd like prepended to the output files
The output files will be named as <output>_sorted_<index num>
num_procs is an integer indicating how many files the bowtie sam file should be split
into
keep_temp_files is a boolean indicating that you'd like to keep the intermediate files generated
by this function. This can be useful for debugging, but in general should be left False.
append is a boolean that should be False for the first bowtie sam file you process (i.e., for the forward
mapped reads) and True for the second. This option is mainly for safety. It ensures that files from
previous runs are erased.
"""
min_mapq = max(3,min_mapq)
sam_header = []
file_handles = {}
f = open(inputf,'r')
cycle = itertools.cycle(list(range(0,num_procs)))
for file_num in range(0,num_procs):
if append == False:
file_handles[file_num]=open(output+"_sorted_"+str(file_num),'w')
else:
file_handles[file_num]=open(output+"_sorted_"+str(file_num),'a')
for line in f:
#To deal with the way chromosomes were named in some of our older references
if line[0] == "@":
continue
fields = line.split("\t")
flag = int(fields[1])
# minimum QC
if fields[2] == "*" or int(fields[4]) < min_mapq or (flag & 2048 == 2048):
continue
header = fields[0].split("!")
#BIG ASSUMPTION!! NO TABS IN FASTQ HEADER LINES EXCEPT THE ONES I ADD!
if (flag & 16) == 16:
strand = "-"
elif (flag & 16) == 0:
strand = "+"
try:
seq = decode_c_positions(fields[9],header[-1],strand)
file_handles[next(cycle)].write(" ".join(header[:-1])+"\t"+"\t".join(fields[1:9])
+"\t"+seq+"\t"+"\t".join(fields[10:]))
except:
print_warning(" Failed to recover unconverted sequence for:\n"+line+"\n")
print_warning(header[-1]+"\n")
f.close()
if not keep_temp_files:
subprocess.check_call(shlex.split("rm "+inputf))
for file_num in range(0,num_procs):
file_handles[file_num].close()
def merge_sorted_multimap(current_library,files,prefix,reference_fasta,path_to_samtools=""):
"""
This function takes the files from find_multi_mappers and outputs the uniquely mapping reads.
files is a list of filenames containing the output of find_multi_mappers
    prefix is a string prepended to the output bam file containing the uniquely mapping reads.
        This file will be named as <prefix>+"_processed_reads.bam"
"""
output_sam_file = prefix+"_processed_reads.sam"
output_bam_file = prefix+"_processed_reads.bam"
output_handle = open(output_sam_file,'w')
#output_pipe = subprocess.Popen(
# shlex.split(path_to_samtools+"samtools view -S -b -"),
# stdin=subprocess.PIPE,stdout=output_handle)
try:
f = open(reference_fasta+".fai",'r')
except:
print("Reference fasta not indexed. Indexing.")
try:
subprocess.check_call(shlex.split(path_to_samtools+"samtools faidx "+reference_fasta))
f = open(reference_fasta+".fai",'r')
except:
sys.exit("Reference fasta wasn't indexed, and couldn't be indexed. Please try indexing it manually and running methylpy again.")
#Create sam header based on reference genome
output_handle.write("@HD\tVN:1.0\tSO:unsorted\n")
for line in f:
fields = line.split("\t")
output_handle.write("@SQ\tSN:"+fields[0]+"\tLN:"+fields[1]+"\n")
f.close()
## Merging alignment results of both strands
lines = {}
fields = {}
file_handles = {}
total_unique = 0
count= 0
for index,filen in enumerate(files):
file_handles[filen]=open(filen,'r')
lines[filen]=file_handles[filen].readline()
fields[filen] = lines[filen].split("\t")[0]
while True:
all_fields = [field for field in list(fields.values()) if field != ""]
if len(all_fields) == 0:
break
min_field = min(all_fields)
count = 0
current_line = ""
current_field = ""
for key in fields:
while fields[key] == min_field:
count += 1
current_line = lines[key]
lines[key]=file_handles[key].readline()
fields[key]=lines[key].split("\t")[0]
if count == 1:
output_handle.write(current_line)
total_unique += 1
#output_pipe.stdin.close()
output_handle.close()
for index,filen in enumerate(files):
file_handles[filen].close()
f = open(output_bam_file,'w')
subprocess.check_call(shlex.split(path_to_samtools+"samtools view -S -b -h "+output_sam_file),stdout=f)
f.close()
subprocess.check_call(shlex.split("rm "+output_sam_file))
return total_unique
def merge_sorted_multimap_max_mapq(current_library,files,prefix,reference_fasta,path_to_samtools=""):
"""
This function takes the files from find_multi_mappers and outputs the uniquely mapping reads.
files is a list of filenames containing the output of find_multi_mappers
    prefix is a string prepended to the output bam file containing the uniquely mapping reads.
        This file will be named as <prefix>+"_processed_reads.bam"
"""
output_sam_file = prefix+"_processed_reads.sam"
output_bam_file = prefix+"_processed_reads.bam"
output_handle = open(output_sam_file,'w')
#output_pipe = subprocess.Popen(
# shlex.split(path_to_samtools+"samtools view -S -b -"),
# stdin=subprocess.PIPE,stdout=output_handle)
try:
f = open(reference_fasta+".fai",'r')
except:
print("Reference fasta not indexed. Indexing.")
try:
subprocess.check_call(shlex.split(path_to_samtools+"samtools faidx "+reference_fasta))
f = open(reference_fasta+".fai",'r')
except:
sys.exit("Reference fasta wasn't indexed, and couldn't be indexed. Please try indexing it manually and running methylpy again.")
#Create sam header based on reference genome
output_handle.write("@HD\tVN:1.0\tSO:unsorted\n")
for line in f:
fields = line.split("\t")
output_handle.write("@SQ\tSN:"+fields[0]+"\tLN:"+fields[1]+"\n")
f.close()
## Merging alignment results of both strands
lines = {}
fields = {}
file_handles = {}
total_unique = 0
count= 0
for index,filen in enumerate(files):
file_handles[filen]=open(filen,'r')
lines[filen]=file_handles[filen].readline()
fields[filen] = lines[filen].split("\t")[0]
while True:
all_fields = [field for field in list(fields.values()) if field != ""]
if len(all_fields) == 0:
break
min_field = min(all_fields)
count, count_diff_mapq = 0, 0
current_line = ""
current_field = ""
max_mapq = -100
for key in fields:
while fields[key] == min_field:
mapq = int(lines[key].split("\t")[4])
count += 1
if mapq > max_mapq:
count_diff_mapq += 1
max_mapq = mapq
current_line = lines[key]
lines[key]=file_handles[key].readline()
fields[key]=lines[key].split("\t")[0]
if count == 1 or count_diff_mapq > 1:
output_handle.write(current_line)
total_unique += 1
#output_pipe.stdin.close()
output_handle.close()
for index,filen in enumerate(files):
file_handles[filen].close()
f = open(output_bam_file,'w')
subprocess.check_call(shlex.split(path_to_samtools+"samtools view -S -b -h "+output_sam_file),stdout=f)
f.close()
subprocess.check_call(shlex.split("rm "+output_sam_file))
return total_unique
def quality_trim(inputf, output = None, quality_base = None, min_qual_score = None, min_read_len = None,
adapter_seq = "AGATCGGAAGAGCACACGTCTG", num_procs = 1, input_format = None, error_rate = None,
max_adapter_removal = None, overlap_length = None, zero_cap = False, path_to_cutadapt = ""):
"""
Information from cutadapt documentation:
input_format:
Input file format; can be either 'fasta', 'fastq' or 'sra-fastq'. Ignored when reading csfasta/qual files
(default: auto-detect from file name extension).
adapter_seq:
Sequence of an adapter that was ligated to the 3' end. The adapter itself and anything that follows is
trimmed.
error_rate:
Maximum allowed error rate (no. of errors divided by the length of the matching region) (default: 0.1)
max_adapter_removal:
Try to remove adapters at most COUNT times. Useful when an adapter gets appended multiple times.
overlap_length:
Minimum overlap length. If the overlap between the read and the adapter is shorter than LENGTH, the read
        is not modified. This reduces the no. of bases trimmed purely due to short random adapter matches.
min_read_len:
Discard trimmed reads that are shorter than LENGTH. Reads that are too short even before adapter removal
are also discarded. In colorspace, an initial primer is not counted.
output:
Write the modified sequences to this file instead of standard output and send the summary report to
standard output. The format is FASTQ if qualities are available, FASTA otherwise.
min_qual_score:
Trim low-quality ends from reads before adapter removal. The algorithm is the same as the one used by
BWA (Subtract CUTOFF from all qualities; compute partial sums from all indices to the end of the
sequence; cut sequence at the index at which the sum is minimal).
quality_base:
Assume that quality values are encoded as ascii(quality + QUALITY_BASE). The default (33) is
usually correct, except for reads produced by some versions of the Illumina pipeline, where this should
be set to 64.
zero_cap:
Change negative quality values to zero (workaround to avoid segmentation faults in BWA).
path_to_cutadapt:
Path to the folder where cutadapt executable exists. If none, assumes it can be run from current directory
input:
list of filenames
"""
if path_to_cutadapt: #see if cutadapt is installed
if path_to_cutadapt[-1]!="/":
path_to_cutadapt += "/"
path_to_cutadapt += "cutadapt"
try:
devnull = open('/dev/null', 'w')
subprocess.check_call([path_to_cutadapt], stdout=devnull, stderr=devnull)
except OSError:
sys.exit("Cutadapt must be installed to run quality_trim")
except:
devnull.close()
if not isinstance(inputf, list):
if isinstance(inputf, str):
inputf = [inputf]
else:
sys.exit("input must be a list of strings")
if not isinstance(output, list):
if isinstance(output, str):
output = [output]
else:
sys.exit("output must be a list of strings")
if len(output) != len(inputf):
sys.exit("Must provide an equal number of input and output files")
base_cmd = path_to_cutadapt
options = " --quiet "
if zero_cap:
zero = "-z "
else:
zero = ""
if input_format:
options += " -f " + input_format
if error_rate:
options += " -e " + str(error_rate)
if max_adapter_removal:
options += " -n " + str(max_adapter_removal)
if overlap_length:
options += " -O " + str(overlap_length)
if min_read_len:
options += " -m " + str(min_read_len)
if min_qual_score:
options += " -q " + str(min_qual_score)
if quality_base:
options += " --quality-base=" + str(quality_base)
options += " -a " + adapter_seq
options += " " + zero
    if num_procs > 1:
        pool = multiprocessing.Pool(num_procs)
        #adapter trimming
        #build the option string per file so that -o flags do not accumulate across files
        for current_input,current_output in zip(inputf,output):
            run_options = options
            if output:
                run_options += " -o " + current_output + " "
            pool.apply_async(subprocess.check_call,(base_cmd + run_options + current_input,),{"shell":True})
        pool.close()
        pool.join()
    else:
        for current_input,current_output in zip(inputf,output):
            run_options = options
            if output:
                run_options += " -o " + current_output + " "
            subprocess.check_call(base_cmd + run_options + current_input, shell=True)
def remove_clonal_bam(input_bam,output_bam,metric,is_pe=False,path_to_picard="",java_options="-Xmx20g"):
"""
Running picard to remove clonal reads in input_bam and output non-clonal reads
to output_bam.
"""
subprocess.check_call(
shlex.split(
" ".join(["java",java_options,
"-jar",
path_to_picard+"/picard.jar MarkDuplicates",
"INPUT="+input_bam,
"OUTPUT="+output_bam,
"ASSUME_SORTED=true",
"REMOVE_DUPLICATES=true",
"METRICS_FILE="+metric,
"VALIDATION_STRINGENCY=LENIENT",
"QUIET=true"])
)
)
total_clonal = 0
with open(metric,'r') as f:
while True:
line = f.readline()
if line[0] != "#" and len(line) != 1:
break
line = f.readline()
fields = line.split("\t")
if is_pe:
total_clonal = fields[6]
else:
total_clonal = fields[5]
return int(total_clonal)
def fasta_iter(fasta_name,query_chrom):
"""
    Given a fasta file, return the sequence of query_chrom, or None if the chromosome is not found.
"""
fh = open(fasta_name)
# ditch the boolean (x[0]) and just keep the header or sequence since
# we know they alternate.
from itertools import groupby
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))
seq = None
for header in faiter:
# drop the ">"
        header = next(header)[1:].strip()
header = header.split(" ")[0]
# join all sequence lines to one.
if header != query_chrom:
for s in next(faiter):
pass
continue
seq = "".join(s.strip() for s in next(faiter))
return seq
def get_chromosome_sequence(fasta_name,query_chrom):
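    # Look up query_chrom's byte offset in the .fai index, seek to it, and read the
    # sequence until the next fasta header; returns None if the chromosome is absent.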
chrom_pointer = None
with open(fasta_name+".fai",'r') as f:
for line in f:
fields = line.split("\t")
if fields[0] == query_chrom:
chrom_pointer = int(fields[2])
if chrom_pointer is None: return(None)
seq = ""
with open(fasta_name,'r') as f:
f.seek(chrom_pointer)
for line in f:
if line[0] == ">": break
seq += line.rstrip("\n")
return(seq)
def call_methylated_sites(inputf, sample, reference_fasta,
unmethylated_control=None,
sig_cutoff=.01,num_procs = 1,
num_upstr_bases=0,num_downstr_bases=2,
generate_mpileup_file=True,
compress_output=True,
bgzip=False,
path_to_bgzip="",
path_to_tabix="",
buffer_line_number = 100000,
min_mapq=30,
min_cov=1,binom_test=True,
path_to_samtools="",
remove_chr_prefix=True,
sort_mem="500M",
add_snp_info=False,
path_to_files="",min_base_quality=1,
keep_temp_files=False):
"""
inputf is the path to a bam file that contains mapped bisulfite sequencing reads
sample is the name you'd like for the allc files. The files will be named like so:
allc_<sample>_<chrom>.tsv
reference is the path to a samtools indexed fasta file
control is the name of the chromosome/region that you want to use to estimate the non-conversion rate of your
sample, or the non-conversion rate you'd like to use. Consequently, control is either a string, or a decimal
If control is a string then it should be in the following format: "chrom:start-end".
If you'd like to specify an entire chromosome simply use "chrom:"
sig_cutoff is a float indicating the adjusted p-value cutoff you wish to use for determining whether or not
a site is methylated
num_procs is an integer indicating how many num_procs you'd like to run this function over
min_cov is an integer indicating the minimum number of reads for a site to be tested.
path_to_files is a string indicating the path for the output and the input bam, mpileup, or allc files
for methylation calling.
min_base_quality is an integer indicating the minimum PHRED quality score for a base to be included in the
mpileup file (and subsequently to be considered for methylation calling)
"""
if add_snp_info:
return call_methylated_sites_with_SNP_info(inputf, sample, reference_fasta,
unmethylated_control=unmethylated_control,
sig_cutoff=sig_cutoff,
num_procs=num_procs,
num_upstr_bases=num_upstr_bases,
num_downstr_bases=num_downstr_bases,
generate_mpileup_file=generate_mpileup_file,
compress_output=compress_output,
buffer_line_number=buffer_line_number,
min_mapq=min_mapq,
min_cov=min_cov,
binom_test=binom_test,
path_to_samtools=path_to_samtools,
remove_chr_prefix=remove_chr_prefix,
sort_mem=sort_mem,
path_to_files=path_to_files,
min_base_quality=min_base_quality,
keep_temp_files=keep_temp_files)
if binom_test and unmethylated_control is None:
print_error("Please specify unmethylated_control if you would like to do binomial test!\n")
#Figure out all the correct quality options based on the offset or CASAVA version given
# quality_version >= 1.8:
quality_base = 33
if len(path_to_files)!=0:
path_to_files+="/"
if len(path_to_samtools)!=0:
path_to_samtools+="/"
try:
num_procs = int(num_procs)
except:
sys.exit("num_procs must be an integer")
try:
#make sure bam file is indexed
open(inputf+".bai",'r')
except:
print_checkpoint("Input not indexed. Indexing...")
subprocess.check_call(shlex.split(path_to_samtools+"samtools index -@ {} {}".format(num_procs, inputf)))
## Check fasta index
try:
f = open(reference_fasta+".fai",'r')
except:
print("Reference fasta not indexed. Indexing.")
try:
subprocess.check_call(shlex.split(path_to_samtools+"samtools faidx "+reference_fasta))
f = open(reference_fasta+".fai",'r')
except:
sys.exit("Reference fasta wasn't indexed, and couldn't be indexed. "
+"Please try indexing it manually and running methylpy again.")
## Input
if not generate_mpileup_file:
cmd = path_to_samtools+"samtools mpileup -Q "+str(min_base_quality)+\
" -q "+str(min_mapq)+" -B -f "+reference_fasta+" "+inputf
pipes = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
fhandle = pipes.stdout
else:
with open(path_to_files+sample+"_mpileup_output.tsv",'w') as f:
subprocess.check_call(
shlex.split(
path_to_samtools+"samtools mpileup -Q "+str(min_base_quality)
+" -q "+str(min_mapq)
+" -B -f "+reference_fasta+" "+inputf),
stdout=f)
fhandle = open(path_to_files+sample+"_mpileup_output.tsv" ,'r')
## Output
if compress_output:
output_filehandler = gzip.open(path_to_files+"allc_"+sample+".tsv.gz",'wt')
output_file = path_to_files+"allc_"+sample+".tsv.gz"
else:
output_filehandler = open(path_to_files+"allc_"+sample+".tsv",'w')
output_file = path_to_files+"allc_"+sample+".tsv"
complement = {"A":"T","C":"G","G":"C","T":"A","N":"N"}
context_len = num_upstr_bases+1+num_downstr_bases
cur_chrom = ""
#cur_chrom_nochr = ""
line_counts = 0
out = ""
for line in fhandle:
fields = line.split("\t")
if fields[0] != cur_chrom:
cur_chrom = fields[0]
cur_chrom_nochr = cur_chrom
if remove_chr_prefix and cur_chrom.startswith("chr"):
cur_chrom_nochr = cur_chrom_nochr[3:]
seq = get_chromosome_sequence(reference_fasta,cur_chrom)
if seq != None:
seq = seq.upper()
if seq == None:
continue
if (not fields[2] == "C") and (not fields[2] == "G"):
continue
# indels
read_bases = fields[4]
incons_basecalls = read_bases.count("+") + read_bases.count("-")
if incons_basecalls > 0:
read_bases_no_indel = ""
index = 0
prev_index = 0
while index < len(read_bases):
if read_bases[index] == "+" or read_bases[index] == "-":
# get insert size
indel_size = ""
ind = index+1
while True:
try:
int(read_bases[ind])
indel_size += read_bases[ind]
ind += 1
except:
break
try:
# sometimes +/- does not follow by a number and
# it should be ignored
indel_size = int(indel_size)
except:
index += 1
continue
read_bases_no_indel += read_bases[prev_index:index]
index = ind + indel_size
prev_index = index
else:
index += 1
read_bases_no_indel += read_bases[prev_index:index]
fields[4] = read_bases_no_indel
# count converted and unconverted bases
if fields[2] == "C":
pos = int(fields[1])-1
try:
context = seq[(pos-num_upstr_bases):(pos+num_downstr_bases+1)]
except: # complete context is not available, skip
continue
unconverted_c = fields[4].count(".")
converted_c = fields[4].count("T")
cov = unconverted_c+converted_c
if cov > 0 and len(context) == context_len:
line_counts += 1
out += "\t".join([cur_chrom_nochr,str(pos+1),"+",context,
str(unconverted_c),str(cov),"1"])+"\n"
elif fields[2] == "G":
pos = int(fields[1])-1
try:
context = "".join([complement[base]
for base in reversed(
seq[(pos-num_downstr_bases):(pos+num_upstr_bases+1)]
)]
)
except: # complete context is not available, skip
continue
unconverted_c = fields[4].count(",")
converted_c = fields[4].count("a")
cov = unconverted_c+converted_c
if cov > 0 and len(context) == context_len:
line_counts += 1
out += "\t".join([cur_chrom_nochr,str(pos+1),"-",context,
str(unconverted_c),str(cov),"1"])+"\n"
if line_counts > buffer_line_number:
output_filehandler.write(out)
line_counts = 0
out = ""
if line_counts > 0:
output_filehandler.write(out)
line_counts = 0
out = ""
fhandle.close()
output_filehandler.close()
if generate_mpileup_file and not keep_temp_files:
subprocess.check_call(shlex.split("rm -f "+path_to_files+sample+"_mpileup_output.tsv"))
if binom_test:
print_checkpoint('Perform binomial test')
perform_binomial_test(allc_file=output_file,
sample=sample,
path_to_output=path_to_files,
unmethylated_control=unmethylated_control,
min_cov=min_cov,
sig_cutoff=sig_cutoff,
num_procs=num_procs,
sort_mem=sort_mem,
compress_output=compress_output,
buffer_line_number=buffer_line_number,
remove_chr_prefix=remove_chr_prefix)
elif not unmethylated_control is None:
non_conversion = calculate_non_conversion_rate(unmethylated_control,
output_file,
chrom_pointer=None,
remove_chr_prefix=remove_chr_prefix)
index_allc_file(output_file)
if bgzip:
bgzip_allc_file(output_file,path_to_bgzip,path_to_tabix,buffer_line_number)
return(0)
def analyze_read_basecalls(ref,read_bases):
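    # Strip indel annotations from a samtools mpileup read-base string, then count
    # consistent vs. inconsistent base calls for the given reference base; for C/G
    # reference bases also return the unconverted and converted cytosine counts.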
# indels
incons_basecalls = read_bases.count("+") + read_bases.count("-")
if incons_basecalls > 0:
read_bases_no_indel = ""
index = 0
prev_index = 0
while index < len(read_bases):
if read_bases[index] == "+" or read_bases[index] == "-":
# get insert size
indel_size = ""
ind = index+1
while True:
try:
int(read_bases[ind])
indel_size += read_bases[ind]
ind += 1
except:
break
try:
indel_size = int(indel_size)
except:
indel_size = 0
read_bases_no_indel += read_bases[prev_index:index]
index = ind + indel_size
prev_index = index
else:
index += 1
read_bases_no_indel += read_bases[prev_index:index]
read_bases = read_bases_no_indel
# counting matches and mismatches
if ref == "C":
incons_basecalls += read_bases.count('a') + \
read_bases.count('g') + \
read_bases.count('t') + \
read_bases.count('G') + \
read_bases.count('A')
unconverted_c = read_bases.count(".")
converted_c = read_bases.count("T")
cons_basecalls = read_bases.count(',') + unconverted_c
return (str(cons_basecalls),str(incons_basecalls),unconverted_c,converted_c)
elif ref == "G":
incons_basecalls += read_bases.count('A') + \
read_bases.count('C') + \
read_bases.count('T') + \
read_bases.count('c') + \
read_bases.count('t')
unconverted_c = read_bases.count(",")
converted_c = read_bases.count("a")
cons_basecalls = read_bases.count('.') + unconverted_c
return (str(cons_basecalls),str(incons_basecalls),unconverted_c,converted_c)
elif ref == "T":
incons_basecalls += read_bases.count('a') + \
read_bases.count('c') + \
read_bases.count('g') + \
read_bases.count('A') + \
read_bases.count('C') + \
read_bases.count('G')
cons_basecalls = read_bases.count(',')
return (str(cons_basecalls),str(incons_basecalls))
elif ref == "A":
incons_basecalls += read_bases.count('T') + \
read_bases.count('C') + \
read_bases.count('G') + \
read_bases.count('t')+ \
read_bases.count('c')+ \
read_bases.count('g')
cons_basecalls = read_bases.count('.')
return (str(cons_basecalls),str(incons_basecalls))
else:
return ('0',str(incons_basecalls))
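# Illustrative trace of analyze_read_basecalls (the pileup string below is a made-up
# example, not real data): with ref == "C" and read_bases == "..T,a" there are no indel
# markers, so incons_basecalls = 1 (the lowercase 'a'), unconverted_c = 2 (the two '.'),
# converted_c = 1 (the 'T') and cons_basecalls = 1 (the ',') + 2 = 3; the function
# returns ('3', '1', 2, 1).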
def call_methylated_sites_with_SNP_info(inputf, sample, reference_fasta,
unmethylated_control=None,
sig_cutoff=.01,num_procs = 1,
num_upstr_bases=0,num_downstr_bases=2,
generate_mpileup_file=True,
compress_output=True,
bgzip=False,
path_to_bgzip="",
path_to_tabix="",
buffer_line_number=100000,
min_mapq=30,
min_cov=1,binom_test=True,
path_to_samtools="",
remove_chr_prefix=True,
sort_mem="500M",
add_snp_info=False,
path_to_files="",min_base_quality=1,
keep_temp_files=False):
"""
inputf is the path to a bam file that contains mapped bisulfite sequencing reads
sample is the name you'd like for the allc files. The files will be named like so:
allc_<sample>_<chrom>.tsv
reference is the path to a samtools indexed fasta file
    unmethylated_control is the name of the chromosome/region that you want to use to estimate the non-conversion
    rate of your sample, or the non-conversion rate you'd like to use. Consequently, it is either a string or a decimal.
    If it is a string, it should be in the following format: "chrom:start-end".
    If you'd like to specify an entire chromosome, simply use "chrom:"
sig_cutoff is a float indicating the adjusted p-value cutoff you wish to use for determining whether or not
a site is methylated
num_procs is an integer indicating how many num_procs you'd like to run this function over
min_cov is an integer indicating the minimum number of reads for a site to be tested.
path_to_files is a string indicating the path for the output and the input bam, mpileup, or allc files
for methylation calling.
min_base_quality is an integer indicating the minimum PHRED quality score for a base to be included in the
mpileup file (and subsequently to be considered for methylation calling)
"""
if binom_test and unmethylated_control is None:
print_error("Please specify unmethylated_control if you would like to do binomial test!\n")
#Figure out all the correct quality options based on the offset or CASAVA version given
# quality_version >= 1.8:
quality_base = 33
if len(path_to_files)!=0:
path_to_files+="/"
if len(path_to_samtools)!=0:
path_to_samtools+="/"
try:
num_procs = int(num_procs)
except:
sys.exit("num_procs must be an integer")
try:
#make sure bam file is indexed
open(inputf+".bai",'r')
except:
print_checkpoint("Input not indexed. Indexing...")
subprocess.check_call(shlex.split(path_to_samtools+"samtools index -@ {} {}".format(num_procs, inputf)))
## Check fasta index
try:
f = open(reference_fasta+".fai",'r')
except:
print("Reference fasta not indexed. Indexing.")
try:
subprocess.check_call(shlex.split(path_to_samtools+"samtools faidx "+reference_fasta))
f = open(reference_fasta+".fai",'r')
except:
sys.exit("Reference fasta wasn't indexed, and couldn't be indexed. "
+"Please try indexing it manually and running methylpy again.")
## Input
if not generate_mpileup_file:
cmd = path_to_samtools+"samtools mpileup -Q "+str(min_base_quality)+\
" -q "+str(min_mapq)+" -B -f "+reference_fasta+" "+inputf
pipes = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
fhandle = pipes.stdout
else:
with open(path_to_files+sample+"_mpileup_output.tsv",'w') as f:
subprocess.check_call(
shlex.split(
path_to_samtools+"samtools mpileup -Q "+str(min_base_quality)
+" -q "+str(min_mapq)
+" -B -f "+reference_fasta+" "+inputf),
stdout=f)
fhandle = open(path_to_files+sample+"_mpileup_output.tsv" ,'r')
## Output
if compress_output:
output_filehandler = gzip.open(path_to_files+"allc_"+sample+".tsv.gz",'wt')
output_file = path_to_files+"allc_"+sample+".tsv.gz"
else:
output_filehandler = open(path_to_files+"allc_"+sample+".tsv",'w')
output_file = path_to_files+"allc_"+sample+".tsv"
complement = {"A":"T","C":"G","G":"C","T":"A","N":"N"}
context_len = num_upstr_bases+1+num_downstr_bases
cur_chrom = ""
#cur_chrom_nochr = ""
line_counts = 0
out = ""
SNP_info = {}
SNP_info_end = 0
to_chrom_end = False
#for line in fhandle:
line = True
while line:
line = fhandle.readline()
fields = line.split("\t")
# get reference genome information
if fields[0] != cur_chrom:
cur_chrom = fields[0]
cur_chrom_nochr = cur_chrom
if remove_chr_prefix and cur_chrom.startswith("chr"):
cur_chrom_nochr = cur_chrom_nochr[3:]
seq = get_chromosome_sequence(reference_fasta,cur_chrom)
if seq != None:
seq = seq.upper()
to_chrom_end = False
SNP_info_end = 0
if seq == None:
continue
# get SNP information
pos = int(fields[1])-1
if pos > SNP_info_end-context_len-1 and not to_chrom_end:
anchor = fhandle.tell()
SNP_info_end = pos + 100000
new_SNP_info = {}
for tmp_pos in range(pos-context_len,pos+1):
new_SNP_info[tmp_pos] = SNP_info.get(tmp_pos,('0','0'))
SNP_info = new_SNP_info
# make sure the current position is correctly analyzed;
# otherwise an error happens if there is a region with zero coverage
SNP_info[pos] = analyze_read_basecalls(fields[2],fields[4])
for line in fhandle:
tmp_fields = line.split("\t")
tmp_pos = int(tmp_fields[1])-1
if tmp_pos > SNP_info_end:
break
if tmp_fields[0] != cur_chrom:
to_chrom_end = True
break
SNP_info[tmp_pos] = analyze_read_basecalls(tmp_fields[2],tmp_fields[4])
fhandle.seek(anchor)
# generate allc line
if fields[2] == "C":
strand = "+"
try:
context = seq[(pos-num_upstr_bases):(pos+num_downstr_bases+1)]
except: # complete context is not available, skip
continue
incons_bases = ",".join(
[SNP_info.get(tmp_pos,('0','0'))[0]
for tmp_pos in range(pos-num_upstr_bases,pos+num_downstr_bases+1)]
)
incons_bases_cov = ",".join(
[SNP_info.get(tmp_pos,('0','0'))[1]
for tmp_pos in range(pos-num_upstr_bases,pos+num_downstr_bases+1)]
)
incons_base, incons_base_cov, unconverted_c, converted_c = SNP_info[pos]
elif fields[2] == "G":
strand = "-"
try:
context = "".join([complement[base]
for base in reversed(
seq[(pos-num_downstr_bases):(pos+num_upstr_bases+1)]
)]
)
except: # complete context is not available, skip
continue
incons_bases = ",".join([SNP_info.get(tmp_pos,('0','0'))[0]
for tmp_pos in reversed(
range(pos-num_downstr_bases,pos+num_upstr_bases+1)
)]
)
incons_bases_cov = ",".join([SNP_info.get(tmp_pos,('0','0'))[1]
for tmp_pos in reversed(
range(pos-num_downstr_bases,pos+num_upstr_bases+1)
)]
)
incons_base, incons_base_cov, unconverted_c, converted_c = SNP_info[pos]
else:
continue
cov = unconverted_c+converted_c
if cov > 0 and len(context) == context_len:
line_counts += 1
out += "\t".join([cur_chrom_nochr,str(pos+1),strand,context,
str(unconverted_c),str(cov),"1",
incons_bases,incons_bases_cov])+"\n"
if line_counts > buffer_line_number:
output_filehandler.write(out)
line_counts = 0
out = ""
if line_counts > 0:
output_filehandler.write(out)
line_counts = 0
out = ""
fhandle.close()
output_filehandler.close()
if generate_mpileup_file and not keep_temp_files:
subprocess.check_call(shlex.split("rm -f "+path_to_files+sample+"_mpileup_output.tsv"))
if binom_test:
print_checkpoint('Perform binomial test')
perform_binomial_test(allc_file=output_file,
sample=sample,
path_to_output=path_to_files,
unmethylated_control=unmethylated_control,
min_cov=min_cov,
sig_cutoff=sig_cutoff,
num_procs=num_procs,
sort_mem=sort_mem,
compress_output=compress_output,
buffer_line_number=buffer_line_number,
remove_chr_prefix=remove_chr_prefix)
elif not unmethylated_control is None:
non_conversion = calculate_non_conversion_rate(unmethylated_control,
output_file,
chrom_pointer=None,
remove_chr_prefix=remove_chr_prefix)
index_allc_file(output_file)
if bgzip:
bgzip_allc_file(output_file,path_to_bgzip,path_to_tabix,buffer_line_number)
return 0
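# For reference, each allc line written by the function above has nine tab-separated
# fields: chromosome, 1-based position, strand, context, unconverted C count, coverage,
# "1", and the per-context-position inconsistent-basecall counts and coverages.
# An illustrative (made-up) line:
#   12  3405  +  CGA  3  5  1  0,0,1  5,5,5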
def do_split_allc_file(allc_file,
sample,
path_to_output = "",
compress_output=True,
buffer_line_number=100000):
"""
"""
if len(path_to_output)!=0:
path_to_output+="/"
fhandle = open_allc_file(allc_file)
output_files = []
cur_chrom = ""
out = ""
line_counts = 0
for line in fhandle:
fields = line.split("\t")
if fields[0] != cur_chrom:
# output
if line_counts > 0:
output_handle.write(out)
line_counts = 0
out = ""
output_handle.close()
cur_chrom = fields[0]
if compress_output:
output_handle = gzip.open(path_to_output+"allc_"+sample+"_"+cur_chrom+".tsv.gz",'wt')
output_files.append(path_to_output+"allc_"+sample+"_"+cur_chrom+".tsv")
else:
output_handle = open(path_to_output+"allc_"+sample+"_"+cur_chrom+".tsv",'w')
output_files.append(path_to_output+"allc_"+sample+"_"+cur_chrom+".tsv")
# data
line_counts += 1
out += line
if line_counts >= buffer_line_number:
output_handle.write(out)
line_counts = 0
out = ""
if line_counts > 0:
output_handle.write(out)
line_counts = 0
out = ""
output_handle.close()
return(output_files)
def do_split_allc_file_chunk(allc_file,
sample,
num_chunks,
path_to_output = "",
compress_output=True):
"""
"""
if len(path_to_output)!=0:
path_to_output+="/"
f = open_allc_file(allc_file)
chrom_line_counts = {}
total_line_count = 0
for line in f:
fields = line.split("\t")
chrom_line_counts[fields[0]] = chrom_line_counts.get(fields[0],0)+1
total_line_count += 1
if total_line_count == 0:
return 0
chunk_size = math.ceil(float(total_line_count)/float(num_chunks))
index, cur_line_count = 0, 0
chrom2index = {}
for chrom in sorted(chrom_line_counts.keys(),key=lambda x:chrom_line_counts[x]):
if cur_line_count >= chunk_size:
cur_line_count = 0
index += 1
cur_line_count += chrom_line_counts[chrom]
chrom2index[chrom] = index
output_files = []
output_handles = {}
for index in range(index+1):
if compress_output:
output_handle = gzip.open(path_to_output+"allc_"+sample+"_"+str(index)+".tsv.gz",'wt')
output_files.append(path_to_output+"allc_"+sample+"_"+str(index)+".tsv")
else:
output_handle = open(path_to_output+"allc_"+sample+"_"+str(index)+".tsv",'w')
output_files.append(path_to_output+"allc_"+sample+"_"+str(index)+".tsv")
output_handles[index] = output_handle
f.seek(0)
for line in f:
fields = line.split("\t")
output_handles[chrom2index[fields[0]]].write(line)
for index in output_handles:
output_handles[index].close()
return(output_files)
def perform_binomial_test(allc_file,
sample,
path_to_output,
unmethylated_control,
min_cov=2,
sig_cutoff=0.01,
num_procs=1,
sort_mem="500M",
compress_output=True,
buffer_line_number=100000,
remove_chr_prefix=True):
"""
"""
if len(path_to_output)!=0:
path_to_output+="/"
# calculate non-conversion rate
chrom_pointer = read_allc_index(allc_file)
non_conversion = calculate_non_conversion_rate(unmethylated_control,
allc_file,
chrom_pointer)
# binomial test
if num_procs > 1:
if len(chrom_pointer.keys()) > 100: # with too many files open can cause problem
input_files = do_split_allc_file_chunk(allc_file,
sample,
min(100,num_procs),
path_to_output,
compress_output=False)
else:
# split allc file by chromosome
input_files = do_split_allc_file(allc_file,
sample,
path_to_output,
compress_output=False,
buffer_line_number=buffer_line_number)
output_files = [input_file+"_binom_results.tsv" for input_file in input_files]
pool=multiprocessing.Pool(num_procs)
results = []
for input_file,output_file in zip(input_files,output_files):
results.append(pool.apply_async(allc_run_binom_tests,
(input_file,output_file,non_conversion),
{"min_cov":min_cov,"sort_mem":sort_mem}))
mc_class_counts = {}
for result in results:
result_mc_class_counts = result.get()
for mc_class in result_mc_class_counts:
mc_class_counts[mc_class] = mc_class_counts.get(mc_class,0) + result_mc_class_counts[mc_class]
pool.close()
pool.join()
subprocess.check_call(shlex.split("rm "+" ".join(input_files)))
else:
output_files = [path_to_output+"allc_"+sample+".tsv_binom_results.tsv"]
output_file = output_files[0]
mc_class_counts = allc_run_binom_tests(filen=allc_file,
output_file=output_file,
non_conversion=non_conversion,
min_cov=min_cov,
sort_mem=sort_mem)
# FDR correction
p_value_cutoff = benjamini_hochberg_correction_call_methylated_sites(
files=output_files,
mc_class_counts=mc_class_counts,
sig_cutoff=sig_cutoff)
output_file = path_to_output+"allc_"+sample+".tsv"
if compress_output:
output_file += ".gz"
filter_files_by_pvalue_combined(input_files=output_files,
output_file=output_file,
best_pvalues=p_value_cutoff,
num_procs=num_procs,
sort_mem=sort_mem,
compress_output=compress_output)
# remove _binom_results.tsv files
subprocess.check_call(shlex.split("rm "+" ".join(output_files)))
def calculate_non_conversion_rate(unmethylated_control,
allc_file,
chrom_pointer=None,
remove_chr_prefix=True):
"""
"""
# Parse unmethylated_control
try:
non_conversion = float(unmethylated_control)
if non_conversion < 0 or non_conversion > 1:
print_error("Invalid unmethylated_control! "
+"It should be either a string, or a decimal between 0 and 1!\n")
else:
print_checkpoint("The non-conversion rate is "+str(non_conversion*100)+"%")
return(non_conversion)
except:
if isinstance(unmethylated_control,str):
fields = [field for field in
re.split("[\:\-]",unmethylated_control)
if len(field) > 0]
um_chrom,um_start,um_end = None,None,None
if len(fields) == 0:
print_error("Invalid unmethylated_control! "
+"It should be either a string, or a decimal between 0 and 1!\n")
# decode
fields[0] = fields[0]
if remove_chr_prefix and fields[0].startswith("chr"):
fields[0] = fields[0][3:]
if len(fields) == 1: # chrom only
um_chrom = fields[0]
elif len(fields) == 2: # chrom and start
um_chrom,um_start = fields
else:
um_chrom,um_start,um_end = fields[:3]
# further parsing
try:
if not (um_start is None):
um_start = int(um_start)
if not (um_end is None):
um_end = int(um_end)
except:
print_error("Invalid unmethylated_control! "
+"It should be either a string, or a decimal between 0 and 1!\n")
# scan allc file to set up a table for fast look-up of lines belong
# to different chromosomes
chrom_pointer = read_allc_index(allc_file)
f = open_allc_file(allc_file)
if um_chrom not in chrom_pointer:
print_error("The chromosome specified in unmethylated_control is not in the output allc file!\n")
f.seek(chrom_pointer[um_chrom])
# calculate non-conversion rate
mc, h = 0, 0
for line in f:
line = line.rstrip("\n")
fields = line.split("\t")
if fields[0] != um_chrom:
break
if not (um_end is None) and int(fields[1]) > um_end:
break
if not (um_start is None) and int(fields[1]) < um_start:
continue
mc += int(fields[4])
h += int(fields[5])
non_conversion = None
if h > 0:
non_conversion = float(mc) / float(h)
print_checkpoint("The non-conversion rate is "+str(non_conversion*100)+"%")
return(non_conversion)
else:
print_error("The chromosome and range specified in unmethylated_control "
+"is not in the output allc file!\n")
def benjamini_hochberg_correction_call_methylated_sites(files,mc_class_counts,sig_cutoff):
"""
This function is similar to the one defined here:
http://stats.stackexchange.com/questions/870/multiple-hypothesis-testing-correction-with-benjamini-hochberg-p-values-or-q-va
But takes advantage of the fact that the elements provided to it are in a sorted file.
This way, it doesn't have to load much into memory.
This link:
http://brainder.org/2011/09/05/fdr-corrected-fdr-adjusted-p-values/
was also helpful as the monotonicity correction from stats.stackexchange is not correct.
    files is a list of paths to allc files (generated by allc_run_binom_tests).
mc_class_counts is a dictionary indicating the total number of statistical tests performed for each mc context
sig_cutoff is the FDR cutoff you'd like to use to indicate if a site is significant.
"""
#A dict of file_names to file handles for the benjamini hochberg correction step
input_files = {}
input_lines = {}
input_fields ={}
input_pvalues={}
test_num={}
prev_bh_value = {}
best_fdr = {}
best_pvalue = {}
output_files = {}
for filen in files:
input_files[filen]=open(filen,'r')
input_lines[filen] = input_files[filen].readline().rstrip()
input_fields[filen] = input_lines[filen].split("\t")
try:
input_pvalues[filen] = float(input_fields[filen][6])
except:
#Dummy value that will never be the minimum
input_pvalues[filen] = 2.0
min_pvalue = min(input_pvalues,key=input_pvalues.get)
#pdb.set_trace()
while [i for i in input_pvalues if input_pvalues[i]!=2.0]:
fields = input_fields[min_pvalue]
bh_value = float(fields[6]) * mc_class_counts[fields[3]] / (test_num.get(fields[3],1) + 1)
# Sometimes this correction can give values greater than 1,
# so we set those values at 1
bh_value = min(bh_value, 1.0)
prev_bh_value[fields[3]] = bh_value
#if bh_value <= sig_cutoff and bh_value >= best_fdr:
if bh_value <= sig_cutoff:
best_fdr[fields[3]] = bh_value
best_pvalue[fields[3]] = float(fields[6])
test_num[fields[3]] = test_num.get(fields[3],1) + 1
input_lines[min_pvalue]=input_files[min_pvalue].readline().rstrip()
input_fields[min_pvalue]=input_lines[min_pvalue].split("\t")
try:
input_pvalues[min_pvalue]=float(input_fields[min_pvalue][6])
except:
#Dummy value that will never be the minimum
input_pvalues[min_pvalue]=2.0
min_pvalue = min(input_pvalues,key=input_pvalues.get)
for mc_class in mc_class_counts:
best_pvalue[mc_class] = best_pvalue.get(mc_class,0)
print("The closest p-value cutoff for "
+mc_class+" at your desired FDR is "+
str(best_pvalue[mc_class])+
" which corresponds to an FDR of "+
str(best_fdr.get(mc_class,1)))
for filen in files:
input_files[filen].close()
return best_pvalue
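# Worked example of the correction loop above (numbers are made up): for an mc context
# with mc_class_counts == 1000, a raw p-value p is turned into
# p * 1000 / (test_num + 1), where test_num is the running counter for that context,
# and the result is capped at 1.0 before being compared against sig_cutoff.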
def allc_run_binom_tests(filen,output_file,non_conversion,min_cov=1,sort_mem="500M"):
"""
This function is used to recall methylated sites. This is faster than
going through the original mpileup files.
    filen is a string containing the path to an allc file
non_conversion is a float indicating the estimated non-conversion rate and sequencing
error
min_cov is the minimum number of reads a site must have to be tested
sort_mem is the parameter to pass to unix sort with -S/--buffer-size command
"""
if sort_mem:
sort_option = " -S " + sort_mem
else:
sort_option = ""
mc_class_counts = {}
obs_pvalues = {}
reverse_complement = {"A":"T","C":"G","G":"C","T":"A","N":"N"}
f = open_allc_file(filen)
g = open(output_file,'w')
for line in f:
line = line.rstrip()
fields = line.split("\t")
mc_class = fields[3]
unconverted_c = int(fields[4])
converted_c = int(fields[5]) - unconverted_c
total = int(fields[5])
if total >= min_cov and unconverted_c != 0:
try:
p_value = obs_pvalues[(unconverted_c,total)]
except:
p_value = sci.binom.sf(unconverted_c-1,total,non_conversion)
obs_pvalues[(unconverted_c,total)] = p_value
g.write("\t".join(fields[:6])+"\t"+str(p_value)+"\n")
mc_class_counts[mc_class] = mc_class_counts.get(mc_class,0) + 1
elif total != 0:
#a dummy value that will always sort to the bottom of the BH correction and be interpreted as
#an unmethylated site
p_value = 2.0
g.write("\t".join(fields[:6])+"\t"+str(p_value)+"\n")
f.close()
g.close()
subprocess.check_call(shlex.split("sort" + sort_option + " -k 7g,7g -o "+output_file+" "+output_file))
return mc_class_counts
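# Illustrative computation for a single allc line (numbers are made up): with
# unconverted_c == 3, total coverage == 5 and non_conversion == 0.01, the one-sided
# p-value written to the output is sci.binom.sf(2, 5, 0.01), i.e. the probability of
# seeing 3 or more unconverted reads out of 5 from non-conversion alone.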
def filter_files_by_pvalue_combined(input_files,output_file,
best_pvalues,num_procs,
compress_output=True,
sort_mem="500M"):
"""
sort_mem is the parameter to pass to unix sort with -S/--buffer-size command
"""
if sort_mem:
sort_option = " -S " + sort_mem
else:
sort_option = ""
print_checkpoint("Begin sorting file by position")
# sort input files
if num_procs > 1:
pool=multiprocessing.Pool(num_procs)
for input_file in input_files:
pool.apply_async(subprocess.check_call,
(shlex.split(
"sort" + sort_option + " -k 1,1 -k 2,2g -o "+input_file+" "+input_file),
))
pool.close()
pool.join()
else:
for input_file in input_files:
subprocess.check_call(
shlex.split("sort" + sort_option + " -k 1,1 -k 2,2g -o "+input_file+" "+input_file))
# output file
if compress_output:
if output_file[-3:] != ".gz":
output_file += ".gz"
g = gzip.open(output_file,'wt')
else:
g = open(output_file,'w')
# write to output file
for input_file in input_files:
f = open(input_file,'r')
for line in f:
line = line.rstrip()
fields = line.split("\t")
if fields[6] != "2.0" and float(fields[6]) <= best_pvalues[fields[3]]:
g.write("\t".join(fields[:6])+"\t1\n")
else:
g.write("\t".join(fields[:6])+"\t0\n")
f.close()
g.close()
def bam_quality_mch_filter(inputf,
outputf,
reference_fasta,
min_mapq=30,
min_ch=3,
max_mch_level=0.7,
buffer_line_number=100000):
min_ch = int(min_ch)
max_mch_level = float(max_mch_level)
# quality filter
fhandle = pysam.AlignmentFile(inputf, "rb")
# open up output file
out_handle = pysam.AlignmentFile("{}.bam".format(outputf),
"wb",
template=fhandle)
ref_genome = pysam.FastaFile(reference_fasta)
out = []
for line in fhandle:
uch = 0
mch = 0
num_ch = 0
# reference
ref_seq = "".join([
ref_genome.fetch(line.reference_name, start, end)
for start, end in line.get_blocks()
])
# read sequence
seq = ""
pos = 0
for block_code, block_len in line.cigar:
if (block_code == 0) or (block_code == 2):
seq += line.seq[pos:(block_len + pos)]
pos += block_len
if not line.is_reverse:
# first base to the last -1 base
pos = ref_seq.find("C")
while (pos >= 0) & (pos < (len(ref_seq) - 1)):
if (ref_seq[pos + 1] != "G") & (ref_seq[pos + 1] != "N"):
if seq[pos] == "C":
mch += 1
elif seq[pos] == "T":
uch += 1
pos = ref_seq.find("C", pos + 1)
# last base
if pos == (len(ref_seq) - 1):
context_ref_pos = line.get_reference_positions()[-1] + 1
context_base = ref_genome.fetch(line.reference_name,
context_ref_pos,
context_ref_pos + 1)
context_base = context_base.upper()
if (context_base != "G") & (context_base != "N"):
if seq[pos] == "C":
mch += 1
elif seq[pos] == "T":
uch += 1
else:
# first base
if ref_seq[0] == "G":
context_ref_pos = line.get_reference_positions()[0] - 1
if context_ref_pos < 0: continue
context_base = ref_genome.fetch(line.reference_name,
context_ref_pos,
context_ref_pos + 1)
context_base = context_base.upper()
if (context_base != "C") & (context_base != "N"):
if seq[0] == "G":
mch += 1
elif seq[0] == "A":
uch += 1
# starting from the second base
pos = ref_seq.find("G", 1)
while (pos > 0):
if (ref_seq[pos - 1] != "C") & (ref_seq[pos - 1] != "N"):
if seq[pos] == "G":
mch += 1
elif seq[pos] == "A":
uch += 1
pos = ref_seq.find("G", pos + 1)
# apply filter
tot_ch = float(mch + uch)
if tot_ch > 0:
if (tot_ch >= min_ch) & (float(mch) / float(tot_ch) >=
max_mch_level):
continue
out.append(line)
if len(out) > buffer_line_number:
for line in out:
out_handle.write(line)
out = []
if len(out) > 0:
for line in out:
out_handle.write(line)
out = []
out_handle.close()
|
{"hexsha": "931d5c095069d59518016ff5526efeb7c5415a33", "size": 105768, "ext": "py", "lang": "Python", "max_stars_repo_path": "methylpy/call_mc_se.py", "max_stars_repo_name": "yupenghe/methylpy", "max_stars_repo_head_hexsha": "0dbf4ef30d6c4d1b98f4a53c3b08b721114c9aaa", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 96, "max_stars_repo_stars_event_min_datetime": "2017-05-04T18:35:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T14:30:00.000Z", "max_issues_repo_path": "methylpy/call_mc_se.py", "max_issues_repo_name": "yupenghe/methylpy", "max_issues_repo_head_hexsha": "0dbf4ef30d6c4d1b98f4a53c3b08b721114c9aaa", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 73, "max_issues_repo_issues_event_min_datetime": "2017-01-03T06:39:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T05:13:16.000Z", "max_forks_repo_path": "methylpy/call_mc_se.py", "max_forks_repo_name": "yupenghe/methylpy", "max_forks_repo_head_hexsha": "0dbf4ef30d6c4d1b98f4a53c3b08b721114c9aaa", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 47, "max_forks_repo_forks_event_min_datetime": "2016-07-27T17:48:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T01:18:17.000Z", "avg_line_length": 42.4600562023, "max_line_length": 140, "alphanum_fraction": 0.5562930187, "include": true, "reason": "import scipy,from scipy", "num_tokens": 22513}
|
import data.nat.prime
import data.list
-- import data.bool
-- set_option trace.simplify true
set_option trace.simplify.rewrite true
set_option trace.simplify.failure false
set_option trace.simplify.rewrite_failure false
namespace first
constants a b : ℤ
constant f : ℤ → ℤ
constant g : ℤ → ℤ → ℤ
#check λ x : ℤ, g (f (g a x)) (g x _)
-- λ (x : ℤ), g (f (g a x)) (g x ?M_1) : ℤ → ℤ
#check λx, g (f (g a x)) (g x b)
-- λ x <=> λx
end first
namespace trools
-- constant trool : Type
-- constants trool.true trool.false trool.maybe : trool
inductive trool
| true : trool
| false : trool
| maybe : trool
#check trool.true -- trool.true : trool
def trool_to_bool (x : trool) : bool :=
match x with
| trool.true := tt
| trool.false := ff
| _ := ff
end
def trool_to_bool₂: trool → bool
| trool.true := tt
| trool.false := ff
| _ := ff
-- def prime (p : ℕ) := 2 ≤ p ∧ ∀ m ∣ p, m = 1 ∨ m = p
def or (a b : trool) := (a = trool.true) ∨ (b = trool.true)
-- or a a = (a = trool.true) ∨ (a = trool.true) = X ∨ X = X = (a = trool.true)
-- (a = trool.true) =>
-- case 1: trool_to_bool trool.true ↔ (trool.true = trool.true)
-- case 2: trool_to_bool trool.false ↔ (trool.false = trool.true)
-- case 3: trool_to_bool trool.maybe ↔ (trool.maybe = trool.true)
--
lemma or_self (a : trool) : or a a = trool_to_bool a :=
begin
simp [or],
cases a; dsimp [trool_to_bool],
{
-- rw [eq.refl trool.true], -- do nothing :D
rw eq_self_iff_true,
rw bool.coe_sort_tt, -- I found it from the idea of coe_sort_ff + (if ff exists, tt exists too)
},
{
-- simp [trool_to_bool],
-- [bool.coe_sort_ff]: ↥ff ==> false
-- [iff_self]: false ↔ false ==> true
rw bool.coe_sort_ff,
-- apply iff.symm,
-- change false ↔ false, -- fails
simp,
-- all below not useful and generated by suggest
-- refine iff.symm _
-- refine iff_of_eq _
-- refine eq.to_iff _
-- refine iff_false_intro _
},
{
simp,
},
end
end trools
namespace axiom_examples
constants a b : ℤ
-- lemma a_less_b : a < b := sorry
-- <=>
axiom a_less_b : a < b
axiom symm_lt (x y : ℤ): x < y → y > x
axiom not_eq_imp_lt (x y : ℤ): x ≠ y → x < y ∨ x > y
example : b > a :=
begin
have s := symm_lt _ _ a_less_b,
exact s,
end
lemma p_q_not (P Q: Prop) : P = Q → P ∧ ¬ Q → false :=
begin
intro pq,
rw ← pq,
intro h,
simp at h,
exact h,
end
example (x: nat): x = x :=
begin
have h1: 1 = 1,{refl,},
have h2: 2 = 2 := rfl,
-- have h3 := h1 ∧ h2, -- wrong
have h3 := and.intro h1 h2, -- right
refl,
end
constants P Q : Type
-- lemma p_q_not (p : P)(q : Q) : p = q → p ∧ ¬ q → false := sorry
lemma example_forward_proof (p : Prop) : p = p :=
begin
-- simp, -- [simplify.rewrite] [eq_self_iff_true]: p = p ==> true
-- refl,
-- apply eq_self_iff_true p,
have h1 := eq_self_iff_true p, -- we don't need this, only for example
have h2 := h1.to_eq, -- also works: propext h1
have h3 := h1.mpr,
have h4 := h3 true.intro,
exact h4,
end
#print example_forward_proof
-- λ (p : Prop), (eq_self_iff_true p).mpr true.intro
-- this can be proved by reflexivity
lemma example_forward_proof₂ (p : Prop) : p = p := eq.refl _
#print example_forward_proof₂ -- λ (p : Prop), eq.refl p
lemma example_forward_proof₃ (p q : Prop) : p = q → (p ∧ q) = p :=
begin
assume h1 : p = q,
have h2 : p ∧ p ↔ p := and_self p,
have h3 : (p ∧ p) = p := h2.to_eq, -- iff.to_eq
have h4 : (p ∧ p) = p → (p ∧ q) = p :=
assume (h: (p ∧ p) = p), eq.subst h1 h,
show (p ∧ q) = p, from h4 h3, -- ≈ exact h4 h3, but more readable
end
example (expr : Prop): (expr ∨ expr) → true :=
begin
-- simp,
-- 0. [simplify.rewrite] [or_self]: expr ∨ expr ==> expr
-- 0. [simplify.rewrite] [forall_true_iff]: expr → true ==> true
rw or_self,
rw forall_true_iff,
exact true.intro,
end
-- id <=> ∀ A : Type, A → A
example : ∀ A : Type, A → A := λ _, id
example : ∀ A : Type, A → A := λ A, @id A
example : ∀ A : Type, A → A := assume A, @id A
example (A : Type) : A → A := id -- explicit (argument)
example {A : Type} : A → A := id -- implicit {argument}
example ⦃A : Type⦄ : A → A := id -- i don't know, but possible
example (x y : ℤ): ¬ (x = y) ∧ ¬ (x > y) → (y > x) :=
begin
assume f : ¬x = y ∧ ¬x > y,
apply symm_lt,
-- exact a_less_b, -- not works
have f₂ : _ ∧ _ := f, -- example pattern matching
clear f₂,
have h₂ : ¬ (x = y) → x ≠ y, by {
tactic.trace_state,
-- simp,
-- [ne.def]: x ≠ y ==> ¬x = y
-- [classical.not_not]: ¬¬x = y ==> x = y
-- [imp_self]: ¬x = y → ¬x = y ==> true
show_term { rw [ne.def], },
exact id, -- id <=> ∀ A : Type, A → A
tactic.trace_result,
},
have f1 := f.1,
have f2 := f.2,
clear f,
have not_eq_xy : x ≠ y := h₂ f1,
have h₃ : x < y ∨ x > y := not_eq_imp_lt _ _ not_eq_xy,
cases h₃ with first second,
{
-- first : x < y
-- i think this one command `exact first` must be solution
exact first,
},
{
-- contradiction,
exfalso,
-- have t₁ := p_q_not second f2,
-- have h₅ := p_q_not _ _,
-- have u := and.intro second f2,
-- simp [second] at u,
-- exact u,
exact absurd second f2,
},
end
end axiom_examples
namespace backward_proofs
/-
Forward proof:
From a and a → b, we have b. (Goal unchanged)
From b and b → c, we have c, as desired.
* A forward proof only manipulates theorems, not goals.
Backward proof:
To prove c, by b → c it suffices to prove b. (Goal changed from ⊢ c to ⊢ b)
To prove b, by a → b it suffices to prove a. (Goal changed from ⊢ b to ⊢ a)
To prove a, we use a.
* A backward proof starts from the goal and works backwards towards the already proved lemmas.
-/
-- Example of forward proof:
lemma fst_of_two_props :
∀ a b : Prop, a → b → (a → b) :=
begin -- ⊢ ∀ (a b : Prop), a → b → a
introv,
intros ha hb,
have h := imp_intro hb, -- it's example very trivial
exact h,
end
-- Example of backward proof:
lemma fst_of_two_props' :
∀ a b : Prop, a → b → a :=
begin -- ⊢ ∀ (a b : Prop), a → b → a
introv, -- ⊢ a → b → a (Goal changed)
intros ha hb, -- ⊢ a (Goal changed)
apply ha, -- goals accomplished (Goal changed)
end
lemma and_swap :
∀ a b : Prop, a ∧ b → b ∧ a :=
begin
intros a b hab,
apply and.intro,
-- { exact hab.2 },
-- { exact hab.1 },
{ exact and.elim_right hab },
{ exact and.elim_left hab },
end
def double (n : ℕ) := n + n
lemma nat_exists_double_iden :
∃ n : ℕ, double n = n :=
begin
apply exists.intro 0, -- ↔ use 0
refl,
end
lemma double_prop : ∀ n : ℕ, n ≠ 1 → double n ≠ n + 1 :=
begin
type_check double.equations._eqn_1, -- ∀ (n : ℕ), double n = n + n
intros,
apply not.intro,
induction n with x hx,
simp [double],
have a2 : x ≠ 0, from sorry,
dsimp only [double],
simp,
-- simp at hx ⊢, -- example how to simplify goal and hypothesis simultaniously
simp only [a],
-- show_term { trivial, }, -- λ (a_1 : false), false.rec false a_1
exact id, -- shortest proof
end
#print double_prop
end backward_proofs
namespace lemma_statements
set_option trace.simplify.rewrite_failure true
lemma my_add_comm (m n : ℕ) :
nat.add m n = nat.add n m :=
begin
simp,
simp only [backward_proofs.double_prop, nat.add_comm], --
-- output:
-- perm rejected: n + m !< m + n
-- [simplify.rewrite] [nat.add_comm]: n + m ==> m + n
-- simp works only with the second part of the equation n + m = m + n:
-- n + m = (m + n) ==> n + m = (n + m) ==> true
-- 1. because theorem nat.add_comm is ∀ (m n : ℕ), m + n = n + m [it's wrong]
-- 2.
-- maybe simp uses lexicographical order?
end
#print nat.add_comm
lemma and_swap (a b : Prop) :
a ∧ b → b ∧ a :=
begin
intro hab,
apply and.intro,
-- {
-- apply and.elim_right,
-- exact hab,
-- }
-- <=>
-- apply and.elim_right hab, <=>
exact hab.2,
exact hab.1,
end
/-
3.1 Structured Proofs
-/
lemma fst_of_two_props'' :
∀ a b : Prop, a → b → a :=
assume a b : Prop,
assume (ha : a)(hb : b),
show a, from ha
lemma snd_of_two_props'' :
∀ a b : Prop, a → b → b :=
assume _ b : Prop,
assume (_)(hb : b),
show _, from hb -- placeholder _ is important thing
lemma prop_comp (a b c : Prop) (hab : a → b) (hbc : b → c) :
a → c :=
assume ha : a,
have hb : b := hab ha,
have hc : c := hbc hb,
show c, from hc
#print fst_of_two_props''
-- λ (a b : Prop) (ha : a) (hb : b), show a, from ha
#print snd_of_two_props''
-- λ (_x b : Prop) (_x : _x) (hb : b), show b, from hb
#print prop_comp
-- λ (a b c : Prop) (hab : a → b) (hbc : b → c) (ha : a),
-- have hb : b, from hab ha,
-- have hc : c, from hbc hb,
-- show c, from hc
/-
3.3 Forward Reasoning about Connectives and Quantifiers
-/
-- lemma forall.one_point {α : Type} (t : α) (p : α → Prop) :
-- (∀x, x = t → p x) ↔ p t :=
-- iff.intro
-- (assume hall : ∀x, x = t → p x,
-- show p t, from
-- begin
-- apply hall t,
-- refl
-- end)
-- (assume hp : p t,
-- fix x,
-- assume heq : x = t,
-- show p x, from
-- begin
-- rewrite heq,
-- exact hp
-- end)
example : 1 = 2 → 2 = 3 → 1 = 3 :=
λ (h₁ : 1 = 2) (h₂ : 2 = 3), @eq.subst nat (λ n, 1 = n) 2 3 h₂ h₁
example : 1 = 2 → 2 = 3 → 1 = 3 := by {
intros h1 h2,
-- apply @eq.subst nat (λ n, 1 = n) 2 3 h2 h1,
-- exact eq.subst h2 h1,
exact h2 ▸ h1,
}
lemma forall.one_point' {α : Type} (t : α) (p : α → Prop) :
(∀x, x = t → p x) ↔ p t :=
iff.intro
(assume hall: ∀ x, x = t → p x,
have h₁ : t = t → p t, from hall t,
show p t, from h₁ (rfl : t = t))
(λ h₁ x (h₂ : x = t),
show p x, from (h₂.symm : t = x) ▸ (h₁ : p t)) -- λ ↔ assume
--by {
-- refine @eq.subst α _ t x h₂.symm h₁,
-- exact h₂.symm ▸ h₁ }
lemma beast_666 (beast : ℕ) :
(∀n, n = 666 → beast ≥ n) ↔ beast ≥ 666 :=
forall.one_point' 666 (λ n, nat.le n beast) -- x ≥ y ↔ ge x y ↔ le y x
-- nat.ge doesn't exist because the developers of the Lean library want to minimize the number of equivalent functions
-- λ (beast : ℕ), forall.one_point' 666 (λ (n : ℕ), beast ≥ n)
#print beast_666
/-
3.4 Calculational Proofs
-/
-- Calculational proof example:
lemma two_mul_example (m n : nat) :
2 * m + n = m + n + m :=
calc 2 * m + n
= (m + m) + n : by rewrite two_mul
... = m + n + m : by cc
-- Forward proof example:
lemma two_mul_example₂ (m n : nat) :
2 * m + n = m + n + m :=
have h₁ : 2 * m + n = (m + m) + n :=
by rewrite two_mul,
have h₂ : (m + m) + n = m + n + m :=
by show_term { cc },
show _, from
eq.trans h₁ h₂
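-- A further minimal calc sketch (it relies only on `add_zero` and `add_comm`):
lemma calc_sketch (m n : ℕ) : (m + n) + 0 = n + m :=
calc (m + n) + 0
    = m + n : by rw add_zero
... = n + m : by rw add_comm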
end lemma_statements
namespace proofs_by_induction
constant add : ℕ → ℕ → ℕ
axiom add_zero (m : ℕ): add m 0 = m
axiom add_succ (m n : ℕ): add n (nat.succ n) = nat.succ (add m n)
-- lemma add_zero (n : ℕ)
end proofs_by_induction
namespace induction_by_pattern_matching
def reverse {α : Type} : list α → list α
| [] := []
| (x :: xs) := reverse xs ++ [x]
-- The induction step is:
-- ih : ∀xs, reverse (reverse xs) = xs ⊢ reverse (reverse xs ++ [x]) = x :: xs
/-
We need a way to "distribute" the outer reverse over ++ to obtain a term that
matches the induction hypothesis's left-hand side. The trick is to prove
and use the following lemma:
-/
-- Step 1:
-- lemma reverse_append {α : Type} :
-- ∀xs ys : list α, reverse (xs ++ ys) = reverse ys ++ reverse xs
-- | [] ys := sorry
-- | (x :: xs) ys := sorry
-- Step 2:
-- failed to prove recursive application is decreasing, well founded relation
lemma reverse_append_wrong {α : Type} :
∀ xs ys : list α, reverse (xs ++ ys) = reverse ys ++ reverse xs
| [] ys := begin
-- rw reverse,
-- simp,
rw reverse_append_wrong,
end
| (x :: xs) ys := begin
simp [reverse, reverse_append_wrong xs],
end
-- OK
lemma reverse_append {α : Type} :
∀ xs ys : list α, reverse (xs ++ ys) = reverse ys ++ reverse xs
| [] ys := begin
rw reverse,
simp only [list.nil_append, list.append_nil],
-- rw reverse_append,
end
| (x :: xs) ys := begin
simp only [reverse, reverse_append xs, list.append_assoc, list.cons_append],
end
lemma reverse_append₂ {α : Type} (xs ys : list α) :
reverse (xs ++ ys) = reverse ys ++ reverse xs :=
begin
induction xs with xs list_h ih,
case nil {
simp [reverse],
},
case cons {
simp [reverse, ih],
},
end
set_option trace.simp_lemmas false
lemma reverse_reverse {α : Type} :
∀xs : list α, reverse (reverse xs) = xs
| [] := rfl
| (x :: xs) := begin
-- rw reverse_reverse, -- failed to prove recursive application is decreasing
rw reverse,
rw reverse_append,
rw reverse_reverse,
rw reverse.equations._eqn_2,
rw reverse.equations._eqn_1,
rw list.nil_append,
rw list.cons_append,
rw list.nil_append,
-- rw reverse,
-- simp [reverse],
-- Trace output of simplify.rewrite:
-- 1. [reverse.equations._eqn_2]: reverse [x] ==> reverse list.nil ++ [x]
-- 2. [reverse.equations._eqn_1]: reverse list.nil ==> list.nil
-- 3. [list.nil_append]: list.nil ++ [x] ==> [x]
-- 4. [list.cons_append]: [x] ++ xs ==> x :: (list.nil ++ xs)
-- 5. [list.nil_append]: list.nil ++ xs ==> xs
-- 6. [eq_self_iff_true]: x = x ==> true
-- 7. [eq_self_iff_true]: xs = xs ==> true
-- 8. [and_self]: true ∧ true ==> true
end
lemma reverse_reverse₂ {α : Type} :
∀xs : list α, reverse (reverse xs) = xs
| [] := rfl
| (x :: xs) := by simp [reverse, reverse_append, reverse_reverse₂ xs]
end induction_by_pattern_matching
namespace inductive_types
namespace hidden
inductive nat : Type
| zero : nat
| succ : nat → nat
end hidden
-- <=>
namespace hidden2
constant nat : Type
constant nat.zero : nat
constant nat.succ : nat → nat
end hidden2
-- + some properties about nat.zero and nat.succ, which is why we use the "inductive" command
set_option trace.debug.dsimplify true
-- set_option trace.simplify false
set_option trace.simplify.context false
set_option trace.simplify.congruence false
set_option trace.simplify.canonize false
set_option trace.simplify.rewrite true
lemma succ_neq_self (n : ℕ) :
nat.succ n ≠ n :=
begin
induction n with n ih,
{
-- [nat.nat_zero_eq_zero]: 0 ==> 0
-- [ne.def]: 1 ≠ 0 ==> ¬1 = 0
simp,
},
{
rewrite ne.def,
simp only [ih, ne.def, not_false_iff],
},
-- [ne.def]: n_n.succ.succ ≠ n_n.succ ==> ¬n_n.succ.succ = n_n.succ
-- 2. [simplify.rewrite] [n_ih]: n_n.succ = n_n ==> false
-- [simplify] eq: not
-- 1. [simplify.rewrite] [not_false_iff]: ¬false ==> true
end
end inductive_types
|
{"author": "mathprocessing", "repo": "lean_mathlib_examples", "sha": "743c6456c0a3219dd1722efdd31ee6f3a113818a", "save_path": "github-repos/lean/mathprocessing-lean_mathlib_examples", "path": "github-repos/lean/mathprocessing-lean_mathlib_examples/lean_mathlib_examples-743c6456c0a3219dd1722efdd31ee6f3a113818a/src/hitchhikers_guide.lean"}
|
// (C) Copyright 2005 Matthias Troyer and Dave Abrahams
// Copyright (c) 2015 Anton Bikineev
// Copyright (c) 2015 Andreas Schaefer
// Copyright (c) 2022 Hartmut Kaiser
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#pragma once
#include <hpx/config.hpp>
#include <hpx/config/endian.hpp>
#include <hpx/assert.hpp>
#include <hpx/serialization/serialization_fwd.hpp>
#include <hpx/serialization/serialize.hpp>
#include <hpx/serialization/traits/is_bitwise_serializable.hpp>
#include <hpx/serialization/traits/is_not_bitwise_serializable.hpp>
#if defined(HPX_SERIALIZATION_HAVE_BOOST_TYPES)
#include <boost/array.hpp>
#endif
#include <array>
#include <cstddef>
#include <type_traits>
namespace hpx::serialization {
template <typename T>
class array
{
public:
using value_type = T;
constexpr array(value_type* t, std::size_t s) noexcept
: m_t(t)
, m_element_count(s)
{
}
constexpr value_type* address() const noexcept
{
return m_t;
}
constexpr std::size_t count() const noexcept
{
return m_element_count;
}
template <typename Archive>
void serialize(Archive& ar, unsigned int)
{
#if !defined(HPX_SERIALIZATION_HAVE_ALL_TYPES_ARE_BITWISE_SERIALIZABLE)
// NOLINTNEXTLINE(bugprone-branch-clone)
if (ar.disable_array_optimization() || ar.endianess_differs())
{
                // normal serialization
for (std::size_t i = 0; i != m_element_count; ++i)
{
// clang-format off
ar & m_t[i];
// clang-format on
}
return;
}
#else
HPX_ASSERT(
!(ar.disable_array_optimization() || ar.endianess_differs()));
#endif
using element_type = std::remove_const_t<T>;
constexpr bool use_optimized =
std::is_default_constructible_v<element_type> &&
(hpx::traits::is_bitwise_serializable_v<element_type> ||
!hpx::traits::is_not_bitwise_serializable_v<element_type>);
if constexpr (use_optimized)
{
// try using chunking
if constexpr (std::is_same_v<Archive, input_archive>)
{
ar.load_binary_chunk(m_t, m_element_count * sizeof(T));
}
else
{
ar.save_binary_chunk(m_t, m_element_count * sizeof(T));
}
}
else
{
// normal serialization
for (std::size_t i = 0; i != m_element_count; ++i)
{
// clang-format off
ar & m_t[i];
// clang-format on
}
}
}
private:
value_type* m_t;
std::size_t m_element_count;
};
// make_array function
template <typename T>
HPX_FORCEINLINE constexpr array<T> make_array(
T* begin, std::size_t size) noexcept
{
return array<T>(begin, size);
}
#if defined(HPX_SERIALIZATION_HAVE_BOOST_TYPES)
// implement serialization for boost::array
template <typename Archive, typename T, std::size_t N>
void serialize(
Archive& ar, boost::array<T, N>& a, const unsigned int /* version */)
{
// clang-format off
ar & hpx::serialization::make_array(a.begin(), a.size());
// clang-format on
}
#endif
// implement serialization for std::array
template <typename Archive, typename T, std::size_t N>
void serialize(
Archive& ar, std::array<T, N>& a, const unsigned int /* version */)
{
// clang-format off
ar & hpx::serialization::make_array(a.data(), a.size());
// clang-format on
}
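    // Illustrative usage sketch (the construction of the archive itself is an
    // assumption and is not shown by this header):
    //
    //   std::array<int, 4> a{{1, 2, 3, 4}};
    //   // given an output_archive `ar` set up elsewhere:
    //   // ar & a;   // dispatches to the serialize() overload above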
// allow our array to be serialized as prvalue
    // the compiler should support a proper ADL implementation,
    // but this really concerns the whole hpx serialization library
template <typename T>
HPX_FORCEINLINE output_archive& operator<<(output_archive& ar, array<T> t)
{
ar.save(t);
return ar;
}
template <typename T>
HPX_FORCEINLINE input_archive& operator>>(input_archive& ar, array<T> t)
{
ar.load(t);
return ar;
}
template <typename T>
HPX_FORCEINLINE output_archive& operator&(
output_archive& ar, array<T> t) //-V524
{
ar.save(t);
return ar;
}
template <typename T>
HPX_FORCEINLINE input_archive& operator&(
input_archive& ar, array<T> t) //-V524
{
ar.load(t);
return ar;
}
// serialize plain arrays:
template <typename T, std::size_t N>
HPX_FORCEINLINE output_archive& operator<<(output_archive& ar, T (&t)[N])
{
array<T> array = make_array(t, N);
ar.save(array);
return ar;
}
template <typename T, std::size_t N>
HPX_FORCEINLINE input_archive& operator>>(input_archive& ar, T (&t)[N])
{
array<T> array = make_array(t, N);
ar.load(array);
return ar;
}
template <typename T, std::size_t N>
HPX_FORCEINLINE output_archive& operator&(
output_archive& ar, T (&t)[N]) //-V524
{
array<T> array = make_array(t, N);
ar.save(array);
return ar;
}
template <typename T, std::size_t N>
HPX_FORCEINLINE input_archive& operator&(
input_archive& ar, T (&t)[N]) //-V524
{
array<T> array = make_array(t, N);
ar.load(array);
return ar;
}
} // namespace hpx::serialization
|
{"hexsha": "d5f30829a9c6dbe17895dd0ed57d476bb7c92a1f", "size": 5886, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "libs/core/serialization/include/hpx/serialization/array.hpp", "max_stars_repo_name": "bhumitattarde/hpx", "max_stars_repo_head_hexsha": "5b34d8d77b1664fa552445d44cd98e51dc69a74a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-02-08T05:55:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T05:55:09.000Z", "max_issues_repo_path": "libs/core/serialization/include/hpx/serialization/array.hpp", "max_issues_repo_name": "deepaksuresh1411/hpx", "max_issues_repo_head_hexsha": "aa18024d35fe9884a977d4b6076c764dbb8b26d1", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/core/serialization/include/hpx/serialization/array.hpp", "max_forks_repo_name": "deepaksuresh1411/hpx", "max_forks_repo_head_hexsha": "aa18024d35fe9884a977d4b6076c764dbb8b26d1", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.572815534, "max_line_length": 80, "alphanum_fraction": 0.5708460754, "num_tokens": 1401}
|
# This file is a part of Julia. License is MIT: https://julialang.org/license
"""
message(c::GitCommit, raw::Bool=false)
Return the commit message describing the changes made in commit `c`. If
`raw` is `false`, return a slightly "cleaned up" message (which has any
leading newlines removed). If `raw` is `true`, the message is not stripped
of any such newlines.
"""
function message(c::GitCommit, raw::Bool=false)
local msg_ptr::Cstring
msg_ptr = raw ? ccall((:git_commit_message_raw, :libgit2), Cstring, (Ptr{Void},), c.ptr) :
ccall((:git_commit_message, :libgit2), Cstring, (Ptr{Void},), c.ptr)
if msg_ptr == C_NULL
return nothing
end
return unsafe_string(msg_ptr)
end
"""
author(c::GitCommit)
Return the `Signature` of the author of the commit `c`. The author is
the person who made changes to the relevant file(s). See also [`committer`](@ref).
"""
function author(c::GitCommit)
ptr = ccall((:git_commit_author, :libgit2), Ptr{SignatureStruct}, (Ptr{Void},), c.ptr)
@assert ptr != C_NULL
return Signature(ptr)
end
"""
committer(c::GitCommit)
Return the `Signature` of the committer of the commit `c`. The committer is
the person who committed the changes originally authored by the [`author`](@ref), but
need not be the same as the `author`, for example, if the `author` emailed a patch to
a `committer` who committed it.
"""
function committer(c::GitCommit)
ptr = ccall((:git_commit_committer, :libgit2), Ptr{SignatureStruct}, (Ptr{Void},), c.ptr)
@assert ptr != C_NULL
return Signature(ptr)
end
function Base.show(io::IO, c::GitCommit)
authstr = sprint(show, author(c))
cmtrstr = sprint(show, committer(c))
print(io, "Git Commit:\nCommit Author: $authstr\nCommitter: $cmtrstr\nSHA: $(GitHash(c))\nMessage:\n$(message(c))")
end
""" Wrapper around `git_commit_create` """
function commit(repo::GitRepo,
refname::AbstractString,
msg::AbstractString,
author::GitSignature,
committer::GitSignature,
tree::GitTree,
parents::GitCommit...)
commit_id_ptr = Ref(GitHash())
nparents = length(parents)
parentptrs = Ptr{Void}[c.ptr for c in parents]
@check ccall((:git_commit_create, :libgit2), Cint,
(Ptr{GitHash}, Ptr{Void}, Ptr{UInt8},
Ptr{SignatureStruct}, Ptr{SignatureStruct},
Ptr{UInt8}, Ptr{UInt8}, Ptr{Void},
Csize_t, Ptr{Ptr{Void}}),
commit_id_ptr, repo.ptr, isempty(refname) ? C_NULL : refname,
author.ptr, committer.ptr,
C_NULL, msg, tree.ptr,
nparents, nparents > 0 ? parentptrs : C_NULL)
return commit_id_ptr[]
end
"""Commit changes to repository"""
function commit(repo::GitRepo, msg::AbstractString;
refname::AbstractString=Consts.HEAD_FILE,
author::Signature = Signature(repo),
committer::Signature = Signature(repo),
tree_id::GitHash = GitHash(),
parent_ids::Vector{GitHash}=GitHash[])
# Retrieve tree identifier
if iszero(tree_id)
tree_id = with(GitIndex, repo) do idx; write_tree!(idx) end
end
# Retrieve parents from HEAD
if isempty(parent_ids)
try # if throws then HEAD not found -> empty repo
push!(parent_ids, GitHash(repo, refname))
end
end
# return commit id
commit_id = GitHash()
# get necessary objects
tree = GitTree(repo, tree_id)
auth_sig = convert(GitSignature, author)
comm_sig = convert(GitSignature, committer)
parents = GitCommit[]
try
for id in parent_ids
push!(parents, GitCommit(repo, id))
end
commit_id = commit(repo, refname, msg, auth_sig, comm_sig, tree, parents...)
finally
for parent in parents
close(parent)
end
close(tree)
close(auth_sig)
close(comm_sig)
end
return commit_id
end
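# Illustrative usage sketch (the repository path is an assumption):
#   repo = GitRepo("/path/to/repo")
#   oid = commit(repo, "update docs")  # commits staged changes to HEAD with default signatures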
|
{"hexsha": "0cf8ca641076d53949a7de238b6cbb5968326325", "size": 4056, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "base/libgit2/commit.jl", "max_stars_repo_name": "Mikewl/julia", "max_stars_repo_head_hexsha": "4c5cc04156ba074a8baa028c2a8a41b9e70d56ee", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-01-04T13:02:42.000Z", "max_stars_repo_stars_event_max_datetime": "2015-05-31T14:36:56.000Z", "max_issues_repo_path": "base/libgit2/commit.jl", "max_issues_repo_name": "Mikewl/julia", "max_issues_repo_head_hexsha": "4c5cc04156ba074a8baa028c2a8a41b9e70d56ee", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-13T04:03:27.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-03T05:03:24.000Z", "max_forks_repo_path": "base/libgit2/commit.jl", "max_forks_repo_name": "Mikewl/julia", "max_forks_repo_head_hexsha": "4c5cc04156ba074a8baa028c2a8a41b9e70d56ee", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3728813559, "max_line_length": 119, "alphanum_fraction": 0.6289447732, "num_tokens": 1032}
|
from metrics.metric import Metric
from metrics.minutely_returns import MinutelyReturns
from scipy.stats import kurtosis
import numpy as np
class ReturnsVolatilityCorrelation(Metric):
def __init__(self, intervals=4):
self.mr = MinutelyReturns()
def compute(self, df):
returns = np.array(self.mr.compute(df))
volatility = abs(returns)
return [np.corrcoef(returns, volatility)[0,1]]
def visualize(self, simulated, real, plot_real=True):
self.hist(simulated, real, title="Returns/Volatility Correlation", xlabel="Correlation coefficient", plot_real=plot_real, bins=50)
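# Illustrative usage sketch (the DataFrame `df` must be in whatever format
# MinutelyReturns.compute expects; that format is assumed, not shown here):
#   metric = ReturnsVolatilityCorrelation()
#   corr = metric.compute(df)[0]  # correlation between minutely returns and |returns|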
|
{"hexsha": "6796b38f03373e8f2be3961f695dd26a4d3c2f13", "size": 621, "ext": "py", "lang": "Python", "max_stars_repo_path": "realism/metrics/returns_volatility_correlation.py", "max_stars_repo_name": "chris-jh-cho/abides", "max_stars_repo_head_hexsha": "0917e099b901a864751233b19eecf50a451085df", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-23T20:17:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-27T05:03:27.000Z", "max_issues_repo_path": "realism/metrics/returns_volatility_correlation.py", "max_issues_repo_name": "chris-jh-cho/abides", "max_issues_repo_head_hexsha": "0917e099b901a864751233b19eecf50a451085df", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "realism/metrics/returns_volatility_correlation.py", "max_forks_repo_name": "chris-jh-cho/abides", "max_forks_repo_head_hexsha": "0917e099b901a864751233b19eecf50a451085df", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-07T10:50:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-07T10:50:15.000Z", "avg_line_length": 34.5, "max_line_length": 138, "alphanum_fraction": 0.729468599, "include": true, "reason": "import numpy,from scipy", "num_tokens": 145}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% TorrentofShame Resume
% LuaLaTeX Template
% Version 1.0.0 (1/3/2021)
%
% Authors:
% Simon Weizman (contact@simon.weizman.us)
%
% License:
% MIT License (see included LICENSE file)
%
% !TEX encoding = utf8
% !TEX program = lualatex
% NOTE: This template must be compiled with LuaLaTeX, the previous line
% should ensure that LuaLaTeX is used automatically; however, if it does not,
% you will need to specify LuaLaTeX as the engine to use in your editor or script.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%---------------------------------------
% Packages & Other Document Config
%---------------------------------------
\documentclass[11pt, letterpaper]{resume}
\let\\\relax % Allow for lua to write \ commands (?) that work
\directlua{config = require ('config')}
\directlua{f = require ('resume')}
\graphicspath{{./images/}}
\usepackage{lipsum}
%---------------------------------------
% Personal Info
%---------------------------------------
\begin{document}
\directlua{f.profile ()}
\makeprofile%
\begin{paracol}{2}
%---------------------------------------
% Summary (personal statement)
%---------------------------------------
\section*{Summary}
\directlua{
tex.sprint(config.summary or '\\lipsum[1][1-2]')
}
\smallskip
%---------------------------------------
% Experience
%---------------------------------------
\section*{Experience}
%\expentry{} % Job Title
%{} % Employer
%{Jun 2019 -- Current} % Duration
%{} % Location
%{} % Description
\directlua{f.experience ()}
\switchcolumn% Switch to next paracol column
%---------------------------------------
% Skills
%---------------------------------------
\section*{Skills}
\directlua{f.skills ()}
\medskip
%---------------------------------------
% Education
%---------------------------------------
\section*{Education}
%\expentry{} % Major
%{Jun 2019 -- Current} % Duration
%{} % School
\begin{supertabular}{rl}
\directlua{f.education ()}
\end{supertabular}
%---------------------------------------
% Certifications
%---------------------------------------
\section*{Certifications}
%\certentry{} % Name
%{} % Issuer
%{} % Issue Date
%{} % Expire Date (leave empty if doesn't expire)
\begin{supertabular}{rl}
\directlua{f.certifications ()}
\end{supertabular}
\bigskip
%---------------------------------------
% Projects
%---------------------------------------
\section*{Projects}
%\projectentry{} % Project Name
%{} % Duration
%{} % Short Description
\directlua{f.projects ()}
%---------------------------------------
% Languages
%---------------------------------------
%\section*{Languages}
%\langentry{} % Language
%{} % Fluency
%\begin{supertabular}{rl}
%\langentry{English}
%{Native Speaker}
%\end{supertabular}
\end{paracol}
\end{document}
|
{"hexsha": "d8ac0cb57f889a0b2dadb70a8af4c2da15f35050", "size": 2990, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "simonweizman.tex", "max_stars_repo_name": "TorrentofShame/resume", "max_stars_repo_head_hexsha": "3b86e4af0603557a33e80f32cd51f32cdb112e82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simonweizman.tex", "max_issues_repo_name": "TorrentofShame/resume", "max_issues_repo_head_hexsha": "3b86e4af0603557a33e80f32cd51f32cdb112e82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simonweizman.tex", "max_forks_repo_name": "TorrentofShame/resume", "max_forks_repo_head_hexsha": "3b86e4af0603557a33e80f32cd51f32cdb112e82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.9090909091, "max_line_length": 83, "alphanum_fraction": 0.4622073579, "num_tokens": 690}
|
\chapter{hjgad}
\begin{abox}
Problem set-1
\end{abox}
\begin{enumerate}[label=\color{ocre}\textbf{\arabic*.}]
\item Consider the matrix $M=\left(\begin{array}{lll}1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1\end{array}\right)$\\
\textbf{A.} The eigenvalues of $M$ are
{\exyear{NET/JRF(JUNE-2011)}}
\begin{tasks}(4)
\task[\textbf{A.}] $0,1,2$
\task[\textbf{B.}] $0,0,3$
\task[\textbf{C.}] $1,1,1$
\task[\textbf{D.}] $-1,1,3$
\end{tasks}
\begin{answer}
\begin{align*}
\text{For eigen values }\left[\begin{array}{ccc}1-\lambda & 1 & 1 \\ 1 & 1-\lambda & 1 \\ 1 & 1 & 1-\lambda\end{array}\right]&=0\\
(1-\lambda)\left((1-\lambda)^{2}-1\right)-(1-\lambda-1)+1(1-(1-\lambda))&=0\\
(1-\lambda)\left(1+\lambda^{2}-2 \lambda-1\right)+\lambda+\lambda=0 \Rightarrow \lambda^{2}-2 \lambda-\lambda^{3}+2 \lambda^{2}+2 \lambda&=0\\
\lambda^{3}-3 \lambda^{2}=0 \Rightarrow \lambda^{2}(\lambda-3)=0 \Rightarrow \lambda&=0,0,3
\intertext{For any $n \times n$ matrix whose elements are all unity, the eigenvalues are $0,0,\ldots,0,n$.}
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
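As a quick check of the rule used above: every row of $M$ is identical, so $M$ has rank $1$ and two of its eigenvalues are $0$, while the all-ones vector is an eigenvector with eigenvalue $3$,
\begin{align*}
M\left(\begin{array}{l}1 \\ 1 \\ 1\end{array}\right)=\left(\begin{array}{l}3 \\ 3 \\ 3\end{array}\right)=3\left(\begin{array}{l}1 \\ 1 \\ 1\end{array}\right) \Rightarrow \lambda=0,0,3
\end{align*}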
\textbf{B.} The exponential of $M$ simplifies to ( $I$ is the $3 \times 3$ identity matrix)
\begin{tasks}(2)
\task[\textbf{A.}] $e^{M}=I+\left(\frac{e^{3}-1}{3}\right) M$
\task[\textbf{B.}] $e^{M}=I+M+\frac{M^{2}}{2 !}$
\task[\textbf{C.}] $e^{M}=I+3^{3} M$
\task[\textbf{D.}] $e^{M}=(e-1) M$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{We know that}
e^{x}&=1+x+\frac{x^{2}}{2!}+\frac{x^{3}}{3!}+\cdots\\
e^{M}&=I+M+\frac{M^{2}}{2!}+\frac{M^{3}}{3!}+\cdots\\
M&=\left[\begin{array}{lll}1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1\end{array}\right] \Rightarrow M^{2} =\left[\begin{array}{lll}3 & 3 & 3 \\ 3 & 3 & 3 \\ 3 & 3 & 3\end{array}\right]=3 M\\
\text{similarly}\quad M^{3}&=9 M=3^{2} M
\intertext{we can rewrite $e^M$ as,}
e^{M}&=I+M+\frac{3 M}{2 !}+\frac{3^{2} M}{3 !}+\frac{3^{3} M}{4 !}+\cdots\\
&=I+\frac{M}{3}\left[3+\frac{3^{2}}{2 !}+\frac{3^{3}}{3 !}+\frac{3^{4}}{4 !}+\cdots\right]\\
&=I+\frac{M}{3}\left[e^{3}-1\right]
\end{align*}
\end{answer}
\item A $3 \times 3$ matrix $M$ has $\operatorname{Tr}[M]=6, \operatorname{Tr}\left[M^{2}\right]=26$ and $\operatorname{Tr}\left[M^{3}\right]=90$. Which of the following can be a possible set of eigenvalues of $M ?$
{\exyear{NET/JRF(DEC-2011)}}
\begin{tasks}(4)
\task[\textbf{A.}] $\{1,1,4\}$
\task[\textbf{B.}] $\{-1,0,7\}$
\task[\textbf{C.}] $\{-1,3,4\}$
\task[\textbf{D.}] $\{2,2,2\}$
\end{tasks}
\begin{answer}
\begin{align*}
\operatorname{Tr}[M]&=\text{sum of eigenvalues}=-1+3+4=6\\
\operatorname{Tr}\left[M^{2}\right]&=\text{sum of squares of eigenvalues}=(-1)^{2}+(3)^{2}+(4)^{2}=26\\
\operatorname{Tr}\left[M^{3}\right]&=\text{sum of cubes of eigenvalues}=(-1)^{3}+(3)^{3}+(4)^{3}=90
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
\item The eigen values of the matrix $A=\left(\begin{array}{lll}1 & 2 & 3 \\ 2 & 4 & 6 \\ 3 & 6 & 9\end{array}\right)$ are
{\exyear{NET/JRF(JUNE-2012)}}
\begin{tasks}(4)
\task[\textbf{A.}] $(1,4,9)$
\task[\textbf{B.}] $(0,7,7)$
\task[\textbf{C.}] $(0,1,13)$
\task[\textbf{D.}] $(0,0,14)$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{Every row of $A$ is a multiple of $(1,2,3)$, so $\operatorname{rank}(A)=1$: two eigenvalues are zero and the third equals the trace.}
\lambda&=0,\,0,\,\operatorname{Tr}(A)=0,\,0,\,14
\intertext{Another solution}
\text{For eigenvalues }|A-\lambda I|=0 \Rightarrow\left[\begin{array}{ccc}1-\lambda & 2 & 3 \\ 2 & 4-\lambda & 6 \\ 3 & 6 & 9-\lambda\end{array}\right]&=0\\
(1-\lambda)[(4-\lambda)(9-\lambda)-36]-2[2(9-\lambda)-18]+3[12-3(4-\lambda)]&=0\\
(1-\lambda)(4-\lambda)(9-\lambda)-36(1-\lambda)-4(9-\lambda)+36+9 \lambda&=0\\
\lambda^{3}-14 \lambda^{2}=0 \Rightarrow \lambda^{2}(\lambda-14)=0 \Rightarrow \lambda&=0,0,14
\end{align*}
So the correct answer is \textbf{Option (D)}
\end{answer}
\item The eigenvalues of the antisymmetric matrix,
$$
A=\left(\begin{array}{ccc}
0 & -n_{3} & n_{2} \\
n_{3} & 0 & -n_{1} \\
-n_{2} & n_{1} & 0
\end{array}\right)
$$
where $n_{1}, n_{2}$ and $n_{3}$ are the components of a unit vector, are
{\exyear{NET/JRF(JUNE-2012)}}
\begin{tasks}(4)
\task[\textbf{A.}] $0, i,-i$
\task[\textbf{B.}] $0,1,-1$
\task[\textbf{C.}] $0,1+i,-1,-i$
\task[\textbf{D.}] $0,0,0$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{$A$ is real and antisymmetric, $A^{T}=-A$, so its eigenvalues are either zero or purely imaginary. Explicitly, the characteristic equation is}
|A-\lambda I|&=\left|\begin{array}{ccc}-\lambda & -n_{3} & n_{2} \\ n_{3} & -\lambda & -n_{1} \\ -n_{2} & n_{1} & -\lambda\end{array}\right|=0\\
\Rightarrow-\lambda^{3}-\lambda\left(n_{1}^{2}+n_{2}^{2}+n_{3}^{2}\right)&=0
\intertext{Since $n_{1}, n_{2}$ and $n_{3}$ are the components of a unit vector, $n_{1}^{2}+n_{2}^{2}+n_{3}^{2}=1$, so}
\lambda\left(\lambda^{2}+1\right)&=0 \Rightarrow \lambda_{1}=0,\ \lambda_{2}=i,\ \lambda_{3}=-i
\end{align*}
So the correct answer is \textbf{Option (A)}
\end{answer}
\item Consider an $n \times n(n>1)$ matrix $A$, in which $A_{i j}$ is the product of the indices $i$ and $j$ $\left(\right.$ namely $\left.A_{i j}=i j\right)$. The matrix $A$
{\exyear{NET/JRF(DEC-2013)}}
\begin{tasks}(1)
\task[\textbf{A.}] Has one degenerate eigevalue with degeneracy $(n-1)$
\task[\textbf{B.}] Has two degenerate eigenvalues with degeneracies 2 and $(n-2)$
\task[\textbf{C.}] Has one degenerate eigenvalue with degeneracy $n$
\task[\textbf{D.}] Does not have any degenerate eigenvalue
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{The matrix $A$ will be like, }
A_{i j}=\left[\begin{array}{ccccccc}1 & 2 & 3 & 4 & \cdots & n \\ 2 & 4 & 6 & 8 & \cdots & \cdots & \\ 3 & 6 & 9 & 12 & \cdots & \cdots \\ 4 & 8 & 12 & 16 & \cdots & \cdots \\ \vdots & \vdots & \vdots & \vdots & \vdots & \vdots \\ n & \vdots & \vdots & \vdots & & \end{array}\right]
\intertext{Every row of $A$ is a multiple of $(1,2,\ldots,n)$, so $\operatorname{rank}(A)=1$: the eigenvalues are $(n-1)$ zeros together with the trace.}
\lambda&=0,0,\ldots,0,\left[1^{2}+2^{2}+\cdots+n^{2}\right]
\intertext{Thus the matrix has one degenerate eigenvalue (namely $0$) with degeneracy $n-1$.}
\end{align*}
So the correct answer is \textbf{Option (A)}
\end{answer}
\item Consider the matrix
$$
M=\left(\begin{array}{ccc}
0 & 2 i & 3 i \\
-2 i & 0 & 6 i \\
-3 i & -6 i & 0
\end{array}\right)
$$
The eigenvalues of $M$ are
{\exyear{NET/JRF(JUNE-2014)}}
\begin{tasks}(4)
\task[\textbf{A.}] $-5,-2,7$
\task[\textbf{B.}] $-7,0,7$
\task[\textbf{C.}] $-4 i, 2 i, 2 i$
\task[\textbf{D.}] $2,3,6$
\end{tasks}
\begin{answer}
\begin{align*}
M&=\left(\begin{array}{ccc}0 & 2 i & 3 i \\ -2 i & 0 & 6 i \\ -3 i & -6 i & 0\end{array}\right), M^{+}=\left(\begin{array}{ccc}0 & 2 i & 3 i \\ -2 i & 0 & 6 i \\ -3 i & -6 i & 0\end{array}\right)\\
M^{+}&=M
\intertext{so $M$ is Hermitian and its eigenvalues are real. Writing $M=iB$, where $B$ is the real antisymmetric matrix with independent entries $2,3,6$, the eigenvalues of $B$ are $0,\pm i \sqrt{2^{2}+3^{2}+6^{2}}=0,\pm 7 i$ (property of an antisymmetric matrix), hence}
\lambda&=i\,(0,\pm 7 i)=0,\mp 7\\
&=-7,0,7
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
\item The column vector $\left(\begin{array}{l}a \\ b \\ a\end{array}\right)$ is a simultaneous eigenvector of $A=\left(\begin{array}{ccc}0 & 0 & 1 \\ 0 & 1 & 0 \\ 1 & 0 & 0\end{array}\right)$ and $B=\left(\begin{array}{lll}0 & 1 & 1 \\ 1 & 0 & 1 \\ 1 & 1 & 0\end{array}\right)$ if
{\exyear{NET/JRF(DEC-2014)}}
\begin{tasks}(2)
\task[\textbf{A.}] $b=0$ or $a=0$
\task[\textbf{B.}] $b=a$ or $b=-2 a$
\task[\textbf{C.}] $b=2 a$ or $b=-a$
\task[\textbf{D.}] $b=a / 2$ or $b=-a / 2$
\end{tasks}
\begin{answer}
\begin{align*}
\text{Let }b&=a\\
\left(\begin{array}{lll}0 & 0 & 1 \\ 0 & 1 & 0 \\ 1 & 0 & 0\end{array}\right)\left(\begin{array}{l}a \\ a \\ a\end{array}\right)&=\left(\begin{array}{l}a \\ a \\ a\end{array}\right)\text{ and } \left(\begin{array}{lll}0 & 1 & 1 \\ 1 & 0 & 1 \\ 1 & 1 & 0\end{array}\right)\left(\begin{array}{l}a \\ a \\ a\end{array}\right)=2\left(\begin{array}{l}a \\ a \\ a\end{array}\right)\\
\text{ Let }b&=-2 a\\
\left(\begin{array}{ccc}0 & 0 & 1 \\ 0 & 1 & 0 \\ 1 & 0 & 0\end{array}\right)\left(\begin{array}{c}a \\ -2 a \\ a\end{array}\right)&=\left(\begin{array}{c}a \\ -2 a \\ a\end{array}\right)\text{ and } \left(\begin{array}{ccc}0 & 1 & 1 \\ 1 & 0 & 1 \\ 1 & 1 & 0\end{array}\right)\left(\begin{array}{c}a \\ -2 a \\ a\end{array}\right)\\&=\left(\begin{array}{c}-a \\ 2 a \\ -a\end{array}\right)=-1\left(\begin{array}{c}a \\ -2 a \\ a\end{array}\right)
\intertext{For other combination above relation is not possible.}
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
\item The matrix $M=\left(\begin{array}{ccc}1 & 3 & 2 \\ 3 & -1 & 0 \\ 0 & 0 & 1\end{array}\right)$ satisfies the equation
{\exyear{NET/JRF(DEC-2016)}}
\begin{tasks}(2)
\task[\textbf{A.}] $M^{3}-M^{2}-10 M+12 I=0$
\task[\textbf{B.}] $M^{3}+M^{2}-12 M+10 I=0$
\task[\textbf{C.}] $M^{3}-M^{2}-10 M+10 I=0$
\task[\textbf{D.}] $M^{3}+M^{2}-10 M+10 I=0$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{The characteristic equation is}
&\left|\begin{array}{ccc}(1-\lambda) & 3 & 2 \\ 3 & (-1-\lambda) & 0 \\ 0 & 0 & (1-\lambda)\end{array}\right|=0\\
&\Rightarrow \quad(1-\lambda)(-1-\lambda)(1-\lambda)-(3) \times 3(1-\lambda)=0\\
&\Rightarrow \quad-\left(\lambda^{2}-1\right)(\lambda-1)-9(1-\lambda)=0 \\&\Rightarrow \lambda^{3}-10 \lambda-\lambda^{2}+10=0
\intertext{Thus the matrix $M$ satisfies the equation}
&M^{3}-M^{2}-10 M+10 I=0
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
\item Which of the following can not be the eigen values of a real $3 \times 3$ matrix
{\exyear{NET/JRF(JUNE-2017)}}
\begin{tasks}(4)
\task[\textbf{A.}] $2 i, 0,-2 i$
\task[\textbf{B.}] $1,1,1$
\task[\textbf{C.}] $e^{i \theta}, e^{-i \theta}, 1$
\task[\textbf{D.}] $i, 1,0$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{If the matrix is real, then complex eigenvalues always occur in complex-conjugate pairs. In option (D), if $i$ is an eigenvalue then $-i$ must also be an eigenvalue, but $-i$ is not in the given set; hence the set in option (D) cannot be the eigenvalues of a real $3 \times 3$ matrix.}
\end{align*}
So the correct answer is \textbf{Option (D)}
\end{answer}
\item Let $\sigma_{x}, \sigma_{y}, \sigma_{z}$ be the Pauli matrices and $x^{\prime} \sigma_{x}+y^{\prime} \sigma_{y}+z^{\prime} \sigma_{z}=\exp \left(\frac{i \theta \sigma_{z}}{2}\right) \times$
$$
\left[x \sigma_{x}+y \sigma_{y}+z \sigma_{z}\right] \exp \left(-\frac{i \theta \sigma_{z}}{2}\right)
$$
Then the coordinates are related as follows
{\exyear{NET/JRF(JUNE-2017)}}
\begin{tasks}(1)
\task[\textbf{A.}] $\left(\begin{array}{l}x^{\prime} \\ y^{\prime} \\ z^{\prime}\end{array}\right)=\left(\begin{array}{ccc}\cos \theta & -\sin \theta & 0 \\ \sin \theta & \cos \theta & 0 \\ 0 & 0 & 1\end{array}\right)\left(\begin{array}{l}x \\ y \\ z\end{array}\right)$
\task[\textbf{B.}] $\left(\begin{array}{l}x^{\prime} \\ y^{\prime} \\ z^{\prime}\end{array}\right)=\left(\begin{array}{ccc}\cos \theta & \sin \theta & 0 \\ -\sin \theta & \cos \theta & 0 \\ 0 & 0 & 1\end{array}\right)\left(\begin{array}{l}x \\ y \\ z\end{array}\right)$
\task[\textbf{C.}] $\left(\begin{array}{l}x^{\prime} \\ y^{\prime} \\ z^{\prime}\end{array}\right)=\left(\begin{array}{ccc}\cos \frac{\theta}{2} & \sin \frac{\theta}{2} & 0 \\ -\sin \frac{\theta}{2} & \cos \frac{\theta}{2} & 0 \\ 0 & 0 & 1\end{array}\right)\left(\begin{array}{l}x \\ y \\ z\end{array}\right)$
\task[\textbf{D.}] $\left(\begin{array}{l}x^{\prime} \\ y^{\prime} \\ z^{\prime}\end{array}\right)=\left(\begin{array}{ccc}\cos \frac{\theta}{2} & -\sin \frac{\theta}{2} & 0 \\ \sin \frac{\theta}{2} & \cos \frac{\theta}{2} & 0 \\ 0 & 0 & 1\end{array}\right)\left(\begin{array}{l}x \\ y \\ z\end{array}\right)$
\end{tasks}
\begin{answer}
\begin{align*}
\sigma_{x}&=\left(\begin{array}{ll}0 & 1 \\ 1 & 0\end{array}\right), \sigma_{y}=\left(\begin{array}{cc}0 & -i \\ i & 0\end{array}\right)\text{ and } \sigma_{z}=\left(\begin{array}{cc}1 & 0 \\ 0 & -1\end{array}\right)\\
\text{Hence, }x \sigma_{x}+y \sigma_{y}+z \sigma_{z}&=\left(\begin{array}{cc}z & x-i y \\ x+i y & -z\end{array}\right)\\
x^{\prime} \sigma_{x}+y^{\prime} \sigma_{y}+z^{\prime} \sigma_{z}&=\left(\begin{array}{cc}z^{\prime} & x^{\prime}-i y^{\prime} \\ x^{\prime}+i y^{\prime} & -z^{\prime}\end{array}\right)\\
\exp \left(\frac{i \theta \sigma_{z}}{2}\right)&=\left(\begin{array}{cc}e^{i \theta / 2} & 0 \\ 0 & e^{-i \theta / 2}\end{array}\right)\text{ and } \exp \left(\frac{-i \theta \sigma_{z}}{2}\right)=\left(\begin{array}{cc}e^{-i \theta / 2} & 0 \\ 0 & e^{i \theta / 2}\end{array}\right)\\
\text{Hence, }\left(\begin{array}{cc}z^{\prime} & x^{\prime}-i y^{\prime} \\ x^{\prime}+i y^{\prime} & -z^{\prime}\end{array}\right)&=\left(\begin{array}{cc}e^{i \theta / 2} & 0 \\ 0 & e^{-i \theta / 2}\end{array}\right)\left(\begin{array}{cc}z & x-i y \\ x+i y & -z\end{array}\right)\left(\begin{array}{cc}e^{-i \theta / 2} & 0 \\ 0 & e^{i \theta / 2}\end{array}\right)\\
\Rightarrow\left(\begin{array}{cc}z^{\prime} & x^{\prime}-i y^{\prime} \\ x^{\prime}+i y^{\prime} & -z^{\prime}\end{array}\right)&=\left(\begin{array}{cc}z & e^{i \theta}(x-i y) \\ e^{-i \theta}(x+i y) & -z\end{array}\right)\\
\text{Hence, }z^{\prime}&=z\text{ and }x^{\prime}-i y^{\prime}=e^{i \theta}(x-i y)\\
\text{Thus }x^{\prime}-i y^{\prime}&=[(\cos \theta) x+(\sin \theta) y]-i[(\cos \theta) y-(\sin \theta) x]\\
\text{Thus }x^{\prime}&=(\cos \theta) x+(\sin \theta) y\\
\text{And }y^{\prime}&=(-\sin \theta) x+(\cos \theta) y\\
\text{Thus, }\left(\begin{array}{l}x^{\prime} \\ y^{\prime} \\ z^{\prime}\end{array}\right)&=\left(\begin{array}{ccc}\cos \theta & \sin \theta & 0 \\ -\sin \theta & \cos \theta & 0 \\ 0 & 0 & 1\end{array}\right)\left(\begin{array}{l}x \\ y \\ z\end{array}\right)
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
\item Let $A$ be a non-singular $3 \times 3$ matrix, the columns of which are denoted by the vectors $\vec{a}, \vec{b}$ and $\vec{c}$, respectively. Similarly, $\vec{u}, \vec{v}$ and $\vec{w}$ denote the vectors that form the corresponding columns of $\left(A^{T}\right)^{-1}$. Which of the following is true?
{\exyear{NET/JRF(DEC-2017)}}
\begin{tasks}(2)
\task[\textbf{A.}] $\vec{u} \cdot \vec{a}=0, \vec{u} \cdot \vec{b}=0, \vec{u} \cdot \vec{c}=1$
\task[\textbf{B.}] $\vec{u} \cdot \vec{a}=0, \vec{u} \cdot \vec{b}=1, \vec{u} \cdot \vec{c}=0$
\task[\textbf{C.}] $\vec{u} \cdot \vec{a}=1, \vec{u} \cdot \vec{b}=0, \vec{u} \cdot \vec{c}=0$
\task[\textbf{D.}] $\vec{u} \cdot \vec{a}=0, \vec{u} \cdot \vec{b}=0, \vec{u} \cdot \vec{c}=0$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{We can take any $3 \times 3$ non singular matrix in order to avoid long calculation.}
\text{Take }A&=\left[\begin{array}{ccc}1 & 0 & 0 \\ 0 & 2 & 0 \\ 0 & 0 & 3 \\ \downarrow & \downarrow & \downarrow \\ \vec{a} & \vec{b} & \vec{c}\end{array}\right] \Rightarrow\left(A^{T}\right)^{-1}=\left[\begin{array}{ccc}1 & 0 & 0 \\ 0 & 1 / 2 & 0 \\ 0 & 0 & 1 / 3 \\ \downarrow & \downarrow & \downarrow \\ \vec{u} & \vec{v} & \vec{w}\end{array}\right]
\intertext{We see that}
\vec{u} \cdot \vec{a}&=1.1+0.0+0.0=1\\
\vec{u} \cdot \vec{b}&=1.0+0.2+0.0=0\\
\vec{u} \cdot \vec{C}&=1.0+0.0+0.3=0
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
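\textbf{Note:} The same result follows without choosing a particular matrix: since $\left(A^{T}\right)^{-1}=\left(A^{-1}\right)^{T}$, the columns $\vec{u}, \vec{v}, \vec{w}$ of $\left(A^{T}\right)^{-1}$ are the rows of $A^{-1}$. Hence $\vec{u} \cdot \vec{a}, \vec{u} \cdot \vec{b}, \vec{u} \cdot \vec{c}$ are the entries of the first row of $A^{-1} A=I$, namely $1,0,0$.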
\item Consider the matrix equation
$$
\left(\begin{array}{llc}
1 & 1 & 1 \\
1 & 2 & 3 \\
2 & b & 2 c
\end{array}\right)\left(\begin{array}{l}
x \\
y \\
z
\end{array}\right)=\left(\begin{array}{l}
0 \\
0 \\
0
\end{array}\right)
$$
The condition for existence of a non-trivial solution and the corresponding normalised solution (upto a sign) is
{\exyear{NET/JRF(DEC-2017)}}
\begin{tasks}(2)
\task[\textbf{A.}] $b=2 c$ and $(x, y, z)=\frac{1}{\sqrt{6}}(1,-2,1)$
\task[\textbf{B.}] $c=2 b$ and $(x, y, z)=\frac{1}{\sqrt{6}}(1,1,-2)$
\task[\textbf{C.}] $c=b+1$ and $(x, y, z)=\frac{1}{\sqrt{6}}(2,-1,-1)$
\task[\textbf{D.}] $b=c+1$ and $(x, y, z)=\frac{1}{\sqrt{6}}(1,-2,1)$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{We know that the matrix equation $A X=0$, where $A$ is the given matrix and $X$ is a column vector, has a non-trivial solution if and only if $|A|=0$.}
\left|\begin{array}{lll}1 & 1 & 1 \\ 1 & 2 & 3 \\ 2 & b & 2 c\end{array}\right|&=0 \Rightarrow 4 c-3 b-2 c+6+b-4=0\\
\Rightarrow 2 c-2 b+2&=0 \Rightarrow b=c+1
\intertext{Only option (D) has the condition $b=c+1$, so no further calculation is needed; indeed $(x, y, z)=\frac{1}{\sqrt{6}}(1,-2,1)$ satisfies the first two equations: $1-2+1=0$ and $1-4+3=0$.}
\end{align*}
So the correct answer is \textbf{Option (D)}
\end{answer}
\item Which of the following statements is true for a $3 \times 3$ real orthogonal matrix with determinant $+1 ?$
{\exyear{NET/JRF(JUNE-2018)}}
\begin{tasks}(1)
\task[\textbf{A.}] The modulus of each of its eigenvalues need not be 1, but their product must be 1
\task[\textbf{B.}] At least one of its eigenvalues is $+1$
\task[\textbf{C.}] All of its eigenvalues must be real
\task[\textbf{D.}] None of its eigenvalues must be real
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{The characteristic equation of any $3 \times 3$ real matrix is of the form $\lambda^{3}+a \lambda^{2}+b \lambda+c=0$ with real coefficients, which implies that at least one of the eigenvalues must be real. It is a known fact that the modulus of each eigenvalue of an orthogonal matrix is 1.}
\intertext{If all eigenvalues of $3 \times 3$ orthogonal matrix are real then only possibilities for eigenvalues are}
\lambda_{1}&=1, \lambda_{2}=1\text{ and }\lambda_{3}=1\text{ or }\\\lambda_{1}&=-1, \lambda_{2}=-1, \lambda_{3}=1\text{ or }\\\lambda_{1}&=-1, \lambda_{2}=1, \lambda_{3}=-1\\
\intertext{Thus we see that at least one eigenvalue is $+1$. Suppose one eigenvalues is real and other two eigenvalues are complex conjugates. Now}
\lambda_{1} \lambda_{2} \lambda_{3}&=1\\
\Rightarrow \lambda_{1}(a+i b)(a-i b)&=1 \Rightarrow \lambda_{1}\left(a^{2}+b^{2}\right)=1\\
\text{Since each eigenvalue has modulus }1,\ a^{2}+b^{2}=1,\text{ hence }\lambda_{1}&=1
\intertext{In this case also we see that at least one eigenvalue must be $+1$}
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
\item One of the eigenvalues of the matrix $e^{A}$ is $e^{a}$, where $A=\left(\begin{array}{ccc}a & 0 & 0 \\ 0 & 0 & a \\ 0 & a & 0\end{array}\right)$. The product of the other two eigenvalues of $e^{A}$ is
{\exyear{NET/JRF(DEC-2018)}}
\begin{tasks}(4)
\task[\textbf{A.}] $e^{2 a}$
\task[\textbf{B.}] $e^{-a}$
\task[\textbf{C.}] $e^{-2 a}$
\task[\textbf{D.}] 1
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{The eigenvalues of $A$ are $a, a$ and $-a$, so the eigenvalues of $e^{A}$ are $e^{a}, e^{a}$ and $e^{-a}$. Given that one eigenvalue is $e^{a}$, the product of the other two is $e^{a} \cdot e^{-a}=1$.}
\intertext{Alternatively,}
e^{\text {TraceA }}&=e^{\lambda_{1}+\lambda_{2}+\lambda_{3}}=\operatorname{det} e^{A}\\
\Rightarrow e^{\lambda_{1}} \cdot e^{\lambda_{2}+\lambda_{3}}&=\operatorname{det} e^{A} \Rightarrow e^{a} \cdot e^{\lambda_{2}} \cdot e^{\lambda_{3}}=e^{a}\\
\Rightarrow e^{\lambda_{2}} \cdot e^{\lambda_{3}}&=1
\end{align*}
So the correct answer is \textbf{Option (D)}
\end{answer}
\item A $4 \times 4$ complex matrix $A$ satisfies the relation $A^{\dagger} A=4 I$, where $I$ is the $4 \times 4$ identity matrix. The number of independent real parameters of $A$ is
{\exyear{NET/JRF(DEC-2018)}}
\begin{tasks}(4)
\task[\textbf{A.}] 32
\task[\textbf{B.}] 10
\task[\textbf{C.}] 12
\task[\textbf{D.}] 16
\end{tasks}
\begin{answer}
\begin{align*}
\text{Given that }A^{\dagger} A&=4 I \Rightarrow \frac{1}{4}\left(A^{\dagger} A\right)=I\\
\text{Let }A&=2 B\text{ then}\\
A^{\dagger}&=2 B^{\dagger}\\
\text{Therefore, }B^{\dagger} B&=I
\intertext{This shows that $B$ is a unitary matrix. The number of independent real parameters needed to specify an $n \times n$ unitary matrix is $n^{2}$. Thus, the number of independent parameter needed to specify matrix $B$ is $4^{2}=16$.}
\intertext{Now, the number of independent parameters needed to specify matrix $A$ is same as that of matrix $B$.}
\intertext{Thus the number of independent parameters needed to specify $A$ is 16}
\end{align*}
So the correct answer is \textbf{Option (D)}
\end{answer}
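\textbf{Note:} The count $n^{2}$ used above can be obtained directly: a complex $n \times n$ matrix has $2 n^{2}$ real parameters, and the unitarity condition $B^{\dagger} B=I$ imposes $n$ real constraints (diagonal entries) plus $n(n-1) / 2$ complex, i.e. $n(n-1)$ real, constraints (off-diagonal entries), leaving $2 n^{2}-n-n(n-1)=n^{2}$ independent real parameters.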
\item The elements of a $3 \times 3$ matrix $A$ are the products of its row and column indices, $A_{i j}=i j$ (where $i, j=1,2,3$). The eigenvalues of $A$ are
{\exyear{NET/JRF(JUNE-2019)}}
\begin{tasks}(4)
\task[\textbf{A.}] $(7,7,0)$
\task[\textbf{B.}] $(7,4,3)$
\task[\textbf{C.}] $(14,0,0)$
\task[\textbf{D.}] $\left(\frac{14}{3}, \frac{14}{3}, \frac{14}{3}\right)$
\end{tasks}
\begin{answer}
\begin{align*}
\text{Since }A_{i j}&=i j \quad(\text{where } i, j=1,2,3),\\
\text{We obtain the matrix }A&=\left[\begin{array}{lll}1 & 2 & 3 \\ 2 & 4 & 6 \\ 3 & 6 & 9\end{array}\right]\\
\text{For calculating eigen values }&\left|\begin{array}{ccc}1-\lambda & 2 & 3 \\ 2 & 4-\lambda & 6 \\ 3 & 6 & 9-\lambda\end{array}\right|=0
\intertext{$(1-\lambda)[(4-\lambda)(9-\lambda)-36]-2[2(9-\lambda)-18]+3(12-3(4-\lambda))=0$}
\intertext{$\Rightarrow-\lambda^{3}+\lambda^{2} \cdot 14=0 \Rightarrow \lambda^{2}(-\lambda+14)=0 \quad \Rightarrow \lambda=0,0,14$}
\intertext{Also, since the rows of $A$ are proportional ($\operatorname{rank} A=1$), we can directly write $(0,0,\operatorname{Tr} A)$ as the eigenvalues.}
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
\item The operator $A$ has a matrix representation $\left(\begin{array}{ll}2 & 1 \\ 1 & 2\end{array}\right)$ in the basis spanned by $\left(\begin{array}{l}1 \\ 0\end{array}\right)$ and $\left(\begin{array}{l}0 \\ 1\end{array}\right) .$ In another basis spanned by $\frac{1}{\sqrt{2}}\left(\begin{array}{l}1 \\ 1\end{array}\right)$ and $\frac{1}{\sqrt{2}}\left(\begin{array}{c}1 \\ -1\end{array}\right)$, the matrix representation of $A$ is
{\exyear{NET/JRF(JUNE-2019)}}
\begin{tasks}(4)
\task[\textbf{A.}] $\left(\begin{array}{ll}2 & 0 \\ 0 & 2\end{array}\right)$
\task[\textbf{B.}] $\left(\begin{array}{ll}3 & 0 \\ 0 & 1\end{array}\right)$
\task[\textbf{C.}] $\left(\begin{array}{ll}3 & 1 \\ 0 & 1\end{array}\right)$
\task[\textbf{D.}] $\left(\begin{array}{ll}3 & 0 \\ 1 & 1\end{array}\right)$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{The given vectors $\frac{1}{\sqrt{2}}\left(\begin{array}{l}1 \\ 1\end{array}\right)$ and $\frac{1}{\sqrt{2}}\left(\begin{array}{c}1 \\ -1\end{array}\right)$ are eigenvectors of the operator $A$, with eigenvalues $3$ and $1$ respectively.}
\intertext{Hence in this basis matrix $A$ is represented by diagonal matrix $D$ consisting of eigenvalues of matrix $A$ on the main diagonal. Therefore,}
D&=\left[\begin{array}{ll}3 & 0 \\ 0 & 1\end{array}\right]
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
\item If the rank of an $n \times n$ matrix $A$ is $m$, where $m$ and $n$ are positive integers with $1 \leq m \leq n$, then the rank of the matrix $A^{2}$ is
{\exyear{NET/JRF(DEC-2019)}}
\begin{tasks}(4)
\task[\textbf{A.}] $m$
\task[\textbf{B.}] $m-1$
\task[\textbf{C.}] $2 \mathrm{~m}$
\task[\textbf{D.}] $m-2$
\end{tasks}
\begin{answer}
\begin{align*}
\text{Consider, for example, }A=\sigma_{1}&=\left[\begin{array}{ll}0 & 1 \\ 1 & 0\end{array}\right], \quad n=2,\ m=\operatorname{rank}(A)=2 \quad(1 \leq m \leq n)\\
A^{2}&=\left[\begin{array}{ll}1 & 0 \\ 0 & 1\end{array}\right], \quad \operatorname{rank}\left(A^{2}\right)=2=m
\end{align*}
So the correct answer is \textbf{Option (A)}
\end{answer}
\item The eigenvalues of the $3 \times 3$ matrix $M=\left(\begin{array}{lll}a^{2} & a b & a c \\ a b & b^{2} & b c \\ a c & b c & c^{2}\end{array}\right)$ are
{\exyear{NET/JRF(JUNE-2020)}}
\begin{tasks}(2)
\task[\textbf{A.}] $a^{2}+b^{2}+c^{2}, 0,0$
\task[\textbf{B.}] $b^{2}+c^{2}, a^{2}, 0$
\task[\textbf{C.}] $a^{2}+b^{2}, c^{2}, 0$
\task[\textbf{D.}] $a^{2}+c^{2}, b^{2}, 0$
\end{tasks}
\begin{answer}
\begin{align*}
M&=\left(\begin{array}{lll}a^{2} & a b & a c \\ a b & b^{2} & b c \\ a c & b c & c^{2}\end{array}\right)
\intertext{To make it simple, Let $a=1, b=1, c=1$ so $\quad M=\left[\begin{array}{lll}1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1\end{array}\right]_{3 \times 3}$}
\Rightarrow \lambda&=3,0,0
\end{align*}
So the correct answer is \textbf{Option (A)}
\end{answer}
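\textbf{Note:} The special choice $a=b=c=1$ is only a check; in general $M=v v^{T}$ with $v=(a, b, c)^{T}$, so $M v=\left(a^{2}+b^{2}+c^{2}\right) v$ and $\operatorname{rank}(M)=1$, giving the eigenvalues $a^{2}+b^{2}+c^{2}, 0,0$ for arbitrary $a, b, c$.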
\end{enumerate}
\newpage
\begin{abox}
Problem set-2
\end{abox}
\begin{enumerate}[label=\color{ocre}\textbf{\arabic*.}]
\item The eigenvalues of the matrix $\left(\begin{array}{lll}2 & 3 & 0 \\ 3 & 2 & 0 \\ 0 & 0 & 1\end{array}\right)$ are
{\exyear{GATE 2010}}
\begin{tasks}(4)
\task[\textbf{A.}] $5,2,-2$
\task[\textbf{B.}] $-5,-1,-1$
\task[\textbf{C.}] $5,1,-1$
\task[\textbf{D.}] $-5,1,1$
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{The characteristic equation of the matrix $A,|A-\lambda I|=0$}
\Rightarrow|A-\lambda I|=\left|\begin{array}{ccc}2-\lambda & 3 & 0 \\ 3 & 2-\lambda & 0 \\ 0 & 0 & 1-\lambda\end{array}\right|&=0 \\\Rightarrow(1-\lambda)\left[(2-\lambda)^{2}-9\right]&=0 \\\Rightarrow \lambda=1,2-\lambda&=\pm 3\\
\Rightarrow \lambda&=5,1,-1
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
\item Two matrices $A$ and $B$ are said to be similar if $B=P^{-1} A P$ for some invertible matrix $P$.Which of the following statements is NOT TRUE?
{\exyear{GATE 2011}}
\begin{tasks}(2)
\task[\textbf{A.}] Det $A=\operatorname{Det} B$
\task[\textbf{B.}] Trace of $A=$ Trace of $B$
\task[\textbf{C.}] $A$ and $B$ have the same eigenvectors
\task[\textbf{D.}] $A$ and $B$ have the same eigenvalues
\end{tasks}
\begin{answer}
\begin{align*}
\intertext{If $A$ and $B$ be square matrices of the same type and if $P$ be invertible matrix, then matrices $A$ and $B=P^{-1} A P$ have the same characteristic roots.}
\intertext{Then, $B-\lambda I=P^{-1} A P-P^{-1} \lambda I P=P^{-1}(A-\lambda I) P$ where $I$ is identity matrix.}
|B-\lambda I|&=\left|P^{-1}(A-\lambda I) P\right|=\left|P^{-1}\right|\left|A-\lambda I\right|\left|P\right|\\&=\left|A-\lambda I\right|\left|P^{-1}\right|\left|P\right|=\left|A-\lambda I\right|\left|P^{-1} P\right|\\&=|A-\lambda I|
\intertext{Thus, the matrices $A$ and $B\left(=P^{-1} A P\right)$ have the same characteristic equation and hence the same eigenvalues; consequently they also have the same trace (sum of eigenvalues) and the same determinant (product of eigenvalues). However, similar matrices need not have the same eigenvectors, so statement (C) is NOT TRUE.}
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
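\textbf{Note:} The eigenvectors, on the other hand, do change under a similarity transformation: if $A v=\lambda v$, then $B\left(P^{-1} v\right)=P^{-1} A P\, P^{-1} v=\lambda\left(P^{-1} v\right)$, so the eigenvectors of $B$ are $P^{-1} v$, which in general differ from the eigenvectors $v$ of $A$.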
\item A $3 \times 3$ matrix has elements such that its trace is 11 and its determinant is 36 . The eigenvalues of the matrix are all known to be positive integers. The largest eigenvalues of the matrix is
{\exyear{GATE 2011}}
\begin{tasks}(4)
\task[\textbf{A.}] 18
\task[\textbf{B.}] 12
\task[\textbf{C.}] 9
\task[\textbf{D.}] 6
\end{tasks}
\begin{answer}
We know that for any matrix\\
1. The product of the eigenvalues is equal to the determinant of that matrix.\\
2. $\lambda_{1}+\lambda_{2}+\lambda_{3}+\ldots =$ Trace of the matrix\\
Here $\lambda_{1}+\lambda_{2}+\lambda_{3}=11$ and $\lambda_{1} \lambda_{2} \lambda_{3}=36$ with all $\lambda_{i}$ positive integers, which gives $\left\{\lambda_{1}, \lambda_{2}, \lambda_{3}\right\}=\{2,3,6\}$. Hence, the largest eigenvalue of the matrix is $6$.\\\\
So the correct answer is \textbf{Option (D)}
\end{answer}
\item The number of independent components of the symmetric tensor $A_{i j}$ with indices $i, j=1,2,3$ is
{\exyear{GATE 2012}}
\begin{tasks}(4)
\task[\textbf{A.}] 1
\task[\textbf{B.}] 3
\task[\textbf{C.}] 6
\task[\textbf{D.}] 9
\end{tasks}
\begin{answer}
\begin{align*}
\text{ For symmetric tensor, }A_{i j}&=\left[\begin{array}{ccc}A_{11} & A_{12} & A_{13} \\ A_{21} & A_{22} & A_{23} \\ A_{31} & A_{32} & A_{33}\end{array}\right]\\
\because A_{12}=A_{21}, \quad A_{23}&=A_{32}, A_{13}=A_{31},\text{ hence there are six independent components.}
\end{align*}
So the correct answer is \textbf{Option (C)}
\end{answer}
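\textbf{Note:} More generally, a symmetric rank-2 tensor in $n$ dimensions has $n(n+1) / 2$ independent components ($n$ diagonal plus $n(n-1) / 2$ off-diagonal), which gives $6$ for $n=3$.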
\item The eigenvalues of the matrix $\left(\begin{array}{lll}0 & 1 & 0 \\ 1 & 0 & 1 \\ 0 & 1 & 0\end{array}\right)$ are
{\exyear{GATE 2012}}
\begin{tasks}(4)
\task[\textbf{A.}] $0,1,1$
\task[\textbf{B.}] $0,-\sqrt{2}, \sqrt{2}$
\task[\textbf{C.}] $\frac{1}{\sqrt{2}}, \frac{1}{\sqrt{2}}, 0$
\task[\textbf{D.}] $\sqrt{2}, \sqrt{2}, 0$
\end{tasks}
\begin{answer}
\begin{align*}
|A-\lambda I|&=0 \Rightarrow\left|\begin{array}{ccc}-\lambda & 1 & 0 \\ 1 & -\lambda & 1 \\ 0 & 1 & -\lambda\end{array}\right|\\&=0 \Rightarrow-\lambda\left(\lambda^{2}-1\right)+\lambda=0 \Rightarrow \lambda\\&=0,+\sqrt{2},-\sqrt{2}
\end{align*}
So the correct answer is \textbf{Option (B)}
\end{answer}
\item The degenerate eigenvalue of the matrix $\left[\begin{array}{ccc}4 & -1 & -1 \\ -1 & 4 & -1 \\ -1 & -1 & 4\end{array}\right]$ is (your answer should be an
integer)---
{\exyear{GATE 2013}}
\begin{answer}
\begin{align*}
\left|\begin{array}{ccc}4-\lambda & -1 & -1 \\ -1 & 4-\lambda & -1 \\ -1 & -1 & 4-\lambda\end{array}\right|&=0\\
\Rightarrow(2-\lambda)\left|\begin{array}{ccc}1 & 1 & 1 \\ -1 & 4-\lambda & -1 \\ -1 & -1 & 4-\lambda\end{array}\right|&=0 \quad\text{(adding all three rows to the first row)}\\
\Rightarrow(2-\lambda)(5-\lambda)^{2}&=0 \Rightarrow \lambda=2,5,5
\end{align*}
So the degenerate eigenvalue is \textbf{5}.
\end{answer}
\item The matrix
$$
A=\frac{1}{\sqrt{3}}\left[\begin{array}{cc}
1 & 1+i \\
1-i & -1
\end{array}\right] \text { is }
$$
{\exyear{GATE 2014}}
\begin{tasks}(4)
\task[\textbf{A.}] Orthogonal
\task[\textbf{B.}] Symmetric
\task[\textbf{C.}] Anti-symmetric
\task[\textbf{D.}] Unitary
\end{tasks}
\begin{answer}
\begin{align*}
A^{\dagger} A&=\frac{1}{3}\left[\begin{array}{cc}1 & 1+i \\ 1-i & -1\end{array}\right]\left[\begin{array}{cc}1 & 1+i \\ 1-i & -1\end{array}\right]=\frac{1}{3}\left[\begin{array}{cc}3 & 0 \\ 0 & 3\end{array}\right]=I
\end{align*}
Hence $A^{\dagger} A=I$, so $A$ is unitary.\\
So the correct answer is \textbf{Option (D)}
\end{answer}
\item Let $X$ be a column vector of dimension $n>1$ with at least one non-zero entry. The number of non-zero eigenvalues of the matrix $M=X X^{T}$ is
{\exyear{GATE 2017}}
\begin{tasks}(4)
\task[\textbf{A.}] 0
\task[\textbf{B.}] $n$
\task[\textbf{C.}] 1
\task[\textbf{D.}] $n-1$
\end{tasks}
\begin{answer}
\begin{align*}
\text{ Let }X&=\left[\begin{array}{l}0 \\ 0 \\ a \\ 0 \\ 0 \\ 0\end{array}\right],\text{ then} X^{T}=\left[\begin{array}{llll}0 & 0 & a \ldots & 0\end{array}\right]
\intertext{Here, $X$ is an $n \times 1$ column vector with the entry in the $i$ th row equal to $a$. $X^{T}$ is a row vector having the entry in the $i$ th column equal to $a$. Then, $X X^{T}$ is an $n \times n$ matrix whose only non-zero entry is $a^{2}$, in the $i$ th row and $i$ th column.}
\end{align*}
Hence
\begin{figure}[H]
\centering
\includegraphics[height=4cm,width=6cm]{diagram-20210823(3)-crop}
\end{figure}
Since this matrix is diagonal, its eigenvalues are $a^{2}, 0,0 \ldots \ldots 0 .$ Hence, the number of non zero eigenvalues of the matrix $X X^{T}$ is 1 .\\\\
So the correct answer is \textbf{Option (C)}
\end{answer}
\item The eigenvalues of a Hermitian matrix are all
{\exyear{GATE 2018}}
\begin{tasks}(4)
\task[\textbf{A.}] Real
\task[\textbf{B.}] Imaginary
\task[\textbf{C.}] Of modulus one
\task[\textbf{D.}] Real and positive
\end{tasks}
\begin{answer}
The eigenvalues of a Hermitian matrix are always real.\\\\
So the correct answer is \textbf{Option (A)}
\end{answer}
\item During a rotation, vectors along the axis of rotation remain unchanged. For the rotation matrix $\left(\begin{array}{ccc}0 & 1 & 0 \\ 0 & 0 & -1 \\ -1 & 0 & 0\end{array}\right)$, the vector along the axis of rotation is
{\exyear{GATE 2019}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{1}{3}(2 \hat{i}-\hat{j}+2 \hat{k})$
\task[\textbf{B.}] $\frac{1}{\sqrt{3}}(\hat{i}+\hat{j}-\hat{k})$
\task[\textbf{C.}] $\frac{1}{\sqrt{3}}(\hat{i}-\hat{j}-\hat{k})$
\task[\textbf{D.}] $\frac{1}{3}(2 \hat{i}+2 \hat{j}-\hat{k})$
\end{tasks}
\begin{answer}
The axis of rotation is the eigenvector of the rotation matrix with eigenvalue $1$. For $\vec{v}=\frac{1}{\sqrt{3}}(\hat{i}+\hat{j}-\hat{k})$,
$$
\left(\begin{array}{ccc}0 & 1 & 0 \\ 0 & 0 & -1 \\ -1 & 0 & 0\end{array}\right) \frac{1}{\sqrt{3}}\left(\begin{array}{c}1 \\ 1 \\ -1\end{array}\right)=\frac{1}{\sqrt{3}}\left(\begin{array}{c}1 \\ 1 \\ -1\end{array}\right),
$$
so this vector is left unchanged by the rotation.\\\\
So the correct answer is \textbf{Option (B)}
\end{answer}
\end{enumerate}
\colorlet{ocre1}{ocre!70!}
\colorlet{ocrel}{ocre!30!}
\setlength\arrayrulewidth{1pt}
\begin{table}[H]
\centering
\arrayrulecolor{ocre}
\begin{tabular}{|p{1.5cm}|p{1.5cm}||p{1.5cm}|p{1.5cm}|}
\hline
\multicolumn{4}{|c|}{\textbf{Answer key}}\\\hline\hline
\rowcolor{ocrel}Q.No.&Answer&Q.No.&Answer\\\hline
1&\textbf{C} &2&\textbf{C}\\\hline
3&\textbf{D} &4&\textbf{C} \\\hline
5&\textbf{B} &6&\textbf{5} \\\hline
7&\textbf{D}&8&\textbf{C}\\\hline
9&\textbf{A}&10&\textbf{B}\\\hline
\end{tabular}
\end{table}
|
{"hexsha": "bd318aade5e56de9b7311765187ab3ec90f4e98c", "size": 32623, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "CSIR- Mathematical Physics/chapter/Matrix Problem set Solutions.tex", "max_stars_repo_name": "archives-futuring/CSIR-Physics-Study-Material", "max_stars_repo_head_hexsha": "689cff91895fec36b4bb0add178f13a0f68648ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CSIR- Mathematical Physics/chapter/Matrix Problem set Solutions.tex", "max_issues_repo_name": "archives-futuring/CSIR-Physics-Study-Material", "max_issues_repo_head_hexsha": "689cff91895fec36b4bb0add178f13a0f68648ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CSIR- Mathematical Physics/chapter/Matrix Problem set Solutions.tex", "max_forks_repo_name": "archives-futuring/CSIR-Physics-Study-Material", "max_forks_repo_head_hexsha": "689cff91895fec36b4bb0add178f13a0f68648ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.1064189189, "max_line_length": 449, "alphanum_fraction": 0.604512154, "num_tokens": 13980}
|
"""
Generate synthetic resistivity models randomly.
References
----------
https://stackoverflow.com/questions/44865023/circular-masking-an-image-in-python-using-numpy-arrays
https://docs.scipy.org/doc/numpy-1.16.0/reference/routines.random.html
https://numpy.org/doc/1.18/reference/random/index.html
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html#scipy.stats.truncnorm
https://www.sicara.ai/blog/2019-01-28-how-computer-generate-random-numbers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import generator_stop
from __future__ import print_function
import numpy as np
from SimPEG.Utils.ModelBuilder import getIndicesBlock
from SimPEG.Utils.ModelBuilder import getIndicesSphere
from scipy.signal import convolve2d
from scipy.stats import norm
from scipy.stats import truncnorm
from scipy.stats import uniform
from erinn.utils.io_utils import read_config_file
# TODO: Maybe use Generator instead of RandomState
# TODO: Check random property. Make sure every execution is different
def rand_num_shape(num_shape):
"""Randomly samples an integer from the space defined in the num_shape dictionary.
Parameters
----------
num_shape : dict
num_`shape`, the `shape` can be a circle, a rectangle or other shape.
Returns
-------
num : int
A randomly sampled integer.
"""
if num_shape['type'] == 'list':
return int(np.random.RandomState().choice(num_shape['value'], 1))
elif num_shape['type'] == 'range':
num = np.arange(*num_shape['value'])
return int(np.random.RandomState().choice(num, 1))
else:
raise ValueError(f"{num_shape['type']} is an invalid type.")
def rand_rect(x_bound, y_bound, w_range, h_range, mesh, num_rect):
stack_rect = []
for _ in range(num_rect):
width = np.random.RandomState().uniform(w_range[0], w_range[1])
height = np.random.RandomState().uniform(h_range[0], h_range[1])
x_min = np.random.RandomState().uniform(x_bound[0], x_bound[1] - width)
y_min = np.random.RandomState().uniform(y_bound[0], y_bound[1] - height)
x_max = x_min + width
y_max = y_min + height
block_idx = getIndicesBlock([x_min, y_min],
[x_max, y_max],
mesh.gridCC)[0] # return tuple. 1st is 1D integer array
stack_rect.append((block_idx, 'rect'))
return stack_rect
def rand_circle(center_x_bound, center_y_bound, radius_bound, mesh, num_circle):
stack_circle = []
for _ in range(num_circle):
center = [np.random.RandomState().uniform(center_x_bound[0], center_x_bound[1]),
np.random.RandomState().uniform(center_y_bound[0], center_y_bound[1])]
radius = np.random.RandomState().uniform(radius_bound[0], radius_bound[1])
circle_idx = getIndicesSphere(center, radius, mesh.gridCC) # return 1D bool array
stack_circle.append((circle_idx, 'circle'))
return stack_circle
def smooth2d(arr: np.ndarray, kernel_shape: tuple) -> np.ndarray:
"""Smooth 2d array using moving average.
Parameters
----------
arr : array_like
kernel_shape : sequence of ints
Returns
-------
arr : numpy.ndarray
"""
arr = np.asarray(arr)
if not arr.ndim == 2:
raise ValueError('The array to be smoothed must be a 2D array.')
if len(kernel_shape) != 2:
raise ValueError('The kernel_shape must be an integer sequence of 2 elements.')
arr = convolve2d(arr, np.ones(kernel_shape), mode='same')
normalize_matrix = convolve2d(np.ones(arr.shape),
np.ones(kernel_shape), mode='same')
return arr / normalize_matrix
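# A minimal usage sketch for smooth2d (the toy input below is illustrative, not part
# of the original module): a constant array stays constant after box smoothing,
# because the kernel sum is divided element-wise by normalize_matrix.
#     >>> smooth2d(np.ones((4, 4)), (3, 3))
#     array([[1., 1., 1., 1.],
#            [1., 1., 1., 1.],
#            [1., 1., 1., 1.],
#            [1., 1., 1., 1.]])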
def get_pd(**kwargs):
"""
Get probability distribution.
Returns
-------
pd : scipy.stats.rv_frozen
Desired probability distribution.
Other Parameters
----------------
use_hidden : bool
pdf : str
scale : str
a : float
b : float
hidden_for_a : list or tuple
hidden_for_b : list or tuple
hidden_pdf : str
"""
allowed_kwargs = {'use_hidden', 'pdf', 'scale', 'a', 'b', 'hidden_for_a', 'hidden_for_b', 'hidden_pdf'}
for key in allowed_kwargs:
if key not in kwargs:
raise ValueError('You did not input enough or correct keyword argument.')
use_hidden = kwargs['use_hidden']
pdf = kwargs['pdf']
scale = kwargs['scale']
a = kwargs['a']
b = kwargs['b']
hidden_for_a = kwargs['hidden_for_a']
hidden_for_b = kwargs['hidden_for_b']
hidden_pdf = kwargs['hidden_pdf']
if use_hidden:
if pdf == 'uniform':
pd = RandUniform(hidden_for_a, hidden_for_b, scale, hidden_pdf)
elif pdf == 'normal':
if scale == 'linear':
pd = RandTruncnorm(hidden_for_a, hidden_for_b,
0, np.inf, scale, hidden_pdf)
elif scale == 'log10':
pd = RandTruncnorm(hidden_for_a, hidden_for_b,
-np.inf, np.inf, scale, hidden_pdf)
else:
raise ValueError('You did not input enough or correct keyword argument.')
else:
raise ValueError('You did not input enough or correct keyword argument.')
else:
if pdf == 'uniform':
pd = uniform(a, b - a)
elif pdf == 'normal':
if scale == 'linear':
_a = (0 - a) / b
_b = (np.inf - a) / b
pd = truncnorm(_a, _b, loc=a, scale=b)
elif scale == 'log10':
pd = norm(loc=a, scale=b)
else:
raise ValueError('You did not input enough or correct keyword argument.')
else:
raise ValueError('You did not input enough or correct keyword argument.')
return pd
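# A hedged usage sketch for get_pd (the numeric values below are illustrative
# assumptions, not taken from any config file): a fixed distribution for
# log10(resistivity) ~ Normal(mean=2.0, std=0.5), with the hidden-variable
# machinery disabled. Every key must still be passed, so the hidden_* entries
# are simply None here.
#     pd_example = get_pd(use_hidden=False, pdf='normal', scale='log10',
#                         a=2.0, b=0.5,
#                         hidden_for_a=None, hidden_for_b=None, hidden_pdf=None)
#     log10_samples = pd_example.rvs(size=5)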
def get_rvs(**kwargs):
"""Get random Variates.
Returns
-------
rand_vars : numpy.ndarray
Other Parameters
----------------
use_hidden : bool
scale : str
pd : str
size : list or tuple
"""
allowed_kwargs = ['use_hidden', 'scale', 'pd', 'size']
for key in allowed_kwargs:
if key not in kwargs:
raise ValueError('You did not input enough or correct keyword argument.')
use_hidden = kwargs['use_hidden']
scale = kwargs['scale']
pd = kwargs['pd']
size = kwargs['size']
if use_hidden:
rand_vars = pd.rvs(size=size)
pd.new_pd()
else:
rand_vars = np.ones(size) * pd.rvs()
if scale == 'log10':
rand_vars = np.power(10, rand_vars)
return rand_vars
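# A hedged sketch of how get_rvs consumes the frozen distribution returned by
# get_pd (continuing the illustrative pd_example above): with use_hidden=False a
# single value is drawn and broadcast over `size`, and scale='log10' converts the
# result back to linear resistivity via 10**value.
#     resistivity = get_rvs(use_hidden=False, scale='log10',
#                           pd=pd_example, size=(100,))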
class RandPd(object):
def __init__(self, a_range, b_range, scale, hidden_pdf):
self.a_range = a_range # lower bound or mu(mean)
self.b_range = b_range # upper bound or std(standard deviation)
self.scale = scale # linear or log10
self.hidden_pdf = hidden_pdf # pdf for hidden variable
if self.scale == 'linear':
self.clip_a_for_a = (0 - self.a_range[0]) / self.a_range[1]
self.clip_b_for_a = (np.inf - self.a_range[0]) / self.a_range[1]
self.clip_a_for_b = (0 - self.b_range[0]) / self.b_range[1]
self.clip_b_for_b = (np.inf - self.b_range[0]) / self.b_range[1]
elif self.scale == 'log10':
self.clip_a_for_a = (-np.inf - self.a_range[0]) / self.a_range[1]
self.clip_b_for_a = (np.inf - self.a_range[0]) / self.a_range[1]
self.clip_a_for_b = (-np.inf - self.b_range[0]) / self.b_range[1]
self.clip_b_for_b = (np.inf - self.b_range[0]) / self.b_range[1]
class RandUniform(RandPd):
def __init__(self, a_range, b_range, scale, hidden_pdf):
super(RandUniform, self).__init__(a_range, b_range, scale, hidden_pdf)
self._new_para()
self.pd = uniform(loc=self.loc, scale=self.scale)
self.pd.random_state.seed() # re-seed
def new_pd(self):
self._new_para()
self.pd = uniform(loc=self.loc, scale=self.scale)
self.pd.random_state.seed() # re-seed
def rvs(self, *args, **kwargs):
return self.pd.rvs(*args, **kwargs)
def _new_para(self):
if self.hidden_pdf == 'uniform':
self.loc, self.scale = sorted([np.random.uniform(self.a_range[0], self.a_range[1]),
np.random.uniform(self.b_range[0], self.b_range[1])])
self.scale -= self.loc
elif self.hidden_pdf == 'normal':
self.loc, self.scale = sorted([truncnorm(self.clip_a_for_a, self.clip_b_for_a,
loc=self.a_range[0],
scale=self.a_range[1]).rvs(),
truncnorm(self.clip_a_for_b, self.clip_b_for_b,
loc=self.b_range[0],
scale=self.b_range[1]).rvs()])
self.scale -= self.loc
def __repr__(self):
return '\n'.join([f'loc: {self.loc}',
f'scale: {self.scale}'])
class RandTruncnorm(RandPd):
def __init__(self, mu_range, std_range, clip_a, clip_b, scale, hidden_pdf):
super(RandTruncnorm, self).__init__(mu_range, std_range, scale, hidden_pdf)
self._overwrite_clip()
self.clip_a = clip_a
self.clip_b = clip_b
self._new_para()
self.pd = truncnorm(self._a, self._b, loc=self._mu, scale=self._std)
self.pd.random_state.seed() # re-seed
def new_pd(self):
self._new_para()
self.pd = truncnorm(self._a, self._b, loc=self._mu, scale=self._std)
self.pd.random_state.seed() # re-seed
def rvs(self, *args, **kwargs):
return self.pd.rvs(*args, **kwargs)
def _new_para(self):
if self.hidden_pdf == 'uniform':
self._mu, self._std = np.random.uniform(self.a_range[0], self.a_range[1]), \
np.random.uniform(self.b_range[0], self.b_range[1])
self._std = self._std if self._std > 0 else 0
elif self.hidden_pdf == 'normal':
self._mu, self._std = truncnorm(self.clip_a_for_a, self.clip_b_for_a,
loc=self.a_range[0],
scale=self.a_range[1]).rvs(), \
truncnorm(self.clip_a_for_b, self.clip_b_for_b,
loc=self.b_range[0],
scale=self.b_range[1]).rvs()
self._std = self._std if self._std > 0 else 0
self._a = (self.clip_a - self._mu) / self._std
self._b = (self.clip_b - self._mu) / self._std
def _overwrite_clip(self):
if self.scale == 'log10':
self.clip_a_for_a = (-np.inf - self.a_range[0]) / self.a_range[1]
self.clip_b_for_a = (np.inf - self.a_range[0]) / self.a_range[1]
self.clip_a_for_b = (0 - self.b_range[0]) / self.b_range[1]
self.clip_b_for_b = (np.inf - self.b_range[0]) / self.b_range[1]
def __repr__(self):
return '\n'.join([f'a: {self._a}',
f'b: {self._b}',
f'mu: {self._mu}',
f'std: {self._std}'])
def get_random_model(config_file, mesh, num_examples=None):
config = read_config_file(config_file)
x_bound = [np.nanmin(mesh.vectorNx), np.nanmax(mesh.vectorNx)]
z_bound = [np.nanmin(mesh.vectorNy), np.nanmax(mesh.vectorNy)]
kernel_shape = (config['z_kernel_size'], config['x_kernel_size'])
if num_examples is None:
num_examples = config['num_examples']
# create the instance of resistivity "value" probability distribution
# background
pd_background = get_pd(use_hidden=config['use_hidden_background'],
pdf=config['pdf_background'],
scale=config['scale_background'],
a=config['a_background'],
b=config['b_background'],
hidden_for_a=(config['hidden_a_for_a_background'], config['hidden_b_for_a_background']),
hidden_for_b=(config['hidden_a_for_b_background'], config['hidden_b_for_b_background']),
hidden_pdf=config['hidden_pdf_background'])
# rectangle(block)
pd_rect = get_pd(use_hidden=config['use_hidden_rect'],
pdf=config['pdf_rect'],
scale=config['scale_rect'],
a=config['a_rect'],
b=config['b_rect'],
hidden_for_a=(config['hidden_a_for_a_rect'], config['hidden_b_for_a_rect']),
hidden_for_b=(config['hidden_a_for_b_rect'], config['hidden_b_for_b_rect']),
hidden_pdf=config['hidden_pdf_rect'])
# circle
pd_circle = get_pd(use_hidden=config['use_hidden_circle'],
pdf=config['pdf_circle'],
scale=config['scale_circle'],
a=config['a_circle'],
b=config['b_circle'],
hidden_for_a=(config['hidden_a_for_a_circle'], config['hidden_b_for_a_circle']),
hidden_for_b=(config['hidden_a_for_b_circle'], config['hidden_b_for_b_circle']),
hidden_pdf=config['hidden_pdf_circle'])
for _ in range(num_examples):
size = (mesh.nC,)
resistivity = get_rvs(use_hidden=config['use_hidden_background'],
scale=config['scale_background'],
pd=pd_background,
size=size)
# generate parameter for rectangle and circle
num_rect = rand_num_shape(config['num_rect'])
num_circle = rand_num_shape(config['num_circle'])
stack = rand_rect(x_bound, z_bound,
config['h_range'],
config['w_range'],
mesh, num_rect)
stack.extend(rand_circle(x_bound, z_bound,
config['radius_bound'],
mesh, num_circle))
np.random.shuffle(stack)
for _ in range(num_rect + num_circle):
elem = stack.pop()
size = resistivity[elem[0]].shape
if elem[1] == 'rect':
resistivity[elem[0]] = get_rvs(use_hidden=config['use_hidden_rect'],
scale=config['scale_rect'],
pd=pd_rect,
size=size)
elif elem[1] == 'circle':
resistivity[elem[0]] = get_rvs(use_hidden=config['use_hidden_circle'],
scale=config['scale_circle'],
pd=pd_circle,
size=size)
else:
raise NotImplementedError()
resistivity = smooth2d(resistivity.reshape(mesh.nCy, mesh.nCx),
kernel_shape) # shape is (nz, nx)
# The resistivity starts at the bottom left of the SimPEG 2d mesh.
yield resistivity.flatten()
|
{"hexsha": "369c43541b0752a58b3316ee7ac329755b902421", "size": 15845, "ext": "py", "lang": "Python", "max_stars_repo_path": "erinn/simpeg_extended/random_model.py", "max_stars_repo_name": "ravisha229/elec", "max_stars_repo_head_hexsha": "8297db51f63d5ef961672ae7ccb01c5ef18c70a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "erinn/simpeg_extended/random_model.py", "max_issues_repo_name": "ravisha229/elec", "max_issues_repo_head_hexsha": "8297db51f63d5ef961672ae7ccb01c5ef18c70a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "erinn/simpeg_extended/random_model.py", "max_forks_repo_name": "ravisha229/elec", "max_forks_repo_head_hexsha": "8297db51f63d5ef961672ae7ccb01c5ef18c70a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6282051282, "max_line_length": 116, "alphanum_fraction": 0.5546229094, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3549}
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
#
import sys, os
sys.path.append('./utils/')
import tools
import datatools as dtools
from time import time
os.environ["CUDA_VISIBLE_DEVICES"]="0"
#
import tensorflow as tf
import tensorflow_hub as hub
#############################
seed_in = 3
from numpy.random import seed
seed(seed_in)
from tensorflow import set_random_seed
set_random_seed(seed_in)
bss, ncc = [100, 200], [32, 64]
batch_size = [100, 20]
nsteps = 5
cube_sizes = np.array(ncc)
nsizes = len(cube_sizes)
bsnclist = list(zip(bss, ncc))
pad = int(0)
masktype = 'constant'
suff = 'pad%d-cic-pcic-cmask-4normmix'%pad
savepath = '../models/n10/%s/module/'%suff
ftname = ['cic']
tgname = ['pcic']
files = os.listdir(savepath)
paths = [os.path.join(savepath, basename) for basename in files]
modpath = max(paths, key=os.path.getctime)
print(modpath)
def generate_data(seed, bs, nc):
j = np.where(cube_sizes == nc)[0][0]
path = '../data/make_data_code/L%d-N%d-B1-T5/S%d/'%(bs, nc, seed)
#path = '../data/L%d-N%d-B1-T5/S%d/'%(bs, nc, seed)
mesh = {}
# mesh['s'] = np.load(path + 'fpm-s.npy')
mesh['cic'] = np.load(path + 'fpm-d.npy')
# mesh['logcic'] = np.log(1 + mesh['cic'])
# mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
# mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
# mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
# mesh['GD'] = mesh['R1'] - mesh['R2']
#
ftlist = [mesh[i].copy() for i in ftname]
ftlistpad = [np.pad(i, pad, 'wrap') for i in ftlist]
features = [np.stack(ftlistpad, axis=-1)]
return features
#####
#
tf.reset_default_graph()
module = hub.Module(modpath+'/likelihood/')
xx = tf.placeholder(tf.float32, shape=[None, None, None, None, len(ftname)], name='input')
yy = tf.placeholder(tf.float32, shape=[None, None, None, None, len(tgname)], name='labels')
samples = module(dict(features=xx, labels=yy), as_dict=True)['sample']
loglik = module(dict(features=xx, labels=yy), as_dict=True)['loglikelihood']
with tf.Session() as sess:
sess.run(tf.initializers.global_variables())
for j in range(nsizes):
bs, nc = bsnclist[j]
batch = int(batch_size[j])
for ss in range(10, 10000, batch*10):
print(ss)
seeds = np.arange(ss, ss+batch*10, 10)
try:
xxm = []
sskipseed = []
for iseed, seed in enumerate(seeds):
path = '../data/make_data_code/L%d-N%d-B1-T%d/S%d/'%(bs, nc, nsteps, seed)
try: xxm.append(generate_data(seed, bs, nc))
except Exception as e:
print('skip seed :', iseed)
print(e)
sskipseed.append(iseed)
xxm = np.concatenate(xxm, axis=0)
zeros = np.zeros((list(xxm.shape[:-1]) + [len(tgname)]))
print(xxm.shape)
preds = np.squeeze(sess.run(samples, feed_dict={xx:xxm, yy:zeros}))
print(preds.shape)
for iseed, seed in enumerate(seeds):
if iseed in sskipseed:
print('skip seed :', iseed)
continue
path = '../data/make_data_code/L%d-N%d-B1-T%d/S%d/'%(bs, nc, nsteps, seed)
np.save(path + '/%s-%s'%(suff, tgname[0]), np.squeeze(preds[iseed]))
except Exception as e: print(e)
|
{"hexsha": "69a76af11f0d745da6a152a2a97e769b34410ee2", "size": 3536, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/samplenet.py", "max_stars_repo_name": "modichirag/cosmic-rim", "max_stars_repo_head_hexsha": "3b621e676fe9aaa6cdc84adc92bb9b9d8bbe5f25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/samplenet.py", "max_issues_repo_name": "modichirag/cosmic-rim", "max_issues_repo_head_hexsha": "3b621e676fe9aaa6cdc84adc92bb9b9d8bbe5f25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/samplenet.py", "max_forks_repo_name": "modichirag/cosmic-rim", "max_forks_repo_head_hexsha": "3b621e676fe9aaa6cdc84adc92bb9b9d8bbe5f25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.842519685, "max_line_length": 94, "alphanum_fraction": 0.5675904977, "include": true, "reason": "import numpy,from numpy", "num_tokens": 990}
|
@testset "Laplace" begin
test_interface(LaplaceLikelihood(3.0), Laplace)
@test LaplaceLikelihood().β == 1
test_auglik(LaplaceLikelihood(1.0); rng=MersenneTwister(42))
# Test the custom kl divergence
λ = rand()
μ = rand()
@test kldivergence(InverseGaussian(μ, 2λ), InverseGamma(1//2, λ)) ≈
(log(2λ) / 2 - log(2π) / 2 - log(λ) / 2 + loggamma(1//2) + λ / μ)
end
|
{"hexsha": "67af19eb45c2521548ab9ef24e843bbc2c4ae531", "size": 395, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/likelihoods/laplace.jl", "max_stars_repo_name": "simsurace/AugmentedGPLikelihoods.jl", "max_stars_repo_head_hexsha": "79cde8aa87f4c3791b1d3b02a5ae048928bc6fd1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-12-14T17:10:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:53:15.000Z", "max_issues_repo_path": "test/likelihoods/laplace.jl", "max_issues_repo_name": "simsurace/AugmentedGPLikelihoods.jl", "max_issues_repo_head_hexsha": "79cde8aa87f4c3791b1d3b02a5ae048928bc6fd1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2021-12-03T14:09:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:20:03.000Z", "max_forks_repo_path": "test/likelihoods/laplace.jl", "max_forks_repo_name": "simsurace/AugmentedGPLikelihoods.jl", "max_forks_repo_head_hexsha": "79cde8aa87f4c3791b1d3b02a5ae048928bc6fd1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T19:02:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T19:02:30.000Z", "avg_line_length": 35.9090909091, "max_line_length": 73, "alphanum_fraction": 0.6278481013, "num_tokens": 152}
|
import numpy as np
import io
from gensim.models import KeyedVectors
from gensim.test.utils import datapath, get_tmpfile
from gensim.scripts.glove2word2vec import glove2word2vec
class WordEmbeddingsModel:
def __init__(self, embeddings_file_name, embeddings_dimensions):
self.embeddings_file = embeddings_file_name
self.num_dims = embeddings_dimensions
glove_file = self.embeddings_file
word2vec_glove_file = get_tmpfile("temp_embeddings_file.txt")
glove2word2vec(glove_file, word2vec_glove_file)
self.embeddings_model = KeyedVectors.load_word2vec_format(word2vec_glove_file)
self.vocab_size = len(self.embeddings_model.vocab.keys())
print("Done loading words...")
def __load_vectors(self):
f = io.open(self.embeddings_file, 'r', encoding='utf-8', newline='\n', errors='ignore')
e_model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
e_model[word] = embedding
print("Done.", len(e_model), " words loaded!")
return e_model
# Generate word embeddings for each word in the sentence:
def __getWordEmbeddings(self, tokens_list):
word_embeddings = []
for word in tokens_list:
try:
word_embeddings.append(list(self.embeddings_model[word]))
except:
error_word = word
# print("Key error :", error_word)
if len(word_embeddings) == 0:
return None
else:
return np.array(word_embeddings)
# Compute the sentence embeddings using the individual word embeddings:
def __getSentenceEmbeddings(self, word_embeddings):
avg_sent_embeddings = np.mean(word_embeddings, axis=0)
return avg_sent_embeddings
def containsWord(self, query_word):
try:
cur_vector = self.embeddings_model[query_word]
return True
except:
return False
def getVocabWords(self):
return list(self.embeddings_model.vocab.keys())
def getMostSimilarWords(self, query_word, top_k, probs=True):
if top_k == -1:
word_neighbors_probs = self.embeddings_model.most_similar(query_word, topn=self.vocab_size)
else:
word_neighbors_probs = self.embeddings_model.most_similar(query_word, topn=top_k)
if probs == True:
return word_neighbors_probs
else:
word_neighbors_list = []
for cur_neighbor in word_neighbors_probs:
word_neighbors_list.append(cur_neighbor[0])
return word_neighbors_list
def getEmbeddingsForWord(self, query_word):
if self.containsWord(query_word):
cur_word_embedding = self.__getWordEmbeddings([query_word])[0]
return cur_word_embedding
else:
return None
def getEmbeddingsForSentence(self, query_sentence):
cur_sentence_tokens = query_sentence.split()
word_embeddings_list = self.__getWordEmbeddings(cur_sentence_tokens)
if word_embeddings_list is None:
return None
else:
sentence_embeddings = self.__getSentenceEmbeddings(word_embeddings_list)
return sentence_embeddings
def getEmbeddingsForTokenList(self, query_token_list):
word_embeddings_list = self.__getWordEmbeddings(query_token_list)
if word_embeddings_list is None:
return None
else:
sentence_embeddings = self.__getSentenceEmbeddings(word_embeddings_list)
return sentence_embeddings
if __name__ == "__main__":
embeddings_file = "../../ontology_reconstruction/embeddings/sampleFasttext.vec"
embeddings_dims = 100
embeddings_model = WordEmbeddingsModel(embeddings_file, embeddings_dims)
sample_sentence = "This men s dress is blue in color"
temp = embeddings_model.getEmbeddingsForSentence(sample_sentence)
# print(temp)
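# Additional hedged usage sketches (the query word is illustrative and assumed to
# be in the loaded embedding vocabulary; containsWord returns False and
# getEmbeddingsForWord returns None for out-of-vocabulary words):
# print(embeddings_model.containsWord("dress"))
# print(embeddings_model.getEmbeddingsForWord("dress"))
# print(embeddings_model.getMostSimilarWords("dress", top_k=5, probs=False))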
|
{"hexsha": "49ba5efbb74481d58a91e6579c8babdaf01aa790", "size": 4045, "ext": "py", "lang": "Python", "max_stars_repo_path": "8_graph_embeddings/modelUtils/LanguageModels.py", "max_stars_repo_name": "ravikiran0606/Game-and-Requirements-KG", "max_stars_repo_head_hexsha": "ede2c176c83e33b401b879461a9312660049cf10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "8_graph_embeddings/modelUtils/LanguageModels.py", "max_issues_repo_name": "ravikiran0606/Game-and-Requirements-KG", "max_issues_repo_head_hexsha": "ede2c176c83e33b401b879461a9312660049cf10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-28T19:44:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-28T19:44:05.000Z", "max_forks_repo_path": "8_graph_embeddings/modelUtils/LanguageModels.py", "max_forks_repo_name": "ravikiran0606/game-and-requirements-kg", "max_forks_repo_head_hexsha": "ede2c176c83e33b401b879461a9312660049cf10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-02T19:31:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-23T23:49:19.000Z", "avg_line_length": 39.2718446602, "max_line_length": 103, "alphanum_fraction": 0.6751545117, "include": true, "reason": "import numpy", "num_tokens": 842}
|
GUI=1;
|
{"author": "european-central-bank", "repo": "BEAR-toolbox", "sha": "f33aae80c40f7a2e78a54de99b2ce3663f59aa75", "save_path": "github-repos/MATLAB/european-central-bank-BEAR-toolbox", "path": "github-repos/MATLAB/european-central-bank-BEAR-toolbox/BEAR-toolbox-f33aae80c40f7a2e78a54de99b2ce3663f59aa75/tbx/bear/unreachableCode_ToRemove/gui.m"}
|
@doc raw"""
`PODE`: Partitioned Ordinary Differential Equation
Defines a partitioned initial value problem
```math
\begin{align*}
\dot{q} (t) &= v(t, q(t), p(t)) , &
q(t_{0}) &= q_{0} , \\
\dot{p} (t) &= f(t, q(t), p(t)) , &
p(t_{0}) &= p_{0} ,
\end{align*}
```
with vector fields ``v`` and ``f``, initial conditions ``(q_{0}, p_{0})`` and the solution
``(q,p)`` taking values in ``\mathbb{R}^{d} \times \mathbb{R}^{d}``.
### Fields
* `d`: dimension of dynamical variables ``q`` and ``p`` as well as the vector fields ``v`` and ``f``
* `v`: function computing the vector field ``v``
* `f`: function computing the vector field ``f``
* `t₀`: initial time
* `q₀`: initial condition for `q`
* `p₀`: initial condition for `p`
The functions `v` and `f` must have the interface
```julia
function v(t, q, p, v)
v[1] = ...
v[2] = ...
...
end
```
and
```julia
function f(t, q, p, f)
f[1] = ...
f[2] = ...
...
end
```
where `t` is the current time, `q` and `p` are the current solution vectors
and `v` and `f` are the vectors which hold the result of evaluating the
vector fields ``v`` and ``f`` on `t`, `q` and `p`.
"""
struct PODE{dType <: Number, tType <: Number,
vType <: Function, fType <: Function,
pType <: Union{Tuple,Nothing}, N} <: Equation{dType, tType}
d::Int
n::Int
v::vType
f::fType
t₀::tType
q₀::Array{dType, N}
p₀::Array{dType, N}
parameters::pType
periodicity::Vector{dType}
function PODE(DT::DataType, N::Int, d::Int, n::Int, v::vType, f::fType,
t₀::tType, q₀::DenseArray{dType}, p₀::DenseArray{dType};
parameters=nothing, periodicity=zeros(DT,d)) where {
dType <: Number, tType <: Number, vType <: Function, fType <: Function}
@assert d == size(q₀,1) == size(p₀,1)
@assert n == size(q₀,2) == size(p₀,2)
@assert ndims(q₀) == ndims(p₀) == N ∈ (1,2)
new{DT, tType, vType, fType, typeof(parameters), N}(d, n, v, f, t₀,
convert(Array{DT}, q₀), convert(Array{DT}, p₀),
parameters, periodicity)
end
end
function PODE(v, f, t₀, q₀::DenseArray{DT}, p₀::DenseArray{DT}; kwargs...) where {DT}
PODE(DT, ndims(q₀), size(q₀,1), size(q₀,2), v, f, t₀, q₀, p₀; kwargs...)
end
function PODE(v, f, q₀, p₀; kwargs...)
PODE(v, f, zero(eltype(q₀)), q₀, p₀; kwargs...)
end
Base.hash(ode::PODE, h::UInt) = hash(ode.d, hash(ode.n, hash(ode.v, hash(ode.f,
hash(ode.t₀, hash(ode.q₀, hash(ode.p₀, hash(ode.periodicity, hash(ode.parameters, h)))))))))
Base.:(==)(ode1::PODE{DT1,TT1,VT1,FT1}, ode2::PODE{DT2,TT2,VT2,FT2}) where {DT1, DT2, TT1, TT2, VT1, VT2, FT1, FT2} = (
ode1.d == ode2.d
&& ode1.n == ode2.n
&& ode1.v == ode2.v
&& ode1.f == ode2.f
&& ode1.t₀ == ode2.t₀
&& ode1.q₀ == ode2.q₀
&& ode1.p₀ == ode2.p₀
&& ode1.parameters == ode2.parameters
&& ode1.periodicity == ode2.periodicity)
function Base.similar(ode::PODE, q₀, p₀; kwargs...)
similar(ode, ode.t₀, q₀, p₀; kwargs...)
end
function Base.similar(ode::PODE, t₀::TT, q₀::DenseArray{DT}, p₀::DenseArray{DT};
parameters=ode.parameters, periodicity=ode.periodicity) where {DT <: Number, TT <: Number}
@assert ode.d == size(q₀,1) == size(p₀,1)
PODE(ode.v, ode.f, t₀, q₀, p₀; parameters=parameters, periodicity=periodicity)
end
Base.ndims(ode::PODE) = ode.d
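# A minimal usage sketch, assuming only the constructors defined above:
# a harmonic oscillator with H(q, p) = (p^2 + q^2) / 2, so v(t, q, p) = p
# and f(t, q, p) = -q. The names `v_ho` and `f_ho` are illustrative and do
# not appear in the original source.
function v_ho(t, q, p, v)
    v[1] = p[1]      # q̇ = p
end

function f_ho(t, q, p, f)
    f[1] = -q[1]     # ṗ = -q
end

# Convenience constructor: t₀ defaults to zero(eltype(q₀)).
ode = PODE(v_ho, f_ho, [1.0], [0.0])
@assert ndims(ode) == 1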
|
{"hexsha": "a4cb151b114dda873620929f11a64c314b9f72d6", "size": 3682, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/equations/pode.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/GeometricIntegrators.jl-dcce2d33-59f6-5b8d-9047-0defad88ae06", "max_stars_repo_head_hexsha": "5ffdd27e87719a998492287d90794ffa6d69231a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/equations/pode.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/GeometricIntegrators.jl-dcce2d33-59f6-5b8d-9047-0defad88ae06", "max_issues_repo_head_hexsha": "5ffdd27e87719a998492287d90794ffa6d69231a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/equations/pode.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/GeometricIntegrators.jl-dcce2d33-59f6-5b8d-9047-0defad88ae06", "max_forks_repo_head_hexsha": "5ffdd27e87719a998492287d90794ffa6d69231a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4112149533, "max_line_length": 119, "alphanum_fraction": 0.5334057577, "num_tokens": 1218}
|
[STATEMENT]
lemma onl_invariant_sterms:
assumes wf: "wellformed \<Gamma>"
and il: "A \<TTurnstile> (I \<rightarrow>) onl \<Gamma> P"
and rp: "(\<xi>, p) \<in> reachable A I"
and "p'\<in>sterms \<Gamma> p"
and "l\<in>labels \<Gamma> p'"
shows "P (\<xi>, l)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P (\<xi>, l)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P (\<xi>, l)
[PROOF STEP]
from wf \<open>p'\<in>sterms \<Gamma> p\<close> \<open>l\<in>labels \<Gamma> p'\<close>
[PROOF STATE]
proof (chain)
picking this:
wellformed \<Gamma>
p' \<in> sterms \<Gamma> p
l \<in> labels \<Gamma> p'
[PROOF STEP]
have "l\<in>labels \<Gamma> p"
[PROOF STATE]
proof (prove)
using this:
wellformed \<Gamma>
p' \<in> sterms \<Gamma> p
l \<in> labels \<Gamma> p'
goal (1 subgoal):
1. l \<in> labels \<Gamma> p
[PROOF STEP]
by (rule labels_sterms_labels)
[PROOF STATE]
proof (state)
this:
l \<in> labels \<Gamma> p
goal (1 subgoal):
1. P (\<xi>, l)
[PROOF STEP]
with il rp
[PROOF STATE]
proof (chain)
picking this:
A \<TTurnstile> (I \<rightarrow>) onl \<Gamma> P
(\<xi>, p) \<in> reachable A I
l \<in> labels \<Gamma> p
[PROOF STEP]
show "P (\<xi>, l)"
[PROOF STATE]
proof (prove)
using this:
A \<TTurnstile> (I \<rightarrow>) onl \<Gamma> P
(\<xi>, p) \<in> reachable A I
l \<in> labels \<Gamma> p
goal (1 subgoal):
1. P (\<xi>, l)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
P (\<xi>, l)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 649, "file": "AWN_AWN_Invariants", "length": 8}
|