code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Given a matrix A, return the transpose of A. The transpose of a matrix is the matrix flipped over its
main diagonal, switching the row and column indices of the matrix.
https://assets.leetcode.com/uploads/2019/10/20/hint_transpose.png
Example 1:
Input: [[1,2,3],[4,5,6],[7,8,9]]
Output: [[1,4,7],[2,5,8],[3,6,9]]
Example 2:
Input: [[1,2,3],[4,5,6]]
Output: [[1,4],[2,5],[3,6]]
Note:
1. 1 <= A.length <= 1000
2. 1 <= A[0].length <= 1000
"""
import numpy as np
class Solution:
    def transpose1(self, A):
        """Flatten the matrix, then slice each column out with a stride."""
        width = len(A[0])
        flat = [value for row in A for value in row]
        return [flat[col::width] for col in range(width)]

    def transpose2(self, A):
        """Delegate the work to numpy's transpose."""
        return np.array(A).transpose().tolist()

    def transpose3(self, A):
        """Pure Python: zipping the rows together yields the columns."""
        return [list(column) for column in zip(*A)]
| [
"numpy.array"
] | [((681, 692), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (689, 692), True, 'import numpy as np\n')] |
import numpy as np
import datetime
import calendar
import matplotlib.mlab
matplotlib.use('Agg')
from netCDF4 import Dataset
import io
import collections
def time2unix(datestring):
    """Convert a 'YYYYMMDDhhmmss.f' timestamp string to unix seconds (UTC).

    Returns np.nan when the string does not match the expected format.
    """
    try:
        parsed = datetime.datetime.strptime(datestring, "%Y%m%d%H%M%S.%f")
    except ValueError:
        return np.nan
    return calendar.timegm(parsed.timetuple())
def count_file_lines(fname, site='jue'):
    """Count the number of lines in a log file.

    Arguments:
        fname(str): path of the file to count.
        site(str): station id; 'nya' (Ny-Alesund) logs are Latin-1 encoded,
            any other site is read with the platform default encoding.
            Defaults to 'jue' so legacy one-argument callers
            (e.g. readASCII_old) keep working.

    Returns:
        int: total number of lines in the file.
    """
    # Bug fix: the original bound `f` only for 'jue'/'nya', so any other
    # site raised UnboundLocalError; it also never closed the file on error.
    encoding = 'ISO-8859-1' if site == 'nya' else None
    with io.open(fname, 'r', encoding=encoding) as f:
        return sum(1 for line in f)
def readASCII_old(logfile): #valid for reading in .logs from Aug.2013 until April 17th,2015
    """Parse a Parsivel disdrometer ASCII .log file (format used from
    Aug 2013 until April 17, 2015) into a dict of numpy arrays.

    Arguments:
        logfile(str): path to the .log file.

    Returns:
        dict: one entry per name in ``colnames``; numeric columns are float
        arrays of length ``filelen``, string columns are S20 arrays, and
        'n'/'v' (32-class spectra) are (32, filelen) arrays.
    """
    #read .log-file:
    dic = {}
    colnames = ['unixtime',\
                'rr','r_accum','wawa','z','vis','interval','amp','nmb','T_sensor',\
                'serial_no','version',\
                'curr_heating','volt_sensor',\
                'status_sensor','station_name',\
                'r_amount',\
                'error_code',\
                'n', 'v' ]
    #0: datetime string, 1-9:float, 10,11:string, 12,13: float, 14,15: string, 16:float, 17:string
    #check for bad lines to skip:
    iline = 0
    # NOTE(review): count_file_lines takes (fname, site); this one-argument
    # call raises TypeError unless 'site' is given a default value — confirm.
    filelen = count_file_lines(logfile)
    rowlen = 570. # default for files!
    #set keys where strings will be put in, to string arrays:
    for k,key in enumerate(colnames):
        if k == 10 or k == 11 or k == 14 or k == 15 or k == 17:
            # text columns (serial number, version, status, station, error)
            dic[key] = np.empty(filelen,dtype = 'S20')
        elif k == 18 or k == 19:
            # 32-class spectra: one 32-element column per record
            dic[key] = np.zeros([32,filelen])
        else:
            dic[key] = np.nan * np.ones(filelen)
    #read file:
    f = open(logfile,'r')
    for line in f: # for each line split up string, put value into corresponding array if rowlen normal.
        line = line.strip()
        cols = line.split(';')
        #1/0
        for i,cname in enumerate(colnames):
            if len(line) == rowlen:  # only lines of the expected length are parsed
                if i == 0:
                    #datetime = cols[i]
                    dic[cname][iline] = time2unix(cols[i])
                elif i == 10 or i == 11 or i == 14 or i == 15 or i == 17: #all columns containing strings
                    dic[cname][iline] = str(cols[i])
                elif i == 18:
                    for aa in range(32):
                        dic[cname][aa,iline] = float(cols[i+aa])
                        # -9.999 is the instrument's missing-value sentinel
                        if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
                elif i == 19:
                    for aa in range(32):
                        dic[cname][aa,iline] = float(cols[50+aa])
                        if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
                else: dic[cname][iline] = float(cols[i])
        # NOTE(review): this rescales the ENTIRE 'rr' array once per input
        # line, so earlier records get multiplied by 60 repeatedly; the
        # mm/min -> mm/h conversion presumably belongs after the loop — confirm.
        dic['rr'][:] = dic['rr'][:]*60. #convert from mm/min to mm/h
        iline += 1
    f.close()
    return dic
################################################################################
##############################################################################
def readASCII(logfile, site): #valid for reading in .logs later than April 17th,2015
    """Parse a Parsivel disdrometer ASCII .log file (format in use after
    April 17, 2015) into a dict of numpy arrays.

    Arguments:
        logfile(str): path to the .log file; the name must end in
            YYYYMMDD.log because the date is sliced out of it below.
        site(str): station id, 'jue' or 'nya'; selects the file encoding.

    Returns:
        dict: one entry per name in ``colnames``; 'n'/'v' are (32, filelen)
        arrays and 'M' is the (32, 32, filelen) raw particle spectrum.
    """
    #read .log-file:
    dic = {}
    colnames = ['unixtime',\
                'rr','r_accum','wawa','z','vis','interval','amp','nmb','T_sensor',\
                'serial_no','version',\
                'curr_heating','volt_sensor',\
                'status_sensor','station_name',\
                'r_amount',\
                'error_code',\
                'n', 'v',
                'M']
    #0: datetime string, 1-9:float, 10,11:string, 12,13: float, 14,15: string, 16:float, 17:string, 18,19: array(32,filelen), 20: array(32,32,filelen)
    #check for bad lines to skip:
    iline = 0
    filelen = count_file_lines(logfile, site)
    # historical expected row lengths, kept for reference:
    #    if site == 'jue':
    #        if int(logfile[-12:-4]) > 20160625 :
    #            rowlen = 4662.0   # Station name JOYCE
    #        elif 20151016 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151020 :
    #            rowlen = 4665.
    #        elif 20151001 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151015 :
    #            rowlen = 4660.
    #        else:
    #            rowlen = 4666.0   # Station name Parsivel4
    #
    #    elif site == 'nya':
    #        rowlen = 4660.0
    #set keys where strings will be put in, to string arrays:
    for k,key in enumerate(colnames):
        if k == 10 or k == 11 or k == 14 or k == 15 or k == 17:
            # text columns (serial number, version, status, station, error)
            dic[key] = np.empty(filelen,dtype = 'S20')
        elif k == 18 or k == 19:
            # 32-class spectra: one 32-element column per record
            dic[key] = np.zeros([32,filelen])
        elif k == 20:
            # raw 32x32 size/velocity matrix per record
            dic[key] = np.zeros([32,32,filelen])
        else:
            dic[key] = np.nan * np.ones(filelen)
    #read file:
    if site == 'jue':
        f = open(logfile,'r')
    elif site == 'nya':
        # Ny-Alesund logs are Latin-1 encoded
        f = io.open(logfile,'r', encoding='ISO-8859-1')
    for line in f.readlines(): # for each line split up string, put value into corresponding array if rowlen normal.
        line = line.strip()
        cols = line.split(';')
        # files from this date window contain spurious angle brackets
        if 20150917 < int(logfile[-12:-4]) and int(logfile[-12:-4]) < 20151017 :
            cols = [s.replace('<', '') for s in cols]
            cols = [s.replace('>', '') for s in cols]
        #1/0
        #print 'len(line)', len(line), rowlen, len(line) == rowlen, 'len(cols)', len(cols), len(cols) == 1107
        for i,cname in enumerate(colnames): # loop through columns
            #if len(line) == rowlen :# and cols[14] < 2: # check status of parsivel: if 0 or 1: sensor usable, if 2 or 3: not usable.
            # NOTE(review): always-true leftover of the disabled row-length
            # check above — the indentation level is kept by this dummy if.
            if 1 == 1:
                try:
                    # first 4 chars of the timestamp should be the year
                    test = float(cols[0][0:4])
                except: continue
                if test < 2000: # time stamp missing or in the wrong place
                    continue
                if len(cols) == 1106:
                    # first field lost its separator: split the 18-char
                    # timestamp off the front and re-insert it
                    tempcols = collections.deque(cols)
                    tempcols.extendleft([cols[0][0:18]])
                    tempcols[1] = tempcols[1][18:-1]
                    cols = list(tempcols)
                elif len(cols) != 1107:
                    continue
                if i == 0:
                    dic[cname][iline] = time2unix(cols[i])
                elif i == 10 or i == 11 or i == 14 or i == 15 or i == 17: #all columns containing strings
                    dic[cname][iline] = str(cols[i])
                elif i == 18:
                    for aa in range(32):
                        try:
                            dic[cname][aa,iline] = float(cols[i+aa]) #cols 18 upto 49 (32 values)
                        except ValueError:
                            dic[cname][aa,iline] = np.nan
                        # -9.999 is the instrument's missing-value sentinel
                        if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
                elif i == 19:
                    for aa in range(32):
                        try:
                            dic[cname][aa,iline] = float(cols[50+aa]) #cols 50 upto 81 (32 values)
                        except ValueError:
                            dic[cname][aa,iline] = np.nan
                        if dic[cname][aa,iline] == -9.999 : dic[cname][aa,iline] = np.nan
                elif i == 20:
                    for bb in range(32): #loop through falling velocities, ie rows in matrix
                        for aa in range(32): #loop through sizes, ie columns
                            try:
                                dic[cname][aa,bb,iline] = float(cols[82+32*aa+bb])
                                # NOTE(review): this NaNs every value below
                                # 1e6, i.e. essentially all raw counts; the
                                # condition looks inverted or the threshold
                                # wrong — confirm the intended sentinel.
                                if float(cols[82+32*aa+bb]) < 1000000: dic[cname][aa,bb,iline] = np.nan
                            except ValueError:
                                dic[cname][aa,bb,iline] = np.nan
                else:
                    #if i == 1: 1/0
                    if len(cols) == 1107: # RG 5.8.2016: if some different lenght, something wrong with this line (e.g. time stamp missing)
                        try:
                            dic[cname][iline] = float(cols[i])
                        except ValueError:
                            dic[cname][iline] = np.nan
                    else :
                        dic[cname][iline] = np.nan
        #if iline == 1: 1/0
        iline += 1
    f.close()
    return dic
################################################################################################
################################################################################################
def writeNC_old(logfile,ncname): #valid for data Aug2013-Apr17,2015
    """Convert a pre-April-2015 Parsivel .log file into a NETCDF4 file.

    Arguments:
        logfile(str): path to the ASCII .log file (old format).
        ncname(str): path of the NETCDF4 file to create.
    """
    #read .log-file into dictionnary:
    data = readASCII_old(logfile)
    #get number of lines in file ie length of data columns
    filelen = len(data['unixtime'])
    #open .nc outfile.
    ncout = Dataset(ncname,'w',format='NETCDF4')
    # define dimensions: 'dim' = time, 'ndim' = 32 preci classes,
    # 'stri' = unlimited dimension used by the string-typed variables
    dim = ncout.createDimension('dim', filelen) #filelen, set='none' if unlimited dimension
    ndim = ncout.createDimension('ndim',32)
    stri = ncout.createDimension('stri',None)
    #read variables:
    time = ncout.createVariable('time','i8',('dim',)) #time in double-precision...
    time.units = 'seconds since 1/1/1970 00:00:00'
    time[:] = data['unixtime']
    rain_rate = ncout.createVariable('rain_rate','f',('dim',))
    rain_rate.units = 'mm/h'
    rain_rate[:] = data['rr']
    rain_accum = ncout.createVariable('rain_accum','f',('dim',))
    rain_accum.units = 'mm'
    rain_accum[:] = data['r_accum']
    wawa = ncout.createVariable('wawa','f',('dim',))
    wawa.units = 'weather code'
    wawa[:] = data['wawa']
    zeff = ncout.createVariable('Z','f',('dim',))
    zeff.units = 'dB'
    zeff[:] = data['z']
    vis = ncout.createVariable('MOR_visibility','f',('dim',))
    vis.units = 'm'
    vis[:] = data['vis']
    interval = ncout.createVariable('sample_interval','f',('dim',))
    interval.units = 's'
    interval[:] = data['interval']
    ampli = ncout.createVariable('signal_amplitude','f',('dim',))
    ampli.units = ''
    ampli[:] = data['amp']
    n_part = ncout.createVariable('n_particles','f',('dim',))
    n_part.units = '#'
    n_part.description = 'number of detected particles'
    n_part[:] = data['nmb']
    temp_sens = ncout.createVariable('T_sensor','f',('dim',))
    temp_sens.units = 'deg C'
    temp_sens[:] = data['T_sensor']
    # NOTE(review): datatype 'S' differs from the newer writeNC, which uses
    # explicit widths ('S6', 'S5', ...); netCDF4-python may reject a bare
    # 'S' — confirm against the netCDF4 createVariable documentation.
    serial_no = ncout.createVariable('serial_no','S',('stri',))
    serial_no[:] = data['serial_no']
    version = ncout.createVariable('version','S',('stri',))
    version.description = 'IOP firmware version'
    version[:] = data['version']
    curr_heating = ncout.createVariable('curr_heating','f',('dim',))
    curr_heating.units = 'A'
    curr_heating.description = 'Current heating system'
    curr_heating[:] = data['curr_heating']
    volt_sensor = ncout.createVariable('volt_sensor','f',('dim',))
    volt_sensor.units = 'V'
    volt_sensor.description = 'Power supply voltage in the sensor'
    volt_sensor[:] = data['volt_sensor']
    status_sensor = ncout.createVariable('status_sensor','S',('stri',))
    status_sensor[:] = data['status_sensor']
    station_name = ncout.createVariable('station_name','S',('stri',))
    station_name[:] = data['station_name']
    rain_am = ncout.createVariable('rain_am','f',('dim',))
    rain_am.units = 'mm'
    rain_am.description = 'rain amount absolute'
    rain_am[:] = data['r_amount']
    error_code = ncout.createVariable('error_code','S',('stri',))
    error_code[:] = data['error_code']
    N = ncout.createVariable('N','f',('ndim','dim'))
    N.units = '1/m3'
    N.description = 'mean volume equivalent diameter per preci class'
    N[:,:] = data['n']
    v = ncout.createVariable('v','f',('ndim','dim'))
    v.units = 'm/s'
    v.description = 'mean falling speed per preci class'
    v[:,:] = data['v']
    #close .nc-file:
    ncout.close()
    return
##################################################################################################
##################################################################################################
def writeNC(logfile, ncname, site):
    """Convert a post-April-2015 Parsivel .log file into a NETCDF4 file.

    Arguments:
        logfile(str): path to the ASCII .log file to parse.
        ncname(str): path of the NETCDF4 file to create.
        site(str): station id ('jue' or 'nya'), forwarded to readASCII.
    """
    # parse the ASCII log into a dict of arrays
    data = readASCII(logfile, site)
    # number of records defines the length of the time dimension
    filelen = len(data['unixtime'])
    ncout = Dataset(ncname, 'w', format='NETCDF4')
    # dimensions: 'dim' = time, 'ndim' = 32 preci classes,
    # 'stri' = unlimited dimension used by the string-typed variables
    ncout.createDimension('dim', filelen)
    ncout.createDimension('ndim', 32)
    ncout.createDimension('stri', None)
    # one spec per output variable, in file order:
    # (nc variable name, dtype, dimensions, key into `data`, attributes)
    specs = [
        ('time', 'i8', ('dim',), 'unixtime',
         {'units': 'seconds since 1/1/1970 00:00:00'}),
        ('rain_rate', 'f', ('dim',), 'rr', {'units': 'mm/h'}),
        ('rain_accum', 'f', ('dim',), 'r_accum', {'units': 'mm'}),
        ('wawa', 'f', ('dim',), 'wawa', {'units': 'weather code'}),
        ('Z', 'f', ('dim',), 'z', {'units': 'dB'}),
        ('MOR_visibility', 'f', ('dim',), 'vis', {'units': 'm'}),
        ('sample_interval', 'f', ('dim',), 'interval', {'units': 's'}),
        ('signal_amplitude', 'f', ('dim',), 'amp', {'units': ''}),
        ('n_particles', 'f', ('dim',), 'nmb',
         {'units': '#', 'description': 'number of detected particles'}),
        ('T_sensor', 'f', ('dim',), 'T_sensor', {'units': 'deg C'}),
        ('serial_no', 'S6', ('stri',), 'serial_no', {}),
        ('version', 'S5', ('stri',), 'version',
         {'description': 'IOP firmware version'}),
        ('curr_heating', 'f', ('dim',), 'curr_heating',
         {'units': 'A', 'description': 'Current heating system'}),
        ('volt_sensor', 'f', ('dim',), 'volt_sensor',
         {'units': 'V',
          'description': 'Power supply voltage in the sensor'}),
        ('status_sensor', 'S2', ('stri',), 'status_sensor', {}),
        ('station_name', 'S5', ('stri',), 'station_name', {}),
        ('rain_am', 'f', ('dim',), 'r_amount',
         {'units': 'mm', 'description': 'rain amount absolute'}),
        ('error_code', 'S3', ('stri',), 'error_code', {}),
        ('N', 'f', ('ndim', 'dim'), 'n',
         {'units': '1/m3',
          'description': 'mean volume equivalent diameter per preci class'}),
        ('v', 'f', ('ndim', 'dim'), 'v',
         {'units': 'm/s',
          'description': 'mean falling velocity per preci class'}),
        ('M', 'f', ('ndim', 'ndim', 'dim'), 'M',
         {'units': '',
          'description': 'raw data matrix. number of particles per volume '
                         'diameter and fall velocity'}),
    ]
    # create each variable, attach its attributes, then write its data
    for var_name, dtype, dims, key, attrs in specs:
        variable = ncout.createVariable(var_name, dtype, dims)
        for attr_name, attr_value in attrs.items():
            setattr(variable, attr_name, attr_value)
        variable[...] = data[key]
    ncout.close()
    return
| [
"collections.deque",
"numpy.ones",
"datetime.datetime.strptime",
"netCDF4.Dataset",
"io.open",
"numpy.zeros",
"numpy.empty"
] | [((9333, 9371), 'netCDF4.Dataset', 'Dataset', (['ncname', '"""w"""'], {'format': '"""NETCDF4"""'}), "(ncname, 'w', format='NETCDF4')\n", (9340, 9371), False, 'from netCDF4 import Dataset\n'), ((12960, 12998), 'netCDF4.Dataset', 'Dataset', (['ncname', '"""w"""'], {'format': '"""NETCDF4"""'}), "(ncname, 'w', format='NETCDF4')\n", (12967, 12998), False, 'from netCDF4 import Dataset\n'), ((207, 264), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['datestring', '"""%Y%m%d%H%M%S.%f"""'], {}), "(datestring, '%Y%m%d%H%M%S.%f')\n", (233, 264), False, 'import datetime\n'), ((498, 540), 'io.open', 'io.open', (['fname', '"""r"""'], {'encoding': '"""ISO-8859-1"""'}), "(fname, 'r', encoding='ISO-8859-1')\n", (505, 540), False, 'import io\n'), ((1563, 1593), 'numpy.empty', 'np.empty', (['filelen'], {'dtype': '"""S20"""'}), "(filelen, dtype='S20')\n", (1571, 1593), True, 'import numpy as np\n'), ((4736, 4766), 'numpy.empty', 'np.empty', (['filelen'], {'dtype': '"""S20"""'}), "(filelen, dtype='S20')\n", (4744, 4766), True, 'import numpy as np\n'), ((5091, 5135), 'io.open', 'io.open', (['logfile', '"""r"""'], {'encoding': '"""ISO-8859-1"""'}), "(logfile, 'r', encoding='ISO-8859-1')\n", (5098, 5135), False, 'import io\n'), ((1651, 1674), 'numpy.zeros', 'np.zeros', (['[32, filelen]'], {}), '([32, filelen])\n', (1659, 1674), True, 'import numpy as np\n'), ((4824, 4847), 'numpy.zeros', 'np.zeros', (['[32, filelen]'], {}), '([32, filelen])\n', (4832, 4847), True, 'import numpy as np\n'), ((1720, 1736), 'numpy.ones', 'np.ones', (['filelen'], {}), '(filelen)\n', (1727, 1736), True, 'import numpy as np\n'), ((4892, 4919), 'numpy.zeros', 'np.zeros', (['[32, 32, filelen]'], {}), '([32, 32, filelen])\n', (4900, 4919), True, 'import numpy as np\n'), ((6151, 6174), 'collections.deque', 'collections.deque', (['cols'], {}), '(cols)\n', (6168, 6174), False, 'import collections\n'), ((4964, 4980), 'numpy.ones', 'np.ones', (['filelen'], {}), '(filelen)\n', (4971, 4980), True, 
'import numpy as np\n')] |
'''My CMA-ES
'''
import numpy as np
import matplotlib.pyplot as plt
# Draw 5000 samples from a 2-D Gaussian that is strongly stretched along
# the y axis, then scatter-plot them with equal axis scaling.
center = [0, 0]
covariance = [[1, 0], [0, 100]]
xs, ys = np.random.multivariate_normal(center, covariance, 5000).T
plt.plot(xs, ys, 'x')
plt.axis('equal')
plt.show()
| [
"numpy.random.multivariate_normal",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((165, 184), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""x"""'], {}), "(x, y, 'x')\n", (173, 184), True, 'import matplotlib.pyplot as plt\n'), ((185, 202), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (193, 202), True, 'import matplotlib.pyplot as plt\n'), ((203, 213), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (211, 213), True, 'import matplotlib.pyplot as plt\n'), ((116, 162), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', '(5000)'], {}), '(mean, cov, 5000)\n', (145, 162), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on May 19, 2014
Unit test for convolutional layer forward propagation, compared to CAFFE data.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import numpy
import os
from scipy.signal import correlate2d, convolve2d # pylint: disable=E0611
from veles.dummy import DummyUnit
from veles.memory import Array
import veles.znicz.all2all as all2all
import veles.znicz.conv as conv
import veles.znicz.evaluator as evaluator
import veles.znicz.gd_conv as gd_conv
import veles.znicz.gd as gd
import veles.znicz.gd_pooling as gd_pooling
import veles.znicz.normalization as normalization
import veles.znicz.pooling as pooling
from veles.znicz.tests.functional import StandardTest
class CaffeTestBase(StandardTest):
    """Shared helpers for tests that compare Veles units against CAFFE dumps."""

    def _read_array(self, array_name, lines, shape=None):
        """
        Reads a pic array from an export file, split into lines.
        NB: last line should be empty
        Arguments:
            array_name(str): name of array to read
            lines(array): lines of file to read from
            shape(tuple): shape=(n_pics, height, width, n_chans), must be given
                if not set in file.
        Returns:
            :class:`numpy.ndarray`
        """
        # scan for the header line naming the array; remember where its
        # data starts (cur_line stays None if the header is never found)
        cur_line = None
        for i, line in enumerate(lines):
            line = line.replace("\n", "")
            nibbles = line.split("\t")
            if nibbles[0] == array_name:
                if len(nibbles) >= 5:  # shape is set in file
                    dimensions = {}
                    for nibble in nibbles[1:]:
                        [nibble_name, nibble_val] = nibble.split(":")
                        dimensions[nibble_name] = int(nibble_val)
                    n_pics = dimensions["num"]
                    height = dimensions["height"]
                    width = dimensions["width"]
                    n_chans = dimensions["channels"]
                    if shape is not None:
                        # caller-provided shape must agree with the file's
                        assert shape == (n_pics, height, width, n_chans)
                else:  # shape is set externally
                    # NOTE(review): if the header carries no shape AND
                    # shape is None, this raises TypeError on len(None)
                    assert len(shape) == 4
                    n_pics, height, width, n_chans = shape
                out_array = numpy.zeros((n_pics, height, width, n_chans),
                                        numpy.float64)
                cur_line = i + 1
                break
        assert cur_line is not None
        assert cur_line < len(lines)
        # data layout: per picture a "num:<i>" line, then per channel a
        # "channels:<c>" line followed by `height` tab-separated rows
        for cur_pic in range(n_pics):
            nibbles = lines[cur_line].split(":")
            assert nibbles[0] == "num"
            assert int(nibbles[1]) == cur_pic
            cur_line += 1
            for cur_chan in range(n_chans):
                nibbles = lines[cur_line].split(":")
                assert nibbles[0] == "channels"
                assert int(nibbles[1]) == cur_chan
                cur_line += 1
                for i in range(height):
                    data = [float(x) for x in lines[cur_line].split("\t")]
                    cur_line += 1
                    for j in range(width):
                        out_array[cur_pic, i, j, cur_chan] = data[j]
        return out_array

    def _read_lines(self, data_filename):
        """
        Returns all lines from a file named `data_filename`.
        File is searched in ``self.data_dir_path``.
        Arguments:
            data_filename(str): name of file with exported CAFFE data
                (searched in ``self.data_dir_path``)
        Returns:
            list: list of all lines read
        """
        full_path = os.path.join(self.data_dir_path, data_filename)
        return self._read_lines_by_abspath(full_path)

    def _read_lines_by_abspath(self, full_path):
        # plain helper: read every line of the file at an absolute path
        with open(full_path, 'r') as in_file:
            return in_file.readlines()
class TestConvCaffe(CaffeTestBase):
    def test_caffe_conv(self, data_filename="conv.txt"):
        """
        Compare CAFFE conv layer fwd prop with Veles conv layer.
        Args:
            data_filename(str): name of file with exported conv data
                (relative from data dir)
        """
        lines = self._read_lines(data_filename)
        kernel_size = 5
        padding_size = 2
        # reference blobs exported from CAFFE (NHWC layout)
        bottom = self._read_array("bottom", lines=lines, shape=(2, 32, 32, 3))
        weights = self._read_array("weights", lines=lines, shape=(2, 5, 5, 3))
        top = self._read_array("top", lines=lines, shape=(2, 32, 32, 2))
        fwd_conv = conv.Conv(self.parent, kx=kernel_size, ky=kernel_size,
                             padding=(padding_size, padding_size,
                                      padding_size, padding_size),
                             sliding=(1, 1),
                             n_kernels=2)
        fwd_conv.input = Array()
        fwd_conv.input.mem = bottom
        fwd_conv.initialize(self.device)
        # overwrite the randomly-initialized weights with CAFFE's; bias off
        fwd_conv.weights.map_invalidate()
        fwd_conv.weights.mem[:] = weights.reshape(2, 75)[:]
        fwd_conv.bias.map_invalidate()
        fwd_conv.bias.mem[:] = 0
        fwd_conv.run()
        self.info("Veles vs CAFFE data:")
        fwd_conv.output.map_read()
        self.info("Veles top shape:" + str(fwd_conv.output.mem.shape))
        # NOTE(review): relative differences below are only logged, never
        # asserted — this "test" cannot fail on a numeric mismatch.
        delta_with_veles = fwd_conv.output.mem - top
        self.info("CONV: diff with Veles: %.2f%%" % (
            100. * numpy.sum(numpy.abs(delta_with_veles)) /
            numpy.sum(numpy.abs(fwd_conv.output.mem)),))
        self.info("COMPARED TO HANDMADE CORRELATION:")
        # cross-check: accumulate per-channel 2-D correlations with SciPy
        scipy_conv_out = numpy.zeros(shape=(2, 32, 32, 2), dtype=numpy.float64)
        for pic in range(2):
            for color_chan in range(3):
                for weight_id in range(2):
                    correlation = correlate2d(
                        bottom[pic, :, :, color_chan],
                        weights[weight_id, :, :, color_chan], mode="same")
                    scipy_conv_out[pic, :, :, weight_id] += correlation
        delta_with_scipy = fwd_conv.output.mem - scipy_conv_out
        self.info("CONV: diff with SciPy: %.2f%%" % (
            100. * numpy.sum(numpy.abs(delta_with_scipy)) /
            numpy.sum(numpy.abs(fwd_conv.output.mem)),))
    def test_caffe_grad_conv(self, data_filename="conv_grad.txt"):
        """
        Compare CAFFE conv layer with Veles conv layer (FwdProp and BackProp).
        Args:
            data_filename(str): name of file with exported conv gradient data
                (relative from data dir)
        """
        lines = self._read_lines(data_filename)
        # stride = 1
        bot_size = 32
        top_size = 32
        kernel_size = 5
        padding_size = 2
        n_kernels = 2
        batch_size = 2
        # reference blobs and gradients exported from CAFFE (NHWC layout)
        bottom = self._read_array("bottom", lines=lines,
                                  shape=(batch_size, bot_size, bot_size, 3))
        weights = self._read_array("weights", lines=lines,
                                   shape=(n_kernels,
                                          kernel_size,
                                          kernel_size,
                                          3))
        top = self._read_array("top", lines=lines,
                               shape=(batch_size, top_size, top_size, 2))
        top_err = self._read_array("top_diff", lines=lines,
                                   shape=(batch_size,
                                          top_size,
                                          top_size,
                                          2))
        bot_err = self._read_array("bottom_diff", lines=lines,
                                   shape=(batch_size,
                                          bot_size,
                                          bot_size,
                                          3))
        # forward pass with CAFFE's weights and zero bias
        fwd_conv = conv.Conv(self.parent, kx=kernel_size, ky=kernel_size,
                             padding=(padding_size, padding_size,
                                      padding_size, padding_size),
                             sliding=(1, 1), n_kernels=n_kernels)
        fwd_conv.input = Array(bottom)
        fwd_conv.initialize(self.device)
        fwd_conv.weights.map_invalidate()
        fwd_conv.weights.mem[:] = weights.reshape(2, 75)[:]
        fwd_conv.bias.map_invalidate()
        fwd_conv.bias.mem[:] = 0
        fwd_conv.run()
        self.info("Veles vs CAFFE data:")
        fwd_conv.output.map_read()
        self.info("Veles top shape:" + str(fwd_conv.output.mem.shape))
        # NOTE(review): differences are only logged, never asserted
        delta_with_veles = fwd_conv.output.mem - top
        self.info("CONV: diff with CAFFE: %.2f%%" % (
            100. * numpy.sum(numpy.abs(delta_with_veles)) /
            numpy.sum(numpy.abs(fwd_conv.output.mem)),))
        # backward pass: reuse the forward unit's weights/bias
        back_conv = gd_conv.GradientDescentConv(self.parent)
        back_conv.input = Array(bottom)
        back_conv.output = Array(top)
        back_conv.err_output = Array(top_err)
        back_conv.weights = Array()
        back_conv.weights.mem = fwd_conv.weights.mem
        back_conv.bias = Array(fwd_conv.bias.mem)
        back_conv.batch_size = 2
        back_conv.link_conv_attrs(fwd_conv)
        back_conv.initialize(self.device)
        back_conv.run()
        back_conv.err_input.map_read()
        # BACKPROP: difference with CAFFE export
        back_delta = back_conv.err_input.mem - bot_err
        self.info("GDCONV: diff with CAFFE: %.3f%%" %
                  (100. * numpy.sum(numpy.fabs(back_delta)) /
                   numpy.sum(numpy.fabs(back_conv.err_input.mem)),))
        # perform manual GD CONV: backprop through a conv is a convolution
        # of the output error with the (unflipped) kernels
        manual_bot_err = numpy.zeros(shape=(2, bot_size, bot_size, 3),
                                     dtype=numpy.float64)
        for pic in range(batch_size):
            for color_chan in range(3):
                for weight_id in range(n_kernels):
                    conv_result = convolve2d(
                        top_err[pic, :, :, weight_id],
                        weights[weight_id, :, :, color_chan], mode="same")
                    manual_bot_err[pic, :, :, color_chan] += conv_result
        self.info("Manual GDCONV: diff with CAFFE: %.3f%%" % (
            100. * numpy.sum(numpy.fabs(manual_bot_err - bot_err)) /
            numpy.sum(numpy.fabs(bot_err))))
    def test_caffe_pooling(self, data_filename="pool.txt"):
        """
        Compare CAFFE pooling unit fwd_prop with Veles one
        Args:
            data_filename(str): name of file with pooling data,
                exported from CAFFE (searched in ``self.data_dir_path``)
        """
        # load pooling data from CAFFE dumps
        # NOTE(review): opens the file directly instead of using
        # self._read_lines like the sibling tests — consider unifying.
        in_file = open(os.path.join(self.data_dir_path, data_filename), 'r')
        lines = in_file.readlines()
        in_file.close()
        # max pooling: 3x3 kernel, 2x2 stride
        kernel_size = 3
        stride = 2
        in_height, in_width = 32, 32
        out_height, out_width = 16, 16
        bottom = self._read_array("bottom", lines=lines,
                                  shape=(2, in_height, in_width, 2))
        # do pooling with VELES
        fwd_pool = pooling.MaxPooling(self.parent, kx=kernel_size,
                                      ky=kernel_size, sliding=(stride, stride),
                                      device=self.device)
        fwd_pool.input = Array(bottom)
        fwd_pool.input.map_write()
        fwd_pool.initialize(device=self.device)
        fwd_pool.numpy_run()
        fwd_pool.output.map_read()
        # do MANUAL pooling: take the max over each 3x3 window, stride 2
        manual_pooling_out = numpy.zeros(shape=(2, out_height, out_width, 2),
                                         dtype=numpy.float64)
        for pic in range(2):
            for chan in range(2):
                for i_out in range(out_height):
                    for j_out in range(out_width):
                        min_i = i_out * 2
                        max_i = i_out * 2 + kernel_size - 1
                        min_j = j_out * 2
                        max_j = j_out * 2 + kernel_size - 1
                        zone = bottom[pic, min_i: max_i + 1, min_j:
                                      max_j + 1, chan]
                        manual_pooling_out[pic, i_out, j_out,
                                           chan] = numpy.max((zone))
        # NOTE(review): manual_pooling_out is computed but never compared to
        # fwd_pool.output — this test currently asserts nothing.
    def test_caffe_grad_pooling(self, data_filename="pool_grad.txt"):
        """
        Compare CAFFE pooling unit with Veles ones (fwd and back propagations)
        Args:
            data_filename(str): name of file with pooling data
                (relative from data dir)
        """
        bot_size = 32
        top_size = 16
        kernel_size = 3
        stride = 2
        n_chans = 2
        n_pics = 2
        lines = self._read_lines(data_filename)
        # strip line endings so _read_array sees clean tab-separated fields
        lines = [line.replace("\t\n", "").replace("\n", "") for line in lines]
        bottom = self._read_array("bottom", lines,
                                  shape=(n_pics, bot_size, bot_size, n_chans))
        top = self._read_array("top", lines,
                               shape=(n_pics, top_size, top_size, n_chans))
        bot_err = self._read_array("bottom_diff", lines,
                                   shape=(n_pics, bot_size, bot_size, n_chans))
        top_err = self._read_array("top_diff", lines,
                                   shape=(n_pics, top_size, top_size, n_chans))
        # FORWARD PROP
        fwd_pool = pooling.MaxPooling(self.parent, kx=kernel_size,
                                      ky=kernel_size, sliding=(stride, stride))
        fwd_pool.input = Array(bottom)
        fwd_pool.input.map_write()
        fwd_pool.initialize(device=self.device)
        fwd_pool.numpy_run()
        fwd_pool.output.map_read()
        # NOTE(review): differences are only logged, never asserted
        self.info("FWD POOL: Veles vs CAFFE: %.3f%%" %
                  (100. * (numpy.sum(numpy.abs(fwd_pool.output.mem - top)) /
                           numpy.sum(numpy.abs(top)))))
        # Do MANUAL pooling: max over each 3x3 window, stride 2
        out_height, out_width = top_size, top_size
        manual_pooling_out = numpy.zeros(shape=(2, out_height, out_width, 2),
                                         dtype=numpy.float64)
        for pic in range(2):
            for chan in range(2):
                for i_out in range(out_height):
                    for j_out in range(out_width):
                        min_i = i_out * stride
                        max_i = i_out * stride + kernel_size - 1
                        min_j = j_out * stride
                        max_j = j_out * stride + kernel_size - 1
                        zone = bottom[pic, min_i: max_i + 1,
                                      min_j: max_j + 1, chan]
                        manual_pooling_out[pic, i_out, j_out,
                                           chan] = numpy.max((zone))
        # BACK PROP: route the output error back through the max positions
        # recorded by the forward unit (input_offset)
        grad_pool = gd_pooling.GDMaxPooling(self.parent, kx=kernel_size,
                                            ky=kernel_size,
                                            sliding=(stride, stride),
                                            device=self.device)
        grad_pool.input = Array(bottom)
        grad_pool.input.map_write()
        grad_pool.err_output = Array(top_err)
        grad_pool.err_output.map_write()
        grad_pool.input_offset = fwd_pool.input_offset
        grad_pool.link_pool_attrs(fwd_pool)
        grad_pool.initialize(device=self.device)
        grad_pool.numpy_run()
        grad_pool.err_input.map_read()
        self.info("BACK POOL: Veles vs CAFFE, %.3f%%" % (100 * numpy.sum(
            numpy.abs(grad_pool.err_input.mem - bot_err)) /
            numpy.sum(numpy.abs(bot_err))))
    def test_caffe_grad_normalization(self, data_filename="norm_gd.txt"):
        """
        Tests LRU normalization unit: compares it with CAFFE one.
        Fwd and back props made.
        Args:
            data_filename(str): name of file with normalization data
                (relative from data dir)
        """
        lines = self._read_lines(data_filename)
        size = 16
        n_chans = 2
        n_pics = 2
        max_percent_delta = 2.  # max difference with CAFFE (percents)
        bottom = self._read_array("bottom", lines,
                                  shape=(n_pics, size, size, n_chans))
        top = self._read_array("top", lines,
                               shape=(n_pics, size, size, n_chans))
        bot_err = self._read_array("bottom_diff", lines,
                                   shape=(n_pics, size, size, n_chans))
        top_err = self._read_array("top_diff", lines,
                                   shape=(n_pics, size, size, n_chans))
        fwd_norm = normalization.LRNormalizerForward(self.parent,
                                                     k=1, device=self.device)
        # FWD PROP
        fwd_norm.input = Array(bottom)
        fwd_norm.initialize(self.device)
        fwd_norm.run()
        fwd_norm.output.map_read()
        fwd_percent_delta = 100. * (
            numpy.sum(numpy.abs(fwd_norm.output.mem - top)) /
            numpy.sum(numpy.abs(top)))
        self.info("FWD NORM DELTA: %.2f%%" % fwd_percent_delta)
        self.assertLess(fwd_percent_delta, max_percent_delta,
                        "Fwd norm differs by %.2f%%" % fwd_percent_delta)
        # BACK PROP
        back_norm = normalization.LRNormalizerBackward(self.parent,
                                                       k=1, device=self.device)
        back_norm.output = Array(top)
        back_norm.input = Array(bottom)
        back_norm.err_output = Array(top_err)
        back_norm.initialize(self.device)
        back_norm.run()
        back_norm.err_input.map_read()
        # NOTE(review): this compares err_output (the fed-in top gradient)
        # against bot_err; err_input — mapped for reading just above — looks
        # like the intended operand. Confirm before relying on this check.
        back_percent_delta = 100. * numpy.sum(
            numpy.abs(back_norm.err_output.mem - bot_err)) / \
            numpy.sum(numpy.abs(bot_err))
        self.info("BACK NORM DELTA: %.2f%%" % back_percent_delta)
        self.assertLess(back_percent_delta, max_percent_delta,
                        "Fwd norm differs by %.2f%%" % (back_percent_delta))
    def test_caffe_relu(self, data_filename="conv_relu.txt"):
        """
        Tests CONV+RELU unit: compares it with CAFFE one.
        Fwd prop only.
        Args:
            data_filename(str): name of file with conv+ReLU data
                (relative from data dir)
        """
        lines = self._read_lines(data_filename)
        in_size = 32
        out_size = 32
        n_chans = 3
        n_kernels = 2
        n_pics = 2
        kernel_size = 5
        padding_size = 2
        max_percent_delta = 2.0
        conv_bottom = self._read_array(
            "conv_bottom", lines, shape=(n_pics, in_size, in_size, n_chans))
        conv_top = self._read_array(
            "conv_top", lines, shape=(n_pics, out_size, out_size, n_kernels))
        # the ReLU output is exported flat; reshape it to NHWC order below
        relu_top_flat = self._read_array(
            "relu_top_flat", lines, shape=(1, 1, conv_top.size, 1)).ravel()
        relu_top = numpy.ndarray(
            shape=(n_pics, out_size, out_size, n_kernels), dtype=numpy.float64)
        cur_pos = 0
        for pic in range(n_pics):
            for kernel in range(n_kernels):
                for i in range(out_size):
                    for j in range(out_size):
                        relu_top[pic, i, j, kernel] = relu_top_flat[cur_pos]
                        cur_pos += 1
        conv_weights = self._read_array("conv_weights", lines, shape=(
            n_kernels, kernel_size, kernel_size, n_chans))
        # fused conv + strict ReLU forward unit, CAFFE weights, zero bias
        fwd_conv_relu = conv.ConvStrictRELU(
            self.parent, kx=kernel_size, ky=kernel_size,
            padding=(padding_size, padding_size, padding_size, padding_size),
            sliding=(1, 1), n_kernels=n_kernels, device=self.device)
        fwd_conv_relu.input = Array(conv_bottom)
        fwd_conv_relu.initialize(self.device)
        fwd_conv_relu.weights.map_invalidate()
        fwd_conv_relu.weights.mem[:] = conv_weights.reshape(2, 75)[:]
        fwd_conv_relu.bias.map_invalidate()
        fwd_conv_relu.bias.mem[:] = 0
        fwd_conv_relu.run()
        fwd_conv_relu.output.map_read()
        percent_delta = 100. * (numpy.sum(numpy.abs(
            fwd_conv_relu.output.mem - relu_top)) /
            numpy.sum(numpy.abs(relu_top)))
        self.info("CONV_RELU: diff with CAFFE %.3f%%" % percent_delta)
        self.assertLess(percent_delta, max_percent_delta,
                        "Fwd ConvRELU differs by %.2f%%" % percent_delta)
        # cross-check: ReLU applied by hand to the exported conv output
        # NOTE(review): test_caffe_grad_relu below duplicates this method
        # almost line for line — consider sharing a helper.
        relu_top_manual = numpy.where(numpy.greater(conv_top, 0), conv_top, 0)
        manual_delta = 100. * numpy.sum(
            numpy.abs(relu_top_manual - relu_top)) /\
            (numpy.sum(numpy.abs(relu_top)))
        self.info("SciPy CONV_RELU: diff with CAFFE %.3f%%" % manual_delta)
def test_caffe_grad_relu(self, data_filename="conv_relu.txt"):
"""
Tests CONV+RELU unit: compares it with CAFFE one.
Fwd prop only.
Args:
data_filename(str): name of file with pooling data
(relative from data dir)
"""
lines = self._read_lines(data_filename)
in_size = 32
out_size = 32
n_chans = 3
n_kernels = 2
n_pics = 2
kernel_size = 5
padding_size = 2
max_percent_delta = 2.0
conv_bottom = self._read_array(
"conv_bottom", lines, shape=(n_pics, in_size, in_size, n_chans))
conv_top = self._read_array(
"conv_top", lines, shape=(n_pics, out_size, out_size, n_kernels))
relu_top_flat = self._read_array(
"relu_top_flat", lines, shape=(1, 1, conv_top.size, 1)).ravel()
relu_top = numpy.ndarray(shape=(n_pics, out_size, out_size, n_kernels),
dtype=numpy.float64)
cur_pos = 0
for pic in range(n_pics):
for kernel in range(n_kernels):
for i in range(out_size):
for j in range(out_size):
relu_top[pic, i, j, kernel] = relu_top_flat[cur_pos]
cur_pos += 1
conv_weights = self._read_array("conv_weights", lines, shape=(
n_kernels, kernel_size, kernel_size, n_chans))
fwd_conv_relu = conv.ConvStrictRELU(
self.parent, kx=kernel_size, ky=kernel_size,
padding=(padding_size, padding_size, padding_size, padding_size),
sliding=(1, 1), n_kernels=n_kernels, device=self.device)
fwd_conv_relu.input = Array(conv_bottom)
fwd_conv_relu.initialize(self.device)
fwd_conv_relu.weights.map_invalidate()
fwd_conv_relu.weights.mem[:] = conv_weights.reshape(2, 75)[:]
fwd_conv_relu.bias.map_invalidate()
fwd_conv_relu.bias.mem[:] = 0
fwd_conv_relu.run()
fwd_conv_relu.output.map_read()
percent_delta = 100. * (numpy.sum(numpy.abs(
fwd_conv_relu.output.mem - relu_top)) /
numpy.sum(numpy.abs(relu_top)))
self.info("CONV_RELU: diff with CAFFE %.3f%%" % percent_delta)
self.assertLess(percent_delta, max_percent_delta,
"Fwd ConvRELU differs by %.2f%%" % percent_delta)
relu_top_manual = numpy.where(numpy.greater(conv_top, 0), conv_top, 0)
manual_delta = 100. * numpy.sum(numpy.abs(relu_top_manual - relu_top))\
/ numpy.sum(numpy.abs(relu_top))
self.info("SciPy CONV_RELU: diff with CAFFE %.3f%%" % manual_delta)
def test_grad_conv_relu(self, data_filename="conv_relu_grad.txt"):
"""
Tests GDDRELU_CONV unit: compares it with CAFFE one.
Backward prop only
Args:
data_filename(str): name of file with pooling data
(relative from data dir)
"""
lines = self._read_lines(data_filename)
in_size = 32
out_size = 32
n_chans = 3
n_kernels = 2
n_pics = 2
kernel_size = 5
padding = 2
max_percent_delta = 2.0
conv_bot_err = self._read_array("conv_bottom_diff", lines,
shape=(n_pics, in_size, in_size,
n_chans))
conv_bottom = self._read_array(
"conv_bottom", lines, shape=(n_pics, in_size, in_size, n_chans))
conv_weights = self._read_array("conv_weights", lines, shape=(
n_kernels, kernel_size, kernel_size, n_chans))
conv_weight_delta = self._read_array(
"conv_weight_delta", lines,
shape=(n_kernels, kernel_size, kernel_size, n_chans))
relu_top_err = self._read_array("relu_top_diff", lines, shape=(n_pics,
in_size, in_size, n_kernels))
relu_top_flat = self._read_array("relu_top_flat", lines, shape=(
1, 1, relu_top_err.size, 1)).ravel()
relu_top = numpy.ndarray(shape=(n_pics, out_size, out_size, n_kernels),
dtype=numpy.float64)
cur_pos = 0
for pic in range(n_pics):
for kernel in range(n_kernels):
for i in range(out_size):
for j in range(out_size):
relu_top[pic, i, j, kernel] = relu_top_flat[cur_pos]
cur_pos += 1
# Testing back prop
back_conv_relu = gd_conv.GDStrictRELUConv(self.parent,
device=self.device,
learning_rate=1,
weights_decay=0,
batch_size=n_pics)
back_conv_relu.err_output = Array(relu_top_err)
back_conv_relu.input = Array(conv_bottom)
back_conv_relu.weights = Array(conv_weights.reshape(2, 75))
back_conv_relu.bias = Array(numpy.zeros(shape=n_kernels))
back_conv_relu.output = Array(relu_top)
back_conv_relu.link_conv_attrs(
DummyUnit(kx=kernel_size, ky=kernel_size, n_kernels=n_kernels,
padding=((padding,) * 4), sliding=(1, 1),
unpack_size=1, unpack_data=Array()))
back_conv_relu.initialize(device=self.device)
back_conv_relu.weights.map_invalidate()
back_conv_relu.weights.mem[:] = conv_weights.reshape(2, 75)[:]
back_conv_relu.bias.map_invalidate()
back_conv_relu.bias.mem[:] = 0
back_conv_relu.numpy_run()
back_conv_relu.err_input.map_read()
result = back_conv_relu.err_input.mem
percent_delta = 100. * (numpy.sum(numpy.abs(result - conv_bot_err))
/ numpy.sum(numpy.abs(result)))
self.info("GD_CONV_RELU: diff with CAFFE %.3f%%" % percent_delta)
self.assertLess(percent_delta, max_percent_delta,
"Fwd GD_ConvRELU differs by %.2f%%" % (percent_delta))
# Testing weight deltas
delta_w = back_conv_relu.weights.mem - conv_weights.reshape(2, 75)
percent_delta_w = 100. * numpy.sum(numpy.abs(
delta_w + conv_weight_delta.reshape(2, 75))) \
/ numpy.sum(numpy.abs(delta_w))
# Hint: in CAFFE their \Delta W = - \Delta W_{Veles} (??)
self.info("DELTA W: diff with CAFFE %.3f%%" % percent_delta_w)
self.assertLess(percent_delta_w, max_percent_delta,
"Delta W differs by %.2f%%" % (percent_delta_w))
def test_softmax(self, data_filename="softmax.txt"):
"""
A complex test for EvaluatorSoftmax, All2AllSoftMax and GDSM layers.
Args:
data_filename(str): name of file with pooling data
(relative from data dir)
"""
n_classes = 10 # CIFAR
n_pics = 2
n_chans = 64
size = 4
max_percent_delta = 2.0
lines = self._read_lines(data_filename)
# Fwd prop
a2a_bottom = self._read_array("a2a_bottom", lines,
(n_pics, size, size, n_chans))
a2a_top = self._read_array("a2a_top", lines, (n_pics, 1, 1, n_classes))
a2a_weights_raw = self._read_array(
"a2a_weights", lines, (n_classes, 1, size * size * n_chans, 1))
a2a_weights_raw = a2a_weights_raw.reshape(
n_classes, n_chans, size, size).swapaxes(1, 2).swapaxes(2, 3)
a2a_weights = a2a_weights_raw.reshape(n_classes, size * size * n_chans)
a2a_bias_raw = self._read_array("a2a_bias", lines,
(1, 1, n_classes, 1))
sm_bottom = self._read_array("sm_bottom", lines,
(n_pics, 1, 1, n_classes))
sm_top = self._read_array("sm_top", lines, (n_pics, 1, 1, n_classes))
labels = self._read_array("labels", lines,
(n_pics, 1, 1, 1)).astype(numpy.int32)
a2a_softmax = all2all.All2AllSoftmax(
self.parent, output_sample_shape=n_classes,
weights_filling="uniform", weights_stddev=0.1,
bias_filling="uniform", bias_stddev=0.01)
a2a_softmax.input = Array(a2a_bottom)
a2a_softmax.initialize(self.device)
a2a_softmax.weights.mem[:] = a2a_weights[:]
a2a_softmax.weights.map_invalidate()
a2a_softmax.bias.mem[:] = 0
a2a_softmax.bias.map_invalidate()
a2a_softmax.numpy_run()
a2a_softmax.output.map_read()
fwd_percent_delta = (numpy.sum(numpy.abs(
a2a_softmax.output.mem - sm_top)) /
(numpy.sum(numpy.abs(sm_top)))) * 100.
self.info("A2A_SM FWD DELTA: %.3f%%" % fwd_percent_delta)
self.assertLess(fwd_percent_delta, max_percent_delta,
"A2A_SM_FWD differs by %.2f%%" % (fwd_percent_delta))
# Back prop
sm_top_err = self._read_array("sm_top_diff", lines,
(n_pics, 1, 1, n_classes))
sm_bot_err = self._read_array("sm_bottom_diff", lines,
(n_pics, 1, 1, n_classes))
a2a_bot_err = self._read_array("a2a_bottom_diff", lines,
(n_pics, size, size, n_chans))
ev_sm = evaluator.EvaluatorSoftmax(self.parent)
ev_sm.max_idx = a2a_softmax.max_idx
ev_sm.batch_size = n_pics
ev_sm.output = a2a_softmax.output
ev_sm.labels = Array(labels.reshape(n_pics))
ev_sm.initialize(self.device)
ev_sm.numpy_run()
ev_sm.output.map_read()
ev_sm.err_output.map_read()
back_a2a_sm = gd.GDSoftmax(self.parent, store_gradient=False)
back_a2a_sm.output = a2a_softmax.output
back_a2a_sm.input = a2a_softmax.input
back_a2a_sm.err_output = ev_sm.err_output
back_a2a_sm.weights = a2a_softmax.weights
back_a2a_sm.bias = a2a_softmax.bias
back_a2a_sm.initialize(self.device)
back_a2a_sm.numpy_run()
back_a2a_sm.err_input.map_read()
back_percent_delta = \
100. * (numpy.sum(numpy.abs(
back_a2a_sm.err_input.mem - a2a_bot_err)) /
numpy.sum(numpy.abs(a2a_bot_err)))
self.info("A2ASM_BACK_DELTA %.2f", back_percent_delta)
manual_sm_bot_err = numpy.zeros(shape=(n_pics, 1, 1, n_classes),
dtype=numpy.float64)
for pic in range(n_pics):
for i in range(n_classes):
for j in range(n_classes):
if labels[pic] == j:
target = 1
else:
target = 0
manual_sm_bot_err[pic, 0, 0, i] += (
target / sm_top_err[pic, 0, 0, j] *
(sm_top_err[pic, 0, 0, i] * sm_top_err[pic, 0, 0, j]
- sm_top_err[pic, 0, 0, i] * int(i == j))
)
manual_sm_bot_err /= n_pics # WTF???!!!!
self.info(" manual SM_BOT_ERR_DELTA %.3f%%" %
(numpy.sum(numpy.abs(manual_sm_bot_err - sm_bot_err))
/ numpy.sum(numpy.abs(sm_bot_err),)))
manual_a2a_bot_err = numpy.zeros(shape=(n_pics, size, size, n_chans),
dtype=numpy.float64)
for pic in range(n_pics):
for cur_class in range(n_classes):
for i in range(size):
for j in range(size):
for chan in range(n_chans):
manual_a2a_bot_err[pic, i, j, chan] += (
sm_bot_err[pic, 0, 0, cur_class] *
a2a_weights_raw[cur_class, i, j, chan])
self.info(" manual A2A_BOT_ERR_DELTA %.3f%%" % (
numpy.sum(numpy.abs(manual_a2a_bot_err - a2a_bot_err))
/ numpy.sum(numpy.abs(a2a_bot_err))))
self.assertLess(back_percent_delta, max_percent_delta,
"Back A2SM differs by %.3f%%" % back_percent_delta)
if __name__ == "__main__":
StandardTest.main()
| [
"veles.znicz.tests.functional.StandardTest.main",
"veles.znicz.gd_conv.GradientDescentConv",
"veles.znicz.conv.Conv",
"veles.znicz.evaluator.EvaluatorSoftmax",
"veles.znicz.normalization.LRNormalizerForward",
"veles.znicz.gd.GDSoftmax",
"veles.znicz.all2all.All2AllSoftmax",
"numpy.greater",
"veles.z... | [((33981, 34000), 'veles.znicz.tests.functional.StandardTest.main', 'StandardTest.main', ([], {}), '()\n', (33998, 34000), False, 'from veles.znicz.tests.functional import StandardTest\n'), ((4639, 4686), 'os.path.join', 'os.path.join', (['self.data_dir_path', 'data_filename'], {}), '(self.data_dir_path, data_filename)\n', (4651, 4686), False, 'import os\n'), ((5529, 5687), 'veles.znicz.conv.Conv', 'conv.Conv', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'padding': '(padding_size, padding_size, padding_size, padding_size)', 'sliding': '(1, 1)', 'n_kernels': '(2)'}), '(self.parent, kx=kernel_size, ky=kernel_size, padding=(\n padding_size, padding_size, padding_size, padding_size), sliding=(1, 1),\n n_kernels=2)\n', (5538, 5687), True, 'import veles.znicz.conv as conv\n'), ((5830, 5837), 'veles.memory.Array', 'Array', ([], {}), '()\n', (5835, 5837), False, 'from veles.memory import Array\n'), ((6569, 6623), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(2, 32, 32, 2)', 'dtype': 'numpy.float64'}), '(shape=(2, 32, 32, 2), dtype=numpy.float64)\n', (6580, 6623), False, 'import numpy\n'), ((8802, 8968), 'veles.znicz.conv.Conv', 'conv.Conv', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'padding': '(padding_size, padding_size, padding_size, padding_size)', 'sliding': '(1, 1)', 'n_kernels': 'n_kernels'}), '(self.parent, kx=kernel_size, ky=kernel_size, padding=(\n padding_size, padding_size, padding_size, padding_size), sliding=(1, 1),\n n_kernels=n_kernels)\n', (8811, 8968), True, 'import veles.znicz.conv as conv\n'), ((9082, 9095), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (9087, 9095), False, 'from veles.memory import Array\n'), ((9731, 9771), 'veles.znicz.gd_conv.GradientDescentConv', 'gd_conv.GradientDescentConv', (['self.parent'], {}), '(self.parent)\n', (9758, 9771), True, 'import veles.znicz.gd_conv as gd_conv\n'), ((9799, 9812), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (9804, 
9812), False, 'from veles.memory import Array\n'), ((9841, 9851), 'veles.memory.Array', 'Array', (['top'], {}), '(top)\n', (9846, 9851), False, 'from veles.memory import Array\n'), ((9884, 9898), 'veles.memory.Array', 'Array', (['top_err'], {}), '(top_err)\n', (9889, 9898), False, 'from veles.memory import Array\n'), ((9928, 9935), 'veles.memory.Array', 'Array', ([], {}), '()\n', (9933, 9935), False, 'from veles.memory import Array\n'), ((10015, 10039), 'veles.memory.Array', 'Array', (['fwd_conv.bias.mem'], {}), '(fwd_conv.bias.mem)\n', (10020, 10039), False, 'from veles.memory import Array\n'), ((10577, 10643), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(2, bot_size, bot_size, 3)', 'dtype': 'numpy.float64'}), '(shape=(2, bot_size, bot_size, 3), dtype=numpy.float64)\n', (10588, 10643), False, 'import numpy\n'), ((12062, 12176), 'veles.znicz.pooling.MaxPooling', 'pooling.MaxPooling', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'sliding': '(stride, stride)', 'device': 'self.device'}), '(self.parent, kx=kernel_size, ky=kernel_size, sliding=(\n stride, stride), device=self.device)\n', (12080, 12176), True, 'import veles.znicz.pooling as pooling\n'), ((12273, 12286), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (12278, 12286), False, 'from veles.memory import Array\n'), ((12494, 12563), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(2, out_height, out_width, 2)', 'dtype': 'numpy.float64'}), '(shape=(2, out_height, out_width, 2), dtype=numpy.float64)\n', (12505, 12563), False, 'import numpy\n'), ((14340, 14434), 'veles.znicz.pooling.MaxPooling', 'pooling.MaxPooling', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'sliding': '(stride, stride)'}), '(self.parent, kx=kernel_size, ky=kernel_size, sliding=(\n stride, stride))\n', (14358, 14434), True, 'import veles.znicz.pooling as pooling\n'), ((14493, 14506), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (14498, 14506), False, 'from veles.memory import 
Array\n'), ((14954, 15023), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(2, out_height, out_width, 2)', 'dtype': 'numpy.float64'}), '(shape=(2, out_height, out_width, 2), dtype=numpy.float64)\n', (14965, 15023), False, 'import numpy\n'), ((15748, 15866), 'veles.znicz.gd_pooling.GDMaxPooling', 'gd_pooling.GDMaxPooling', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'sliding': '(stride, stride)', 'device': 'self.device'}), '(self.parent, kx=kernel_size, ky=kernel_size,\n sliding=(stride, stride), device=self.device)\n', (15771, 15866), True, 'import veles.znicz.gd_pooling as gd_pooling\n'), ((16021, 16034), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (16026, 16034), False, 'from veles.memory import Array\n'), ((16103, 16117), 'veles.memory.Array', 'Array', (['top_err'], {}), '(top_err)\n', (16108, 16117), False, 'from veles.memory import Array\n'), ((17567, 17638), 'veles.znicz.normalization.LRNormalizerForward', 'normalization.LRNormalizerForward', (['self.parent'], {'k': '(1)', 'device': 'self.device'}), '(self.parent, k=1, device=self.device)\n', (17600, 17638), True, 'import veles.znicz.normalization as normalization\n'), ((17737, 17750), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (17742, 17750), False, 'from veles.memory import Array\n'), ((18233, 18305), 'veles.znicz.normalization.LRNormalizerBackward', 'normalization.LRNormalizerBackward', (['self.parent'], {'k': '(1)', 'device': 'self.device'}), '(self.parent, k=1, device=self.device)\n', (18267, 18305), True, 'import veles.znicz.normalization as normalization\n'), ((18388, 18398), 'veles.memory.Array', 'Array', (['top'], {}), '(top)\n', (18393, 18398), False, 'from veles.memory import Array\n'), ((18426, 18439), 'veles.memory.Array', 'Array', (['bottom'], {}), '(bottom)\n', (18431, 18439), False, 'from veles.memory import Array\n'), ((18472, 18486), 'veles.memory.Array', 'Array', (['top_err'], {}), '(top_err)\n', (18477, 18486), False, 'from 
veles.memory import Array\n'), ((19846, 19932), 'numpy.ndarray', 'numpy.ndarray', ([], {'shape': '(n_pics, out_size, out_size, n_kernels)', 'dtype': 'numpy.float64'}), '(shape=(n_pics, out_size, out_size, n_kernels), dtype=numpy.\n float64)\n', (19859, 19932), False, 'import numpy\n'), ((20397, 20593), 'veles.znicz.conv.ConvStrictRELU', 'conv.ConvStrictRELU', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'padding': '(padding_size, padding_size, padding_size, padding_size)', 'sliding': '(1, 1)', 'n_kernels': 'n_kernels', 'device': 'self.device'}), '(self.parent, kx=kernel_size, ky=kernel_size, padding=(\n padding_size, padding_size, padding_size, padding_size), sliding=(1, 1),\n n_kernels=n_kernels, device=self.device)\n', (20416, 20593), True, 'import veles.znicz.conv as conv\n'), ((20653, 20671), 'veles.memory.Array', 'Array', (['conv_bottom'], {}), '(conv_bottom)\n', (20658, 20671), False, 'from veles.memory import Array\n'), ((22536, 22622), 'numpy.ndarray', 'numpy.ndarray', ([], {'shape': '(n_pics, out_size, out_size, n_kernels)', 'dtype': 'numpy.float64'}), '(shape=(n_pics, out_size, out_size, n_kernels), dtype=numpy.\n float64)\n', (22549, 22622), False, 'import numpy\n'), ((23107, 23303), 'veles.znicz.conv.ConvStrictRELU', 'conv.ConvStrictRELU', (['self.parent'], {'kx': 'kernel_size', 'ky': 'kernel_size', 'padding': '(padding_size, padding_size, padding_size, padding_size)', 'sliding': '(1, 1)', 'n_kernels': 'n_kernels', 'device': 'self.device'}), '(self.parent, kx=kernel_size, ky=kernel_size, padding=(\n padding_size, padding_size, padding_size, padding_size), sliding=(1, 1),\n n_kernels=n_kernels, device=self.device)\n', (23126, 23303), True, 'import veles.znicz.conv as conv\n'), ((23363, 23381), 'veles.memory.Array', 'Array', (['conv_bottom'], {}), '(conv_bottom)\n', (23368, 23381), False, 'from veles.memory import Array\n'), ((25759, 25845), 'numpy.ndarray', 'numpy.ndarray', ([], {'shape': '(n_pics, out_size, out_size, n_kernels)', 
'dtype': 'numpy.float64'}), '(shape=(n_pics, out_size, out_size, n_kernels), dtype=numpy.\n float64)\n', (25772, 25845), False, 'import numpy\n'), ((26228, 26342), 'veles.znicz.gd_conv.GDStrictRELUConv', 'gd_conv.GDStrictRELUConv', (['self.parent'], {'device': 'self.device', 'learning_rate': '(1)', 'weights_decay': '(0)', 'batch_size': 'n_pics'}), '(self.parent, device=self.device, learning_rate=1,\n weights_decay=0, batch_size=n_pics)\n', (26252, 26342), True, 'import veles.znicz.gd_conv as gd_conv\n'), ((26576, 26595), 'veles.memory.Array', 'Array', (['relu_top_err'], {}), '(relu_top_err)\n', (26581, 26595), False, 'from veles.memory import Array\n'), ((26627, 26645), 'veles.memory.Array', 'Array', (['conv_bottom'], {}), '(conv_bottom)\n', (26632, 26645), False, 'from veles.memory import Array\n'), ((26814, 26829), 'veles.memory.Array', 'Array', (['relu_top'], {}), '(relu_top)\n', (26819, 26829), False, 'from veles.memory import Array\n'), ((29817, 29980), 'veles.znicz.all2all.All2AllSoftmax', 'all2all.All2AllSoftmax', (['self.parent'], {'output_sample_shape': 'n_classes', 'weights_filling': '"""uniform"""', 'weights_stddev': '(0.1)', 'bias_filling': '"""uniform"""', 'bias_stddev': '(0.01)'}), "(self.parent, output_sample_shape=n_classes,\n weights_filling='uniform', weights_stddev=0.1, bias_filling='uniform',\n bias_stddev=0.01)\n", (29839, 29980), True, 'import veles.znicz.all2all as all2all\n'), ((30039, 30056), 'veles.memory.Array', 'Array', (['a2a_bottom'], {}), '(a2a_bottom)\n', (30044, 30056), False, 'from veles.memory import Array\n'), ((31133, 31172), 'veles.znicz.evaluator.EvaluatorSoftmax', 'evaluator.EvaluatorSoftmax', (['self.parent'], {}), '(self.parent)\n', (31159, 31172), True, 'import veles.znicz.evaluator as evaluator\n'), ((31504, 31551), 'veles.znicz.gd.GDSoftmax', 'gd.GDSoftmax', (['self.parent'], {'store_gradient': '(False)'}), '(self.parent, store_gradient=False)\n', (31516, 31551), True, 'import veles.znicz.gd as gd\n'), ((32188, 32253), 
'numpy.zeros', 'numpy.zeros', ([], {'shape': '(n_pics, 1, 1, n_classes)', 'dtype': 'numpy.float64'}), '(shape=(n_pics, 1, 1, n_classes), dtype=numpy.float64)\n', (32199, 32253), False, 'import numpy\n'), ((33101, 33170), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(n_pics, size, size, n_chans)', 'dtype': 'numpy.float64'}), '(shape=(n_pics, size, size, n_chans), dtype=numpy.float64)\n', (33112, 33170), False, 'import numpy\n'), ((11602, 11649), 'os.path.join', 'os.path.join', (['self.data_dir_path', 'data_filename'], {}), '(self.data_dir_path, data_filename)\n', (11614, 11649), False, 'import os\n'), ((21382, 21408), 'numpy.greater', 'numpy.greater', (['conv_top', '(0)'], {}), '(conv_top, 0)\n', (21395, 21408), False, 'import numpy\n'), ((24092, 24118), 'numpy.greater', 'numpy.greater', (['conv_top', '(0)'], {}), '(conv_top, 0)\n', (24105, 24118), False, 'import numpy\n'), ((26751, 26779), 'numpy.zeros', 'numpy.zeros', ([], {'shape': 'n_kernels'}), '(shape=n_kernels)\n', (26762, 26779), False, 'import numpy\n'), ((3279, 3339), 'numpy.zeros', 'numpy.zeros', (['(n_pics, height, width, n_chans)', 'numpy.float64'], {}), '((n_pics, height, width, n_chans), numpy.float64)\n', (3290, 3339), False, 'import numpy\n'), ((18727, 18745), 'numpy.abs', 'numpy.abs', (['bot_err'], {}), '(bot_err)\n', (18736, 18745), False, 'import numpy\n'), ((21541, 21560), 'numpy.abs', 'numpy.abs', (['relu_top'], {}), '(relu_top)\n', (21550, 21560), False, 'import numpy\n'), ((24237, 24256), 'numpy.abs', 'numpy.abs', (['relu_top'], {}), '(relu_top)\n', (24246, 24256), False, 'import numpy\n'), ((28053, 28071), 'numpy.abs', 'numpy.abs', (['delta_w'], {}), '(delta_w)\n', (28062, 28071), False, 'import numpy\n'), ((6771, 6868), 'scipy.signal.correlate2d', 'correlate2d', (['bottom[pic, :, :, color_chan]', 'weights[weight_id, :, :, color_chan]'], {'mode': '"""same"""'}), "(bottom[pic, :, :, color_chan], weights[weight_id, :, :,\n color_chan], mode='same')\n", (6782, 6868), False, 'from scipy.signal 
import correlate2d, convolve2d\n'), ((10844, 10940), 'scipy.signal.convolve2d', 'convolve2d', (['top_err[pic, :, :, weight_id]', 'weights[weight_id, :, :, color_chan]'], {'mode': '"""same"""'}), "(top_err[pic, :, :, weight_id], weights[weight_id, :, :,\n color_chan], mode='same')\n", (10854, 10940), False, 'from scipy.signal import correlate2d, convolve2d\n'), ((17912, 17948), 'numpy.abs', 'numpy.abs', (['(fwd_norm.output.mem - top)'], {}), '(fwd_norm.output.mem - top)\n', (17921, 17948), False, 'import numpy\n'), ((17974, 17988), 'numpy.abs', 'numpy.abs', (['top'], {}), '(top)\n', (17983, 17988), False, 'import numpy\n'), ((18654, 18699), 'numpy.abs', 'numpy.abs', (['(back_norm.err_output.mem - bot_err)'], {}), '(back_norm.err_output.mem - bot_err)\n', (18663, 18699), False, 'import numpy\n'), ((21032, 21078), 'numpy.abs', 'numpy.abs', (['(fwd_conv_relu.output.mem - relu_top)'], {}), '(fwd_conv_relu.output.mem - relu_top)\n', (21041, 21078), False, 'import numpy\n'), ((21117, 21136), 'numpy.abs', 'numpy.abs', (['relu_top'], {}), '(relu_top)\n', (21126, 21136), False, 'import numpy\n'), ((21476, 21513), 'numpy.abs', 'numpy.abs', (['(relu_top_manual - relu_top)'], {}), '(relu_top_manual - relu_top)\n', (21485, 21513), False, 'import numpy\n'), ((23742, 23788), 'numpy.abs', 'numpy.abs', (['(fwd_conv_relu.output.mem - relu_top)'], {}), '(fwd_conv_relu.output.mem - relu_top)\n', (23751, 23788), False, 'import numpy\n'), ((23827, 23846), 'numpy.abs', 'numpy.abs', (['relu_top'], {}), '(relu_top)\n', (23836, 23846), False, 'import numpy\n'), ((24173, 24210), 'numpy.abs', 'numpy.abs', (['(relu_top_manual - relu_top)'], {}), '(relu_top_manual - relu_top)\n', (24182, 24210), False, 'import numpy\n'), ((27059, 27066), 'veles.memory.Array', 'Array', ([], {}), '()\n', (27064, 27066), False, 'from veles.memory import Array\n'), ((27498, 27530), 'numpy.abs', 'numpy.abs', (['(result - conv_bot_err)'], {}), '(result - conv_bot_err)\n', (27507, 27530), False, 'import numpy\n'), 
((27576, 27593), 'numpy.abs', 'numpy.abs', (['result'], {}), '(result)\n', (27585, 27593), False, 'import numpy\n'), ((30389, 30431), 'numpy.abs', 'numpy.abs', (['(a2a_softmax.output.mem - sm_top)'], {}), '(a2a_softmax.output.mem - sm_top)\n', (30398, 30431), False, 'import numpy\n'), ((30471, 30488), 'numpy.abs', 'numpy.abs', (['sm_top'], {}), '(sm_top)\n', (30480, 30488), False, 'import numpy\n'), ((31973, 32023), 'numpy.abs', 'numpy.abs', (['(back_a2a_sm.err_input.mem - a2a_bot_err)'], {}), '(back_a2a_sm.err_input.mem - a2a_bot_err)\n', (31982, 32023), False, 'import numpy\n'), ((32070, 32092), 'numpy.abs', 'numpy.abs', (['a2a_bot_err'], {}), '(a2a_bot_err)\n', (32079, 32092), False, 'import numpy\n'), ((11214, 11233), 'numpy.fabs', 'numpy.fabs', (['bot_err'], {}), '(bot_err)\n', (11224, 11233), False, 'import numpy\n'), ((13209, 13224), 'numpy.max', 'numpy.max', (['zone'], {}), '(zone)\n', (13218, 13224), False, 'import numpy\n'), ((15689, 15704), 'numpy.max', 'numpy.max', (['zone'], {}), '(zone)\n', (15698, 15704), False, 'import numpy\n'), ((16537, 16555), 'numpy.abs', 'numpy.abs', (['bot_err'], {}), '(bot_err)\n', (16546, 16555), False, 'import numpy\n'), ((32968, 33009), 'numpy.abs', 'numpy.abs', (['(manual_sm_bot_err - sm_bot_err)'], {}), '(manual_sm_bot_err - sm_bot_err)\n', (32977, 33009), False, 'import numpy\n'), ((33045, 33066), 'numpy.abs', 'numpy.abs', (['sm_bot_err'], {}), '(sm_bot_err)\n', (33054, 33066), False, 'import numpy\n'), ((33713, 33756), 'numpy.abs', 'numpy.abs', (['(manual_a2a_bot_err - a2a_bot_err)'], {}), '(manual_a2a_bot_err - a2a_bot_err)\n', (33722, 33756), False, 'import numpy\n'), ((33782, 33804), 'numpy.abs', 'numpy.abs', (['a2a_bot_err'], {}), '(a2a_bot_err)\n', (33791, 33804), False, 'import numpy\n'), ((6453, 6483), 'numpy.abs', 'numpy.abs', (['fwd_conv.output.mem'], {}), '(fwd_conv.output.mem)\n', (6462, 6483), False, 'import numpy\n'), ((7187, 7217), 'numpy.abs', 'numpy.abs', (['fwd_conv.output.mem'], {}), 
'(fwd_conv.output.mem)\n', (7196, 7217), False, 'import numpy\n'), ((9675, 9705), 'numpy.abs', 'numpy.abs', (['fwd_conv.output.mem'], {}), '(fwd_conv.output.mem)\n', (9684, 9705), False, 'import numpy\n'), ((10478, 10513), 'numpy.fabs', 'numpy.fabs', (['back_conv.err_input.mem'], {}), '(back_conv.err_input.mem)\n', (10488, 10513), False, 'import numpy\n'), ((11152, 11188), 'numpy.fabs', 'numpy.fabs', (['(manual_bot_err - bot_err)'], {}), '(manual_bot_err - bot_err)\n', (11162, 11188), False, 'import numpy\n'), ((14749, 14785), 'numpy.abs', 'numpy.abs', (['(fwd_pool.output.mem - top)'], {}), '(fwd_pool.output.mem - top)\n', (14758, 14785), False, 'import numpy\n'), ((14826, 14840), 'numpy.abs', 'numpy.abs', (['top'], {}), '(top)\n', (14835, 14840), False, 'import numpy\n'), ((16467, 16511), 'numpy.abs', 'numpy.abs', (['(grad_pool.err_input.mem - bot_err)'], {}), '(grad_pool.err_input.mem - bot_err)\n', (16476, 16511), False, 'import numpy\n'), ((6400, 6427), 'numpy.abs', 'numpy.abs', (['delta_with_veles'], {}), '(delta_with_veles)\n', (6409, 6427), False, 'import numpy\n'), ((7134, 7161), 'numpy.abs', 'numpy.abs', (['delta_with_scipy'], {}), '(delta_with_scipy)\n', (7143, 7161), False, 'import numpy\n'), ((9622, 9649), 'numpy.abs', 'numpy.abs', (['delta_with_veles'], {}), '(delta_with_veles)\n', (9631, 9649), False, 'import numpy\n'), ((10423, 10445), 'numpy.fabs', 'numpy.fabs', (['back_delta'], {}), '(back_delta)\n', (10433, 10445), False, 'import numpy\n')] |
from curses import COLOR_CYAN
from os import wait
import string
from manim import *
from manim.utils import tex
import numpy as np
import math
import textwrap
import random
from solarized import *
from tqdm import tqdm
# Use our fork of manim_rubikscube!
from manim_rubikscube import *
# Dark background — but comment this out for the DesIntro and
# GeneralMITM scenes!  (Original note was in Czech.)
config.background_color = BASE02
random.seed(0)  # deterministic scene generation
# colors
keyColor = GRAY
keyColor2= BASE1
textColor = GRAY
encodeColor = RED
decodeColor = BLUE
borderColor = GRAY
#plainCipherColor = DARK_BROWN
plainColor = CYAN
cipherColor = YELLOW2
# font sizes (pt) used throughout the video
smallFontSize = 18
tinyFontSize = 5
fontSize = 40
padding = 0.5 # between the text and the border around it
# first diagram constants (positions/sizes in manim scene units)
midDiagramPos = 0*UP
topDiagramPos = 1*UP
bottomDiagramPos = 2*DOWN
diagramWidth = 5
arrowLen = 3
keyWidthLarge = 2.5
keyWidth = 1.5
keyPadding = 0.8
textPadding = 0.1
keyInfoWidth = 3.0
keyInfoHeight = 2.0
leftTextPos = 5.5 * LEFT
invMinTime = 6
minTime = 1.0 / invMinTime
# plaintext enters on the left, final ciphertext exits on the right
posPlain = bottomDiagramPos + 6 * LEFT
posFinal = bottomDiagramPos + 6 * RIGHT
# intermediate ciphertext boxes, evenly spaced along the bottom row
cipherPositions = [
    2*posPlain/3 + posFinal/3,
    posPlain/3 + 2*posFinal/3,
    bottomDiagramPos + 6 * RIGHT,
]
# each key sits midway between the texts it encrypts/decrypts
keyPositions = [
    (posPlain + cipherPositions[0])/2,
    (cipherPositions[0] + cipherPositions[1])/2,
    (cipherPositions[1] + cipherPositions[2])/2
]
def flatten(t):
    """Concatenate the sublists of *t* into a single flat list."""
    result = []
    for sublist in t:
        result.extend(sublist)
    return result
# constructing random strings inside keys and ciphertexts
# Private RNG state so the generated strings are reproducible regardless of
# other random.* calls interleaved elsewhere in the file.
rng_state_1 = random.getstate()
def get_cached_lines():
    """Generate 50 pseudo-random 30-character alphanumeric lines.

    The output is deterministic for a given state of the ``random`` module
    (the module seeds it at import time).
    """
    line_length = 30
    alphabet = string.ascii_letters + string.digits
    return [
        "".join(random.choice(alphabet) for _ in range(line_length))
        for _ in range(50)
    ]
cached_lines = get_cached_lines()
def constructRandomString(lineLen = 30, numLines = 12):
    """Build (text, font size) line pairs for a random-looking ciphertext box.

    The first line is rendered at smallFontSize, the remaining ones at
    tinyFontSize; the last line ends with "..." to suggest truncation.
    Uses the private RNG state rng_state_1 so the output is reproducible
    regardless of other random.* calls in the file.
    """
    global rng_state_1
    # Swap in the private RNG state, draw the lines, then restore.
    old_state = random.getstate()
    random.setstate(rng_state_1)
    strList = []
    for _ in range(numLines):
        strList.append(random.choice(cached_lines))
    strList[-1] = strList[-1][:-3] + "..."
    # The first line is shown in a bigger font, so clip it to roughly the
    # same rendered width as the tiny lines.
    strList[0] = strList[0][0: int(lineLen * tinyFontSize / smallFontSize - 1)]
    rng_state_1 = random.getstate()
    random.setstate(old_state)
    fontSizes = [smallFontSize] + [tinyFontSize]*(len(strList) - 1)
    return [(line, size) for line, size in zip(strList, fontSizes)]
# (text, font size) pairs for the self-referential dummy plaintext message.
strPlainText = [
    ("Hi mom,", smallFontSize),
    ("yes, I am watching all Polylog videos", 5),
    ("as you wanted me to. Just now I watch ", 5),
    ("the one about the meet in the middle", 5),
    ("technique, the part about breaking", 5),
    ("the double DES cipher. Did you read", 5),
    ("the dummy plain text message in that", 5),
    ("part of the video? It starts with:", 5),
    ("\"Hi mom, yes, I am watching all", 5),
    ("Polylog videos as you wanted me to. ", 5),
    ("Just now I watch the one about the ", 5),
    ("meet in the middle technique, the...", 5)
    ##123456789012345678901234567890  (30-character width ruler)
]
strCipherText = constructRandomString()
# Separate private RNG state for key-string generation.
rng_state_2 = random.getstate()
def constructRandomKeyString(len1 = 3, len2 = 5, prefix = None, suffix = None):
    """Build an abbreviated key string of the form "xxx...yyyyy".

    Each half is `prefix`/`suffix` rendered as zero-padded binary when
    given, otherwise that many random bits drawn from the private RNG
    state rng_state_2 (so other random.* calls do not affect it).
    """
    global rng_state_2

    def bits(count, value):
        # Fixed value -> zero-padded binary; otherwise `count` random bits.
        if value is None:
            return "".join(random.choice(["0", "1"]) for _ in range(count))
        return format(value, "0" + str(count) + "b")

    saved_state = random.getstate()
    random.setstate(rng_state_2)
    result = bits(len1, prefix) + "..." + bits(len2, suffix)
    rng_state_2 = random.getstate()
    random.setstate(saved_state)
    return result
def random_click_file():
    """Pick one of the four click sound files at random and return its path."""
    index = random.randint(0, 3)
    return f"audio/click/click_{index}.wav"
zeroString = "000...00000"
oneString = "000...00001"
ourKeyString = "101...01110"
randKeyString = constructRandomKeyString()
unknownKeyString = "???...?????"
# text object
class Btext:
    """A bordered multi-line text box used throughout the animation.

    Wraps a group of Tex lines plus a border mobject and provides methods
    that return manim animations for creating, moving, tagging and
    removing the box; the caller is responsible for playing them.
    """
    def __init__(self, strLines, position = None, color = textColor, width = None, height = None, scale = 1.0, fill_color = None, fill_opacity = 0.0):
        """Build the Tex lines and the border (nothing is animated here).

        strLines: list of (string, font size) pairs, one pair per line.
        position: center of the box; defaults to the origin.
        """
        # Create the default position per instance: a module-level
        # np.array default would be a mutable object shared by all calls.
        self.position = np.array([0, 0, 0]) if position is None else position
        self.width = width
        self.height = height
        self.strLines = strLines
        self.fill_color = fill_color
        self.fill_opacity = fill_opacity
        self.color = color
        self.tag = False  # whether a caption is attached below the box

        self.lines = Group(*[
            Tex(
                txt,
                color = self.color,
                font_size = size,
            ).scale(scale) for txt, size in strLines
        ]).arrange(DOWN, center = False, aligned_edge = LEFT, buff = textPadding)

        self.border = constructTextBorder(
            insideObject = self.lines,
            width = self.width,
            height = self.height,
            color = self.color,
            fill_color = self.fill_color,
            fill_opacity = self.fill_opacity,
        ).scale(scale)

        self.textBorder = Group(self.border, self.lines)

        if self.width is None:
            # Inherit the size computed from the rendered content.
            self.width = self.textBorder.width
            self.height = self.textBorder.height

    def changeText(self, newStrLines, empty = False):
        """Return an animation replacing the displayed lines with newStrLines.

        With empty=True the new lines are written from scratch instead of
        being transformed from the old ones.
        """
        self.strLines = newStrLines
        newLines = Group(*[
            Tex(txt, color = self.color, font_size = size) for txt, size in self.strLines
        ]).arrange(DOWN, center = False, aligned_edge = LEFT, buff = textPadding).move_to(self.lines.get_center())
        if not empty:
            return AnimationGroup(*[Transform(line, newline) for (line, newline) in zip(self.lines, newLines)], lag_ratio = 0.2)
        else:
            self.lines = newLines
            return AnimationGroup(*[Write(line) for line in self.lines], lag_ratio = 0.2)

    def create(self, position = None, noText = False, tag = False, tagStr = ""):
        """Return an animation drawing the border and (optionally) the text.

        position: where to place the box first (overrides the stored one).
        noText: draw only the border.
        tag/tagStr: also write a caption below the box.
        """
        if position is not None:
            self.position = position.copy()
        self.border.move_to(self.position)
        self.lines.move_to(self.position)
        anim = []
        if not noText:
            anim.append(
                AnimationGroup(
                    Write(self.lines[0]),
                    AnimationGroup(
                        *[Write(text) for text in self.lines[1:]],
                        lag_ratio = 0.2,
                        run_time = 1
                    )
                )
            )
        if tag:
            self.tag = True
            self.tagText = Tex(
                tagStr,
                color = self.color,
                font_size = fontSize
            ).move_to(
                self.border.get_center()
            ).next_to(self.border, DOWN)
            anim.append(
                Write(self.tagText)
            )
        return AnimationGroup(
            AnimationGroup(
                DrawBorderThenFill(self.border),
            ),
            *anim,
        )

    def highlight(self):
        """Return an animation raising the border above everything else."""
        animl = [
            self.border.animate().set_z_index(9999999)
        ]
        return AnimationGroup(
            *animl
        )

    def move_to(self, position):
        """Return an animation moving the box (and its tag) to position."""
        self.textBorder.generate_target()
        self.textBorder.target.move_to(position)
        anims = [MoveToTarget(self.textBorder)]
        if self.tag:
            # Keep the caption at the same offset relative to the box.
            self.tagText.generate_target()
            self.tagText.target.shift(position - self.position)
            anims.append(
                MoveToTarget(self.tagText)
            )
        self.position = position
        return AnimationGroup(
            *anims
        )

    def shift(self, vec):
        """Return an animation shifting the box by the vector vec."""
        return self.move_to(self.position + vec)

    def remove(self):
        """Return an animation fading out the border and all text lines."""
        return AnimationGroup(
            *[FadeOut(self.border)],
            *[FadeOut(l) for l in self.lines],
        )

    def removeTag(self):
        """Return an animation unwriting the caption below the box."""
        self.tag = False
        return Unwrite(self.tagText)

    def addTag(self, tagStr):
        """Return an animation writing caption tagStr below the box."""
        self.tag = True
        self.tagText = Tex(
            tagStr,
            color = self.color,
            font_size = fontSize
        ).move_to(
            self.border.get_center()
        ).next_to(self.border, DOWN)
        return Write(self.tagText)
# key object
class Key:
    """A DES key shown either as a bordered bit string ("rectangle key") or as
    a key clipart, with optional red (encode) / blue (decode) arrows.

    Like Btext, every public method returns a manim Animation/AnimationGroup
    for the caller to play; the object tracks its own position and which
    arrows are currently active.
    """
    def __init__(self, keyString, position = np.array([0, 0, 0]), scale = 1.0, clipartWidth = keyWidthLarge, upShift = 0.0*UP, keyTitle ="56 bit key"):
        self.keyString = keyString
        self.clipartWidth = clipartWidth
        self.position = position
        self.upShift = upShift
        self.keyTitle = keyTitle
        # rectangle-key representation: bit string + brace + title + border
        self.text = Tex(keyString, color = textColor).move_to(self.position).scale(scale)
        self.brace = Brace(self.text, UP, color = textColor).shift(textPadding * UP).scale(scale)
        self.title = Tex(self.keyTitle, color = textColor).scale(scale)
        self.braceTitle = Group(self.brace, self.title).arrange(UP)
        self.border = Rectangle(
            width = self.text.get_right()[0] - self.text.get_left()[0] + padding,
            height = self.text.get_top()[1] - self.text.get_bottom()[1] + padding,
            color = keyColor
        ).scale(scale)
        self.border.move_to(self.position)
        self.braceTitle.shift(
            self.border.get_top() + padding * UP + (self.braceTitle.get_top() - self.braceTitle.get_center())/2
        )
        self.rectangleKey = Group(
            self.text,
            self.braceTitle,
            self.border
        )
        # clipart representation (the border mobject is shared between both forms)
        _, self.clipartKeyLine, self.clipartKeyCirc = constructKey(
            position = position,
            width = self.clipartWidth
        )
        self.clipartKey = Group(
            self.border,
            self.clipartKeyLine,
            self.clipartKeyCirc
        )
        # arrows start as degenerate (zero-length) placeholders
        self.redActive = False
        self.redArrow = Arrow(
            start = 0*LEFT,
            end = 0*LEFT,
            color = encodeColor
        )
        self.blueActive = False
        self.blueArrow = Arrow(
            end = 0*LEFT,
            start = 0*LEFT,
            color = decodeColor
        )
    def changeText(self, newKeyString, fst = False):
        """Return an animation morphing the bit string into *newKeyString*."""
        # NOTE(review): the `fst` parameter is currently unused — kept for callers.
        self.keyString = newKeyString
        newText = Tex(self.keyString, color = textColor).move_to(self.text.get_center())
        return Transform(self.text, newText)
    def changeTextandSize(self, newKeyString, shift = 0):
        """Return animations changing the bit string and resizing/shifting the whole key."""
        self.keyString = newKeyString
        newText = Tex(self.keyString, color = textColor).move_to(self.text.get_center())
        newBorder = Rectangle(
            width = newText.get_right()[0] - newText.get_left()[0] + padding,
            height = newText.get_top()[1] - newText.get_bottom()[1] + padding,
            color = keyColor
        ).move_to(self.border.get_center())
        newBrace = Brace(newText, UP, color = textColor).move_to(self.brace.get_center())
        newTitle = Tex(self.keyTitle, color = textColor).move_to(self.title.get_center())
        newRedArrow = Arrow(
            start = newBorder.get_left(),
            end = newBorder.get_right(),
            color = encodeColor,
            buff = 0
        ).move_to(self.redArrow.get_center())
        Group(
            newText,
            newBorder,
            newBrace,
            newTitle,
            newRedArrow
        ).shift(shift)
        return AnimationGroup(
            Transform(self.text, newText),
            Transform(self.border, newBorder),
            Transform(self.brace, newBrace),
            Transform(self.title, newTitle),
            Transform(self.redArrow, newRedArrow)
        )
    def createRectangleKey(self, position = None, noBrace = False):
        """Return animations drawing the rectangle-key form at *position*."""
        if position is None:
            position = self.position.copy()
        self.rectangleKey.move_to(position + self.rectangleKey.get_center() - self.border.get_center() )
        self.position = position
        if noBrace == False:
            anims = AnimationGroup(
                Write(self.text),
                Create(self.border),
                Create(self.brace),
                Write(self.title)
            )
        else:
            anims = AnimationGroup(
                Create(self.border),
                Write(self.text)
            )
        return anims
    def createClipartKey(self, position = None):
        """Return animations drawing the clipart form at *position*."""
        if not position is None:
            self.position = position.copy()
        self.border, self.clipartKeyLine, self.clipartKeyCirc = constructKey(
            position = self.position,
            width = self.clipartWidth
        )
        self.clipartKey.move_to(self.position + self.upShift)
        anims = AnimationGroup(
            Create(self.border),
            Create(self.clipartKeyLine),
            Create(self.clipartKeyCirc)
        )
        return anims
    def transformClipartToRectangle(self, position = None, noBrace = False):
        """Return three staged animation groups morphing clipart -> rectangle key."""
        if not position is None:
            self.position = position
        self.text = Tex(self.keyString, color = textColor).move_to(self.position + self.upShift)
        self.brace = Brace(self.text, UP, color = textColor).shift(textPadding * UP)
        self.title = Tex("56 bit key", color = textColor)
        self.braceTitle = Group(self.brace, self.title).arrange(UP)
        newBorder = Rectangle(
            width = self.text.get_right()[0] - self.text.get_left()[0] + padding,
            height = self.text.get_top()[1] - self.text.get_bottom()[1] + padding,
            color = keyColor
        )
        newBorder.move_to(self.position + self.upShift)
        anims1 = AnimationGroup(
            Uncreate(self.clipartKeyLine),
            Uncreate(self.clipartKeyCirc)
        )
        anims2l = [Transform(
            self.border,
            newBorder
        )]
        # any active arrows follow the border into its new shape
        if self.redActive == True:
            newRedArrow = Arrow(
                start = newBorder.get_left(),
                end = newBorder.get_right(),
                color = encodeColor,
                buff = 0
            ).move_to(newBorder.get_bottom() + 1*textPadding*DOWN)
            anims2l.append(
                Transform(self.redArrow, newRedArrow)
            )
        if self.blueActive == True:
            newBlueArrow = Arrow(
                end = newBorder.get_left(),
                start = newBorder.get_right(),
                color = decodeColor,
                buff = 0
            ).move_to(newBorder.get_bottom() + 1*textPadding*DOWN)
            anims2l.append(
                Transform(self.blueArrow, newBlueArrow)
            )
        anims2 = AnimationGroup(
            *anims2l,
            run_time = 2
        )
        self.text.move_to(self.position + self.upShift)
        self.braceTitle.move_to(self.position + self.upShift).next_to(self.text, UP).shift(textPadding * UP)
        if noBrace == False:
            anims3 = AnimationGroup(
                Write(self.text),
                Create(self.brace),
                Write(self.title)
            )
        else:
            anims3 = AnimationGroup(
                Write(self.text)
            )
        return [anims1, anims2, anims3]
    def transformRectangleToClipart(self, position = None):
        """Return two staged animation groups morphing rectangle key -> clipart."""
        if not position is None:
            self.position = position
        self.clipartKey.move_to(self.position + self.upShift)
        anims1 = AnimationGroup(
            Uncreate(self.text),
            Uncreate(self.brace),
            Uncreate(self.title)
        )
        keyShape, self.clipartKeyLine, self.clipartKeyCirc = constructKey(
            position = self.position + self.upShift,
            width = self.clipartWidth
        )
        anims2 = AnimationGroup(
            Transform(
                self.border,
                keyShape
            ),
            Create(self.clipartKeyLine),
            Create(self.clipartKeyCirc)
        )
        return [anims1, anims2]
    def moveClipart(self, position = None, clipartWidth = None):
        """Return animations moving (and optionally resizing) the clipart key."""
        if not position is None:
            self.position = position
        if not clipartWidth is None:
            self.clipartWidth = clipartWidth
        #key moves
        shapeNew, lineNew, circNew = constructKey(
            position = self.position + self.upShift,
            width = self.clipartWidth
        )
        anim = [
            Transform(self.border, shapeNew),
            Transform(self.clipartKeyLine, lineNew),
            Transform(self.clipartKeyCirc, circNew),
        ]
        if self.redActive:
            anim.append(self.redArrow.animate().shift(circNew.get_center() - self.clipartKeyCirc.get_center()))
        if self.blueActive:
            anim.append(self.blueArrow.animate().shift(circNew.get_center() - self.clipartKeyCirc.get_center()))
        return AnimationGroup(*anim)
    def shiftRec(self, vec, noBrace = False):
        """Return animations shifting the rectangle key (and active arrows) by *vec*."""
        self.position += vec
        animl = [
            self.border.animate().shift(vec),
            self.text.animate().shift(vec),
        ]
        if noBrace == False:
            animl.append(self.braceTitle.animate().shift(vec))
        if self.redActive == True:
            animl.append(self.redArrow.animate().shift(vec))
        if self.blueActive == True:
            animl.append(self.blueArrow.animate().shift(vec))
        anim = AnimationGroup(
            *animl
        )
        return anim
    def moveRec(self, pos, noBrace=False):
        """Return animations moving the rectangle key to absolute position *pos*."""
        return self.shiftRec(pos - self.position, noBrace = noBrace)
    def removeRec(self, noBrace = False):
        """Return animations removing the rectangle-key form."""
        anims = [
            Uncreate(self.border),
            Unwrite(self.text)
        ]
        if noBrace == False:
            anims.append(Uncreate(self.brace))
            anims.append(Unwrite(self.title))
        return AnimationGroup(
            *anims
        )
    def createRedArrow(self, position = None):
        """Return an animation creating the red encode arrow under the border."""
        if not position is None:
            self.position = position
        self.redArrow = Arrow(
            start = self.border.get_left(),
            end = self.border.get_right(),
            color = encodeColor,
            buff = 0
        ).move_to(self.border.get_bottom() + 1*textPadding*DOWN)
        self.redActive = True
        anim = AnimationGroup(
            Create(self.redArrow)
        )
        return anim
    def removeRedArrow(self):
        """Return an animation removing the red encode arrow."""
        self.redActive = False
        return AnimationGroup(
            Uncreate(self.redArrow)
        )
    def createBlueArrow(self, position = None):
        """Return an animation creating the blue decode arrow (pointing left)."""
        if not position is None:
            self.position = position
        self.blueArrow = Arrow(
            end = self.border.get_left(),
            start = self.border.get_right(),
            color = decodeColor,
            buff = 0
        ).move_to(self.border.get_bottom() + 1*textPadding*DOWN)
        self.blueActive = True
        anim = AnimationGroup(
            Create(self.blueArrow)
        )
        return anim
    def removeBlueArrow(self):
        """Return an animation removing the blue decode arrow."""
        self.blueActive = False
        return AnimationGroup(
            Uncreate(self.blueArrow)
        )
    def remove(self, noBrace = False):
        """Return animations removing every visible part of the key."""
        anims = [
            Uncreate(self.border),
            Unwrite(self.text),
        ]
        if noBrace == False:
            # NOTE(review): the trailing comma makes this statement a 1-tuple
            # expression; it is harmless but looks accidental.
            anims.append(Unwrite(self.title)),
            anims.append(Uncreate(self.brace))
        if self.redActive == True:
            anims.append(Uncreate(self.redArrow))
        if self.blueActive == True:
            anims.append(Uncreate(self.blueArrow))
        return AnimationGroup(*anims)
class DesBruteForce(Scene):
    """Scene: brute-forcing a single 56-bit DES key."""
    def construct(self):
        """
        Animation: the plain text (something funny) flows along an arrow,
        above which sits the key, into a cipher text. The key is shown
        either as a clipart or sometimes as a bit string. On the right is
        the target cipher text, and we check whether the brute-forced
        output matches it.
        """
        self.next_section(skip_animations = False)
        DesTextPosition = 3*UP
        DesText = Tex(r"DES", color = textColor).move_to(DesTextPosition)
        self.add(DesText)
        # create plain and cipher text
        plain = Btext(strPlainText, color = plainColor, position = midDiagramPos - diagramWidth * RIGHT /2 )
        self.play(
            plain.create(tag = True, tagStr = "plain text"),
            run_time = 0.01
        )
        cipher = Btext(strCipherText, color = cipherColor, width = plain.width, height = plain.height, position = midDiagramPos + diagramWidth*RIGHT/2)
        self.play(
            cipher.create(tag = True, tagStr = "cipher text"),
            run_time = 0.01
        )
        key = Key(unknownKeyString, position = midDiagramPos)
        # + diagramWidth/3 * LEFT)
        self.play(
            key.createRectangleKey(),
            run_time = 0.01
        )
        # shift plain to the left
        self.play(
            key.shiftRec(diagramWidth/3 * LEFT, noBrace = False),
            plain.shift(diagramWidth/3 * LEFT),
            cipher.shift(diagramWidth/3*RIGHT)
        )
        self.wait()
        guess = Btext(
            constructRandomString(),
            position = 2 * key.position - plain.position,
            width = plain.width,
            height = plain.height
        )
        self.play(
            #key.changeText(zeroString),
            guess.create(noText = True),
            key.createRedArrow(),
            #run_time = 1
        )
        self.next_section(skip_animations=False)
        # self.play(
        #     Succession(
        #         AnimationGroup(
        #             guess.changeText(constructRandomString(), empty = True),
        #         )
        #     )
        # )
        # first go one by one
        # per-step durations: start slow, converge to minTime (fast-forward feel)
        waitingTimes = []
        L = 10 # víc
        for i in range(L):
            waitingTimes.append(
                max((1.0 - (i * 1.0 / L)), minTime)
            )
        for i in range(1000):
            waitingTimes.append(minTime)
        cumTimes = np.cumsum(np.array(waitingTimes))
        np.insert(cumTimes, 0, 0)
        anims = []
        cnt = 0
        actString = "000..." + '{0:05b}'.format((cnt % 32))
        self.play( # first one is done differently due to weird behaviour otherwise
            key.changeText(actString),
            guess.changeText(constructRandomString(), empty = True),
            run_time = waitingTimes[0]
        )
        # slow phase: one key per play, synced with click sounds
        for t in waitingTimes[1:L]:
            actString = "000..." + '{0:05b}'.format((cnt % 32))
            self.add_sound(random_click_file(), time_offset=cumTimes[cnt]- cumTimes[1])
            anims.append(
                Succession(
                    Wait(cumTimes[cnt]- cumTimes[1]),
                    AnimationGroup(
                        key.changeText(actString),
                        guess.changeText(constructRandomString()),
                        run_time = t
                    )
                )
            )
            cnt += 1
        # fast forward
        for big in range(5):
            for _ in range(invMinTime):
                actString = '{0:03b}'.format(big) + "..."
                for _ in range(5):
                    actString += random.choice(["0", "1"])
                self.add_sound(random_click_file(), time_offset=cumTimes[cnt]- cumTimes[1])
                anims.append(
                    Succession(
                        Wait(cumTimes[cnt] - cumTimes[1]),
                        AnimationGroup(
                            key.changeText(actString),
                            guess.changeText(constructRandomString()),
                            run_time = minTime
                        )
                    )
                )
                cnt += 1
        # we found the correct key
        self.add_sound(random_click_file(), time_offset=cumTimes[cnt]- cumTimes[1])
        anims.append(
            Succession(
                Wait(cumTimes[cnt] - cumTimes[1]),
                AnimationGroup(
                    key.changeText(ourKeyString),
                    guess.changeText(strCipherText),
                    run_time = minTime
                )
            )
        )
        self.play(
            *anims
        )
        self.add_sound("audio/polylog_success.wav")
        self.play(
            Circumscribe(guess.border),
            Circumscribe(cipher.border)
        )
        self.wait()
        self.play(
            plain.remove(),
            plain.removeTag(),
            cipher.removeTag(),
            cipher.remove(),
            guess.remove(),
            key.remove()
        )
class TripleDes(Scene):
    """Scene: introducing Triple DES and collapsing it to Double DES."""
    def construct(self):
        """
        Narration:
        To fix this issue, people came up with a new cipher known as Triple DES,
        which is just the old DES but applied three times in a row with three
        different keys of combined length of 3*56 = 168 bits. But you may ask,
        why Triple DES and not just Double DES where you would apply the
        encryption function two times, each time with a different key. The
        combined length of the two keys would be 2*56 = 112 which is definitely
        enough so that bruteforce is not a possibility, because 2^112 is 10^30,
        which is just way too much.
        """
        # beginning of the scene
        self.next_section(skip_animations=False)
        DesTextPosition = 3*UP
        DesText = Tex(r"{{ }}{{DES}}", color = textColor).move_to(DesTextPosition)
        TripleDes = Tex(r"{{Triple }}{{DES}}", color = textColor).move_to(DesTextPosition)
        self.add(DesText)
        keyStrings = [
            constructRandomKeyString(len1 = 2, len2 = 2),
            constructRandomKeyString(len1 = 2, len2 = 2),
            constructRandomKeyString(len1 = 2, len2 = 2)
        ]
        plain = Btext(
            strPlainText,
            position = 2*DOWN + 6*LEFT,
            color = plainColor
        )
        self.play(Transform(DesText,TripleDes))
        self.wait()
        self.play(
            plain.create(tag = True, tagStr= "plain text")
        )
        self.wait()
        # three chained encryption stages: plain -> c0 -> c1 -> c2 (cipher)
        ciphers = [
            Btext(
                constructRandomString(),
                position = pos.copy(),
                width = plain.width,
                height = plain.height,
                color = col
            ) for pos, col in zip(cipherPositions, [textColor, textColor, cipherColor])
        ]
        keys = [
            Key(
                str,
                position = pos.copy(),
                keyTitle = "56 bits"
            ) for (pos, str) in zip(keyPositions, keyStrings)
        ]
        self.play(
            ciphers[0].create(),
            keys[0].createRectangleKey(),
            keys[0].createRedArrow(),
        )
        self.wait()
        self.play(
            ciphers[1].create(),
            keys[1].createRectangleKey(),
            keys[1].createRedArrow(),
        )
        self.wait()
        self.play(
            ciphers[2].create(tag = True, tagStr= "cipher text"),
            keys[2].createRectangleKey(),
            keys[2].createRedArrow(),
        )
        self.wait()
        # a second copy of the keys floats up to form the combined 168-bit key
        topKeys = [
            Key(
                str,
                position = pos.copy(),
                keyTitle = "56 bits"
            ) for (pos, str) in zip(keyPositions, keyStrings)
        ]
        self.play(*[key.createRectangleKey(noBrace=True) for key in topKeys], run_time = 0.01)
        self.wait()
        self.play(
            topKeys[0].shiftRec( 2.5*UP
                + (keys[1].position - keys[0].position)
                + (keys[0].border.get_width() + keys[1].border.get_width())/2 * LEFT
            ),
            topKeys[1].shiftRec(2.5*UP),
            topKeys[2].shiftRec(2.5*UP
                + (keys[1].position - keys[2].position)
                + (keys[2].border.get_width() + keys[1].border.get_width())/2 * RIGHT
            )
        )
        self.wait()
        newTitle = Tex(r"168 bit key", color = textColor).move_to(topKeys[1].title.get_center())
        newBrace = Brace(Group(topKeys[0].text, topKeys[2].text), UP, color = textColor).move_to(topKeys[1].brace.get_center())
        self.play(
            FadeOut(topKeys[0].title),
            FadeOut(topKeys[0].brace),
            FadeOut(topKeys[2].title),
            FadeOut(topKeys[2].brace),
            Transform(topKeys[1].title, newTitle),
            Transform(topKeys[1].brace, newBrace)
        )
        self.wait()
        #triple des -> double des
        DoubleDes = Tex("{{Double }}{{DES}}", color = textColor).move_to(DesText.get_center()+0.05*UP)
        txtShift = ciphers[1].border.get_center()[0]*RIGHT
        recShift = topKeys[1].border.get_center() - topKeys[1].border.get_left()
        newKeys = [[key.border.copy().shift(recShift), key.text.copy().shift(recShift)] for key in topKeys]
        newKeys[2][0].color = RED
        newKeys[2][1].color = RED
        newTitle = Tex(r"112 bit key", color = textColor).move_to(topKeys[1].title.get_center())
        newBrace = Brace(Group(newKeys[0][1], newKeys[1][1]), UP, color = textColor).move_to(topKeys[1].brace)
        self.next_section(skip_animations= False)
        # change from triple to double
        self.play(
            keys[1].remove(),
            ciphers[1].remove(),
            FadeOut(topKeys[1].border),
            Unwrite(topKeys[1].text),
            Transform(DesText, DoubleDes),
        )
        self.wait()
        shft = ciphers[1].border.get_center()[0] * LEFT
        shft2 = topKeys[0].border.get_center()[0]*LEFT
        self.play(
            ciphers[0].shift(-shft),
            keys[0].shiftRec(-shft/2),
            keys[2].shiftRec(shft/2),
            topKeys[0].shiftRec(shft2/2, noBrace = True),
            topKeys[2].shiftRec(-shft2/2, noBrace = True),
            Transform(topKeys[1].brace, newBrace),
            Transform(topKeys[1].title, newTitle),
        )
        self.wait()
        # return
        # self.play(
        #     FadeOut(ciphers[2].border), *[FadeOut(txt) for txt in ciphers[2].lines],
        #     FadeOut(topKeys[2].border), FadeOut(topKeys[2].text),
        #     FadeOut(keys[2].border), FadeOut(keys[2].text), FadeOut(keys[2].redArrow), FadeOut(keys[2].braceTitle),
        #     Transform(topKeys[1].title, newTitle),
        #     Transform(topKeys[1].brace, newBrace),
        # )
        # return
        # # shift to the right
        # self.play(
        #     *[txt.shift(txtShift) for txt in ciphers+[plain]],
        #     *[key.shiftRec(txtShift) for key in keys],
        #     *flatten([
        #         [Transform(key.border, border),
        #         Transform(key.text,text)]
        #         for (key, [border, text]) in zip(topKeys, newKeys)
        #     ]),
        #     ciphers[2].tagText.animate().shift(-txtShift)
        # )
        # self.play(
        #     FadeOut(ciphers[2].tagText),
        #     ciphers[1].addTag("cipher text"),
        #     run_time = 0.001
        # )
        # self.wait()
        # highlight arrows
        self.remove(
            keys[0].redArrow,
            keys[2].redArrow
        )
        self.play(
            keys[0].createRedArrow(),
            Circumscribe(ciphers[0].border),
        )
        self.wait()
        self.play(
            Circumscribe(ciphers[2].border),
            keys[2].createRedArrow()
        )
        self.wait()
        # write down the calculation
        # txt = Group(
        #     Tex(
        #         r"$2 \cdot 56 = 112$ bits",
        #         color = textColor
        #     ),
        txt = Tex(
            r"$2^{112} \approx 10^{34}$",
            color = textColor
        ).move_to(5*RIGHT + 2*UP)
        self.play(
            Write(txt)
        )
        self.wait()
        # move to the middle
        self.play(
            Unwrite(txt),
            plain.move_to(posPlain.copy()+midDiagramPos - bottomDiagramPos),
            ciphers[2].move_to(posFinal+midDiagramPos - bottomDiagramPos),
            keys[0].remove(),
            keys[2].remove(),
            topKeys[0].removeRec(noBrace=True),
            topKeys[2].removeRec(noBrace=True),
            ciphers[0].remove(),
            Uncreate(topKeys[1].brace),
            Unwrite(topKeys[1].title),
        )
        self.wait()
        # self.play(
        #     Unwrite(txt),
        #     topKeys[0].removeRec(noBrace=True),
        #     topKeys[1].removeRec(),
        #     keys[0].changeTextandSize(zeroString, shift = midDiagramPos - bottomDiagramPos + 0.5 * LEFT),
        #     keys[1].changeTextandSize(zeroString, shift = midDiagramPos - bottomDiagramPos + 0.5 * RIGHT),
        #     plain.shift(midDiagramPos - bottomDiagramPos + 1 * LEFT),
        #     ciphers[0].shift(midDiagramPos - bottomDiagramPos),
        #     ciphers[1].shift(midDiagramPos - bottomDiagramPos + 1 * RIGHT),
        # )
        # # bruteforce animation
        # self.next_section(skip_animations=False)
        # for i in range(1):
        #     for big in range(4):
        #         for _ in range(invMinTime):
        #             actString = '{0:03b}'.format(big) + "..."
        #             for _ in range(5):
        #                 actString += random.choice(["0", "1"])
        #             self.play(
        #                 keys[1].changeText(actString),
        #                 ciphers[1].changeText(constructRandomString()),
        #                 run_time = minTime/2
        #             )
        #             self.wait(minTime/2)
        #     self.play(
        #         keys[0].changeText("000..." + '{0:05b}'.format(i+1)),
        #         ciphers[0].changeText(constructRandomString()),
        #         run_time = 0.01
        #     )
        self.wait()
class DesMITM(Scene):
    """Scene: the meet-in-the-middle attack on Double DES."""
    def construct(self):
        """
        Narration:
        But, in fact, the Double DES is not that much safer than the ordinary DES!
        To break it, if you know a plain text and the respective cypher text,
        you will first start yet again by bruteforcing all 2^56 keys and compute all possible encrypted texts.
        You save them in memory -- you will need at least millions of gigabytes but remember, you are the military
        -- and then, as in the case of the cube, you start working from the other side.
        You use the decryption function on the ciphertext with all possible keys.
        Whenever you decrypt the message, you compare it with the huge database you computed in the first step.
        This comparison can be done very quickly if you put the strings you computed in a hash table.
        You are again iterating over all 2^56 possible keys, until you find the decrypted string in the database,
        which gives you the two keys used in the cipher.
        """
        # beginning of the scene
        self.next_section(skip_animations=False)
        global posPlain, posFinal
        # in-place += mutates the global numpy position vectors for later use
        for val in [posPlain, posFinal]:
            val += midDiagramPos - bottomDiagramPos
        posInter = 0.6 * posPlain + 0.4 * posFinal
        posKey = (posPlain + posInter) / 2
        posInter2 = 0.4 * posPlain + 0.6 * posFinal
        posKey2 = (posFinal + posInter2) / 2
        DesTextPosition = 3*UP
        DesText = Tex(r"Double DES", color = textColor).move_to(DesTextPosition)
        self.add(DesText)
        plain = Btext(
            strPlainText,
            position = posPlain,
            color = plainColor
        )
        self.play(
            plain.create(tag = True, tagStr="plain text"),
        )
        self.wait()
        cipher = Btext(
            constructRandomString(),
            position = posFinal,
            width = plain.width,
            height = plain.height,
            color = cipherColor
        )
        self.play(
            cipher.create(tag = True, tagStr="cipher text"),
        )
        self.wait()
        # generating all intermediate strings in a table
        key = Key(zeroString, position = posKey)
        self.play(
            key.createRectangleKey(),
            key.createRedArrow()
        )
        self.wait()
        # lay out a w x h grid of intermediate texts (the "database")
        topLeft = topDiagramPos + 1*RIGHT
        databasePositions = []
        databaseInters = []
        keyStrings = []
        w = 10
        h = 6
        for i in range(h):
            for j in range(w):
                databasePositions.append(topLeft + i * 0.7 * DOWN + j * 0.3 * RIGHT)
        for i in range(h*w):
            keyStrings.append(constructRandomKeyString(
                len1 = 3,
                len2 = 5,
                prefix = int(((2 ** 3)*i)/(1.0*h*w)),
                suffix = None if i<h*w-1 else (2 ** 5 - 1)
            ))
        self.wait()
        anims = []
        cum_time = 0
        for it, (pos, keyString) in tqdm(list(enumerate(zip(databasePositions, keyStrings)))):
            curInter = Btext(
                #strPlainText,
                constructRandomString(),
                position = posInter,
                width = plain.width,
                height = plain.height,
                fill_color = config.background_color,
                fill_opacity = 1
            )
            databaseInters.append(curInter)
            anim = Succession(
                AnimationGroup(
                    curInter.create(),
                    run_time = 0.01
                ),
                # AnimationGroup(
                #     Wait()
                # ),
                AnimationGroup(
                    AnimationGroup(
                        curInter.move_to(pos),
                        run_time = 0.3
                    ),
                    AnimationGroup(
                        key.changeText(keyString),
                        run_time = minTime
                    ),
                    lag_ratio = 0.0
                )
            )
            anims.append(anim)
            self.add_sound(random_click_file(), time_offset=cum_time)
            cum_time += minTime * anim.run_time
        self.play(AnimationGroup(
            *anims,
            lag_ratio = minTime
        ))
        self.wait()
        self.next_section(skip_animations=False)
        # key disappears and database shifts
        shft = 5*LEFT
        self.play(
            key.remove()
        )
        self.play(
            *[
                bt.shift(shft)
                for bt in databaseInters
            ],
        )
        self.wait()
        # add brace
        databaseBrace = Brace(Group(databaseInters[-1].border, databaseInters[-1-w].border),RIGHT,color = textColor )
        #databaseBrace = Brace(databaseInters[w*h-1].border, RIGHT),
        databaseBraceText = Tex(r"$2^{56} \approx 10^{17}$ intermediate texts", color = textColor, font_size = fontSize).move_to(databaseBrace.get_center()).next_to(databaseBrace, RIGHT)
        databaseBraceGroup = Group(databaseBrace, databaseBraceText)
        self.play(
            Create(databaseBrace),
            Write(databaseBraceText)
        )
        self.wait()
        # blue key appears
        key2 = Key(
            zeroString,
            position = posKey2
        )
        inter = Btext(
            constructRandomString(),
            position = posInter2,
            width = plain.width,
            height = plain.height
        )
        self.play(
            key2.createRectangleKey(),
            key2.createBlueArrow(),
            inter.create()
        )
        self.wait()
        # trying blue keys
        # first go one by one
        waitingTimes = []
        L = 0
        for i in range(L):
            waitingTimes.append(
                max((1.0 - (i * 1.0 / L)), minTime)
            )
        for i in range(1000):
            waitingTimes.append(minTime)
        cumTimes = np.cumsum(np.array(waitingTimes))
        np.insert(cumTimes, 0, 0)
        anims = []
        cnt = 0
        for t in waitingTimes[:L]:
            actString = "000..." + '{0:05b}'.format((cnt % 32))
            self.add_sound(random_click_file(), time_offset=cumTimes[cnt])
            anims.append(
                Succession(
                    Wait(cumTimes[cnt]),
                    AnimationGroup(
                        key2.changeText(actString),
                        inter.changeText(constructRandomString()),
                        run_time = t
                    )
                )
            )
            cnt += 1
        # fast forward
        for big in range(6):
            for _ in range(invMinTime):
                actString = '{0:03b}'.format(big) + "..."
                for _ in range(5):
                    actString += random.choice(["0", "1"])
                self.add_sound(random_click_file(), time_offset=cumTimes[cnt])
                anims.append(
                    Succession(
                        Wait(cumTimes[cnt]),
                        AnimationGroup(
                            key2.changeText(actString),
                            inter.changeText(constructRandomString()),
                            run_time = minTime
                        )
                    )
                )
                cnt += 1
        # we found the correct key
        hit = (2*w*h) // 3 + 2
        strLinesHit = databaseInters[hit].strLines.copy()
        inter2 = databaseInters[hit]
        # inter2 = Btext(
        #     strLinesHit,
        #     position = databaseInters[hit].position,
        #     width = databaseInters[hit].width,
        #     height = databaseInters[hit].height,
        #     fill_color = config.background_color,
        #     fill_opacity = 1
        # )
        anims.append(
            Succession(
                Wait(cumTimes[cnt]),
                AnimationGroup(
                    key2.changeText("110...10010"),
                    inter.changeText(strLinesHit),
                    run_time = minTime
                )
            )
        )
        self.play(*anims)
        self.wait()
        #fade in the correct text
        inter2.position = databaseInters[hit].position
        inter2.border.move_to(inter2.position)
        inter2.lines.move_to(inter2.position)
        # self.play(
        #     FadeIn(inter2.border),
        #     AnimationGroup(
        #         *[FadeIn(text) for text in inter2.lines],
        #         #lag_ratio = 0.2
        #     ),
        #     run_time = 1
        # )
        self.next_section(skip_animations=False)
        anims = []
        # pop the matching entry out of the grid; everything else stays put
        for it, inte in enumerate(databaseInters):
            if it == hit:
                anims.append(
                    inte.shift(2.4*UP)
                )
            else:
                anims.append(
                    inte.shift(0*UP)
                )
        self.play(
            *anims
        )
        self.wait()
        self.add_sound("audio/polylog_success.wav")
        self.play(
            Circumscribe(inter.border),
            Circumscribe(inter2.border)
        )
        self.wait()
        # remove all other texts
        self.play(
            *[datInter.remove() for datInter in databaseInters],
            inter2.move_to(inter2.position),
            Unwrite(databaseBraceText),
            Uncreate(databaseBrace)
        )
        self.play(
            inter.move_to((plain.position + cipher.position)/2),
            key2.moveRec(plain.position/4 + cipher.position*3.0/4),
            inter2.move_to((plain.position + cipher.position)/2 ),
        )
        self.wait()
        key = Key(
            constructRandomKeyString(),
            position = plain.position*3.0/4 + cipher.position/4
        )
        self.play(
            key.createRectangleKey(),
            key.createRedArrow()
        )
        self.wait()
        self.play(
            key2.removeBlueArrow(),
            key.removeRedArrow()
        )
        self.wait()
        # remove everything
        self.play(
            plain.remove(),
            plain.removeTag(),
            inter.remove(),
            inter2.remove(),
            cipher.remove(),
            cipher.removeTag(),
            key.remove(),
            key2.remove(),
            Unwrite(DesText)
        )
        self.wait()
def constructTextBorder(insideObject = None, position = np.array([0, 0, 0]), width = None, height = None, color = borderColor, fill_color = None, fill_opacity = 0.0):
    """Build the stylised rounded border (an ArcPolygon) for a text panel.

    If *insideObject* is given, the border's size is derived from its
    bounding box plus padding and *position* is recomputed to its top-left
    corner; otherwise the explicit *position* (top-left corner), *width*
    and *height* are used as supplied.
    """
    if insideObject is not None:
        topPadding = 0.1 * padding
        if width is None:
            width = (insideObject.get_right() - insideObject.get_left())[0] + padding
        if height is None:
            height = (insideObject.get_top() - insideObject.get_bottom())[1] + padding + topPadding
        # NOTE(review): `+ topPadding` adds a scalar to a coordinate vector,
        # shifting every component equally — possibly `topPadding * UP` was
        # intended; kept as-is to preserve the rendered layout. TODO confirm.
        position = insideObject.get_center() + topPadding + width/2 * LEFT + (height/2 + topPadding ) * UP
    # corners of the border rectangle (position is the top-left corner)
    topleft = position
    topright= topleft + width * RIGHT
    bottomleft = position + height * DOWN
    bottomright= bottomleft + width * RIGHT
    d = width / 10   # radius of the small rounded corners
    D = width / 4    # size of the large decorative cut at the top-right
    infty = 10000000 # huge radius makes an arc effectively a straight edge
    noAngle = {'radius': infty, 'color': color}
    dAngle = {'radius': d, 'color': color}
    DAngle = {'radius': D*0.8, 'color': color}
    rec = ArcPolygon(
        topright + D * LEFT,
        topleft + d * RIGHT,
        topleft + d * DOWN,
        bottomleft + d * UP,
        bottomleft + d * RIGHT,
        bottomright + d * LEFT,
        bottomright + d * UP,
        topright + D * DOWN,
        topright + D * LEFT,
        topright + D * DOWN,
        color = color,
        arc_config = [
            noAngle.copy(),
            dAngle.copy(),
            noAngle.copy(),
            dAngle.copy(),
            noAngle.copy(),
            dAngle.copy(),
            noAngle.copy(),
            noAngle.copy(),
            DAngle.copy(),
            noAngle.copy()
        ],
        fill_color = fill_color,
        fill_opacity = fill_opacity
    )
    return rec
def constructKey(position = np.array([0, 0, 0]), granularity = 100, width = 1, color = [keyColor, keyColor2]):
    """Build the key clipart: returns (key outline Polygon, inner line Polygon,
    inner Circle), centered at *position* and scaled to *width*.

    *granularity* controls how many points approximate the round head of the
    key. NOTE(review): the list default for *color* is a mutable default
    argument; it is only read here, so it is harmless in practice.
    """
    #right part of the key
    # hand-digitised outline coordinates of the key's toothed right part
    key = [
        np.array([262.968, 373.851, 0]),
        np.array([285.022, 362.026, 0]),
        np.array([301.516, 361.804, 0]),
        np.array([301.806, 396.862, 0]),
        np.array([336.07, 396.501, 0]),
        np.array([358.072, 375.943, 0]),
        np.array([385.122, 404.436, 0]),
        np.array([408.927, 381.353, 0]),
        np.array([430.928, 381.353, 0]),
        np.array([457.979, 406.961, 0]),
        np.array([476.374, 390.009, 0]),
        np.array([500.539, 390.37, 0]),
        np.array([544.542, 432.569, 0]),
        np.array([538.771, 446.275, 0]),
        np.array([528.311, 453.488, 0]),
        np.array([510.277, 461.784, 0]),
        np.array([302.167, 461.062, 0]),
        np.array([301.806, 494.245, 0]),
        np.array([285.215, 494.245, 0]),
        np.array([260.219, 474.218, 0])]
    # NOTE(review): this initial value is dead — `offset` is recomputed below
    # from the outline's bounding box before it is ever used.
    offset = key[0].copy()
    mid = np.array([164.406, 421.883, 0])  # center of the round head
    # adding the circly left part
    midx = mid[0]
    midy = mid[1]
    fstx = key[len(key)-1][0]
    fsty = key[len(key)-1][1]
    lastx = key[0][0]
    lasty = key[0][1]
    fstangle = math.atan((fsty - midy) / (fstx - midx))
    lastangle = math.atan((lasty - midy) / (lastx - midx))
    piangle = 3.141592654
    r = math.sqrt((midx-fstx)*(midx-fstx) + (midy-fsty)*(midy-fsty))
    # sample the circular head in two half-sweeps joining the outline's ends
    for i in range(granularity):
        angle = piangle * i / granularity + fstangle * (granularity - i) / granularity
        vec = np.array([math.cos(angle), math.sin(angle), 0])
        key.append(mid + r * vec)
    for i in range(granularity):
        angle = lastangle * i / granularity - piangle * (granularity - i) / granularity
        vec = np.array([math.cos(angle), math.sin(angle), 0])
        key.append(mid + r * vec)
    #key.reverse()
    # compute offsets
    left = min([pnt[0] for pnt in key])
    right = max([pnt[0] for pnt in key])
    top = max([pnt[1] for pnt in key])
    bottom = min([pnt[1] for pnt in key])
    keyWidth = right - left
    offset = np.array([(left + right)/2, (top + bottom)/2, 0])
    # line in the key
    keyline = [
        np.array([539.002, 427.256, 0]),
        np.array([253.473, 425.757, 0]),
        np.array([286.295, 412.773, 0]),
        np.array([525.775, 414.571, 0])]
    # middle circle
    midcirc = np.array([109.552, 425.451, 0])
    radcirc = 25.2117 * width / keyWidth
    # normalise in place: center on the origin, scale to *width*, then translate
    for pnt in key + keyline + [midcirc]:
        pnt -= offset
        pnt *= width / keyWidth
        pnt += position
    return Polygon(*key, fill_opacity = 1, color = color[0]), \
        Polygon(*keyline, color = color[1]), \
        Circle(radius = radcirc, color = color[1]).move_to(midcirc)
| [
"numpy.insert",
"random.choice",
"random.randint",
"random.setstate",
"math.sqrt",
"random.seed",
"random.getstate",
"math.cos",
"numpy.array",
"math.sin",
"math.atan"
] | [((387, 401), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (398, 401), False, 'import random\n'), ((1479, 1496), 'random.getstate', 'random.getstate', ([], {}), '()\n', (1494, 1496), False, 'import random\n'), ((3101, 3118), 'random.getstate', 'random.getstate', ([], {}), '()\n', (3116, 3118), False, 'import random\n'), ((1859, 1876), 'random.getstate', 'random.getstate', ([], {}), '()\n', (1874, 1876), False, 'import random\n'), ((1878, 1906), 'random.setstate', 'random.setstate', (['rng_state_1'], {}), '(rng_state_1)\n', (1893, 1906), False, 'import random\n'), ((2278, 2295), 'random.getstate', 'random.getstate', ([], {}), '()\n', (2293, 2295), False, 'import random\n'), ((2297, 2323), 'random.setstate', 'random.setstate', (['old_state'], {}), '(old_state)\n', (2312, 2323), False, 'import random\n'), ((3233, 3250), 'random.getstate', 'random.getstate', ([], {}), '()\n', (3248, 3250), False, 'import random\n'), ((3252, 3280), 'random.setstate', 'random.setstate', (['rng_state_2'], {}), '(rng_state_2)\n', (3267, 3280), False, 'import random\n'), ((3587, 3604), 'random.getstate', 'random.getstate', ([], {}), '()\n', (3602, 3604), False, 'import random\n'), ((3606, 3632), 'random.setstate', 'random.setstate', (['old_state'], {}), '(old_state)\n', (3621, 3632), False, 'import random\n'), ((35467, 35486), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (35475, 35486), True, 'import numpy as np\n'), ((36961, 36980), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (36969, 36980), True, 'import numpy as np\n'), ((37815, 37846), 'numpy.array', 'np.array', (['[164.406, 421.883, 0]'], {}), '([164.406, 421.883, 0])\n', (37823, 37846), True, 'import numpy as np\n'), ((38025, 38065), 'math.atan', 'math.atan', (['((fsty - midy) / (fstx - midx))'], {}), '((fsty - midy) / (fstx - midx))\n', (38034, 38065), False, 'import math\n'), ((38080, 38122), 'math.atan', 'math.atan', (['((lasty - midy) / (lastx - midx))'], {}), '((lasty - midy) 
/ (lastx - midx))\n', (38089, 38122), False, 'import math\n'), ((38154, 38226), 'math.sqrt', 'math.sqrt', (['((midx - fstx) * (midx - fstx) + (midy - fsty) * (midy - fsty))'], {}), '((midx - fstx) * (midx - fstx) + (midy - fsty) * (midy - fsty))\n', (38163, 38226), False, 'import math\n'), ((38856, 38909), 'numpy.array', 'np.array', (['[(left + right) / 2, (top + bottom) / 2, 0]'], {}), '([(left + right) / 2, (top + bottom) / 2, 0])\n', (38864, 38909), True, 'import numpy as np\n'), ((39116, 39147), 'numpy.array', 'np.array', (['[109.552, 425.451, 0]'], {}), '([109.552, 425.451, 0])\n', (39124, 39147), True, 'import numpy as np\n'), ((3956, 3975), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3964, 3975), True, 'import numpy as np\n'), ((7284, 7303), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (7292, 7303), True, 'import numpy as np\n'), ((18318, 18343), 'numpy.insert', 'np.insert', (['cumTimes', '(0)', '(0)'], {}), '(cumTimes, 0, 0)\n', (18327, 18343), True, 'import numpy as np\n'), ((32260, 32285), 'numpy.insert', 'np.insert', (['cumTimes', '(0)', '(0)'], {}), '(cumTimes, 0, 0)\n', (32269, 32285), True, 'import numpy as np\n'), ((37084, 37115), 'numpy.array', 'np.array', (['[262.968, 373.851, 0]'], {}), '([262.968, 373.851, 0])\n', (37092, 37115), True, 'import numpy as np\n'), ((37119, 37150), 'numpy.array', 'np.array', (['[285.022, 362.026, 0]'], {}), '([285.022, 362.026, 0])\n', (37127, 37150), True, 'import numpy as np\n'), ((37154, 37185), 'numpy.array', 'np.array', (['[301.516, 361.804, 0]'], {}), '([301.516, 361.804, 0])\n', (37162, 37185), True, 'import numpy as np\n'), ((37189, 37220), 'numpy.array', 'np.array', (['[301.806, 396.862, 0]'], {}), '([301.806, 396.862, 0])\n', (37197, 37220), True, 'import numpy as np\n'), ((37224, 37254), 'numpy.array', 'np.array', (['[336.07, 396.501, 0]'], {}), '([336.07, 396.501, 0])\n', (37232, 37254), True, 'import numpy as np\n'), ((37258, 37289), 'numpy.array', 'np.array', 
(['[358.072, 375.943, 0]'], {}), '([358.072, 375.943, 0])\n', (37266, 37289), True, 'import numpy as np\n'), ((37293, 37324), 'numpy.array', 'np.array', (['[385.122, 404.436, 0]'], {}), '([385.122, 404.436, 0])\n', (37301, 37324), True, 'import numpy as np\n'), ((37328, 37359), 'numpy.array', 'np.array', (['[408.927, 381.353, 0]'], {}), '([408.927, 381.353, 0])\n', (37336, 37359), True, 'import numpy as np\n'), ((37363, 37394), 'numpy.array', 'np.array', (['[430.928, 381.353, 0]'], {}), '([430.928, 381.353, 0])\n', (37371, 37394), True, 'import numpy as np\n'), ((37398, 37429), 'numpy.array', 'np.array', (['[457.979, 406.961, 0]'], {}), '([457.979, 406.961, 0])\n', (37406, 37429), True, 'import numpy as np\n'), ((37433, 37464), 'numpy.array', 'np.array', (['[476.374, 390.009, 0]'], {}), '([476.374, 390.009, 0])\n', (37441, 37464), True, 'import numpy as np\n'), ((37468, 37498), 'numpy.array', 'np.array', (['[500.539, 390.37, 0]'], {}), '([500.539, 390.37, 0])\n', (37476, 37498), True, 'import numpy as np\n'), ((37502, 37533), 'numpy.array', 'np.array', (['[544.542, 432.569, 0]'], {}), '([544.542, 432.569, 0])\n', (37510, 37533), True, 'import numpy as np\n'), ((37537, 37568), 'numpy.array', 'np.array', (['[538.771, 446.275, 0]'], {}), '([538.771, 446.275, 0])\n', (37545, 37568), True, 'import numpy as np\n'), ((37572, 37603), 'numpy.array', 'np.array', (['[528.311, 453.488, 0]'], {}), '([528.311, 453.488, 0])\n', (37580, 37603), True, 'import numpy as np\n'), ((37607, 37638), 'numpy.array', 'np.array', (['[510.277, 461.784, 0]'], {}), '([510.277, 461.784, 0])\n', (37615, 37638), True, 'import numpy as np\n'), ((37642, 37673), 'numpy.array', 'np.array', (['[302.167, 461.062, 0]'], {}), '([302.167, 461.062, 0])\n', (37650, 37673), True, 'import numpy as np\n'), ((37677, 37708), 'numpy.array', 'np.array', (['[301.806, 494.245, 0]'], {}), '([301.806, 494.245, 0])\n', (37685, 37708), True, 'import numpy as np\n'), ((37712, 37743), 'numpy.array', 'np.array', (['[285.215, 
494.245, 0]'], {}), '([285.215, 494.245, 0])\n', (37720, 37743), True, 'import numpy as np\n'), ((37747, 37778), 'numpy.array', 'np.array', (['[260.219, 474.218, 0]'], {}), '([260.219, 474.218, 0])\n', (37755, 37778), True, 'import numpy as np\n'), ((38946, 38977), 'numpy.array', 'np.array', (['[539.002, 427.256, 0]'], {}), '([539.002, 427.256, 0])\n', (38954, 38977), True, 'import numpy as np\n'), ((38981, 39012), 'numpy.array', 'np.array', (['[253.473, 425.757, 0]'], {}), '([253.473, 425.757, 0])\n', (38989, 39012), True, 'import numpy as np\n'), ((39016, 39047), 'numpy.array', 'np.array', (['[286.295, 412.773, 0]'], {}), '([286.295, 412.773, 0])\n', (39024, 39047), True, 'import numpy as np\n'), ((39051, 39082), 'numpy.array', 'np.array', (['[525.775, 414.571, 0]'], {}), '([525.775, 414.571, 0])\n', (39059, 39082), True, 'import numpy as np\n'), ((1669, 1691), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1682, 1691), False, 'import random\n'), ((2116, 2143), 'random.choice', 'random.choice', (['cached_lines'], {}), '(cached_lines)\n', (2129, 2143), False, 'import random\n'), ((3344, 3369), 'random.choice', 'random.choice', (["['0', '1']"], {}), "(['0', '1'])\n", (3357, 3369), False, 'import random\n'), ((3491, 3516), 'random.choice', 'random.choice', (["['0', '1']"], {}), "(['0', '1'])\n", (3504, 3516), False, 'import random\n'), ((3700, 3720), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3714, 3720), False, 'import random\n'), ((18292, 18314), 'numpy.array', 'np.array', (['waitingTimes'], {}), '(waitingTimes)\n', (18300, 18314), True, 'import numpy as np\n'), ((32234, 32256), 'numpy.array', 'np.array', (['waitingTimes'], {}), '(waitingTimes)\n', (32242, 32256), True, 'import numpy as np\n'), ((38351, 38366), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (38359, 38366), False, 'import math\n'), ((38368, 38383), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (38376, 38383), False, 'import math\n'), 
((38553, 38568), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (38561, 38568), False, 'import math\n'), ((38570, 38585), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (38578, 38585), False, 'import math\n'), ((19186, 19211), 'random.choice', 'random.choice', (["['0', '1']"], {}), "(['0', '1'])\n", (19199, 19211), False, 'import random\n'), ((32837, 32862), 'random.choice', 'random.choice', (["['0', '1']"], {}), "(['0', '1'])\n", (32850, 32862), False, 'import random\n')] |
import os
import numpy as np
import nilearn as nl
import nilearn.plotting
from math import floor
import numpy as np
import torch
import torch.utils.data as data
from torch import Tensor
from typing import Optional
def load_scenes(path: str) -> list:
with open(path, "r") as f:
result = []
for line in f:
parts = line.strip().split(',')
t = float(parts[0])
name = parts[1][1:-1]
is_day = parts[2] == "\"DAY\""
is_exterior = parts[3] == "\"INT\""
result.append((t, name, is_day, is_exterior))
return result
def calc_scene_examples(scenes: list,
num_frames: int):
frame_dur_sec = 2.0
scene_examples = []
for i in range(len(scenes)):
t0 = scenes[i+0][0]
if i == len(scenes) - 1:
t1 = 3599.0 * frame_dur_sec
else:
t1 = scenes[i+1][0]
dur_sec = t1 - t0
num_examples = dur_sec / frame_dur_sec - num_frames + 1
num_examples = floor(num_examples)
num_examples = max(0, num_examples)
scene_examples.append(num_examples)
return scene_examples
class ForrestGumpRawDataset(data.Dataset):
""" Forrest Gump fMRI dataset from OpenNeuro. BOLD imagery acquired
at 0.5 Hz for the entire duration of the film are associated with
class labels assigned to each scene.
https://openneuro.org/datasets/ds000113/
Args:
root: Path to download directory, e.g. /data/ds000113-download
num_frames: Number of BOLD frames in an example. Note: each frame
is 2.0 seconds in duration.
offset_frames: Number of BOLD frames to delay between stimulation
and label assignment. Activity of interest may only be visible
after a short delay. Adjust this value so the apparent activity
correlates optimally with the stimulation. Note: fMRI by itself
has a delay on the order of seconds, so further offset may not
be necessary.
alignment: Optional alignment transformation geometry. Valid values
are "linear" and "nonlinear".
Labels:
0: The scene takes place indoors
1: The scene takes place outdoors
Reference:
<NAME>., <NAME>., <NAME>. et al. A high-resolution 7-Tesla
fMRI dataset from complex natural stimulation with an audio movie.
Sci Data 1, 140003 (2014). https://doi.org/10.1038/sdata.2014.3
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Estimating the Delay of the fMRI Response. NeuroImage,
Volume 16, Issue 3, Part A. 2002. Pages 593-606. ISSN 1053-8119.
https://doi.org/10.1006/nimg.2002.1096.
https://www.math.mcgill.ca/keith/delay/delay.pdf.
"""
FILE_DURATIONS = [902, 882, 876, 976, 924, 878, 1084, 676]
def __init__(self,
root: str,
num_frames: int = 8,
offset_frames: int = 0,
alignment: Optional[str] = None):
super(ForrestGumpRawDataset, self).__init__()
if offset_frames != 0:
raise NotImplementedError
self.root = root
self.num_frames = num_frames
self.scenes = load_scenes(os.path.join(
root, "stimuli", "annotations", "scenes.csv"))
if alignment is None:
self.data_dir = root
self.identifier = 'acq-raw'
elif alignment == 'linear':
self.data_dir = os.path.join(
root, 'derivatives', 'linear_anatomical_alignment')
self.identifier = 'rec-dico7Tad2grpbold7Tad'
elif alignment == 'nonlinear':
self.data_dir = os.path.join(
root, 'derivatives', 'non-linear_anatomical_alignment')
self.identifier = 'rec-dico7Tad2grpbold7TadNL'
else:
raise ValueError(f"unknown alignment value '{alignment}'")
subjects = [f for f in os.listdir(self.data_dir)
if f.startswith('sub-') and len(f) == 6 and int(f[len('sub-'):]) <= 20]
self.subjects = subjects
self.scene_examples = calc_scene_examples(self.scenes,
num_frames=num_frames)
self.examples_per_subject = sum(self.scene_examples)
def __getitem__(self, index):
frame_dur_sec = 2.0
example_dur = frame_dur_sec * self.num_frames
subj_no = int(floor(index / self.examples_per_subject))
subj = self.subjects[subj_no]
example_no = index % self.examples_per_subject
scene_no = 0
offset = 0
for scene, num_examples in zip(self.scenes, self.scene_examples):
if offset + num_examples > example_no:
break
offset += num_examples
scene_no += 1
scene_example = example_no - offset
scene = self.scenes[scene_no]
label = 1 if scene[3] else 0
start_time = scene[0] + frame_dur_sec * scene_example
end_time = start_time + example_dur
start_file = None
end_file = None
file_start_time = 0
for i, file_time in enumerate(self.FILE_DURATIONS):
file_end_time = file_start_time + file_time
if file_start_time <= start_time and start_time < file_end_time:
start_file = i+1
if file_start_time < end_time and end_time <= file_end_time:
end_file = i+1
if start_file is not None and end_file is not None:
break
file_start_time = file_end_time
if start_file is None:
raise ValueError("unable to seek start file")
if end_file is None:
raise ValueError("unable to seek end file")
if start_file != end_file:
start_img_time = sum(self.FILE_DURATIONS[:start_file])
start_dur = start_img_time - start_time
start_frames = int(start_dur / frame_dur_sec)
remainder = int(self.num_frames - start_frames)
start_path = f'{subj}_ses-forrestgump_task-forrestgump_{self.identifier}_run-0{start_file}_bold.nii.gz'
start_path = os.path.join(self.data_dir, subj,
'ses-forrestgump', 'func', start_path)
start_img = nl.image.load_img(start_path)
start_img = start_img.get_data()
start_img = start_img[:, :, :, start_img.shape[-1]-start_frames:]
start_img = np.transpose(start_img, (3, 2, 0, 1))
end_path = f'{subj}_ses-forrestgump_task-forrestgump_{self.identifier}_run-0{end_file}_bold.nii.gz'
end_path = os.path.join(self.data_dir, subj,
'ses-forrestgump', 'func', end_path)
end_img = nl.image.load_img(end_path)
end_img = end_img.get_data()
end_img = end_img[:, :, :, :remainder]
end_img = np.transpose(end_img, (3, 2, 0, 1))
img = np.concatenate([start_img, end_img], axis=0)
else:
filename = f'{subj}_ses-forrestgump_task-forrestgump_{self.identifier}_run-0{start_file}_bold.nii.gz'
filename = os.path.join(self.data_dir, subj,
'ses-forrestgump', 'func', filename)
img = nl.image.load_img(filename)
img = img.get_data()
img = img[:, :, :, scene_example:scene_example+self.num_frames]
img = np.transpose(img, (3, 2, 0, 1))
return (img, label)
def __len__(self):
return self.examples_per_subject * len(self.subjects)
if __name__ == '__main__':
ds = ForrestGumpRawDataset(
root='/data/openneuro/ds000113-download', alignment='linear')
#print(f'last: {ds[len(ds)-1][1]}')
for i in range(len(ds.subjects)):
print(
f'subject {i+1}: {ds[ds.examples_per_subject * i][1]}, {ds[ds.examples_per_subject * (i+1) - 1][1]}')
# for i, x in enumerate(ds):
# print(f'{i}. {x[1]}')
#i = len(ds) - 1
# while i >= 0:
# print(f'{i}. {ds[i][1]}')
# i -= 1
| [
"os.listdir",
"math.floor",
"nilearn.image.load_img",
"os.path.join",
"numpy.concatenate",
"numpy.transpose"
] | [((1030, 1049), 'math.floor', 'floor', (['num_examples'], {}), '(num_examples)\n', (1035, 1049), False, 'from math import floor\n'), ((3255, 3313), 'os.path.join', 'os.path.join', (['root', '"""stimuli"""', '"""annotations"""', '"""scenes.csv"""'], {}), "(root, 'stimuli', 'annotations', 'scenes.csv')\n", (3267, 3313), False, 'import os\n'), ((4449, 4489), 'math.floor', 'floor', (['(index / self.examples_per_subject)'], {}), '(index / self.examples_per_subject)\n', (4454, 4489), False, 'from math import floor\n'), ((6183, 6255), 'os.path.join', 'os.path.join', (['self.data_dir', 'subj', '"""ses-forrestgump"""', '"""func"""', 'start_path'], {}), "(self.data_dir, subj, 'ses-forrestgump', 'func', start_path)\n", (6195, 6255), False, 'import os\n'), ((6318, 6347), 'nilearn.image.load_img', 'nl.image.load_img', (['start_path'], {}), '(start_path)\n', (6335, 6347), True, 'import nilearn as nl\n'), ((6495, 6532), 'numpy.transpose', 'np.transpose', (['start_img', '(3, 2, 0, 1)'], {}), '(start_img, (3, 2, 0, 1))\n', (6507, 6532), True, 'import numpy as np\n'), ((6669, 6739), 'os.path.join', 'os.path.join', (['self.data_dir', 'subj', '"""ses-forrestgump"""', '"""func"""', 'end_path'], {}), "(self.data_dir, subj, 'ses-forrestgump', 'func', end_path)\n", (6681, 6739), False, 'import os\n'), ((6798, 6825), 'nilearn.image.load_img', 'nl.image.load_img', (['end_path'], {}), '(end_path)\n', (6815, 6825), True, 'import nilearn as nl\n'), ((6940, 6975), 'numpy.transpose', 'np.transpose', (['end_img', '(3, 2, 0, 1)'], {}), '(end_img, (3, 2, 0, 1))\n', (6952, 6975), True, 'import numpy as np\n'), ((6995, 7039), 'numpy.concatenate', 'np.concatenate', (['[start_img, end_img]'], {'axis': '(0)'}), '([start_img, end_img], axis=0)\n', (7009, 7039), True, 'import numpy as np\n'), ((7191, 7261), 'os.path.join', 'os.path.join', (['self.data_dir', 'subj', '"""ses-forrestgump"""', '"""func"""', 'filename'], {}), "(self.data_dir, subj, 'ses-forrestgump', 'func', filename)\n", (7203, 7261), 
False, 'import os\n'), ((7316, 7343), 'nilearn.image.load_img', 'nl.image.load_img', (['filename'], {}), '(filename)\n', (7333, 7343), True, 'import nilearn as nl\n'), ((7471, 7502), 'numpy.transpose', 'np.transpose', (['img', '(3, 2, 0, 1)'], {}), '(img, (3, 2, 0, 1))\n', (7483, 7502), True, 'import numpy as np\n'), ((3495, 3559), 'os.path.join', 'os.path.join', (['root', '"""derivatives"""', '"""linear_anatomical_alignment"""'], {}), "(root, 'derivatives', 'linear_anatomical_alignment')\n", (3507, 3559), False, 'import os\n'), ((3962, 3987), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (3972, 3987), False, 'import os\n'), ((3701, 3769), 'os.path.join', 'os.path.join', (['root', '"""derivatives"""', '"""non-linear_anatomical_alignment"""'], {}), "(root, 'derivatives', 'non-linear_anatomical_alignment')\n", (3713, 3769), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
import warnings
import skimage.segmentation
from patchwork._augment import SINGLE_AUG_FUNC
SEG_AUG_FUNCTIONS = ["flip_left_right", "flip_up_down", "rot90", "shear", "zoom_scale", "center_zoom_scale"]
def _get_segments(img, mean_scale=1000, num_samples=16, return_enough_segments=False):
"""
Wrapper for computing segments for an image. Inputs an image tensor and
returns a stack of binary segmentation masks.
:img: (H,W,C) image tensor
:mean_scale: average scale parameter for Felzenszwalb's algorithm. Actual
value will be sampled from (0.5*mean_scale, 1.5*mean_scale), and min_size
will be set to the scale.
:num_samples: number of segments to compute. if more segments are found than
num_samples, they'll be uniformly subsampled without replacement; if fewer
are found they'll be sampled with replacement.
:return_enough_segments: whether to include a Boolean indicator of whether
"""
# randomly choose the segmentation scale
scale = np.random.uniform(0.5*mean_scale, 1.5*mean_scale)
# run heuristic segmentation
segments = skimage.segmentation.felzenszwalb(img, scale=scale,
min_size=int(scale))
# sample a set of segmentations to use; bias toward larger ones
max_segment = segments.max()
indices = np.arange(max_segment+1)
seg_count = np.array([(segments == i).sum()+1 for i in indices])
p = seg_count/seg_count.sum()
# try this for error correction?
if num_samples <= max_segment:
sampled_indices = np.random.choice(indices, p=p, size=num_samples,
replace=False)
else:
warnings.warn("not enough unique segments; sampling WITH replacement")
sampled_indices = np.random.choice(indices, size=num_samples, replace=True)
# build normalized segment occupancy masks for each segment we choose
seg_tensor = np.stack([(segments == i)/seg_count[i] for i in sampled_indices],
-1).astype(np.float32)
if return_enough_segments:
enough_segs = num_samples <= max_segment
return seg_tensor, enough_segs
return seg_tensor
def _get_grid_segments(imshape, num_samples=16):
gridsize = int(np.sqrt(num_samples))
h,w = imshape
seg = np.zeros((h,w, gridsize**2), dtype=np.int64)
index = 0
dy = int(h/gridsize)
dx = int(w/gridsize)
for i in range(gridsize):
for j in range(gridsize):
seg[i*dy:(i+1)*dy, j*dx:(j+1)*dx,index] = 1
index += 1
return seg
def _segment_aug(img, seg, aug, outputsize=None):
"""
"""
num_channels = img.shape[-1]
imshape = (img.shape[0], img.shape[1])
x = tf.concat([img, tf.cast(seg, tf.float32)], -1)
for f in SEG_AUG_FUNCTIONS:
if f in aug:
x = SINGLE_AUG_FUNC[f](x, aug[f], imshape=imshape)
img_aug = x[:,:,:num_channels]
seg_aug = x[:,:,num_channels:]
if outputsize is not None:
seg_aug = tf.image.resize(seg_aug, outputsize, method="area")
# normalize segments
norm = tf.expand_dims(tf.expand_dims(tf.reduce_sum(seg_aug, [0,1]),0),0)
seg_aug /= (norm+1e-8)
return img_aug, seg_aug
def _filter_out_bad_segments(img1, seg1, img2, seg2):
"""
It's possible for shearing or scaling augmentation to sample
one segment completely out of the image- use this function
to filter out those cases
"""
minval = tf.reduce_min(tf.reduce_sum(seg1, [0,1])*tf.reduce_sum(seg2, [0,1]))
if minval < 0.5:
warnings.warn("filtering bad segment")
return False
else:
return True
def _prepare_embeddings(h, m):
"""
Combine FCN outputs with segmentation masks to build a batch of
mask-pooled hidden vectors. Represents the calculation of h_{m}
in the first equation in Henaff et al's paper
:h: batch of embeddings; (N,w,h,d)
:m: batch of NORMALIZED segmentation tensors; (N,w,h,num_samples)
Returns a tensor of shape (N*num_samples, d)
"""
d = h.shape[-1]
h = tf.expand_dims(h, 4)
m = tf.expand_dims(m, 3)
hm = tf.reduce_mean(h*m, [1,2])
return tf.reshape(hm, [-1, d])
def _prepare_mask(m1, m2):
"""
:m1, m2: masks of segmentation tensors; (N,w,h,num_samples)
Returns a mask of shape (N*num_samples, 1)
"""
m1_sum = tf.reduce_sum(m1, [1,2])
m2_sum = tf.reduce_sum(m2, [1,2])
return tf.reshape(m1_sum*m2_sum, [-1,1])
| [
"numpy.sqrt",
"tensorflow.reshape",
"tensorflow.image.resize",
"numpy.random.choice",
"tensorflow.reduce_sum",
"numpy.stack",
"numpy.zeros",
"tensorflow.reduce_mean",
"numpy.random.uniform",
"warnings.warn",
"tensorflow.expand_dims",
"tensorflow.cast",
"numpy.arange"
] | [((1088, 1141), 'numpy.random.uniform', 'np.random.uniform', (['(0.5 * mean_scale)', '(1.5 * mean_scale)'], {}), '(0.5 * mean_scale, 1.5 * mean_scale)\n', (1105, 1141), True, 'import numpy as np\n'), ((1424, 1450), 'numpy.arange', 'np.arange', (['(max_segment + 1)'], {}), '(max_segment + 1)\n', (1433, 1450), True, 'import numpy as np\n'), ((2402, 2449), 'numpy.zeros', 'np.zeros', (['(h, w, gridsize ** 2)'], {'dtype': 'np.int64'}), '((h, w, gridsize ** 2), dtype=np.int64)\n', (2410, 2449), True, 'import numpy as np\n'), ((4198, 4218), 'tensorflow.expand_dims', 'tf.expand_dims', (['h', '(4)'], {}), '(h, 4)\n', (4212, 4218), True, 'import tensorflow as tf\n'), ((4227, 4247), 'tensorflow.expand_dims', 'tf.expand_dims', (['m', '(3)'], {}), '(m, 3)\n', (4241, 4247), True, 'import tensorflow as tf\n'), ((4257, 4286), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(h * m)', '[1, 2]'], {}), '(h * m, [1, 2])\n', (4271, 4286), True, 'import tensorflow as tf\n'), ((4295, 4318), 'tensorflow.reshape', 'tf.reshape', (['hm', '[-1, d]'], {}), '(hm, [-1, d])\n', (4305, 4318), True, 'import tensorflow as tf\n'), ((4493, 4518), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['m1', '[1, 2]'], {}), '(m1, [1, 2])\n', (4506, 4518), True, 'import tensorflow as tf\n'), ((4531, 4556), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['m2', '[1, 2]'], {}), '(m2, [1, 2])\n', (4544, 4556), True, 'import tensorflow as tf\n'), ((4567, 4603), 'tensorflow.reshape', 'tf.reshape', (['(m1_sum * m2_sum)', '[-1, 1]'], {}), '(m1_sum * m2_sum, [-1, 1])\n', (4577, 4603), True, 'import tensorflow as tf\n'), ((1650, 1713), 'numpy.random.choice', 'np.random.choice', (['indices'], {'p': 'p', 'size': 'num_samples', 'replace': '(False)'}), '(indices, p=p, size=num_samples, replace=False)\n', (1666, 1713), True, 'import numpy as np\n'), ((1776, 1846), 'warnings.warn', 'warnings.warn', (['"""not enough unique segments; sampling WITH replacement"""'], {}), "('not enough unique segments; sampling WITH replacement')\n", 
(1789, 1846), False, 'import warnings\n'), ((1873, 1930), 'numpy.random.choice', 'np.random.choice', (['indices'], {'size': 'num_samples', 'replace': '(True)'}), '(indices, size=num_samples, replace=True)\n', (1889, 1930), True, 'import numpy as np\n'), ((2352, 2372), 'numpy.sqrt', 'np.sqrt', (['num_samples'], {}), '(num_samples)\n', (2359, 2372), True, 'import numpy as np\n'), ((3118, 3169), 'tensorflow.image.resize', 'tf.image.resize', (['seg_aug', 'outputsize'], {'method': '"""area"""'}), "(seg_aug, outputsize, method='area')\n", (3133, 3169), True, 'import tensorflow as tf\n'), ((3676, 3714), 'warnings.warn', 'warnings.warn', (['"""filtering bad segment"""'], {}), "('filtering bad segment')\n", (3689, 3714), False, 'import warnings\n'), ((2022, 2095), 'numpy.stack', 'np.stack', (['[((segments == i) / seg_count[i]) for i in sampled_indices]', '(-1)'], {}), '([((segments == i) / seg_count[i]) for i in sampled_indices], -1)\n', (2030, 2095), True, 'import numpy as np\n'), ((2837, 2861), 'tensorflow.cast', 'tf.cast', (['seg', 'tf.float32'], {}), '(seg, tf.float32)\n', (2844, 2861), True, 'import tensorflow as tf\n'), ((3236, 3266), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['seg_aug', '[0, 1]'], {}), '(seg_aug, [0, 1])\n', (3249, 3266), True, 'import tensorflow as tf\n'), ((3592, 3619), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['seg1', '[0, 1]'], {}), '(seg1, [0, 1])\n', (3605, 3619), True, 'import tensorflow as tf\n'), ((3619, 3646), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['seg2', '[0, 1]'], {}), '(seg2, [0, 1])\n', (3632, 3646), True, 'import tensorflow as tf\n')] |
"""
FID evaluation function used by misalignments.py
"""
import os
import shutil
import numpy as np
import torch
from pytorch_fid import fid_score
from generate_samples import save_individuals_v2
def fid_eval(generator,
real_data,
directory,
inception_net,
noise):
"""
Will compute FID of generated distribution from generator when compared to real_data.
INPUTS
- generator: A generator network to be evaluated.
- real_data: Path either of a folder with the test data against which the generated samples are to be compared, or
a .npz file containing the inception feature-space statistics of the test data (avoinding computing
those again).
- directory: Path of a folder where the generated samples will be saved (in a subfolder), as well as the inception
feature-space statistics of the test data.
- inception_net: An instance of the InceptionV3 network.
- noise: Generator noise inputs used to generate samples to be compared with the test data.
OUTPUTS
- fid: The computed FID
- real_data: The new path for test data, which will have changed if the real_data input specifed a folder containing
the test data.
"""
# ------------------------------- Setting up -------------------------------
# Choose device to use when evaluating FIDs
use_cuda = next(generator.parameters()).is_cuda
device = torch.device('cuda') if use_cuda else torch.device('cpu')
# ------------------------------- Generating fake data -------------------------------
# Make folder in which generated samples will be saved, deleting previous one if necessary
fake_samples_path = os.path.join(directory, 'samples_for_fid')
if os.path.exists(fake_samples_path):
shutil.rmtree(os.path.join(fake_samples_path))
os.makedirs(fake_samples_path)
fake = generator(noise)
save_individuals_v2(0,
data=fake,
ext='jpg',
path=fake_samples_path,
to_rgb=True if fake.shape[1]==1 else False)
# ------------------------------- Evaluating FID -------------------------------
real_mean, real_cov = fid_score.compute_statistics_of_path(real_data, inception_net, 100, 2048, device)
if not real_data.split('.')[-1] == 'npz':
real_data = os.path.join(directory, 'real_data_stats.npz')
np.savez(real_data, mu=real_mean, sigma=real_cov)
fake_mean, fake_cov = fid_score.compute_statistics_of_path(fake_samples_path, inception_net, 100, 2048, device)
fid = fid_score.calculate_frechet_distance(fake_mean, fake_cov, real_mean, real_cov)
return fid, real_data | [
"os.path.exists",
"generate_samples.save_individuals_v2",
"numpy.savez",
"os.makedirs",
"pytorch_fid.fid_score.calculate_frechet_distance",
"os.path.join",
"pytorch_fid.fid_score.compute_statistics_of_path",
"torch.device"
] | [((1798, 1840), 'os.path.join', 'os.path.join', (['directory', '"""samples_for_fid"""'], {}), "(directory, 'samples_for_fid')\n", (1810, 1840), False, 'import os\n'), ((1848, 1881), 'os.path.exists', 'os.path.exists', (['fake_samples_path'], {}), '(fake_samples_path)\n', (1862, 1881), False, 'import os\n'), ((1942, 1972), 'os.makedirs', 'os.makedirs', (['fake_samples_path'], {}), '(fake_samples_path)\n', (1953, 1972), False, 'import os\n'), ((2010, 2129), 'generate_samples.save_individuals_v2', 'save_individuals_v2', (['(0)'], {'data': 'fake', 'ext': '"""jpg"""', 'path': 'fake_samples_path', 'to_rgb': '(True if fake.shape[1] == 1 else False)'}), "(0, data=fake, ext='jpg', path=fake_samples_path, to_rgb\n =True if fake.shape[1] == 1 else False)\n", (2029, 2129), False, 'from generate_samples import save_individuals_v2\n'), ((2339, 2424), 'pytorch_fid.fid_score.compute_statistics_of_path', 'fid_score.compute_statistics_of_path', (['real_data', 'inception_net', '(100)', '(2048)', 'device'], {}), '(real_data, inception_net, 100, 2048,\n device)\n', (2375, 2424), False, 'from pytorch_fid import fid_score\n'), ((2622, 2715), 'pytorch_fid.fid_score.compute_statistics_of_path', 'fid_score.compute_statistics_of_path', (['fake_samples_path', 'inception_net', '(100)', '(2048)', 'device'], {}), '(fake_samples_path, inception_net, 100,\n 2048, device)\n', (2658, 2715), False, 'from pytorch_fid import fid_score\n'), ((2722, 2800), 'pytorch_fid.fid_score.calculate_frechet_distance', 'fid_score.calculate_frechet_distance', (['fake_mean', 'fake_cov', 'real_mean', 'real_cov'], {}), '(fake_mean, fake_cov, real_mean, real_cov)\n', (2758, 2800), False, 'from pytorch_fid import fid_score\n'), ((1525, 1545), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1537, 1545), False, 'import torch\n'), ((1563, 1582), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1575, 1582), False, 'import torch\n'), ((2487, 2533), 'os.path.join', 'os.path.join', 
(['directory', '"""real_data_stats.npz"""'], {}), "(directory, 'real_data_stats.npz')\n", (2499, 2533), False, 'import os\n'), ((2542, 2591), 'numpy.savez', 'np.savez', (['real_data'], {'mu': 'real_mean', 'sigma': 'real_cov'}), '(real_data, mu=real_mean, sigma=real_cov)\n', (2550, 2591), True, 'import numpy as np\n'), ((1905, 1936), 'os.path.join', 'os.path.join', (['fake_samples_path'], {}), '(fake_samples_path)\n', (1917, 1936), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
import csv
causes = ['OUTRAS', 'COVID', 'INSUFICIENCIA_RESPIRATORIA', 'PNEUMONIA', 'SEPTICEMIA', 'SRAG', 'INDETERMINADA']
for i in range(len(causes)):
vars()[causes[i]] = 0
firstRow = 1
with open('obitos-2020.csv', newline='') as csvfile:
originalfile = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in originalfile:
if firstRow:
firstRow = 0
else:
line = np.array(row)
if(line.size == 1):
cause = line[0].split(",")[1]
number = line[0].split(",")[5]
vars()[cause] += int(number)
elif(line.size == 2):
cause = line[0].split(",")[1]
number = line[1].split(",")[2]
vars()[cause] += int(number)
elif(line.size == 3):
cause = line[0].split(",")[1]
number = line[2].split(",")[2]
vars()[cause] += int(number)
deathCount = []
for i in range(len(causes)):
deathCount.append(vars()[causes[i]])
deathCount.sort()
causesSorted = causes[:]
for i in range(len(deathCount)):
for j in range(len(deathCount)):
if vars()[causes[j]] == deathCount[i]:
causesSorted[i] = causes[j]
break
plt.rcdefaults()
fig, ax = plt.subplots()
y_pos = np.arange(len(causesSorted))
ax.barh(y_pos, deathCount, align='center')
ax.set_yticks(y_pos)
ax.set_yticklabels(causesSorted)
plt.show() | [
"numpy.array",
"csv.reader",
"matplotlib.pyplot.rcdefaults",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1340, 1356), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (1354, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1367, 1381), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1379, 1381), True, 'import matplotlib.pyplot as plt\n'), ((1518, 1528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1526, 1528), True, 'import matplotlib.pyplot as plt\n'), ((317, 366), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """', 'quotechar': '"""|"""'}), "(csvfile, delimiter=' ', quotechar='|')\n", (327, 366), False, 'import csv\n'), ((475, 488), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (483, 488), True, 'import numpy as np\n')] |
"""
Copyright (C) 2019 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
from functools import partial
from keras.models import Model
from keras.regularizers import L1L2
from keras.optimizers import Adam, RMSprop, SGD
from keras.layers.normalization import BatchNormalization
from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D
from ame_starter.models.losses import *
from ame_starter.models.mixture_soft_attention import MixtureSoftAttention
class ModelBuilder(object):
@staticmethod
def compile_model(model, learning_rate, optimizer="adam", loss_weights=list([1.0, 1.0, 1.0, 1.0]),
main_loss="mse", extra_loss=None, metrics={}, gradient_clipping_threshold=100):
losses = main_loss
if loss_weights is not None:
losses = [losses] * len(loss_weights)
if extra_loss is not None:
if isinstance(extra_loss, list):
for i in range(1, 1 + len(extra_loss)):
losses[i] = extra_loss[i - 1]
else:
losses[1] = extra_loss
if optimizer == "rmsprop":
opt = RMSprop(lr=learning_rate, clipvalue=gradient_clipping_threshold)
elif optimizer == "sgd":
opt = SGD(lr=learning_rate, nesterov=True, momentum=0.9, clipvalue=gradient_clipping_threshold)
else:
opt = Adam(lr=learning_rate, clipvalue=gradient_clipping_threshold)
model.compile(loss=losses,
loss_weights=loss_weights,
optimizer=opt,
metrics=metrics)
return model
@staticmethod
def build_mlp(input_layer, num_units=16, activation="selu", n_layers=1, p_dropout=0.0,
l2_weight=0.0, with_bn=False, with_bias=True, **kwargs):
last_layer = input_layer
for i in range(n_layers):
last_layer = Dense(num_units,
kernel_regularizer=L1L2(l2=l2_weight),
bias_regularizer=L1L2(l2=l2_weight),
use_bias=with_bias)(last_layer)
if with_bn:
last_layer = BatchNormalization(beta_regularizer=L1L2(l2=l2_weight),
gamma_regularizer=L1L2(l2=l2_weight))(last_layer)
last_layer = Activation(activation)(last_layer)
if p_dropout != 0.0:
last_layer = Dropout(p_dropout)(last_layer)
return last_layer
@staticmethod
def build_mlp_expert(i, input_shape, output_dim, output_activation,
l2_weight=0.0, topmost_hidden_state=None, **kwargs):
if topmost_hidden_state is None:
input_layer = Input(shape=input_shape)
topmost_hidden_state = ModelBuilder.build_mlp(input_layer, l2_weight=l2_weight, **kwargs)
else:
input_layer = topmost_hidden_state
auxiliary_output = Dense(output_dim,
name="auxiliary" + str(i),
activation=output_activation)(topmost_hidden_state)
return Model(inputs=[input_layer], outputs=[topmost_hidden_state, auxiliary_output])
@staticmethod
def get_output_activation(is_regression, output_dim):
if is_regression:
output_activation = "linear"
output_tf_activation = lambda x: x
else:
if output_dim == 1:
output_activation = "sigmoid"
output_tf_activation = tf.nn.sigmoid
else:
output_activation = "softmax"
output_tf_activation = tf.nn.softmax
return output_activation, output_tf_activation
@staticmethod
def _attention_dot(x):
a, r = x
if len(a.shape) == 2: # at least 3d
a = K.expand_dims(a, axis=-1)
weighted_input = a * r
return K.sum(weighted_input, axis=1)
@staticmethod
def _get_expert_outputs_unoptimized(input_num_dimensions, last_layer, make_expert_fn,
output_dim, output_activation, l2_weight, topmost_hidden_states=None,
**kwargs):
"""
An unoptimised routine for obtaining expert outputs in an AME. Provided for didactic reasons.
This routine creates a large number of operations in the TF graph if there are more than >100 experts and
can therefore be slow to compile. Use _get_expert_outputs_optimized for problems that require more than
100 experts in a single model.
"""
outputs, topmost_hidden_states = [], []
for i in range(input_num_dimensions):
expert_input_layer = Lambda(lambda x: x[:, i:i + 1])(last_layer)
expert = make_expert_fn(i, (1,), output_dim, output_activation, l2_weight=l2_weight,
topmost_hidden_state=topmost_hidden_states[i],
**kwargs)
topmost_hidden_state, auxiliary_output = expert(expert_input_layer)
topmost_hidden_states.append(topmost_hidden_state)
outputs.append(auxiliary_output)
return outputs, topmost_hidden_states, []
@staticmethod
def _get_expert_auxiliary_predictions_unoptimized(output_dim, output_activation, topmost_hidden_states):
"""
An unoptimised routine for obtaining experts' auxiliary predictions in an AME. Provided for didactic reasons.
Like _get_expert_outputs_unoptimized, this routine creates a large number of operations in the TF graph and
can therefore be slow to compile. Use _get_expert_auxiliary_predictions_optimized for problems that require
more than 100 experts in a single model.
"""
all_but_one_auxiliary_outputs = []
for i in range(len(topmost_hidden_states)):
all_but_one_output = concatenate([topmost_hidden_state
for j, topmost_hidden_state
in enumerate(topmost_hidden_states) if j != i])
all_but_one_output = Dense(output_dim,
activation=output_activation,
name="1vk_auxiliary" + str(i))(all_but_one_output)
all_but_one_auxiliary_outputs.append(all_but_one_output)
return all_but_one_auxiliary_outputs, []
@staticmethod
def _get_expert_outputs_optimized(input_num_dimensions, last_layer, num_units,
output_dim, output_tf_activation, topmost_hidden_states=None):
"""
Method for obtaining expert outputs in an AME optimised for faster compilation speed.
Reduces the number of operations in the TF graph to a number independent of the chosen number of experts
by sharing the per-expert operations and looping over their associated weights instead.
This method is less flexible than the unoptimised version since it
requires that all experts share the same architecture.
"""
from keras.initializers import he_normal, zeros
extra_trainable_weights = []
has_prebuilt_hidden_states = topmost_hidden_states is not None
if not has_prebuilt_hidden_states:
num_experts = input_num_dimensions
w1 = tf.Variable(he_normal()((num_experts, 1, num_units)))
b1 = tf.Variable(zeros()((num_experts, num_units)))
extra_trainable_weights += [w1, b1]
else:
num_experts = len(topmost_hidden_states)
w2 = tf.Variable(he_normal()((num_experts, num_units, output_dim)))
b2 = tf.Variable(zeros()((num_experts, output_dim)))
extra_trainable_weights += [w2, b2]
if not has_prebuilt_hidden_states:
topmost_hidden_states = tf.TensorArray(dtype=tf.float32, size=num_experts, dynamic_size=False)
else:
topmost_hidden_states = tf.stack(topmost_hidden_states, axis=0)
outputs = tf.TensorArray(dtype=tf.float32, size=num_experts, dynamic_size=False)
def loop_fun(x):
i = tf.constant(0)
c = lambda i, ths, o: tf.less(i, num_experts)
def loop_body(i, topmost_hidden_states, outputs):
if has_prebuilt_hidden_states:
topmost_hidden_state = topmost_hidden_states[i]
else:
topmost_hidden_state = tf.nn.selu(tf.matmul(x[:, i:i + 1], w1[i]) + b1[i])
topmost_hidden_states = topmost_hidden_states.write(i, topmost_hidden_state)
auxiliary_output = output_tf_activation(tf.matmul(topmost_hidden_state, w2[i]) + b2[i])
outputs = outputs.write(i, auxiliary_output)
return i + 1, topmost_hidden_states, outputs
_, hidden_states, aux_outputs = tf.while_loop(c, loop_body, [i, topmost_hidden_states, outputs])
return [hidden_states.stack() if not has_prebuilt_hidden_states else topmost_hidden_states,
aux_outputs.stack()]
topmost_hidden_states, outputs = Lambda(loop_fun,
output_shape=lambda _: [(num_experts, None, num_units),
(num_experts, None, output_dim)])(last_layer)
topmost_hidden_states = Lambda(lambda x: tf.unstack(x, num=num_experts, axis=0))(topmost_hidden_states)
outputs = Lambda(lambda x: tf.unstack(x, num=num_experts, axis=0))(outputs)
return outputs, topmost_hidden_states, extra_trainable_weights
@staticmethod
def _get_expert_auxiliary_predictions_optimized(output_dim, output_tf_activation, topmost_hidden_states):
"""
Method for obtaining experts' auxiliary predictions in an AME optimised for faster compilation speed.
Like _get_expert_outputs_unoptimized, this routine creates a large number of operations in the TF graph and
can therefore be slow to compile. Use _get_expert_auxiliary_predictions_optimized for problems that require
more than 100 experts in a single model.
"""
from keras.initializers import he_normal, zeros
num_experts = len(topmost_hidden_states)
step_size = K.int_shape(topmost_hidden_states[0])[-1]
w3 = tf.Variable(he_normal()((num_experts, step_size * (num_experts - 1), output_dim)))
b3 = tf.Variable(zeros()((num_experts, output_dim)))
extra_trainable_weights = [w3, b3]
def apply_fully_connected(idx, x):
return output_tf_activation(tf.matmul(x, w3[idx]) + b3[idx])
def get_all_but_one_auxiliary_outputs(x):
all_outputs = tf.concat(x, axis=-1)
return [apply_fully_connected(
idx,
tf.concat((all_outputs[:, :idx * step_size],
all_outputs[:, (idx + 1) * step_size:]),
axis=-1)
) for idx in range(num_experts)]
all_but_one_auxiliary_outputs = Lambda(get_all_but_one_auxiliary_outputs)(topmost_hidden_states)
return all_but_one_auxiliary_outputs, extra_trainable_weights
@staticmethod
def build_ame_model(input_dim, output_dim, make_expert_fn=None, l2_weight=0.0, num_softmaxes=1,
num_units=36, granger_loss_weight=0.03, is_regression=True, fast_compile=True,
is_image=True, downsample_factor=4, learning_rate=0.0001, dropout=0.0, attention_dropout=0.2,
**kwargs):
if make_expert_fn is None:
make_expert_fn = ModelBuilder.build_mlp_expert
output_activation, output_tf_activation = ModelBuilder.get_output_activation(is_regression, output_dim)
input_layer, input_num_dimensions = Input(shape=input_dim), int(np.prod(input_dim))
last_layer = input_layer
if is_image:
last_num_units = num_units
for _ in range(downsample_factor // 2):
# Apply downsampling convolutions for image data to reduce the number of total experts.
# This reduces the compilation and training time at the cost of resolution
# in the attention map.
last_layer = Conv2D(last_num_units, kernel_size=2, strides=2, activation="elu",
kernel_regularizer=L1L2(l2=l2_weight))(last_layer)
if dropout != 0.0:
last_layer = Dropout(dropout)(last_layer)
last_num_units *= 2
num_units = K.int_shape(last_layer)[-1]
num_pixel_experts = np.prod(K.int_shape(last_layer)[1:3])
last_layer = Reshape((num_pixel_experts, num_units))(last_layer)
topmost_hidden_states = Lambda(lambda x: tf.unstack(x, axis=1))(last_layer)
else:
topmost_hidden_states = None
if fast_compile:
outputs, topmost_hidden_states, extra_trainable_weights1 = \
ModelBuilder._get_expert_outputs_optimized(input_num_dimensions, last_layer, num_units,
output_dim, output_tf_activation,
topmost_hidden_states=topmost_hidden_states)
all_but_one_auxiliary_outputs, extra_trainable_weights2 = \
ModelBuilder._get_expert_auxiliary_predictions_optimized(output_dim, output_tf_activation,
topmost_hidden_states)
else:
outputs, topmost_hidden_states, extra_trainable_weights1 = \
ModelBuilder._get_expert_outputs_unoptimized(input_num_dimensions, last_layer, make_expert_fn,
output_dim, output_activation, l2_weight,
topmost_hidden_states=topmost_hidden_states,
**kwargs)
all_but_one_auxiliary_outputs, extra_trainable_weights2 = \
ModelBuilder._get_expert_auxiliary_predictions_unoptimized(output_dim,
output_activation,
topmost_hidden_states)
extra_trainable_weights = extra_trainable_weights1 + extra_trainable_weights2
all_auxiliary_outputs = concatenate(topmost_hidden_states)
all_auxiliary_outputs_layer = Dense(output_dim,
activation=output_activation,
name="all_auxiliary")
# Extra trainable weights must be added to a trainable layer.
# See https://stackoverflow.com/questions/46544329/keras-add-external-trainable-variable-to-graph
all_auxiliary_outputs_layer.trainable_weights.extend(extra_trainable_weights)
all_auxiliary_outputs = all_auxiliary_outputs_layer(all_auxiliary_outputs)
combined_hidden_state = concatenate(topmost_hidden_states + outputs)
attention_weights = MixtureSoftAttention(num_softmaxes=num_softmaxes,
num_independent_attention_mechanisms=len(outputs),
attention_dropout=attention_dropout,
name="mixture_attention_1",
u_regularizer=L1L2(l2=l2_weight),
w_regularizer=L1L2(l2=l2_weight),
activation="tanh",
normalised=True)(combined_hidden_state)
if is_regression:
concatenated_residuals = concatenate(outputs, axis=-1)
concatenated_residuals = Reshape((len(outputs),))(concatenated_residuals)
attention_weights = Reshape((len(outputs),), name="soft_attention_1")(attention_weights)
output = dot([attention_weights, concatenated_residuals], axes=-1, name="combined")
else:
concatenated_residuals = Lambda(lambda x: K.stack(x, axis=-2))(outputs)
output = Lambda(ModelBuilder._attention_dot, name="combined")([attention_weights, concatenated_residuals])
granger_output = Lambda(lambda x: x, name="granger")(output)
repeat_output = Lambda(lambda x: x, name="repeat")(all_auxiliary_outputs)
if is_regression:
main_loss = "mse"
auxiliary_loss = absolute_error_loss
metrics = {}
else:
main_loss = "binary_crossentropy" if output_dim == 1 else "categorical_crossentropy"
auxiliary_loss = categorical_loss
metrics = {"combined": "accuracy"}
granger_loss = partial(granger_causal_loss,
attention_weights=attention_weights,
auxiliary_outputs=all_auxiliary_outputs,
all_but_one_auxiliary_outputs=all_but_one_auxiliary_outputs,
loss_function=auxiliary_loss)
granger_loss.__name__ = "granger_causal_loss"
# We optimise compilation speed by using one shared loss function for all auxiliary outputs.
repeat_loss = partial(repeat_output_loss,
outputs=outputs + all_but_one_auxiliary_outputs + [all_auxiliary_outputs],
main_loss=main_loss)
repeat_loss.__name__ = "repeat_loss"
extra_losses = [granger_loss, repeat_loss]
outputs = [output, granger_output, repeat_output]
auxiliary_loss_weight = 1.0
loss_weights = [(1 - granger_loss_weight), granger_loss_weight, auxiliary_loss_weight]
model = Model(inputs=input_layer, outputs=outputs)
return ModelBuilder.compile_model(model,
learning_rate=learning_rate,
main_loss=main_loss,
extra_loss=extra_losses,
loss_weights=loss_weights,
metrics=metrics,
# We found gradient clipping useful to combat exploding gradients
# when using unbounded outputs, e.g. in the regression setting.
gradient_clipping_threshold=100 if is_regression else 0)
| [
"keras.optimizers.Adam",
"keras.initializers.zeros",
"numpy.prod",
"keras.initializers.he_normal",
"keras.layers.Lambda",
"keras.regularizers.L1L2",
"keras.layers.Input",
"keras.optimizers.SGD",
"functools.partial",
"keras.layers.concatenate",
"keras.models.Model",
"keras.layers.dot",
"keras... | [((4213, 4290), 'keras.models.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[topmost_hidden_state, auxiliary_output]'}), '(inputs=[input_layer], outputs=[topmost_hidden_state, auxiliary_output])\n', (4218, 4290), False, 'from keras.models import Model\n'), ((15669, 15703), 'keras.layers.concatenate', 'concatenate', (['topmost_hidden_states'], {}), '(topmost_hidden_states)\n', (15680, 15703), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((15742, 15811), 'keras.layers.Dense', 'Dense', (['output_dim'], {'activation': 'output_activation', 'name': '"""all_auxiliary"""'}), "(output_dim, activation=output_activation, name='all_auxiliary')\n", (15747, 15811), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((16279, 16323), 'keras.layers.concatenate', 'concatenate', (['(topmost_hidden_states + outputs)'], {}), '(topmost_hidden_states + outputs)\n', (16290, 16323), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((18094, 18300), 'functools.partial', 'partial', (['granger_causal_loss'], {'attention_weights': 'attention_weights', 'auxiliary_outputs': 'all_auxiliary_outputs', 'all_but_one_auxiliary_outputs': 'all_but_one_auxiliary_outputs', 'loss_function': 'auxiliary_loss'}), '(granger_causal_loss, attention_weights=attention_weights,\n auxiliary_outputs=all_auxiliary_outputs, all_but_one_auxiliary_outputs=\n all_but_one_auxiliary_outputs, loss_function=auxiliary_loss)\n', (18101, 18300), False, 'from functools import partial\n'), ((18594, 18721), 'functools.partial', 'partial', (['repeat_output_loss'], {'outputs': '(outputs + all_but_one_auxiliary_outputs + [all_auxiliary_outputs])', 'main_loss': 'main_loss'}), '(repeat_output_loss, outputs=outputs + all_but_one_auxiliary_outputs +\n [all_auxiliary_outputs], main_loss=main_loss)\n', (18601, 18721), 
False, 'from functools import partial\n'), ((19082, 19124), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'outputs'}), '(inputs=input_layer, outputs=outputs)\n', (19087, 19124), False, 'from keras.models import Model\n'), ((2218, 2282), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate', 'clipvalue': 'gradient_clipping_threshold'}), '(lr=learning_rate, clipvalue=gradient_clipping_threshold)\n', (2225, 2282), False, 'from keras.optimizers import Adam, RMSprop, SGD\n'), ((3818, 3842), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (3823, 3842), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((10247, 10358), 'keras.layers.Lambda', 'Lambda', (['loop_fun'], {'output_shape': '(lambda _: [(num_experts, None, num_units), (num_experts, None, output_dim)])'}), '(loop_fun, output_shape=lambda _: [(num_experts, None, num_units), (\n num_experts, None, output_dim)])\n', (10253, 10358), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((12195, 12236), 'keras.layers.Lambda', 'Lambda', (['get_all_but_one_auxiliary_outputs'], {}), '(get_all_but_one_auxiliary_outputs)\n', (12201, 12236), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((12957, 12979), 'keras.layers.Input', 'Input', ([], {'shape': 'input_dim'}), '(shape=input_dim)\n', (12962, 12979), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((17053, 17082), 'keras.layers.concatenate', 'concatenate', (['outputs'], {'axis': '(-1)'}), '(outputs, axis=-1)\n', (17064, 17082), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((17291, 17365), 'keras.layers.dot', 'dot', (['[attention_weights, concatenated_residuals]'], 
{'axes': '(-1)', 'name': '"""combined"""'}), "([attention_weights, concatenated_residuals], axes=-1, name='combined')\n", (17294, 17365), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((17609, 17644), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x)'], {'name': '"""granger"""'}), "(lambda x: x, name='granger')\n", (17615, 17644), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((17677, 17711), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x)'], {'name': '"""repeat"""'}), "(lambda x: x, name='repeat')\n", (17683, 17711), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((2334, 2428), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'learning_rate', 'nesterov': '(True)', 'momentum': '(0.9)', 'clipvalue': 'gradient_clipping_threshold'}), '(lr=learning_rate, nesterov=True, momentum=0.9, clipvalue=\n gradient_clipping_threshold)\n', (2337, 2428), False, 'from keras.optimizers import Adam, RMSprop, SGD\n'), ((2456, 2517), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate', 'clipvalue': 'gradient_clipping_threshold'}), '(lr=learning_rate, clipvalue=gradient_clipping_threshold)\n', (2460, 2517), False, 'from keras.optimizers import Adam, RMSprop, SGD\n'), ((3427, 3449), 'keras.layers.Activation', 'Activation', (['activation'], {}), '(activation)\n', (3437, 3449), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((5813, 5844), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x[:, i:i + 1])'], {}), '(lambda x: x[:, i:i + 1])\n', (5819, 5844), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((8729, 8740), 'keras.initializers.he_normal', 'he_normal', ([], {}), '()\n', (8738, 8740), False, 'from keras.initializers import he_normal, zeros\n'), 
((8805, 8812), 'keras.initializers.zeros', 'zeros', ([], {}), '()\n', (8810, 8812), False, 'from keras.initializers import he_normal, zeros\n'), ((11490, 11501), 'keras.initializers.he_normal', 'he_normal', ([], {}), '()\n', (11499, 11501), False, 'from keras.initializers import he_normal, zeros\n'), ((11586, 11593), 'keras.initializers.zeros', 'zeros', ([], {}), '()\n', (11591, 11593), False, 'from keras.initializers import he_normal, zeros\n'), ((12985, 13003), 'numpy.prod', 'np.prod', (['input_dim'], {}), '(input_dim)\n', (12992, 13003), True, 'import numpy as np\n'), ((13851, 13890), 'keras.layers.Reshape', 'Reshape', (['(num_pixel_experts, num_units)'], {}), '((num_pixel_experts, num_units))\n', (13858, 13890), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((17485, 17537), 'keras.layers.Lambda', 'Lambda', (['ModelBuilder._attention_dot'], {'name': '"""combined"""'}), "(ModelBuilder._attention_dot, name='combined')\n", (17491, 17537), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((3525, 3543), 'keras.layers.Dropout', 'Dropout', (['p_dropout'], {}), '(p_dropout)\n', (3532, 3543), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((8483, 8494), 'keras.initializers.he_normal', 'he_normal', ([], {}), '()\n', (8492, 8494), False, 'from keras.initializers import he_normal, zeros\n'), ((8554, 8561), 'keras.initializers.zeros', 'zeros', ([], {}), '()\n', (8559, 8561), False, 'from keras.initializers import he_normal, zeros\n'), ((16729, 16747), 'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (16733, 16747), False, 'from keras.regularizers import L1L2\n'), ((16812, 16830), 'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (16816, 16830), False, 'from keras.regularizers import L1L2\n'), ((3044, 3062), 
'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (3048, 3062), False, 'from keras.regularizers import L1L2\n'), ((3112, 3130), 'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (3116, 3130), False, 'from keras.regularizers import L1L2\n'), ((13637, 13653), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (13644, 13653), False, 'from keras.layers import Dense, Activation, Dropout, Input, Reshape, concatenate, dot, Lambda, Conv2D\n'), ((3284, 3302), 'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (3288, 3302), False, 'from keras.regularizers import L1L2\n'), ((3370, 3388), 'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (3374, 3388), False, 'from keras.regularizers import L1L2\n'), ((13537, 13555), 'keras.regularizers.L1L2', 'L1L2', ([], {'l2': 'l2_weight'}), '(l2=l2_weight)\n', (13541, 13555), False, 'from keras.regularizers import L1L2\n')] |
import tensorflow as tf
import numpy as np
def clip_by_value_with_gradient(x, l=-1., u=1.):
clip_up = tf.cast(x > u, tf.float32)
clip_low = tf.cast(x < l, tf.float32)
# if the difference between x and l or u is smaller than the precision,
# the following may cause the result to be 0 or 2*u
return x + tf.stop_gradient((u - x)*clip_up + (l - x)*clip_low)
def lstm_layer(name, X, in_channels, out_filters, batch_size=None, sequence_length_tensor=None,
initializer=tf.truncated_normal_initializer(0.0, 0.01)):
if len(X.shape) != 2 or X.shape[1] != in_channels:
X = tf.reshape(X, [-1, in_channels])
with tf.variable_scope(name):
if sequence_length_tensor is None:
sequence_length_tensor = tf.placeholder(tf.float32, [None], name="seq_len")
cell = tf.nn.rnn_cell.LSTMCell(out_filters, state_is_tuple=True, initializer=initializer)
c = tf.placeholder(tf.float32, [None, cell.state_size.c], name="state_c")
h = tf.placeholder(tf.float32, [None, cell.state_size.h], name="state_h")
init_state = tf.nn.rnn_cell.LSTMStateTuple(c, h)
if batch_size is None:
if sequence_length_tensor is None:
batch_size = 1
else:
with tf.name_scope("batch_size"):
batch_size = tf.shape(sequence_length_tensor)[0]
with tf.variable_scope("seq_batch"):
X = tf.reshape(X, [batch_size, -1, in_channels])
Y, state = tf.nn.dynamic_rnn(cell, X,
initial_state=init_state,
sequence_length=sequence_length_tensor,
time_major=False)
return tf.reshape(Y, [-1, out_filters]), (state, (c, h))
def conv_layer(name, X, in_channels, out_filters, ksize, stride,
activator,
# tf.variance_scaling_initializer
# tf.truncated_normal_initializer(0.0, 0.01)
# tf.orthogonal_initializer
# tf.glorot_uniform_initializer()
weight_initializer,
bias_initializer=tf.constant_initializer(0.0),
padding="SAME", trainable=True):
if not hasattr(ksize, "__len__"):
ksize = [ksize, ksize]
if not hasattr(stride, "__len__"):
stride = [stride, stride]
if len(X.shape) == 2:
X = tf.reshape(X, [-1, 1, X.shape[1], 1])
in_channels = 1
else:
assert(len(X.shape) == 4)
with tf.variable_scope(name):
if callable(weight_initializer):
try:
weight_initializer = weight_initializer()
except:
pass
w_shape = [ksize[0], ksize[1], in_channels, out_filters]
w = tf.get_variable("weight", w_shape, tf.float32,
weight_initializer, trainable=trainable)
Y = tf.nn.conv2d(X, w, [1, stride[0], stride[1], 1], padding)
if bias_initializer is not None:
if callable(bias_initializer):
try:
bias_initializer = bias_initializer()
except:
pass
b = tf.get_variable("bias", [out_filters], tf.float32,
bias_initializer,
trainable=trainable)
Y = tf.add(Y, b)
if activator is not None:
Y = activator(Y)
return Y
def fc_layer(name, X, in_channels, out_filters,
activator,
# tf.variance_scaling_initializer
# tf.truncated_normal_initializer(0.0, 0.01)
# tf.orthogonal_initializer(),
# tf.glorot_uniform_initializer()
weight_initializer,
bias_initializer=tf.constant_initializer(0.0),
trainable=True):
if X.shape.ndims != 2 or X.shape[1] != in_channels:
X = tf.reshape(X, [-1, in_channels])
with tf.variable_scope(name):
if callable(weight_initializer):
if weight_initializer == tf.orthogonal_initializer:
weight_initializer = tf.orthogonal_initializer(np.sqrt(2) if activator == tf.nn.relu else 1)
else:
try:
weight_initializer = weight_initializer()
except:
pass
w = tf.get_variable("weight", [in_channels, out_filters], tf.float32,
weight_initializer,
trainable=trainable)
Y = tf.matmul(X, w)
if bias_initializer is not None:
if callable(bias_initializer):
try:
bias_initializer = bias_initializer()
except:
pass
b = tf.get_variable("bias", [out_filters], tf.float32,
bias_initializer,
trainable=trainable)
Y = tf.add(Y, b)
if activator is not None:
Y = activator(Y)
return Y
| [
"tensorflow.nn.conv2d",
"tensorflow.shape",
"tensorflow.variable_scope",
"tensorflow.get_variable",
"numpy.sqrt",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.add",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.truncated_normal_initializer",
"tensorflow.nn.dyna... | [((107, 133), 'tensorflow.cast', 'tf.cast', (['(x > u)', 'tf.float32'], {}), '(x > u, tf.float32)\n', (114, 133), True, 'import tensorflow as tf\n'), ((149, 175), 'tensorflow.cast', 'tf.cast', (['(x < l)', 'tf.float32'], {}), '(x < l, tf.float32)\n', (156, 175), True, 'import tensorflow as tf\n'), ((504, 546), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (535, 546), True, 'import tensorflow as tf\n'), ((2151, 2179), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2174, 2179), True, 'import tensorflow as tf\n'), ((3796, 3824), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3819, 3824), True, 'import tensorflow as tf\n'), ((323, 379), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['((u - x) * clip_up + (l - x) * clip_low)'], {}), '((u - x) * clip_up + (l - x) * clip_low)\n', (339, 379), True, 'import tensorflow as tf\n'), ((616, 648), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, in_channels]'], {}), '(X, [-1, in_channels])\n', (626, 648), True, 'import tensorflow as tf\n'), ((658, 681), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (675, 681), True, 'import tensorflow as tf\n'), ((829, 916), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['out_filters'], {'state_is_tuple': '(True)', 'initializer': 'initializer'}), '(out_filters, state_is_tuple=True, initializer=\n initializer)\n', (852, 916), True, 'import tensorflow as tf\n'), ((925, 994), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cell.state_size.c]'], {'name': '"""state_c"""'}), "(tf.float32, [None, cell.state_size.c], name='state_c')\n", (939, 994), True, 'import tensorflow as tf\n'), ((1007, 1076), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, cell.state_size.h]'], {'name': '"""state_h"""'}), "(tf.float32, [None, 
cell.state_size.h], name='state_h')\n", (1021, 1076), True, 'import tensorflow as tf\n'), ((1098, 1133), 'tensorflow.nn.rnn_cell.LSTMStateTuple', 'tf.nn.rnn_cell.LSTMStateTuple', (['c', 'h'], {}), '(c, h)\n', (1127, 1133), True, 'import tensorflow as tf\n'), ((1507, 1622), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'X'], {'initial_state': 'init_state', 'sequence_length': 'sequence_length_tensor', 'time_major': '(False)'}), '(cell, X, initial_state=init_state, sequence_length=\n sequence_length_tensor, time_major=False)\n', (1524, 1622), True, 'import tensorflow as tf\n'), ((1740, 1772), 'tensorflow.reshape', 'tf.reshape', (['Y', '[-1, out_filters]'], {}), '(Y, [-1, out_filters])\n', (1750, 1772), True, 'import tensorflow as tf\n'), ((2409, 2446), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, 1, X.shape[1], 1]'], {}), '(X, [-1, 1, X.shape[1], 1])\n', (2419, 2446), True, 'import tensorflow as tf\n'), ((2524, 2547), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (2541, 2547), True, 'import tensorflow as tf\n'), ((2783, 2874), 'tensorflow.get_variable', 'tf.get_variable', (['"""weight"""', 'w_shape', 'tf.float32', 'weight_initializer'], {'trainable': 'trainable'}), "('weight', w_shape, tf.float32, weight_initializer,\n trainable=trainable)\n", (2798, 2874), True, 'import tensorflow as tf\n'), ((2911, 2968), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['X', 'w', '[1, stride[0], stride[1], 1]', 'padding'], {}), '(X, w, [1, stride[0], stride[1], 1], padding)\n', (2923, 2968), True, 'import tensorflow as tf\n'), ((3924, 3956), 'tensorflow.reshape', 'tf.reshape', (['X', '[-1, in_channels]'], {}), '(X, [-1, in_channels])\n', (3934, 3956), True, 'import tensorflow as tf\n'), ((3966, 3989), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (3983, 3989), True, 'import tensorflow as tf\n'), ((4367, 4477), 'tensorflow.get_variable', 'tf.get_variable', (['"""weight"""', '[in_channels, out_filters]', 
'tf.float32', 'weight_initializer'], {'trainable': 'trainable'}), "('weight', [in_channels, out_filters], tf.float32,\n weight_initializer, trainable=trainable)\n", (4382, 4477), True, 'import tensorflow as tf\n'), ((4542, 4557), 'tensorflow.matmul', 'tf.matmul', (['X', 'w'], {}), '(X, w)\n', (4551, 4557), True, 'import tensorflow as tf\n'), ((763, 813), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""seq_len"""'}), "(tf.float32, [None], name='seq_len')\n", (777, 813), True, 'import tensorflow as tf\n'), ((1395, 1425), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""seq_batch"""'], {}), "('seq_batch')\n", (1412, 1425), True, 'import tensorflow as tf\n'), ((1443, 1487), 'tensorflow.reshape', 'tf.reshape', (['X', '[batch_size, -1, in_channels]'], {}), '(X, [batch_size, -1, in_channels])\n', (1453, 1487), True, 'import tensorflow as tf\n'), ((3197, 3290), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""', '[out_filters]', 'tf.float32', 'bias_initializer'], {'trainable': 'trainable'}), "('bias', [out_filters], tf.float32, bias_initializer,\n trainable=trainable)\n", (3212, 3290), True, 'import tensorflow as tf\n'), ((3367, 3379), 'tensorflow.add', 'tf.add', (['Y', 'b'], {}), '(Y, b)\n', (3373, 3379), True, 'import tensorflow as tf\n'), ((4786, 4879), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""', '[out_filters]', 'tf.float32', 'bias_initializer'], {'trainable': 'trainable'}), "('bias', [out_filters], tf.float32, bias_initializer,\n trainable=trainable)\n", (4801, 4879), True, 'import tensorflow as tf\n'), ((4956, 4968), 'tensorflow.add', 'tf.add', (['Y', 'b'], {}), '(Y, b)\n', (4962, 4968), True, 'import tensorflow as tf\n'), ((1283, 1310), 'tensorflow.name_scope', 'tf.name_scope', (['"""batch_size"""'], {}), "('batch_size')\n", (1296, 1310), True, 'import tensorflow as tf\n'), ((1345, 1377), 'tensorflow.shape', 'tf.shape', (['sequence_length_tensor'], {}), '(sequence_length_tensor)\n', (1353, 1377), 
True, 'import tensorflow as tf\n'), ((4159, 4169), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4166, 4169), True, 'import numpy as np\n')] |
import string
import pandas as pd
import numpy as np
MODEL_RUN_SCRIPT_PREFIX='''
from System import Array
import TIME.DataTypes.TimeStep as TimeStep
import TIME.DataTypes.TimeSeries as TimeSeries
import System.DateTime as DateTime
import ${namespace}.${klass} as ${klass}
import TIME.Tools.ModelRunner as ModelRunner
model = ${klass}()
runner = ModelRunner(model)
'''
def _literal_timeseries(series,name):
start = series.index[0]
values = list(np.array(series))
# TimeSeries( DateTime startTime, TimeStep timeStep, double[] values )
scriptlet = 'values_%s = Array[float](%s)\n'%(name,values)
scriptlet += 'start_%s = DateTime(%d,%d,%d)\n'%(name,start.year,start.month,start.day)
scriptlet += 'timestep_%s = TimeStep.Daily\n'%name
scriptlet += 'ts_%s = TimeSeries(start_%s,timestep_%s,values_%s)\n\n'%(name,name,name,name)
return scriptlet
def _play(series,name):
scriptlet = _literal_timeseries(series,name)
scriptlet += 'runner.Play("%s",ts_%s)\n'%(name,name)
return scriptlet
def _record(output):
return 'runner.Record("%s")\n'%(output)
def _retrieve(output):
return 'result["%s"] = runner.GetRecorded("%s")\n'%(output,output)
class VeneerComponentModelActions(object):
def __init__(self,ironpy):
self._ironpy = ironpy
def run_model(self,model_name,namespace=None,inputs={},parameters={},outputs=[],extra_initialisation=''):
'''
Run the given component model with specified inputs, parameters and recorded outputs.
Return:
Dataframe of output time series
'''
if namespace is None:
name_elements = model_name.split('.')
namespace = '.'.join(name_elements[:-1])
model_name = name_elements[-1]
tmpl = string.Template(MODEL_RUN_SCRIPT_PREFIX)
run_script = tmpl.substitute(namespace=namespace,klass=model_name)
run_script += extra_initialisation
for k,ts in inputs.items():
run_script += _play(ts,k)
for k,val in parameters.items():
run_script += 'model.%s = %s\n'%(k,str(val))
run_script += ''.join([_record(o) for o in outputs])
run_script += 'runner.execute()\n'
run_script += 'result = {}\n'
run_script += ''.join([_retrieve(o) for o in outputs])
res = self._run_model(run_script)
return self._transform_results(res)
def _run_model(self,script):
res = self._ironpy._safe_run(script)
if res['Exception'] is not None:
raise Exception(res['Exception'])
entries = res['Response']['Entries']
return {e['Key']['Value']:e['Value'] for e in entries}
def _transform_results(self,res):
index = [self._ironpy._veneer.parse_veneer_date(e['Date']) for e in list(res.values())[0]['Events']]
data = {k:[e['Value'] for e in vals['Events']] for k,vals in res.items()}
return pd.DataFrame(data,index=index)
| [
"pandas.DataFrame",
"numpy.array",
"string.Template"
] | [((455, 471), 'numpy.array', 'np.array', (['series'], {}), '(series)\n', (463, 471), True, 'import numpy as np\n'), ((1784, 1824), 'string.Template', 'string.Template', (['MODEL_RUN_SCRIPT_PREFIX'], {}), '(MODEL_RUN_SCRIPT_PREFIX)\n', (1799, 1824), False, 'import string\n'), ((2930, 2961), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'index'}), '(data, index=index)\n', (2942, 2961), True, 'import pandas as pd\n')] |
# Code for CVPR'21 paper:
# [Title] - "CoLA: Weakly-Supervised Temporal Action Localization with Snippet Contrastive Learning"
# [Author] - <NAME>*, <NAME>, <NAME>, <NAME> and <NAME>
# [Github] - https://github.com/zhang-can/CoLA
import numpy as np
import os
from easydict import EasyDict as edict
cfg = edict()
cfg.GPU_ID = '0'
cfg.LR = '[0.0001]*6000'
cfg.NUM_ITERS = len(eval(cfg.LR))
cfg.NUM_CLASSES = 20
cfg.MODAL = 'all'
cfg.FEATS_DIM = 2048
cfg.BATCH_SIZE = 16
cfg.DATA_PATH = './data/THUMOS14'
cfg.NUM_WORKERS = 8
cfg.LAMBDA = 0.01
cfg.R_EASY = 5
cfg.R_HARD = 20
cfg.m = 3
cfg.M = 6
cfg.TEST_FREQ = 100
cfg.PRINT_FREQ = 20
cfg.CLASS_THRESH = 0.2
cfg.NMS_THRESH = 0.6
cfg.CAS_THRESH = np.arange(0.0, 0.25, 0.025)
cfg.ANESS_THRESH = np.arange(0.1, 0.925, 0.025)
cfg.TIOU_THRESH = np.linspace(0.1, 0.7, 7)
cfg.UP_SCALE = 24
cfg.GT_PATH = os.path.join(cfg.DATA_PATH, 'gt.json')
cfg.SEED = 0
cfg.FEATS_FPS = 25
cfg.NUM_SEGMENTS = 750
cfg.CLASS_DICT = {'BaseballPitch': 0, 'BasketballDunk': 1, 'Billiards': 2,
'CleanAndJerk': 3, 'CliffDiving': 4, 'CricketBowling': 5,
'CricketShot': 6, 'Diving': 7, 'FrisbeeCatch': 8,
'GolfSwing': 9, 'HammerThrow': 10, 'HighJump': 11,
'JavelinThrow': 12, 'LongJump': 13, 'PoleVault': 14,
'Shotput': 15, 'SoccerPenalty': 16, 'TennisSwing': 17,
'ThrowDiscus': 18, 'VolleyballSpiking': 19}
| [
"easydict.EasyDict",
"numpy.linspace",
"os.path.join",
"numpy.arange"
] | [((307, 314), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (312, 314), True, 'from easydict import EasyDict as edict\n'), ((696, 723), 'numpy.arange', 'np.arange', (['(0.0)', '(0.25)', '(0.025)'], {}), '(0.0, 0.25, 0.025)\n', (705, 723), True, 'import numpy as np\n'), ((743, 771), 'numpy.arange', 'np.arange', (['(0.1)', '(0.925)', '(0.025)'], {}), '(0.1, 0.925, 0.025)\n', (752, 771), True, 'import numpy as np\n'), ((790, 814), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.7)', '(7)'], {}), '(0.1, 0.7, 7)\n', (801, 814), True, 'import numpy as np\n'), ((847, 885), 'os.path.join', 'os.path.join', (['cfg.DATA_PATH', '"""gt.json"""'], {}), "(cfg.DATA_PATH, 'gt.json')\n", (859, 885), False, 'import os\n')] |
import io
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import yaml
from scipy.optimize import curve_fit
from scipy.signal import savgol_filter
# if compact printing is required
np.set_printoptions(precision=2)
def friction_func(xdot, mass, mu, damping):
return np.sign(xdot) *(mu * mass * 9.8) + damping * xdot
def parse_time(time_dataframe):
split_time_str = pd.Series.str(time_dataframe).split(':')
# convert the hours*3600, minutes*60, seconds*1, and microseconds*1e-6 format to seconds
time_array = np.array([int(x[0])*3600 + int(x[1])*60 + int(x[2]) + int(x[3])*1e-6 for x in split_time_str]).reshape(-1, )
return time_array
def calculate_friction(mass, dataframe, start, stop, setting):
dataframe['Time'] = parse_time(dataframe['Time'])
dataframe['Time'] = dataframe['Time'] - dataframe['Time'][0]
dt = np.round(np.mean(np.diff(dataframe['Time'].to_numpy())), 3)
# -----------------------------------------------
# The window length and polynomial order for pos, vel, and acc are selected iteratively
# to match the filtered data with the original data as much as possible
# -----------------------------------------------
# hammer position obtained from the potentiometer
hammer_pos = dataframe['Handle_position'].to_numpy()
hammer_pos = savgol_filter(hammer_pos, window_length=21, polyorder=5)
# Filter the hammer velocity and acceleration using Savitzky-Golay filter
hammer_vel = np.diff(hammer_pos, n=1) / dt
hammer_vel = savgol_filter(hammer_vel, window_length=21, polyorder=5)
hammer_acc = np.diff(hammer_pos, n=2) / dt**2
hammer_acc = savgol_filter(hammer_acc, window_length=21, polyorder=5)
# resultant force = F1 - F2 (here 1 is the magnet in negative x direction and 2 on the other side)
res_force = dataframe['Resultant_force'].to_numpy()
# filter the magnet force which is based on the hammer position
res_force = savgol_filter(res_force, window_length=21, polyorder=5)
friction_force = res_force[:-2] - mass * hammer_acc
popt, _ = curve_fit(friction_func, hammer_vel[start:stop], friction_force[start:stop], p0=[mass, 0.01, 2.0], bounds=([mass-0.001, 0.005, 0.4], [mass+0.001, 0.5, 5]))
print(popt)
estimated_friction = friction_func(hammer_vel[start:stop], mass, 0.1, 2)
residual = friction_force[start:stop] - friction_func(hammer_vel[start:stop], mass, 0.1, 2)
# _, ax = plt.subplots(2,1)
# ax[0].plot(friction_force[start:stop], 'b', label='actual friction')
# ax[0].plot(estimated_friction, 'r', label='estimated friction')
# ax[0].set_title(setting + ' friction force estimation')
# ax[0].legend()
# ax[1].plot(residual)
# ax[1].set_ylabel('residual')
plt.figure()
plt.plot(hammer_pos)
plt.plot(res_force/20)
plt.title(setting)
config = yaml.safe_load(io.open('src/config.yml'))
df_low1 = pd.read_csv(config['low_mass_file_1'], delimiter=',')
df_low2 = pd.read_csv(config['low_mass_file_2'], delimiter=',')
df_high1 = pd.read_csv(config['high_mass_file_1'], delimiter=',')
df_high2 = pd.read_csv(config['high_mass_file_2'], delimiter=',')
m_low = 0.06 # as the hammer weight is 140 grams
m_high = 0.2
print('mass, mu, damping')
# friction estimation for two different outer magnet settings and without hammer
calculate_friction(m_low, df_low1, start=430, stop=489, setting='0.06 kg') # start=414
calculate_friction(m_low, df_low2, start=370, stop=416, setting='0.06 kg') # start=357
# friction estimation for two different outer magnet settings and with hammer
calculate_friction(m_high, df_high1, start=660, stop=799, setting='0.2 kg') # start=629
calculate_friction(m_high, df_high2, start=400, stop=536, setting='0.2 kg') # start=371
print('Final values of mu:{}, damping:{} used'.format(0.1, 2))
plt.show() | [
"scipy.optimize.curve_fit",
"pandas.read_csv",
"numpy.set_printoptions",
"matplotlib.pyplot.plot",
"scipy.signal.savgol_filter",
"io.open",
"numpy.diff",
"matplotlib.pyplot.figure",
"numpy.sign",
"matplotlib.pyplot.title",
"pandas.Series.str",
"matplotlib.pyplot.show"
] | [((216, 248), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (235, 248), True, 'import numpy as np\n'), ((2998, 3051), 'pandas.read_csv', 'pd.read_csv', (["config['low_mass_file_1']"], {'delimiter': '""","""'}), "(config['low_mass_file_1'], delimiter=',')\n", (3009, 3051), True, 'import pandas as pd\n'), ((3063, 3116), 'pandas.read_csv', 'pd.read_csv', (["config['low_mass_file_2']"], {'delimiter': '""","""'}), "(config['low_mass_file_2'], delimiter=',')\n", (3074, 3116), True, 'import pandas as pd\n'), ((3128, 3182), 'pandas.read_csv', 'pd.read_csv', (["config['high_mass_file_1']"], {'delimiter': '""","""'}), "(config['high_mass_file_1'], delimiter=',')\n", (3139, 3182), True, 'import pandas as pd\n'), ((3194, 3248), 'pandas.read_csv', 'pd.read_csv', (["config['high_mass_file_2']"], {'delimiter': '""","""'}), "(config['high_mass_file_2'], delimiter=',')\n", (3205, 3248), True, 'import pandas as pd\n'), ((3926, 3936), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3934, 3936), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1417), 'scipy.signal.savgol_filter', 'savgol_filter', (['hammer_pos'], {'window_length': '(21)', 'polyorder': '(5)'}), '(hammer_pos, window_length=21, polyorder=5)\n', (1374, 1417), False, 'from scipy.signal import savgol_filter\n'), ((1575, 1631), 'scipy.signal.savgol_filter', 'savgol_filter', (['hammer_vel'], {'window_length': '(21)', 'polyorder': '(5)'}), '(hammer_vel, window_length=21, polyorder=5)\n', (1588, 1631), False, 'from scipy.signal import savgol_filter\n'), ((1704, 1760), 'scipy.signal.savgol_filter', 'savgol_filter', (['hammer_acc'], {'window_length': '(21)', 'polyorder': '(5)'}), '(hammer_acc, window_length=21, polyorder=5)\n', (1717, 1760), False, 'from scipy.signal import savgol_filter\n'), ((2010, 2065), 'scipy.signal.savgol_filter', 'savgol_filter', (['res_force'], {'window_length': '(21)', 'polyorder': '(5)'}), '(res_force, window_length=21, polyorder=5)\n', 
(2023, 2065), False, 'from scipy.signal import savgol_filter\n'), ((2146, 2313), 'scipy.optimize.curve_fit', 'curve_fit', (['friction_func', 'hammer_vel[start:stop]', 'friction_force[start:stop]'], {'p0': '[mass, 0.01, 2.0]', 'bounds': '([mass - 0.001, 0.005, 0.4], [mass + 0.001, 0.5, 5])'}), '(friction_func, hammer_vel[start:stop], friction_force[start:stop],\n p0=[mass, 0.01, 2.0], bounds=([mass - 0.001, 0.005, 0.4], [mass + 0.001,\n 0.5, 5]))\n', (2155, 2313), False, 'from scipy.optimize import curve_fit\n'), ((2842, 2854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2852, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2879), 'matplotlib.pyplot.plot', 'plt.plot', (['hammer_pos'], {}), '(hammer_pos)\n', (2867, 2879), True, 'import matplotlib.pyplot as plt\n'), ((2888, 2912), 'matplotlib.pyplot.plot', 'plt.plot', (['(res_force / 20)'], {}), '(res_force / 20)\n', (2896, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2933), 'matplotlib.pyplot.title', 'plt.title', (['setting'], {}), '(setting)\n', (2924, 2933), True, 'import matplotlib.pyplot as plt\n'), ((2960, 2985), 'io.open', 'io.open', (['"""src/config.yml"""'], {}), "('src/config.yml')\n", (2967, 2985), False, 'import io\n'), ((1528, 1552), 'numpy.diff', 'np.diff', (['hammer_pos'], {'n': '(1)'}), '(hammer_pos, n=1)\n', (1535, 1552), True, 'import numpy as np\n'), ((1654, 1678), 'numpy.diff', 'np.diff', (['hammer_pos'], {'n': '(2)'}), '(hammer_pos, n=2)\n', (1661, 1678), True, 'import numpy as np\n'), ((305, 318), 'numpy.sign', 'np.sign', (['xdot'], {}), '(xdot)\n', (312, 318), True, 'import numpy as np\n'), ((409, 438), 'pandas.Series.str', 'pd.Series.str', (['time_dataframe'], {}), '(time_dataframe)\n', (422, 438), True, 'import pandas as pd\n')] |
import numpy as np
import os, pickle
from tqdm import tqdm
def get_word_emb(word2coef_dict, word, default_value):
return word2coef_dict.get(word, default_value)
def get_phrase_emb(word2coef_dict, phrase, default_value):
words = phrase.split(' ')
embs = [ get_word_emb(word2coef_dict, word, default_value) for word in words ]
return np.mean(embs, axis=0)
def init_glove_data(glove_fname, glove_outname):
print(f'Constructing glove dictionary from {glove_fname} ...... ')
word2coef_dict = {}
running_sum = np.zeros((300,))
with open(os.path.join(glove_fname), 'r') as f:
for idx, line in enumerate(tqdm( list(f) )):
values = line.split()
word = ''.join(values[0:-300])
coefs = np.asarray(values[-300:], dtype='float32')
running_sum += coefs
word2coef_dict[word] = coefs
average_emb = running_sum / (idx + 1)
with open( os.path.join(glove_outname, 'glove_dict.pkl'), 'wb') as f:
pickle.dump( word2coef_dict, f)
np.save( os.path.join(glove_outname, 'default_value'), average_emb)
print(f'Glove dictionary saved at {glove_outname}') | [
"numpy.mean",
"pickle.dump",
"os.path.join",
"numpy.asarray",
"numpy.zeros"
] | [((349, 370), 'numpy.mean', 'np.mean', (['embs'], {'axis': '(0)'}), '(embs, axis=0)\n', (356, 370), True, 'import numpy as np\n'), ((534, 550), 'numpy.zeros', 'np.zeros', (['(300,)'], {}), '((300,))\n', (542, 550), True, 'import numpy as np\n'), ((999, 1029), 'pickle.dump', 'pickle.dump', (['word2coef_dict', 'f'], {}), '(word2coef_dict, f)\n', (1010, 1029), False, 'import os, pickle\n'), ((1044, 1088), 'os.path.join', 'os.path.join', (['glove_outname', '"""default_value"""'], {}), "(glove_outname, 'default_value')\n", (1056, 1088), False, 'import os, pickle\n'), ((565, 590), 'os.path.join', 'os.path.join', (['glove_fname'], {}), '(glove_fname)\n', (577, 590), False, 'import os, pickle\n'), ((753, 795), 'numpy.asarray', 'np.asarray', (['values[-300:]'], {'dtype': '"""float32"""'}), "(values[-300:], dtype='float32')\n", (763, 795), True, 'import numpy as np\n'), ((932, 977), 'os.path.join', 'os.path.join', (['glove_outname', '"""glove_dict.pkl"""'], {}), "(glove_outname, 'glove_dict.pkl')\n", (944, 977), False, 'import os, pickle\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 13:29:13 2017
@author: amirs
"""
# Artificial Neural Network
# Installing Theano
# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
# Installing Tensorflow
# Install Tensorflow from the website: https://www.tensorflow.org/versions/r0.12/get_started/os_setup.html
# Installing Keras
# pip install --upgrade keras
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('features/Table_Step2_159Features-85Subs-5Levels-z.csv')
all = dataset.iloc[:, :].values
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(all[:,1:])
all[:,1:] = imputer.transform(all[:,1:])
level_zero_one=([i for i in all if i[0] == 'level_zero_one'])
level_one=[i for i in all if i[0] == 'level_one']
level_two=[i for i in all if i[0] == 'level_two']
level_three=[i for i in all if i[0] == 'level_three']
level_four=[i for i in all if i[0] == 'level_four']
all = np.concatenate((level_zero_one, level_one), axis=0)
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_Y_1 = LabelEncoder()
all[:,0]=labelencoder_Y_1.fit_transform(all[:,0])
Y_label=all[:,0]
y = keras.utils.to_categorical(Y_label, num_classes=2)
y=y[:,1:]
X = all[:, 2:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 7)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras import metrics
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 120, activation = 'relu',kernel_initializer="uniform", input_dim = 159))
# Adding the second hidden layer
#.add(Dense(output_dim = 80, init = 'uniform', activation = 'relu'))
classifier.add(Dense(units = 90, activation = 'relu',kernel_initializer="uniform"))
classifier.add(Dense(units = 40, activation = 'relu',kernel_initializer="uniform"))
#classifier.add(Dense(units = 50, activation = 'relu',kernel_initializer="uniform"))
#classifier.add(Dense(output_dim = 20, init = 'uniform', activation = 'relu'))
classifier.add(Dense(units = 30, activation = 'relu',kernel_initializer="uniform"))
#classifier.add(Dense(units = 15, activation = 'relu',kernel_initializer="uniform"))
# Adding the output layer
# Adding the output layer
classifier.add(Dense(output_dim = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, nb_epoch = 100)
# Part 3 - Making the predictions and evaluating the model
scores =classifier.evaluate(X_test, y_test, batch_size=384, verbose=1)
print("%s: %.2f%%" % (classifier.metrics_names[1], scores[1]*100))
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred) | [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.Imputer",
"keras.utils.to_categorical",
"sklearn.preprocessing.StandardScaler",
"keras.models.Sequential",
"numpy.concatenate",
"keras.layers.Dense",
"sklearn.metrics.confusi... | [((554, 622), 'pandas.read_csv', 'pd.read_csv', (['"""features/Table_Step2_159Features-85Subs-5Levels-z.csv"""'], {}), "('features/Table_Step2_159Features-85Subs-5Levels-z.csv')\n", (565, 622), True, 'import pandas as pd\n'), ((709, 763), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': '"""NaN"""', 'strategy': '"""mean"""', 'axis': '(0)'}), "(missing_values='NaN', strategy='mean', axis=0)\n", (716, 763), False, 'from sklearn.preprocessing import Imputer\n'), ((1120, 1171), 'numpy.concatenate', 'np.concatenate', (['(level_zero_one, level_one)'], {'axis': '(0)'}), '((level_zero_one, level_one), axis=0)\n', (1134, 1171), True, 'import numpy as np\n'), ((1256, 1270), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1268, 1270), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((1344, 1394), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['Y_label'], {'num_classes': '(2)'}), '(Y_label, num_classes=2)\n', (1370, 1394), False, 'import keras\n'), ((1571, 1625), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(7)'}), '(X, y, test_size=0.25, random_state=7)\n', (1587, 1625), False, 'from sklearn.model_selection import train_test_split\n'), ((1703, 1719), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1717, 1719), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2046, 2058), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2056, 2058), False, 'from keras.models import Sequential\n'), ((3564, 3596), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3580, 3596), False, 'from sklearn.metrics import confusion_matrix\n'), ((2127, 2212), 'keras.layers.Dense', 'Dense', ([], {'units': '(120)', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""', 'input_dim': '(159)'}), 
"(units=120, activation='relu', kernel_initializer='uniform', input_dim=159\n )\n", (2132, 2212), False, 'from keras.layers import Dense\n'), ((2333, 2397), 'keras.layers.Dense', 'Dense', ([], {'units': '(90)', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(units=90, activation='relu', kernel_initializer='uniform')\n", (2338, 2397), False, 'from keras.layers import Dense\n'), ((2418, 2482), 'keras.layers.Dense', 'Dense', ([], {'units': '(40)', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(units=40, activation='relu', kernel_initializer='uniform')\n", (2423, 2482), False, 'from keras.layers import Dense\n'), ((2669, 2733), 'keras.layers.Dense', 'Dense', ([], {'units': '(30)', 'activation': '"""relu"""', 'kernel_initializer': '"""uniform"""'}), "(units=30, activation='relu', kernel_initializer='uniform')\n", (2674, 2733), False, 'from keras.layers import Dense\n'), ((2891, 2962), 'keras.layers.Dense', 'Dense', ([], {'output_dim': '(1)', 'kernel_initializer': '"""uniform"""', 'activation': '"""sigmoid"""'}), "(output_dim=1, kernel_initializer='uniform', activation='sigmoid')\n", (2896, 2962), False, 'from keras.layers import Dense\n')] |
import numpy as np
import pandas as pd
import cvxopt as opt
from cvxopt import solvers #, blas
from matplotlib import pyplot as plt
plt.style.use('seaborn')
np.random.seed(9062020)
# Cargar y limpiar datos
df = pd.read_csv("stocks.csv", sep=",", engine="python")
#���Field 1 la columna tiene caracteres extranios
df.columns = ['Field1' if i == 0 else x for i, x in enumerate(df.columns)]
df.set_index(df.columns[0], inplace=True)
# Seleccionamos 5 activos al azar
activos = np.random.permutation(df.columns)[:5]
plt.figure(figsize=(8,6))
plt.title('Historico de Algunos Activos', color='gray')
for activo in activos:
plt.plot(df[activo].to_numpy(), label=activo)
plt.ylabel('Precio del Activo', color='gray')
plt.xlabel('Observaciones', color='gray')
plt.legend(loc='upper left')
# Por que usar rendimiento logaritmicos
# https://quantdare.com/por-que-usar-rendimientos-logaritmicos/
df = df / df.shift(1)
df.dropna(inplace=True)
log_df = np.log(df)
print(log_df)
# Grafico con Rendimientos historicos
plt.figure(figsize=(8,6))
plt.title('Rendimiento de Todos los Activos', color='gray')
plt.plot(df.to_numpy(), alpha=0.2)
plt.ylabel('Rendimiento del Activo', color='gray')
plt.xlabel('Observaciones', color='gray')
def metricas_historicas_portafolio(portafolio, dias_anual):
"""
Da un pequenio reporte sobre las observaciones de
cada activo contenido en el portafolio, para
devolver finalmente dos matrices
`portafolio`: es un dataframe de observaciones (filas)
por activos (columnas) historico
`dias_anual`: es un entero que indica cuantos dias
tiene el anio, asumiendose para ellos que las
observaciones contenidas en `portafolio` son diarias
`return`: devuelve las matrices o arrays de rendimientos
esperados y la de covarianza del portafolio historico,
asi como el numero de activos
"""
# Metricas Historicas
activos_en_portafolio = portafolio.shape[1]
rendimientos_anual = dias_anual * portafolio.mean()
sigma_de_rendimientos_anual = dias_anual * portafolio.std()
varianza_de_rendimientos_anual = dias_anual * portafolio.std() ** 2 # diagonal de la covarianza de rendimientos
covarianza_de_rendimientos_anual = dias_anual * portafolio.cov()
# Reporte de Historicos
print(f"\nNumero de Activos:\n{activos_en_portafolio}")
print(f"\nRendimiento:\n{rendimientos_anual}")
print(f"\nDesviacion Estandar:\n{sigma_de_rendimientos_anual}")
print(f"\nVarianza:\n{varianza_de_rendimientos_anual}")
print(f"\nCovarianza:\n{covarianza_de_rendimientos_anual}")
# Matrices con las Estadisticas Hitoricas de Interes
# la variable `C`es mayuscula
p = np.asmatrix(rendimientos_anual.to_numpy())
C = np.asmatrix(covarianza_de_rendimientos_anual.to_numpy())
return p, C, activos_en_portafolio
p, C, numero_de_activos = metricas_historicas_portafolio(log_df, 252)
#print(p ,p.shape[1])
def resultados_portafolio(p,w,C):
"""
Dados unos pesos de colocacion para un
portafolio y teniendose los rendimientos y
covarianzas historicas, se obtiene el
rendimiento y volatilidad del portafolio
`p`: matriz con rendimientos historicos del
portafolio
`w`: peso que se empleara para colocar los
los fondos en los activos correspondientes
del portafolio
`C`: matriz con la covarianza historico del
portafolio
`return`: el redimiento y riesgo (volatilida)
del portafolio
"""
mu = w * p.T # Rendimiento Esperado
sigma = np.sqrt(w * C * w.T) # Volatilidad
return mu, sigma
def simular_pesos_portafolio(numero_de_activos):
"""
Generar pesos aleatorios para cada
activo en el portafolio
`numero_de_activos`: es entero
`return`:El peso de cada uno de los
activos en el portafolio cuya suma es 1
como matriz
"""
pesos = np.random.random(numero_de_activos)
pesos *= sum([np.random.binomial(1, 0.08, numero_de_activos) for _ in range(2)])
pesos = np.asmatrix(pesos / sum(pesos) )
return pesos
def simular_portafolio(p, C, numero_de_activos, libre_riesgo=0, limite_volatilidad=1):
"""
Genera el redimiento y la desviacion estandar
de una posible combinacion en la inversion
de cada activo para un portafolio dado
`p`: matriz con rendimientos historicos del
portafolio
`C`: matriz con la covarianza historico del
portafolio
`numero_de_activos`: entero que indica la cantidad
de activos en el portafolio
`libre de riesgo`: flotante que va de 0 a 1
`limite_volatilidad`: es para mantener la
volatilidad hasta un tope durante la
simulacion
`return`: el peso de inversion, el rendimiento
esperado (mu) y la desviacion estandar (sigma)
tambien conocida como volatilidad para el
portafolio generado así como el Sharpe Ratio
todas las salidas son arrays
"""
# Generar una posible combinacion del portafolio
p = p
w = simular_pesos_portafolio(numero_de_activos)
C = C
mu, sigma = resultados_portafolio(p,w,C)
sharpe_ratio = (mu - libre_riesgo) / sigma
# Esta recursividad reduce los valores atípicos
# para mantener el portafolio de interés
# tambien se puede desarrollar con `while` pero
# se requiere más codigo
if sigma > limite_volatilidad:
return simular_portafolio(p, C, numero_de_activos, libre_riesgo, limite_volatilidad)
return w, mu, sigma, sharpe_ratio
peso_activos, rendimiento, volatilidad, sharpe_ratio = simular_portafolio(p, C, numero_de_activos)
print("-"*40)
print('---- Portafolio Simulado ----')
print("-"*40)
print(f"\nSharpe Ratio: {sharpe_ratio}")
print(f"""
Pesos Del Portafolio Simulado:\n{peso_activos}
\nLos Pesos Suman: {peso_activos.sum():.4f}
""")
print(f"\nRedimiento del Portafolio Simulado:{rendimiento}")
print(f"\nVolatilidad del Portafolio Simulado:{volatilidad}\n")
def simulacion_de_portafolios(numero_de_portafolios, p, C,
numero_de_activos, libre_riesgo=0, limite_volatilidad=1):
"""
Genera los rendimientos y volatidades para un conjunto
de portafolios
`numero_de_portafolios`: entero que indica la
cantidad de replicas o simulaciones a efectuarse
`p`: matriz con rendimientos historicos del
portafolio
`C`: matriz con la covarianza historico del
portafolio
`numero_de_activos`: entero que indica la cantidad
de activos en el portafolio
`libre de riesgo`: flotante que va de 0 a 1
`limite_volatilidad`: es para mantener la
volatilidad hasta un tope durante la
simulacion
`return`: los pesos, rendimientos esperados así
como las volatidades `desviacion estandar`
para cada uno de los portafolios simulados
"""
pesos, rendimientos, volatilidades, sharper_ratios = zip(*[
simular_portafolio(p, C, numero_de_activos, libre_riesgo, limite_volatilidad)
for _ in range(numero_de_portafolios)
])
pesos, rendimientos, volatilidades, sharper_ratios = \
np.array(pesos), np.array(rendimientos), np.array(volatilidades), np.array(sharper_ratios)
return pesos, rendimientos, volatilidades, sharper_ratios
pesos, rendimientos, volatilidades, sharper_ratios = simulacion_de_portafolios(
numero_de_portafolios=1000,
p=p,
C=C,
numero_de_activos=numero_de_activos,
libre_riesgo=0
)
# Metricas Sharper
def rsharpe_maximo(sharper_ratios, rendimientos, volatilidades):
maximo_sharpe_ratio = sharper_ratios.max()
indice_maximo_sharpe_ratio = sharper_ratios.argmax()
pesos_optimos_simulados = pesos[indice_maximo_sharpe_ratio, :]
maximo_sharpe_ratio_rendimiento = rendimientos[indice_maximo_sharpe_ratio]
maximo_sharpe_ratio_volatilidad = volatilidades[indice_maximo_sharpe_ratio]
print("-" * 50)
print('---- Estadisticas Sharper Ratio ----')
print("-" * 50)
print(f"\nMaximo Sharpe Ratio: {maximo_sharpe_ratio}")
print(f"""Pesos Del Portafolio:\n{pesos_optimos_simulados}
\nLos Pesos Suman: {pesos_optimos_simulados.sum():.4f}
""" )
print(f"\nRedimiento del Maximo Sharpe Ratio:{maximo_sharpe_ratio_rendimiento}")
print(f"\nVolatilidad del Maximo Sharpe Ratio:{maximo_sharpe_ratio_volatilidad}\n")
return maximo_sharpe_ratio, maximo_sharpe_ratio_volatilidad, maximo_sharpe_ratio_rendimiento
# Estadisticas de Montecarlo
maximo_sharpe_ratio, maximo_sharpe_ratio_volatilidad, maximo_sharpe_ratio_rendimiento = rsharpe_maximo(sharper_ratios, rendimientos, volatilidades)
plt.figure(figsize=(8,6))
plt.title('Rendimientos y Volatilidades\n Portafolios Simulados', color='gray')
plt.scatter(volatilidades, rendimientos, c=sharper_ratios, cmap='cool')
plt.colorbar(label=r"$Sharpe\ Ratio$")
# Optimo Sharpe Ratio Simulado
plt.scatter(
maximo_sharpe_ratio_volatilidad,
maximo_sharpe_ratio_rendimiento,
c='orange', s=60, edgecolors='gray', label=f'Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}'
)
plt.ylabel(r'$Rendimiento$', color='gray')
plt.xlabel(r'$Volatilidad\ \sigma$', color='gray')
plt.legend(loc="upper left")
# Resolviendo el modelo cuadratico
# http://cvxopt.org/userguide/coneprog.html
# http://cvxopt.org/examples/book/portfolio.html
# http://cvxopt.org/examples/tutorial/qp.html
def portafolio_optimo(p, C, numero_de_activos):
    """Trace the efficient frontier by solving a family of quadratic programs.

    Sweeps a logarithmic grid of risk-aversion levels and, for each level
    ``mu``, solves the Markowitz QP with cvxopt:

        minimize  mu * w' C w  -  p' w
        s.t.      w >= 0,  sum(w) = 1

    yielding one frontier portfolio per level.

    :param p: matrix of historical portfolio returns
    :param C: historical covariance matrix of the portfolio
    :param numero_de_activos: number of assets in the portfolio
    :returns: tuple ``(pesos, rendimientos, volatilidades)`` — arrays with
        the weights of every frontier portfolio and the matching
        return/volatility pairs along the frontier
    """
    grid_size = 100
    n_act = numero_de_activos
    # Discrete, logarithmically spaced risk-aversion targets for the sweep.
    aversiones = [10 ** (5.0 * t / grid_size - 1.0) for t in range(grid_size)]
    # cvxopt-format copies of the inputs (the returns enter transposed).
    retornos_T = opt.matrix(p.T)
    covarianza = opt.matrix(C)
    # Inequality constraints Gx <= h: every weight is non-negative.
    G = -opt.matrix(np.eye(n_act))
    h = opt.matrix(0.0, (n_act, 1))
    # Equality constraint Ax = b: weights sum to one.
    A = opt.matrix(1.0, (1, n_act))
    b = opt.matrix(1.0)
    # Solve one QP per risk-aversion level, with solver chatter silenced.
    solvers.options['show_progress'] = False
    soluciones = [solvers.qp(mu * covarianza, -retornos_T, G, h, A, b)['x']
                  for mu in aversiones]
    # Return/volatility of every frontier portfolio.
    rendimientos, volatilidades = zip(*[
        resultados_portafolio(p, np.array(sol).T, C) for sol in soluciones
    ])
    pesos = np.asarray([np.asarray(sol) for sol in soluciones])
    return pesos, np.asarray(rendimientos), np.asarray(volatilidades)
# Recompute the efficient frontier from the historical inputs.
w_optimos, mu_optimos, sigma_optimos = portafolio_optimo(p, C, numero_de_activos)
########################################
# Efficient frontier and simulation
########################################
plt.figure(figsize=(8,6))
plt.title('Frontera eficiente', color='gray')
# Efficient frontier curve
plt.plot(sigma_optimos.reshape((1,-1))[0], mu_optimos.reshape((1,-1))[0], 'y-o', color='gray', alpha=0.4, label='Frontera Eficiente')
# Simulated portfolios, colored by Sharpe ratio
plt.scatter(volatilidades, rendimientos, c=sharper_ratios, cmap='cool')
plt.colorbar(label=r"$Sharpe\ Ratio$")
# Best simulated Sharpe ratio
plt.scatter(
    maximo_sharpe_ratio_volatilidad,
    maximo_sharpe_ratio_rendimiento,
    c='orange', s=60, edgecolors='gray', label=f'Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}'
)
plt.ylabel(r'$Rendimiento$', color='gray')
plt.xlabel(r'$Volatilidad\ \sigma$', color='gray')
plt.legend(loc="upper left")
########################################
# Sharpe ratio of the frontier
########################################
# Sanity check: every weight vector should sum to 1.
print("\nVerificar cuales suman 1: ")
print(np.array([x.sum() for x in w_optimos]))
# Keep only frontier portfolios whose weights are all non-negative.
filtrar_pesos_positivos = np.array([(x>=0).all() for x in w_optimos])
print("\nVerificar que todos los pesos sean >= 0: ")
print(filtrar_pesos_positivos)
print(w_optimos.shape, mu_optimos.shape, sigma_optimos.shape)
w_optimos = w_optimos[filtrar_pesos_positivos]
mu_optimos = mu_optimos [filtrar_pesos_positivos]
sigma_optimos = sigma_optimos[filtrar_pesos_positivos]
print("\nVerificar que todos los pesos sean >= 0: ")
print(np.array([(x>=0).all() for x in w_optimos]))
print(w_optimos.shape, mu_optimos.shape, sigma_optimos.shape)
# Sharpe ratio with a zero risk-free rate.
libre_riesgo = 0
rsharpe_optimos = (mu_optimos - libre_riesgo) / sigma_optimos
# NOTE(review): the second reshape((1,-1)) is redundant — the first call
# already produces the row vector.
rsharpe_optimos = rsharpe_optimos.reshape((1,-1)).reshape((1,-1)) # drop extra dimensions
maximo_rsharpe, maximo_rsharpe_volatilidad, maximo_rsharpe_rendimiento = rsharpe_maximo(rsharpe_optimos, mu_optimos, sigma_optimos)
plt.figure(figsize=(8,6))
plt.title('Sharpers de la Frontera Eficiente\nCon Pesos no Negativos', color='gray')
# Best Sharpe ratio along the efficient frontier
plt.scatter(
    rsharpe_optimos[0].argmax(),
    maximo_rsharpe,
    c='#90ff1e', s=100, edgecolors='gray', label=f'Sharpe Ratio = {maximo_rsharpe:.4f}'
)
plt.plot(rsharpe_optimos[0], 'y-o', color='dodgerblue', alpha=0.4)
plt.ylabel(r'$Sharpe\ Ratio$', color='gray')
plt.xlabel(r'$Observaciones$', color='gray')
plt.legend(loc="upper left")
########################################
# Everything together:
# optimal Sharpe ratio,
# efficient frontier and simulation
########################################
w_optimos, mu_optimos, sigma_optimos = portafolio_optimo(p, C, numero_de_activos)
libre_riesgo = 0
rsharpe_optimos = (mu_optimos - libre_riesgo) / sigma_optimos
rsharpe_optimos = rsharpe_optimos.reshape((1,-1)).reshape((1,-1)) # drop extra dimensions
plt.figure(figsize=(8,6))
plt.title('Frontera eficiente', color='gray')
# Efficient frontier curve
plt.plot(sigma_optimos.reshape((1,-1))[0], mu_optimos.reshape((1,-1))[0], 'y-o', color='gray', alpha=0.4, label='Frontera Eficiente')
# Simulated portfolios
plt.scatter(volatilidades, rendimientos, c=sharper_ratios, cmap='cool')
plt.colorbar(label=r"$Sharpe\ Ratio$")
# Best Sharpe ratio on the efficient frontier
idx_rshape_optimo = np.where(rsharpe_optimos[0] == maximo_rsharpe)
plt.scatter(
    sigma_optimos.reshape((1, -1))[0][idx_rshape_optimo], # volatility axis
    mu_optimos.reshape((1,-1))[0][idx_rshape_optimo], # returns axis
    c='#90ff1e', s=100, edgecolors='gray', label=f'Sharpe Ratio Optimo = {maximo_rsharpe:.4f}'
)
# Best simulated Sharpe ratio
plt.scatter(
    maximo_sharpe_ratio_volatilidad,
    maximo_sharpe_ratio_rendimiento,
    c='orange', s=100, edgecolors='gray', label=f'Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}'
)
plt.ylabel(r'$Rendimiento$', color='gray')
plt.xlabel(r'$Volatilidad\ \sigma$', color='gray')
plt.legend(loc="upper left")
########################################
# Show all figures
########################################
plt.show()
| [
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.log",
"numpy.array",
"numpy.random.binomial",
"numpy.where",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"matplotlib.pyplot.style.use",
"numpy.random.seed",
"matplotlib.pyp... | [((134, 158), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (147, 158), True, 'from matplotlib import pyplot as plt\n'), ((159, 182), 'numpy.random.seed', 'np.random.seed', (['(9062020)'], {}), '(9062020)\n', (173, 182), True, 'import numpy as np\n'), ((215, 266), 'pandas.read_csv', 'pd.read_csv', (['"""stocks.csv"""'], {'sep': '""","""', 'engine': '"""python"""'}), "('stocks.csv', sep=',', engine='python')\n", (226, 266), True, 'import pandas as pd\n'), ((517, 543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (527, 543), True, 'from matplotlib import pyplot as plt\n'), ((543, 598), 'matplotlib.pyplot.title', 'plt.title', (['"""Historico de Algunos Activos"""'], {'color': '"""gray"""'}), "('Historico de Algunos Activos', color='gray')\n", (552, 598), True, 'from matplotlib import pyplot as plt\n'), ((672, 717), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precio del Activo"""'], {'color': '"""gray"""'}), "('Precio del Activo', color='gray')\n", (682, 717), True, 'from matplotlib import pyplot as plt\n'), ((718, 759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observaciones"""'], {'color': '"""gray"""'}), "('Observaciones', color='gray')\n", (728, 759), True, 'from matplotlib import pyplot as plt\n'), ((760, 788), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (770, 788), True, 'from matplotlib import pyplot as plt\n'), ((950, 960), 'numpy.log', 'np.log', (['df'], {}), '(df)\n', (956, 960), True, 'import numpy as np\n'), ((1015, 1041), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (1025, 1041), True, 'from matplotlib import pyplot as plt\n'), ((1041, 1100), 'matplotlib.pyplot.title', 'plt.title', (['"""Rendimiento de Todos los Activos"""'], {'color': '"""gray"""'}), "('Rendimiento de Todos los Activos', color='gray')\n", (1050, 1100), True, 'from 
matplotlib import pyplot as plt\n'), ((1136, 1186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rendimiento del Activo"""'], {'color': '"""gray"""'}), "('Rendimiento del Activo', color='gray')\n", (1146, 1186), True, 'from matplotlib import pyplot as plt\n'), ((1187, 1228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observaciones"""'], {'color': '"""gray"""'}), "('Observaciones', color='gray')\n", (1197, 1228), True, 'from matplotlib import pyplot as plt\n'), ((8828, 8854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (8838, 8854), True, 'from matplotlib import pyplot as plt\n'), ((8854, 8941), 'matplotlib.pyplot.title', 'plt.title', (['"""Rendimientos y Volatilidades\n Portafolios Simulados"""'], {'color': '"""gray"""'}), '("""Rendimientos y Volatilidades\n Portafolios Simulados""", color=\n \'gray\')\n', (8863, 8941), True, 'from matplotlib import pyplot as plt\n'), ((8934, 9005), 'matplotlib.pyplot.scatter', 'plt.scatter', (['volatilidades', 'rendimientos'], {'c': 'sharper_ratios', 'cmap': '"""cool"""'}), "(volatilidades, rendimientos, c=sharper_ratios, cmap='cool')\n", (8945, 9005), True, 'from matplotlib import pyplot as plt\n'), ((9006, 9044), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""$Sharpe\\\\ Ratio$"""'}), "(label='$Sharpe\\\\ Ratio$')\n", (9018, 9044), True, 'from matplotlib import pyplot as plt\n'), ((9077, 9266), 'matplotlib.pyplot.scatter', 'plt.scatter', (['maximo_sharpe_ratio_volatilidad', 'maximo_sharpe_ratio_rendimiento'], {'c': '"""orange"""', 's': '(60)', 'edgecolors': '"""gray"""', 'label': 'f"""Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}"""'}), "(maximo_sharpe_ratio_volatilidad,\n maximo_sharpe_ratio_rendimiento, c='orange', s=60, edgecolors='gray',\n label=f'Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}')\n", (9088, 9266), True, 'from matplotlib import pyplot as plt\n'), ((9274, 9315), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""$Rendimiento$"""'], {'color': '"""gray"""'}), "('$Rendimiento$', color='gray')\n", (9284, 9315), True, 'from matplotlib import pyplot as plt\n'), ((9317, 9368), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Volatilidad\\\\ \\\\sigma$"""'], {'color': '"""gray"""'}), "('$Volatilidad\\\\ \\\\sigma$', color='gray')\n", (9327, 9368), True, 'from matplotlib import pyplot as plt\n'), ((9368, 9396), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (9378, 9396), True, 'from matplotlib import pyplot as plt\n'), ((12275, 12301), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (12285, 12301), True, 'from matplotlib import pyplot as plt\n'), ((12301, 12346), 'matplotlib.pyplot.title', 'plt.title', (['"""Frontera eficiente"""'], {'color': '"""gray"""'}), "('Frontera eficiente', color='gray')\n", (12310, 12346), True, 'from matplotlib import pyplot as plt\n'), ((12516, 12587), 'matplotlib.pyplot.scatter', 'plt.scatter', (['volatilidades', 'rendimientos'], {'c': 'sharper_ratios', 'cmap': '"""cool"""'}), "(volatilidades, rendimientos, c=sharper_ratios, cmap='cool')\n", (12527, 12587), True, 'from matplotlib import pyplot as plt\n'), ((12588, 12626), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""$Sharpe\\\\ Ratio$"""'}), "(label='$Sharpe\\\\ Ratio$')\n", (12600, 12626), True, 'from matplotlib import pyplot as plt\n'), ((12659, 12848), 'matplotlib.pyplot.scatter', 'plt.scatter', (['maximo_sharpe_ratio_volatilidad', 'maximo_sharpe_ratio_rendimiento'], {'c': '"""orange"""', 's': '(60)', 'edgecolors': '"""gray"""', 'label': 'f"""Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}"""'}), "(maximo_sharpe_ratio_volatilidad,\n maximo_sharpe_ratio_rendimiento, c='orange', s=60, edgecolors='gray',\n label=f'Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}')\n", (12670, 12848), True, 'from matplotlib import pyplot as plt\n'), ((12856, 12897), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Rendimiento$"""'], {'color': '"""gray"""'}), "('$Rendimiento$', color='gray')\n", (12866, 12897), True, 'from matplotlib import pyplot as plt\n'), ((12899, 12950), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Volatilidad\\\\ \\\\sigma$"""'], {'color': '"""gray"""'}), "('$Volatilidad\\\\ \\\\sigma$', color='gray')\n", (12909, 12950), True, 'from matplotlib import pyplot as plt\n'), ((12950, 12978), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (12960, 12978), True, 'from matplotlib import pyplot as plt\n'), ((14012, 14038), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (14022, 14038), True, 'from matplotlib import pyplot as plt\n'), ((14038, 14129), 'matplotlib.pyplot.title', 'plt.title', (['"""Sharpers de la Frontera Eficiente\nCon Pesos no Negativos"""'], {'color': '"""gray"""'}), '("""Sharpers de la Frontera Eficiente\nCon Pesos no Negativos""",\n color=\'gray\')\n', (14047, 14129), True, 'from matplotlib import pyplot as plt\n'), ((14322, 14388), 'matplotlib.pyplot.plot', 'plt.plot', (['rsharpe_optimos[0]', '"""y-o"""'], {'color': '"""dodgerblue"""', 'alpha': '(0.4)'}), "(rsharpe_optimos[0], 'y-o', color='dodgerblue', alpha=0.4)\n", (14330, 14388), True, 'from matplotlib import pyplot as plt\n'), ((14390, 14434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Sharpe\\\\ Ratio$"""'], {'color': '"""gray"""'}), "('$Sharpe\\\\ Ratio$', color='gray')\n", (14400, 14434), True, 'from matplotlib import pyplot as plt\n'), ((14435, 14478), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Observaciones$"""'], {'color': '"""gray"""'}), "('$Observaciones$', color='gray')\n", (14445, 14478), True, 'from matplotlib import pyplot as plt\n'), ((14480, 14508), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (14490, 14508), True, 'from matplotlib import pyplot as plt\n'), 
((14922, 14948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (14932, 14948), True, 'from matplotlib import pyplot as plt\n'), ((14948, 14993), 'matplotlib.pyplot.title', 'plt.title', (['"""Frontera eficiente"""'], {'color': '"""gray"""'}), "('Frontera eficiente', color='gray')\n", (14957, 14993), True, 'from matplotlib import pyplot as plt\n'), ((15163, 15234), 'matplotlib.pyplot.scatter', 'plt.scatter', (['volatilidades', 'rendimientos'], {'c': 'sharper_ratios', 'cmap': '"""cool"""'}), "(volatilidades, rendimientos, c=sharper_ratios, cmap='cool')\n", (15174, 15234), True, 'from matplotlib import pyplot as plt\n'), ((15235, 15273), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""$Sharpe\\\\ Ratio$"""'}), "(label='$Sharpe\\\\ Ratio$')\n", (15247, 15273), True, 'from matplotlib import pyplot as plt\n'), ((15336, 15382), 'numpy.where', 'np.where', (['(rsharpe_optimos[0] == maximo_rsharpe)'], {}), '(rsharpe_optimos[0] == maximo_rsharpe)\n', (15344, 15382), True, 'import numpy as np\n'), ((15682, 15872), 'matplotlib.pyplot.scatter', 'plt.scatter', (['maximo_sharpe_ratio_volatilidad', 'maximo_sharpe_ratio_rendimiento'], {'c': '"""orange"""', 's': '(100)', 'edgecolors': '"""gray"""', 'label': 'f"""Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}"""'}), "(maximo_sharpe_ratio_volatilidad,\n maximo_sharpe_ratio_rendimiento, c='orange', s=100, edgecolors='gray',\n label=f'Sharpe Ratio Optimo Simulado = {maximo_sharpe_ratio:.4f}')\n", (15693, 15872), True, 'from matplotlib import pyplot as plt\n'), ((15880, 15921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Rendimiento$"""'], {'color': '"""gray"""'}), "('$Rendimiento$', color='gray')\n", (15890, 15921), True, 'from matplotlib import pyplot as plt\n'), ((15923, 15974), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$Volatilidad\\\\ \\\\sigma$"""'], {'color': '"""gray"""'}), "('$Volatilidad\\\\ \\\\sigma$', color='gray')\n", (15933, 15974), True, 
'from matplotlib import pyplot as plt\n'), ((15974, 16002), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (15984, 16002), True, 'from matplotlib import pyplot as plt\n'), ((16120, 16130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16128, 16130), True, 'from matplotlib import pyplot as plt\n'), ((479, 512), 'numpy.random.permutation', 'np.random.permutation', (['df.columns'], {}), '(df.columns)\n', (500, 512), True, 'import numpy as np\n'), ((3634, 3654), 'numpy.sqrt', 'np.sqrt', (['(w * C * w.T)'], {}), '(w * C * w.T)\n', (3641, 3654), True, 'import numpy as np\n'), ((4017, 4052), 'numpy.random.random', 'np.random.random', (['numero_de_activos'], {}), '(numero_de_activos)\n', (4033, 4052), True, 'import numpy as np\n'), ((10570, 10585), 'cvxopt.matrix', 'opt.matrix', (['p.T'], {}), '(p.T)\n', (10580, 10585), True, 'import cvxopt as opt\n'), ((10594, 10607), 'cvxopt.matrix', 'opt.matrix', (['C'], {}), '(C)\n', (10604, 10607), True, 'import cvxopt as opt\n'), ((10741, 10764), 'cvxopt.matrix', 'opt.matrix', (['(0.0)', '(n, 1)'], {}), '(0.0, (n, 1))\n', (10751, 10764), True, 'import cvxopt as opt\n'), ((10787, 10810), 'cvxopt.matrix', 'opt.matrix', (['(1.0)', '(1, n)'], {}), '(1.0, (1, n))\n', (10797, 10810), True, 'import cvxopt as opt\n'), ((10819, 10834), 'cvxopt.matrix', 'opt.matrix', (['(1.0)'], {}), '(1.0)\n', (10829, 10834), True, 'import cvxopt as opt\n'), ((11917, 11934), 'numpy.asarray', 'np.asarray', (['pesos'], {}), '(pesos)\n', (11927, 11934), True, 'import numpy as np\n'), ((11954, 11978), 'numpy.asarray', 'np.asarray', (['rendimientos'], {}), '(rendimientos)\n', (11964, 11978), True, 'import numpy as np\n'), ((11999, 12024), 'numpy.asarray', 'np.asarray', (['volatilidades'], {}), '(volatilidades)\n', (12009, 12024), True, 'import numpy as np\n'), ((7313, 7328), 'numpy.array', 'np.array', (['pesos'], {}), '(pesos)\n', (7321, 7328), True, 'import numpy as np\n'), ((7330, 7352), 
'numpy.array', 'np.array', (['rendimientos'], {}), '(rendimientos)\n', (7338, 7352), True, 'import numpy as np\n'), ((7354, 7377), 'numpy.array', 'np.array', (['volatilidades'], {}), '(volatilidades)\n', (7362, 7377), True, 'import numpy as np\n'), ((7379, 7403), 'numpy.array', 'np.array', (['sharper_ratios'], {}), '(sharper_ratios)\n', (7387, 7403), True, 'import numpy as np\n'), ((11868, 11881), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (11878, 11881), True, 'import numpy as np\n'), ((4071, 4117), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.08)', 'numero_de_activos'], {}), '(1, 0.08, numero_de_activos)\n', (4089, 4117), True, 'import numpy as np\n'), ((10685, 10694), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (10691, 10694), True, 'import numpy as np\n'), ((11082, 11119), 'cvxopt.solvers.qp', 'solvers.qp', (['(mu * S)', '(-pbar)', 'G', 'h', 'A', 'b'], {}), '(mu * S, -pbar, G, h, A, b)\n', (11092, 11119), False, 'from cvxopt import solvers\n'), ((11646, 11657), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (11654, 11657), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["Parameter", "UnitVector", "Model"]
import numpy as np
import tensorflow as tf
def get_param_for_value(value, min_value, max_value):
    """Map a bounded value into the unconstrained parameter space.

    For two-sided bounds this is the logit-style transform
    ``log(value - min) - log(max - value)``; a one-sided bound uses only
    its log term, and with no bounds the value passes straight through.

    :param value: scalar or array lying strictly inside the bounds
    :param min_value: lower bound, or None for unbounded below
    :param max_value: upper bound, or None for unbounded above
    :returns: the unconstrained parameter (same shape as ``value``)
    :raises ValueError: if any element of ``value`` touches or exceeds
        a given bound
    """
    below = min_value is not None and np.any(value <= min_value)
    above = max_value is not None and np.any(value >= max_value)
    if below or above:
        raise ValueError("value must be in the range (min_value, max_value)")
    if min_value is None and max_value is None:
        return value
    param = 0.0
    if min_value is not None:
        param = param + np.log(value - min_value)
    if max_value is not None:
        param = param - np.log(max_value - value)
    return param
def get_value_for_param(param, min_value, max_value, _np=np):
    """Inverse of ``get_param_for_value``: map an unconstrained parameter
    back into the (possibly bounded) value space.

    :param param: scalar or array in the unconstrained space
    :param min_value: lower bound, or None for unbounded below
    :param max_value: upper bound, or None for unbounded above
    :param _np: module providing ``exp`` — numpy by default; a symbolic
        backend (e.g. tensorflow) may be passed to build graph ops
    :returns: sigmoid squashing for two-sided bounds, an exponential
        shift for one-sided bounds, identity when unbounded
    """
    bounded_below = min_value is not None
    bounded_above = max_value is not None
    if not bounded_below and not bounded_above:
        return param
    if not bounded_below:
        return max_value - _np.exp(-param)
    if not bounded_above:
        return min_value + _np.exp(param)
    span = max_value - min_value
    return min_value + span / (1.0 + _np.exp(-param))
class Parameter(object):
    """A model parameter, optionally constrained to an open interval.

    With ``bounds=(lo, hi)`` the free tf variable lives in the
    unconstrained space produced by ``get_param_for_value`` and the
    exposed ``value`` maps it back via ``get_value_for_param``; a
    log-Jacobian-style correction for the transform is accumulated in
    ``log_jacobian``.  Without bounds the variable *is* the value and the
    correction is zero.
    """
    def __init__(self, value, bounds=None, name=None, dtype=None,
                 frozen=False):
        """
        :param value: initial value, given in the constrained space
        :param bounds: optional ``(min, max)`` pair; either end may be None
        :param name: tf name scope for the created ops
        :param dtype: dtype of the underlying tf variable
        :param frozen: when True, start excluded from ``get_parameters()``
        """
        self.changed = True
        self.frozen = frozen
        self.name = name
        self.bounds = bounds
        with tf.name_scope(name, "Parameter"):
            if bounds is None:
                # Unbounded: the raw variable is the value; correction is 0.
                self._parameters = [tf.Variable(value, name="parameter",
                                               dtype=dtype)]
                self._value = self._parameters[0]
                self._log_jac = tf.constant(0.0, dtype=self._value.dtype)
            else:
                # Bounded: store the unconstrained reparameterization and
                # map it back; each finite bound contributes a log term.
                self._parameters = [tf.Variable(
                    get_param_for_value(value, *bounds), name="parameter",
                    dtype=dtype)]
                self._value = get_value_for_param(self._parameters[0], *bounds,
                                                  _np=tf)
                self._log_jac = tf.constant(0.0, dtype=self._value.dtype)
                if bounds[0] is not None:
                    self._log_jac += tf.reduce_sum(
                        tf.log(self._value - bounds[0]))
                if bounds[1] is not None:
                    self._log_jac += tf.reduce_sum(
                        tf.log(bounds[1] - self._value))
    def __getattr__(self, key):
        # Delegate unknown attribute lookups to the value tensor so a
        # Parameter can be used where a tf tensor is expected.
        try:
            return getattr(self.value, key)
        except AttributeError:
            raise AttributeError(key)
    @property
    def value(self):
        # Tensor in the constrained (user-facing) space.
        return self._value
    def get_parameters(self, include_frozen=False):
        """Return the free tf variables; empty when frozen (unless asked)."""
        if self.frozen and not include_frozen:
            return []
        return self._parameters
    @property
    def log_jacobian(self):
        # Correction term for the bounds transform (0.0 when unbounded).
        return self._log_jac
    def freeze(self):
        """Exclude this parameter from optimization; marks caches stale."""
        self.changed = True
        self.frozen = True
    def thaw(self):
        """Re-include this parameter in optimization; marks caches stale."""
        self.changed = True
        self.frozen = False
class UnitVector(Parameter):
    """A parameter whose exposed value is normalized to unit length along
    its last axis.

    The free variable is the raw vector ``x``; ``value`` is
    ``x / |x|``.  NOTE(review): unlike ``Parameter.__init__``, this does
    not set ``self.bounds`` — code relying on that attribute will fail on
    a UnitVector; confirm whether that is intentional.
    """
    def __init__(self, x, name=None, dtype=None, frozen=False):
        self.changed = True
        self.frozen = frozen
        self.name = name
        with tf.name_scope(name, "UnitVector"):
            self.x = tf.Variable(x, dtype=dtype, name="x")
            # Squared norm of the raw vector along the last axis.
            norm = tf.reduce_sum(tf.square(self.x), axis=-1)
            self._parameters = [self.x]
            # Exposed value: x scaled to unit length.
            self._value = self.x / tf.expand_dims(tf.sqrt(norm), -1)
            # NOTE(review): -0.5 * sum(norm) looks like a Gaussian term on
            # the raw (scale-degenerate) vector rather than a true
            # Jacobian — confirm the intent.
            self._log_jac = -0.5 * tf.reduce_sum(norm)
class Model(object):
    """Wraps a target tensor and its parameters for flat-vector access.

    Presents the scattered tf parameters as a single flat numpy vector so
    external optimizers/samplers can evaluate ``value`` and ``gradient``
    at arbitrary points.  ``log_jacobian`` terms of bounded parameters
    are folded into the target at construction time.
    """
    def __init__(self, target, parameters, feed_dict=None, session=None):
        """
        :param target: scalar objective tensor to evaluate/differentiate
        :param parameters: list of Parameter objects and/or raw tf variables
        :param feed_dict: extra feeds supplied on every session run
        :param session: tf session; defaults lazily to the default session
        """
        self._changed = True
        self._parameters = parameters
        self._feed_dict = dict() if feed_dict is None else feed_dict
        self._session = session
        self.target = target
        for p in self._parameters:
            # Parameters exposing a log_jacobian contribute it to the
            # target; raw tf variables lack the attribute and are skipped.
            try:
                self.target += p.log_jacobian
            except AttributeError:
                pass
    @property
    def changed(self):
        # True on first use or after any freeze/thaw, invalidating the
        # caches rebuilt by update().
        return self._changed or any(p.changed for p in self.get_parameters())
    def get_parameters(self, include_frozen=False):
        """Flatten Parameter objects and raw variables into one tf list."""
        params = []
        for par in self._parameters:
            try:
                params += par.get_parameters(include_frozen=include_frozen)
            except AttributeError:
                # Raw tf variable without a get_parameters method.
                params.append(par)
        return params
    @property
    def session(self):
        # Lazily fall back to the default tf session.
        if self._session is None:
            self._session = tf.get_default_session()
        return self._session
    def value(self, vector):
        """Evaluate the target at the given flat parameter vector."""
        feed_dict = self.vector_to_feed_dict(vector)
        return self.session.run(self.target, feed_dict=feed_dict)
    def gradient(self, vector):
        """Evaluate d(target)/d(parameters) at ``vector``, flattened."""
        feed_dict = self.vector_to_feed_dict(vector)
        return np.concatenate([
            np.reshape(g, s) for s, g in zip(
                self.sizes,
                self.session.run(self.grad_target, feed_dict=feed_dict))
        ])
    def update(self):
        """Rebuild gradient ops and shape/size caches if anything changed."""
        if not self.changed:
            return
        self.parameters = self.get_parameters()
        self.grad_target = tf.gradients(self.target, self.parameters)
        values = self.session.run(self.parameters, feed_dict=self._feed_dict)
        self.shapes = [np.shape(v) for v in values]
        self.sizes = [np.size(v) for v in values]
        for p in self.parameters:
            p.changed = False
        self._changed = False
    def vector_to_feed_dict(self, vector, specs=None):
        """Split a flat vector into per-parameter feeds (``specs`` unused)."""
        self.update()
        i = 0
        fd = dict(self._feed_dict)
        for var, shape, size in zip(self.parameters, self.shapes, self.sizes):
            fd[var] = np.reshape(vector[i:i+size], shape)
            i += size
        return fd
    def feed_dict_to_vector(self, feed_dict):
        """Inverse of vector_to_feed_dict: concatenate feeds into a vector."""
        self.update()
        return np.concatenate([
            np.reshape(feed_dict[v], s)
            for v, s in zip(self.parameters, self.sizes)])
    def current_vector(self):
        """Return the parameters' current session values as a flat vector."""
        self.update()
        values = self.session.run(self.parameters, feed_dict=self._feed_dict)
        return np.concatenate([
            np.reshape(v, s)
            for v, s in zip(values, self.sizes)])
    def get_values_for_chain(self, chain, var_list=None, names=None):
        """Evaluate tensors at every parameter vector in ``chain``.

        :param chain: iterable of flat parameter vectors
        :param var_list: tensors to evaluate (defaults to parameter values)
        :param names: record names (defaults to parameter/tensor names)
        :returns: structured numpy array with one field per tensor and one
            row per chain sample
        """
        self.update()
        if var_list is None:
            if names is None:
                names = [p.name for p in self._parameters]
            var_list = [p.value if hasattr(p, "value") else p
                        for p in self._parameters]
        elif names is None:
            names = [v.name for v in var_list]
        # Work out the dtype for the output chain
        dtype = [(n, float, np.shape(v))
                 for n, v in zip(names,
                                 self.session.run(var_list,
                                                  feed_dict=self._feed_dict))]
        # Allocate the output chain
        N = len(chain)
        out_chain = np.empty(N, dtype=dtype)
        # Loop over the chain and get the value at each sample
        for i, s in enumerate(chain):
            fd = self.vector_to_feed_dict(s)
            for n, v in zip(names, self.session.run(var_list, feed_dict=fd)):
                out_chain[n][i] = v
        return out_chain
| [
"numpy.reshape",
"tensorflow.Variable",
"tensorflow.reduce_sum",
"numpy.size",
"numpy.log",
"tensorflow.get_default_session",
"numpy.any",
"tensorflow.gradients",
"tensorflow.sqrt",
"numpy.empty",
"tensorflow.name_scope",
"tensorflow.constant",
"tensorflow.square",
"numpy.shape",
"tensor... | [((568, 593), 'numpy.log', 'np.log', (['(value - min_value)'], {}), '(value - min_value)\n', (574, 593), True, 'import numpy as np\n'), ((642, 667), 'numpy.log', 'np.log', (['(max_value - value)'], {}), '(max_value - value)\n', (648, 667), True, 'import numpy as np\n'), ((4993, 5035), 'tensorflow.gradients', 'tf.gradients', (['self.target', 'self.parameters'], {}), '(self.target, self.parameters)\n', (5005, 5035), True, 'import tensorflow as tf\n'), ((6807, 6831), 'numpy.empty', 'np.empty', (['N'], {'dtype': 'dtype'}), '(N, dtype=dtype)\n', (6815, 6831), True, 'import numpy as np\n'), ((256, 282), 'numpy.any', 'np.any', (['(value <= min_value)'], {}), '(value <= min_value)\n', (262, 282), True, 'import numpy as np\n'), ((326, 352), 'numpy.any', 'np.any', (['(value >= max_value)'], {}), '(value >= max_value)\n', (332, 352), True, 'import numpy as np\n'), ((1279, 1311), 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""Parameter"""'], {}), "(name, 'Parameter')\n", (1292, 1311), True, 'import tensorflow as tf\n'), ((3087, 3120), 'tensorflow.name_scope', 'tf.name_scope', (['name', '"""UnitVector"""'], {}), "(name, 'UnitVector')\n", (3100, 3120), True, 'import tensorflow as tf\n'), ((3143, 3180), 'tensorflow.Variable', 'tf.Variable', (['x'], {'dtype': 'dtype', 'name': '"""x"""'}), "(x, dtype=dtype, name='x')\n", (3154, 3180), True, 'import tensorflow as tf\n'), ((4368, 4392), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (4390, 4392), True, 'import tensorflow as tf\n'), ((5137, 5148), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (5145, 5148), True, 'import numpy as np\n'), ((5188, 5198), 'numpy.size', 'np.size', (['v'], {}), '(v)\n', (5195, 5198), True, 'import numpy as np\n'), ((5538, 5575), 'numpy.reshape', 'np.reshape', (['vector[i:i + size]', 'shape'], {}), '(vector[i:i + size], shape)\n', (5548, 5575), True, 'import numpy as np\n'), ((1561, 1602), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 
'self._value.dtype'}), '(0.0, dtype=self._value.dtype)\n', (1572, 1602), True, 'import tensorflow as tf\n'), ((1949, 1990), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'self._value.dtype'}), '(0.0, dtype=self._value.dtype)\n', (1960, 1990), True, 'import tensorflow as tf\n'), ((3214, 3231), 'tensorflow.square', 'tf.square', (['self.x'], {}), '(self.x)\n', (3223, 3231), True, 'import tensorflow as tf\n'), ((3387, 3406), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['norm'], {}), '(norm)\n', (3400, 3406), True, 'import tensorflow as tf\n'), ((4701, 4717), 'numpy.reshape', 'np.reshape', (['g', 's'], {}), '(g, s)\n', (4711, 4717), True, 'import numpy as np\n'), ((5727, 5754), 'numpy.reshape', 'np.reshape', (['feed_dict[v]', 's'], {}), '(feed_dict[v], s)\n', (5737, 5754), True, 'import numpy as np\n'), ((5989, 6005), 'numpy.reshape', 'np.reshape', (['v', 's'], {}), '(v, s)\n', (5999, 6005), True, 'import numpy as np\n'), ((6535, 6546), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (6543, 6546), True, 'import numpy as np\n'), ((1380, 1429), 'tensorflow.Variable', 'tf.Variable', (['value'], {'name': '"""parameter"""', 'dtype': 'dtype'}), "(value, name='parameter', dtype=dtype)\n", (1391, 1429), True, 'import tensorflow as tf\n'), ((3333, 3346), 'tensorflow.sqrt', 'tf.sqrt', (['norm'], {}), '(norm)\n', (3340, 3346), True, 'import tensorflow as tf\n'), ((2109, 2140), 'tensorflow.log', 'tf.log', (['(self._value - bounds[0])'], {}), '(self._value - bounds[0])\n', (2115, 2140), True, 'import tensorflow as tf\n'), ((2260, 2291), 'tensorflow.log', 'tf.log', (['(bounds[1] - self._value)'], {}), '(bounds[1] - self._value)\n', (2266, 2291), True, 'import tensorflow as tf\n')] |
import numpy as np
import sys, os, json, argparse, glob, itertools, pickle
from typing import List
from dataclasses import dataclass
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
import h5py as h5
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from tracer import Tracer, read_tracerpick
from fluid_particle import FluidParticle, find_fp_volume
#import matplotlib.gridspec as gridspec
#from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#colors = cm.rainbow(np.linspace(0,1, np.maximum(len(plot_mol), len(plot_atm))+1))
# y-axis limits shared by the chemistry plots (applied via set_ylim(*ybound)
# in generate_plot); spans the plotted mass-fraction range on a log axis.
ybound = [1.0E-12, 1.2E0]
def generate_plot(input_hdf: str, settings: dict):
    """Build the species mass-fraction vs. time figure for one particle.

    NOTE(review): this function is work-in-progress and cannot currently
    run — several names it consumes are undefined (see inline notes).
    The apparent intent: read per-timestep chemistry data, plot selected
    species on a log y-axis, with temperature on a twin axis.

    :param input_hdf: path to the particle's chemistry output (HDF5)
    :param settings: plot configuration; ``settings["plot"]["elements"]``
        lists element symbols to plot
    :returns: the matplotlib figure
    """
    plot_atm = settings["plot"]["elements"].split()
    # NOTE(review): plot_mol / plot_molbig are used below, but their
    # assignments are commented out — NameError at runtime.
    # plot_mol = settings["plot"]["molecules_small"].split()
    # plot_molbig = settings["plot"]["molecules_large"].split()
    plt.gca()
    #colors = cm.rainbow(np.linspace(0,1, np.maximum(len(plot_mol), len(plot_atm))+1))
    colors_0 = cm.rainbow(np.linspace(0,1, len(plot_mol)+1))
    colors_1 = cm.rainbow(np.linspace(0,1, len(plot_molbig)+1))
    # NOTE(review): the parameter is named ``input_hdf`` but the call below
    # reads ``input_hdf5`` — NameError; confirm the intended name.
    with h5.File(input_hdf5, "r") as hf:
        for tsp in hf["root"]:
            step = tsp
    temperatures  # NOTE(review): bare expression, leftover placeholder
    # NOTE(review): ``headers`` and ``inputf`` are consumed below, but the
    # code defining them is commented out — NameError at runtime.
    # headers = None
    # with open(inputf, "r") as f:
    #     lines = f.readlines()
    #     headers = lines[0].split(",")
    #     headers = [h.replace("Y_","").strip() for h in headers]
    # Move the N2 column to the end of the header list.
    n2idx = 0
    for i, h in enumerate(headers):
        if h == "N2":
            n2idx = i
            break
    headers = headers[:n2idx] + headers[n2idx+1:] + [headers[n2idx]]
    cols = dict([h, i] for i, h in enumerate(headers))
    chemdat = np.loadtxt(inputf, delimiter=',', skiprows=1)
    times = chemdat[:, cols["t"]]
    temperatures = chemdat[:, cols["T"]] / 1.0E3  # scaled for the twin axis
    densities = chemdat[:, cols["density"]]
    Ymol = [chemdat[:, cols[s]] for s in plot_mol]
    Ymolbig = [chemdat[:, cols[s]] for s in plot_molbig]
    plt.rcParams['figure.figsize'] = 18, 9
    plt.rcParams.update({"font.size": 38})
    plt.rcParams.update({"legend.fontsize": 'large'})
    # fig, ax = plt.subplots(nrows=2, ncols=2)
    fig, ax = plt.subplots(nrows=1, ncols=1)
    # Index of the first sample at t >= 5 s (short-time window).
    i_short = 0
    try:
        while times[i_short] < 5.0:
            i_short += 1
    except:  # NOTE(review): bare except — IndexError seems the intended case
        i_short = -1
    tshort = times[:i_short]
    for i in range(len(plot_mol)):
        ax.semilogy(times, Ymol[i], label=plot_mol[i], color=colors_0[i], linewidth=3.4, linestyle='solid', marker='o', markevery=200, markersize=7.0)
        # ax[0,0].semilogy(times, Ymol[i], label=plot_mol[i], color=colors_0[i], linewidth=2.2, linestyle='solid', marker='o', markevery=180, markersize=4.2)
        # ax[0,1].semilogy(tshort, Ymol[i][:i_short], label=plot_mol[i], color=colors_0[i], linewidth=2.2, linestyle='solid', marker='o', markevery=180, markersize=4.2)
    # for i in range(len(plot_molbig)):
    #     ax[1,0].semilogy(times, Ymolbig[i], label=plot_molbig[i], color=colors_1[i])
    #     ax[1,1].semilogy(tshort, Ymolbig[i][:i_short], label=plot_molbig[i], color=colors_1[i])
    #for i in range(len(plot_atm)):
    #    ax.semilogy(times, Yatm[i], label=plot_atm[i], linestyle='--', color=colors[i])
    # Single-panel layout; the loops are remnants of a former 2x2 grid.
    for i in range(1):
        for j in range(1):
            # a = ax[i,j]
            a = ax
            a.set_ylim(*ybound)
            a.grid()
            if j == 0:
                a.set_ylabel("Y")
                #a.spines['left'].set_visible(False)
            # Twin axis carrying the temperature trace.
            ax2 = a.twinx()
            if j == 1 or j == 0:
                ax2.plot(tshort, temperatures[:i_short], linestyle=':', color='r', linewidth=3.3, label="T")
                # a.set_yticks([])
            else:
                ax2.plot(times, temperatures, linestyle=':', color='r', linewidth=3.3, label="T")
                #ax2.spines['right'].set_visible(False)
                ax2.set_yticks([])
                a.set_yticks([])
            if j == 1 or j == 0:
                ax2.set_ylabel(r"temperature (K) / $10^3$")
            # Shrink the axis to leave room for the legend.
            box = a.get_position()
            a.set_position([box.x0, box.y0, box.width * 0.9, box.height])
            # Put a legend to the right of the current axis
            # a.legend(loc='center left', bbox_to_anchor=(1.20, 0.5))
            a.legend(loc='upper right', fontsize='x-small')
            ax2.legend(loc='lower right', fontsize='x-small')
            if i == 1 or i == 0:
                a.set_xlabel("time (s)")
            a.set_xlabel("time (s)")
            new_tick_label = ['{:2.1f}'.format(x) for x in [2, 4, 6,8,10]]
            ax2.set_yticklabels(new_tick_label)
    #fig.legend(loc=7)
    fig.tight_layout()
    ax.set_xlim(left=0.05)
    #ax[1].set_xlabel("time (s)")
    fig.subplots_adjust(right=0.74)
    return fig
    #fig.suptitle(f"chemdat", x=0.88, y=0.98)
    #plt.show()
    # outputf =f"{outdir}//tracer_{tid}.png"
    # print(f"saving {outputf}")
    # fig.savefig(outputf, dpi=200)
    #return
def plot_particle_chem(pfile: str, title: str, settings: dict, output = "screen") -> None:
    """Render and emit the chemistry figure for a single particle.

    Builds the figure via ``generate_plot``; when *output* is the literal
    string ``"screen"`` the figure is shown interactively, otherwise it
    is written to *output* as an image file and closed.

    :param pfile: particle chemistry data path handed to ``generate_plot``
    :param title: currently unused (the suptitle call is disabled)
    :param settings: plot configuration forwarded to ``generate_plot``
    :param output: ``"screen"`` for interactive display, else a file path
    """
    figure = generate_plot(pfile, settings)
    if output == "screen":
        plt.show()
        return
    figure.savefig(output, dpi=440)
    plt.close(figure)
def main(input: str, config_f: str, output: str, ff: str, batch: bool = False) -> None:
    """Plot particle chemistry for a single file, or for a whole directory.

    :param input: data file (single mode) or directory of data files (batch)
    :param config_f: JSON file with the plot settings
    :param output: output image path (single mode) or output directory (batch)
    :param ff: pickle file of fluid tracers, used to annotate x_0 in titles
    :param batch: when True, plot every regular file found in *input*
    :raises NotADirectoryError: if batch is set but *input* is not a directory
    """
    with open(config_f, 'r') as cf:
        settings = json.load(cf)
    if not batch:
        # single-file mode
        prefix = os.path.splitext(os.path.basename(input))[0]
        xy = get_xy0(ff, prefix)
        title = f"{prefix} x_0=[{xy[0]/3.0E6:.2f},{xy[1]/5.0E6:.2f}]"
        plot_particle_chem(input, title, settings, output=output)
        return
    if not os.path.isdir(input):
        raise NotADirectoryError("input is not a directory on batch plot")
    for datf in os.listdir(input):
        inputf = os.path.join(input, datf)
        if not os.path.isfile(inputf):
            continue  # skip sub-directories and the like
        prefix = os.path.splitext(os.path.basename(datf))[0]
        xy = get_xy0(ff, prefix)
        title = f"{prefix} x_0=[{xy[0]/3.0E6:.2f},{xy[1]/5.0E6:.2f}]"
        print(f"plotting {datf}")
        plot_particle_chem(inputf, title, settings, output=os.path.join(output, f"{prefix}.png"))
def get_xy0(pickf: str, tlbl: str):
    """Return the initial (x, y) position of the pickled tracer labelled *tlbl*.

    Scans the pickle stream in *pickf* for the first object whose ``label``
    attribute equals *tlbl* and returns the first point of its ``xyz`` track.

    :param pickf: path of a file containing a sequence of pickled tracers
    :param tlbl: tracer label to look for
    :return: (x0, y0) of the matching tracer, or (-1, -1) when not found
    """
    import pickle  # local import so the function is self-contained
    with open(pickf, "rb") as pf:
        while True:
            try:
                t = pickle.load(pf)
            # BUGFIX: a bare `except:` used to swallow every error (including
            # KeyboardInterrupt and attribute errors); only treat end-of-stream
            # or a corrupt tail as "no more tracers".
            except (EOFError, pickle.UnpicklingError):
                break
            if t.label == tlbl:
                return t.xyz[0][0], t.xyz[0][1]
    return -1, -1
def file_2d(fp_pick: str, output: str, res_dir: str, follow=("H2O", "H2", "CO", "NO", "NO2", "NNH", "NH3", "HNO")):
    """Tabulate particle trajectories together with selected species fractions.

    Reads pickled FluidParticle-compatible records from *fp_pick*, looks up the
    matching chemistry file ("<tlbl>.out") in *res_dir*, and writes one row per
    time sample (tid, time, x, y, Y_species...) to the text file *output*.

    :param fp_pick: pickle stream of fluid-particle records
    :param output: path of the table file to write
    :param res_dir: directory containing per-particle chemistry ".out" files
    :param follow: species names whose mass fractions are exported
                   (BUGFIX: default changed from a mutable list to a tuple)
    """
    avail_files = os.listdir(res_dir)
    with open(output, "w") as ouf:
        print("preparing file...")
        fslb = " ".join([f"{s:10s}" for s in follow])
        print(f"{'tid':8s} {'time':10s} {'x':10s} {'y':10s} {fslb}", file=ouf)
        with open(fp_pick, "rb") as pf:
            while True:
                try:
                    fp = FluidParticle(pickle.load(pf))
                except EOFError:
                    break
                expected_filename = f"{fp.tlbl}.out"
                print(f"searching for chemistry on {fp.tlbl}...")
                if expected_filename not in avail_files:
                    print(f"could not find data for {fp.tlbl}")
                    continue
                # parse the CSV header; species columns are written as "Y_<name>"
                headers = None
                with open(os.path.join(res_dir, expected_filename), "r") as f:
                    line = f.readline()
                    headers = line.split(",")
                    headers = [h.replace("Y_", "").strip() for h in headers]
                n2idx = 0
                for i, h in enumerate(headers):
                    if h == "N2":
                        n2idx = i
                        break
                # NOTE(review): N2 is moved to the end of the header list while the
                # data columns keep their file order -- this maps correctly only if
                # N2 is the last data column (or absent). Confirm the .out format.
                headers = headers[:n2idx] + headers[n2idx+1:] + [headers[n2idx]]
                cols = dict([h, i] for i, h in enumerate(headers))
                chemdat = np.loadtxt(os.path.join(res_dir, expected_filename), delimiter=',', skiprows=1)
                times = chemdat[:, cols["t"]]
                Ymol = [chemdat[:, cols[s]] for s in follow]
                for i in range(times.size):
                    pos = fp.XYZ(times[i])
                    fslb = " ".join([f"{y[i]:10.8E}" for y in Ymol])
                    print(f"{fp.tid:8d} {times[i]:10.8E} {pos[0]:10.8E} {pos[1]:10.8E} {fslb}", file=ouf)
def iter_loadtxt(filename, skiprows=0):
    """Lazily parse a whitespace-separated table grouped by the tid column.

    Yields the header list first, then one dict per contiguous run of equal
    values in column 0 ("tid"), mapping each header name to the list of column
    values ("tid" entries as ints, all others as floats).

    :param filename: path of the table; the first non-skipped line is the header
    :param skiprows: number of leading lines to skip before the header
    """
    cid = None  # BUGFIX: was 1, which crashed (blk is None) when the first tid == 1
    with open(filename, 'r') as infile:
        for _ in range(skiprows):
            next(infile)
        headers = infile.readline().split()
        yield headers
        blk = None
        for line in infile:
            tok = line.split()
            tid = int(tok[0])
            if tid != cid:
                # flush the finished block and start accumulating a new one
                if blk is not None:
                    yield blk
                blk = {h: [] for h in headers}
                cid = tid
            blk["tid"].append(tid)
            for h, t in zip(headers[1:], tok[1:]):
                blk[h].append(float(t))
        # BUGFIX: the final block used to be silently dropped; flush it as well
        if blk is not None:
            yield blk
class animate_helper:
    """Builds per-tracer time interpolators from a 2D data table (see file_2d)
    and evaluates every tracer's position and species value at arbitrary times.

    NOTE(review): ``_npix`` is stored but never used in this class -- presumably
    intended as a rasterisation resolution; confirm before relying on it.
    """
    def __init__(self, dat2d: str, npix: int=1000):
        # dat2d: path of the whitespace table produced by file_2d()
        self._datf = dat2d
        self._npix = npix
        #dat = np.genfromtxt(self._datf, names=True)
        #self._ids = np.unique(dat["tid"])[::4]
        #self._ntids = self._ids.size
        #self._fields = list(dat.dtype.fields.keys())[2:]
        dat = iter_loadtxt(dat2d)
        # first yield of iter_loadtxt is the header list; drop "tid" and "time"
        self._fields = next(dat)[2:]
        self._intrps = []
        for blk in dat:
            print(f"constructing intrp for {blk['tid'][0]}")
            # one interpolator per field per tracer; out-of-range times yield -20
            self._intrps.append({ fld : interp1d(blk["time"], blk[fld], bounds_error=False, fill_value=-20, copy=False) for fld in self._fields })
        self._ntids=len(self._intrps)
    def __call__(self, time: float, mol: str):
        """Return (positions (n,2), log10 value of *mol* (n,)) for all tracers."""
        p, z = self._get_pntz(time, mol)
        return p, z
        #gridx, gridy = np.mgrid[np.min(p[:,0]):np.max(p[:,0]):1000j, np.min(p[:,1]):np.max(p[:,0]):1000j]
        #grid = griddata(p, z, (gridx, gridy), method='cubic', rescale=False)
        #return grid
    def _get_pntz(self, time, mol):
        """Evaluate every tracer's (x, y) and the *mol* field at *time*."""
        p = np.zeros((self._ntids, 2))
        z = np.zeros(self._ntids)
        for n in range(self._ntids):
            p[n,0] = self._intrps[n]["x"](time)
            p[n,1] = self._intrps[n]["y"](time)
            # clamp to a tiny positive value so the log10 below stays finite
            z[n] = np.maximum(self._intrps[n][mol](time), 1.0E-20)
        #idz = np.where([z > 0])[0]
        #z[idz] = np.log10(z[idz])
        z = np.log10(z)
        return p, z
#fp_x0 = np.array([ fp.XYZ(fp.t_left) for fp in fps ])[:,0:2]
#x = fp_x0[:, 0]
#y = fp_x0[:, 1]
#ds = np.log10(np.array([ fp.D(fp.t_left) for fp in fps]))
#gridx, gridy = np.mgrid[np.min(x):np.max(x):1000j, np.min(y):np.max(y):1000j]
#grid = griddata(fp_x0, ds, (gridx, gridy), method='nearest', rescale=True)
#plt.imshow(grid.T, extent=(0,1,0,1), origin="lower")
#plt.show()
if __name__ == "__main__":
    # CLI entry point: plot particle chemistry (default), build the 2D table
    # (--make2Ddat), or render the multi-time scatter panel (--movie).
    par = argparse.ArgumentParser()
    par.add_argument("-i", "--input", type=str, help="input file or directory", required=True)
    par.add_argument("-c", "--config", type=str, help="plot configurations", default=None)
    par.add_argument("-o", "--output", help="output locate ('screen', or directory)", type=str, default="screen")
    par.add_argument("-b", "--batch", action="store_true", help="run on all files in directory")
    par.add_argument("--make2Ddat", action="store_true", help="make the data file for 2d viewing")
    par.add_argument("--fluidfile", type=str, default=None)
    par.add_argument("--movie", action="store_true")
    args = par.parse_args()
    if args.make2Ddat:
        if args.fluidfile is None:
            print("error, need --fluidfile")
        file_2d(args.fluidfile, args.output, args.input)
    elif args.movie:
        plt.rcParams['figure.figsize'] = 28, 4
        plt.rcParams.update({"font.size": 14})
        ahpf="__ah_tmp.pickle"
        # cache the (expensive) interpolator construction between runs
        if not os.path.exists(ahpf):
            ah = animate_helper(args.input)
            with open(ahpf, "wb") as pf:
                pickle.dump(ah, pf)
        with open(ahpf, "rb") as pf:
            ah = pickle.load(pf)
        #g = ah(10.0E0, "H2O")
        cm = plt.cm.get_cmap('Reds')
        mol = "CO"
        ts = [0.5, 1.0, 2.0, 5.0, 10.0, 15.0, 30.0 ]
        # one scatter panel per snapshot time, sharing a single colorbar
        fig, axs = plt.subplots(nrows=1, ncols=len(ts))
        divider = make_axes_locatable(axs[-1])
        cax = divider.append_axes('right', size='5%', pad=0.05)
        im = None
        for i, t, ax in zip(range(len(ts)), ts, axs):
            p, z = ah(t, mol)
            im = ax.scatter(p[:,0] * 1.0E-5, p[:,1] * 1.0E-5, c=z, vmin=-16.0, vmax=0.0, s=10, cmap=cm, alpha=0.3)
            #gridx, gridy = np.mgrid[0:3*10E5:2000j,0:5.5*10E5:2000j]
            #grid = griddata(p, z, (gridx, gridy), method='cubic', rescale=False, fill_value=np.min(z))
            #ax.imshow(grid.T, extent=(0,1,0,1), origin="lower")
            ax.set_title(f"t = {t} s")
            if i != 0:
                ax.set_yticks([])
        fig.colorbar(im, cax=cax, orientation='vertical')
        fig.suptitle(f"{mol}")
        plt.show()
        #plt.imshow(g.T, extent=(0,1,0,1), origin="lower", cmap='jet', vmin=-10, vmax=0)
        #plt.colorbar()
        #plt.show()
    else:
        main(args.input, args.config, args.output, args.fluidfile, args.batch)
| [
"numpy.log10",
"scipy.interpolate.interp1d",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"os.path.isdir",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"matplotlib.pyplot.gca",
"pickle.load",
"h5py.File",
"os.path.isfile",
"matplotlib.pyplot.cm.get_... | [((1018, 1027), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1025, 1027), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1849), 'numpy.loadtxt', 'np.loadtxt', (['inputf'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(inputf, delimiter=',', skiprows=1)\n", (1814, 1849), True, 'import numpy as np\n'), ((2135, 2173), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 38}"], {}), "({'font.size': 38})\n", (2154, 2173), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2227), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'legend.fontsize': 'large'}"], {}), "({'legend.fontsize': 'large'})\n", (2197, 2227), True, 'import matplotlib.pyplot as plt\n'), ((2289, 2319), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)'}), '(nrows=1, ncols=1)\n', (2301, 2319), True, 'import matplotlib.pyplot as plt\n'), ((7001, 7020), 'os.listdir', 'os.listdir', (['res_dir'], {}), '(res_dir)\n', (7011, 7020), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((11425, 11450), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11448, 11450), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((1256, 1280), 'h5py.File', 'h5.File', (['input_hdf5', '"""r"""'], {}), "(input_hdf5, 'r')\n", (1263, 1280), True, 'import h5py as h5\n'), ((5306, 5316), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5314, 5316), True, 'import matplotlib.pyplot as plt\n'), ((5372, 5386), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5381, 5386), True, 'import matplotlib.pyplot as plt\n'), ((5565, 5578), 'json.load', 'json.load', (['cf'], {}), '(cf)\n', (5574, 5578), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((5730, 5747), 'os.listdir', 'os.listdir', (['input'], {}), '(input)\n', (5740, 5747), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((10592, 10618), 
'numpy.zeros', 'np.zeros', (['(self._ntids, 2)'], {}), '((self._ntids, 2))\n', (10600, 10618), True, 'import numpy as np\n'), ((10631, 10652), 'numpy.zeros', 'np.zeros', (['self._ntids'], {}), '(self._ntids)\n', (10639, 10652), True, 'import numpy as np\n'), ((10938, 10949), 'numpy.log10', 'np.log10', (['z'], {}), '(z)\n', (10946, 10949), True, 'import numpy as np\n'), ((5609, 5629), 'os.path.isdir', 'os.path.isdir', (['input'], {}), '(input)\n', (5622, 5629), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((5836, 5873), 'os.path.join', 'os.path.join', (['output', 'f"""{prefix}.png"""'], {}), "(output, f'{prefix}.png')\n", (5848, 5873), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((5895, 5920), 'os.path.join', 'os.path.join', (['input', 'datf'], {}), '(input, datf)\n', (5907, 5920), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((5936, 5958), 'os.path.isfile', 'os.path.isfile', (['inputf'], {}), '(inputf)\n', (5950, 5958), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((12328, 12366), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (12347, 12366), True, 'import matplotlib.pyplot as plt\n'), ((12672, 12695), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""Reds"""'], {}), "('Reds')\n", (12687, 12695), True, 'import matplotlib.pyplot as plt\n'), ((12842, 12870), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axs[-1]'], {}), '(axs[-1])\n', (12861, 12870), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((13589, 13599), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13597, 13599), True, 'import matplotlib.pyplot as plt\n'), ((442, 467), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (457, 467), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((6239, 6262), 'os.path.basename', 'os.path.basename', 
(['input'], {}), '(input)\n', (6255, 6262), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((6578, 6593), 'pickle.load', 'pickle.load', (['pf'], {}), '(pf)\n', (6589, 6593), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((12414, 12434), 'os.path.exists', 'os.path.exists', (['ahpf'], {}), '(ahpf)\n', (12428, 12434), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((12611, 12626), 'pickle.load', 'pickle.load', (['pf'], {}), '(pf)\n', (12622, 12626), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((5787, 5809), 'os.path.basename', 'os.path.basename', (['datf'], {}), '(datf)\n', (5803, 5809), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((8326, 8366), 'os.path.join', 'os.path.join', (['res_dir', 'expected_filename'], {}), '(res_dir, expected_filename)\n', (8338, 8366), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((10081, 10160), 'scipy.interpolate.interp1d', 'interp1d', (["blk['time']", 'blk[fld]'], {'bounds_error': '(False)', 'fill_value': '(-20)', 'copy': '(False)'}), "(blk['time'], blk[fld], bounds_error=False, fill_value=-20, copy=False)\n", (10089, 10160), False, 'from scipy.interpolate import InterpolatedUnivariateSpline, interp1d\n'), ((12537, 12556), 'pickle.dump', 'pickle.dump', (['ah', 'pf'], {}), '(ah, pf)\n', (12548, 12556), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((7350, 7365), 'pickle.load', 'pickle.load', (['pf'], {}), '(pf)\n', (7361, 7365), False, 'import sys, os, json, argparse, glob, itertools, pickle\n'), ((7754, 7794), 'os.path.join', 'os.path.join', (['res_dir', 'expected_filename'], {}), '(res_dir, expected_filename)\n', (7766, 7794), False, 'import sys, os, json, argparse, glob, itertools, pickle\n')] |
#!/usr/bin/env python3
# Copyright (c) 2017 <NAME>. All rights reserved
# coding=utf-8
# -*- coding: utf8 -*-
"""
SpectrumClass is a class to store the spectrum information, i.e. the ralation between
wavelength and intensity.
The constructor of this calss includes two necessities and one option
wavelength, intensity (necessary)
wavelengthunitstr="nm" (optional)
Once the object is constructed, wavelength and intensity do not support the modification
but support the change in unit. (e.x. from "nm" to "m")
This class store the information of the absolute value of intensity but provide some function
to calculate the normalized intensity by area or by max.
(If the searching wavelength is not in the wavelength provided when the object is constructed,
then all the value is calculated by interpolation. However, if the searching wavelength
is out of the range of, the user can definced whether to recall assertation or put the
other value. Default no assertation, and the value is zero if out of range.)
Intensity(wavelength, OutofRangeAssertBool = True, OutofRangeValue = 0)
Intensity_NormalizedByMax(wavelength, OutofRangeAssertBool = True, OutofRangeValue = 0)
Intensity_NormalizedByArea(wavelength, OutofRangeAssertBool = True, OutofRangeValue = 0)
This class can also calculate the average photon energy (eV) of the spectrum.
AvergePhotonIntensity()
>>> wavelength = np.linspace(520, 600, 9)
>>> intensity = np.array([1,2,3,4,5,4,3,2,1], dtype=np.float64)
>>> spec = SpectrumClass(wavelength,intensity)
>>> print(spec.Area)
[ 240.]
>>> print(spec.wavelength_store())
[ 520. 530. 540. 550. 560. 570. 580. 590. 600.]
>>> print(spec.intensity_store())
[ 1. 2. 3. 4. 5. 4. 3. 2. 1.]
>>> print(spec.wavelengthunitstr)
nm
>>>
>>> wv = np.array([520,525,530,535,540,545,550], dtype=np.float64)
>>> value = spec.Intensity(wv)
>>> print(value)
520.0 1.0
525.0 1.5
530.0 2.0
535.0 2.5
540.0 3.0
545.0 3.5
550.0 4.0
dtype: float64
>>> value = spec.Intensity_NormalizedByMax(wv)
>>> print(value)
520.0 0.2
525.0 0.3
530.0 0.4
535.0 0.5
540.0 0.6
545.0 0.7
550.0 0.8
dtype: float64
>>> value = spec.Intensity_NormalizedByArea(wv)
>>> print(value)
520.0 0.004167
525.0 0.006250
530.0 0.008333
535.0 0.010417
540.0 0.012500
545.0 0.014583
550.0 0.016667
dtype: float64
>>>
>>> E = spec.AvergePhotonIntensity()
>>> print(E)
2.21676345446
>>>
>>> spec.wavelengthunitstr = "um"
>>> print(spec.Area)
[ 240.]
>>> print(spec.wavelength_store())
[ 0.52 0.53 0.54 0.55 0.56 0.57 0.58 0.59 0.6 ]
>>> print(spec.intensity_store())
[ 1000. 2000. 3000. 4000. 5000. 4000. 3000. 2000. 1000.]
>>> print(spec.wavelengthunitstr)
um
>>> wv = wv/1000
>>> value = spec.Intensity(wv)
>>> print(value)
0.520 1000.0
0.525 1500.0
0.530 2000.0
0.535 2500.0
0.540 3000.0
0.545 3500.0
0.550 4000.0
dtype: float64
>>> value = spec.Intensity_NormalizedByMax(wv)
>>> print(value)
0.520 0.2
0.525 0.3
0.530 0.4
0.535 0.5
0.540 0.6
0.545 0.7
0.550 0.8
dtype: float64
>>> value = spec.Intensity_NormalizedByArea(wv)
>>> print(value)
0.520 4.166667
0.525 6.250000
0.530 8.333333
0.535 10.416667
0.540 12.500000
0.545 14.583333
0.550 16.666667
dtype: float64
>>> E = spec.AvergePhotonIntensity()
>>> print(E)
2.21676345446
"""
### python module
import sys
import os
import collections
import numpy as np
import pandas as pd
### my module
MaterialPath = os.path.dirname(os.path.abspath(__file__))
srcPath = os.path.dirname(MaterialPath)
if srcPath not in sys.path:
sys.path.append(srcPath)
from Help.my_unit import unit2unit
from Help.myXYClass import myXYClass_FixedArea
from Help.myNumericalIntegration import myNumericalIntegration
from ReaderWriter.Writer.colWriter import my_XYcolWriter
# Constant variable
_IntensityDefaultValue = 0.0
class SpectrumClass(myXYClass_FixedArea):
    """Spectrum relating wavelength to intensity.

    The wavelength/intensity samples are fixed after construction; only the
    wavelength unit may be changed, which rescales the stored values so that
    the spectrum area is preserved.
    """

    def __init__(self, wavelength, intensity, wavelengthunitstr="nm"):
        super().__init__(wavelength, intensity, xStr='wavelength', tags=['Intensity'], xdtype=np.float64, ydtype=np.float64)
        self.__wavelengthunitstr = wavelengthunitstr

    # ---- intensity queries --------------------------------------------------
    def Intensity(self, wavelength, OutofRangeAssertBool=True, OutofRangeValue=_IntensityDefaultValue):
        """Return the (interpolated) intensity at *wavelength* as a pandas Series."""
        tempt = np.transpose(self.getValue(wavelength, OutofRangeAssertBool, OutofRangeValue))[0]
        if tempt.ndim == 0:
            # scalar query: promote both value and index to 1-element arrays
            tempt = np.array([tempt], dtype=np.float64)
            wavelength = np.array([wavelength], dtype=np.float64)
        return pd.Series(tempt, index=wavelength, dtype=np.float64)

    def Intensity_NormalizedByMax(self, wavelength, alpha=1.0, OutofRangeAssertBool=True, OutofRangeValue=_IntensityDefaultValue):
        """Intensity rescaled so the stored maximum equals *alpha*."""
        intensity = self.Intensity(wavelength, OutofRangeAssertBool, OutofRangeValue)
        maxi = np.max(self.intensity_store())
        return intensity / maxi * alpha

    def Intensity_NormalizedByArea(self, wavelength, alpha=1.0, OutofRangeAssertBool=True, OutofRangeValue=_IntensityDefaultValue):
        """Intensity rescaled so the total spectrum area equals *alpha*."""
        intensity = self.Intensity(wavelength, OutofRangeAssertBool, OutofRangeValue)
        Area = self.Area[0]
        # degenerate (zero-area) spectra are returned unscaled
        return intensity / Area * alpha if Area != 0 else intensity

    # ---- spectrum information -----------------------------------------------
    def AvergePhotonIntensity(self):
        """Average photon energy (eV) of the area-normalised spectrum."""
        factor = unit2unit('n', self.wavelengthunitstr[0:(len(self.wavelengthunitstr) - 1)])
        wavelength = self.wavelength_store()
        intensity = self.Intensity_NormalizedByArea(wavelength).values
        # E = integral of (1240 / lambda[nm]) * I_norm(lambda) d lambda
        return (1240 * factor) * myNumericalIntegration(wavelength, intensity / wavelength)

    def wavelength_store(self):
        """Stored wavelength samples (in the current unit)."""
        return self.x

    def intensity_store(self):
        """Stored intensity samples."""
        y = self.y
        return np.transpose(y)[0]

    def printInformation(self):
        """Pretty-print the attached notes and the wavelength/intensity table."""
        if len(self.NoteDict.keys()) != 0:
            print("In Note : ")
            for key in self.NoteDict:
                print("   {0}:{1}".format(key, self.NoteDict[key]))
        print('-'*40)
        wavelength = self.wavelength_store()
        intensity = self.intensity_store()
        print('{0:>20s} {1:>20s}'.format( "Wavelength({0})".format(self.wavelengthunitstr), "Intensity"))
        for ii in range(len(wavelength)):
            print("{0:>20.5f} {1:>20.5f}".format(wavelength[ii], intensity[ii]))
        print('='*40)

    def saveInformationstr(self):
        """Serialize the PATH/FILENAME notes as "[PATH]...|[FILENAME]..."."""
        strr = ""
        if "PATH" in self.NoteDict and "FILENAME" in self.NoteDict:
            strr = strr + "[{0}]{1}".format("PATH", self.NoteDict["PATH"]) + "|" + "[{0}]{1}".format("FILENAME", self.NoteDict["FILENAME"])
        return strr
        # (unreachable example kept from the original implementation:)
        # for key in self.NoteDict:
        #     strr = strr + "[{0}]{1}".format(key, self.NoteDict[key]) + "|"

    # ---- operator overloading -----------------------------------------------
    def _pointwise(self, other, op):
        """Apply *op* pointwise between this spectrum and *other*.

        *other* may be a float (applied uniformly to the stored intensities) or
        another SpectrumClass; in the latter case both spectra are combined on
        the overlapping wavelength range, in nm.
        """
        if isinstance(other, float):
            return SpectrumClass(self.wavelength_store(), op(self.intensity_store(), other),
                                 wavelengthunitstr=self.wavelengthunitstr)
        unit1, unit2 = self.wavelengthunitstr, other.wavelengthunitstr
        self.wavelengthunitstr, other.wavelengthunitstr = "nm", "nm"
        wv1, wv2 = self.wavelength_store(), other.wavelength_store()
        wv = wv1[np.bitwise_and(wv1 >= max([wv1[0], wv2[0]]), wv1 <= min([wv1[-1], wv2[-1]]))]
        # BUGFIX: the second operand used to be read from self instead of other,
        # so spec1 + spec2 actually computed spec1 + spec1 (same for *, -, /)
        Int1, Int2 = self.Intensity(wv).values, other.Intensity(wv).values
        spec3 = SpectrumClass(wv, op(Int1, Int2), wavelengthunitstr="nm")
        self.wavelengthunitstr = unit1
        other.wavelengthunitstr = unit2
        return spec3

    def __add__(self, other):
        return self._pointwise(other, lambda a, b: a + b)

    def __mul__(self, other):
        return self._pointwise(other, lambda a, b: a * b)

    def __sub__(self, other):
        return self._pointwise(other, lambda a, b: a - b)

    def __truediv__(self, other):
        return self._pointwise(other, lambda a, b: a / b)

    # ---- data member --------------------------------------------------------
    @property
    def wavelengthunitstr(self):
        return self.__wavelengthunitstr

    @wavelengthunitstr.setter
    def wavelengthunitstr(self, wavelengthunitstr):
        # changing the unit rescales the stored wavelengths accordingly
        assert wavelengthunitstr[-1].lower() == 'm', "The unit of wavelength should be the length [e.x. m, nm]"
        factor = unit2unit(self.wavelengthunitstr[0:(len(self.wavelengthunitstr) - 1)], wavelengthunitstr[0:(len(wavelengthunitstr) - 1)])
        self._xScaling(factor)
        self.__wavelengthunitstr = wavelengthunitstr
if __name__ == '__main__':
    # Run the doctests embedded in the module docstring when executed directly.
    import doctest
    doctest.testmod()
| [
"pandas.Series",
"Help.myNumericalIntegration.myNumericalIntegration",
"os.path.dirname",
"numpy.array",
"doctest.testmod",
"os.path.abspath",
"numpy.transpose",
"sys.path.append"
] | [((3750, 3779), 'os.path.dirname', 'os.path.dirname', (['MaterialPath'], {}), '(MaterialPath)\n', (3765, 3779), False, 'import os\n'), ((3712, 3737), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3727, 3737), False, 'import os\n'), ((3814, 3838), 'sys.path.append', 'sys.path.append', (['srcPath'], {}), '(srcPath)\n', (3829, 3838), False, 'import sys\n'), ((10625, 10642), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (10640, 10642), False, 'import doctest\n'), ((4801, 4853), 'pandas.Series', 'pd.Series', (['tempt'], {'index': 'wavelength', 'dtype': 'np.float64'}), '(tempt, index=wavelength, dtype=np.float64)\n', (4810, 4853), True, 'import pandas as pd\n'), ((4682, 4717), 'numpy.array', 'np.array', (['[tempt]'], {'dtype': 'np.float64'}), '([tempt], dtype=np.float64)\n', (4690, 4717), True, 'import numpy as np\n'), ((4744, 4784), 'numpy.array', 'np.array', (['[wavelength]'], {'dtype': 'np.float64'}), '([wavelength], dtype=np.float64)\n', (4752, 4784), True, 'import numpy as np\n'), ((5782, 5840), 'Help.myNumericalIntegration.myNumericalIntegration', 'myNumericalIntegration', (['wavelength', '(intensity / wavelength)'], {}), '(wavelength, intensity / wavelength)\n', (5804, 5840), False, 'from Help.myNumericalIntegration import myNumericalIntegration\n'), ((5963, 5978), 'numpy.transpose', 'np.transpose', (['y'], {}), '(y)\n', (5975, 5978), True, 'import numpy as np\n')] |
import numpy as np
PI = np.pi
DEG = 180./PI
INF = np.inf
def eig(M, sort_type=1):
    """Eigen-decomposition of a symmetric/Hermitian matrix M (via np.linalg.eigh).

    sort_type selects the ordering of the returned eigenvalue/eigenvector pairs:
      1: algebraic, descending (lam1 >= lam2 >= ...)
      2: algebraic, ascending
      3: absolute value, descending
      4: absolute value, ascending
    """
    if sort_type not in [1, 2, 3, 4]:
        raise ValueError
    lam, V = np.linalg.eigh(M)
    # sort on the raw values (types 1/2) or on their magnitudes (types 3/4)
    sort_key = np.abs(lam) if sort_type in (3, 4) else lam
    order = np.argsort(sort_key)
    if sort_type in (1, 3):
        order = order[::-1]
    return lam[order], V[:, order]
def list_intersect(a, b):
    """Return the elements common to both lists (order not guaranteed)."""
    return list(set(a) & set(b))
def list_intersect_with_indices(a, b):
    """Return (common elements, their first indices in *a*); order not guaranteed."""
    common = list(set(a) & set(b))
    positions = [a.index(value) for value in common]
    return common, positions
def rotmat(xdeg, idx):
    """Return the 3x3 rotation matrix for a rotation of *xdeg* degrees about
    coordinate axis *idx* (0 = x, 1 = y, 2 = z).
    """
    if idx not in (0, 1, 2):
        raise ValueError
    c = np.cos(xdeg / DEG)
    s = np.sin(xdeg / DEG)
    matrices = {
        0: [[1, 0, 0], [0, c, -s], [0, s, c]],
        1: [[c, 0, s], [0, 1, 0], [-s, 0, c]],
        2: [[c, -s, 0], [s, c, 0], [0, 0, 1]],
    }
    return np.array(matrices[idx])
def rotmat_gen(v, xi):
    """Rotation matrix for a rotation of *xi* degrees about the arbitrary axis *v*.

    Built by aligning *v* with the z axis, rotating about z, and undoing the
    alignment (same left-to-right product order as the original formulation).
    """
    rho = np.linalg.norm(v)
    theta = np.arccos(v[2] / rho)   # polar angle of the axis
    phi = np.arctan2(v[1], v[0])    # azimuthal angle of the axis
    R = rotmat(phi * DEG, 2)
    R = np.dot(R, rotmat(theta * DEG, 1))
    R = np.dot(R, rotmat(xi, 2))
    R = np.dot(R, rotmat(-theta * DEG, 1))
    R = np.dot(R, rotmat(-phi * DEG, 2))
    return R
def fangle(x, y):
    """Return the (unsigned) angle between two vectors, in degrees."""
    dot_xy = np.dot(x, y)
    norm_product = (np.dot(x, x) * np.dot(y, y)) ** 0.5
    return np.arccos(dot_xy / norm_product) * DEG
def fangle_signed(va, vb, vnor):
    """Return the signed rotation angle from *va* to *vb*, in degrees.

    The sign follows the handedness of the triple (va, vb, vnor); an angle of
    exactly 180 degrees is returned as +180.
    """
    theta = fangle(va, vb)
    EPSVAL = 0
    if abs(theta - 180) <= EPSVAL:
        return 180
    # negative determinant means the triple is left-handed -> negative angle
    if np.linalg.det(np.column_stack([va, vb, vnor])) < 0:
        return -theta
    return theta
def wrap360(omega):
    """Wrap the phase angle *omega* (degrees) into the interval [0, 360)."""
    full_turn = 360.
    return omega % full_turn
def isclose(X, Y):
    """True when X and Y agree to within a small absolute (Euclidean) tolerance."""
    EPSVAL = 1.e-6
    delta = np.array(X) - np.array(Y)
    return bool(np.linalg.norm(delta) < EPSVAL)
def open_interval(x1, x2, nx):
    """Return *nx* evenly spaced points strictly between x1 and x2 (endpoints excluded)."""
    grid = np.linspace(x1, x2, nx + 2)
    return grid[1:-1]
def closed_interval(x1, x2, nx):
    """Return *nx* evenly spaced points from x1 to x2, endpoints included."""
    return np.linspace(x1, x2, nx)
| [
"numpy.abs",
"numpy.arccos",
"numpy.sin",
"numpy.column_stack",
"numpy.linalg.det",
"numpy.argsort",
"numpy.array",
"numpy.dot",
"numpy.linspace",
"numpy.arctan2",
"numpy.cos",
"numpy.linalg.norm",
"numpy.linalg.eigh"
] | [((229, 246), 'numpy.linalg.eigh', 'np.linalg.eigh', (['M'], {}), '(M)\n', (243, 246), True, 'import numpy as np\n'), ((1293, 1311), 'numpy.cos', 'np.cos', (['(xdeg / DEG)'], {}), '(xdeg / DEG)\n', (1299, 1311), True, 'import numpy as np\n'), ((1323, 1341), 'numpy.sin', 'np.sin', (['(xdeg / DEG)'], {}), '(xdeg / DEG)\n', (1329, 1341), True, 'import numpy as np\n'), ((1798, 1815), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {}), '(v)\n', (1812, 1815), True, 'import numpy as np\n'), ((1826, 1847), 'numpy.arccos', 'np.arccos', (['(v[2] / rho)'], {}), '(v[2] / rho)\n', (1835, 1847), True, 'import numpy as np\n'), ((1858, 1880), 'numpy.arctan2', 'np.arctan2', (['v[1]', 'v[0]'], {}), '(v[1], v[0])\n', (1868, 1880), True, 'import numpy as np\n'), ((2170, 2182), 'numpy.dot', 'np.dot', (['x', 'y'], {}), '(x, y)\n', (2176, 2182), True, 'import numpy as np\n'), ((2191, 2203), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (2197, 2203), True, 'import numpy as np\n'), ((2212, 2224), 'numpy.dot', 'np.dot', (['y', 'y'], {}), '(y, y)\n', (2218, 2224), True, 'import numpy as np\n'), ((2849, 2860), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2857, 2860), True, 'import numpy as np\n'), ((2869, 2880), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (2877, 2880), True, 'import numpy as np\n'), ((3054, 3077), 'numpy.linspace', 'np.linspace', (['x1', 'x2', 'nx'], {}), '(x1, x2, nx)\n', (3065, 3077), True, 'import numpy as np\n'), ((1373, 1429), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]]'], {}), '([[1, 0, 0], [0, cosx, -sinx], [0, sinx, cosx]])\n', (1381, 1429), True, 'import numpy as np\n'), ((2235, 2267), 'numpy.arccos', 'np.arccos', (['(xy / (xx * yy) ** 0.5)'], {}), '(xy / (xx * yy) ** 0.5)\n', (2244, 2267), True, 'import numpy as np\n'), ((2611, 2642), 'numpy.column_stack', 'np.column_stack', (['[va, vb, vnor]'], {}), '([va, vb, vnor])\n', (2626, 2642), True, 'import numpy as np\n'), ((2979, 3006), 'numpy.linspace', 'np.linspace', 
(['x1', 'x2', '(nx + 2)'], {}), '(x1, x2, nx + 2)\n', (2990, 3006), True, 'import numpy as np\n'), ((578, 593), 'numpy.argsort', 'np.argsort', (['lam'], {}), '(lam)\n', (588, 593), True, 'import numpy as np\n'), ((639, 654), 'numpy.argsort', 'np.argsort', (['lam'], {}), '(lam)\n', (649, 654), True, 'import numpy as np\n'), ((1514, 1570), 'numpy.array', 'np.array', (['[[cosx, 0, sinx], [0, 1, 0], [-sinx, 0, cosx]]'], {}), '([[cosx, 0, sinx], [0, 1, 0], [-sinx, 0, cosx]])\n', (1522, 1570), True, 'import numpy as np\n'), ((2654, 2673), 'numpy.linalg.det', 'np.linalg.det', (['Dmat'], {}), '(Dmat)\n', (2667, 2673), True, 'import numpy as np\n'), ((2906, 2927), 'numpy.linalg.norm', 'np.linalg.norm', (['(X - Y)'], {}), '(X - Y)\n', (2920, 2927), True, 'import numpy as np\n'), ((1655, 1711), 'numpy.array', 'np.array', (['[[cosx, -sinx, 0], [sinx, cosx, 0], [0, 0, 1]]'], {}), '([[cosx, -sinx, 0], [sinx, cosx, 0], [0, 0, 1]])\n', (1663, 1711), True, 'import numpy as np\n'), ((705, 716), 'numpy.abs', 'np.abs', (['lam'], {}), '(lam)\n', (711, 716), True, 'import numpy as np\n'), ((774, 785), 'numpy.abs', 'np.abs', (['lam'], {}), '(lam)\n', (780, 785), True, 'import numpy as np\n')] |
import numpy as np
from scipy import sparse as sps
from src.models.QuantumSLIM.ItemSelectors.ItemSelectorInterface import ItemSelectorInterface
class ItemSelectorByPopularity(ItemSelectorInterface):
    """
    Item selector that orders items by decreasing popularity (the number of
    users with a non-zero interaction in the URM). The most popular items come
    first.
    """

    def precompute_best_item_indices(self, URM):
        """Cache the popularity ordering of all items in *URM* (most popular first)."""
        item_pop = np.array((URM > 0).sum(axis=0)).flatten()
        self.sorted_indices = np.argsort(item_pop)[::-1]

    def get_sorted_best_item_indices(self, URM: sps.csr_matrix, target_column: np.ndarray, item_idx: int) -> np.ndarray:
        """Return item indices sorted by decreasing popularity.

        Falls back to computing the ordering on the fly when
        precompute_best_item_indices() has not been called.
        *target_column* and *item_idx* are ignored (popularity does not
        depend on the target).
        """
        # BUGFIX: guard with getattr -- the attribute may never have been
        # initialised (the base class is not guaranteed to define it), in which
        # case the original `self.sorted_indices is None` raised AttributeError.
        if getattr(self, "sorted_indices", None) is None:
            item_pop = np.array((URM > 0).sum(axis=0)).flatten()
            sorted_indices = np.argsort(item_pop)[::-1]
            return sorted_indices
        return self.sorted_indices
| [
"numpy.argsort"
] | [((465, 485), 'numpy.argsort', 'np.argsort', (['item_pop'], {}), '(item_pop)\n', (475, 485), True, 'import numpy as np\n'), ((748, 768), 'numpy.argsort', 'np.argsort', (['item_pop'], {}), '(item_pop)\n', (758, 768), True, 'import numpy as np\n')] |
import random
import re
import math
import numpy as np
from src import constants
from src.multi_agent.elements.camera import Camera, CameraRepresentation
from src.my_utils import constant_class
from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound
from src.my_utils.my_math.line import distance_btw_two_point, Line
from src.my_utils.string_operations import parse_list
class MobileCameraType:
    """
    Camera mobility types, ordered by increasing degree of freedom (dof):
        1) FIX      -- 1 dof: beta
        2) ROTATIVE -- 2 dof: beta, alpha
        3) RAIL     -- 3 dof: beta, alpha, (x, y) = f(s) along a rail
        4) FREE     -- 4 dof: beta, alpha, x, y
    """
    # plain integer constants (not enum.Enum) so they compare and serialize as ints
    FIX = 0
    ROTATIVE = 1
    RAIL = 2
    FREE = 3
class MobileCameraRepresentation(CameraRepresentation):
    """
    Lightweight representation of a mobile camera.

    Extends CameraRepresentation with:
        camera_type (MobileCameraType) -- which degrees of freedom the camera has
        trajectory  (TrajectoryPlaner) -- rail path; per MobileCameraType, only
                                         meaningful for RAIL cameras
    """

    def __init__(self, id=None, xc=None, yc=None, alpha=None, beta=None, field_depth=None, type=None, color=None):
        # 'type' shadows the builtin, but the name is kept for caller compatibility
        CameraRepresentation.__init__(self, id, xc, yc, alpha, beta, field_depth, color)
        self.camera_type = type
        # starts empty; a real trajectory is copied in by update_from_camera()
        self.trajectory = TrajectoryPlaner([])

    def update_from_camera(self, camera):
        """Copy the mobile-specific state (type and trajectory) from *camera*."""
        super().update_from_camera(camera)
        self.camera_type = camera.camera_type
        self.trajectory = TrajectoryPlaner(camera.trajectory.trajectory)
class MobileCamera(Camera, MobileCameraRepresentation):
    """
    Class MobileCamera.

    Description :
        Physical mobile camera: combines the measurement model of Camera
        with the representation (type, trajectory) of
        MobileCameraRepresentation.  Depending on self.camera_type the
        camera can zoom (beta / field_depth), rotate (alpha) and/or
        move (xc, yc) -- see MobileCameraType.
    """

    def __init__(self, id=None, xc=None, yc=None, alpha=None, beta=None, trajectory=None, field_depth=None, color=None,
                 t_add=None, t_del=None, type=None, vx_vy_min=None, vx_vy_max=None, v_alpha_min=None, v_alpha_max=None,
                 delta_beta=None, v_beta_min=None, v_beta_max=None):
        Camera.__init__(self, id, xc, yc, alpha, beta, field_depth, color, t_add, t_del)
        camera_attributes_not_to_txt = self.attributes_not_to_txt
        MobileCameraRepresentation.__init__(self, id, xc, yc, alpha, beta, field_depth, type, color)
        # Merge the exclusion lists from both parents, then add the
        # mobile-camera-only attributes that must not be serialised.
        self.attributes_not_to_txt += [elem for elem in camera_attributes_not_to_txt if
                                       elem not in self.attributes_not_to_txt]
        self.attributes_not_to_txt += ["coeff_field", "coeff_std_position", "coeff_std_speed", "coeff_std_acc",
                                       "swipe_angle_direction", "swipe_delta_alpha", "last_swipe_direction_change",
                                       "dt_next_swipe_direction_change", "last_swipe_configuration",
                                       "last_swipe_position_change", "beta_min", "beta_max"]
        # Limits on the variations (speeds of each degree of freedom)
        self.vx_vy_min = vx_vy_min
        self.vx_vy_max = vx_vy_max
        self.v_alpha_min = v_alpha_min
        self.v_alpha_max = v_alpha_max
        self.v_beta_min = v_beta_min
        self.v_beta_max = v_beta_max
        self.delta_beta = delta_beta
        # Zoom: proportionality coefficients linking a beta variation to the
        # field depth and to the measurement error std deviations.
        self.coeff_field = constants.COEFF_VARIATION_FROM_FIELD_DEPTH
        self.coeff_std_position = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_POSITION
        self.coeff_std_speed = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_SPEED
        self.coeff_std_acc = constants.COEFF_STD_VARIATION_MEASURMENT_ERROR_ACCELERATION
        # Trajectory (only used by RAIL cameras)
        self.trajectory = TrajectoryPlaner(trajectory)
        # Variables for the swipe behaviour
        self.swipe_angle_direction = 1
        self.swipe_delta_alpha = 0.2
        self.last_swipe_direction_change = constants.get_time()
        self.dt_next_swipe_direction_change = -10
        self.last_swipe_position_change = -10
        # Local import to avoid a circular dependency at module load time.
        from src.multi_agent.tools.configuration import Configuration
        self.last_swipe_configuration = Configuration(None, None, random.uniform(0, constants.ROOM_DIMENSION_X),
                                                      random.uniform(0, constants.ROOM_DIMENSION_Y), 1, 1,
                                                      self.field_depth,
                                                      False)
        self.default_parameters()

    def default_parameters(self):
        """Enforce the constraints implied by the camera type and set the
        beta bounds from delta_beta (when both are known)."""
        if not self.camera_type == MobileCameraType.RAIL:
            self.trajectory = TrajectoryPlaner([])
        if self.camera_type == MobileCameraType.FIX or self.camera_type == MobileCameraType.ROTATIVE:
            self.vx_vy_min = 0
            self.vx_vy_max = 0
        if self.camera_type == MobileCameraType.FIX:
            self.v_alpha_min = 0
            self.v_alpha_max = 0
        if self.delta_beta is not None and self.beta is not None:
            self.beta_min = bound_angle_btw_minus_pi_plus_pi(self.beta - self.delta_beta)
            self.beta_max = bound_angle_btw_minus_pi_plus_pi(self.beta + self.delta_beta)
        else:
            self.beta_min = None
            self.beta_max = None

    def angle_degToRad(self):
        """
        :description
            Transforms angle attributes to radians, supposing they are in degrees.
        """
        super().angle_degToRad()
        if self.delta_beta is not None:
            self.delta_beta = math.radians(self.delta_beta)
        if self.beta_min is not None:
            self.beta_min = math.radians(self.beta_min)
        if self.beta_max is not None:
            self.beta_max = math.radians(self.beta_max)
        self.v_alpha_min = math.radians(self.v_alpha_min)
        self.v_alpha_max = math.radians(self.v_alpha_max)
        self.v_beta_min = math.radians(self.v_beta_min)
        self.v_beta_max = math.radians(self.v_beta_max)

    def angle_radToDeg(self):
        """
        :description
            Transforms angle attributes to degrees, supposing they are in radians.
        """
        super().angle_radToDeg()
        if self.delta_beta is not None:
            self.delta_beta = math.degrees(self.delta_beta)
        if self.beta_min is not None:
            self.beta_min = math.degrees(self.beta_min)
        if self.beta_max is not None:
            self.beta_max = math.degrees(self.beta_max)
        self.v_alpha_min = math.degrees(self.v_alpha_min)
        self.v_alpha_max = math.degrees(self.v_alpha_max)
        self.v_beta_min = math.degrees(self.v_beta_min)
        self.v_beta_max = math.degrees(self.v_beta_max)

    def load_from_save_to_txt(self, s):
        """
        :description
            Load attributes from a txt string representation.
        :param
            1. (string) s -- string description of the object, method save_to_txt.
        """
        super().load_from_save_to_txt(s)
        # After loading, self.trajectory holds the raw via-point list;
        # wrap it back into a planner.
        self.trajectory = TrajectoryPlaner(self.trajectory)
        self.default_parameters()

    def my_rand(self, bound):
        """
        :description
            Random helper used in randomize.
        :param
            1. ((float,float)) bound -- limits of the random value.
        :return
            1. (float) random value btw bound[0] and bound[1]
        """
        return random.uniform(bound[0], bound[1])

    def randomize(self, camera_type, beta_bound, delta_beta_bound, field_bound, v_xy_min_bound, v_xy_max_bound,
                  v_alpha_min_bound, v_alpha_max_bound, v_beta_min_bound, v_beta_max_bound):
        """
        :description
            Give the camera random parameters, bounded by the arguments.
        :param
            1.(MobileCameraType) camera_type        -- Camera type
            2.((int,int))beta_bound - [degree]      -- random bound of beta
            3.((int,int))delta_beta_bound - [degree]-- random bound of delta_beta
            4.((int,int))field_bound - [m]          -- random bound of field_depth
            5.((int,int))v_xy_min_bound - [m/s]     -- random bound of v_min in x and y axis
            6.((int,int))v_xy_max_bound - [m/s]     -- random bound of v_max in x and y axis
            7.((int,int))v_alpha_min_bound - [degree/s] -- random bound of alpha min
            8.((int,int))v_alpha_max_bound - [degree/s] -- random bound of alpha max
            9.((int,int))v_beta_min_bound - [degree/s]  -- random bound of beta min
            10.((int,int))v_beta_max_bound - [degree/s] -- random bound of beta max
        :return
            sets several attributes to random values bounded by the parameters
        """
        self.xc = self.my_rand((0, constants.ROOM_DIMENSION_X))
        self.yc = self.my_rand((0, constants.ROOM_DIMENSION_Y))
        self.alpha = bound_angle_btw_minus_pi_plus_pi(self.my_rand((-math.pi, math.pi)))
        self.beta = self.my_rand(beta_bound)
        self.delta_beta = self.my_rand(delta_beta_bound)
        self.field_depth = self.my_rand(field_bound)
        self.t_add = [0]
        self.t_del = [1000]
        self.vx_vy_min = self.my_rand(v_xy_min_bound)
        self.vx_vy_max = self.my_rand(v_xy_max_bound)
        self.v_alpha_min = self.my_rand(v_alpha_min_bound)
        self.v_alpha_max = self.my_rand(v_alpha_max_bound)
        self.v_beta_min = self.my_rand(v_beta_min_bound)
        self.v_beta_max = self.my_rand(v_beta_max_bound)
        self.trajectory = TrajectoryPlaner([])
        self.camera_type = camera_type
        # Default values
        self.set_default_values(xc=self.xc, yc=self.yc, alpha=self.alpha, beta=self.beta, field_depth=self.field_depth)
        self.beta_min = self.beta - self.delta_beta
        self.beta_max = self.beta + self.delta_beta
        self.angle_degToRad()

    def compute_field_depth_variation_for_a_new_beta(self, new_beta):
        """
        :description
            The field depth is inversely proportional to beta.
        :param
            1. (float) new_beta - [radians] -- new opening angle of the camera
        :return
            1. (float) field_depth - [m] -- field depth corresponding to the new beta
        """
        delta = new_beta - self.beta
        field_depth = self.field_depth - delta * self.coeff_field
        field_depth = bound(field_depth, constants.AGENT_CAMERA_FIELD_MIN * self.default_field_depth,
                            constants.AGENT_CAMERA_FIELD_MAX * self.default_field_depth)
        return field_depth

    def zoom(self, speed, dt):
        """
        :description
            Models the zoom of a camera (modifies beta and field_depth).
            effects :
                zoom in / zoom out
                    1) on the field geometry:
                        a. Increase/decrease beta
                        b. Decrease/increase the field depth
                    2) on the precision
                        c. Decrease/increase the std on the measure

            self.coeff_field -- value > 0, defines the proportionality btw a. and b.
            self.coeff_std_* -- value > 0, defines the proportionality btw a. and c.
        :param
            1. (float) speed -- going from -1 to 1, + to zoom out - to zoom in
            2. (float) dt    -- time step
        """
        sign = np.sign(speed)
        if self.beta_min <= self.beta <= self.beta_max:
            if speed == 0:
                delta = 0
            else:
                delta = sign * dt * (self.v_beta_min + math.fabs(speed) * (self.v_beta_max - self.v_beta_min))
        # FIX: the original condition was `self.beta < self.beta_min or
        # self.beta_max > 0`, which is a typo -- beta_max > 0 is (almost)
        # always true so the error branch below was unreachable.  The intent
        # is clearly "beta left the [beta_min, beta_max] interval": clamp it.
        elif self.beta < self.beta_min or self.beta > self.beta_max:
            self.beta = bound(self.beta, self.beta_min, self.beta_max)
            delta = 0
        else:
            delta = 0
            print("problem in beta target")
        self.field_depth = self.compute_field_depth_variation_for_a_new_beta(self.beta + delta)
        self.beta += delta
        if constants.ERROR_VARIATION_ZOOM:
            # Zooming in (delta < 0) increases precision, zooming out
            # decreases it; stds stay within [0, 10x current value].
            self.std_measurement_error_position -= delta * self.coeff_std_position
            self.std_measurement_error_speed -= delta * self.coeff_std_speed
            self.std_measurement_error_acceleration -= delta * self.coeff_std_acc
            self.std_measurement_error_position = bound(self.std_measurement_error_position, 0,
                                                        self.std_measurement_error_position * 10)
            self.std_measurement_error_speed = bound(self.std_measurement_error_speed, 0,
                                                     self.std_measurement_error_speed * 10)
            self.std_measurement_error_acceleration = bound(self.std_measurement_error_acceleration, 0,
                                                            self.std_measurement_error_acceleration * 10)

    def rotate(self, speed, dt):
        """
        :description
            Rotate the camera in the room (modifies angle alpha).
        :param
            1. (float) speed -- going from -1 to 1
            2. (float) dt    -- time step
        """
        if not self.camera_type == MobileCameraType.FIX:
            sign = np.sign(speed)
            if speed == 0:
                delta = 0
            else:
                delta = sign * dt * (self.v_alpha_min + math.fabs(speed) * (self.v_alpha_max - self.v_alpha_min))
            self.alpha += delta
            self.alpha = bound_angle_btw_minus_pi_plus_pi(self.alpha)

    def move(self, speed_x, speed_y, dt):
        """
        :description
            Move the camera in the room (modifies xc and yc).
        :param
            1. (float) speed_x -- going from -1 to 1
            2. (float) speed_y -- going from -1 to 1
            3. (float) dt      -- time step
        """
        sign_x = np.sign(speed_x)
        sign_y = np.sign(speed_y)
        if speed_x == 0:
            delta_x = 0
        else:
            delta_x = sign_x * dt * (self.vx_vy_min + math.fabs(speed_x) * (self.vx_vy_max - self.vx_vy_min))
        if speed_y == 0:
            delta_y = 0
        else:
            delta_y = sign_y * dt * (self.vx_vy_min + math.fabs(speed_y) * (self.vx_vy_max - self.vx_vy_min))
        if self.camera_type == MobileCameraType.RAIL:
            # On the rail there is only 1 dimension: delta_x is the
            # curvilinear displacement along the trajectory.
            delta = delta_x
            x_new, y_new = self.trajectory.move_on_trajectory(self.xc, self.yc, delta)
            self.xc = x_new
            self.yc = y_new
        elif self.camera_type == MobileCameraType.FREE:
            self.xc += delta_x
            self.yc += delta_y
        self.xc = bound(self.xc, self.xc_min, self.xc_max)
        self.yc = bound(self.yc, self.yc_min, self.yc_max)

    def set_configuration(self, configuration):
        """
        :description
            Set the camera pose from a Configuration object.
        :param
            1. (Configuration) configuration -- groups several parameters
        """
        self.xc = configuration.x
        self.yc = configuration.y
        self.alpha = configuration.alpha
        self.beta = configuration.beta
        self.field_depth = configuration.field_depth

    def get_edge_points_world_frame(self):
        """
        :description
            Return the two edge points of the field of view (at field_depth,
            along the angles -beta/2 and +beta/2), expressed in world frame.
        """
        # angles of edge of field of view in cam frame
        angle_min, angle_max = -self.beta / 2, self.beta / 2
        # distance of depth field along these angles
        min_edge = (self.field_depth * math.cos(angle_min), self.field_depth * math.sin(angle_min))
        # FIX: the x-coordinate of max_edge used sin(angle_max) instead of
        # cos(angle_max), breaking the symmetry with min_edge.
        max_edge = (self.field_depth * math.cos(angle_max), self.field_depth * math.sin(angle_max))
        min_edge_world_frame = self.coordinate_change_from_camera_frame_to_world_frame(min_edge[0], min_edge[1])
        max_edge_world_frame = self.coordinate_change_from_camera_frame_to_world_frame(max_edge[0], max_edge[1])
        return min_edge_world_frame, max_edge_world_frame
class TrajectoryPlaner:
    """
    Class TrajectoryPlaner.

    Description :
        Models the displacement of a camera on a rail made of straight
        segments between via points.

        :param
            1. (list[(float,float)]) trajectory - [m] -- list of via points

        :attibutes
            1. (list[(float,float)]) trajectory - [m] -- list of via points
            2. (int) trajectory_index                 -- current segment of the trajectory
            3. (float) distance - [m]                 -- distance travelled by the camera on
                                                         the rail (forward increases it,
                                                         backwards decreases it => [0, length])
    """

    def __init__(self, trajectory):
        self.trajectory = trajectory
        self.trajectory_index = 0
        self.distance = 0

    def move_on_trajectory(self, x, y, delta):
        """
        :description
            Move a point along the rail.  (x, y) must belong to the trajectory!
        :param
            1. (float) x     - [m] -- x coordinate of the point in world frame
            2. (float) y     - [m] -- y coordinate of the point in world frame
            3. (float) delta - [m] -- (signed) distance to travel
        :return
            1. (float) x - [m] -- new x coordinate of the point in world frame
            2. (float) y - [m] -- new y coordinate of the point in world frame
        """
        if len(self.trajectory) > 1:
            (xi, yi) = self.trajectory[self.trajectory_index]
            (xf, yf) = self.trajectory[self.trajectory_index + 1]
            (x_trajectory_frame, y_trajectory_frame) = self.from_world_frame_to_trajectory_frame(x, y)
            (xf_trajectory_frame, yf_trajectory_frame) = self.from_world_frame_to_trajectory_frame(xf, yf)
            # Sanity check: in the segment frame the point must lie on the
            # x axis (y == 0 up to numerical noise).
            if y_trajectory_frame > 0.0001:
                print("problème in move_on_trajectory y = %.2f", y_trajectory_frame)
            # Apply the displacement along the segment.
            # NOTE(review): self.distance is also incremented in each
            # recursive call below, so a move spanning several segments
            # over-counts it; confirm whether self.distance is relied upon.
            self.distance += delta
            x_trajectory_frame += delta
            if x_trajectory_frame > xf_trajectory_frame:
                # Moved past the end of the current segment
                if self.trajectory_index < len(self.trajectory) - 2:
                    # Carry the remaining distance onto the next segment
                    self.trajectory_index += 1
                    delta_new = (x_trajectory_frame - xf_trajectory_frame)
                    return self.move_on_trajectory(xf, yf, delta_new)
                else:
                    # Reaching the end point: clamp to the last via point
                    (self.distance, y) = self.compute_distance_for_point_x_y(xf, yf, self.trajectory_index)
                    return (xf, yf)
            elif x_trajectory_frame < 0:
                # Moved before the start of the current segment
                if self.trajectory_index > 0:
                    # Carry the (negative) remainder onto the previous segment
                    self.trajectory_index -= 1
                    delta_new = x_trajectory_frame
                    return self.move_on_trajectory(xi, yi, delta_new)
                else:
                    # Reaching the start point: clamp to the first via point
                    self.distance = 0
                    return (xi, yi)
            else:
                # The displacement stays on the same segment
                return self.from_trajectory_frame_to_world_frame(x_trajectory_frame, y_trajectory_frame)
        else:
            return x, y

    def find_all_intersection(self, line):
        """Return every (x, y, segment_index) where `line` crosses the rail."""
        all_possible_intersection = []
        for index in range(len(self.trajectory) - 1):
            (xi, yi) = self.trajectory[index]
            (xf, yf) = self.trajectory[index + 1]
            segment = Line(xi, yi, xf, yf)
            x_intersection, y_intersection = segment.find_intersection_btw_two_line(line)
            x_intersection_in_trajecotry_frame, y_intersection_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                x_intersection, y_intersection, index)
            xf_in_trajectory_frame, yf_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                xf, yf, index)
            if y_intersection_in_trajectory_frame > 0.001:
                print(y_intersection)
                print("problème")
            elif 0 < x_intersection_in_trajecotry_frame < xf_in_trajectory_frame:
                # Intersection lies strictly inside this segment
                all_possible_intersection.append((x_intersection, y_intersection, index))
        return all_possible_intersection

    def find_closest_intersection(self, line, index):
        """Walk the segments from `index` to locate the intersection of
        `line` with the rail; returns (x, y, segment_index)."""
        if index < 0:
            return (0, 0, 0)
        elif index >= len(self.trajectory) - 1:
            return (self.trajectory[-1][0], self.trajectory[-1][1], len(self.trajectory) - 1)
        else:
            (xi, yi) = self.trajectory[index]
            (xf, yf) = self.trajectory[index + 1]
            segment = Line(xi, yi, xf, yf)
            x_intersection, y_intersection = segment.find_intersection_btw_two_line(line)
            x_intersection_in_trajecotry_frame, y_intersection_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                x_intersection, y_intersection, index)
            xf_in_trajectory_frame, yf_in_trajectory_frame = self.from_world_frame_to_trajectory_frame_for_a_given_segment(
                xf, yf, index)
            if y_intersection_in_trajectory_frame > 0.001:
                print("problème in find closest intersection")
                return (None, None, None)
            elif x_intersection_in_trajecotry_frame > xf_in_trajectory_frame or x_intersection is None:
                return self.find_closest_intersection(line, index + 1)
            # FIX: the segment frame has its origin at (xi, yi), so "before
            # the segment start" is x < 0 in that frame (as in
            # move_on_trajectory).  The original compared against the world
            # coordinate xi, mixing coordinate frames.
            elif x_intersection_in_trajecotry_frame < 0:
                return self.find_closest_intersection(line, index - 1)
            else:
                return (x_intersection, y_intersection, index)

    def get_angle(self):
        """Orientation [rad] of the current segment in world frame."""
        (xi, yi) = self.trajectory[self.trajectory_index]
        (xf, yf) = self.trajectory[self.trajectory_index + 1]
        return math.atan2(yf - yi, xf - xi)

    def rotate_angle(self, angle, x, y):
        """Rotate the vector (x, y) by -angle (world -> segment frame)."""
        x_rotate = math.cos(angle) * x + math.sin(angle) * y
        y_rotate = -math.sin(angle) * x + math.cos(angle) * y
        return (x_rotate, y_rotate)

    def from_world_frame_to_trajectory_frame(self, x, y):
        """Express (x, y) in the frame of the *current* segment."""
        (xi, yi) = self.trajectory[self.trajectory_index]
        angle = self.get_angle()
        x_no_offset = x - xi
        y_no_offset = y - yi
        return self.rotate_angle(angle, x_no_offset, y_no_offset)

    def compute_distance_for_point_x_y(self, x, y, i_index):
        """Curvilinear distance from the start of the rail to (x, y),
        assuming (x, y) lies on segment i_index; returns (distance, 0)."""
        sum = 0
        for n in range(i_index):
            (xi, yi) = self.trajectory[n]
            (xf, yf) = self.trajectory[n + 1]
            d = distance_btw_two_point(xi, yi, xf, yf)
            sum += d
        (xi, yi) = self.trajectory[i_index]
        d = distance_btw_two_point(xi, yi, x, y)
        sum += d
        return sum, 0

    def from_world_frame_to_trajectory_frame_for_a_given_segment(self, x, y, index):
        """Express (x, y) in the frame of segment `index` (origin at the
        segment start, x axis along the segment)."""
        (xi, yi) = self.trajectory[index]
        # FIX: the segment end must belong to the same segment `index`; the
        # original read self.trajectory[self.trajectory_index + 1], which
        # silently used the end of the *current* segment instead of the
        # requested one, yielding a wrong frame whenever
        # index != self.trajectory_index.
        (xf, yf) = self.trajectory[index + 1]
        angle = math.atan2(yf - yi, xf - xi)
        x_no_offset = x - xi
        y_no_offset = y - yi
        return self.rotate_angle(angle, x_no_offset, y_no_offset)

    def from_trajectory_frame_to_world_frame(self, x, y):
        """Inverse of from_world_frame_to_trajectory_frame."""
        (xi, yi) = self.trajectory[self.trajectory_index]
        angle = self.get_angle()
        (x_rotate, y_rotate) = self.rotate_angle(-angle, x, y)
        return (x_rotate + xi, y_rotate + yi)

    def __str__(self):
        return str(self.trajectory)
if __name__ == "__main__":
    # Smoke test: build a representation and a camera, print their
    # attribute dump and serialised form.
    # NOTE(review): MobileCameraRepresentation's signature is
    # (id, xc, yc, alpha, beta, field_depth, type, color), so the
    # TrajectoryPlaner([]) below is bound positionally to `color`, not to a
    # trajectory parameter -- confirm this is intended.
    camera = MobileCameraRepresentation(0, 1, 1, 1, 1, 5, MobileCameraType.FIX, TrajectoryPlaner([]))
    print(camera.attributes_to_string())
    print(camera.save_to_txt())
    camera = MobileCamera(delta_beta=20)
    print(camera.attributes_to_string())
    s = camera.save_to_txt()
    print(s)
| [
"src.my_utils.my_math.line.Line",
"src.constants.get_time",
"random.uniform",
"src.my_utils.my_math.bound.bound_angle_btw_minus_pi_plus_pi",
"src.multi_agent.elements.camera.CameraRepresentation.__init__",
"src.my_utils.my_math.line.distance_btw_two_point",
"math.degrees",
"math.radians",
"src.multi... | [((1391, 1476), 'src.multi_agent.elements.camera.CameraRepresentation.__init__', 'CameraRepresentation.__init__', (['self', 'id', 'xc', 'yc', 'alpha', 'beta', 'field_depth', 'color'], {}), '(self, id, xc, yc, alpha, beta, field_depth, color\n )\n', (1420, 1476), False, 'from src.multi_agent.elements.camera import Camera, CameraRepresentation\n'), ((2268, 2353), 'src.multi_agent.elements.camera.Camera.__init__', 'Camera.__init__', (['self', 'id', 'xc', 'yc', 'alpha', 'beta', 'field_depth', 'color', 't_add', 't_del'], {}), '(self, id, xc, yc, alpha, beta, field_depth, color, t_add, t_del\n )\n', (2283, 2353), False, 'from src.multi_agent.elements.camera import Camera, CameraRepresentation\n'), ((3989, 4009), 'src.constants.get_time', 'constants.get_time', ([], {}), '()\n', (4007, 4009), False, 'from src import constants\n'), ((5872, 5902), 'math.radians', 'math.radians', (['self.v_alpha_min'], {}), '(self.v_alpha_min)\n', (5884, 5902), False, 'import math\n'), ((5930, 5960), 'math.radians', 'math.radians', (['self.v_alpha_max'], {}), '(self.v_alpha_max)\n', (5942, 5960), False, 'import math\n'), ((5987, 6016), 'math.radians', 'math.radians', (['self.v_beta_min'], {}), '(self.v_beta_min)\n', (5999, 6016), False, 'import math\n'), ((6043, 6072), 'math.radians', 'math.radians', (['self.v_beta_max'], {}), '(self.v_beta_max)\n', (6055, 6072), False, 'import math\n'), ((6587, 6617), 'math.degrees', 'math.degrees', (['self.v_alpha_min'], {}), '(self.v_alpha_min)\n', (6599, 6617), False, 'import math\n'), ((6645, 6675), 'math.degrees', 'math.degrees', (['self.v_alpha_max'], {}), '(self.v_alpha_max)\n', (6657, 6675), False, 'import math\n'), ((6702, 6731), 'math.degrees', 'math.degrees', (['self.v_beta_min'], {}), '(self.v_beta_min)\n', (6714, 6731), False, 'import math\n'), ((6758, 6787), 'math.degrees', 'math.degrees', (['self.v_beta_max'], {}), '(self.v_beta_max)\n', (6770, 6787), False, 'import math\n'), ((7520, 7554), 'random.uniform', 'random.uniform', 
(['bound[0]', 'bound[1]'], {}), '(bound[0], bound[1])\n', (7534, 7554), False, 'import random\n'), ((10700, 10850), 'src.my_utils.my_math.bound.bound', 'bound', (['field_depth', '(constants.AGENT_CAMERA_FIELD_MIN * self.default_field_depth)', '(constants.AGENT_CAMERA_FIELD_MAX * self.default_field_depth)'], {}), '(field_depth, constants.AGENT_CAMERA_FIELD_MIN * self.\n default_field_depth, constants.AGENT_CAMERA_FIELD_MAX * self.\n default_field_depth)\n', (10705, 10850), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((11745, 11759), 'numpy.sign', 'np.sign', (['speed'], {}), '(speed)\n', (11752, 11759), True, 'import numpy as np\n'), ((14270, 14286), 'numpy.sign', 'np.sign', (['speed_x'], {}), '(speed_x)\n', (14277, 14286), True, 'import numpy as np\n'), ((14304, 14320), 'numpy.sign', 'np.sign', (['speed_y'], {}), '(speed_y)\n', (14311, 14320), True, 'import numpy as np\n'), ((15081, 15121), 'src.my_utils.my_math.bound.bound', 'bound', (['self.xc', 'self.xc_min', 'self.xc_max'], {}), '(self.xc, self.xc_min, self.xc_max)\n', (15086, 15121), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((15140, 15180), 'src.my_utils.my_math.bound.bound', 'bound', (['self.yc', 'self.yc_min', 'self.yc_max'], {}), '(self.yc, self.yc_min, self.yc_max)\n', (15145, 15180), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((22755, 22783), 'math.atan2', 'math.atan2', (['(yf - yi)', '(xf - xi)'], {}), '(yf - yi, xf - xi)\n', (22765, 22783), False, 'import math\n'), ((23591, 23627), 'src.my_utils.my_math.line.distance_btw_two_point', 'distance_btw_two_point', (['xi', 'yi', 'x', 'y'], {}), '(xi, yi, x, y)\n', (23613, 23627), False, 'from src.my_utils.my_math.line import distance_btw_two_point, Line\n'), ((23873, 23901), 'math.atan2', 'math.atan2', (['(yf - yi)', '(xf - xi)'], {}), '(yf - yi, xf - xi)\n', (23883, 23901), False, 'import math\n'), ((4242, 4287), 
'random.uniform', 'random.uniform', (['(0)', 'constants.ROOM_DIMENSION_X'], {}), '(0, constants.ROOM_DIMENSION_X)\n', (4256, 4287), False, 'import random\n'), ((4343, 4388), 'random.uniform', 'random.uniform', (['(0)', 'constants.ROOM_DIMENSION_Y'], {}), '(0, constants.ROOM_DIMENSION_Y)\n', (4357, 4388), False, 'import random\n'), ((5128, 5189), 'src.my_utils.my_math.bound.bound_angle_btw_minus_pi_plus_pi', 'bound_angle_btw_minus_pi_plus_pi', (['(self.beta - self.delta_beta)'], {}), '(self.beta - self.delta_beta)\n', (5160, 5189), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((5218, 5279), 'src.my_utils.my_math.bound.bound_angle_btw_minus_pi_plus_pi', 'bound_angle_btw_minus_pi_plus_pi', (['(self.beta + self.delta_beta)'], {}), '(self.beta + self.delta_beta)\n', (5250, 5279), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((5626, 5655), 'math.radians', 'math.radians', (['self.delta_beta'], {}), '(self.delta_beta)\n', (5638, 5655), False, 'import math\n'), ((5722, 5749), 'math.radians', 'math.radians', (['self.beta_min'], {}), '(self.beta_min)\n', (5734, 5749), False, 'import math\n'), ((5816, 5843), 'math.radians', 'math.radians', (['self.beta_max'], {}), '(self.beta_max)\n', (5828, 5843), False, 'import math\n'), ((6341, 6370), 'math.degrees', 'math.degrees', (['self.delta_beta'], {}), '(self.delta_beta)\n', (6353, 6370), False, 'import math\n'), ((6437, 6464), 'math.degrees', 'math.degrees', (['self.beta_min'], {}), '(self.beta_min)\n', (6449, 6464), False, 'import math\n'), ((6531, 6558), 'math.degrees', 'math.degrees', (['self.beta_max'], {}), '(self.beta_max)\n', (6543, 6558), False, 'import math\n'), ((12695, 12787), 'src.my_utils.my_math.bound.bound', 'bound', (['self.std_measurement_error_position', '(0)', '(self.std_measurement_error_position * 10)'], {}), '(self.std_measurement_error_position, 0, self.\n std_measurement_error_position * 10)\n', (12700, 12787), 
False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((12886, 12971), 'src.my_utils.my_math.bound.bound', 'bound', (['self.std_measurement_error_speed', '(0)', '(self.std_measurement_error_speed * 10)'], {}), '(self.std_measurement_error_speed, 0, self.std_measurement_error_speed *\n 10)\n', (12891, 12971), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((13075, 13175), 'src.my_utils.my_math.bound.bound', 'bound', (['self.std_measurement_error_acceleration', '(0)', '(self.std_measurement_error_acceleration * 10)'], {}), '(self.std_measurement_error_acceleration, 0, self.\n std_measurement_error_acceleration * 10)\n', (13080, 13175), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((13586, 13600), 'numpy.sign', 'np.sign', (['speed'], {}), '(speed)\n', (13593, 13600), True, 'import numpy as np\n'), ((13844, 13888), 'src.my_utils.my_math.bound.bound_angle_btw_minus_pi_plus_pi', 'bound_angle_btw_minus_pi_plus_pi', (['self.alpha'], {}), '(self.alpha)\n', (13876, 13888), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((20376, 20396), 'src.my_utils.my_math.line.Line', 'Line', (['xi', 'yi', 'xf', 'yf'], {}), '(xi, yi, xf, yf)\n', (20380, 20396), False, 'from src.my_utils.my_math.line import distance_btw_two_point, Line\n'), ((23474, 23512), 'src.my_utils.my_math.line.distance_btw_two_point', 'distance_btw_two_point', (['xi', 'yi', 'xf', 'yf'], {}), '(xi, yi, xf, yf)\n', (23496, 23512), False, 'from src.my_utils.my_math.line import distance_btw_two_point, Line\n'), ((12085, 12131), 'src.my_utils.my_math.bound.bound', 'bound', (['self.beta', 'self.beta_min', 'self.beta_max'], {}), '(self.beta, self.beta_min, self.beta_max)\n', (12090, 12131), False, 'from src.my_utils.my_math.bound import bound_angle_btw_minus_pi_plus_pi, bound\n'), ((15992, 16011), 'math.cos', 'math.cos', (['angle_min'], {}), 
'(angle_min)\n', (16000, 16011), False, 'import math\n'), ((16032, 16051), 'math.sin', 'math.sin', (['angle_min'], {}), '(angle_min)\n', (16040, 16051), False, 'import math\n'), ((16092, 16111), 'math.sin', 'math.sin', (['angle_max'], {}), '(angle_max)\n', (16100, 16111), False, 'import math\n'), ((16132, 16151), 'math.sin', 'math.sin', (['angle_max'], {}), '(angle_max)\n', (16140, 16151), False, 'import math\n'), ((21573, 21593), 'src.my_utils.my_math.line.Line', 'Line', (['xi', 'yi', 'xf', 'yf'], {}), '(xi, yi, xf, yf)\n', (21577, 21593), False, 'from src.my_utils.my_math.line import distance_btw_two_point, Line\n'), ((22845, 22860), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (22853, 22860), False, 'import math\n'), ((22867, 22882), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (22875, 22882), False, 'import math\n'), ((22929, 22944), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (22937, 22944), False, 'import math\n'), ((22907, 22922), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (22915, 22922), False, 'import math\n'), ((14439, 14457), 'math.fabs', 'math.fabs', (['speed_x'], {}), '(speed_x)\n', (14448, 14457), False, 'import math\n'), ((14613, 14631), 'math.fabs', 'math.fabs', (['speed_y'], {}), '(speed_y)\n', (14622, 14631), False, 'import math\n'), ((11943, 11959), 'math.fabs', 'math.fabs', (['speed'], {}), '(speed)\n', (11952, 11959), False, 'import math\n'), ((13728, 13744), 'math.fabs', 'math.fabs', (['speed'], {}), '(speed)\n', (13737, 13744), False, 'import math\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Code accompanying the manuscript:
"Reinterpreting the relationship between number of species and
number of links connects community structure and stability"
-------
v1.0.0 (First release)
-------
For any question or comment, please contact:
<NAME>(1), <EMAIL>
(1) University of Namur, Namur, Rue de Bruxelles 61, Belgium.
Research Unit in Environmental and Evolutionary Biology (URBE);
Institute of Life-Earth-Environment, Namur;
Center for Complex Systems, Namur.
"""
import numpy as np
from scipy.stats import linregress
from collections import namedtuple
##############################################################################
# 'Public' functions
##############################################################################
# Decompositions (in-silico extinction experiments)
###################################################
def experiment(mat, nbsimu, independent = False):
    """
    Simulates the network decompositions (in-silico extinction experiments)

    Parameters
    ----------
    mat : numpy array of shape (S,S), S being the number of species
        Adjacency matrix of the network.
    nbsimu : integer
        Number of simulations (decompositions) to perform.
    independent : bool
        Should the species having no incoming links be considered as
        independent (i.e. not undergo secondary extinction)?

    Returns
    -------
    S: integer
        Number of species in the network
    L: integer
        Number of links (edges) in the network (excluding cannibalism)
    b: float
        Shape of the L~S relationship defined as log(L)/log(S/2)
    z: float
        Proportion of basal species
    sseq: numpy array of shape (nbsimu, S)
        Number of species along each decomposition
        (one row is one whole decomposition of the network)
    lseq: numpy array of shape (nbsimu, S)
        Number of links along each decomposition
        (one row is one whole decomposition of the network)
    """
    # NOTE(review): this function relies on cancel() and extinction(),
    # defined elsewhere in this module -- their exact semantics are assumed
    # to be "zero out links of absent species" and "remove species left
    # without prey"; verify against their definitions.
    #-------- Network structure --------
    mat = np.tril(mat != 0) # Binary matrix of incoming links (low triangle)
    S = mat.shape[0] # Number of species
    L = np.sum(np.tril(mat, k = -1)) # Number of edges (diagonal = cannibalism excluded)
    b = np.log(L)/np.log(0.5*S) # Shape of the L~S relationship
    #-------- Wanted matrix and Independent species --------
    mat = trophorder(mat) # Order the species by "trophic" level in the matrix
    z = sum(np.sum(mat, axis=1)==0) # Number of independent species
    # Independent sp. are considered as sp. having no incoming links
    #-------- Initial values for each simulation --------
    # Number of species
    sseq = [nbsimu*[S]] # All decompositions start with all species
    # Number of links
    lseq = [nbsimu*[L]] # All decompositions start with all links
    # Adjacency matrix
    adj = np.array(nbsimu*[mat]) # All decompositions start with the same adj
    # Presence/Absence of species
    species = np.ones((nbsimu, S)) # At the start, all species are present
    #-------- Network decompositions --------
    while np.sum(adj)!= 0 : # Simulations run until there is no link left
        ### Random species removal ###
        species = removal(species, nbsimu) # One species removed in each simulation
        adj = cancel(adj, species) # Update of the adjacency matrices
        ### Secondary extinctions ###
        species = extinction(species, adj, z, independent)
        # Update of the presence/absence of each species in each simulation
        ### Save decomposition sequence ###
        sseq.append(np.sum(species, axis=1)) # Storage of the number of species
        lseq.append(np.sum(np.sum(
            np.tril(cancel(adj, species)), axis=2), axis=1))
        # Storage of the number of links
    # NOTE(review): the number of rows appended equals the number of loop
    # iterations + 1, so after transposition the arrays have shape
    # (nbsimu, nb_steps+1); the docstring's (nbsimu, S) holds only when one
    # species disappears per step -- confirm downstream expectations.
    sseq = np.array(sseq).T # Each row is a simulation, each column is a step
    lseq = np.array(lseq).T # Each row is a simulation, each column is a step
    result = namedtuple('experes', ('S', 'L', 'b', 'z', 'sseq', 'lseq'))
    if not independent :
        z = 0 # z is only reported when independent species are simulated
    return(result(S, L, b, z/S, sseq, lseq))
# R2 of the L~S relationship prediction
########################################
def R2L(sseq, lseq, b):
    """
    Computes the coefficient of determination (R2) of the two approaches
    used to predict the number of links along the network decomposition:
    the log-log regression and the b-value based equation.

    Parameters
    ----------
    sseq : numpy array of shape (nbsimu, S) with nbsimu being the number of network decompositions done.
        Each row of this array contains the species richness along one decomposition.
    lseq : numpy array of shape (nbsimu, S) with nbsimu being the number of network decompositions done.
        Each row of this array contains the number of links along one decomposition.
    b : float
        Shape of the links~species relationship given by b = log(L)/log(S/2).

    Returns
    -------
    breg : float
        Estimation of b (from L = a * S^b) based on log-log regression
    areg : float
        Estimation of a (from L = a * S^b) based on log-log regression
    r2reg : float
        R2 of the predictions of L based on the log(L)~log(S) regression
    r2b : float
        R2 of the predictions of L based on the log(L) = b*log(S/2) equation
    """
    #### Log-log regression-based approach ####
    # Keep strictly positive (S, L) pairs only: log() is undefined at 0.
    keep = (sseq != 0) & (lseq != 0)
    lseqforl = lseq[keep]
    sseqforl = sseq[keep]
    paramL = linregress(np.log(sseqforl).flat, np.log(lseqforl).flat)  # Regression
    #### b-value based approach ####
    obs = np.log(lseqforl)  # Observed values
    pred = b*np.log(sseqforl/2)  # Predicted values
    SSres = np.sum((obs-pred)**2)  # Residual sum of squares
    SStot = np.sum((obs-np.mean(obs))**2)  # Total sum of squares
    r2Lb = 1 - (SSres/SStot)  # Coefficient of determination
    result = namedtuple('R2Lres', ('breg', 'areg', 'r2reg', 'r2b'))
    # FIX: linregress.rvalue is the correlation coefficient r, not R2.
    # The documented 'r2reg' (R2 of the regression) is r squared.
    return(result(paramL.slope, np.exp(paramL.intercept),
                  paramL.rvalue**2, r2Lb))
##############################################################################
# 'Private' functions
##############################################################################
def trophorder(mat):
"""
Order the matrix based on "trophic level": the species having no incoming
links are in the first rows/columns; the species having the highest number
of incoming links is in the last row.
Parameters
----------
mat : numpy array of shape (S, S)
the binary matrix containing ones (if there is an incoming link) and zeros otherwise.
Returns
-------
Numpy array of shape (S, S) containing the matrix 'mat' ordered.
"""
#------- Getting each species trophic level -------
S = mat.shape[0] # Number of species
level = np.repeat(True, S)
troph = np.zeros((S,)) # Each species starts at the level 0 (i.e. basal)
for l in np.arange(S):
troph += mat[:,level].sum(1) != 0 # Do species feed on the lower level?
level = troph > l
# If a sp. feeds on lower level, it might belong to the next level
#------- Ordering based on trophic level -------
to_order = troph.argsort() # Order by the level
newmat = np.full_like(mat, 0) # New matrix to fill in
for sp1 in np.arange(mat.shape[0]): # For each row
for sp2 in np.arange(mat.shape[0]): # For each columns
newmat[sp1,sp2] = mat[to_order[sp1],to_order[sp2]]
return(newmat)
def removal(species, nbsimu):
"""
Removes one extant species from the network for each simulation (decomposition).
Parameters
----------
species : numpy array of shape (nbsimu, S) with nbsimu being the number of simulations (decompositions).
This array contains the information about the presence (1) or
absence (0) of each species (columns) in each simulation (rows).
nbsimu : integer
Number of simulations to perform.
Returns
-------
Numpy array of shape (nbsimu, S) with one species removed per simulation.
"""
for n in range(nbsimu): # For each simulation
extant = np.where(species[n,:] != 0)[0] # Localise extant species
if len(extant) != 0: # If there is still at leats one species left
# Random species switch from present to absent
toremove = np.random.permutation(extant)[0] # Draw one random sp.
species[n, toremove] = 0 # Removal of the drawn species.
return(species)
def cancel(adj, species):
"""
Removes links that are in- or outgoing from species that are not
in the network anymore.
Parameters
----------
adj : numpy array of size (S,S) with S being the species richness
Adjacency matrix.
species : numpy array of shape (nbsimu, S) with nbsimu being the number of simulations (decompositions).
This array contains the information about the presence (1) or
absence (0) of each species (columns) in each simulation (rows).
Returns
-------
Numpy array of shape (S,S) corresponding to the adjacency matrix
without the links that needed to be removed.
"""
# Row i full of 0 if the species i is extinct and full of 1 otherwise
cancelrow = np.repeat(species, species.shape[1],axis=1).reshape(*adj.shape)
# Column j full of 0 if the species j is extinct and full of 1 otherwise
cancelcol = np.repeat(species, species.shape[1],axis=0).reshape(*adj.shape)
adj = cancelcol * cancelrow * adj
return(adj)
def extinction(species, adj, z, independent):
"""
Returns the presence/absence of each species after taking into account
the secondary extinctions.
Parameters
----------
species : numpy array of shape (nbsimu, S) with nbsimu being the number
of simulations (decompositions). This array contains the information
about the presence (1) or absence (0) of each species (columns) in
each simulation (rows).
adj : numpy array of size (S,S) with S being the species richness
Adjacency matrix.
z : float
Number of species which might not undergo secondary extinction.
independent : bool
Should the species having no incoming links be considered as
independent (i.e. not undergo secondary extinction)?
Returns
-------
Numpy array of shape (nbsimu, S) containing, for each decomposition (row),
the presence (1) or absence (0) of each species (columns).
"""
#-------- Extinction of dependent species --------
# Basic rule for dependent species :
# they need to be linked to another species to be part of the network
left = np.sum(adj, axis = 2)[:,z:] # Number of neighbours left
Psurvival = (left > 0).astype(int) # Survival if at least 1 neighbour
# Extinction cascade through trophic levels
while np.sum(species[:,z:] != Psurvival) != 0 :
### Extinction(s) ###
# Removal of non surviving species
species[:,z:] = (species[:,z:])*Psurvival
# Removal of non surviving links (i.e. links of the extinct species)
adj = cancel(adj, species)
### Check for higher order extinctions ###
left = np.sum(adj, axis=2)[:,z:] # Number of neighbours left
Psurvival = (left > 0).astype(int) # Survival if at least 1 neighbour
#-------- Extinction of independent species --------
if independent==False: # If there is no independent species
# Species having no incoming link undergo secondary extinction
interact = np.sum(cancel(adj, species),axis=1)[:, :z] # Outgoing links
(species[:,:z])[interact == 0] = 0 # Removed if no outgoing links left
return(species)
| [
"numpy.mean",
"collections.namedtuple",
"numpy.repeat",
"numpy.ones",
"numpy.full_like",
"numpy.where",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"numpy.tril",
"numpy.arange",
"numpy.random.permutation"
] | [((2145, 2162), 'numpy.tril', 'np.tril', (['(mat != 0)'], {}), '(mat != 0)\n', (2152, 2162), True, 'import numpy as np\n'), ((2929, 2953), 'numpy.array', 'np.array', (['(nbsimu * [mat])'], {}), '(nbsimu * [mat])\n', (2937, 2953), True, 'import numpy as np\n'), ((3045, 3065), 'numpy.ones', 'np.ones', (['(nbsimu, S)'], {}), '((nbsimu, S))\n', (3052, 3065), True, 'import numpy as np\n'), ((4068, 4127), 'collections.namedtuple', 'namedtuple', (['"""experes"""', "('S', 'L', 'b', 'z', 'sseq', 'lseq')"], {}), "('experes', ('S', 'L', 'b', 'z', 'sseq', 'lseq'))\n", (4078, 4127), False, 'from collections import namedtuple\n'), ((5796, 5812), 'numpy.log', 'np.log', (['lseqforl'], {}), '(lseqforl)\n', (5802, 5812), True, 'import numpy as np\n'), ((5894, 5919), 'numpy.sum', 'np.sum', (['((obs - pred) ** 2)'], {}), '((obs - pred) ** 2)\n', (5900, 5919), True, 'import numpy as np\n'), ((6085, 6139), 'collections.namedtuple', 'namedtuple', (['"""R2Lres"""', "('breg', 'areg', 'r2reg', 'r2b')"], {}), "('R2Lres', ('breg', 'areg', 'r2reg', 'r2b'))\n", (6095, 6139), False, 'from collections import namedtuple\n'), ((7039, 7057), 'numpy.repeat', 'np.repeat', (['(True)', 'S'], {}), '(True, S)\n', (7048, 7057), True, 'import numpy as np\n'), ((7070, 7084), 'numpy.zeros', 'np.zeros', (['(S,)'], {}), '((S,))\n', (7078, 7084), True, 'import numpy as np\n'), ((7148, 7160), 'numpy.arange', 'np.arange', (['S'], {}), '(S)\n', (7157, 7160), True, 'import numpy as np\n'), ((7462, 7482), 'numpy.full_like', 'np.full_like', (['mat', '(0)'], {}), '(mat, 0)\n', (7474, 7482), True, 'import numpy as np\n'), ((7522, 7545), 'numpy.arange', 'np.arange', (['mat.shape[0]'], {}), '(mat.shape[0])\n', (7531, 7545), True, 'import numpy as np\n'), ((2268, 2286), 'numpy.tril', 'np.tril', (['mat'], {'k': '(-1)'}), '(mat, k=-1)\n', (2275, 2286), True, 'import numpy as np\n'), ((2316, 2325), 'numpy.log', 'np.log', (['L'], {}), '(L)\n', (2322, 2325), True, 'import numpy as np\n'), ((2326, 2341), 'numpy.log', 
'np.log', (['(0.5 * S)'], {}), '(0.5 * S)\n', (2332, 2341), True, 'import numpy as np\n'), ((3173, 3184), 'numpy.sum', 'np.sum', (['adj'], {}), '(adj)\n', (3179, 3184), True, 'import numpy as np\n'), ((3910, 3924), 'numpy.array', 'np.array', (['sseq'], {}), '(sseq)\n', (3918, 3924), True, 'import numpy as np\n'), ((3988, 4002), 'numpy.array', 'np.array', (['lseq'], {}), '(lseq)\n', (3996, 4002), True, 'import numpy as np\n'), ((5844, 5864), 'numpy.log', 'np.log', (['(sseqforl / 2)'], {}), '(sseqforl / 2)\n', (5850, 5864), True, 'import numpy as np\n'), ((6170, 6194), 'numpy.exp', 'np.exp', (['paramL.intercept'], {}), '(paramL.intercept)\n', (6176, 6194), True, 'import numpy as np\n'), ((7585, 7608), 'numpy.arange', 'np.arange', (['mat.shape[0]'], {}), '(mat.shape[0])\n', (7594, 7608), True, 'import numpy as np\n'), ((10948, 10967), 'numpy.sum', 'np.sum', (['adj'], {'axis': '(2)'}), '(adj, axis=2)\n', (10954, 10967), True, 'import numpy as np\n'), ((11137, 11172), 'numpy.sum', 'np.sum', (['(species[:, z:] != Psurvival)'], {}), '(species[:, z:] != Psurvival)\n', (11143, 11172), True, 'import numpy as np\n'), ((2527, 2546), 'numpy.sum', 'np.sum', (['mat'], {'axis': '(1)'}), '(mat, axis=1)\n', (2533, 2546), True, 'import numpy as np\n'), ((3693, 3716), 'numpy.sum', 'np.sum', (['species'], {'axis': '(1)'}), '(species, axis=1)\n', (3699, 3716), True, 'import numpy as np\n'), ((5685, 5701), 'numpy.log', 'np.log', (['sseqforl'], {}), '(sseqforl)\n', (5691, 5701), True, 'import numpy as np\n'), ((5708, 5724), 'numpy.log', 'np.log', (['lseqforl'], {}), '(lseqforl)\n', (5714, 5724), True, 'import numpy as np\n'), ((8376, 8404), 'numpy.where', 'np.where', (['(species[n, :] != 0)'], {}), '(species[n, :] != 0)\n', (8384, 8404), True, 'import numpy as np\n'), ((9508, 9552), 'numpy.repeat', 'np.repeat', (['species', 'species.shape[1]'], {'axis': '(1)'}), '(species, species.shape[1], axis=1)\n', (9517, 9552), True, 'import numpy as np\n'), ((9665, 9709), 'numpy.repeat', 
'np.repeat', (['species', 'species.shape[1]'], {'axis': '(0)'}), '(species, species.shape[1], axis=0)\n', (9674, 9709), True, 'import numpy as np\n'), ((11499, 11518), 'numpy.sum', 'np.sum', (['adj'], {'axis': '(2)'}), '(adj, axis=2)\n', (11505, 11518), True, 'import numpy as np\n'), ((5966, 5978), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (5973, 5978), True, 'import numpy as np\n'), ((8590, 8619), 'numpy.random.permutation', 'np.random.permutation', (['extant'], {}), '(extant)\n', (8611, 8619), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
import librosa
import argparse
import time
def main():
parser = argparse.ArgumentParser(
prog = 'The Noise Reduction (Spectral Subtraction)',
usage = 'シンプルなスペクトルサブトラクションで, ノイズを軽減します.',
description = 'python3 NoiseReduction.py -i [Input Filename] -o [Output Filename] -s [Noise start time(sec)] -f [Noise finish time(sec)]',
epilog = 'MIT Licensce',
add_help=True
)
parser.add_argument('-i', '--input', help = 'Input Filename', required = True)
parser.add_argument('-o', '--output', help = 'Output Filename (wav)', required = True)
parser.add_argument('-s', '--start', help = 'Cut Noise Sound (Start Time [sec])', required = True)
parser.add_argument('-f', '--finish', help = 'Cut Noise Sound (Finish Time [sec])', required = True)
args = parser.parse_args()
# 処理開始時間
process_start = time.time()
# 音声ファイル読込
print('[Do] 音声ファイル読込 Filename: ', args.input)
data, sr = librosa.load(args.input)
print('[Done] 音声ファイル読込 Total Time: ', len(data) / sr, ' [sec]')
print('[Do] ノイズ軽減 計算')
# short-time Fourier transfor (STFT)
# (n_fft = 2048, hop_length = win_length(=n_fft) / 4, window = 'hann')
# D: np.ndarray [shape=(1+n_fft / 2, t) T = t * hop_length])
S = np.abs(librosa.stft(data))
# Convert a power spectrogram to decibel(dB)
D = librosa.power_to_db(S**2)
# Calc Noise FrameRate
_n_fft = 2048
_hop_length = _n_fft / 4
noise_start = int(_hop_length * float(args.start))
noise_finish = int(_hop_length * float(args.finish))
# Noise Copy and calc Average powers
noise_D = D[:, noise_start : noise_finish]
noise_Ave = np.average(noise_D, axis = 1)
# Calc Spectral Subtraction
D = D.transpose()
SS = D - noise_Ave
SS = SS.transpose()
# Convert decibel to power spectrogram
SSP = librosa.db_to_power(SS)
# Inverse short-time Fourier transfor(ISTFT)
OutputS = librosa.istft(SSP)
# 正規化(normalize)
OutputS = librosa.util.normalize(OutputS)
print('[Done] ノイズ軽減 計算')
print('[Do] ノイズ軽減ファイル出力 : ', args.output)
# Output File (WAV)
librosa.output.write_wav(args.output, OutputS, sr)
# 処理時間計算
process_finish = time.time()
process_time = process_finish - process_start
print('[Done] ノイズ軽減ファイル出力 処理時間 : ', process_time, ' [sec]')
if __name__ == '__main__':
main() | [
"librosa.istft",
"librosa.db_to_power",
"argparse.ArgumentParser",
"numpy.average",
"librosa.output.write_wav",
"librosa.power_to_db",
"librosa.stft",
"time.time",
"librosa.util.normalize",
"librosa.load"
] | [((115, 419), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""The Noise Reduction (Spectral Subtraction)"""', 'usage': '"""シンプルなスペクトルサブトラクションで, ノイズを軽減します."""', 'description': '"""python3 NoiseReduction.py -i [Input Filename] -o [Output Filename] -s [Noise start time(sec)] -f [Noise finish time(sec)]"""', 'epilog': '"""MIT Licensce"""', 'add_help': '(True)'}), "(prog='The Noise Reduction (Spectral Subtraction)',\n usage='シンプルなスペクトルサブトラクションで, ノイズを軽減します.', description=\n 'python3 NoiseReduction.py -i [Input Filename] -o [Output Filename] -s [Noise start time(sec)] -f [Noise finish time(sec)]'\n , epilog='MIT Licensce', add_help=True)\n", (138, 419), False, 'import argparse\n'), ((928, 939), 'time.time', 'time.time', ([], {}), '()\n', (937, 939), False, 'import time\n'), ((1026, 1050), 'librosa.load', 'librosa.load', (['args.input'], {}), '(args.input)\n', (1038, 1050), False, 'import librosa\n'), ((1435, 1462), 'librosa.power_to_db', 'librosa.power_to_db', (['(S ** 2)'], {}), '(S ** 2)\n', (1454, 1462), False, 'import librosa\n'), ((1763, 1790), 'numpy.average', 'np.average', (['noise_D'], {'axis': '(1)'}), '(noise_D, axis=1)\n', (1773, 1790), True, 'import numpy as np\n'), ((1957, 1980), 'librosa.db_to_power', 'librosa.db_to_power', (['SS'], {}), '(SS)\n', (1976, 1980), False, 'import librosa\n'), ((2052, 2070), 'librosa.istft', 'librosa.istft', (['SSP'], {}), '(SSP)\n', (2065, 2070), False, 'import librosa\n'), ((2110, 2141), 'librosa.util.normalize', 'librosa.util.normalize', (['OutputS'], {}), '(OutputS)\n', (2132, 2141), False, 'import librosa\n'), ((2253, 2303), 'librosa.output.write_wav', 'librosa.output.write_wav', (['args.output', 'OutputS', 'sr'], {}), '(args.output, OutputS, sr)\n', (2277, 2303), False, 'import librosa\n'), ((2342, 2353), 'time.time', 'time.time', ([], {}), '()\n', (2351, 2353), False, 'import time\n'), ((1354, 1372), 'librosa.stft', 'librosa.stft', (['data'], {}), '(data)\n', (1366, 1372), False, 'import 
librosa\n')] |
import json
import re
import ast
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold, cross_val_score
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, log_loss
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score
people_path = pd.read_csv("../data/processed/people_transformation/people_cast_list.csv")
## 1.Dataset Builder
def convert_output_id(output):
data = []
for i in range(0, len(output)):
if isinstance(output[i], dict):
if len(output[i]["movie_results"]) >= 1:
data.append(output[i]["movie_results"][0]["id"])
return data
def get_transformed_json(path):
id_remove = []
json_final = []
with open(path) as json_file:
data = json.load(json_file)
for i in range(0, len(data)):
if data[i] == "<class 'requests.exceptions.ReadTimeout'>":
id_remove.append(i)
elif data[i] == "<class 'requests.exceptions.ConnectionError'>":
id_remove.append(i)
else:
json_final.append(data[i])
return json_final
## 2.1 Pre_transformation
def from_json_to_array(df, column, regex):
df[column] = df[column].apply(lambda x: re.findall(rf"{regex}", str(x)))
def split_credits_column(df):
df["cast"] = df["credits"].apply(lambda x: string_to_dictionary(x, "cast"))
df["crew"] = df["credits"].apply(lambda x: string_to_dictionary(x, "crew"))
df.drop("credits", axis=1, inplace=True)
## 2.2 People Pre Pre_transformation
def unique_values(df_list):
ids_list = [re.findall(r'\b\d+\b', value) for value in df_list]
return set(flatten(ids_list))
## 4 Data Wrangling
def create_new_columns(df, column):
value_list = []
for cell in df[column]:
lista_genres = re.findall(r'\b\w+\b', cell)
for value in lista_genres:
value_list.append(value)
v = get_value_counts(value_list)
columns_to_zero(df, v, column)
validate_column(df, column)
def get_average_people(df, df_list, year):
ids_list = [re.findall(r'\b\d+\b', value) for value in df_list]
for i in range(len(df_list)):
df.loc[i, "cast"] = np.mean(get_score(ids_list[i], year[i]))
## Modeling
def predict(model, X_train, y_train, X_test, y_test, model_text):
model.fit(X_train, y_train)
y_pred_test = model.predict(X_test)
cf_matrix = confusion_matrix(y_test, y_pred_test)
plot_confusion_matrix(cf_matrix, model_text)
return baseline_report(model, X_train, X_test, y_train, y_test, model_text)
def predict_linear(model, X_test, X_train, y_train, X, y, model_text):
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
score = r2_score(y_train, y_pred)
scores = cross_val_score(model,
X_train,
y_train,
cv=5,
scoring='r2')
print('CV Mean: ', np.mean(scores))
print('STD: ', np.std(scores))
fig, ax = plt.subplots()
ax.scatter(y_train, y_pred)
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)
ax.set_xlabel('Predicted revenue')
ax.set_ylabel('Predicted revenue')
plt.title('Measured versus predicted revenue')
plt.savefig(f"../data/exports/{model_text}_regression_scatter.png")
plt.show()
# Private functions
## Generics
def isNaN(num):
return num == num
flatten = lambda l: [item for sublist in l for item in sublist]
def string_to_dictionary(data, key):
if data and (data == data):
json_string = ast.literal_eval(data)
json_dump = json.dumps(json_string)
json_loaded = json.loads(json_dump)
return json_loaded[key]
else:
return np.NaN
## Encode our categorical values
def get_value_counts(list):
d = {}
for name in list:
d[name.lower()] = d.get(name.lower(), 0) + 1
sorted_list = dict(sorted(d.items(), key=lambda kv: kv[1], reverse=True))
return sorted_list
def columns_to_zero(df, sorted_list, column):
iterator = iter(sorted_list.items())
for i in range(len(sorted_list)):
name_column = f"{column}_{list(sorted_list)[i]}"
df[name_column] = 0
next(iterator)
def validate_column(df, column):
for i, j in df.iterrows():
lista_genres = re.findall(r'\b\w+\b', j[column])
for value in lista_genres:
name_column = f"{column}_{value.lower()}"
df.loc[i, name_column] = 1
## Retrieve average cast value from df
def get_score(ids, year):
values = []
for id_ in ids:
dict_people = people_path[people_path["tmdb_id"] == int(id_)].to_dict('r')
if len(dict_people) > 0:
values.append(get_mean_value(get_values_people(dict_people, year)))
return values
def get_mean_value(array):
try:
mean_value = sum(array) / len(array)
except ZeroDivisionError:
mean_value = 0
return mean_value
def get_values_people(dictionary, year):
values = []
for i,k in dictionary[0].items():
if i != "tmdb_id":
if '{0:g}'.format(float(i)) == str(year):
if isNaN(k):
values.append(k)
return values
else:
if isNaN(k):
values.append(k)
## Model visualization
def plot_confusion_matrix(cf_matrix, model_text):
group_names = ["True Neg", "False Pos", "False Neg", "True Pos"]
group_counts = ["{0:0.0f}".format(value) for value in cf_matrix.flatten()]
group_percentages = ["{0:.2%}".format(value) for value in
cf_matrix.flatten()/np.sum(cf_matrix)]
labels = [f"{v1}\n{v2}\n{v3}" for v1, v2, v3 in zip(group_names,group_counts,group_percentages)]
labels = np.asarray(labels).reshape(2,2)
sns_heatmap = sns.heatmap(cf_matrix, annot=labels, fmt="", cmap='Blues')
sns_heatmap.figure.savefig(f"../data/exports/{model_text}_classification_heatmap.png")
def baseline_report(model, X_train, X_test, y_train, y_test, name):
"""
The function takes the model, the split data and the name and returns the dataframe with
the scores of the models with training and test data.
"""
strat_k_fold = StratifiedKFold(n_splits=5, shuffle=True)
model.fit(X_train, y_train)
accuracy = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='accuracy'))
precision = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='precision'))
recall = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='recall'))
f1score = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='f1'))
rocauc = np.mean(cross_val_score(model, X_train, y_train, cv=strat_k_fold, scoring='roc_auc'))
y_pred_train = model.predict(X_train)
logloss = log_loss(y_train, y_pred_train)
df_model_train = pd.DataFrame({'data' : 'training',
'model' : [name],
'accuracy' : [accuracy],
'precision' : [precision],
'recall' : [recall],
'f1score' : [f1score],
'rocauc' : [rocauc],
'logloss' : [logloss]})
accuracy = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='accuracy'))
precision = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='precision'))
recall = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='recall'))
f1score = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='f1'))
rocauc = np.mean(cross_val_score(model, X_test, y_test, cv=strat_k_fold, scoring='roc_auc'))
y_pred_test = model.predict(X_test)
logloss = log_loss(y_test, y_pred_test) # SVC & LinearSVC unable to use cvs
df_model_test = pd.DataFrame({'data' : 'test',
'model' : [name],
'accuracy' : [accuracy],
'precision' : [precision],
'recall' : [recall],
'f1score' : [f1score],
'rocauc' : [rocauc],
'logloss' : [logloss]}) # timetaken: to be used for comparison later
df_model = pd.concat([df_model_train, df_model_test], ignore_index=True)
return df_model
| [
"pandas.read_csv",
"sklearn.model_selection.StratifiedKFold",
"sklearn.metrics.log_loss",
"sklearn.metrics.r2_score",
"numpy.mean",
"json.dumps",
"numpy.asarray",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.cross_val_score",
"json.loads",
"matplotlib.pyplot... | [((494, 569), 'pandas.read_csv', 'pd.read_csv', (['"""../data/processed/people_transformation/people_cast_list.csv"""'], {}), "('../data/processed/people_transformation/people_cast_list.csv')\n", (505, 569), True, 'import pandas as pd\n'), ((2605, 2642), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (2621, 2642), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((2924, 2949), 'sklearn.metrics.r2_score', 'r2_score', (['y_train', 'y_pred'], {}), '(y_train, y_pred)\n', (2932, 2949), False, 'from sklearn.metrics import r2_score\n'), ((2964, 3024), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': '(5)', 'scoring': '"""r2"""'}), "(model, X_train, y_train, cv=5, scoring='r2')\n", (2979, 3024), False, 'from sklearn.model_selection import cross_val_score\n'), ((3232, 3246), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3244, 3246), True, 'import matplotlib.pyplot as plt\n'), ((3426, 3472), 'matplotlib.pyplot.title', 'plt.title', (['"""Measured versus predicted revenue"""'], {}), "('Measured versus predicted revenue')\n", (3435, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3544), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../data/exports/{model_text}_regression_scatter.png"""'], {}), "(f'../data/exports/{model_text}_regression_scatter.png')\n", (3488, 3544), True, 'import matplotlib.pyplot as plt\n'), ((3549, 3559), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3557, 3559), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6094), 'seaborn.heatmap', 'sns.heatmap', (['cf_matrix'], {'annot': 'labels', 'fmt': '""""""', 'cmap': '"""Blues"""'}), "(cf_matrix, annot=labels, fmt='', cmap='Blues')\n", (6047, 6094), True, 'import seaborn as sns\n'), ((6441, 6482), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 
'shuffle': '(True)'}), '(n_splits=5, shuffle=True)\n', (6456, 6482), False, 'from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold, cross_val_score\n'), ((7098, 7129), 'sklearn.metrics.log_loss', 'log_loss', (['y_train', 'y_pred_train'], {}), '(y_train, y_pred_train)\n', (7106, 7129), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, log_loss\n'), ((7152, 7345), 'pandas.DataFrame', 'pd.DataFrame', (["{'data': 'training', 'model': [name], 'accuracy': [accuracy], 'precision':\n [precision], 'recall': [recall], 'f1score': [f1score], 'rocauc': [\n rocauc], 'logloss': [logloss]}"], {}), "({'data': 'training', 'model': [name], 'accuracy': [accuracy],\n 'precision': [precision], 'recall': [recall], 'f1score': [f1score],\n 'rocauc': [rocauc], 'logloss': [logloss]})\n", (7164, 7345), True, 'import pandas as pd\n'), ((8164, 8193), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (8172, 8193), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, log_loss\n'), ((8253, 8442), 'pandas.DataFrame', 'pd.DataFrame', (["{'data': 'test', 'model': [name], 'accuracy': [accuracy], 'precision': [\n precision], 'recall': [recall], 'f1score': [f1score], 'rocauc': [rocauc\n ], 'logloss': [logloss]}"], {}), "({'data': 'test', 'model': [name], 'accuracy': [accuracy],\n 'precision': [precision], 'recall': [recall], 'f1score': [f1score],\n 'rocauc': [rocauc], 'logloss': [logloss]})\n", (8265, 8442), True, 'import pandas as pd\n'), ((8752, 8813), 'pandas.concat', 'pd.concat', (['[df_model_train, df_model_test]'], {'ignore_index': '(True)'}), '([df_model_train, df_model_test], ignore_index=True)\n', (8761, 8813), True, 'import pandas as pd\n'), ((970, 990), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (979, 990), False, 'import json\n'), ((1799, 1830), 're.findall', 're.findall', (['"""\\\\b\\\\d+\\\\b"""', 'value'], 
{}), "('\\\\b\\\\d+\\\\b', value)\n", (1809, 1830), False, 'import re\n'), ((2014, 2044), 're.findall', 're.findall', (['"""\\\\b\\\\w+\\\\b"""', 'cell'], {}), "('\\\\b\\\\w+\\\\b', cell)\n", (2024, 2044), False, 'import re\n'), ((2279, 2310), 're.findall', 're.findall', (['"""\\\\b\\\\d+\\\\b"""', 'value'], {}), "('\\\\b\\\\d+\\\\b', value)\n", (2289, 2310), False, 'import re\n'), ((3165, 3180), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (3172, 3180), True, 'import numpy as np\n'), ((3201, 3215), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (3207, 3215), True, 'import numpy as np\n'), ((3789, 3811), 'ast.literal_eval', 'ast.literal_eval', (['data'], {}), '(data)\n', (3805, 3811), False, 'import ast\n'), ((3832, 3855), 'json.dumps', 'json.dumps', (['json_string'], {}), '(json_string)\n', (3842, 3855), False, 'import json\n'), ((3878, 3899), 'json.loads', 'json.loads', (['json_dump'], {}), '(json_dump)\n', (3888, 3899), False, 'import json\n'), ((4535, 4570), 're.findall', 're.findall', (['"""\\\\b\\\\w+\\\\b"""', 'j[column]'], {}), "('\\\\b\\\\w+\\\\b', j[column])\n", (4545, 4570), False, 'import re\n'), ((6542, 6619), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': 'strat_k_fold', 'scoring': '"""accuracy"""'}), "(model, X_train, y_train, cv=strat_k_fold, scoring='accuracy')\n", (6557, 6619), False, 'from sklearn.model_selection import cross_val_score\n'), ((6648, 6726), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': 'strat_k_fold', 'scoring': '"""precision"""'}), "(model, X_train, y_train, cv=strat_k_fold, scoring='precision')\n", (6663, 6726), False, 'from sklearn.model_selection import cross_val_score\n'), ((6755, 6830), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': 'strat_k_fold', 'scoring': '"""recall"""'}), "(model, X_train, y_train, cv=strat_k_fold, 
scoring='recall')\n", (6770, 6830), False, 'from sklearn.model_selection import cross_val_score\n'), ((6859, 6930), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': 'strat_k_fold', 'scoring': '"""f1"""'}), "(model, X_train, y_train, cv=strat_k_fold, scoring='f1')\n", (6874, 6930), False, 'from sklearn.model_selection import cross_val_score\n'), ((6959, 7035), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': 'strat_k_fold', 'scoring': '"""roc_auc"""'}), "(model, X_train, y_train, cv=strat_k_fold, scoring='roc_auc')\n", (6974, 7035), False, 'from sklearn.model_selection import cross_val_score\n'), ((7620, 7695), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_test', 'y_test'], {'cv': 'strat_k_fold', 'scoring': '"""accuracy"""'}), "(model, X_test, y_test, cv=strat_k_fold, scoring='accuracy')\n", (7635, 7695), False, 'from sklearn.model_selection import cross_val_score\n'), ((7724, 7800), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_test', 'y_test'], {'cv': 'strat_k_fold', 'scoring': '"""precision"""'}), "(model, X_test, y_test, cv=strat_k_fold, scoring='precision')\n", (7739, 7800), False, 'from sklearn.model_selection import cross_val_score\n'), ((7829, 7902), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_test', 'y_test'], {'cv': 'strat_k_fold', 'scoring': '"""recall"""'}), "(model, X_test, y_test, cv=strat_k_fold, scoring='recall')\n", (7844, 7902), False, 'from sklearn.model_selection import cross_val_score\n'), ((7931, 8000), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_test', 'y_test'], {'cv': 'strat_k_fold', 'scoring': '"""f1"""'}), "(model, X_test, y_test, cv=strat_k_fold, scoring='f1')\n", (7946, 8000), False, 'from sklearn.model_selection import cross_val_score\n'), ((8029, 8103), 'sklearn.model_selection.cross_val_score', 
'cross_val_score', (['model', 'X_test', 'y_test'], {'cv': 'strat_k_fold', 'scoring': '"""roc_auc"""'}), "(model, X_test, y_test, cv=strat_k_fold, scoring='roc_auc')\n", (8044, 8103), False, 'from sklearn.model_selection import cross_val_score\n'), ((5986, 6004), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (5996, 6004), True, 'import numpy as np\n'), ((5853, 5870), 'numpy.sum', 'np.sum', (['cf_matrix'], {}), '(cf_matrix)\n', (5859, 5870), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/10 21:53
# @Author : DaiPuWei
# E-Mail : <EMAIL>
# blog : https://blog.csdn.net/qq_30091945
# @Site : 中国民航大学北教25-506实验室
# @File : GMM.py
# @Software: PyCharm
"""
这是高斯混合模型的Python类代码,这份代码使用EM算法进行
求解模型参数,对于高斯分布的均值向量利用K-Means聚类算法
的最终的聚类质心代替,协方差矩阵利用数据集的协方差矩阵代替
"""
import numpy as np
from KMeansCluster.KMeansCluster import KMeansCluster
class GMM(object):
def __init__(self,Data,K,weights = None,means = None,covars = None):
"""
这是GMM(高斯混合模型)类的构造函数
:param Data: 数据集
:param K: 高斯分布的个数
:param weigths: 每个高斯分布的初始概率(权重)数组,默认为None,即可以没有
:param means: 高斯分布的均值向量数组,默认为None,即可以没有
:param covars: 高斯分布的协方差矩阵数组,默认为None,即可以没有
"""
# 初始化数据集、高斯分布个数和数据集的形状
self.Data = Data
self.K = K
self.size,self.dim = np.shape(self.Data)
# 初始化数据集中每组数据属于各个高斯分布的概率数组
# possibility[i][j]代表第i个样本属于第j个高斯分布的概率
self.possibility = np.array([list(np.zeros(self.K)) for i in range(self.size)])
# 初始化聚类标签数组
self.clusterlabels = []
# 随机隐含变量的多项式分布的参数数组不为None则进行初始化
if weights is not None:
self.weights = weights
else: # 随机隐含变量的多项式分布的参数数组为None时
self.weights = np.array([1.0 / self.K] * self.K)
# 高斯分布的均值向量数组不为None则进行初始化
if means is not None:
self.means = means
else: # 高斯分布的均值向量数组为None时
# 获取高斯分布的均值向量数组
self.means = self.get_means(self.Data,self.K)
# 高斯分布的协方差矩阵数组不为None则进行初始化
if covars is not None:
self.covars = covars
else: # 高斯分布的协方差矩阵数组为None时
# 利用数据集的协方差矩阵代替高斯分布的协方差矩阵
self.covars = self.get_cov(self.Data,self.K)
def get_means(self,data,K):
"""
K-Means聚类算法的聚类质心作为高斯分布的均值向量
:param data: 数据集
:param K: 高斯分布个数
:param criter: 标准系数
"""
# 定义K-Means聚类算法
kmeans = KMeansCluster(data)
# 获取K-Means的聚类质心
_,centroids,__ = kmeans.cluster(K,None)
return centroids
def get_cov(self,data,K):
"""
这是生成矩阵的函数
:param data: 数据集
:param k: 高斯混合模型个数
"""
covs = []
for i in range(K):
# 利用数据集的协方差矩阵作为高斯分布的协方差矩阵
covs.append(np.cov(data,rowvar=False))
return covs
def Gaussian(self,x,mean,cov):
"""
这是自定义高斯分布概率密度函数
:param x: 输入数据
:param mean: 均值数组
:param cov: 协方差矩阵
:return: x的概率
"""
# 获取协方差矩阵规模,即数据维数
dim = np.shape(cov)[0]
# cov的行列式为零时,加上一个与协防矩阵同规模的单位阵乘较小的常数
if np.linalg.det(cov) == 0:
covdet = np.linalg.det(cov + np.eye(dim) * 0.001)
covinv = np.linalg.inv(cov + np.eye(dim) * 0.001)
else: # cov的行列式不为零时
covdet = np.linalg.det(cov)
covinv = np.linalg.inv(cov)
# 计算数据与均值向量之间的的差值
xdiff = x - mean
xdiff = np.reshape(xdiff,(1,len(xdiff)))
# 计算高斯分布概率密度值
prob = 1.0/(np.power(2*np.pi,1.0*dim/2)*np.sqrt(np.abs(covdet)))* \
np.exp(-0.5*np.dot(np.dot(xdiff,covinv),xdiff.T))[0][0]
return prob
def GMM_EM(self):
"""
这是利用EM算法进行优化GMM参数的函数
:return: 返回各组数据的属于每个分类的概率
"""
loglikelyhood = 0 # 当前迭代的极大似然函数值
oldloglikelyhood = 1 # 上一次迭代的极大似然函数值
while np.abs(loglikelyhood-oldloglikelyhood) > 1E-4:
oldloglikelyhood = loglikelyhood
# 下面是EM算法的E-step
# 遍历整个数据集,计算每组数据属于每个高斯分布的后验概率
self.possibility = []
for data in self.Data:
# respons是E-Step中每组数据与对应的随机隐含变量之间的联合概率数组
respons = np.array([self.weights[k] * self.Gaussian(data, self.means[k], self.covars[k])
for k in range(self.K)])
# 计算联合概率之和
sum_respons = np.sum(respons)
# 利用全概率公式计算每组数据对应于各个高斯分布的后验概率
respons = respons / sum_respons
self.possibility.append(list(respons))
self.possibility = np.array(self.possibility)
# 下面是EM算法的M-step,根据E-step设置的后验概率更新G模型参数
# 遍历每个高斯分布
for k in range(self.K):
#计算数据集中每组数据属于第k个高斯分布的概率和
sum_gaussionprob_k = np.sum([self.possibility[i][k] for i in range(self.size)])
# 更新第k个高斯分布的均值向量
self.means[k] = (1.0 / sum_gaussionprob_k) * np.sum([self.possibility[i][k] * self.Data[i]
for i in range(self.size)], axis=0)
# 计算数据集与均值向量之间的差值
xdiffs = self.Data - self.means[k]
# 更新第k个高斯分布的协方差矩阵
self.covars[k] = (1.0/sum_gaussionprob_k)*\
np.sum([self.possibility[i][k]*xdiffs[i].reshape(self.dim,1)*xdiffs[i]
for i in range(self.size)],axis=0)
# 更新随机隐含变量的多项式分布的第k个参数
self.weights[k] = 1.0 * sum_gaussionprob_k / self.size
# 更新整个数据集的极大似然函数值
loglikelyhood = []
# 遍历整个数据集,计算每组数据的对应的极大似然函数值
for data in self.Data:
# 遍历每个高斯分布,计算每组数据在每个高斯分布下的极大似然估计
data_Likelyhood = [self.Likelyhood(data,k) for k in range(self.K)]
loglikelyhood.extend(data_Likelyhood)
# 计算最终的数据集的极大似然函数值
loglikelyhood = np.log(self.Mul(np.array(loglikelyhood)))
# 对每组数据集分配到各个高斯分布的概率进行归一化
for i in range(self.size):
self.possibility[i] = self.possibility[i]/np.sum(self.possibility[i])
# 生成每组数据的聚类标签
self.clusterlabels = np.array([np.argmax(_possibility) for _possibility in self.possibility])
return self.clusterlabels,loglikelyhood
def Mul(self,data):
"""
这是进行数据连乘的函数
:param data: 数组
"""
ans = 1.0
for _data in data:
ans = ans * _data
return ans
def Likelyhood(self,data,k):
"""
这是计算每组数据在第k个高斯分布下的的极大似然函数值
:param data: 数据
:param k: 第k个高斯分布
"""
# 计算第k个高斯分布下的概率值
gaussian = self.Gaussian(data, self.means[k], self.covars[k])
# 数据在第k个高斯分布下的的极大似然函数值为第k个
# 高斯分布下的概率值与多项式分布的第k的参数的乘积
likelyhood = self.weights[k] * gaussian
return likelyhood | [
"numpy.abs",
"numpy.eye",
"numpy.power",
"numpy.argmax",
"numpy.linalg.det",
"numpy.array",
"numpy.sum",
"numpy.linalg.inv",
"numpy.cov",
"numpy.zeros",
"numpy.dot",
"numpy.shape",
"KMeansCluster.KMeansCluster.KMeansCluster"
] | [((880, 899), 'numpy.shape', 'np.shape', (['self.Data'], {}), '(self.Data)\n', (888, 899), True, 'import numpy as np\n'), ((1992, 2011), 'KMeansCluster.KMeansCluster.KMeansCluster', 'KMeansCluster', (['data'], {}), '(data)\n', (2005, 2011), False, 'from KMeansCluster.KMeansCluster import KMeansCluster\n'), ((1298, 1331), 'numpy.array', 'np.array', (['([1.0 / self.K] * self.K)'], {}), '([1.0 / self.K] * self.K)\n', (1306, 1331), True, 'import numpy as np\n'), ((2610, 2623), 'numpy.shape', 'np.shape', (['cov'], {}), '(cov)\n', (2618, 2623), True, 'import numpy as np\n'), ((2682, 2700), 'numpy.linalg.det', 'np.linalg.det', (['cov'], {}), '(cov)\n', (2695, 2700), True, 'import numpy as np\n'), ((2880, 2898), 'numpy.linalg.det', 'np.linalg.det', (['cov'], {}), '(cov)\n', (2893, 2898), True, 'import numpy as np\n'), ((2920, 2938), 'numpy.linalg.inv', 'np.linalg.inv', (['cov'], {}), '(cov)\n', (2933, 2938), True, 'import numpy as np\n'), ((3447, 3487), 'numpy.abs', 'np.abs', (['(loglikelyhood - oldloglikelyhood)'], {}), '(loglikelyhood - oldloglikelyhood)\n', (3453, 3487), True, 'import numpy as np\n'), ((4167, 4193), 'numpy.array', 'np.array', (['self.possibility'], {}), '(self.possibility)\n', (4175, 4193), True, 'import numpy as np\n'), ((2342, 2368), 'numpy.cov', 'np.cov', (['data'], {'rowvar': '(False)'}), '(data, rowvar=False)\n', (2348, 2368), True, 'import numpy as np\n'), ((3971, 3986), 'numpy.sum', 'np.sum', (['respons'], {}), '(respons)\n', (3977, 3986), True, 'import numpy as np\n'), ((5705, 5732), 'numpy.sum', 'np.sum', (['self.possibility[i]'], {}), '(self.possibility[i])\n', (5711, 5732), True, 'import numpy as np\n'), ((5794, 5817), 'numpy.argmax', 'np.argmax', (['_possibility'], {}), '(_possibility)\n', (5803, 5817), True, 'import numpy as np\n'), ((1024, 1040), 'numpy.zeros', 'np.zeros', (['self.K'], {}), '(self.K)\n', (1032, 1040), True, 'import numpy as np\n'), ((3081, 3115), 'numpy.power', 'np.power', (['(2 * np.pi)', '(1.0 * dim / 2)'], {}), '(2 
* np.pi, 1.0 * dim / 2)\n', (3089, 3115), True, 'import numpy as np\n'), ((5556, 5579), 'numpy.array', 'np.array', (['loglikelyhood'], {}), '(loglikelyhood)\n', (5564, 5579), True, 'import numpy as np\n'), ((2748, 2759), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (2754, 2759), True, 'import numpy as np\n'), ((2810, 2821), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (2816, 2821), True, 'import numpy as np\n'), ((3117, 3131), 'numpy.abs', 'np.abs', (['covdet'], {}), '(covdet)\n', (3123, 3131), True, 'import numpy as np\n'), ((3171, 3192), 'numpy.dot', 'np.dot', (['xdiff', 'covinv'], {}), '(xdiff, covinv)\n', (3177, 3192), True, 'import numpy as np\n')] |
"""
Auxiliarry functions for the pointwise gravity spherical harmonics expansion.
"""
import warnings
import numpy as np
from joblib import Parallel, delayed
from .legendre import lplm, lplm_d1
np.seterr(over='raise')
def expand_parallel(x, q, *args):
nlat = x.shape[0]
# parallel only if there are more than one circle
if nlat > 1:
values = np.array(Parallel(n_jobs=-1)(delayed(expand_circle)(x[i], q[i], *args)
for i in range(nlat)))
else:
values = np.array([expand_circle(x[i], q[i], *args)
for i in range(nlat)])
return values
def expand_circle(x, q, in_coeff_func, sum_func, *args):
if q.size == 1:
in_coeff = in_coeff_func(x, q, *args[:-2])
values = []
for j in range(args[-2][0].shape[0]):
if q.size > 1:
in_coeff = in_coeff_func(x[j], q[j], *args[:-2])
out = sum_func(in_coeff, args[-2][:, j], args[-1])
values.append(out)
return values
def common_precompute(lat, lon, r, r0, lmax):
lat = np.atleast_2d(lat)
lon = np.atleast_2d(lon)
r = np.atleast_2d(r)
degrees = np.arange(lmax + 1)
m = np.atleast_2d(lon[0]).T * degrees
cosin = np.array([np.cos(m), np.sin(m)])
if np.allclose(r[:, 0, None], r):
ri = 1 / r[:, 0]
x = np.sin(lat[:, 0])
else:
ri = 1 / r
x = np.sin(lat)
q = np.asarray(r0 * ri)
if np.any(q > 1.01):
warnings.filterwarnings('once')
warnings.warn("Possible singularity in downward continuation, r << r0")
return lat, lon, degrees, cosin, x, q
def in_coeff_potential(x, q, lmax, degrees):
p = lplm(lmax, x)
q = np.power(q, degrees)
l_coeff = np.tile(q, (lmax + 1, 1)).T
in_coeff = l_coeff * p
return in_coeff
def sum_potential(in_coeff, cosin, cilm):
cosm_sinm_sum = cilm[0] * cosin[0] + cilm[1] * cosin[1]
pot = np.sum(in_coeff * (cosm_sinm_sum))
return pot
def in_coeff_r_derivative(x, q, lmax, degrees):
p = lplm(lmax, x)
q = np.power(q, degrees)
l_coeff = np.tile(q * (degrees + 1), (lmax + 1, 1)).T
in_coeff = l_coeff * p
return in_coeff
def in_coeff_lat_derivative(x, q, lmax, degrees):
pole = np.allclose(x, -1) | np.allclose(x, 1)
if not pole:
_, p_d1 = lplm_d1(lmax, x)
q = np.power(q, degrees)
l_coeff = np.tile(q, (lmax + 1, 1)).T
in_coeff = l_coeff * p_d1
else:
in_coeff = 0.0
return in_coeff
def sum_lat_derivative(in_coeff, cosin, cilm):
if not np.all(in_coeff == 0.0):
cosm_sinm_sum = cilm[0] * cosin[0] + cilm[1] * cosin[1]
lat_d = np.sum(in_coeff * (cosm_sinm_sum))
else:
lat_d = 0.0
return lat_d
def in_coeff_lon_derivative(x, q, lmax, degrees, m_coeff):
pole = np.allclose(x, -1) | np.allclose(x, 1)
if not pole:
p = lplm(lmax, x)
q = np.power(q, degrees)
l_coeff = np.tile(q, (lmax + 1, 1)).T
in_coeff = l_coeff * m_coeff * p
else:
in_coeff = 0.0
return in_coeff
def sum_lon_derivative(in_coeff, cosin, cilm):
if not np.all(in_coeff == 0.0):
lon_d = np.sum(in_coeff * (-cilm[1] * cosin[0] +
cilm[0] * cosin[1]))
else:
lon_d = 0.0
return lon_d
def in_coeff_gradient(x, q, lmax, degrees, m_coeff):
p, p_d1 = lplm_d1(lmax, x)
q = np.power(q, degrees)
l_coeff_1 = np.tile(q, (lmax + 1, 1)).T
l_coeff_rad_d = np.tile(q * (degrees + 1), (lmax + 1, 1)).T
in_coeff_rad_d = l_coeff_rad_d * p
if p_d1 is not None:
in_coeff_lat_d = l_coeff_1 * p_d1
in_coeff_lon_d = l_coeff_1 * m_coeff * p
else:
in_coeff_lat_d = in_coeff_lon_d = 0.0
in_coeff = (in_coeff_rad_d, in_coeff_lat_d, in_coeff_lon_d)
return in_coeff
def sum_gradient(in_coeff, cosin, cilm):
cosm_sinm_sum = cilm[0] * cosin[0] + cilm[1] * cosin[1]
rad_d = np.sum(in_coeff[0] * (cosm_sinm_sum))
if not np.all(in_coeff[1:] == 0.0):
lat_d = np.sum(in_coeff[1] * (cosm_sinm_sum))
lon_d = np.sum(in_coeff[2] * (-cilm[1] * cosin[0] +
cilm[0] * cosin[1]))
else:
lat_d = lon_d = 0.0
return (lat_d, lon_d, rad_d)
def in_coeff_gravity_anomaly(x, q, lmax, degrees):
p = lplm(lmax, x)
q = np.power(q, degrees)
l_coeff = np.tile(q * (degrees - 1), (lmax + 1, 1)).T
in_coeff = l_coeff * p
return in_coeff
| [
"numpy.atleast_2d",
"numpy.tile",
"numpy.allclose",
"numpy.all",
"numpy.power",
"numpy.asarray",
"numpy.any",
"joblib.Parallel",
"numpy.sum",
"numpy.cos",
"numpy.sin",
"joblib.delayed",
"warnings.warn",
"numpy.seterr",
"warnings.filterwarnings",
"numpy.arange"
] | [((196, 219), 'numpy.seterr', 'np.seterr', ([], {'over': '"""raise"""'}), "(over='raise')\n", (205, 219), True, 'import numpy as np\n'), ((1082, 1100), 'numpy.atleast_2d', 'np.atleast_2d', (['lat'], {}), '(lat)\n', (1095, 1100), True, 'import numpy as np\n'), ((1111, 1129), 'numpy.atleast_2d', 'np.atleast_2d', (['lon'], {}), '(lon)\n', (1124, 1129), True, 'import numpy as np\n'), ((1138, 1154), 'numpy.atleast_2d', 'np.atleast_2d', (['r'], {}), '(r)\n', (1151, 1154), True, 'import numpy as np\n'), ((1170, 1189), 'numpy.arange', 'np.arange', (['(lmax + 1)'], {}), '(lmax + 1)\n', (1179, 1189), True, 'import numpy as np\n'), ((1286, 1315), 'numpy.allclose', 'np.allclose', (['r[:, 0, None]', 'r'], {}), '(r[:, 0, None], r)\n', (1297, 1315), True, 'import numpy as np\n'), ((1434, 1453), 'numpy.asarray', 'np.asarray', (['(r0 * ri)'], {}), '(r0 * ri)\n', (1444, 1453), True, 'import numpy as np\n'), ((1462, 1478), 'numpy.any', 'np.any', (['(q > 1.01)'], {}), '(q > 1.01)\n', (1468, 1478), True, 'import numpy as np\n'), ((1721, 1741), 'numpy.power', 'np.power', (['q', 'degrees'], {}), '(q, degrees)\n', (1729, 1741), True, 'import numpy as np\n'), ((1947, 1979), 'numpy.sum', 'np.sum', (['(in_coeff * cosm_sinm_sum)'], {}), '(in_coeff * cosm_sinm_sum)\n', (1953, 1979), True, 'import numpy as np\n'), ((2077, 2097), 'numpy.power', 'np.power', (['q', 'degrees'], {}), '(q, degrees)\n', (2085, 2097), True, 'import numpy as np\n'), ((3443, 3463), 'numpy.power', 'np.power', (['q', 'degrees'], {}), '(q, degrees)\n', (3451, 3463), True, 'import numpy as np\n'), ((3988, 4023), 'numpy.sum', 'np.sum', (['(in_coeff[0] * cosm_sinm_sum)'], {}), '(in_coeff[0] * cosm_sinm_sum)\n', (3994, 4023), True, 'import numpy as np\n'), ((4395, 4415), 'numpy.power', 'np.power', (['q', 'degrees'], {}), '(q, degrees)\n', (4403, 4415), True, 'import numpy as np\n'), ((1354, 1371), 'numpy.sin', 'np.sin', (['lat[:, 0]'], {}), '(lat[:, 0])\n', (1360, 1371), True, 'import numpy as np\n'), ((1413, 1424), 
'numpy.sin', 'np.sin', (['lat'], {}), '(lat)\n', (1419, 1424), True, 'import numpy as np\n'), ((1488, 1519), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""once"""'], {}), "('once')\n", (1511, 1519), False, 'import warnings\n'), ((1528, 1599), 'warnings.warn', 'warnings.warn', (['"""Possible singularity in downward continuation, r << r0"""'], {}), "('Possible singularity in downward continuation, r << r0')\n", (1541, 1599), False, 'import warnings\n'), ((1757, 1782), 'numpy.tile', 'np.tile', (['q', '(lmax + 1, 1)'], {}), '(q, (lmax + 1, 1))\n', (1764, 1782), True, 'import numpy as np\n'), ((2113, 2154), 'numpy.tile', 'np.tile', (['(q * (degrees + 1))', '(lmax + 1, 1)'], {}), '(q * (degrees + 1), (lmax + 1, 1))\n', (2120, 2154), True, 'import numpy as np\n'), ((2268, 2286), 'numpy.allclose', 'np.allclose', (['x', '(-1)'], {}), '(x, -1)\n', (2279, 2286), True, 'import numpy as np\n'), ((2289, 2306), 'numpy.allclose', 'np.allclose', (['x', '(1)'], {}), '(x, 1)\n', (2300, 2306), True, 'import numpy as np\n'), ((2371, 2391), 'numpy.power', 'np.power', (['q', 'degrees'], {}), '(q, degrees)\n', (2379, 2391), True, 'import numpy as np\n'), ((2586, 2609), 'numpy.all', 'np.all', (['(in_coeff == 0.0)'], {}), '(in_coeff == 0.0)\n', (2592, 2609), True, 'import numpy as np\n'), ((2691, 2723), 'numpy.sum', 'np.sum', (['(in_coeff * cosm_sinm_sum)'], {}), '(in_coeff * cosm_sinm_sum)\n', (2697, 2723), True, 'import numpy as np\n'), ((2846, 2864), 'numpy.allclose', 'np.allclose', (['x', '(-1)'], {}), '(x, -1)\n', (2857, 2864), True, 'import numpy as np\n'), ((2867, 2884), 'numpy.allclose', 'np.allclose', (['x', '(1)'], {}), '(x, 1)\n', (2878, 2884), True, 'import numpy as np\n'), ((2940, 2960), 'numpy.power', 'np.power', (['q', 'degrees'], {}), '(q, degrees)\n', (2948, 2960), True, 'import numpy as np\n'), ((3162, 3185), 'numpy.all', 'np.all', (['(in_coeff == 0.0)'], {}), '(in_coeff == 0.0)\n', (3168, 3185), True, 'import numpy as np\n'), ((3203, 3264), 'numpy.sum', 
'np.sum', (['(in_coeff * (-cilm[1] * cosin[0] + cilm[0] * cosin[1]))'], {}), '(in_coeff * (-cilm[1] * cosin[0] + cilm[0] * cosin[1]))\n', (3209, 3264), True, 'import numpy as np\n'), ((3481, 3506), 'numpy.tile', 'np.tile', (['q', '(lmax + 1, 1)'], {}), '(q, (lmax + 1, 1))\n', (3488, 3506), True, 'import numpy as np\n'), ((3529, 3570), 'numpy.tile', 'np.tile', (['(q * (degrees + 1))', '(lmax + 1, 1)'], {}), '(q * (degrees + 1), (lmax + 1, 1))\n', (3536, 3570), True, 'import numpy as np\n'), ((4038, 4065), 'numpy.all', 'np.all', (['(in_coeff[1:] == 0.0)'], {}), '(in_coeff[1:] == 0.0)\n', (4044, 4065), True, 'import numpy as np\n'), ((4083, 4118), 'numpy.sum', 'np.sum', (['(in_coeff[1] * cosm_sinm_sum)'], {}), '(in_coeff[1] * cosm_sinm_sum)\n', (4089, 4118), True, 'import numpy as np\n'), ((4137, 4201), 'numpy.sum', 'np.sum', (['(in_coeff[2] * (-cilm[1] * cosin[0] + cilm[0] * cosin[1]))'], {}), '(in_coeff[2] * (-cilm[1] * cosin[0] + cilm[0] * cosin[1]))\n', (4143, 4201), True, 'import numpy as np\n'), ((4431, 4472), 'numpy.tile', 'np.tile', (['(q * (degrees - 1))', '(lmax + 1, 1)'], {}), '(q * (degrees - 1), (lmax + 1, 1))\n', (4438, 4472), True, 'import numpy as np\n'), ((1198, 1219), 'numpy.atleast_2d', 'np.atleast_2d', (['lon[0]'], {}), '(lon[0])\n', (1211, 1219), True, 'import numpy as np\n'), ((1255, 1264), 'numpy.cos', 'np.cos', (['m'], {}), '(m)\n', (1261, 1264), True, 'import numpy as np\n'), ((1266, 1275), 'numpy.sin', 'np.sin', (['m'], {}), '(m)\n', (1272, 1275), True, 'import numpy as np\n'), ((2410, 2435), 'numpy.tile', 'np.tile', (['q', '(lmax + 1, 1)'], {}), '(q, (lmax + 1, 1))\n', (2417, 2435), True, 'import numpy as np\n'), ((2979, 3004), 'numpy.tile', 'np.tile', (['q', '(lmax + 1, 1)'], {}), '(q, (lmax + 1, 1))\n', (2986, 3004), True, 'import numpy as np\n'), ((375, 394), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (383, 394), False, 'from joblib import Parallel, delayed\n'), ((395, 417), 'joblib.delayed', 'delayed', 
(['expand_circle'], {}), '(expand_circle)\n', (402, 417), False, 'from joblib import Parallel, delayed\n')] |
"""Script to perform a simple at-home yes/no experiment and analyze the resulting data
using signal detection theory.
"""
import numpy as np
from scipy.stats import norm
import prettytable as pt
import sounddevice as sd
def trial(signal, n=None):
"""Performs a trial in the experiment.
Args:
signal (bool): Should the trial contain a tone?
n (:obj:`bool`, optional): Trial number. If omitted, a "practice" trial is
performed which will allow the observer an opportunity to change the volume
settings on their computer.
Returns:
rsp (bool): On practice trials, this indicates whether the real experiment
should begin. On real trials, it indicates whether the observer responded
"yes".
"""
t = np.arange(0, 0.1, 1 / 44100)
tone = 1e-5 * 10 ** (50 / 20) * np.sin(2 * np.pi * 1000 * t + 0)
noise = np.random.normal(size=len(t)) * tone.std() / np.sqrt(2)
sd.play(noise + tone if signal and isinstance(n, int) else noise, 44100)
responses = {"n": False, "y": True}
if isinstance(n, int):
instr = f"Trial {n}: Did you hear a tone? ([y] or [n])?"
else:
instr = "Adjust your volume settings until the noise barely audible."
instr += "\n([y] to adjust and hear again; [n] to continue)"
while 1:
try:
return responses[input(instr).lower()]
except KeyError:
pass
def experiment():
"""Performs a series of trials.
"""
adj = True
while adj:
adj = trial(False)
X = [False, True] * 20
np.random.shuffle(X)
Y = [trial(*p[::-1]) for p in enumerate(X)]
c = sum([1 for x, y in zip(X, Y) if x == 0 and y == 0])
f = sum([1 for x, y in zip(X, Y) if x == 0 and y == 1])
m = sum([1 for x, y in zip(X, Y) if x == 1 and y == 0])
h = sum([1 for x, y in zip(X, Y) if x == 1 and y == 1])
return c, f, m, h
def sdt_yn(c, f, m, h):
"""Calcualte SDT statistics.
"""
n = f + c
s = m + h
sens = norm.ppf(h / s) - norm.ppf(f / n)
crit = -0.5 * (norm.ppf(h / s) + norm.ppf(f / n))
return sens, crit
if __name__ == "__main__":
print(
"""
████████╗██╗ ██╗███████╗ ██████╗██████╗ █████╗ ██████╗██╗ ██╗███████╗██████╗
╚══██╔══╝██║ ██║██╔════╝ ██╔════╝██╔══██╗██╔══██╗██╔════╝██║ ██╔╝██╔════╝██╔══██╗
██║ ███████║█████╗ ██║ ██████╔╝███████║██║ █████╔╝ █████╗ ██║ ██║
██║ ██╔══██║██╔══╝ ██║ ██╔══██╗██╔══██║██║ ██╔═██╗ ██╔══╝ ██║ ██║
██║ ██║ ██║███████╗ ╚██████╗██║ ██║██║ ██║╚██████╗██║ ██╗███████╗██████╔╝
╚═╝ ╚═╝ ╚═╝╚══════╝ ╚═════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚═════╝
██████╗ █████╗ ███████╗███████╗ ██████╗ ██████╗ ███╗ ██╗ ██╗██╗██╗██╗██╗██╗██╗
██╔══██╗██╔══██╗██╔════╝██╔════╝██╔═══██╗██╔═══██╗████╗ ██║ ██║██║██║██║██║██║██║
██████╔╝███████║███████╗███████╗██║ ██║██║ ██║██╔██╗ ██║ ██║██║██║██║██║██║██║
██╔══██╗██╔══██║╚════██║╚════██║██║ ██║██║ ██║██║╚██╗██║ ╚═╝╚═╝╚═╝╚═╝╚═╝╚═╝╚═╝
██████╔╝██║ ██║███████║███████║╚██████╔╝╚██████╔╝██║ ╚████║ ██╗██╗██╗██╗██╗██╗██╗
╚═════╝ ╚═╝ ╚═╝╚══════╝╚══════╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═╝╚═╝╚═╝╚═╝╚═╝╚═╝╚═╝
Welcome! This script performs a simple experiment and analyzes the data using signal
detection theory (SDT)."""
)
c, f, m, h = experiment()
print("Experiment done!")
table = pt.PrettyTable()
table.field_names = ["", "x = 0", "x = 1"]
table.add_row(["y = 0", c, m])
table.add_row(["y = 1", f, h])
print("Here is your contingency table:")
print(table)
if any(x == 0 for x in (c, f, m, h)):
print(
"""\
Unfortunately, one or more of the cells has a value of 0. SDT statistics can't be
calculated without applying some form of correction. Exiting now"""
)
exit()
print("Calculating SDT statistics ...")
sens, crit = sdt_yn(c, f, m, h)
print("sensitivity (d') = %.2f" % sens)
print("criterion (c) = %.2f" % crit)
| [
"prettytable.PrettyTable",
"numpy.sqrt",
"scipy.stats.norm.ppf",
"numpy.sin",
"numpy.arange",
"numpy.random.shuffle"
] | [((790, 818), 'numpy.arange', 'np.arange', (['(0)', '(0.1)', '(1 / 44100)'], {}), '(0, 0.1, 1 / 44100)\n', (799, 818), True, 'import numpy as np\n'), ((1594, 1614), 'numpy.random.shuffle', 'np.random.shuffle', (['X'], {}), '(X)\n', (1611, 1614), True, 'import numpy as np\n'), ((3582, 3598), 'prettytable.PrettyTable', 'pt.PrettyTable', ([], {}), '()\n', (3596, 3598), True, 'import prettytable as pt\n'), ((855, 887), 'numpy.sin', 'np.sin', (['(2 * np.pi * 1000 * t + 0)'], {}), '(2 * np.pi * 1000 * t + 0)\n', (861, 887), True, 'import numpy as np\n'), ((945, 955), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (952, 955), True, 'import numpy as np\n'), ((2032, 2047), 'scipy.stats.norm.ppf', 'norm.ppf', (['(h / s)'], {}), '(h / s)\n', (2040, 2047), False, 'from scipy.stats import norm\n'), ((2050, 2065), 'scipy.stats.norm.ppf', 'norm.ppf', (['(f / n)'], {}), '(f / n)\n', (2058, 2065), False, 'from scipy.stats import norm\n'), ((2085, 2100), 'scipy.stats.norm.ppf', 'norm.ppf', (['(h / s)'], {}), '(h / s)\n', (2093, 2100), False, 'from scipy.stats import norm\n'), ((2103, 2118), 'scipy.stats.norm.ppf', 'norm.ppf', (['(f / n)'], {}), '(f / n)\n', (2111, 2118), False, 'from scipy.stats import norm\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Created on Tue Jul 14 18:39:41 2015
@author: <NAME>
"""
from datetime import datetime
import collections
import json
import numpy as np
from anim_utils.animation_data import MotionVector, align_quaternion_frames
from anim_utils.animation_data.motion_concatenation import align_and_concatenate_frames
from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_INFO, LOG_MODE_ERROR
from .annotated_motion_vector import AnnotatedMotionVector
from ..constraints.spatial_constraints import SPATIAL_CONSTRAINT_TYPE_KEYFRAME_POSITION, SPATIAL_CONSTRAINT_TYPE_TWO_HAND_POSITION
from .keyframe_event_list import KeyframeEventList
from ..constraints.spatial_constraints.splines.utils import plot_annotated_spline
DEFAULT_PLACE_ACTION_LIST = ["placeRight", "placeLeft","insertRight","insertLeft","screwRight", "screwLeft"] #list of actions in which the orientation constraints are ignored
class GraphWalkEntry(object):
def __init__(self, motion_state_graph, node_key, parameters, arc_length, start_frame, end_frame, motion_primitive_constraints=None):
self.node_key = node_key
self.parameters = parameters
self.arc_length = arc_length
self.start_frame = start_frame
self.end_frame = end_frame
self.motion_primitive_constraints = motion_primitive_constraints
self.n_spatial_components = motion_state_graph.nodes[node_key].get_n_spatial_components()
self.n_time_components = motion_state_graph.nodes[node_key].get_n_time_components()
@staticmethod
def from_json(motion_state_graph, data):
return GraphWalkEntry(motion_state_graph, tuple(data["node_key"]),
np.array(data["parameters"]), data["arc_length"],
data["start_frame"], data["end_frame"])
def to_json(self):
data = dict()
data["node_key"] =self.node_key
data["parameters"] = self.parameters.tolist()
data["arc_length"] = self.arc_length
data["start_frame"] = self.start_frame
data["end_frame"] = self.end_frame
return data
class HighLevelGraphWalkEntry(object):
def __init__(self, action_name, start_step, end_step, action_constraints):
self.action_name = action_name
self.start_step = start_step
self.end_step = end_step
self.action_constraints = action_constraints
class GraphWalk(object):
""" Product of the MotionGenerate class. Contains the graph walk used to generate the frames,
a mapping of frame segments
to elementary actions and a list of events for certain frames.
"""
def __init__(self, motion_state_graph, mg_input, algorithm_config, start_pose=None, create_ca_vis_data=False):
self.elementary_action_list = []
self.steps = []
self.motion_state_graph = motion_state_graph
self.step_count = 0
self.mg_input = mg_input
self._algorithm_config = algorithm_config
self.motion_vector = MotionVector(self.motion_state_graph.skeleton, algorithm_config)
if start_pose is None:
start_pose = mg_input.get_start_pose()
self.motion_vector.start_pose = start_pose
smoothing_settings = algorithm_config["smoothing_settings"]
self.spatial_smoothing_method = "smoothing"
self.apply_smoothing = smoothing_settings["spatial_smoothing"] # set whether the exported motion is smoothed at transitions
if "spatial_smoothing_method" in smoothing_settings:
self.spatial_smoothing_method = smoothing_settings["spatial_smoothing_method"]
self.motion_vector.apply_spatial_smoothing = False # deactivate smoothing during the synthesis
self.use_time_parameters = algorithm_config["activate_time_variation"]
self.constrain_place_orientation = algorithm_config["inverse_kinematics_settings"]["constrain_place_orientation"]
write_message_to_log("Use time parameters" + str(self.use_time_parameters), LOG_MODE_DEBUG)
self.keyframe_event_list = KeyframeEventList(create_ca_vis_data)
self.place_action_list = DEFAULT_PLACE_ACTION_LIST
def add_entry_to_action_list(self, action_name, start_step, end_step, action_constraints):
self.elementary_action_list.append(HighLevelGraphWalkEntry(action_name, start_step, end_step, action_constraints))
def convert_to_annotated_motion(self, step_size=1.0):
self.motion_vector.apply_spatial_smoothing = self.apply_smoothing # set wether or not smoothing is applied
self.motion_vector.spatial_smoothing_method = self.spatial_smoothing_method
self.convert_graph_walk_to_quaternion_frames(use_time_parameters=self.use_time_parameters, step_size=step_size)
self.keyframe_event_list.update_events(self, 0)
annotated_motion_vector = AnnotatedMotionVector(self.motion_state_graph.skeleton, self._algorithm_config)
annotated_motion_vector.frames = self.motion_vector.frames
annotated_motion_vector.n_frames = self.motion_vector.n_frames
annotated_motion_vector.frame_time = self.motion_state_graph.skeleton.frame_time
annotated_motion_vector.keyframe_event_list = self.keyframe_event_list
annotated_motion_vector.skeleton = self.motion_state_graph.skeleton
annotated_motion_vector.mg_input = self.mg_input
version = 0
if "version" in self._algorithm_config["inverse_kinematics_settings"]:
version = self._algorithm_config["inverse_kinematics_settings"]["version"]
if version == 1:
annotated_motion_vector.ik_constraints = self._create_ik_constraints()
elif version == 2:
annotated_motion_vector.ik_constraints = self._create_ik_constraints2()
annotated_motion_vector.graph_walk = self
return annotated_motion_vector
def get_action_from_keyframe(self, keyframe):
found_action_index = -1
step_index = self.get_step_from_keyframe(keyframe)
write_message_to_log("Found keyframe in step " + str(step_index), LOG_MODE_DEBUG)
if step_index < 0:
return found_action_index
for action_index, action in enumerate(self.elementary_action_list):
if action.start_step <= step_index <= action.end_step:
found_action_index = action_index
return found_action_index
def get_step_from_keyframe(self, keyframe):
found_step_index = -1
for step_index, step in enumerate(self.steps):
#Note the start_frame and end_frame are warped in update_temp_motion_vector
#print step.start_frame, keyframe, step.end_frame
if step.start_frame <= keyframe <= step.end_frame:
found_step_index = step_index
return found_step_index
def convert_graph_walk_to_quaternion_frames(self, start_step=0, use_time_parameters=False, step_size=1.0):
"""
:param start_step:
:return:
"""
if start_step == 0:
start_frame = 0
else:
start_frame = self.steps[start_step].start_frame
self.motion_vector.clear(end_frame=start_frame)
for step in self.steps[start_step:]:
step.start_frame = start_frame
#write_log(step.node_key, len(step.parameters))
quat_frames = self.motion_state_graph.nodes[step.node_key].back_project(step.parameters, use_time_parameters, step_size).get_motion_vector()
if step.node_key[1].lower().endswith("leftstance"):
foot_joint = "foot_r"
elif step.node_key[1].lower().endswith("rightstance"):
foot_joint = "foot_l"
else:
foot_joint = None
self.motion_vector.append_frames(quat_frames, foot_joint)
step.end_frame = self.get_num_of_frames()-1
start_frame = step.end_frame + 1
def get_global_spatial_parameter_vector(self, start_step=0):
initial_guess = []
for step in self.steps[start_step:]:
initial_guess += step.parameters[:step.n_spatial_components].tolist()
return initial_guess
def get_global_time_parameter_vector(self, start_step=0):
initial_guess = []
for step in self.steps[start_step:]:
initial_guess += step.parameters[step.n_spatial_components:].tolist()
return initial_guess
def update_spatial_parameters(self, parameter_vector, start_step=0):
write_message_to_log("Update spatial parameters", LOG_MODE_DEBUG)
offset = 0
for step in self.steps[start_step:]:
new_alpha = parameter_vector[offset:offset+step.n_spatial_components]
step.parameters[:step.n_spatial_components] = new_alpha
offset += step.n_spatial_components
def update_time_parameters(self, parameter_vector, start_step, end_step):
offset = 0
for step in self.steps[start_step:end_step]:
new_gamma = parameter_vector[offset:offset+step.n_time_components]
step.parameters[step.n_spatial_components:] = new_gamma
offset += step.n_time_components
def append_quat_frames(self, new_frames):
self.motion_vector.append_frames(new_frames)
def get_quat_frames(self):
return self.motion_vector.frames
def get_num_of_frames(self):
return self.motion_vector.n_frames
def update_frame_annotation(self, action_name, start_frame, end_frame):
""" Adds a dictionary to self.frame_annotation marking start and end
frame of an action.
"""
self.keyframe_event_list.update_frame_annotation(action_name, start_frame, end_frame)
def _create_ik_constraints(self):
ik_constraints = []
for idx, action in enumerate(self.elementary_action_list):
write_message_to_log("Create IK constraints for action" + " " + str(idx) + " " + str(action.start_step) + " " + str(self.steps[action.start_step].start_frame), LOG_MODE_DEBUG)
if not self.constrain_place_orientation and action.action_name in self.place_action_list:
constrain_orientation = False
else:
constrain_orientation = True
start_step = action.start_step
end_step = action.end_step
elementary_action_ik_constraints = dict()
elementary_action_ik_constraints["keyframes"] = dict()
elementary_action_ik_constraints["trajectories"] = list()
elementary_action_ik_constraints["collision_avoidance"] = list()
frame_offset = self.steps[start_step].start_frame
for step in self.steps[start_step: end_step+1]:
time_function = None
if self.use_time_parameters and self.motion_state_graph.nodes[step.node_key].get_n_time_components() > 0:
time_function = self.motion_state_graph.nodes[step.node_key].back_project_time_function(step.parameters)
step_keyframe_constraints = step.motion_primitive_constraints.convert_to_ik_constraints(self.motion_state_graph, frame_offset, time_function, constrain_orientation)
elementary_action_ik_constraints["keyframes"].update(step_keyframe_constraints)
elementary_action_ik_constraints["collision_avoidance"] += step.motion_primitive_constraints.get_ca_constraints()
frame_offset += step.end_frame - step.start_frame + 1
if self._algorithm_config["collision_avoidance_constraints_mode"] == "ik":
elementary_action_ik_constraints["trajectories"] += self._create_ik_trajectory_constraints_from_ca_trajectories(idx)
elementary_action_ik_constraints["trajectories"] += self._create_ik_trajectory_constraints_from_annotated_trajectories(idx)
ik_constraints.append(elementary_action_ik_constraints)
return ik_constraints
    def _create_ik_constraints2(self):
        """Build keyframe IK constraints in the flat version-2 layout.

        Unlike _create_ik_constraints, returns a single OrderedDict of
        keyframe constraints across all actions (no trajectory/CA entries).
        """
        ik_constraints = collections.OrderedDict()
        for idx, action in enumerate(self.elementary_action_list):
            write_message_to_log("Create IK constraints for action" + " " + str(idx) + " " + str(action.start_step) + " " + str(self.steps[action.start_step].start_frame), LOG_MODE_DEBUG)
            # Place actions may skip the orientation constraint unless forced on.
            if not self.constrain_place_orientation and action.action_name in self.place_action_list:
                constrain_orientation = False
            else:
                constrain_orientation = True
            start_step = action.start_step
            end_step = action.end_step
            # frame_offset tracks where each step starts in the concatenated motion.
            frame_offset = self.steps[start_step].start_frame
            for step in self.steps[start_step: end_step + 1]:
                time_function = None
                if self.use_time_parameters and self.motion_state_graph.nodes[step.node_key].get_n_time_components() > 0:
                    time_function = self.motion_state_graph.nodes[step.node_key].back_project_time_function(step.parameters)
                step_constraints = step.motion_primitive_constraints.convert_to_ik_constraints(
                    self.motion_state_graph, frame_offset, time_function, constrain_orientation, version=2)
                ik_constraints.update(step_constraints)
                frame_offset += step.end_frame - step.start_frame + 1
        return ik_constraints
def _create_ik_trajectory_constraints_from_ca_trajectories(self, action_idx):
frame_annotation = self.keyframe_event_list.frame_annotation['elementaryActionSequence'][action_idx]
trajectory_constraints = list()
action = self.elementary_action_list[action_idx]
for ca_constraint in action.action_constraints.collision_avoidance_constraints:
traj_constraint = dict()
traj_constraint["trajectory"] = ca_constraint
traj_constraint["fixed_range"] = False # search for closer start
traj_constraint["constrain_orientation"] = False
traj_constraint["start_frame"] = frame_annotation["startFrame"]
traj_constraint["end_frame"] = frame_annotation["endFrame"]
#TODO find a better solution than this workaround that undoes the joint name mapping from hands to tool bones for ca constraints
if self.mg_input.activate_joint_mapping and ca_constraint.joint_name in list(self.mg_input.inverse_joint_name_map.keys()):
joint_name = self.mg_input.inverse_joint_name_map[ca_constraint.joint_name]
else:
joint_name = ca_constraint.joint_name
traj_constraint["joint_name"] = joint_name
traj_constraint["delta"] = 1.0
trajectory_constraints.append(traj_constraint)
return trajectory_constraints
    def _create_ik_trajectory_constraints_from_annotated_trajectories(self, action_idx):
        """Create IK trajectory constraints from annotated trajectory constraints.

        For each annotated trajectory constraint of the action, find the steps
        whose motion primitive carries an annotation region with a matching
        label and emit one constraint dict per matching (constraint, step).
        """
        write_message_to_log("extract annotated trajectories", LOG_MODE_DEBUG)
        frame_annotation = self.keyframe_event_list.frame_annotation['elementaryActionSequence'][action_idx]
        start_frame = frame_annotation["startFrame"]
        trajectory_constraints = list()
        action = self.elementary_action_list[action_idx]
        for constraint in action.action_constraints.annotated_trajectory_constraints:
            # Only the first annotation label of the constraint is considered.
            label = list(constraint.semantic_annotation.keys())[0]
            write_message_to_log("trajectory constraint label " + str(list(constraint.semantic_annotation.keys())), LOG_MODE_DEBUG)
            action_name = action.action_name
            for step in self.steps[action.start_step: action.end_step+1]:
                motion_primitive_name = step.node_key[1]
                write_message_to_log("look for action annotation of " + action_name+" "+motion_primitive_name, LOG_MODE_DEBUG)
                # Skip primitives without annotation regions.
                if motion_primitive_name not in self.motion_state_graph.node_groups[action_name].motion_primitive_annotation_regions:
                    continue
                annotations = self.motion_state_graph.node_groups[action_name].motion_primitive_annotation_regions[motion_primitive_name]
                write_message_to_log("action annotation" + str(annotations) +" "+ str(frame_annotation["startFrame"]) + " " + str(frame_annotation["endFrame"]),
                                     LOG_MODE_DEBUG)
                if label not in list(annotations.keys()):
                    continue
                annotation_range = annotations[label]
                traj_constraint = dict()
                traj_constraint["trajectory"] = constraint
                traj_constraint["constrain_orientation"] = True
                traj_constraint["fixed_range"] = True
                time_function = None
                if self.use_time_parameters and self.motion_state_graph.nodes[step.node_key].get_n_time_components() > 0:
                    time_function = self.motion_state_graph.nodes[step.node_key].back_project_time_function(step.parameters)
                if time_function is None:
                    # Annotation range is already in canonical frames.
                    traj_constraint["start_frame"] = start_frame + annotation_range[0]
                    traj_constraint["end_frame"] = start_frame + annotation_range[1]
                else:
                    # Map the canonical annotation range through the time function.
                    #add +1 for correct mapping TODO verify for all cases
                    traj_constraint["start_frame"] = start_frame + int(time_function[annotation_range[0]]) + 1
                    traj_constraint["end_frame"] = start_frame + int(time_function[annotation_range[1]]) + 1
                # Undo the hand-to-tool joint mapping if it is active.
                if self.mg_input.activate_joint_mapping and constraint.joint_name in list(self.mg_input.inverse_joint_name_map.keys()):
                    joint_name = self.mg_input.inverse_joint_name_map[constraint.joint_name]
                else:
                    joint_name = constraint.joint_name
                traj_constraint["joint_name"] = joint_name
                traj_constraint["delta"] = 1.0
                write_message_to_log( "create ik trajectory constraint from label " + str(label), LOG_MODE_DEBUG)
                trajectory_constraints.append(traj_constraint)
        return trajectory_constraints
    def get_average_keyframe_constraint_error(self):
        """Return the mean error of all user-specified keyframe constraints.

        Back-projects and concatenates each step's motion (without time
        parameters) and evaluates every non-generated position constraint
        against the aligned frames. Returns -1 if no such constraint exists.
        """
        keyframe_constraint_errors = []
        step_index = 0
        prev_frames = None
        for step_idx, step in enumerate(self.steps):
            quat_frames = self.motion_state_graph.nodes[step.node_key].back_project(step.parameters, use_time_parameters=False).get_motion_vector()
            skeleton = self.motion_vector.skeleton
            # Align the step's frames to the end of the previously concatenated motion.
            aligned_frames = align_and_concatenate_frames(skeleton, skeleton.aligning_root_node, quat_frames, prev_frames,
                                                         self.motion_vector.start_pose, 0)
            for c_idx, constraint in enumerate(step.motion_primitive_constraints.constraints):
                # Only evaluate position constraints that came from user input
                # (constraints tagged "generated" are internal).
                if constraint.constraint_type in [SPATIAL_CONSTRAINT_TYPE_KEYFRAME_POSITION, SPATIAL_CONSTRAINT_TYPE_TWO_HAND_POSITION] and\
                    not "generated" in list(constraint.semantic_annotation.keys()):
                    error = constraint.evaluate_motion_sample(aligned_frames)
                    write_message_to_log("Error of Keyframe constraint " +str(step_idx) + "-" + str(c_idx) +": " +str(error), LOG_MODE_DEBUG)
                    keyframe_constraint_errors.append(error)
            prev_frames = aligned_frames
            step_index += 1
        if len(keyframe_constraint_errors) > 0:
            return np.average(keyframe_constraint_errors)
        else:
            return -1
def get_generated_constraints(self):
step_count = 0
generated_constraints = dict()
for step in self.steps:
key = str(step.node_key) + str(step_count)
generated_constraints[key] = []
for constraint in step.motion_primitive_constraints.constraints:
if constraint.is_generated():
generated_constraints[key].append(constraint.position)
step_count += 1
return generated_constraints
def get_average_error(self):
average_error = 0
for step in self.steps:
average_error += step.motion_primitive_constraints.min_error
if average_error > 0:
average_error /= len(self.steps)
return average_error
def get_number_of_object_evaluations(self):
objective_evaluations = 0
for step in self.steps:
objective_evaluations += step.motion_primitive_constraints.evaluations
return objective_evaluations
def print_statistics(self):
print(self.get_statistics_string())
def get_statistics_string(self):
average_error = self.get_average_error()
evaluations_string = "Total number of objective evaluations " + str(self.get_number_of_object_evaluations())
error_string = "Average error for " + str(len(self.steps)) + \
" motion primitives: " + str(average_error)
average_keyframe_error = self.get_average_keyframe_constraint_error()
if average_keyframe_error > -1:
average_keyframe_error_string = "Average keyframe constraint error " + str(average_keyframe_error)
else:
average_keyframe_error_string = "No keyframe constraint specified"
average_time_per_step = 0.0
for step in self.steps:
average_time_per_step += step.motion_primitive_constraints.time
average_time_per_step /= len(self.steps)
average_time_string = "Average time per motion primitive " + str(average_time_per_step)
return average_keyframe_error_string + "\n" + evaluations_string + "\n" + average_time_string + "\n" + error_string
def export_generated_constraints(self, file_path="goals.path"):
""" Converts constraints that were generated based on input constraints into a json dictionary for a debug visualization
"""
root_control_point_data = []
hand_constraint_data = []
for idx, step in enumerate(self.steps):
step_constraints = {"semanticAnnotation": {"step": idx}}
for c in step.motion_primitive_constraints.constraints:
if c.constraint_type == "keyframe_position" and c.joint_name == self.motion_state_graph.skeleton.root:
p = c.position
if p is not None:
step_constraints["position"] = [p[0], -p[2], None]
elif c.constraint_type == "keyframe_2d_direction":
step_constraints["direction"] = c.direction_constraint.tolist()
elif c.constraint_type == "ca_constraint":
#if c.constraint_type in ["RightHand", "LeftHand"]:
position = [c.position[0], -c.position[2], c.position[1]]
hand_constraint = {"position": position}
hand_constraint_data.append(hand_constraint)
root_control_point_data.append(step_constraints)
constraints = {"tasks": [{"elementaryActions":[{
"action": "walk",
"constraints": [{"joint": "Hips",
"keyframeConstraints": root_control_point_data },
{"joint": "RightHand",
"keyframeConstraints": hand_constraint_data}]
}]
}]
}
constraints["startPose"] = {"position":[0,0,0], "orientation": [0,0,0]}
constraints["session"] = "session"
with open(file_path, "wb") as out:
json.dump(constraints, out)
def get_number_of_actions(self):
return len(self.elementary_action_list)
def plot_constraints(self, file_name="traj"):
for idx, action in enumerate(self.elementary_action_list):
start_frame = self.steps[action.start_step].start_frame
end_frame = self.steps[action.end_step].end_frame
root_motion = self.motion_vector.frames#[start_frame:end_frame,:3]
if action.action_constraints.root_trajectory is not None:
traj_constraint = action.action_constraints.root_trajectory
plot_annotated_spline(traj_constraint,root_motion, file_name+str(idx)+".png")
def to_json(self):
data = dict()
data["algorithm_config"] = self._algorithm_config
data["start_pose"] = self.motion_vector.start_pose
data["steps"] = []
for step in self.steps:
data["steps"].append(step.to_json())
return data
@staticmethod
def from_json(graph, data):
graph_walk = GraphWalk(graph, None, data["algorithm_config"], data["start_pose"])
graph_walk.steps = []
for step_data in data["steps"]:
graph_walk.steps.append(GraphWalkEntry.from_json(graph, step_data))
return graph_walk
def save_to_file(self, file_path):
with open(file_path, "wb") as out:
json.dump(self.to_json(), out)
| [
"collections.OrderedDict",
"numpy.average",
"anim_utils.utilities.log.write_message_to_log",
"numpy.array",
"anim_utils.animation_data.MotionVector",
"json.dump",
"anim_utils.animation_data.motion_concatenation.align_and_concatenate_frames"
] | [((4124, 4188), 'anim_utils.animation_data.MotionVector', 'MotionVector', (['self.motion_state_graph.skeleton', 'algorithm_config'], {}), '(self.motion_state_graph.skeleton, algorithm_config)\n', (4136, 4188), False, 'from anim_utils.animation_data import MotionVector, align_quaternion_frames\n'), ((9588, 9653), 'anim_utils.utilities.log.write_message_to_log', 'write_message_to_log', (['"""Update spatial parameters"""', 'LOG_MODE_DEBUG'], {}), "('Update spatial parameters', LOG_MODE_DEBUG)\n", (9608, 9653), False, 'from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_INFO, LOG_MODE_ERROR\n'), ((13090, 13115), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (13113, 13115), False, 'import collections\n'), ((15928, 15998), 'anim_utils.utilities.log.write_message_to_log', 'write_message_to_log', (['"""extract annotated trajectories"""', 'LOG_MODE_DEBUG'], {}), "('extract annotated trajectories', LOG_MODE_DEBUG)\n", (15948, 15998), False, 'from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_INFO, LOG_MODE_ERROR\n'), ((2814, 2842), 'numpy.array', 'np.array', (["data['parameters']"], {}), "(data['parameters'])\n", (2822, 2842), True, 'import numpy as np\n'), ((19591, 19722), 'anim_utils.animation_data.motion_concatenation.align_and_concatenate_frames', 'align_and_concatenate_frames', (['skeleton', 'skeleton.aligning_root_node', 'quat_frames', 'prev_frames', 'self.motion_vector.start_pose', '(0)'], {}), '(skeleton, skeleton.aligning_root_node,\n quat_frames, prev_frames, self.motion_vector.start_pose, 0)\n', (19619, 19722), False, 'from anim_utils.animation_data.motion_concatenation import align_and_concatenate_frames\n'), ((20496, 20534), 'numpy.average', 'np.average', (['keyframe_constraint_errors'], {}), '(keyframe_constraint_errors)\n', (20506, 20534), True, 'import numpy as np\n'), ((24859, 24886), 'json.dump', 'json.dump', (['constraints', 'out'], {}), 
'(constraints, out)\n', (24868, 24886), False, 'import json\n'), ((16735, 16853), 'anim_utils.utilities.log.write_message_to_log', 'write_message_to_log', (["('look for action annotation of ' + action_name + ' ' + motion_primitive_name)", 'LOG_MODE_DEBUG'], {}), "('look for action annotation of ' + action_name + ' ' +\n motion_primitive_name, LOG_MODE_DEBUG)\n", (16755, 16853), False, 'from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_INFO, LOG_MODE_ERROR\n')] |
# Standard imports
import cv2
import argparse
import numpy as np

# The script uses the image unconditionally, so make the argument mandatory
# (it was declared required=False, which crashed later with an opaque error).
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help='Path to the image')
args = vars(ap.parse_args())

# Read image; cv2.imread silently returns None for an unreadable path,
# so fail early with a clear message instead.
image = cv2.imread(args['image'])
if image is None:
    raise SystemExit("could not read image: %s" % args['image'])

# Downscale to a fixed working resolution.
dim = (320, 240)
image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)

# Per-channel BGR threshold bounds.
lower = np.array([0, 106, 190], dtype="uint8")
upper = np.array([255, 255, 255], dtype="uint8")

# find the colors within the specified boundaries and apply
# the mask
mask = cv2.inRange(image, lower, upper)
mask = cv2.GaussianBlur(mask, (3, 3), 0)

# NOTE(review): two-value unpacking matches OpenCV 2.x/4.x; OpenCV 3.x
# returns three values here — confirm the target OpenCV version.
(cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) > 0:
    # Draw a bounding box around the largest contour by area.
    cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    x, y, w, h = cv2.boundingRect(cnt)
    image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

output = cv2.bitwise_and(image, image, mask=mask)

# show the images
print("shapes: ", image.shape, mask.shape)
cv2.imshow("images", np.hstack([image]))
cv2.waitKey(0) | [
"cv2.rectangle",
"argparse.ArgumentParser",
"numpy.hstack",
"cv2.inRange",
"cv2.bitwise_and",
"numpy.array",
"cv2.waitKey",
"cv2.resize",
"cv2.GaussianBlur",
"cv2.imread",
"cv2.boundingRect"
] | [((72, 97), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (95, 97), False, 'import argparse\n'), ((225, 250), 'cv2.imread', 'cv2.imread', (["args['image']"], {}), "(args['image'])\n", (235, 250), False, 'import cv2\n'), ((328, 380), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(image, dim, interpolation=cv2.INTER_AREA)\n', (338, 380), False, 'import cv2\n'), ((392, 430), 'numpy.array', 'np.array', (['[0, 106, 190]'], {'dtype': '"""uint8"""'}), "([0, 106, 190], dtype='uint8')\n", (400, 430), True, 'import numpy as np\n'), ((441, 481), 'numpy.array', 'np.array', (['[255, 255, 255]'], {'dtype': '"""uint8"""'}), "([255, 255, 255], dtype='uint8')\n", (449, 481), True, 'import numpy as np\n'), ((563, 595), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (574, 595), False, 'import cv2\n'), ((603, 636), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['mask', '(3, 3)', '(0)'], {}), '(mask, (3, 3), 0)\n', (619, 636), False, 'import cv2\n'), ((928, 968), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (943, 968), False, 'import cv2\n'), ((1073, 1087), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1084, 1087), False, 'import cv2\n'), ((815, 836), 'cv2.boundingRect', 'cv2.boundingRect', (['cnt'], {}), '(cnt)\n', (831, 836), False, 'import cv2\n'), ((846, 906), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (859, 906), False, 'import cv2\n'), ((1053, 1071), 'numpy.hstack', 'np.hstack', (['[image]'], {}), '([image])\n', (1062, 1071), True, 'import numpy as np\n')] |
import argparse
from keras.models import load_model
import make_dataset
import numpy as np
import matplotlib.pyplot as plt
# load the model
import tkinter as tk
from tkinter import filedialog
def main():
    """Evaluate the trained classifier and display train/test confusion matrices."""
    parser = argparse.ArgumentParser()
    parser.add_argument("base", help="base directory where dataset data is found")
    args = parser.parse_args()
    base = args.base + "/"
    # Load the dataset and normalise pixel values into [0, 1].
    (x_train, y_train), (x_test, y_test) = make_dataset.load_data(args.base)
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
    classes = y_test.shape[1]
    model = load_model(base + "classifier.h5")
    print(model.input_shape)
    test_out = model.predict(x_test)
    train_out = model.predict(x_train)
    print(test_out.shape)
    # Confusion matrix over the test set; remember misclassified indices.
    confusion_test = np.zeros((classes, classes))
    missed = []
    for idx, prediction in enumerate(test_out):
        truth = np.argmax(y_test[idx])
        guess = np.argmax(prediction)
        confusion_test[truth][guess] += 1
        if truth != guess:
            missed.append(idx)
    # Confusion matrix over the training set.
    confusion_train = np.zeros((classes, classes))
    for idx, prediction in enumerate(train_out):
        confusion_train[np.argmax(y_train[idx])][np.argmax(prediction)] += 1
    print("test error:", len(missed), len(test_out), "=", len(missed) / len(test_out))
    # Plot both matrices side by side.
    fig = plt.figure()
    axe = fig.add_subplot(1, 2, 1)  # vertical, horizontal, index
    axe.imshow(confusion_test)
    axe = fig.add_subplot(1, 2, 2)
    axe.imshow(confusion_train)
    # show the plot
    plt.show()
# Run the evaluation only when executed as a script.
if __name__=="__main__":
    main()
| [
"keras.models.load_model",
"argparse.ArgumentParser",
"numpy.argmax",
"make_dataset.load_data",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((221, 246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (244, 246), False, 'import argparse\n'), ((454, 487), 'make_dataset.load_data', 'make_dataset.load_data', (['args.base'], {}), '(args.base)\n', (476, 487), False, 'import make_dataset\n'), ((665, 699), 'keras.models.load_model', 'load_model', (["(base + 'classifier.h5')"], {}), "(base + 'classifier.h5')\n", (675, 699), False, 'from keras.models import load_model\n'), ((885, 913), 'numpy.zeros', 'np.zeros', (['(classes, classes)'], {}), '((classes, classes))\n', (893, 913), True, 'import numpy as np\n'), ((1196, 1224), 'numpy.zeros', 'np.zeros', (['(classes, classes)'], {}), '((classes, classes))\n', (1204, 1224), True, 'import numpy as np\n'), ((1538, 1550), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1548, 1550), True, 'import matplotlib.pyplot as plt\n'), ((1739, 1749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1747, 1749), True, 'import matplotlib.pyplot as plt\n'), ((991, 1015), 'numpy.argmax', 'np.argmax', (['y_test[index]'], {}), '(y_test[index])\n', (1000, 1015), True, 'import numpy as np\n'), ((1032, 1047), 'numpy.argmax', 'np.argmax', (['item'], {}), '(item)\n', (1041, 1047), True, 'import numpy as np\n'), ((1287, 1312), 'numpy.argmax', 'np.argmax', (['y_train[index]'], {}), '(y_train[index])\n', (1296, 1312), True, 'import numpy as np\n'), ((1329, 1344), 'numpy.argmax', 'np.argmax', (['item'], {}), '(item)\n', (1338, 1344), True, 'import numpy as np\n')] |
"""
Thanks to <NAME>
"""
from __future__ import division, print_function
import sys
import math
import numba
from numba import jit, autojit, size_t
import numpy as np
import numpy.testing as npt
try:
from skimage import img_as_float
except ImportError as e:
print("skimage not available, skipping")
sys.exit()
SCALAR_DTYPE = np.float64
# This doesn't work :(
# SCALAR_TYPE = numba.typeof(SCALAR_DTYPE)
SCALAR_TYPE = numba.float64
def window_floor(idx, radius):
    """Lower bound of a window of ``radius`` around ``idx``, clamped at 0."""
    return idx - radius if radius <= idx else 0
def window_ceil(idx, ceil, radius):
    """Upper bound of a window of ``radius`` past ``idx``, clamped at ``ceil``."""
    bound = idx + radius
    return ceil if bound > ceil else bound
def distance(image, r0, c0, r1, c1):
    """Euclidean distance between the 3-channel pixels at (r0, c0) and (r1, c1)."""
    total = 0.0
    for channel in range(3):
        diff = image[r0, c0, channel] - image[r1, c1, channel]
        total += diff * diff
    return math.sqrt(total)
def pixel_distance(pixel1, pixel2):
    """Euclidean distance between two 3-channel pixel vectors."""
    total = 0.0
    for channel in range(3):
        diff = pixel1[channel] - pixel2[channel]
        total += diff * diff
    return math.sqrt(total)
def np_distance(pixel1, pixel2):
    """L2 norm of the difference of two pixel vectors (numpy variant)."""
    delta = pixel1 - pixel2
    return np.linalg.norm(delta, 2)
sqrt_3 = math.sqrt(3.0)
def g(d):
    """Map a pixel distance in [0, sqrt(3)] to an attack strength in [1, 0]."""
    scaled = d / sqrt_3
    return 1.0 - scaled
def np_g(x, y):
    """Attack strength between two pixel vectors based on their L2 distance."""
    d = np_distance(x, y)
    return 1.0 - d / sqrt_3
def kernel(image, state, state_next, window_radius):
    """Run one grow-cut automaton sweep over the whole image.

    Parameters
    ----------
    image : (H, W, 3) array of pixel values (assumed in [0, 1] — growcut
        converts with img_as_float).
    state : (H, W, 2) array holding (colony label, defense strength) per pixel.
    state_next : (H, W, 2) array that receives the updated state.
    window_radius : int, neighbourhood radius.

    Returns
    -------
    int
        Number of successful takeovers this sweep (a pixel attacked by
        several successively stronger neighbours is counted each time).

    Fix: the loops used Python-2-only ``xrange``, a NameError on Python 3
    (the module already uses ``print(..., end='')``). The window helpers and
    the module-level ``sqrt_3`` global are inlined so the jitted hot loop is
    self-contained; the bounds match window_floor/window_ceil exactly.
    """
    changes = 0
    height = image.shape[0]
    width = image.shape[1]
    sqrt3 = math.sqrt(3.0)  # maximum possible RGB distance
    for j in range(width):
        for i in range(height):
            winning_colony = state[i, j, 0]
            defense_strength = state[i, j, 1]
            j_lo = max(0, j - window_radius)
            j_hi = min(width, j + 1 + window_radius)
            i_lo = max(0, i - window_radius)
            i_hi = min(height, i + 1 + window_radius)
            for jj in range(j_lo, j_hi):
                for ii in range(i_lo, i_hi):
                    if (ii == i and jj == j):
                        continue
                    # squared RGB distance between (i, j) and (ii, jj)
                    d = image[i, j, 0] - image[ii, jj, 0]
                    s = d * d
                    for k in range(1, 3):
                        d = image[i, j, k] - image[ii, jj, k]
                        s += d * d
                    gval = 1.0 - math.sqrt(s) / sqrt3
                    attack_strength = gval * state[ii, jj, 1]
                    if attack_strength > defense_strength:
                        defense_strength = attack_strength
                        winning_colony = state[ii, jj, 0]
                        changes += 1
            state_next[i, j, 0] = winning_colony
            state_next[i, j, 1] = defense_strength
    return changes
def growcut(image, state, max_iter=20, window_size=3):
    """Grow-cut segmentation (Numba accelerated).

    Parameters
    ----------
    image : (M, N) ndarray
        Input image.
    state : (M, N, 2) ndarray
        Initial state, which stores (foreground/background, strength) for
        each pixel position or automaton. The strength represents the
        certainty of the state (e.g., 1 is a hard seed value that remains
        constant throughout segmentation).
    max_iter : int, optional
        The maximum number of automata iterations to allow. The segmentation
        may complete earlier if the state no longer varies.
    window_size : int, optional
        Size of the neighborhood window.

    Returns
    -------
    mask : ndarray
        Segmented image. A value of zero indicates background, one foreground.
    """
    image = img_as_float(image)
    window_radius = (window_size - 1) // 2
    changes = 1
    n = 0
    state_next = np.empty_like(state)
    while changes > 0 and n < max_iter:
        changes = 0
        n += 1
        changes = kernel(image, state, state_next, window_radius)
        # After this swap ``state`` holds the freshly computed result and
        # ``state_next`` the previous buffer (reused as scratch next round).
        state_next, state = state, state_next
        print('.', end='')
    print('')
    # Bug fix: return the buffer holding the *latest* state. The original
    # returned ``state_next``, which after the swap is one iteration stale
    # whenever the loop stops because of ``max_iter``.
    return state[:, :, 0]
def create_numba_funcs(scalar_type=SCALAR_TYPE):
    """Compile the pure-Python implementations with numba and attach the
    jitted versions to this module as ``_numba_*`` attributes.

    NOTE(review): uses the legacy numba API (``autojit``, explicit
    ``argtypes``/``restype``), which has been removed from modern numba.
    """
    this = sys.modules[__name__]
    # Array type signatures derived from the scalar type.
    pixel_type = scalar_type[:]
    image_type = scalar_type[:, :, :]
    state_type = scalar_type[:, :, :]
    this._numba_window_floor = jit(nopython=True,
                                   argtypes=[size_t, size_t],
                                   restype=size_t)(_py_window_floor)
    this._numba_window_ceil = jit(nopython=True,
                                  argtypes=[size_t, size_t, size_t],
                                  restype=size_t)(_py_window_ceil)
    this._numba_distance = jit(nopython=True,
                               argtypes=[image_type,
                                         size_t, size_t, size_t, size_t],
                               restype=scalar_type)(_py_distance)
    this._numba_np_distance = jit(nopython=False,
                                  argtypes=[pixel_type, pixel_type],
                                  restype=scalar_type)(_py_np_distance)
    this._numba_g = jit(nopython=True,
                        argtypes=[scalar_type],
                        restype=scalar_type)(_py_g)
    this._numba_np_g = jit(nopython=False,
                           argtypes=[pixel_type, pixel_type],
                           restype=scalar_type)(_py_np_g)
    # The kernel's types are inferred instead of declared explicitly.
    this._numba_kernel = autojit(nopython=True)(_py_kernel)
    # the below code does not work
    # this._numba_kernel = jit(nopython=False,
    #                          argtypes=[image_type,
    #                                    state_type,
    #                                    state_type,
    #                                    size_t],
    #                          restype=int_,
    #                          attack_strength=scalar_type,
    #                          defense_strength=scalar_type,
    #                          winning_colony=scalar_type)(_py_kernel)
def debug():
    """Rebind the public module functions to their pure-Python versions."""
    module = sys.modules[__name__]
    for name in ("window_floor", "window_ceil", "distance",
                 "np_distance", "g", "np_g", "kernel"):
        setattr(module, name, getattr(module, "_py_" + name))
def optimize():
    """Rebind the public module functions to their numba-compiled versions."""
    module = sys.modules[__name__]
    for name in ("window_floor", "window_ceil", "distance",
                 "np_distance", "g", "np_g", "kernel"):
        setattr(module, name, getattr(module, "_numba_" + name))
# Protected references to the pure-Python implementations, kept so that
# debug()/optimize() can rebind the public names back and forth.
_py_window_floor = window_floor
_py_window_ceil = window_ceil
_py_distance = distance
_py_np_distance = np_distance
_py_g = g
_py_np_g = np_g
_py_kernel = kernel
def test_window_floor_ceil():
    """Sanity checks for the window boundary helpers."""
    assert window_floor(4, 1) == 3
    assert window_floor(1, 4) == 0
    assert window_ceil(3, 3, 1) == 3
    assert window_ceil(4, 5, 1) == 5
def test_distance():
    """Check the index-based and the vector-based distance helpers."""
    img = np.zeros((2, 2, 3), dtype=SCALAR_DTYPE)
    img[0, 1] = [1, 1, 1]
    img[1, 0] = [0.5, 0.5, 0.5]
    assert distance(img, 0, 0, 0, 0) == 0.0
    assert abs(distance(img, 0, 0, 0, 1) - math.sqrt(3)) < 1e-15
    assert abs(distance(img, 0, 1, 1, 0) - math.sqrt(3 / 4)) < 1e-15
    black = np.asarray([0.0, 0.0, 0.0], dtype=SCALAR_DTYPE)
    white = np.asarray([1.0, 1.0, 1.0], dtype=SCALAR_DTYPE)
    grey = np.asarray([0.5, 0.5, 0.5], dtype=SCALAR_DTYPE)
    assert np_distance(black, black) == 0.0
    assert abs(np_distance(black, white) - math.sqrt(3)) < 1e-15
    assert abs(np_distance(white, grey) - math.sqrt(3 / 4)) < 1e-15
def test_g():
    """g() maps distance 0 -> 1.0, the maximum distance -> 0.0, half -> 0.5."""
    img = np.zeros((2, 2, 3), dtype=SCALAR_DTYPE)
    img[0, 1] = [1, 1, 1]
    img[1, 0] = [0.5, 0.5, 0.5]
    assert g(distance(img, 0, 0, 0, 0)) == 1.0
    assert abs(g(distance(img, 0, 0, 0, 1)) - 0) < 1e-15
    assert abs(g(distance(img, 0, 1, 1, 0)) - 0.5) < 1e-15
    black = np.asarray([0.0, 0.0, 0.0], dtype=SCALAR_DTYPE)
    white = np.asarray([1.0, 1.0, 1.0], dtype=SCALAR_DTYPE)
    grey = np.asarray([0.5, 0.5, 0.5], dtype=SCALAR_DTYPE)
    assert np_g(black, black) == 1.0
    assert abs(np_g(black, white) - 0) < 1e-15
    assert abs(np_g(white, grey) - 0.5) < 1e-15
def test_kernel():
    """One automaton sweep propagates the seeded colony through its window.

    Fix: the original asserted ``state_next[2, :] == 0`` twice (copy-paste
    duplicate); the second assertion now checks the last column instead.
    """
    image = np.zeros((3, 3, 3), dtype=SCALAR_DTYPE)
    state = np.zeros((3, 3, 2), dtype=SCALAR_DTYPE)
    state_next = np.empty_like(state)
    # colony 1 is strength 1 at position 0,0
    # colony 0 is strength 0 at all other positions
    state[0, 0, 0] = 1
    state[0, 0, 1] = 1
    # radius 1: colony 1 should propagate to its three neighbors
    changes = kernel(image, state, state_next, 1)
    assert(3 == changes)
    npt.assert_array_equal(state_next[0:2, 0:2], 1)
    npt.assert_array_equal(state_next[2, :], 0)
    npt.assert_array_equal(state_next[:, 2], 0)
    # radius 2: colony 1 should propagate to the entire image
    changes = kernel(image, state, state_next, 2)
    assert(8 == changes)
    npt.assert_array_equal(state_next, 1)
def test():
    """Run the whole suite against whatever implementations are bound."""
    for case in (test_window_floor_ceil, test_distance, test_g, test_kernel):
        case()
# Create the numba-jitted versions of the code (stored as _numba_* attributes).
create_numba_funcs()
if __name__ == "__main__":
    # always verify pure Python code first
    test()
    # then test optimized variants
    optimize()
    test()
    # replace default function calls with numba calls
optimize() | [
"numba.autojit",
"sys.exit",
"math.sqrt",
"skimage.img_as_float",
"numpy.asarray",
"numpy.zeros",
"numba.jit",
"numpy.empty_like",
"numpy.linalg.norm",
"numpy.testing.assert_array_equal"
] | [((1164, 1178), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (1173, 1178), False, 'import math\n'), ((880, 892), 'math.sqrt', 'math.sqrt', (['s'], {}), '(s)\n', (889, 892), False, 'import math\n'), ((1061, 1073), 'math.sqrt', 'math.sqrt', (['s'], {}), '(s)\n', (1070, 1073), False, 'import math\n'), ((1120, 1154), 'numpy.linalg.norm', 'np.linalg.norm', (['(pixel1 - pixel2)', '(2)'], {}), '(pixel1 - pixel2, 2)\n', (1134, 1154), True, 'import numpy as np\n'), ((3451, 3470), 'skimage.img_as_float', 'img_as_float', (['image'], {}), '(image)\n', (3463, 3470), False, 'from skimage import img_as_float\n'), ((3560, 3580), 'numpy.empty_like', 'np.empty_like', (['state'], {}), '(state)\n', (3573, 3580), True, 'import numpy as np\n'), ((6810, 6849), 'numpy.zeros', 'np.zeros', (['(2, 2, 3)'], {'dtype': 'SCALAR_DTYPE'}), '((2, 2, 3), dtype=SCALAR_DTYPE)\n', (6818, 6849), True, 'import numpy as np\n'), ((7109, 7156), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0]'], {'dtype': 'SCALAR_DTYPE'}), '([0.0, 0.0, 0.0], dtype=SCALAR_DTYPE)\n', (7119, 7156), True, 'import numpy as np\n'), ((7170, 7217), 'numpy.asarray', 'np.asarray', (['[1.0, 1.0, 1.0]'], {'dtype': 'SCALAR_DTYPE'}), '([1.0, 1.0, 1.0], dtype=SCALAR_DTYPE)\n', (7180, 7217), True, 'import numpy as np\n'), ((7231, 7278), 'numpy.asarray', 'np.asarray', (['[0.5, 0.5, 0.5]'], {'dtype': 'SCALAR_DTYPE'}), '([0.5, 0.5, 0.5], dtype=SCALAR_DTYPE)\n', (7241, 7278), True, 'import numpy as np\n'), ((7490, 7529), 'numpy.zeros', 'np.zeros', (['(2, 2, 3)'], {'dtype': 'SCALAR_DTYPE'}), '((2, 2, 3), dtype=SCALAR_DTYPE)\n', (7498, 7529), True, 'import numpy as np\n'), ((7776, 7823), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0]'], {'dtype': 'SCALAR_DTYPE'}), '([0.0, 0.0, 0.0], dtype=SCALAR_DTYPE)\n', (7786, 7823), True, 'import numpy as np\n'), ((7837, 7884), 'numpy.asarray', 'np.asarray', (['[1.0, 1.0, 1.0]'], {'dtype': 'SCALAR_DTYPE'}), '([1.0, 1.0, 1.0], dtype=SCALAR_DTYPE)\n', (7847, 7884), True, 'import numpy as 
np\n'), ((7898, 7945), 'numpy.asarray', 'np.asarray', (['[0.5, 0.5, 0.5]'], {'dtype': 'SCALAR_DTYPE'}), '([0.5, 0.5, 0.5], dtype=SCALAR_DTYPE)\n', (7908, 7945), True, 'import numpy as np\n'), ((8119, 8158), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {'dtype': 'SCALAR_DTYPE'}), '((3, 3, 3), dtype=SCALAR_DTYPE)\n', (8127, 8158), True, 'import numpy as np\n'), ((8171, 8210), 'numpy.zeros', 'np.zeros', (['(3, 3, 2)'], {'dtype': 'SCALAR_DTYPE'}), '((3, 3, 2), dtype=SCALAR_DTYPE)\n', (8179, 8210), True, 'import numpy as np\n'), ((8228, 8248), 'numpy.empty_like', 'np.empty_like', (['state'], {}), '(state)\n', (8241, 8248), True, 'import numpy as np\n'), ((8539, 8586), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['state_next[0:2, 0:2]', '(1)'], {}), '(state_next[0:2, 0:2], 1)\n', (8561, 8586), True, 'import numpy.testing as npt\n'), ((8591, 8634), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['state_next[2, :]', '(0)'], {}), '(state_next[2, :], 0)\n', (8613, 8634), True, 'import numpy.testing as npt\n'), ((8639, 8682), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['state_next[2, :]', '(0)'], {}), '(state_next[2, :], 0)\n', (8661, 8682), True, 'import numpy.testing as npt\n'), ((8826, 8863), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['state_next', '(1)'], {}), '(state_next, 1)\n', (8848, 8863), True, 'import numpy.testing as npt\n'), ((313, 323), 'sys.exit', 'sys.exit', ([], {}), '()\n', (321, 323), False, 'import sys\n'), ((4092, 4153), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'argtypes': '[size_t, size_t]', 'restype': 'size_t'}), '(nopython=True, argtypes=[size_t, size_t], restype=size_t)\n', (4095, 4153), False, 'from numba import jit, autojit, size_t\n'), ((4273, 4342), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'argtypes': '[size_t, size_t, size_t]', 'restype': 'size_t'}), '(nopython=True, argtypes=[size_t, size_t, size_t], restype=size_t)\n', (4276, 4342), False, 'from numba import 
jit, autojit, size_t\n'), ((4456, 4554), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'argtypes': '[image_type, size_t, size_t, size_t, size_t]', 'restype': 'scalar_type'}), '(nopython=True, argtypes=[image_type, size_t, size_t, size_t, size_t],\n restype=scalar_type)\n', (4459, 4554), False, 'from numba import jit, autojit, size_t\n'), ((4699, 4774), 'numba.jit', 'jit', ([], {'nopython': '(False)', 'argtypes': '[pixel_type, pixel_type]', 'restype': 'scalar_type'}), '(nopython=False, argtypes=[pixel_type, pixel_type], restype=scalar_type)\n', (4702, 4774), False, 'from numba import jit, autojit, size_t\n'), ((4881, 4944), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'argtypes': '[scalar_type]', 'restype': 'scalar_type'}), '(nopython=True, argtypes=[scalar_type], restype=scalar_type)\n', (4884, 4944), False, 'from numba import jit, autojit, size_t\n'), ((5024, 5099), 'numba.jit', 'jit', ([], {'nopython': '(False)', 'argtypes': '[pixel_type, pixel_type]', 'restype': 'scalar_type'}), '(nopython=False, argtypes=[pixel_type, pixel_type], restype=scalar_type)\n', (5027, 5099), False, 'from numba import jit, autojit, size_t\n'), ((5190, 5212), 'numba.autojit', 'autojit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5197, 5212), False, 'from numba import jit, autojit, size_t\n'), ((6974, 6986), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (6983, 6986), False, 'import math\n'), ((7041, 7057), 'math.sqrt', 'math.sqrt', (['(3 / 4)'], {}), '(3 / 4)\n', (7050, 7057), False, 'import math\n'), ((7341, 7353), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (7350, 7353), False, 'import math\n'), ((7408, 7424), 'math.sqrt', 'math.sqrt', (['(3 / 4)'], {}), '(3 / 4)\n', (7417, 7424), False, 'import math\n'), ((2169, 2181), 'math.sqrt', 'math.sqrt', (['s'], {}), '(s)\n', (2178, 2181), False, 'import math\n')] |
import PIL
import numpy as np
from covid.metrics import VQMTMetrics
from covid.video_reader import PlaybackPosition, FfmsReader, NonBlockingPairReader
def test_playback():
pos = PlaybackPosition(10, 100)
assert pos.get_playback_frame_position() == 9
pos.shift_playback_frame_position(-100)
assert pos.get_playback_frame_position() == 0
pos.shift_playback_frame_position(3)
assert pos.get_playback_frame_position() == 3
pos.set_playback_frame_position(10)
assert pos.get_playback_frame_position() == 9
def test_video_reader():
    """Check clip length, encoded dimensions, frame shape, and resizing."""
    rdr = FfmsReader("samples/foreman_crf30_short.mp4")
    # CIF foreman clip: 210 frames at 352x288.
    assert (rdr.get_length(), rdr.enc_width, rdr.enc_height) == (210, 352, 288)
    first = rdr.read_frame(0, None)
    # First element of the returned tuple is an HxWx3 frame.
    assert first[0].shape == (rdr.enc_height, rdr.enc_width, 3)
    # After resizing, the larger frame dimension matches the request.
    rdr.update_video_size((600, 600))
    assert max(rdr.read_frame(0, None)[0].shape) == 600
def test_threaded():
    """End-to-end check of NonBlockingPairReader compositing and metrics.

    Loads a left clip alone, verifies the right half of the composed frame is
    black, then loads a right clip, switches composer modes, and checks that
    the right half now carries picture content and that PSNR-Y lands in the
    expected band.
    """
    with NonBlockingPairReader("sbs") as main_thread:
        main_thread.create_left_reader("samples/foreman_crf30_short.mp4")
        main_thread.update_video_size((600, 600))
        frame, delta = main_thread.get_next_frame(False, (600, 600))
        print(frame, delta)
        assert isinstance(frame, PIL.Image.Image)
        frame = np.array(frame)
        # Only the left reader is loaded, so the rightmost 20% of the
        # composed frame stays (near) black.
        assert (frame[:, int(frame.shape[1] * 0.8) :, :]).max() < 0.01
        main_thread.composer_type = "chess"
        main_thread.create_right_reader("samples/foreman_crf40_short.mp4")
        main_thread.update_video_size((800, 800))
        # Drop the cached read so the next get_next_frame really re-reads.
        del main_thread.last_cmd_data["read_frame"]
        frame, delta = main_thread.get_next_frame(False, (800, 800))
        main_thread.composer_type = "split"
        # Invalidate the cache again after switching composer mode.
        del main_thread.last_cmd_data["read_frame"]
        frame, delta = main_thread.get_next_frame(False, (800, 800))
        assert isinstance(frame, PIL.Image.Image)
        frame = np.array(frame)
        # With the right reader loaded, the right side is no longer black.
        assert (frame[:, int(frame.shape[1] * 0.8) :, :]).max() > 0.01
        main_thread.metrics = [("PSNR, Y", VQMTMetrics.PSNR_Y)]
        metrics = main_thread.get_metrics(0, 0)[0][1]
        # PSNR-Y between the crf30/crf40 encodes is a known narrow band.
        assert 13 < metrics[0] < metrics[1] < 14
        assert main_thread.has_no_tasks()
        # main_thread.close()
# Allow running this test module directly, without pytest.
if __name__ == "__main__":
    test_threaded()
| [
"covid.video_reader.FfmsReader",
"numpy.array",
"covid.video_reader.PlaybackPosition",
"covid.video_reader.NonBlockingPairReader"
] | [((185, 210), 'covid.video_reader.PlaybackPosition', 'PlaybackPosition', (['(10)', '(100)'], {}), '(10, 100)\n', (201, 210), False, 'from covid.video_reader import PlaybackPosition, FfmsReader, NonBlockingPairReader\n'), ((579, 624), 'covid.video_reader.FfmsReader', 'FfmsReader', (['"""samples/foreman_crf30_short.mp4"""'], {}), "('samples/foreman_crf30_short.mp4')\n", (589, 624), False, 'from covid.video_reader import PlaybackPosition, FfmsReader, NonBlockingPairReader\n'), ((956, 984), 'covid.video_reader.NonBlockingPairReader', 'NonBlockingPairReader', (['"""sbs"""'], {}), "('sbs')\n", (977, 984), False, 'from covid.video_reader import PlaybackPosition, FfmsReader, NonBlockingPairReader\n'), ((1288, 1303), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (1296, 1303), True, 'import numpy as np\n'), ((1897, 1912), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (1905, 1912), True, 'import numpy as np\n')] |
from typing import Any, Optional
import numpy as np
from joblib import Parallel, delayed
from budgetcb.base import MAB
from budgetcb.helper import _get_ALP_predict, _LinUCBnTSSingle, _sherman_morrison_update
class LinUCB(MAB):
    """Budget-constrained LinUCB contextual bandit.

    Keeps one ridge-regression model per arm (inverse design matrix ``AaI``
    and response vector ``ba``), plays the UCB-maximising arm, and rations
    the resource budget by only acting with probability equal to the average
    remaining budget per round.

    References:
        Li, Lihong, et al. "A contextual-bandit approach to personalized
        news article recommendation." Proceedings of the 19th international
        conference on World wide web. 2010.
    """

    def __init__(
        self, ndims: int, alpha: float, narms: int, T: int, B: int, dummy_arm: int
    ):
        """
        Args:
            ndims (int): number of context dimensions
            alpha (float): scale of the UCB exploration bonus added to each
                arm's reward estimate
            narms (int): number of arms
            T (int): total global time budget
            B (int): total resource budget
            dummy_arm (int): the arm that does not consume any resource
        """
        super().__init__(narms, T, B, dummy_arm)
        self.ndims = ndims
        self.alpha = alpha
        self.b_tau = self.B  # remaining resource budget
        self.tau = self.T  # remaining time budget
        # Per-arm regression state: A^-1 starts as the identity, b as zeros.
        self.AaI = {arm: np.eye(self.ndims) for arm in range(self.narms)}
        self.ba = {arm: np.zeros((self.ndims, 1)) for arm in range(self.narms)}

    def play(self, tround: int, context: np.ndarray) -> int:
        """
        Args:
            tround (int): the index of rounds, starting from 0
            context (np.ndarray): contexts array in the round
        Returns: the chosen action
        """
        tau = self.T - tround
        avg_remaining_budget = float(self.b_tau) / tau
        if self.b_tau <= 0:
            return self.dummy_arm  # resource is exhausted
        ucb_scores = np.zeros(self.narms)
        for arm in range(self.narms):
            theta = np.dot(self.AaI[arm], self.ba[arm])
            spread = np.sqrt(
                np.dot(np.dot(context.T, self.AaI[arm]), context)
            )
            ucb_scores[arm] = np.dot(theta.T, context) + self.alpha * spread
        # Break ties among maximal scores uniformly at random.
        best_arm = np.random.choice(np.where(ucb_scores == max(ucb_scores))[0])
        # Only act (and spend budget) with probability equal to the average
        # remaining budget per round; otherwise fall back to the dummy arm.
        if np.random.uniform() >= avg_remaining_budget:
            return self.dummy_arm
        if best_arm != self.dummy_arm:
            self.b_tau -= 1
        return best_arm

    def update(
        self,
        arm: int,
        reward: float,
        context: Optional[np.ndarray] = None,
        tround: Optional[int] = None,
    ) -> Any:
        """Fold one (context, reward) observation into the chosen arm's model."""
        if context is None:
            raise ValueError("Must supply with context in LinUCB class")
        # Rank-one update of the inverse design matrix (Sherman-Morrison).
        self.AaI[arm] = _sherman_morrison_update(self.AaI[arm], context)
        self.ba[arm] += reward * context
        return self
class UcbAlp(MAB):
    """
    Constrained UCB-ALP

    Clusters users with ``gmm``, keeps per-(cluster, arm) UCB statistics,
    and rations the resource budget via an adaptive linear program.

    References:
        Wu, Huasen, et al. "Algorithms with logarithmic or sublinear regret for constrained contextual bandits."
        Advances in Neural Information Processing Systems. 2015.
    """

    def __init__(
        self, narms: int, T: int, B: int, pai: np.ndarray, gmm: Any, dummy_arm: int
    ):
        """
        Args:
            narms (int): number of arms
            T (int): total global time budget
            B (int): total resource budget
            pai (numpy.ndarray): user class distribution
            gmm (any object): any fitted clustering object but recommend using the Gaussian Mixture Model
            dummy_arm (int): the arm that does not consume any resource
        """
        super().__init__(narms, T, B, dummy_arm)
        self.pai = pai
        self.gmm = gmm
        self.J = len(self.pai)  # number of user clusters
        self.b_tau = self.B  # remaining resource budget
        self.tau = self.T  # remaining time budget
        # Per-(cluster, arm) pull counts, empirical means, and UCB estimates.
        self.C = np.zeros((self.J, self.narms))
        self.mu_bar = np.zeros((self.J, self.narms))
        self.mu_hat = np.ones((self.J, self.narms))
        self.mu_star = np.ones(self.J)  # best UCB estimate per cluster

    def play(self, tround: int, context: np.ndarray) -> int:
        """
        Args:
            tround (int): the index of rounds, starting from 0
            context (np.ndarray): contexts array in the round
        Returns: the chosen action
        """
        tau = self.T - tround  # update time budget
        avg_remaining_budget = float(self.b_tau) / tau
        # compute the user class
        j = self.gmm.predict(context.T)[0]
        score_max = np.max(self.mu_hat[j, :])
        best_arm = np.random.choice(
            [i for i, v in enumerate(self.mu_hat[j, :]) if v == score_max]
        )  # tie-breaking
        self.mu_star[j] = score_max
        if self.b_tau > 0:
            # Solve the adaptive LP for per-cluster action probabilities.
            alp = _get_ALP_predict(
                self.mu_star, np.array(self.pai), avg_remaining_budget
            )
            probs_of_action = alp.x  # type: ignore
            rand = np.random.uniform()
            decision = ["action" if rand < p else "skip" for p in probs_of_action]
            if decision[j] == "skip":
                return self.dummy_arm
            else:
                if best_arm != self.dummy_arm:
                    self.b_tau -= 1
                return best_arm
        else:
            return self.dummy_arm  # resource is exhausted

    def update(
        self,
        arm: int,
        reward: float,
        context: Optional[np.ndarray] = None,
        tround: Optional[int] = None,
    ) -> "UcbAlp":
        """
        Single update of the per-(cluster, arm) statistics.

        Raises:
            ValueError: if ``context`` or ``tround`` is not supplied.
        """
        if context is None:
            raise ValueError("Must supply with context in UcbAlp class")
        if tround is None:
            raise ValueError("Must supply tround in UcbAlp class")
        j = self.gmm.predict(context.T)[0]
        self.C[j, arm] += 1
        # Incremental empirical mean: mu_bar <- mu_bar + (r - mu_bar) / C.
        # (The previous formula divided (mu_bar + r) by C, which is not a
        # running mean for C > 1.)
        self.mu_bar[j, arm] += (reward - self.mu_bar[j, arm]) / self.C[j, arm]
        # UCB estimate with the standard confidence radius sqrt(log t / (2C));
        # the radius shrinks as the (cluster, arm) pair accumulates pulls.
        self.mu_hat[j, arm] = self.mu_bar[j, arm] + np.sqrt(
            np.log(tround + 1) / (2.0 * self.C[j, arm])
        )
        best_arm = np.argmax(self.mu_hat[j, :])
        self.mu_star[j] = self.mu_hat[j, best_arm]
        return self
class HATCH(MAB):
    """
    Hierarchical Adaptive Contextual bandit: one LinUCB/TS oracle per
    (cluster, arm) pair plus one per cluster, with ALP-based budget rationing.

    References:
        Yang, Mengyue, et al. "Hierarchical Adaptive Contextual Bandits for Resource Constraint based Recommendation."
        Proceedings of The Web Conference 2020. 2020.
    """

    def __init__(
        self,
        narms: int,
        gmm: any,  # type: ignore
        J: int,
        pai: np.ndarray,
        T: int,
        B: int,
        context_dic: dict,
        alpha: float,
        njobs: int = 1,
        dummy_arm: int = 0,
    ):
        """
        Args:
            narms (int): number of arms
            gmm (any object): any fitted clustering object but recommend using the Gaussian Mixture Model
            J (int): number of segments
            pai (numpy.ndarray): user class distribution
            T (int): total global time budget
            B (int): total resource budget
            context_dic (dict): user class center learned by the clustering object
            alpha: (float): hyper-parameter in LinUCB,
                            scaling the UCB associated with the models reward estimate
                            within each arm based on the data matrices.
            njobs (int): parallel computing parameter, default to 1
            dummy_arm (int): the arm that does not consume any resource
        """
        super().__init__(narms, T, B, dummy_arm)
        self.pai = np.array(pai)
        self.context_dic = context_dic
        self.gmm = gmm
        # Context dimensionality taken from the dummy arm's cluster center.
        self.context_dim = len(context_dic.get(dummy_arm))  # type: ignore
        self._add_common_lin(alpha, narms, njobs, J, self.context_dim)  # type: ignore
        self.ustar = [0 for i in range(J)]
        self.b_tau = self.B  # remaining resource budget
        self.tau = self.T  # remaining time budget
        self.alpha = alpha

    def _add_common_lin(
        self, alpha: float, narms: int, njobs: int, J: int, context_dim: dict
    ) -> None:
        # Shared initialisation: one oracle per (cluster, arm) in _oraclesa,
        # one per cluster in _oraclesj, and uj as the per-cluster value estimate.
        if isinstance(alpha, int):
            alpha = float(alpha)
        assert isinstance(alpha, float)
        self.njobs = njobs
        self.alpha = alpha
        self.narms = narms
        self.J = J
        self._oraclesa = [
            [_LinUCBnTSSingle(1.0, context_dim) for n in range(narms)] for i in range(J)  # type: ignore
        ]
        self._oraclesj = [_LinUCBnTSSingle(1.0, context_dim) for n in range(J)]  # type: ignore
        self.uj = np.array([float(1) for i in range(self.J)])

    def play(self, tround: int, context: np.ndarray) -> int:
        """Choose an arm: per-cluster oracle scores gated by the ALP solution."""
        tau = self.T - tround
        avg_remaining_budget = float(self.b_tau) / tau
        pred = np.zeros(self.narms)
        # j is the array of predicted cluster labels for this context.
        j = self.gmm.predict(context.T)
        for choice in range(self.narms):
            pred[choice] = self._oraclesa[j[0]][choice].predict(context.T)
        best_arm = np.argmax(np.array([pred]), axis=1)[0]
        if self.b_tau > 0:
            # ALP gives the probability of taking an action in each cluster.
            acc_percentage = _get_ALP_predict(
                self.uj, np.array(self.pai), avg_remaining_budget
            )
            if np.random.uniform(0, 1) > acc_percentage["x"][j]:  # skip
                return self.dummy_arm
            else:  # retain
                if best_arm != self.dummy_arm:
                    self.b_tau -= 1
                return best_arm
        else:
            return self.dummy_arm

    def update(
        self,
        arm: int,
        reward: float,
        context: Optional[np.ndarray] = None,
        tround: Optional[int] = None,
    ) -> "HATCH":  # type: ignore
        # NOTE(review): annotated -> "HATCH" but no value is returned (falls
        # through to None); callers must not chain on this — confirm intent.
        # NOTE(review): here `arm` and `reward` appear to be arrays of a batch
        # (they are masked per-row in _update_single), not the scalar int/float
        # the signature suggests — confirm against callers.
        self.ndim = context.shape[1]  # type: ignore
        # Map each row's predicted cluster label to its cluster-center context.
        Xj = np.array(
            list(map(lambda x: self.context_dic[x], list(self.gmm.predict(context))))
        )
        # sharedmem: workers mutate the oracle objects of this instance in place.
        Parallel(n_jobs=self.njobs, verbose=0, require="sharedmem")(
            delayed(self._update_single)(j, context, Xj, arm, reward)
            for j in range(self.J)
        )
        # Refresh each cluster's value estimate from its cluster-center context.
        for j in range(self.J):
            self.uj[j] = self._oraclesj[j].predict(np.array([self.context_dic[j]]))

    def _update_single(
        self,
        j: int,
        context: np.ndarray,
        Xj: np.ndarray,
        arm: np.ndarray,
        reward: np.ndarray,
    ) -> None:
        # Fit the cluster-level oracle on the rows belonging to cluster j,
        # then each (cluster, arm) oracle on the rows where that arm was played.
        xj = self.gmm.predict(context)
        this_context = xj == j
        self._oraclesj[j].fit(
            Xj[this_context, :], reward[this_context].astype("float64")
        )
        for choice in range(self.narms):
            this_action = arm == choice
            self._oraclesa[j][choice].fit(
                context[this_action, :], reward[this_action].astype("float64")
            )
| [
"numpy.eye",
"numpy.ones",
"numpy.log",
"numpy.argmax",
"numpy.max",
"joblib.Parallel",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"joblib.delayed",
"numpy.random.uniform",
"budgetcb.helper._sherman_morrison_update",
"budgetcb.helper._LinUCBnTSSingle"
] | [((3153, 3201), 'budgetcb.helper._sherman_morrison_update', '_sherman_morrison_update', (['self.AaI[arm]', 'context'], {}), '(self.AaI[arm], context)\n', (3177, 3201), False, 'from budgetcb.helper import _get_ALP_predict, _LinUCBnTSSingle, _sherman_morrison_update\n'), ((4270, 4300), 'numpy.zeros', 'np.zeros', (['(self.J, self.narms)'], {}), '((self.J, self.narms))\n', (4278, 4300), True, 'import numpy as np\n'), ((4323, 4353), 'numpy.zeros', 'np.zeros', (['(self.J, self.narms)'], {}), '((self.J, self.narms))\n', (4331, 4353), True, 'import numpy as np\n'), ((4376, 4405), 'numpy.ones', 'np.ones', (['(self.J, self.narms)'], {}), '((self.J, self.narms))\n', (4383, 4405), True, 'import numpy as np\n'), ((4429, 4444), 'numpy.ones', 'np.ones', (['self.J'], {}), '(self.J)\n', (4436, 4444), True, 'import numpy as np\n'), ((4909, 4934), 'numpy.max', 'np.max', (['self.mu_hat[j, :]'], {}), '(self.mu_hat[j, :])\n', (4915, 4934), True, 'import numpy as np\n'), ((6385, 6413), 'numpy.argmax', 'np.argmax', (['self.mu_hat[j, :]'], {}), '(self.mu_hat[j, :])\n', (6394, 6413), True, 'import numpy as np\n'), ((7856, 7869), 'numpy.array', 'np.array', (['pai'], {}), '(pai)\n', (7864, 7869), True, 'import numpy as np\n'), ((9012, 9032), 'numpy.zeros', 'np.zeros', (['self.narms'], {}), '(self.narms)\n', (9020, 9032), True, 'import numpy as np\n'), ((1468, 1486), 'numpy.eye', 'np.eye', (['self.ndims'], {}), '(self.ndims)\n', (1474, 1486), True, 'import numpy as np\n'), ((1514, 1539), 'numpy.zeros', 'np.zeros', (['(self.ndims, 1)'], {}), '((self.ndims, 1))\n', (1522, 1539), True, 'import numpy as np\n'), ((1931, 1951), 'numpy.zeros', 'np.zeros', (['self.narms'], {}), '(self.narms)\n', (1939, 1951), True, 'import numpy as np\n'), ((2494, 2513), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2511, 2513), True, 'import numpy as np\n'), ((5364, 5383), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5381, 5383), True, 'import numpy as np\n'), ((8716, 8750), 
'budgetcb.helper._LinUCBnTSSingle', '_LinUCBnTSSingle', (['(1.0)', 'context_dim'], {}), '(1.0, context_dim)\n', (8732, 8750), False, 'from budgetcb.helper import _get_ALP_predict, _LinUCBnTSSingle, _sherman_morrison_update\n'), ((10080, 10139), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.njobs', 'verbose': '(0)', 'require': '"""sharedmem"""'}), "(n_jobs=self.njobs, verbose=0, require='sharedmem')\n", (10088, 10139), False, 'from joblib import Parallel, delayed\n'), ((2019, 2054), 'numpy.dot', 'np.dot', (['self.AaI[arm]', 'self.ba[arm]'], {}), '(self.AaI[arm], self.ba[arm])\n', (2025, 2054), True, 'import numpy as np\n'), ((5204, 5222), 'numpy.array', 'np.array', (['self.pai'], {}), '(self.pai)\n', (5212, 5222), True, 'import numpy as np\n'), ((8588, 8622), 'budgetcb.helper._LinUCBnTSSingle', '_LinUCBnTSSingle', (['(1.0)', 'context_dim'], {}), '(1.0, context_dim)\n', (8604, 8622), False, 'from budgetcb.helper import _get_ALP_predict, _LinUCBnTSSingle, _sherman_morrison_update\n'), ((9220, 9236), 'numpy.array', 'np.array', (['[pred]'], {}), '([pred])\n', (9228, 9236), True, 'import numpy as np\n'), ((9349, 9367), 'numpy.array', 'np.array', (['self.pai'], {}), '(self.pai)\n', (9357, 9367), True, 'import numpy as np\n'), ((9420, 9443), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (9437, 9443), True, 'import numpy as np\n'), ((10340, 10371), 'numpy.array', 'np.array', (['[self.context_dic[j]]'], {}), '([self.context_dic[j]])\n', (10348, 10371), True, 'import numpy as np\n'), ((2214, 2238), 'numpy.dot', 'np.dot', (['theta.T', 'context'], {}), '(theta.T, context)\n', (2220, 2238), True, 'import numpy as np\n'), ((10153, 10181), 'joblib.delayed', 'delayed', (['self._update_single'], {}), '(self._update_single)\n', (10160, 10181), False, 'from joblib import Parallel, delayed\n'), ((2128, 2160), 'numpy.dot', 'np.dot', (['context.T', 'self.AaI[arm]'], {}), '(context.T, self.AaI[arm])\n', (2134, 2160), True, 'import numpy as np\n'), 
((6299, 6317), 'numpy.log', 'np.log', (['(tround + 1)'], {}), '(tround + 1)\n', (6305, 6317), True, 'import numpy as np\n')] |
import numpy as np
import sys, os, os.path, time, gc
from astropy.table import Table, vstack, join
import matplotlib.pyplot as plt
from astropy import units as u
from scipy.optimize import curve_fit, minimize
from astropy.time import Time
import astropy.coordinates as coords
from astropy.stats import sigma_clip
from matplotlib.offsetbox import AnchoredText
from astroquery.irsa import Irsa
import emcee
from scipy.stats import norm
import matplotlib.gridspec as gridspec
import matplotlib
import Neighbor_Offsets as ne
import Register_Frames as reg
# Set a few defaults
# STIX fonts so math text matches the LaTeX-rendered body text.
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
# IRSA query settings: no row limit, generous timeout for large cone searches.
Irsa.ROW_LIMIT = -1
Irsa.TIMEOUT = 60*10 # 10 minutes
plt.rc('xtick', labelsize=8)
plt.rc('ytick', labelsize=8)
plt.rc('text', usetex=True)
plt.rc('legend', fontsize=7)
plt.rc('axes', labelsize=10)
plt.rc('axes', titlesize=10)
# Set some constants
d2a = 3600.  # degrees -> arcseconds
d2ma = 3600000.  # degrees -> milliarcseconds
d2y = 1/365.25  # days -> years
###########################################################################################################
# Module-level state shared with the interactive matplotlib click handlers below.
clickpoints = []  # [x, y] pairs clicked in the RA/Dec bounding-box figure
clickpoints2 = []  # [x, y] pairs clicked in the magnitude-range figure
def onclick(event):
    """Click handler: collect two corners of an RA/Dec bounding box.

    Each click is stored in the module-level ``clickpoints`` list and marked
    with dotted cross-hairs; after the second click the selected rectangle is
    shaded and the figure closes itself.
    """
    global clickpoints
    clickpoints.append([event.xdata, event.ydata])
    plt.axvline(event.xdata, c='r', ls=':')
    plt.axhline(event.ydata, c='r', ls=':')
    plt.draw()
    if len(clickpoints) != 2:
        return
    print('Closing figure')
    xs, ys = zip(*clickpoints)
    ra_lo, ra_hi = np.min(xs), np.max(xs)
    dec_lo, dec_hi = np.min(ys), np.max(ys)
    # Shade the selected box, pause briefly so the user sees it, then close.
    plt.fill_between([ra_lo, ra_hi], [dec_lo, dec_lo], [dec_hi, dec_hi], color='0.5', alpha=0.5, zorder=-100)
    plt.draw()
    plt.pause(2)
    plt.close('all')
def onclick2(event):
    """Click handler: collect two x-positions bounding a magnitude range.

    Each click is stored in the module-level ``clickpoints2`` list and marked
    with a dotted vertical line; after the second click the selected span is
    shaded and the figure closes itself.
    """
    global clickpoints2
    clickpoints2.append([event.xdata, event.ydata])
    plt.axvline(event.xdata, c='r', ls=':')
    plt.draw()
    if len(clickpoints2) != 2:
        return
    print('Closing figure')
    # Shade the selected span, pause briefly so the user sees it, then close.
    plt.axvspan(clickpoints2[0][0], clickpoints2[1][0], color='0.5', alpha=0.5, zorder=-100)
    plt.draw()
    plt.pause(2)
    plt.close('all')
def onclickclose(event):
    """Close every open figure on any mouse-button click."""
    if event.button:
        plt.close('all')
###########################################################################################################
def AstrometryFunc(x, Delta1, Delta2, PMra, PMdec, pi, JPL=True, RA=True, DEC=True):
    """Astrometric model: constant offset + proper motion + parallax.

    Args:
        x: tuple (ras, decs, mjds) of RA/Dec (arcsec-scaled degrees) and epochs
        Delta1, Delta2: constant offsets in RA and Dec
        PMra, PMdec: proper motions (per year)
        pi: parallax amplitude
        JPL: if True, rescale the barycentric vector (presumably km -> AU
             for the JPL DE430 ephemeris — confirm)
        RA, DEC: select which coordinate(s) to return

    Returns:
        The model RA values, Dec values, or both concatenated.
    """
    ras, decs, mjds = x
    years = (mjds - mjds[0]) * d2y
    bary0 = coords.get_body_barycentric('earth', Time(mjds, format='mjd'))
    bary = bary0 / 1.496e8 if JPL else bary0
    # Precompute the trig terms once; ras/decs arrive multiplied by d2a.
    ra_rad = ras / d2a * np.pi / 180.
    dec_rad = decs / d2a * np.pi / 180.
    sin_ra, cos_ra = np.sin(ra_rad), np.cos(ra_rad)
    sin_dec, cos_dec = np.sin(dec_rad), np.cos(dec_rad)
    # Classical parallax factors in RA and Dec.
    Fac1 = bary.x * sin_ra - bary.y * cos_ra
    Fac2 = (bary.x * cos_ra * sin_dec
            + bary.y * sin_ra * sin_dec
            - bary.z * cos_dec)
    RAsend = Delta1 + PMra * years + pi * Fac1.value
    DECsend = Delta2 + PMdec * years + pi * Fac2.value
    if RA == True and DEC == False:
        return RAsend
    if RA == False and DEC == True:
        return DECsend
    return np.concatenate([RAsend, DECsend]).flatten()
###########################################################################################################
def MeasureParallax(Name='JohnDoe', radecstr=None, ra0=None, dec0=None, radius=10,
PLOT=True, method='mcmc', savechain=True, JPL=True, Register=False, Calibrate=True,
AllowUpperLimits=False, sigma=3, removeSingles=False, **kwargs):
'''
Required:
Name : name of the object. Used for directory structure
radecstr : target coordinates as a string in hmsdms
OR
ra0, dec0 : target coordinates as decimal degrees
radius : search radius for target object in arcsec
Optional:
PLOT : keyword to set for plotting (default = True)
method : keyword to set for fitting method. Currently only 'mcmc' using the emcee is available
savechain : keyword to set for saving the final MCMC chain (default = True)
JPL : keyword to set to use the JPL ephemeris (default = True)
Register : keyword to set for registering within a single epoch (default = False)
Calibrate : keyword to set for registering each epoch to the first epoch (default = True)
AllowUpperLimits : keyword to set for allowing astrometry from upper limit magnitude measurements (default = False)
sigma : keyword for the sigma clipping value (default = 3)
removeSingles : remove epochs that only have a single frame (observation)
'''
# Make directories for the plots and results
name = Name.replace('$','').replace(' ','').replace('.','')
if not os.path.exists('%s/Plots'%name):
os.makedirs('%s/Plots'%name)
if not os.path.exists('%s/Results'%name):
os.makedirs('%s/Results'%name)
# Get the object
if radecstr != None:
t1 = Irsa.query_region(coords.SkyCoord(radecstr, unit=(u.deg,u.deg), frame='icrs'),
catalog="allsky_4band_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
t2 = Irsa.query_region(coords.SkyCoord(radecstr, unit=(u.deg,u.deg), frame='icrs'),
catalog="allsky_3band_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
if len(t2) == 0:
t2 = Irsa.query_region(coords.SkyCoord(radecstr, unit=(u.deg,u.deg), frame='icrs'),
catalog="allsky_2band_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
t3 = Irsa.query_region(coords.SkyCoord(radecstr, unit=(u.deg,u.deg), frame='icrs'),
catalog="neowiser_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
elif ra0 != None and dec0 != None:
t1 = Irsa.query_region(coords.SkyCoord(ra0, dec0, unit=(u.deg,u.deg), frame='icrs'),
catalog="allsky_4band_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
t2 = Irsa.query_region(coords.SkyCoord(ra0, dec0, unit=(u.deg,u.deg), frame='icrs'),
catalog="allsky_3band_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
if len(t2) == 0:
t2 = Irsa.query_region(coords.SkyCoord(ra0, dec0, unit=(u.deg,u.deg), frame='icrs'),
catalog="allsky_2band_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
t3 = Irsa.query_region(coords.SkyCoord(ra0, dec0, unit=(u.deg,u.deg), frame='icrs'),
catalog="neowiser_p1bs_psd", spatial="Cone", radius=radius * u.arcsec)
else:
raise ValueError("Need to supply either radecstr or ra0 and dec0") # Need some coords
t00 = vstack([t1, t2], join_type='inner')
t0 = vstack([t00, t3], join_type='inner')
index00 = np.argsort(t0['mjd'])
t = t0[index00]
if JPL: # Use the JPL DE430 ephemeris
from astropy.coordinates import solar_system_ephemeris
solar_system_ephemeris.set('jpl')
######################################################################################################
def AstrometryFunc0(x, Delta1, Delta2, PMra, PMdec, pi):
ras, decs, mjds = x
years = (mjds - mjds[0])*d2y
bary0 = coords.get_body_barycentric('earth', Time(mjds, format='mjd'))
if JPL:
bary = bary0 / 1.496e8
else:
bary = bary0
# Parallax factors
Fac1 = (bary.x * np.sin(ras/d2a*np.pi/180.) - bary.y * np.cos(ras/d2a *np.pi/180.) )
Fac2 = bary.x * np.cos(ras/d2a *np.pi/180.) * np.sin(decs/d2a *np.pi/180.) + \
bary.y * np.sin(ras/d2a *np.pi/180.) * np.sin(decs/d2a *np.pi/180.) - \
bary.z * np.cos(decs/d2a *np.pi/180.)
RAsend = Delta1 + PMra * years + pi * Fac1.value
DECsend = Delta2 + PMdec * years + pi * Fac2.value
if RA == True and DEC == False:
return RAsend
elif RA == False and DEC == True:
return DECsend
else:
return np.concatenate( [RAsend, DECsend]).flatten()
######################################################################################################
# Plot the RA, DEC, and MJDs
fig0 = plt.figure(1, figsize=(7,6))
ax0 = fig0.add_subplot(211)
ax00 = fig0.add_subplot(212)
ax0.scatter(t1['mjd'], t1['ra']*d2a, c='r', alpha=0.5)
ax0.scatter(t2['mjd'], t2['ra']*d2a, c='g', alpha=0.5)
ax0.scatter(t3['mjd'], t3['ra']*d2a, c='b', alpha=0.5)
ax00.scatter(t1['mjd'], t1['dec']*d2a, c='r', alpha=0.5)
ax00.scatter(t2['mjd'], t2['dec']*d2a, c='g', alpha=0.5)
ax00.scatter(t3['mjd'], t3['dec']*d2a, c='b', alpha=0.5)
ax0.set_xlabel('MJD')
ax0.set_ylabel('R.A. (arcsec)')
ax00.set_xlabel('MJD')
ax00.set_ylabel('Dec. (arcsec)')
if PLOT:
fig = plt.figure(2, figsize=(7,6))
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.scatter(t['mjd'], t['ra']*d2a, c='r', alpha=0.5)
for j in np.arange(-1, 30, 2):
ax1.axvline(np.min(t['mjd']) + 365.25/4*j , c='k', ls='--')
ax2.scatter(t['mjd'], t['dec']*d2a, c='r', alpha=0.5)
for j in np.arange(-1, 30, 2):
ax2.axvline(np.min(t['mjd']) + 365.25/4*j , c='k', ls='--')
fig3, ax3 = plt.subplots()
cid = fig3.canvas.mpl_connect('button_press_event', onclick)
ax3.scatter(t['ra']*d2a, t['dec']*d2a, alpha=0.3)
ax3.set_xlabel('R.A. (arcsec)')
xmin,xmax = ax3.get_xlim()
ax3.set_xlim(xmax, xmin)
ax3.set_ylabel('Dec. (arcsec)')
ax3.set_title('Select two points to form a bounding box around target\n(plot will close automatically when finished)')
plt.show()
plt.close('all')
# Get the RADEC limits from the click points
RAmin, RAmax = np.min(list(zip(*clickpoints))[0]), np.max(list(zip(*clickpoints))[0])
DECmin, DECmax = np.min(list(zip(*clickpoints))[1]), np.max(list(zip(*clickpoints))[1])
slice1 = np.where( (t['ra']*d2a >= RAmin) & (t['ra']*d2a <= RAmax) &
(t['dec']*d2a >= DECmin) & (t['dec']*d2a <= DECmax) )
# Get the magnitude limits from the click points
fig4 = plt.figure(104)
cid = fig4.canvas.mpl_connect('button_press_event', onclick2)
x = np.linspace(np.min(t['w2mpro'][slice1])-2*np.max(t['w2sigmpro'][slice1]), np.max(t['w2mpro'][slice1])+2*np.max(t['w2sigmpro'][slice1]), 2000)
W2pdf = np.zeros(len(x))
for w2, w2err in list(zip(t['w2mpro'][slice1].filled(-9999), t['w2sigmpro'][slice1].filled(-9999))):
if w2err == -9999:
if w2 == -9999:
continue # Skip only upper limits
else:
plt.axvline(w2, ls='--', lw=0.75, c='r', alpha=0.5, zorder=-100)
continue # Skip only upper limits
if w2 == -9999:
continue # Skip only upper limits
plt.axvline(w2, ls=':', lw=0.75, c='0.5', alpha=0.5, zorder=-100)
W2pdf += norm.pdf(x, loc=w2, scale=w2err)
plt.plot(x, W2pdf / np.trapz(W2pdf, x=x), zorder=100)
plt.plot([x[1000],x[1000]],[0,0], 'r--', lw=0.75, alpha=0.5, label='Upper limits')
plt.plot([x[1000],x[1000]],[0,0], c='0.5', ls=':', lw=0.75, alpha=0.5, label='Individual measurements')
plt.xlabel(r'$W2$')
plt.ylabel('PDF')
plt.legend(frameon=False)
plt.title('Select the lower and upper bound magnitudes\n(plot will close automatically when finished)')
plt.show()
plt.close('all')
W2min, W2max = np.min(list(zip(*clickpoints2))[0]), np.max(list(zip(*clickpoints2))[0])
if AllowUpperLimits: # Sometimes useful for very faint sources (Y dwarfs)
slice2 = np.where( (t['w2mpro'][slice1] >= W2min) & (t['w2mpro'][slice1] <= W2max) )
else:
slice2 = np.where( (t['w2mpro'][slice1] >= W2min) & (t['w2mpro'][slice1] <= W2max) & (t['w2sigmpro'][slice1].filled(-9999) != -9999) )
#### Find the date clusters
Groups = []
Epochs = []
DateGrps = np.arange(-1, 30, 2)
for i in range(len(DateGrps)-1):
bottom = np.min(t['mjd'][slice1][slice2]) + 365.25/4*DateGrps[i]
top = np.min(t['mjd'][slice1][slice2]) + 365.25/4*DateGrps[i+1]
group = np.where( (t['mjd'][slice1][slice2] > bottom) & (t['mjd'][slice1][slice2] < top) )
if len(group[0]) != 0:
if removeSingles == True:
if len(group[0]) == 1: continue
Groups.append(group[0])
Epochs.append([bottom, top])
else:
Groups.append(group[0])
Epochs.append([bottom, top])
MJDs = []
Ys1 = []
Ys2 = []
unYs1 = []
unYs2 = []
XsALL = []
Ys1ALL = []
Ys2ALL = []
Colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
i = 0
groupcount = 0
if PLOT:
fig2 = plt.figure(3)
cid = fig2.canvas.mpl_connect('button_press_event', onclickclose)
ax3 = fig2.add_subplot(111)
for group in Groups:
groupcount += 1
if Register != False: ## Register the epoch
# Get the first position of the epoch
ra00 = t['ra'][slice1][slice2][group][0]
dec00 = t['dec'][slice1][slice2][group][0]
epochs00 = t['mjd'][slice1][slice2][group]
# Get the shifts (only need to find the correct search radius for the 1st epoch)
if groupcount == 1:
rashifts0, decshifts0, RegisterRadius = reg.GetRegistrators(name, epochs00, subepoch=groupcount, ra0=ra00, dec0=dec00)
else:
rashifts0, decshifts0, RegisterRadius = reg.GetRegistrators(name, epochs00, subepoch=groupcount, ra0=ra00, dec0=dec00, radius=RegisterRadius)
# Shift the epoch
shiftedRAs = t['ra'][slice1][slice2][group] + rashifts0
shiftedDECs = t['dec'][slice1][slice2][group] + decshifts0
#sys.exit()
filteredRA = sigma_clip(shiftedRAs, sigma=sigma, maxiters=None)
filteredDEC = sigma_clip(shiftedDECs, sigma=sigma, maxiters=None)
else: # Don't register each subepoch
filteredRA = sigma_clip(t['ra'][slice1][slice2][group], sigma=sigma, maxiters=None)
filteredDEC = sigma_clip(t['dec'][slice1][slice2][group], sigma=sigma, maxiters=None)
index = np.where( (~filteredRA.mask) & (~filteredDEC.mask) )[0]
print('Epoch %s - Group / Filtered Group: %s / %s'%(groupcount, len(t['ra'][slice1][slice2][group]), len(t['ra'][slice1][slice2][group][index])))
if PLOT:
ax1.scatter(t['mjd'][slice1][slice2][group][index], t['ra'][slice1][slice2][group][index]*d2a, c='b', marker='x', alpha=0.5)
ax2.scatter(t['mjd'][slice1][slice2][group][index], t['dec'][slice1][slice2][group][index]*d2a, c='b', marker='x', alpha=0.5)
ax3.scatter(t['ra'][slice1][slice2][group]*d2a, t['dec'][slice1][slice2][group]*d2a, alpha=0.3, color=Colors[i], label='%s'%np.average(t['mjd'][slice1][slice2][group][index]))
ax3.scatter(t['ra'][slice1][slice2][group][index]*d2a, t['dec'][slice1][slice2][group][index]*d2a, s=2, color=Colors[i], label='%s'%np.average(t['mjd'][slice1][slice2][group][index]))
ax3.errorbar(np.average(t['ra'][slice1][slice2][group][index], weights = 1./(t['sigra'][slice1][slice2][group][index]/d2a)**2)*d2a,
np.average(t['dec'][slice1][slice2][group][index], weights = 1./(t['sigdec'][slice1][slice2][group][index]/d2a)**2)*d2a,
xerr = np.std(t['ra'][slice1][slice2][group][index]) / np.sqrt(len(t[slice1][slice2][group][index][0])),
yerr = np.std(t['dec'][slice1][slice2][group][index]) / np.sqrt(len(t[slice1][slice2][group][index][0])), c=Colors[i], marker='x', ms=20)
i += 1
MJDs.append(np.average(t['mjd'][slice1][slice2][group][index]))
Ys1.append(np.average(t['ra'][slice1][slice2][group][index], weights = 1./(t['sigra'][slice1][slice2][group][index]/d2a)**2))
Ys2.append(np.average(t['dec'][slice1][slice2][group][index], weights = 1./(t['sigdec'][slice1][slice2][group][index]/d2a)**2))
# Uncertainty weighted position
unYs1.append( (1. / np.sqrt(np.sum(1./t['sigra'][slice1][slice2][group][index]**2)) ) / d2a)
unYs2.append( (1. / np.sqrt(np.sum(1./t['sigdec'][slice1][slice2][group][index]**2)) ) / d2a)
# This is just for plotting
XsALL.append(t['mjd'][slice1][slice2][group][index].data.compressed())
Ys1ALL.append(t['ra'][slice1][slice2][group][index].data.compressed())
Ys2ALL.append(t['dec'][slice1][slice2][group][index].data.compressed())
raP, decP = Ys1[0], Ys2[0]
if PLOT:
ax1.set_xlabel('MJD')
ax1.set_ylabel('R.A. (arcsec)')
ax2.set_xlabel('MJD')
ax2.set_ylabel('Dec. (arcsec)')
ax3.legend(frameon=False)
ax3.set_title('Here are the measurements we will fit to\n(click anywhere to close)')
ax3.set_xlabel('R.A. (arcsec)')
xmin,xmax = ax3.get_xlim()
ax3.set_xlim(xmax, xmin)
ax3.set_ylabel('Dec. (arcsec)')
fig0.savefig('%s/Plots/MJDs0.png'%name, dpi=600, bbox_inches='tight')
fig.savefig('%s/Plots/MJDs1.png'%name, dpi=600, bbox_inches='tight')
fig2.savefig('%s/Plots/MJDs2.png'%name, dpi=600, bbox_inches='tight')
plt.show()
plt.close('all')
if groupcount < 4:
# Only do objects that have multiple observations
raise Exception('Not enough epochs available. Only %s epochs available'%groupcount)
# Get the shifts using calibrators (Only use 10 arcsec/arcminutes)
if Calibrate == True:
if radecstr != None:
if Register == True:
rashifts, decshifts = ne.GetCalibrators(name, Epochs, radecstr=radecstr, radius=RegisterRadius)
else:
rashifts, decshifts = ne.GetCalibrators(name, Epochs, radecstr=radecstr)
elif ra0 != None and dec0 != None:
if Register == True:
rashifts, decshifts = ne.GetCalibrators(name, Epochs, ra0=ra0, dec0=dec0, radius=RegisterRadius)
else:
rashifts, decshifts = ne.GetCalibrators(name, Epochs, ra0=ra0, dec0=dec0)
print('Shifts (mas):')
print('RA:', rashifts*d2ma)
print('DEC:', decshifts*d2ma)
Ys1 = np.array(Ys1).flatten() - rashifts
Ys2 = np.array(Ys2).flatten() - decshifts
Ys1ALL0 = np.array(Ys1ALL) - rashifts
Ys2ALL0 = np.array(Ys2ALL) - decshifts
else:
Ys1 = np.array(Ys1).flatten()
Ys2 = np.array(Ys2).flatten()
Ys1ALL0 = np.array(Ys1ALL)
Ys2ALL0 = np.array(Ys2ALL)
MJDs = np.array(MJDs).flatten()
unYs1 = np.array(unYs1).flatten()
unYs2 = np.array(unYs2).flatten()
#unYs1 = np.sqrt( np.array(unYs1).flatten()**2 + (5./d2ma)**2 )
#unYs2 = np.sqrt( np.array(unYs2).flatten()**2 + (5./d2ma)**2 )
XsALL0 = np.array(XsALL)
# Need to reshape the arrays. Not the most efficient thing.
XsALL = np.empty(0)
Ys1ALL = np.empty(0)
Ys2ALL = np.empty(0)
for i in range(len(Ys1ALL0)):
XsALL = np.append(XsALL, XsALL0[i])
Ys1ALL = np.append(Ys1ALL, Ys1ALL0[i])
Ys2ALL = np.append(Ys2ALL, Ys2ALL0[i])
XsALL = np.array(XsALL).flatten()
Ys1ALL = np.array(Ys1ALL).flatten()
Ys2ALL = np.array(Ys2ALL).flatten()
Twrite1 = Table([Ys1, unYs1, Ys2, unYs2, MJDs], names=['RA','SIGRA','DEC','SIGDEC','MJD'])
Twrite1.write('%s/Results/Weighted_Epochs.csv'%name, overwrite=True)
Twrite2 = Table([Ys1ALL, Ys2ALL, XsALL], names=['RA','DEC','MJD'])
Twrite2.write('%s/Results/All_Epochs.csv'%name, overwrite=True)
print('Epochs:', groupcount)
print('Positions (RA):', Ys1)
print('Positions (Dec):', Ys2)
print('Average Pos Uncert (RA; mas):', np.mean(unYs1 * d2ma), np.median(unYs1 * d2ma))
print('Average Pos Uncert (Decl; mas):', np.mean(unYs2 * d2ma), np.median(unYs2 * d2ma))
print('Average Pos Uncert (mas):', (np.mean(unYs1 * d2ma) + np.mean(unYs2 * d2ma)) / 2.)
print('Average Pos Uncert (Combined; mas):', (np.mean(np.sqrt(unYs1**2 + unYs1[0]**2)) * d2ma + np.mean(np.sqrt(unYs2**2 + unYs2[0]**2)) * d2ma) / 2.)
print('Time Baseline (yr):', (np.max(MJDs) - np.min(MJDs)) * d2y)
# Uncertainty arrays in arcsec
RA_Uncert = np.sqrt(unYs1**2 + unYs1[0]**2)*d2a
#RA_Uncert = np.sqrt( np.cos(Ys2[0]*np.pi/180.)**2 * (unYs1**2 + unYs1[0]**2) + \
# np.sin(Ys2[0]*np.pi/180.)**2 * (Ys1 - Ys1[0])**2 * unYs2**2 ) * d2a
DEC_Uncert = np.sqrt(unYs2**2 + unYs2[0]**2)*d2a
print('INITIAL FIT')
RA, DEC = False, False
#poptD, popc = curve_fit(func1radec, [Ys1*d2a, Ys2*d2a, MJDs] , np.concatenate( [np.cos(Ys2[0]*np.pi/180.)*(Ys1 - Ys1[0])*d2a, (Ys2 - Ys2[0])*d2a] ).flatten(), sigma=np.concatenate( [np.sqrt( (unYs1**2 + unYs1[0]**2)*np.cos(Ys2[0]*np.pi/180.)**2 + unYs2[0]**2*(Ys1-Ys1[0])**2*np.sin(Ys2[0]*np.pi/180)**2)*d2a, np.sqrt(unYs2**2 + unYs2[0]**2)*d2a ] ).flatten())
#bounds = [[-10,-10,-10,-10,0],[10,10,10,10,1]]
poptD, popc = curve_fit(AstrometryFunc0, [Ys1*d2a, Ys2*d2a, MJDs] ,
np.concatenate( [(Ys1 - Ys1[0])*np.cos(Ys2[0]*np.pi/180.)*d2a, (Ys2 - Ys2[0])*d2a] ).flatten(),
sigma=np.concatenate( [ RA_Uncert, DEC_Uncert ] ).flatten() )
#print(poptD)
#print(popc)
#print(np.diag(popc))
print('DELTARA\tDELTADE\tPM_RA \tPM_DEC\tPLX\n{:.7}\t{:.7}\t{:.6}\t{:.6}\t{:.5}'.format(str(poptD[0]), str(poptD[1]), str(poptD[2]), str(poptD[3]), str(poptD[4])))
print('%s pc'%(1/poptD[-1]))
if method == 'leastsq':
return poptD, np.diag(popc)
if method == 'amoeba': # still a work in progress
RA, DEC = False, False
#poptD, popc = curve_fit(func1radec, [Ys1*d2a, Ys2*d2a, MJDs] , np.concatenate( [np.cos(Ys2[0]*np.pi/180.)*(Ys1 - Ys1[0])*d2a, (Ys2 - Ys2[0])*d2a] ).flatten(), sigma=np.concatenate( [np.sqrt( (unYs1**2 + unYs1[0]**2)*np.cos(Ys2[0]*np.pi/180.)**2 + unYs2[0]**2*(Ys1-Ys1[0])**2*np.sin(Ys2[0]*np.pi/180)**2)*d2a, np.sqrt(unYs2**2 + unYs2[0]**2)*d2a ] ).flatten())
#bounds = [[-10,-10,-10,-10,0],[10,10,10,10,1]]
poptD, popc = minimize(AstrometryFunc0, [Ys1*d2a, Ys2*d2a, MJDs] ,
np.concatenate( [(Ys1 - Ys1[0])*np.cos(Ys2[0]*np.pi/180.)*d2a, (Ys2 - Ys2[0])*d2a] ).flatten(),
method='Nelder-Mead',
sigma=np.concatenate( [ RA_Uncert, DEC_Uncert ] ).flatten() )
return poptD, np.diag(popc)
##########################################################################
print('Starting MCMC')
def ln_likelihood(parameters, x, yerr):
    """Gaussian log-likelihood of the astrometric model against the data.

    parameters : (delta1, delta2, pmra, pmdec, pi) — positional offsets,
        proper motions, and parallax, forwarded to ``AstrometryFunc0``.
    x : model inputs forwarded to ``AstrometryFunc0`` (here [RAs, DECs, MJDs]).
    yerr : measurement uncertainties; flattened to build 1/sigma^2 weights.

    NOTE(review): closes over ``Ys1``, ``Ys2``, ``d2a`` and
    ``AstrometryFunc0`` from the enclosing scope — the observed positions
    are not passed in explicitly.
    """
    delta1, delta2, pmra, pmdec, pi = parameters
    # Observed offsets relative to the first epoch, in arcsec:
    # RA scaled by cos(dec of first epoch), concatenated with Dec.
    y = np.concatenate( [ np.cos(Ys2[0]*np.pi/180.)*(Ys1 - Ys1[0])*d2a, (Ys2 - Ys2[0])*d2a ] ).flatten()
    modelval = AstrometryFunc0(x, delta1, delta2, pmra, pmdec, pi)
    # Inverse-variance weights from the supplied uncertainties.
    invsig2 = 1 / np.array(yerr).flatten()**2
    # -0.5 * sum( chi^2 term minus log(invsig2) normalization term ).
    LogChiSq = -0.5*np.sum( ( (np.array(y) - np.array(modelval).flatten() )**2 * invsig2 - np.log(invsig2) ) )
    return LogChiSq
def ln_prior(parameters):
    """Log-prior for the astrometric fit.

    Flat (returns 0) inside the allowed box of proper motions and
    parallax, and -inf outside it.  The positional offsets delta1/delta2
    are unconstrained.
    """
    delta1, delta2, pmra, pmdec, pi = parameters
    # Allowed ranges: proper motions within [-100, 100], parallax in [0, 1].
    within_bounds = (-100 <= pmra <= 100) and (-100 <= pmdec <= 100) and (0 <= pi <= 1)
    return 0 if within_bounds else -np.inf
def ln_probability(parameters, x, yerrs):
    """Log-posterior for the MCMC: log-prior plus log-likelihood.

    Returns -inf immediately when the parameters fall outside the prior
    bounds, so the likelihood is only evaluated for admissible samples.
    """
    delta1, delta2, pmra, pmdec, pi = parameters  # validates the 5-element shape
    log_prior = ln_prior(parameters)
    # Zero posterior probability outside the prior box — skip the likelihood.
    if not np.isfinite(log_prior):
        return -np.inf
    return log_prior + ln_likelihood(parameters, x, yerrs)
# Set up the MCMC
n_dim, n_walkers, n_steps = 5, 200, 200
x = [Ys1*d2a, Ys2*d2a, MJDs]
yerr = np.concatenate( [ RA_Uncert, DEC_Uncert ] ).flatten()
pos = np.array([poptD + 0.2*np.random.randn(n_dim) for i in range(n_walkers)]) # Take initial walkers by best fit values
pos[:,-1] = abs(pos[:,-1]) # Fix for negative parallax values
RA, DEC = True, True
# Single core run MCMC
sampler = emcee.EnsembleSampler(n_walkers, n_dim, ln_probability, args=(x, yerr))
sampler.run_mcmc(pos, n_steps, progress=True)
samples = sampler.chain
# Parallelization
#from multiprocessing import Pool
#os.environ["OMP_NUM_THREADS"] = "1"
#with Pool() as pool:
# sampler = emcee.EnsembleSampler(n_walkers, n_dim, ln_probability, args=(x, yerr), pool=pool)
# sampler.run_mcmc(pos, n_steps, progress=True)
# Now plot them
nwalkers, nsamples, ndim = samples.shape
labels = ['Delta1', 'Delta2', 'PMra', 'PMdec', 'pi']
truths = None
extents = None
xs = samples
fig = plt.figure()
cid = fig.canvas.mpl_connect('button_press_event', onclickclose)
gs = gridspec.GridSpec(ndim, 5)
# For each parameter, I want to plot each walker on one panel, and a histogram
# of all links from all walkers
for ii in range(ndim):
walkers = xs[:,:,ii]
flatchain = np.hstack(walkers)
ax1 = plt.subplot(gs[ii, :5])
steps = np.arange(nsamples)
for walker in walkers:
ax1.plot(steps, walker,
drawstyle="steps", color="0.5", alpha=0.4)
if labels:
ax1.set_ylabel(labels[ii])
# Don't show ticks on the y-axis
#ax1.yaxis.set_ticks([])
# For the plot on the bottom, add an x-axis label. Hide all others
if ii == ndim-1:
ax1.set_xlabel("step number")
else:
ax1.xaxis.set_visible(False)
samples = sampler.chain[:, 100:, :].reshape((-1, n_dim))
# Save the chain?
if savechain:
np.save('%s/Results/MCMCresults.npy'%name, samples)
a_mcmc, b_mcmc, c_mcmc, d_mcmc, e_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
list(zip(*np.percentile(samples, [16, 50, 84],
axis=0))))
print('Printing Quantiles (median, 16th, 84th)')
print('DELTARA:', a_mcmc)
print('DELTADEC:', b_mcmc)
print('PMRA:', c_mcmc)
print('PMDEC:', d_mcmc)
print('PLX:', e_mcmc)
print('dist (pc), [high, low]: %0.2f [%0.2f, %0.2f]'%(1./e_mcmc[0], 1./(e_mcmc[0] - e_mcmc[1]), 1./(e_mcmc[0] + e_mcmc[2])))
if PLOT:
plt.show()
plt.close('all')
##### EMCEE Values
poptEMCEE = np.array([ a_mcmc[0], b_mcmc[0], c_mcmc[0], d_mcmc[0], e_mcmc[0] ])
poptEMCEEcov = np.array([ np.max([a_mcmc[1], a_mcmc[2]]), np.max([b_mcmc[1], b_mcmc[2]]), np.max([c_mcmc[1], c_mcmc[2]]),
np.max([d_mcmc[1], d_mcmc[2]]), np.max([e_mcmc[1], e_mcmc[2]]) ])
##################
if PLOT == False: # Don't need to go any further
return poptEMCEE, poptEMCEEcov
# Get a random sample of walkers for plotting
Xs = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
RAs = np.zeros(len(Xs)) + Ys1[0]*d2a
DECs = np.zeros(len(Xs)) + Ys2[0]*d2a
########################################################################### Plot EMCEE values
fig = plt.figure(1, figsize=(5, 4*3/4.))
cid = fig.canvas.mpl_connect('button_press_event', onclickclose)
ax = fig.add_subplot(111)
fontsize = 7
ms=2
offset = 1
if poptEMCEE[0] > poptEMCEE[1]:
### PLOT THE RA
ax.errorbar(MJDs, offset+(Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.),
yerr = np.sqrt(unYs1**2 + unYs1[0]**2)*d2a, marker='o',linestyle='None', color='b', ms=ms)
ax.scatter(XsALL, offset+(Ys1ALL - Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.), c='0.5', alpha=0.3, zorder=-10, s=4)
Xs = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
RAs = np.zeros(len(Xs)) + Ys1[0]*d2a
DECs = np.zeros(len(Xs)) + Ys2[0]*d2a
RA, DEC = True, False
RAplot = AstrometryFunc0([RAs, DECs, Xs], *poptEMCEE)
ax.plot(Xs, offset+RAplot, 'k-', lw=0.5)
ax.text(np.min(MJDs)+50, 1.5*offset, r'$\Delta \alpha \cos \delta$', fontsize=fontsize)
### PLOT THE DEC
ax.errorbar(MJDs, (Ys2-Ys2[0])*d2a, yerr = np.sqrt(unYs2**2 + unYs2[0]**2)*d2a, marker='^',linestyle='None', ms=ms)
ax.scatter(XsALL, (Ys2ALL - Ys2[0])*d2a, c='0.5', alpha=0.3, zorder=-10, s=4)
RA, DEC = False, True
DECplot = AstrometryFunc0([RAs, DECs, Xs], *poptEMCEE)
ax.plot(Xs, DECplot, 'k-', lw=0.5)
ax.text(np.min(MJDs)+50, -1*offset, r'$\Delta \delta$', fontsize=fontsize)
else:
### PLOT THE RA
ax.errorbar(MJDs, (Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.), yerr = np.sqrt(unYs1**2 + unYs1[0]**2)*d2a, marker='o',linestyle='None', color='b', ms=ms)
ax.scatter(XsALL, (Ys1ALL - Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.), c='0.5', alpha=0.3, zorder=-10, s=4)
Xs = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
RAs = np.zeros(len(Xs)) + Ys1[0]*d2a
DECs = np.zeros(len(Xs)) + Ys2[0]*d2a
RA, DEC = True, False
RAplot = AstrometryFunc0([RAs, DECs, Xs], *poptEMCEE)
ax.plot(Xs, RAplot, 'k-', lw=0.5)
ax.text(np.min(MJDs)+50, -1*offset, r'$\Delta \alpha \cos \delta$', fontsize=fontsize)
### PLOT THE DEC
ax.errorbar(MJDs, offset+(Ys2-Ys2[0])*d2a, yerr = np.sqrt(unYs2**2 + unYs2[0]**2)*d2a, marker='^',linestyle='None', ms=ms)
ax.scatter(XsALL, offset+(Ys2ALL - Ys2[0])*d2a, c='0.5', alpha=0.3, zorder=-10, s=4)
RA, DEC = False, True
DECplot = AstrometryFunc0([RAs, DECs, Xs], *poptEMCEE)
ax.plot(Xs, offset+DECplot, 'k-', lw=0.5)
ax.text(np.min(MJDs)+50, 1.5*offset, r'$\Delta \delta$', fontsize=fontsize)
at = AnchoredText('MCMC Fit' + '\n' + r'$\mu_\alpha \cos \delta = %0.0f \pm %0.0f$ mas yr$^{-1}$'%(poptEMCEE[-3]*1e3, poptEMCEEcov[-3]*1e3) + '\n' + r'$\mu_\delta = %0.0f \pm %0.0f$ mas yr$^{-1}$'%(poptEMCEE[-2]*1e3, poptEMCEEcov[-2]*1e3) + '\n' + r'$\pi = %0.0f \pm %0.0f$ mas'%(poptEMCEE[-1]*1e3, poptEMCEEcov[-1]*1e3),
prop=dict(size=8), frameon=False,
loc=2,
)
ax.add_artist(at)
ax.set_ylabel(r'Motion + offset (arcsec)')
ax.set_xlabel(r'MJD (day)')
plt.minorticks_on()
plt.savefig('%s/Plots/Pi_radec_solution.png'%name, dpi=600, bbox_inches='tight')
plt.show()
plt.close('all')
###############################
fig = plt.figure(2, figsize=(3.4, 3.4*3/4.))
cid = fig.canvas.mpl_connect('button_press_event', onclickclose)
plt.errorbar( (Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.),
(Ys2-Ys2[0])*d2a,
xerr = RA_Uncert,
yerr = DEC_Uncert, marker='o',linestyle='None', color='b', ms=ms)
plt.plot(RAplot, DECplot, 'k-', lw=0.5)
plt.xlabel(r'$\Delta \alpha \cos \delta$ (arcsec)')
plt.ylabel(r'$\Delta \delta$ (arcsec)')
plt.minorticks_on()
plt.savefig('%s/Plots/Pi_all_solution.png'%name, dpi=600, bbox_inches='tight')
plt.show()
plt.close('all')
################################################## Plot residuals without proper motion
fig = plt.figure(3, figsize=(8,6))
cid = fig.canvas.mpl_connect('button_press_event', onclickclose)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.errorbar(MJDs, (Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.) - poptEMCEE[-3]*(MJDs-MJDs[0])*d2y - poptEMCEE[0],
yerr = RA_Uncert, marker='o',linestyle='None', color='b', ms=ms)
ax1.scatter(XsALL, (Ys1ALL - Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.) - poptEMCEE[-3]*(XsALL-XsALL[0])*d2y - poptEMCEE[0],
c='0.5', alpha=0.3, zorder=-10, s=4, marker='o')
Xs2 = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
RAs = np.zeros(len(Xs2)) + Ys1[0]*d2a
DECs = np.zeros(len(Xs2)) + Ys2[0]*d2a
RA, DEC = True, False
RAplot = AstrometryFunc0([RAs, DECs, Xs2], *poptEMCEE)
ax1.plot(Xs2, RAplot - poptEMCEE[-3]*(Xs2-Xs2[0])*d2y - poptEMCEE[0], 'k-', lw=0.5)
####### Dec part
ax2.errorbar(MJDs, (Ys2-Ys2[0])*d2a - poptEMCEE[-2]*(MJDs-MJDs[0])*d2y - poptEMCEE[1],
yerr = DEC_Uncert, marker='^',linestyle='None', ms=ms)
ax2.scatter(XsALL, (Ys2ALL - Ys2[0])*d2a - poptEMCEE[-2]*(XsALL-XsALL[0])*d2y - poptEMCEE[1],
c='0.5', alpha=0.3, zorder=-10, s=4, marker='^')
RA, DEC = False, True
DECplot = AstrometryFunc0([RAs, DECs, Xs2], *poptEMCEE)
ax2.plot(Xs2, DECplot - poptEMCEE[-2]*(Xs2-Xs2[0])*d2y - poptEMCEE[1], 'k-', lw=0.5)
ax1.set_ylabel(r'$\Delta$RA (arcsec)')
ax2.set_ylabel(r'$\Delta$Dec (arcsec)')
ax2.set_xlabel(r'MJD (day)')
plt.savefig('%s/Plots/Pi_RA_DEC_solution.png'%name, dpi=600, bbox_inches='tight')
plt.show()
plt.close('all')
################################################## Plot the parallax circle
fig = plt.figure(4, figsize=(6,6))
cid = fig.canvas.mpl_connect('button_press_event', onclickclose)
ax1 = fig.add_subplot(111)
ax1.errorbar( (Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.) - poptEMCEE[-3]*(MJDs-MJDs[0])*d2y - poptEMCEE[0],
(Ys2-Ys2[0])*d2a - poptEMCEE[-2]*(MJDs-MJDs[0])*d2y - poptEMCEE[1],
xerr = RA_Uncert,
yerr = DEC_Uncert,
marker='o',linestyle='None', color='b', ms=ms)
ax1.plot(RAplot - poptEMCEE[-3]*(Xs2-Xs2[0])*d2y - poptEMCEE[0],
DECplot - poptEMCEE[-2]*(Xs2-Xs2[0])*d2y - poptEMCEE[1], 'k-', lw=0.5)
ax1.set_xlabel(r'$\Delta$RA (arcsec)')
ax1.set_ylabel(r'$\Delta$Dec (arcsec)')
plt.savefig('%s/Plots/Pi_circle_solution.png'%name, dpi=600, bbox_inches='tight')
plt.show()
plt.close('all')
##################################################
return 0
###########################################################################################################
def PlotParallax(Name, Place, offset, offset1, offset2, PDFsave=False):
    """Plot the full parallax + proper-motion solution for one target.

    Reads the saved MCMC chain (``<name>/Results/MCMCresults.npy``) and the
    epoch tables written by the fitting step, then draws (left panel) the
    RA/Dec motion with the median-posterior model plus 300 random posterior
    draws, and (right panels) the proper-motion-subtracted residuals.
    Saves ``<name>/Plots/FullSolution.pdf`` or ``.png``.

    Name : str
        Display name of the target; stripped of '$', ' ' and '.' to form
        the input/output directory name.
    Place : int or str
        matplotlib anchor location code for the AnchoredText result box.
    offset, offset1, offset2 : float
        Vertical offsets (arcsec) separating the RA and Dec curves and
        placing their text labels.
    PDFsave : bool
        If True save a PDF, otherwise a PNG.

    NOTE(review): relies on module-level names defined elsewhere in this
    file — np, plt, Table, AstrometryFunc, AnchoredText, onclickclose,
    d2a (deg->arcsec), d2y (days->years, presumably) — confirm against the
    file header.
    """
    # Get the name for the filepaths
    name = Name.replace('$','').replace(' ','').replace('.','')
    ##### EMCEE Values: median and +/- 1-sigma (16th/84th percentile) spreads
    samples = np.load('%s/Results/MCMCresults.npy'%name)
    a_mcmc, b_mcmc, c_mcmc, d_mcmc, e_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                                                 list(zip(*np.percentile(samples, [16, 50, 84],
                                                                             axis=0))))
    # Median-posterior parameter vector (delta_ra, delta_dec, pmra, pmdec, plx)
    poptEMCEE = np.array([ a_mcmc[0], b_mcmc[0], c_mcmc[0], d_mcmc[0], e_mcmc[0] ])
    # Symmetrized uncertainty: the larger of the upper/lower percentile spreads
    poptEMCEEcov = np.array([ np.max([a_mcmc[1], a_mcmc[2]]), np.max([b_mcmc[1], b_mcmc[2]]), np.max([c_mcmc[1], c_mcmc[2]]),
                               np.max([d_mcmc[1], d_mcmc[2]]), np.max([e_mcmc[1], e_mcmc[2]]) ])
    ##################
    # Use the JPL ephemeris for Earth's barycentric position (needs astropy).
    JPL = True
    if JPL:
        from astropy.coordinates import solar_system_ephemeris
        solar_system_ephemeris.set('jpl')
    # Weighted per-epoch positions and the individual (unweighted) measurements.
    T1 = Table.read('%s/Results/Weighted_Epochs.csv'%name)
    T2 = Table.read('%s/Results/All_Epochs.csv'%name)
    MJDs = T1['MJD'].data
    Ys1 = T1['RA'].data
    Ys2 = T1['DEC'].data
    unYs1 = T1['SIGRA'].data
    unYs2 = T1['SIGDEC'].data
    XsALL = T2['MJD'].data
    Ys1ALL = T2['RA'].data
    Ys2ALL = T2['DEC'].data
    # Uncertainty arrays in arcsec (each epoch combined in quadrature with
    # the reference epoch's uncertainty, since offsets are relative to it)
    RA_Uncert = np.sqrt(unYs1**2 + unYs1[0]**2) * d2a
    DEC_Uncert = np.sqrt(unYs2**2 + unYs2[0]**2) * d2a
    # Get a random sample of walkers for plotting
    Xs = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
    RAs = np.zeros(len(Xs)) + Ys1[0]*d2a
    DECs = np.zeros(len(Xs)) + Ys2[0]*d2a
    print('Getting 300 random solutions')
    RAplots = []
    DECplots = []
    # Evaluate the model for 300 random posterior draws to visualize the spread.
    for a, b, c, d, e in samples[np.random.randint(len(samples), size=300)]:
        poptEMCEEtest = np.array([ a, b, c, d, e ])
        RA, DEC = True, False
        RAplots.append(AstrometryFunc([RAs, DECs, Xs], *poptEMCEEtest, RA=RA, DEC=DEC))
        RA, DEC = False, True
        DECplots.append(AstrometryFunc([RAs, DECs, Xs], *poptEMCEEtest, RA=RA, DEC=DEC))
    ################################# Plot the emcee
    print('Plotting MCMC')
    fig = plt.figure(1, figsize=(7.1, 3.4*3/4.))
    cid = fig.canvas.mpl_connect('button_press_event', onclickclose)
    # Left: full-motion panel; right: RA and Dec residual panels.
    ax = plt.subplot2grid((2, 2), (0, 0), rowspan=2)
    ax11 = plt.subplot2grid((2, 2), (0, 1))
    ax22 = plt.subplot2grid((2, 2), (1, 1))
    fontsize = 7
    ms=2
    # Put the faster-moving coordinate on top (shifted up by `offset`).
    if poptEMCEE[-3] > poptEMCEE[-2]: # The PMRA > PMDEC
        ### PLOT THE RA
        ax.errorbar(MJDs, offset+(Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.),
                    yerr = RA_Uncert, marker='o',linestyle='None', color='b', ms=ms)
        ax.scatter(XsALL, offset+(Ys1ALL - Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.), c='0.5', alpha=0.3, zorder=-10, s=4)
        Xs = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
        RAs = np.zeros(len(Xs)) + Ys1[0]*d2a
        DECs = np.zeros(len(Xs)) + Ys2[0]*d2a
        RA, DEC = True, False
        RAplot = AstrometryFunc([RAs, DECs, Xs], *poptEMCEE, RA=RA, DEC=DEC)
        ax.plot(Xs, offset+RAplot, 'k-', lw=0.5)
        # Overlay the 300 posterior draws as a faint band.
        for RAplot2 in RAplots:
            ax.plot(Xs, offset+RAplot2, color="0.5", alpha=0.01)
        ax.text(np.min(MJDs)+50, offset1, r'$\Delta \alpha \cos \delta$', fontsize=fontsize)
        ### PLOT THE DEC
        ax.errorbar(MJDs, (Ys2-Ys2[0])*d2a, yerr = DEC_Uncert, marker='^',linestyle='None', ms=ms)
        ax.scatter(XsALL, (Ys2ALL - Ys2[0])*d2a, c='0.5', alpha=0.3, zorder=-10, s=4)
        RA, DEC = False, True
        DECplot = AstrometryFunc([RAs, DECs, Xs], *poptEMCEE, RA=RA, DEC=DEC)
        ax.plot(Xs, DECplot, 'k-', lw=0.5)
        for DECplot2 in DECplots:
            ax.plot(Xs, DECplot2, color="0.5", alpha=0.01)
        ax.text(np.min(MJDs)+50, offset2, r'$\Delta \delta$', fontsize=fontsize)
    else: # PMDEC > PMRA
        ### PLOT THE RA
        ax.errorbar(MJDs, (Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.),
                    yerr = RA_Uncert, marker='o',linestyle='None', color='b', ms=ms)
        ax.scatter(XsALL, (Ys1ALL - Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.), c='0.5', alpha=0.3, zorder=-10, s=4)
        Xs = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
        RAs = np.zeros(len(Xs)) + Ys1[0]*d2a
        DECs = np.zeros(len(Xs)) + Ys2[0]*d2a
        RA, DEC = True, False
        RAplot = AstrometryFunc([RAs, DECs, Xs], *poptEMCEE, RA=RA, DEC=DEC)
        ax.plot(Xs, RAplot, 'k-', lw=0.5)
        for RAplot2 in RAplots:
            ax.plot(Xs, RAplot2, color="0.5", alpha=0.01)
        ax.text(np.min(MJDs)+50, offset2, r'$\Delta \alpha \cos \delta$', fontsize=fontsize)
        ### PLOT THE DEC
        ax.errorbar(MJDs, offset+(Ys2-Ys2[0])*d2a, yerr = DEC_Uncert, marker='^',linestyle='None', ms=ms)
        ax.scatter(XsALL, offset+(Ys2ALL - Ys2[0])*d2a, c='0.5', alpha=0.3, zorder=-10, s=4)
        RA, DEC = False, True
        DECplot = AstrometryFunc([RAs, DECs, Xs], *poptEMCEE, RA=RA, DEC=DEC)
        ax.plot(Xs, offset+DECplot, 'k-', lw=0.5)
        for DECplot2 in DECplots:
            ax.plot(Xs, offset+DECplot2, color="0.5", alpha=0.01)
        ax.text(np.min(MJDs)+50, offset1, r'$\Delta \delta$', fontsize=fontsize)
    # Result box: proper motions and parallax quoted in mas (values are *1e3).
    at = AnchoredText('MCMC Fit' + '\n' + r'$\mu_\alpha \cos \delta = %0.0f \pm %0.0f$ mas yr$^{-1}$'%(poptEMCEE[-3]*1e3, poptEMCEEcov[-3]*1e3) + '\n' + r'$\mu_\delta = %0.0f \pm %0.0f$ mas yr$^{-1}$'%(poptEMCEE[-2]*1e3, poptEMCEEcov[-2]*1e3) + '\n' + r'$\pi = %0.0f \pm %0.0f$ mas'%(poptEMCEE[-1]*1e3, poptEMCEEcov[-1]*1e3),
                      prop=dict(size=8), frameon=False,
                      loc=Place,
                      )
    ax.add_artist(at)
    ax.set_ylabel(r'Motion + offset (arcsec)')
    ax.set_xlabel(r'MJD (day)')
    ax.minorticks_on()
    ################################################## Plot residuals without proper motion
    # twinx() puts the residual y-axes on the right; the host axes are blanked.
    ax1 = ax11.twinx()
    ax2 = ax22.twinx()
    ax11.set_yticks([])
    ax11.set_xticks([])
    ax22.set_yticks([])
    # RA residuals: subtract the linear proper-motion trend and the offset,
    # leaving (ideally) only the parallactic wobble.
    ax1.errorbar(MJDs, (Ys1-Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.) - poptEMCEE[-3]*(MJDs-MJDs[0])*d2y - poptEMCEE[0],
                 yerr = RA_Uncert, marker='o',linestyle='None', color='b', ms=ms)
    ax1.scatter(XsALL, (Ys1ALL - Ys1[0])*d2a*np.cos(Ys2[0]*np.pi/180.) - poptEMCEE[-3]*(XsALL-XsALL[0])*d2y - poptEMCEE[0],
                c='0.5', alpha=0.3, zorder=-10, s=4, marker='o')
    Xs2 = np.linspace(np.min(MJDs), np.max(MJDs), 2000)
    RAs = np.zeros(len(Xs2)) + Ys1[0]*d2a
    DECs = np.zeros(len(Xs2)) + Ys2[0]*d2a
    RA, DEC = True, False
    RAplot = AstrometryFunc([RAs, DECs, Xs2], *poptEMCEE, RA=RA, DEC=DEC)
    ax1.plot(Xs2, RAplot - poptEMCEE[-3]*(Xs2-Xs2[0])*d2y - poptEMCEE[0], 'k-', lw=0.5)
    # Posterior-draw band for the RA residuals.
    for RAplot2 in RAplots:
        ax1.plot(Xs2, RAplot2 - poptEMCEE[-3]*(Xs2-Xs2[0])*d2y - poptEMCEE[0], color="0.5", alpha=0.01)
    ####### Dec part
    ax2.errorbar(MJDs, (Ys2-Ys2[0])*d2a - poptEMCEE[-2]*(MJDs-MJDs[0])*d2y - poptEMCEE[1],
                 yerr = DEC_Uncert, marker='^',linestyle='None', ms=ms)
    ax2.scatter(XsALL, (Ys2ALL - Ys2[0])*d2a - poptEMCEE[-2]*(XsALL-XsALL[0])*d2y - poptEMCEE[1],
                c='0.5', alpha=0.3, zorder=-10, s=4, marker='^')
    RA, DEC = False, True
    DECplot = AstrometryFunc([RAs, DECs, Xs2], *poptEMCEE, RA=RA, DEC=DEC)
    ax2.plot(Xs2, DECplot - poptEMCEE[-2]*(Xs2-Xs2[0])*d2y - poptEMCEE[1], 'k-', lw=0.5)
    # Posterior-draw band for the Dec residuals.
    for DECplot2 in DECplots:
        ax2.plot(Xs2, DECplot2 - poptEMCEE[-2]*(Xs2-Xs2[0])*d2y - poptEMCEE[1], color="0.5", alpha=0.01)
    ax1.set_ylabel(r'$\Delta\alpha$ (arcsec)')
    ax1.minorticks_on()
    ax2.set_ylabel(r'$\Delta\delta$ (arcsec)')
    ax22.set_xlabel(r'MJD (day)')
    ax2.minorticks_on()
    plt.suptitle('%s'%Name)
    fig.subplots_adjust(wspace=0.05, hspace=0.1)
    if PDFsave:
        plt.savefig('%s/Plots/FullSolution.pdf'%name, dpi=600, bbox_inches='tight')
    else:
        plt.savefig('%s/Plots/FullSolution.png'%name, dpi=600, bbox_inches='tight')
    plt.show()
    ##################################################
| [
"numpy.sqrt",
"astropy.table.Table",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"Register_Frames.GetRegistrators",
"numpy.log",
"emcee.EnsembleSampler",
"matplotlib.pyplot.fill_between",
"numpy.argsort",
"numpy.array",
"astropy.coordinates.solar_system_ephemeris.set",
"numpy.isfinite",
"ast... | [((728, 756), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(8)'}), "('xtick', labelsize=8)\n", (734, 756), True, 'import matplotlib.pyplot as plt\n'), ((757, 785), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(8)'}), "('ytick', labelsize=8)\n", (763, 785), True, 'import matplotlib.pyplot as plt\n'), ((786, 813), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (792, 813), True, 'import matplotlib.pyplot as plt\n'), ((814, 842), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(7)'}), "('legend', fontsize=7)\n", (820, 842), True, 'import matplotlib.pyplot as plt\n'), ((843, 871), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(10)'}), "('axes', labelsize=10)\n", (849, 871), True, 'import matplotlib.pyplot as plt\n'), ((872, 900), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(10)'}), "('axes', titlesize=10)\n", (878, 900), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1248), 'matplotlib.pyplot.axvline', 'plt.axvline', (['event.xdata'], {'c': '"""r"""', 'ls': '""":"""'}), "(event.xdata, c='r', ls=':')\n", (1220, 1248), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1291), 'matplotlib.pyplot.axhline', 'plt.axhline', (['event.ydata'], {'c': '"""r"""', 'ls': '""":"""'}), "(event.ydata, c='r', ls=':')\n", (1263, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1296, 1306), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1304, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1807, 1846), 'matplotlib.pyplot.axvline', 'plt.axvline', (['event.xdata'], {'c': '"""r"""', 'ls': '""":"""'}), "(event.xdata, c='r', ls=':')\n", (1818, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1860), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1858, 1860), True, 'import matplotlib.pyplot as plt\n'), ((6686, 6721), 'astropy.table.vstack', 'vstack', (['[t1, t2]'], {'join_type': 
'"""inner"""'}), "([t1, t2], join_type='inner')\n", (6692, 6721), False, 'from astropy.table import Table, vstack, join\n'), ((6731, 6767), 'astropy.table.vstack', 'vstack', (['[t00, t3]'], {'join_type': '"""inner"""'}), "([t00, t3], join_type='inner')\n", (6737, 6767), False, 'from astropy.table import Table, vstack, join\n'), ((6785, 6806), 'numpy.argsort', 'np.argsort', (["t0['mjd']"], {}), "(t0['mjd'])\n", (6795, 6806), True, 'import numpy as np\n'), ((8127, 8156), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(7, 6)'}), '(1, figsize=(7, 6))\n', (8137, 8156), True, 'import matplotlib.pyplot as plt\n'), ((11761, 11781), 'numpy.arange', 'np.arange', (['(-1)', '(30)', '(2)'], {}), '(-1, 30, 2)\n', (11770, 11781), True, 'import numpy as np\n'), ((18304, 18319), 'numpy.array', 'np.array', (['XsALL'], {}), '(XsALL)\n', (18312, 18319), True, 'import numpy as np\n'), ((18394, 18405), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18402, 18405), True, 'import numpy as np\n'), ((18417, 18428), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18425, 18428), True, 'import numpy as np\n'), ((18440, 18451), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (18448, 18451), True, 'import numpy as np\n'), ((18744, 18832), 'astropy.table.Table', 'Table', (['[Ys1, unYs1, Ys2, unYs2, MJDs]'], {'names': "['RA', 'SIGRA', 'DEC', 'SIGDEC', 'MJD']"}), "([Ys1, unYs1, Ys2, unYs2, MJDs], names=['RA', 'SIGRA', 'DEC', 'SIGDEC',\n 'MJD'])\n", (18749, 18832), False, 'from astropy.table import Table, vstack, join\n'), ((18908, 18966), 'astropy.table.Table', 'Table', (['[Ys1ALL, Ys2ALL, XsALL]'], {'names': "['RA', 'DEC', 'MJD']"}), "([Ys1ALL, Ys2ALL, XsALL], names=['RA', 'DEC', 'MJD'])\n", (18913, 18966), False, 'from astropy.table import Table, vstack, join\n'), ((23470, 23541), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['n_walkers', 'n_dim', 'ln_probability'], {'args': '(x, yerr)'}), '(n_walkers, n_dim, ln_probability, args=(x, yerr))\n', (23491, 23541), 
False, 'import emcee\n'), ((24073, 24085), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (24083, 24085), True, 'import matplotlib.pyplot as plt\n'), ((24169, 24195), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['ndim', '(5)'], {}), '(ndim, 5)\n', (24186, 24195), True, 'import matplotlib.gridspec as gridspec\n'), ((25732, 25797), 'numpy.array', 'np.array', (['[a_mcmc[0], b_mcmc[0], c_mcmc[0], d_mcmc[0], e_mcmc[0]]'], {}), '([a_mcmc[0], b_mcmc[0], c_mcmc[0], d_mcmc[0], e_mcmc[0]])\n', (25740, 25797), True, 'import numpy as np\n'), ((26417, 26456), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(5, 4 * 3 / 4.0)'}), '(1, figsize=(5, 4 * 3 / 4.0))\n', (26427, 26456), True, 'import matplotlib.pyplot as plt\n'), ((29369, 29388), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (29386, 29388), True, 'import matplotlib.pyplot as plt\n'), ((29391, 29478), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/Plots/Pi_radec_solution.png' % name)"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('%s/Plots/Pi_radec_solution.png' % name, dpi=600, bbox_inches=\n 'tight')\n", (29402, 29478), True, 'import matplotlib.pyplot as plt\n'), ((29474, 29484), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29482, 29484), True, 'import matplotlib.pyplot as plt\n'), ((29487, 29503), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (29496, 29503), True, 'import matplotlib.pyplot as plt\n'), ((29550, 29593), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(3.4, 3.4 * 3 / 4.0)'}), '(2, figsize=(3.4, 3.4 * 3 / 4.0))\n', (29560, 29593), True, 'import matplotlib.pyplot as plt\n'), ((29872, 29911), 'matplotlib.pyplot.plot', 'plt.plot', (['RAplot', 'DECplot', '"""k-"""'], {'lw': '(0.5)'}), "(RAplot, DECplot, 'k-', lw=0.5)\n", (29880, 29911), True, 'import matplotlib.pyplot as plt\n'), ((29915, 29969), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\Delta \\\\alpha \\\\cos 
\\\\delta$ (arcsec)"""'], {}), "('$\\\\Delta \\\\alpha \\\\cos \\\\delta$ (arcsec)')\n", (29925, 29969), True, 'import matplotlib.pyplot as plt\n'), ((29969, 30009), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta \\\\delta$ (arcsec)"""'], {}), "('$\\\\Delta \\\\delta$ (arcsec)')\n", (29979, 30009), True, 'import matplotlib.pyplot as plt\n'), ((30011, 30030), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (30028, 30030), True, 'import matplotlib.pyplot as plt\n'), ((30033, 30118), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/Plots/Pi_all_solution.png' % name)"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('%s/Plots/Pi_all_solution.png' % name, dpi=600, bbox_inches='tight'\n )\n", (30044, 30118), True, 'import matplotlib.pyplot as plt\n'), ((30114, 30124), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30122, 30124), True, 'import matplotlib.pyplot as plt\n'), ((30127, 30143), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (30136, 30143), True, 'import matplotlib.pyplot as plt\n'), ((30244, 30273), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {'figsize': '(8, 6)'}), '(3, figsize=(8, 6))\n', (30254, 30273), True, 'import matplotlib.pyplot as plt\n'), ((31720, 31808), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/Plots/Pi_RA_DEC_solution.png' % name)"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('%s/Plots/Pi_RA_DEC_solution.png' % name, dpi=600, bbox_inches=\n 'tight')\n", (31731, 31808), True, 'import matplotlib.pyplot as plt\n'), ((31805, 31815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31813, 31815), True, 'import matplotlib.pyplot as plt\n'), ((31818, 31834), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (31827, 31834), True, 'import matplotlib.pyplot as plt\n'), ((31923, 31952), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {'figsize': '(6, 6)'}), '(4, figsize=(6, 6))\n', (31933, 31952), True, 'import 
matplotlib.pyplot as plt\n'), ((32614, 32702), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/Plots/Pi_circle_solution.png' % name)"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('%s/Plots/Pi_circle_solution.png' % name, dpi=600, bbox_inches=\n 'tight')\n", (32625, 32702), True, 'import matplotlib.pyplot as plt\n'), ((32699, 32709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32707, 32709), True, 'import matplotlib.pyplot as plt\n'), ((32712, 32728), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (32721, 32728), True, 'import matplotlib.pyplot as plt\n'), ((33112, 33156), 'numpy.load', 'np.load', (["('%s/Results/MCMCresults.npy' % name)"], {}), "('%s/Results/MCMCresults.npy' % name)\n", (33119, 33156), True, 'import numpy as np\n'), ((33418, 33483), 'numpy.array', 'np.array', (['[a_mcmc[0], b_mcmc[0], c_mcmc[0], d_mcmc[0], e_mcmc[0]]'], {}), '([a_mcmc[0], b_mcmc[0], c_mcmc[0], d_mcmc[0], e_mcmc[0]])\n', (33426, 33483), True, 'import numpy as np\n'), ((33859, 33910), 'astropy.table.Table.read', 'Table.read', (["('%s/Results/Weighted_Epochs.csv' % name)"], {}), "('%s/Results/Weighted_Epochs.csv' % name)\n", (33869, 33910), False, 'from astropy.table import Table, vstack, join\n'), ((33916, 33962), 'astropy.table.Table.read', 'Table.read', (["('%s/Results/All_Epochs.csv' % name)"], {}), "('%s/Results/All_Epochs.csv' % name)\n", (33926, 33962), False, 'from astropy.table import Table, vstack, join\n'), ((35005, 35048), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(7.1, 3.4 * 3 / 4.0)'}), '(1, figsize=(7.1, 3.4 * 3 / 4.0))\n', (35015, 35048), True, 'import matplotlib.pyplot as plt\n'), ((35121, 35164), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(0, 0)'], {'rowspan': '(2)'}), '((2, 2), (0, 0), rowspan=2)\n', (35137, 35164), True, 'import matplotlib.pyplot as plt\n'), ((35174, 35206), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(0, 1)'], {}), '((2, 2), 
(0, 1))\n', (35190, 35206), True, 'import matplotlib.pyplot as plt\n'), ((35216, 35248), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(2, 2)', '(1, 1)'], {}), '((2, 2), (1, 1))\n', (35232, 35248), True, 'import matplotlib.pyplot as plt\n'), ((40321, 40346), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('%s' % Name)"], {}), "('%s' % Name)\n", (40333, 40346), True, 'import matplotlib.pyplot as plt\n'), ((40579, 40589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40587, 40589), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1661), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['[RAmin, RAmax]', '[DECmin, DECmin]', '[DECmax, DECmax]'], {'color': '"""0.5"""', 'alpha': '(0.5)', 'zorder': '(-100)'}), "([RAmin, RAmax], [DECmin, DECmin], [DECmax, DECmax], color=\n '0.5', alpha=0.5, zorder=-100)\n", (1567, 1661), True, 'import matplotlib.pyplot as plt\n'), ((1661, 1671), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1669, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1688), 'matplotlib.pyplot.pause', 'plt.pause', (['(2)'], {}), '(2)\n', (1685, 1688), True, 'import matplotlib.pyplot as plt\n'), ((1693, 1709), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1702, 1709), True, 'import matplotlib.pyplot as plt\n'), ((1922, 2014), 'matplotlib.pyplot.axvspan', 'plt.axvspan', (['clickpoints2[0][0]', 'clickpoints2[1][0]'], {'color': '"""0.5"""', 'alpha': '(0.5)', 'zorder': '(-100)'}), "(clickpoints2[0][0], clickpoints2[1][0], color='0.5', alpha=0.5,\n zorder=-100)\n", (1933, 2014), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2025), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2023, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2042), 'matplotlib.pyplot.pause', 'plt.pause', (['(2)'], {}), '(2)\n', (2039, 2042), True, 'import matplotlib.pyplot as plt\n'), ((2047, 2063), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2056, 2063), 
True, 'import matplotlib.pyplot as plt\n'), ((2109, 2125), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2118, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2425, 2449), 'astropy.time.Time', 'Time', (['mjds'], {'format': '"""mjd"""'}), "(mjds, format='mjd')\n", (2429, 2449), False, 'from astropy.time import Time\n'), ((4777, 4810), 'os.path.exists', 'os.path.exists', (["('%s/Plots' % name)"], {}), "('%s/Plots' % name)\n", (4791, 4810), False, 'import sys, os, os.path, time, gc\n'), ((4814, 4844), 'os.makedirs', 'os.makedirs', (["('%s/Plots' % name)"], {}), "('%s/Plots' % name)\n", (4825, 4844), False, 'import sys, os, os.path, time, gc\n'), ((4852, 4887), 'os.path.exists', 'os.path.exists', (["('%s/Results' % name)"], {}), "('%s/Results' % name)\n", (4866, 4887), False, 'import sys, os, os.path, time, gc\n'), ((4891, 4923), 'os.makedirs', 'os.makedirs', (["('%s/Results' % name)"], {}), "('%s/Results' % name)\n", (4902, 4923), False, 'import sys, os, os.path, time, gc\n'), ((6934, 6967), 'astropy.coordinates.solar_system_ephemeris.set', 'solar_system_ephemeris.set', (['"""jpl"""'], {}), "('jpl')\n", (6960, 6967), False, 'from astropy.coordinates import solar_system_ephemeris\n'), ((8708, 8737), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(7, 6)'}), '(2, figsize=(7, 6))\n', (8718, 8737), True, 'import matplotlib.pyplot as plt\n'), ((8869, 8889), 'numpy.arange', 'np.arange', (['(-1)', '(30)', '(2)'], {}), '(-1, 30, 2)\n', (8878, 8889), True, 'import numpy as np\n'), ((9029, 9049), 'numpy.arange', 'np.arange', (['(-1)', '(30)', '(2)'], {}), '(-1, 30, 2)\n', (9038, 9049), True, 'import numpy as np\n'), ((9135, 9149), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9147, 9149), True, 'import matplotlib.pyplot as plt\n'), ((9528, 9538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9536, 9538), True, 'import matplotlib.pyplot as plt\n'), ((9543, 9559), 'matplotlib.pyplot.close', 
'plt.close', (['"""all"""'], {}), "('all')\n", (9552, 9559), True, 'import matplotlib.pyplot as plt\n'), ((9807, 9930), 'numpy.where', 'np.where', (["((t['ra'] * d2a >= RAmin) & (t['ra'] * d2a <= RAmax) & (t['dec'] * d2a >=\n DECmin) & (t['dec'] * d2a <= DECmax))"], {}), "((t['ra'] * d2a >= RAmin) & (t['ra'] * d2a <= RAmax) & (t['dec'] *\n d2a >= DECmin) & (t['dec'] * d2a <= DECmax))\n", (9815, 9930), True, 'import numpy as np\n'), ((10012, 10027), 'matplotlib.pyplot.figure', 'plt.figure', (['(104)'], {}), '(104)\n', (10022, 10027), True, 'import matplotlib.pyplot as plt\n'), ((10854, 10944), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[1000], x[1000]]', '[0, 0]', '"""r--"""'], {'lw': '(0.75)', 'alpha': '(0.5)', 'label': '"""Upper limits"""'}), "([x[1000], x[1000]], [0, 0], 'r--', lw=0.75, alpha=0.5, label=\n 'Upper limits')\n", (10862, 10944), True, 'import matplotlib.pyplot as plt\n'), ((10941, 11051), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[1000], x[1000]]', '[0, 0]'], {'c': '"""0.5"""', 'ls': '""":"""', 'lw': '(0.75)', 'alpha': '(0.5)', 'label': '"""Individual measurements"""'}), "([x[1000], x[1000]], [0, 0], c='0.5', ls=':', lw=0.75, alpha=0.5,\n label='Individual measurements')\n", (10949, 11051), True, 'import matplotlib.pyplot as plt\n'), ((11049, 11067), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$W2$"""'], {}), "('$W2$')\n", (11059, 11067), True, 'import matplotlib.pyplot as plt\n'), ((11073, 11090), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""PDF"""'], {}), "('PDF')\n", (11083, 11090), True, 'import matplotlib.pyplot as plt\n'), ((11095, 11120), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (11105, 11120), True, 'import matplotlib.pyplot as plt\n'), ((11125, 11241), 'matplotlib.pyplot.title', 'plt.title', (['"""Select the lower and upper bound magnitudes\n(plot will close automatically when finished)"""'], {}), '(\n """Select the lower and upper bound magnitudes\n(plot will close automatically 
when finished)"""\n )\n', (11134, 11241), True, 'import matplotlib.pyplot as plt\n'), ((11233, 11243), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11241, 11243), True, 'import matplotlib.pyplot as plt\n'), ((11248, 11264), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (11257, 11264), True, 'import matplotlib.pyplot as plt\n'), ((11971, 12056), 'numpy.where', 'np.where', (["((t['mjd'][slice1][slice2] > bottom) & (t['mjd'][slice1][slice2] < top))"], {}), "((t['mjd'][slice1][slice2] > bottom) & (t['mjd'][slice1][slice2] < top)\n )\n", (11979, 12056), True, 'import numpy as np\n'), ((12534, 12547), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (12544, 12547), True, 'import matplotlib.pyplot as plt\n'), ((16825, 16835), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16833, 16835), True, 'import matplotlib.pyplot as plt\n'), ((16840, 16856), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (16849, 16856), True, 'import matplotlib.pyplot as plt\n'), ((18003, 18019), 'numpy.array', 'np.array', (['Ys1ALL'], {}), '(Ys1ALL)\n', (18011, 18019), True, 'import numpy as np\n'), ((18034, 18050), 'numpy.array', 'np.array', (['Ys2ALL'], {}), '(Ys2ALL)\n', (18042, 18050), True, 'import numpy as np\n'), ((18499, 18526), 'numpy.append', 'np.append', (['XsALL', 'XsALL0[i]'], {}), '(XsALL, XsALL0[i])\n', (18508, 18526), True, 'import numpy as np\n'), ((18542, 18571), 'numpy.append', 'np.append', (['Ys1ALL', 'Ys1ALL0[i]'], {}), '(Ys1ALL, Ys1ALL0[i])\n', (18551, 18571), True, 'import numpy as np\n'), ((18587, 18616), 'numpy.append', 'np.append', (['Ys2ALL', 'Ys2ALL0[i]'], {}), '(Ys2ALL, Ys2ALL0[i])\n', (18596, 18616), True, 'import numpy as np\n'), ((19169, 19190), 'numpy.mean', 'np.mean', (['(unYs1 * d2ma)'], {}), '(unYs1 * d2ma)\n', (19176, 19190), True, 'import numpy as np\n'), ((19192, 19215), 'numpy.median', 'np.median', (['(unYs1 * d2ma)'], {}), '(unYs1 * d2ma)\n', (19201, 19215), True, 
'import numpy as np\n'), ((19260, 19281), 'numpy.mean', 'np.mean', (['(unYs2 * d2ma)'], {}), '(unYs2 * d2ma)\n', (19267, 19281), True, 'import numpy as np\n'), ((19283, 19306), 'numpy.median', 'np.median', (['(unYs2 * d2ma)'], {}), '(unYs2 * d2ma)\n', (19292, 19306), True, 'import numpy as np\n'), ((19670, 19705), 'numpy.sqrt', 'np.sqrt', (['(unYs1 ** 2 + unYs1[0] ** 2)'], {}), '(unYs1 ** 2 + unYs1[0] ** 2)\n', (19677, 19705), True, 'import numpy as np\n'), ((19899, 19934), 'numpy.sqrt', 'np.sqrt', (['(unYs2 ** 2 + unYs2[0] ** 2)'], {}), '(unYs2 ** 2 + unYs2[0] ** 2)\n', (19906, 19934), True, 'import numpy as np\n'), ((24381, 24399), 'numpy.hstack', 'np.hstack', (['walkers'], {}), '(walkers)\n', (24390, 24399), True, 'import numpy as np\n'), ((24412, 24435), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[ii, :5]'], {}), '(gs[ii, :5])\n', (24423, 24435), True, 'import matplotlib.pyplot as plt\n'), ((24457, 24476), 'numpy.arange', 'np.arange', (['nsamples'], {}), '(nsamples)\n', (24466, 24476), True, 'import numpy as np\n'), ((25036, 25089), 'numpy.save', 'np.save', (["('%s/Results/MCMCresults.npy' % name)", 'samples'], {}), "('%s/Results/MCMCresults.npy' % name, samples)\n", (25043, 25089), True, 'import numpy as np\n'), ((25659, 25669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25667, 25669), True, 'import matplotlib.pyplot as plt\n'), ((25674, 25690), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (25683, 25690), True, 'import matplotlib.pyplot as plt\n'), ((26197, 26209), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (26203, 26209), True, 'import numpy as np\n'), ((26211, 26223), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (26217, 26223), True, 'import numpy as np\n'), ((30803, 30815), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (30809, 30815), True, 'import numpy as np\n'), ((30817, 30829), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (30823, 30829), True, 'import numpy as np\n'), ((33816, 
33849), 'astropy.coordinates.solar_system_ephemeris.set', 'solar_system_ephemeris.set', (['"""jpl"""'], {}), "('jpl')\n", (33842, 33849), False, 'from astropy.coordinates import solar_system_ephemeris\n'), ((34226, 34261), 'numpy.sqrt', 'np.sqrt', (['(unYs1 ** 2 + unYs1[0] ** 2)'], {}), '(unYs1 ** 2 + unYs1[0] ** 2)\n', (34233, 34261), True, 'import numpy as np\n'), ((34279, 34314), 'numpy.sqrt', 'np.sqrt', (['(unYs2 ** 2 + unYs2[0] ** 2)'], {}), '(unYs2 ** 2 + unYs2[0] ** 2)\n', (34286, 34314), True, 'import numpy as np\n'), ((34387, 34399), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (34393, 34399), True, 'import numpy as np\n'), ((34401, 34413), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (34407, 34413), True, 'import numpy as np\n'), ((34669, 34694), 'numpy.array', 'np.array', (['[a, b, c, d, e]'], {}), '([a, b, c, d, e])\n', (34677, 34694), True, 'import numpy as np\n'), ((39041, 39053), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (39047, 39053), True, 'import numpy as np\n'), ((39055, 39067), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (39061, 39067), True, 'import numpy as np\n'), ((40412, 40489), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/Plots/FullSolution.pdf' % name)"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('%s/Plots/FullSolution.pdf' % name, dpi=600, bbox_inches='tight')\n", (40423, 40489), True, 'import matplotlib.pyplot as plt\n'), ((40500, 40577), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/Plots/FullSolution.png' % name)"], {'dpi': '(600)', 'bbox_inches': '"""tight"""'}), "('%s/Plots/FullSolution.png' % name, dpi=600, bbox_inches='tight')\n", (40511, 40577), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2623), 'numpy.sin', 'np.sin', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (2596, 2623), True, 'import numpy as np\n'), ((2628, 2661), 'numpy.cos', 'np.cos', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (2634, 2661), True, 'import 
numpy as np\n'), ((2839, 2873), 'numpy.cos', 'np.cos', (['(decs / d2a * np.pi / 180.0)'], {}), '(decs / d2a * np.pi / 180.0)\n', (2845, 2873), True, 'import numpy as np\n'), ((4993, 5053), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['radecstr'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(radecstr, unit=(u.deg, u.deg), frame='icrs')\n", (5008, 5053), True, 'import astropy.coordinates as coords\n'), ((5184, 5244), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['radecstr'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(radecstr, unit=(u.deg, u.deg), frame='icrs')\n", (5199, 5244), True, 'import astropy.coordinates as coords\n'), ((5591, 5651), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['radecstr'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(radecstr, unit=(u.deg, u.deg), frame='icrs')\n", (5606, 5651), True, 'import astropy.coordinates as coords\n'), ((7245, 7269), 'astropy.time.Time', 'Time', (['mjds'], {'format': '"""mjd"""'}), "(mjds, format='mjd')\n", (7249, 7269), False, 'from astropy.time import Time\n'), ((10677, 10742), 'matplotlib.pyplot.axvline', 'plt.axvline', (['w2'], {'ls': '""":"""', 'lw': '(0.75)', 'c': '"""0.5"""', 'alpha': '(0.5)', 'zorder': '(-100)'}), "(w2, ls=':', lw=0.75, c='0.5', alpha=0.5, zorder=-100)\n", (10688, 10742), True, 'import matplotlib.pyplot as plt\n'), ((10758, 10790), 'scipy.stats.norm.pdf', 'norm.pdf', (['x'], {'loc': 'w2', 'scale': 'w2err'}), '(x, loc=w2, scale=w2err)\n', (10766, 10790), False, 'from scipy.stats import norm\n'), ((11453, 11526), 'numpy.where', 'np.where', (["((t['w2mpro'][slice1] >= W2min) & (t['w2mpro'][slice1] <= W2max))"], {}), "((t['w2mpro'][slice1] >= W2min) & (t['w2mpro'][slice1] <= W2max))\n", (11461, 11526), True, 'import numpy as np\n'), ((11831, 11863), 'numpy.min', 'np.min', (["t['mjd'][slice1][slice2]"], {}), "(t['mjd'][slice1][slice2])\n", (11837, 11863), True, 'import numpy as np\n'), ((11900, 11932), 'numpy.min', 'np.min', 
(["t['mjd'][slice1][slice2]"], {}), "(t['mjd'][slice1][slice2])\n", (11906, 11932), True, 'import numpy as np\n'), ((13542, 13592), 'astropy.stats.sigma_clip', 'sigma_clip', (['shiftedRAs'], {'sigma': 'sigma', 'maxiters': 'None'}), '(shiftedRAs, sigma=sigma, maxiters=None)\n', (13552, 13592), False, 'from astropy.stats import sigma_clip\n'), ((13614, 13665), 'astropy.stats.sigma_clip', 'sigma_clip', (['shiftedDECs'], {'sigma': 'sigma', 'maxiters': 'None'}), '(shiftedDECs, sigma=sigma, maxiters=None)\n', (13624, 13665), False, 'from astropy.stats import sigma_clip\n'), ((13729, 13799), 'astropy.stats.sigma_clip', 'sigma_clip', (["t['ra'][slice1][slice2][group]"], {'sigma': 'sigma', 'maxiters': 'None'}), "(t['ra'][slice1][slice2][group], sigma=sigma, maxiters=None)\n", (13739, 13799), False, 'from astropy.stats import sigma_clip\n'), ((13821, 13892), 'astropy.stats.sigma_clip', 'sigma_clip', (["t['dec'][slice1][slice2][group]"], {'sigma': 'sigma', 'maxiters': 'None'}), "(t['dec'][slice1][slice2][group], sigma=sigma, maxiters=None)\n", (13831, 13892), False, 'from astropy.stats import sigma_clip\n'), ((13906, 13952), 'numpy.where', 'np.where', (['(~filteredRA.mask & ~filteredDEC.mask)'], {}), '(~filteredRA.mask & ~filteredDEC.mask)\n', (13914, 13952), True, 'import numpy as np\n'), ((15360, 15410), 'numpy.average', 'np.average', (["t['mjd'][slice1][slice2][group][index]"], {}), "(t['mjd'][slice1][slice2][group][index])\n", (15370, 15410), True, 'import numpy as np\n'), ((15428, 15551), 'numpy.average', 'np.average', (["t['ra'][slice1][slice2][group][index]"], {'weights': "(1.0 / (t['sigra'][slice1][slice2][group][index] / d2a) ** 2)"}), "(t['ra'][slice1][slice2][group][index], weights=1.0 / (t['sigra']\n [slice1][slice2][group][index] / d2a) ** 2)\n", (15438, 15551), True, 'import numpy as np\n'), ((15559, 15684), 'numpy.average', 'np.average', (["t['dec'][slice1][slice2][group][index]"], {'weights': "(1.0 / (t['sigdec'][slice1][slice2][group][index] / d2a) ** 2)"}), 
"(t['dec'][slice1][slice2][group][index], weights=1.0 / (t[\n 'sigdec'][slice1][slice2][group][index] / d2a) ** 2)\n", (15569, 15684), True, 'import numpy as np\n'), ((17838, 17854), 'numpy.array', 'np.array', (['Ys1ALL'], {}), '(Ys1ALL)\n', (17846, 17854), True, 'import numpy as np\n'), ((17880, 17896), 'numpy.array', 'np.array', (['Ys2ALL'], {}), '(Ys2ALL)\n', (17888, 17896), True, 'import numpy as np\n'), ((18062, 18076), 'numpy.array', 'np.array', (['MJDs'], {}), '(MJDs)\n', (18070, 18076), True, 'import numpy as np\n'), ((18097, 18112), 'numpy.array', 'np.array', (['unYs1'], {}), '(unYs1)\n', (18105, 18112), True, 'import numpy as np\n'), ((18133, 18148), 'numpy.array', 'np.array', (['unYs2'], {}), '(unYs2)\n', (18141, 18148), True, 'import numpy as np\n'), ((18628, 18643), 'numpy.array', 'np.array', (['XsALL'], {}), '(XsALL)\n', (18636, 18643), True, 'import numpy as np\n'), ((18665, 18681), 'numpy.array', 'np.array', (['Ys1ALL'], {}), '(Ys1ALL)\n', (18673, 18681), True, 'import numpy as np\n'), ((18703, 18719), 'numpy.array', 'np.array', (['Ys2ALL'], {}), '(Ys2ALL)\n', (18711, 18719), True, 'import numpy as np\n'), ((20976, 20989), 'numpy.diag', 'np.diag', (['popc'], {}), '(popc)\n', (20983, 20989), True, 'import numpy as np\n'), ((21841, 21854), 'numpy.diag', 'np.diag', (['popc'], {}), '(popc)\n', (21848, 21854), True, 'import numpy as np\n'), ((22937, 22956), 'numpy.isfinite', 'np.isfinite', (['priors'], {}), '(priors)\n', (22948, 22956), True, 'import numpy as np\n'), ((23165, 23204), 'numpy.concatenate', 'np.concatenate', (['[RA_Uncert, DEC_Uncert]'], {}), '([RA_Uncert, DEC_Uncert])\n', (23179, 23204), True, 'import numpy as np\n'), ((25828, 25858), 'numpy.max', 'np.max', (['[a_mcmc[1], a_mcmc[2]]'], {}), '([a_mcmc[1], a_mcmc[2]])\n', (25834, 25858), True, 'import numpy as np\n'), ((25860, 25890), 'numpy.max', 'np.max', (['[b_mcmc[1], b_mcmc[2]]'], {}), '([b_mcmc[1], b_mcmc[2]])\n', (25866, 25890), True, 'import numpy as np\n'), ((25892, 25922), 
'numpy.max', 'np.max', (['[c_mcmc[1], c_mcmc[2]]'], {}), '([c_mcmc[1], c_mcmc[2]])\n', (25898, 25922), True, 'import numpy as np\n'), ((25952, 25982), 'numpy.max', 'np.max', (['[d_mcmc[1], d_mcmc[2]]'], {}), '([d_mcmc[1], d_mcmc[2]])\n', (25958, 25982), True, 'import numpy as np\n'), ((25984, 26014), 'numpy.max', 'np.max', (['[e_mcmc[1], e_mcmc[2]]'], {}), '([e_mcmc[1], e_mcmc[2]])\n', (25990, 26014), True, 'import numpy as np\n'), ((26960, 26972), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (26966, 26972), True, 'import numpy as np\n'), ((26974, 26986), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (26980, 26986), True, 'import numpy as np\n'), ((28055, 28067), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (28061, 28067), True, 'import numpy as np\n'), ((28069, 28081), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (28075, 28081), True, 'import numpy as np\n'), ((29690, 29720), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (29696, 29720), True, 'import numpy as np\n'), ((33514, 33544), 'numpy.max', 'np.max', (['[a_mcmc[1], a_mcmc[2]]'], {}), '([a_mcmc[1], a_mcmc[2]])\n', (33520, 33544), True, 'import numpy as np\n'), ((33546, 33576), 'numpy.max', 'np.max', (['[b_mcmc[1], b_mcmc[2]]'], {}), '([b_mcmc[1], b_mcmc[2]])\n', (33552, 33576), True, 'import numpy as np\n'), ((33578, 33608), 'numpy.max', 'np.max', (['[c_mcmc[1], c_mcmc[2]]'], {}), '([c_mcmc[1], c_mcmc[2]])\n', (33584, 33608), True, 'import numpy as np\n'), ((33638, 33668), 'numpy.max', 'np.max', (['[d_mcmc[1], d_mcmc[2]]'], {}), '([d_mcmc[1], d_mcmc[2]])\n', (33644, 33668), True, 'import numpy as np\n'), ((33670, 33700), 'numpy.max', 'np.max', (['[e_mcmc[1], e_mcmc[2]]'], {}), '([e_mcmc[1], e_mcmc[2]])\n', (33676, 33700), True, 'import numpy as np\n'), ((35648, 35660), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (35654, 35660), True, 'import numpy as np\n'), ((35662, 35674), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (35668, 
35674), True, 'import numpy as np\n'), ((36920, 36932), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (36926, 36932), True, 'import numpy as np\n'), ((36934, 36946), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (36940, 36946), True, 'import numpy as np\n'), ((2707, 2741), 'numpy.sin', 'np.sin', (['(decs / d2a * np.pi / 180.0)'], {}), '(decs / d2a * np.pi / 180.0)\n', (2713, 2741), True, 'import numpy as np\n'), ((2788, 2822), 'numpy.sin', 'np.sin', (['(decs / d2a * np.pi / 180.0)'], {}), '(decs / d2a * np.pi / 180.0)\n', (2794, 2822), True, 'import numpy as np\n'), ((5398, 5458), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['radecstr'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(radecstr, unit=(u.deg, u.deg), frame='icrs')\n", (5413, 5458), True, 'import astropy.coordinates as coords\n'), ((5816, 5877), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['ra0', 'dec0'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(ra0, dec0, unit=(u.deg, u.deg), frame='icrs')\n", (5831, 5877), True, 'import astropy.coordinates as coords\n'), ((6008, 6069), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['ra0', 'dec0'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(ra0, dec0, unit=(u.deg, u.deg), frame='icrs')\n", (6023, 6069), True, 'import astropy.coordinates as coords\n'), ((6417, 6478), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['ra0', 'dec0'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(ra0, dec0, unit=(u.deg, u.deg), frame='icrs')\n", (6432, 6478), True, 'import astropy.coordinates as coords\n'), ((7398, 7431), 'numpy.sin', 'np.sin', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (7404, 7431), True, 'import numpy as np\n'), ((7436, 7469), 'numpy.cos', 'np.cos', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (7442, 7469), True, 'import numpy as np\n'), ((7653, 7687), 'numpy.cos', 'np.cos', (['(decs / d2a * np.pi / 180.0)'], {}), '(decs / d2a * np.pi / 
180.0)\n', (7659, 7687), True, 'import numpy as np\n'), ((10114, 10141), 'numpy.min', 'np.min', (["t['w2mpro'][slice1]"], {}), "(t['w2mpro'][slice1])\n", (10120, 10141), True, 'import numpy as np\n'), ((10176, 10203), 'numpy.max', 'np.max', (["t['w2mpro'][slice1]"], {}), "(t['w2mpro'][slice1])\n", (10182, 10203), True, 'import numpy as np\n'), ((10816, 10836), 'numpy.trapz', 'np.trapz', (['W2pdf'], {'x': 'x'}), '(W2pdf, x=x)\n', (10824, 10836), True, 'import numpy as np\n'), ((13107, 13185), 'Register_Frames.GetRegistrators', 'reg.GetRegistrators', (['name', 'epochs00'], {'subepoch': 'groupcount', 'ra0': 'ra00', 'dec0': 'dec00'}), '(name, epochs00, subepoch=groupcount, ra0=ra00, dec0=dec00)\n', (13126, 13185), True, 'import Register_Frames as reg\n'), ((13247, 13353), 'Register_Frames.GetRegistrators', 'reg.GetRegistrators', (['name', 'epochs00'], {'subepoch': 'groupcount', 'ra0': 'ra00', 'dec0': 'dec00', 'radius': 'RegisterRadius'}), '(name, epochs00, subepoch=groupcount, ra0=ra00, dec0=\n dec00, radius=RegisterRadius)\n', (13266, 13353), True, 'import Register_Frames as reg\n'), ((17200, 17273), 'Neighbor_Offsets.GetCalibrators', 'ne.GetCalibrators', (['name', 'Epochs'], {'radecstr': 'radecstr', 'radius': 'RegisterRadius'}), '(name, Epochs, radecstr=radecstr, radius=RegisterRadius)\n', (17217, 17273), True, 'import Neighbor_Offsets as ne\n'), ((17316, 17366), 'Neighbor_Offsets.GetCalibrators', 'ne.GetCalibrators', (['name', 'Epochs'], {'radecstr': 'radecstr'}), '(name, Epochs, radecstr=radecstr)\n', (17333, 17366), True, 'import Neighbor_Offsets as ne\n'), ((17929, 17942), 'numpy.array', 'np.array', (['Ys1'], {}), '(Ys1)\n', (17937, 17942), True, 'import numpy as np\n'), ((17965, 17978), 'numpy.array', 'np.array', (['Ys2'], {}), '(Ys2)\n', (17973, 17978), True, 'import numpy as np\n'), ((19347, 19368), 'numpy.mean', 'np.mean', (['(unYs1 * d2ma)'], {}), '(unYs1 * d2ma)\n', (19354, 19368), True, 'import numpy as np\n'), ((19371, 19392), 'numpy.mean', 'np.mean', 
(['(unYs2 * d2ma)'], {}), '(unYs2 * d2ma)\n', (19378, 19392), True, 'import numpy as np\n'), ((19585, 19597), 'numpy.max', 'np.max', (['MJDs'], {}), '(MJDs)\n', (19591, 19597), True, 'import numpy as np\n'), ((19600, 19612), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (19606, 19612), True, 'import numpy as np\n'), ((27220, 27232), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (27226, 27232), True, 'import numpy as np\n'), ((27664, 27676), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (27670, 27676), True, 'import numpy as np\n'), ((27802, 27832), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (27808, 27832), True, 'import numpy as np\n'), ((27967, 27997), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (27973, 27997), True, 'import numpy as np\n'), ((28308, 28320), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (28314, 28320), True, 'import numpy as np\n'), ((28773, 28785), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (28779, 28785), True, 'import numpy as np\n'), ((36011, 36023), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (36017, 36023), True, 'import numpy as np\n'), ((36526, 36538), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (36532, 36538), True, 'import numpy as np\n'), ((36677, 36707), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (36683, 36707), True, 'import numpy as np\n'), ((36832, 36862), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (36838, 36862), True, 'import numpy as np\n'), ((37269, 37281), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (37275, 37281), True, 'import numpy as np\n'), ((37813, 37825), 'numpy.min', 'np.min', (['MJDs'], {}), '(MJDs)\n', (37819, 37825), True, 'import numpy as np\n'), ((2677, 2710), 'numpy.cos', 'np.cos', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (2683, 2710), True, 'import numpy 
as np\n'), ((2758, 2791), 'numpy.sin', 'np.sin', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (2764, 2791), True, 'import numpy as np\n'), ((3102, 3135), 'numpy.concatenate', 'np.concatenate', (['[RAsend, DECsend]'], {}), '([RAsend, DECsend])\n', (3116, 3135), True, 'import numpy as np\n'), ((6223, 6284), 'astropy.coordinates.SkyCoord', 'coords.SkyCoord', (['ra0', 'dec0'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(ra0, dec0, unit=(u.deg, u.deg), frame='icrs')\n", (6238, 6284), True, 'import astropy.coordinates as coords\n'), ((7517, 7551), 'numpy.sin', 'np.sin', (['(decs / d2a * np.pi / 180.0)'], {}), '(decs / d2a * np.pi / 180.0)\n', (7523, 7551), True, 'import numpy as np\n'), ((7600, 7634), 'numpy.sin', 'np.sin', (['(decs / d2a * np.pi / 180.0)'], {}), '(decs / d2a * np.pi / 180.0)\n', (7606, 7634), True, 'import numpy as np\n'), ((8910, 8926), 'numpy.min', 'np.min', (["t['mjd']"], {}), "(t['mjd'])\n", (8916, 8926), True, 'import numpy as np\n'), ((9070, 9086), 'numpy.min', 'np.min', (["t['mjd']"], {}), "(t['mjd'])\n", (9076, 9086), True, 'import numpy as np\n'), ((10144, 10174), 'numpy.max', 'np.max', (["t['w2sigmpro'][slice1]"], {}), "(t['w2sigmpro'][slice1])\n", (10150, 10174), True, 'import numpy as np\n'), ((10206, 10236), 'numpy.max', 'np.max', (["t['w2sigmpro'][slice1]"], {}), "(t['w2sigmpro'][slice1])\n", (10212, 10236), True, 'import numpy as np\n'), ((10497, 10561), 'matplotlib.pyplot.axvline', 'plt.axvline', (['w2'], {'ls': '"""--"""', 'lw': '(0.75)', 'c': '"""r"""', 'alpha': '(0.5)', 'zorder': '(-100)'}), "(w2, ls='--', lw=0.75, c='r', alpha=0.5, zorder=-100)\n", (10508, 10561), True, 'import matplotlib.pyplot as plt\n'), ((14788, 14911), 'numpy.average', 'np.average', (["t['ra'][slice1][slice2][group][index]"], {'weights': "(1.0 / (t['sigra'][slice1][slice2][group][index] / d2a) ** 2)"}), "(t['ra'][slice1][slice2][group][index], weights=1.0 / (t['sigra']\n [slice1][slice2][group][index] / d2a) ** 2)\n", (14798, 
14911), True, 'import numpy as np\n'), ((14927, 15052), 'numpy.average', 'np.average', (["t['dec'][slice1][slice2][group][index]"], {'weights': "(1.0 / (t['sigdec'][slice1][slice2][group][index] / d2a) ** 2)"}), "(t['dec'][slice1][slice2][group][index], weights=1.0 / (t[\n 'sigdec'][slice1][slice2][group][index] / d2a) ** 2)\n", (14937, 15052), True, 'import numpy as np\n'), ((17464, 17538), 'Neighbor_Offsets.GetCalibrators', 'ne.GetCalibrators', (['name', 'Epochs'], {'ra0': 'ra0', 'dec0': 'dec0', 'radius': 'RegisterRadius'}), '(name, Epochs, ra0=ra0, dec0=dec0, radius=RegisterRadius)\n', (17481, 17538), True, 'import Neighbor_Offsets as ne\n'), ((17582, 17633), 'Neighbor_Offsets.GetCalibrators', 'ne.GetCalibrators', (['name', 'Epochs'], {'ra0': 'ra0', 'dec0': 'dec0'}), '(name, Epochs, ra0=ra0, dec0=dec0)\n', (17599, 17633), True, 'import Neighbor_Offsets as ne\n'), ((17741, 17754), 'numpy.array', 'np.array', (['Ys1'], {}), '(Ys1)\n', (17749, 17754), True, 'import numpy as np\n'), ((17788, 17801), 'numpy.array', 'np.array', (['Ys2'], {}), '(Ys2)\n', (17796, 17801), True, 'import numpy as np\n'), ((20623, 20662), 'numpy.concatenate', 'np.concatenate', (['[RA_Uncert, DEC_Uncert]'], {}), '([RA_Uncert, DEC_Uncert])\n', (20637, 20662), True, 'import numpy as np\n'), ((22378, 22393), 'numpy.log', 'np.log', (['invsig2'], {}), '(invsig2)\n', (22384, 22393), True, 'import numpy as np\n'), ((23250, 23272), 'numpy.random.randn', 'np.random.randn', (['n_dim'], {}), '(n_dim)\n', (23265, 23272), True, 'import numpy as np\n'), ((25233, 25277), 'numpy.percentile', 'np.percentile', (['samples', '[16, 50, 84]'], {'axis': '(0)'}), '(samples, [16, 50, 84], axis=0)\n', (25246, 25277), True, 'import numpy as np\n'), ((26685, 26715), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (26691, 26715), True, 'import numpy as np\n'), ((26736, 26771), 'numpy.sqrt', 'np.sqrt', (['(unYs1 ** 2 + unYs1[0] ** 2)'], {}), '(unYs1 ** 2 + unYs1[0] ** 2)\n', 
(26743, 26771), True, 'import numpy as np\n'), ((26872, 26902), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (26878, 26902), True, 'import numpy as np\n'), ((27370, 27405), 'numpy.sqrt', 'np.sqrt', (['(unYs2 ** 2 + unYs2[0] ** 2)'], {}), '(unYs2 ** 2 + unYs2[0] ** 2)\n', (27377, 27405), True, 'import numpy as np\n'), ((27836, 27871), 'numpy.sqrt', 'np.sqrt', (['(unYs1 ** 2 + unYs1[0] ** 2)'], {}), '(unYs1 ** 2 + unYs1[0] ** 2)\n', (27843, 27871), True, 'import numpy as np\n'), ((28465, 28500), 'numpy.sqrt', 'np.sqrt', (['(unYs2 ** 2 + unYs2[0] ** 2)'], {}), '(unYs2 ** 2 + unYs2[0] ** 2)\n', (28472, 28500), True, 'import numpy as np\n'), ((30437, 30467), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (30443, 30467), True, 'import numpy as np\n'), ((30638, 30668), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (30644, 30668), True, 'import numpy as np\n'), ((32081, 32111), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (32087, 32111), True, 'import numpy as np\n'), ((33301, 33345), 'numpy.percentile', 'np.percentile', (['samples', '[16, 50, 84]'], {'axis': '(0)'}), '(samples, [16, 50, 84], axis=0)\n', (33314, 33345), True, 'import numpy as np\n'), ((35397, 35427), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (35403, 35427), True, 'import numpy as np\n'), ((35560, 35590), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (35566, 35590), True, 'import numpy as np\n'), ((38671, 38701), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (38677, 38701), True, 'import numpy as np\n'), ((38874, 38904), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (38880, 38904), True, 'import numpy as np\n'), ((7487, 7520), 'numpy.cos', 'np.cos', (['(ras / d2a * 
np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (7493, 7520), True, 'import numpy as np\n'), ((7570, 7603), 'numpy.sin', 'np.sin', (['(ras / d2a * np.pi / 180.0)'], {}), '(ras / d2a * np.pi / 180.0)\n', (7576, 7603), True, 'import numpy as np\n'), ((7932, 7965), 'numpy.concatenate', 'np.concatenate', (['[RAsend, DECsend]'], {}), '([RAsend, DECsend])\n', (7946, 7965), True, 'import numpy as np\n'), ((14527, 14577), 'numpy.average', 'np.average', (["t['mjd'][slice1][slice2][group][index]"], {}), "(t['mjd'][slice1][slice2][group][index])\n", (14537, 14577), True, 'import numpy as np\n'), ((14717, 14767), 'numpy.average', 'np.average', (["t['mjd'][slice1][slice2][group][index]"], {}), "(t['mjd'][slice1][slice2][group][index])\n", (14727, 14767), True, 'import numpy as np\n'), ((15074, 15119), 'numpy.std', 'np.std', (["t['ra'][slice1][slice2][group][index]"], {}), "(t['ra'][slice1][slice2][group][index])\n", (15080, 15119), True, 'import numpy as np\n'), ((15200, 15246), 'numpy.std', 'np.std', (["t['dec'][slice1][slice2][group][index]"], {}), "(t['dec'][slice1][slice2][group][index])\n", (15206, 15246), True, 'import numpy as np\n'), ((15745, 15804), 'numpy.sum', 'np.sum', (["(1.0 / t['sigra'][slice1][slice2][group][index] ** 2)"], {}), "(1.0 / t['sigra'][slice1][slice2][group][index] ** 2)\n", (15751, 15804), True, 'import numpy as np\n'), ((15842, 15902), 'numpy.sum', 'np.sum', (["(1.0 / t['sigdec'][slice1][slice2][group][index] ** 2)"], {}), "(1.0 / t['sigdec'][slice1][slice2][group][index] ** 2)\n", (15848, 15902), True, 'import numpy as np\n'), ((19456, 19491), 'numpy.sqrt', 'np.sqrt', (['(unYs1 ** 2 + unYs1[0] ** 2)'], {}), '(unYs1 ** 2 + unYs1[0] ** 2)\n', (19463, 19491), True, 'import numpy as np\n'), ((19506, 19541), 'numpy.sqrt', 'np.sqrt', (['(unYs2 ** 2 + unYs2[0] ** 2)'], {}), '(unYs2 ** 2 + unYs2[0] ** 2)\n', (19513, 19541), True, 'import numpy as np\n'), ((21766, 21805), 'numpy.concatenate', 'np.concatenate', (['[RA_Uncert, DEC_Uncert]'], {}), 
'([RA_Uncert, DEC_Uncert])\n', (21780, 21805), True, 'import numpy as np\n'), ((22257, 22271), 'numpy.array', 'np.array', (['yerr'], {}), '(yerr)\n', (22265, 22271), True, 'import numpy as np\n'), ((20526, 20556), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (20532, 20556), True, 'import numpy as np\n'), ((22087, 22117), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (22093, 22117), True, 'import numpy as np\n'), ((22318, 22329), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (22326, 22329), True, 'import numpy as np\n'), ((21618, 21648), 'numpy.cos', 'np.cos', (['(Ys2[0] * np.pi / 180.0)'], {}), '(Ys2[0] * np.pi / 180.0)\n', (21624, 21648), True, 'import numpy as np\n'), ((22332, 22350), 'numpy.array', 'np.array', (['modelval'], {}), '(modelval)\n', (22340, 22350), True, 'import numpy as np\n')] |
import torch
from abc import abstractmethod
from numpy import inf
import numpy as np
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, model, criterion, metric_ftns, optimizer, config, fold_id):
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
# setup GPU device if available, move model into configured device
self.device, device_ids = self._prepare_device(config['n_gpu'])
self.model = model.to(self.device)
if len(device_ids) > 1:
self.model = torch.nn.DataParallel(model, device_ids=device_ids)
self.criterion = criterion
self.metric_ftns = metric_ftns
self.optimizer = optimizer
cfg_trainer = config['trainer']
self.epochs = cfg_trainer['epochs']
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get('monitor', 'off')
self.fold_id = fold_id
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
if config.resume is not None:
self._resume_checkpoint(config.resume)
@abstractmethod
def _train_epoch(self, epoch, total_epochs):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
not_improved_count = 0
all_outs = []
all_trgs = []
for epoch in range(self.start_epoch, self.epochs + 1):
result, epoch_outs, epoch_trgs = self._train_epoch(epoch, self.epochs)
# save logged informations into log dict
log = {'epoch': epoch}
log.update(result)
all_outs.extend(epoch_outs)
all_trgs.extend(epoch_trgs)
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = True
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
not_improved_count = 0
best = True
else:
not_improved_count += 1
if not_improved_count > self.early_stop:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
outs_name = "outs_" + str(self.fold_id)
trgs_name = "trgs_" + str(self.fold_id)
np.save(self.config._save_dir / outs_name, all_outs)
np.save(self.config._save_dir / trgs_name, all_trgs)
if self.fold_id == self.config["data_loader"]["args"]["num_folds"] - 1:
self._calc_metrics()
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning("Warning: There\'s no GPU available on this machine,"
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def _save_checkpoint(self, epoch, save_best=True):
"""
Saving checkpoints
:param epoch: current epoch number
:param log: logging information of the epoch
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
arch = type(self.model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = str(self.checkpoint_dir / 'model_best.pth')
torch.save(state, best_path)
self.logger.info("Saving current best: model_best.pth ...")
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
resume_path = str(resume_path)
self.logger.info("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['arch'] != self.config['arch']:
self.logger.warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
def _calc_metrics(self):
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import pandas as pd
import os
from os import walk
n_folds = self.config["data_loader"]["args"]["num_folds"]
all_outs = []
all_trgs = []
outs_list = []
trgs_list = []
save_dir = os.path.abspath(os.path.join(self.checkpoint_dir, os.pardir))
for root, dirs, files in os.walk(save_dir):
for file in files:
if "outs" in file:
outs_list.append(os.path.join(root, file))
if "trgs" in file:
trgs_list.append(os.path.join(root, file))
if len(outs_list)==self.config["data_loader"]["args"]["num_folds"]:
for i in range(len(outs_list)):
outs = np.load(outs_list[i])
trgs = np.load(trgs_list[i])
all_outs.extend(outs)
all_trgs.extend(trgs)
all_trgs = np.array(all_trgs).astype(int)
all_outs = np.array(all_outs).astype(int)
r = classification_report(all_trgs, all_outs, digits=6, output_dict=True)
cm = confusion_matrix(all_trgs, all_outs)
df = pd.DataFrame(r)
df["cohen"] = cohen_kappa_score(all_trgs, all_outs)
df["accuracy"] = accuracy_score(all_trgs, all_outs)
df = df * 100
file_name = self.config["name"] + "_classification_report.xlsx"
report_Save_path = os.path.join(save_dir, file_name)
df.to_excel(report_Save_path)
cm_file_name = self.config["name"] + "_confusion_matrix.torch"
cm_Save_path = os.path.join(save_dir, cm_file_name)
torch.save(cm, cm_Save_path)
# Uncomment if you want to copy some of the important files into the experiement folder
# from shutil import copyfile
# copyfile("model/model.py", os.path.join(self.checkpoint_dir, "model.py"))
# copyfile("model/loss.py", os.path.join(self.checkpoint_dir, "loss.py"))
# copyfile("trainer/trainer.py", os.path.join(self.checkpoint_dir, "trainer.py"))
# copyfile("train_Kfold_CV.py", os.path.join(self.checkpoint_dir, "train_Kfold_CV.py"))
# copyfile("config.json", os.path.join(self.checkpoint_dir, "config.json"))
# copyfile("data_loader/data_loaders.py", os.path.join(self.checkpoint_dir, "data_loaders.py"))
| [
"sklearn.metrics.confusion_matrix",
"sklearn.metrics.classification_report",
"torch.load",
"os.walk",
"os.path.join",
"torch.cuda.device_count",
"sklearn.metrics.cohen_kappa_score",
"torch.nn.DataParallel",
"numpy.array",
"torch.save",
"pandas.DataFrame",
"numpy.load",
"sklearn.metrics.accur... | [((3964, 4016), 'numpy.save', 'np.save', (['(self.config._save_dir / outs_name)', 'all_outs'], {}), '(self.config._save_dir / outs_name, all_outs)\n', (3971, 4016), True, 'import numpy as np\n'), ((4025, 4077), 'numpy.save', 'np.save', (['(self.config._save_dir / trgs_name)', 'all_trgs'], {}), '(self.config._save_dir / trgs_name, all_trgs)\n', (4032, 4077), True, 'import numpy as np\n'), ((4348, 4373), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (4371, 4373), False, 'import torch\n'), ((4867, 4917), 'torch.device', 'torch.device', (["('cuda:0' if n_gpu_use > 0 else 'cpu')"], {}), "('cuda:0' if n_gpu_use > 0 else 'cpu')\n", (4879, 4917), False, 'import torch\n'), ((5677, 5704), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (5687, 5704), False, 'import torch\n'), ((6283, 6306), 'torch.load', 'torch.load', (['resume_path'], {}), '(resume_path)\n', (6293, 6306), False, 'import torch\n'), ((7963, 7980), 'os.walk', 'os.walk', (['save_dir'], {}), '(save_dir)\n', (7970, 7980), False, 'import os\n'), ((8612, 8681), 'sklearn.metrics.classification_report', 'classification_report', (['all_trgs', 'all_outs'], {'digits': '(6)', 'output_dict': '(True)'}), '(all_trgs, all_outs, digits=6, output_dict=True)\n', (8633, 8681), False, 'from sklearn.metrics import classification_report\n'), ((8695, 8731), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['all_trgs', 'all_outs'], {}), '(all_trgs, all_outs)\n', (8711, 8731), False, 'from sklearn.metrics import confusion_matrix\n'), ((8745, 8760), 'pandas.DataFrame', 'pd.DataFrame', (['r'], {}), '(r)\n', (8757, 8760), True, 'import pandas as pd\n'), ((8783, 8820), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['all_trgs', 'all_outs'], {}), '(all_trgs, all_outs)\n', (8800, 8820), False, 'from sklearn.metrics import cohen_kappa_score\n'), ((8846, 8880), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['all_trgs', 'all_outs'], 
{}), '(all_trgs, all_outs)\n', (8860, 8880), False, 'from sklearn.metrics import accuracy_score\n'), ((9002, 9035), 'os.path.join', 'os.path.join', (['save_dir', 'file_name'], {}), '(save_dir, file_name)\n', (9014, 9035), False, 'import os\n'), ((9169, 9205), 'os.path.join', 'os.path.join', (['save_dir', 'cm_file_name'], {}), '(save_dir, cm_file_name)\n', (9181, 9205), False, 'import os\n'), ((9214, 9242), 'torch.save', 'torch.save', (['cm', 'cm_Save_path'], {}), '(cm, cm_Save_path)\n', (9224, 9242), False, 'import torch\n'), ((596, 647), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'device_ids'}), '(model, device_ids=device_ids)\n', (617, 647), False, 'import torch\n'), ((5878, 5906), 'torch.save', 'torch.save', (['state', 'best_path'], {}), '(state, best_path)\n', (5888, 5906), False, 'import torch\n'), ((7884, 7928), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', 'os.pardir'], {}), '(self.checkpoint_dir, os.pardir)\n', (7896, 7928), False, 'import os\n'), ((8355, 8376), 'numpy.load', 'np.load', (['outs_list[i]'], {}), '(outs_list[i])\n', (8362, 8376), True, 'import numpy as np\n'), ((8400, 8421), 'numpy.load', 'np.load', (['trgs_list[i]'], {}), '(trgs_list[i])\n', (8407, 8421), True, 'import numpy as np\n'), ((8518, 8536), 'numpy.array', 'np.array', (['all_trgs'], {}), '(all_trgs)\n', (8526, 8536), True, 'import numpy as np\n'), ((8568, 8586), 'numpy.array', 'np.array', (['all_outs'], {}), '(all_outs)\n', (8576, 8586), True, 'import numpy as np\n'), ((8086, 8110), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8098, 8110), False, 'import os\n'), ((8185, 8209), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8197, 8209), False, 'import os\n')] |
import numpy as np
def hist_to_xy(array, bins):
hist = np.histogram(array, bins=bins)
y, x = [hist[0], 0.5 * (hist[1][1:] + hist[1][:-1])]
return x, y
def find_nearest(array, values):
indices = np.abs(np.subtract.outer(array, values)).argmin(0)
return indices
def check_for_completeness(list):
start, end = int(list[0]), int(list[-1])
return sorted(set(range(start, end + 1)).difference(list))
def print_table(table):
longest_cols = [(max([len(str(row[i])) for row in table]) + 3) for i in range(len(table[0]))]
row_format = "".join(["{:>" + str(longest_col) + "}" for longest_col in longest_cols])
for row in table:
print(row_format.format(*row))
| [
"numpy.histogram",
"numpy.subtract.outer"
] | [((61, 91), 'numpy.histogram', 'np.histogram', (['array'], {'bins': 'bins'}), '(array, bins=bins)\n', (73, 91), True, 'import numpy as np\n'), ((221, 253), 'numpy.subtract.outer', 'np.subtract.outer', (['array', 'values'], {}), '(array, values)\n', (238, 253), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#########################################################
#
# Main file for parallel mesh testing.
#
# This is a modification of the run_parallel_advection.py
# file.
#
#
# *) The (new) files that have been added to manage the
# grid partitioning are
# +) pmesh_divide_metis.py: subdivide a pmesh
# +) build_submesh.py: build the submeshes on the host
# processor.
# +) build_local.py: build the GA mesh datastructure
# on each processor.
# +) build_commun.py: handle the communication between
# the host and processors
#
# *) Things still to do:
# +) Overlap the communication and computation: The
# communication routines in build_commun.py should be
# interdispersed in the build_submesh.py and build_local.py
# files. This will overlap the communication and
# computation and will be far more efficient. This should
# be done after more testing and there more confidence in
# the subpartioning.
# +) Much more testing especially with large numbers of
# processors.
# Authors: <NAME>, <NAME> and <NAME>,
# June 2005
#
#
#
#########################################################
import sys
#import pypar # The Python-MPI interface
import time
# Numeric arrays
import numpy as num
#from numpy import array, zeros, float
# Print debugging information
from print_stats import print_test_stats, build_full_flag
# pmesh
from anuga.shallow_water import Domain
from parallel_shallow_water import Parallel_domain
from anuga.abstract_2d_finite_volumes.pmesh2domain\
import pmesh_to_domain_instance
# Reuse previous mesh import
from anuga.caching import cache
# Mesh partition routines
from distribute_mesh import pmesh_divide_metis
from distribute_mesh import build_submesh
from distribute_mesh import build_local_mesh
from distribute_mesh import send_submesh, rec_submesh, extract_submesh
###############################
# Read in processor information
###############################
numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.get_processor_name()
############################
# Set the initial conditions
############################
rect = num.zeros( 4, num.float) # Buffer for results
class Set_Stage:
"""Set an initial condition with constant water height, for x<x0
"""
def __init__(self, x0=0.25, x1=0.5, h=1.0):
self.x0 = x0
self.x1 = x1
self.h = h
def __call__(self, x, y):
return self.h*((x>self.x0)&(x<self.x1))
#######################
# Partition the domain
#######################
if myid == 0:
# Read in the test files
# filename = 'test-100.tsh'
# filename = 'merimbula_10785_1.tsh'
filename = 'merimbula_43200.tsh'
# Build the whole domain
domain_full = pmesh_to_domain_instance(filename, Domain)
# domain_full = cache(pmesh_to_domain_instance,
# (filename, Domain),
# dependencies = [filename])
rect = num.array(domain_full.get_extent(), num.float)
print (rect)
# Initialise the wave
#domain_full.set_quantity('stage', Set_Stage(200.0,300.0,1.0))
domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,2.0))
# domain_full.set_quantity('stage', Set_Stage(756000.0,756500.0,0.0))
# Subdivide the domain
# Note the different arguments compared with pmesh_divide,
# pmesh_divide_steve etc.
nodes, triangles, boundary, triangles_per_proc, quantities = \
pmesh_divide_metis(domain_full, numprocs)
print (triangles_per_proc)
rect = num.array(domain_full.get_extent(), num.float)
submesh = build_submesh(nodes, triangles, boundary,\
quantities, triangles_per_proc)
# Send the mesh partition to the appropriate processor
for p in range(1, numprocs):
send_submesh(submesh, triangles_per_proc, p)
# Build the local mesh for processor 0
points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict = \
extract_submesh(submesh, triangles_per_proc)
# Read in the mesh partition that belongs to this
# processor (note that the information is in the
# correct form for the GA data structure
else:
points, vertices, boundary, quantities, ghost_recv_dict, full_send_dict , \
no_full_nodes, no_full_trigs = rec_submesh(0)
###########################################
# Start the computations on each subpartion
###########################################
#if myid == 0:
# print 'ghost'
# print ghost_recv_dict
#processor_name
#if myid == 0:
# print 'full'
# print full_send_dict
# The visualiser needs to know the size of the whole domain
pypar.broadcast(rect,0)
domain = Parallel_domain(points, vertices, boundary,
full_send_dict = full_send_dict,
ghost_recv_dict = ghost_recv_dict)
# Make a note of which triangles are full and which are ghost
tri_full_flag = build_full_flag(domain, ghost_recv_dict)
try:
#domain.initialise_visualiser(rect=rect)
#domain.visualiser.coloring['stage'] = True
#domain.visualiser.scale_z['stage'] = 0.2
#domain.visualiser.scale_z['elevation'] = 0.05
pass
except:
print ('No visualiser')
domain.default_order = 1
#Boundaries
from anuga.interface import Transmissive_boundary, Reflective_boundary
T = Transmissive_boundary(domain)
R = Reflective_boundary(domain)
domain.set_boundary( {'outflow': R, 'inflow': R, 'inner':R, 'exterior': R, 'open':R, 'ghost':None} )
domain.set_quantity('stage', quantities['stage'])
domain.set_quantity('elevation', quantities['elevation'])
domain.store = False
#---------
# Evolution
t0 = time.time()
print ('Processor %d on %s: No of elements %d'%(domain.processor,processor_name,domain.number_of_elements))
yieldstep = 50.0
finaltime = 500.0
#yieldstep = 1000
#finaltime = 40000
#yieldstep = 1
#finaltime = 1
#processor_name
#for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
# if myid == 0:
# domain.write_time()
#print 'Processor %d, Integral of stage %d'%\
# (domain.processor,domain.quantities['stage'].get_integral())
# print_test_stats(domain, tri_full_flag)
# Profiling
#import profile
#profiler = profile.Profile()
#result.dump_stats("profile." + str(numprocs) + "." + str(myid) + ".dat")
## #New hotshot profiling
## import hotshot
## profiler = hotshot.Profile("hotshot." + str(numprocs) + "." + str(myid) + ".prof")
## s = '''for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
## if myid == 0:
## domain.write_time()
## print_test_stats(domain, tri_full_flag)
## '''
## result = profiler.runctx(s, globals(), locals())
## profiler.close()
#from vtk_realtime_visualiser import Visualiser
#V = Visualiser(domain,default_scale_z=100.0)
#V.coloring['stage'] = True
#V.coloring['elevation'] = False
#V.setup['elevation']=True
#V.updating['stage']=True
#V.qcolor['stage'] = (0.1,0.4,0.99)
#V.start()
#V.idle.wait()
#V.idle.clear()
for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):
if myid == 0:
domain.write_time()
#print_test_stats(domain, tri_full_flag)
# V.redraw_ready.set()
# V.idle.wait()
# V.idle.clear()
# V.unpaused.wait()
#print 'P%d: That took %.2f seconds' %(myid, time.time()-t0)
#print 'P%d: Communication time %.2f seconds' %(myid, domain.communication_time)
#print 'P%d: Reduction Communication time %.2f seconds' %(myid, domain.communication_reduce_time)
#print 'P%d: Broadcast time %.2f seconds' %(myid, domain.communication_broadcast_time)
if myid == 0:
print ('That took %.2f seconds' %(time.time()-t0))
print ('Communication time %.2f seconds'%domain.communication_time)
print ('Reduction Communication time %.2f seconds'%domain.communication_reduce_time)
print ('Broadcast time %.2f seconds'%domain.communication_broadcast_time)
pypar.finalize()
| [
"anuga.interface.Reflective_boundary",
"anuga.interface.Transmissive_boundary",
"parallel_shallow_water.Parallel_domain",
"anuga.abstract_2d_finite_volumes.pmesh2domain.pmesh_to_domain_instance",
"distribute_mesh.send_submesh",
"distribute_mesh.pmesh_divide_metis",
"distribute_mesh.build_submesh",
"nu... | [((2128, 2151), 'numpy.zeros', 'num.zeros', (['(4)', 'num.float'], {}), '(4, num.float)\n', (2137, 2151), True, 'import numpy as num\n'), ((4666, 4777), 'parallel_shallow_water.Parallel_domain', 'Parallel_domain', (['points', 'vertices', 'boundary'], {'full_send_dict': 'full_send_dict', 'ghost_recv_dict': 'ghost_recv_dict'}), '(points, vertices, boundary, full_send_dict=full_send_dict,\n ghost_recv_dict=ghost_recv_dict)\n', (4681, 4777), False, 'from parallel_shallow_water import Parallel_domain\n'), ((4909, 4949), 'print_stats.build_full_flag', 'build_full_flag', (['domain', 'ghost_recv_dict'], {}), '(domain, ghost_recv_dict)\n', (4924, 4949), False, 'from print_stats import print_test_stats, build_full_flag\n'), ((5308, 5337), 'anuga.interface.Transmissive_boundary', 'Transmissive_boundary', (['domain'], {}), '(domain)\n', (5329, 5337), False, 'from anuga.interface import Transmissive_boundary, Reflective_boundary\n'), ((5342, 5369), 'anuga.interface.Reflective_boundary', 'Reflective_boundary', (['domain'], {}), '(domain)\n', (5361, 5369), False, 'from anuga.interface import Transmissive_boundary, Reflective_boundary\n'), ((5632, 5643), 'time.time', 'time.time', ([], {}), '()\n', (5641, 5643), False, 'import time\n'), ((2738, 2780), 'anuga.abstract_2d_finite_volumes.pmesh2domain.pmesh_to_domain_instance', 'pmesh_to_domain_instance', (['filename', 'Domain'], {}), '(filename, Domain)\n', (2762, 2780), False, 'from anuga.abstract_2d_finite_volumes.pmesh2domain import pmesh_to_domain_instance\n'), ((3430, 3471), 'distribute_mesh.pmesh_divide_metis', 'pmesh_divide_metis', (['domain_full', 'numprocs'], {}), '(domain_full, numprocs)\n', (3448, 3471), False, 'from distribute_mesh import pmesh_divide_metis\n'), ((3582, 3655), 'distribute_mesh.build_submesh', 'build_submesh', (['nodes', 'triangles', 'boundary', 'quantities', 'triangles_per_proc'], {}), '(nodes, triangles, boundary, quantities, triangles_per_proc)\n', (3595, 3655), False, 'from distribute_mesh 
import build_submesh\n'), ((3968, 4012), 'distribute_mesh.extract_submesh', 'extract_submesh', (['submesh', 'triangles_per_proc'], {}), '(submesh, triangles_per_proc)\n', (3983, 4012), False, 'from distribute_mesh import send_submesh, rec_submesh, extract_submesh\n'), ((4284, 4298), 'distribute_mesh.rec_submesh', 'rec_submesh', (['(0)'], {}), '(0)\n', (4295, 4298), False, 'from distribute_mesh import send_submesh, rec_submesh, extract_submesh\n'), ((3785, 3829), 'distribute_mesh.send_submesh', 'send_submesh', (['submesh', 'triangles_per_proc', 'p'], {}), '(submesh, triangles_per_proc, p)\n', (3797, 3829), False, 'from distribute_mesh import send_submesh, rec_submesh, extract_submesh\n'), ((7624, 7635), 'time.time', 'time.time', ([], {}), '()\n', (7633, 7635), False, 'import time\n')] |
import itertools
from cirq import ops
from cirq.qutrit import raw_types
from cirq.qutrit import common_gates
import math, numpy as np
class ControlledTernaryGate(raw_types.TernaryLogicGate,
                            ops.CompositeGate,
                            ops.ReversibleEffect,
                            ops.TextDiagrammable):
    """Applies a base ternary gate conditioned on one or more control qutrits.

    The base gate acts on the trailing qutrit(s) only when every control
    qutrit's value is one of the allowed values listed for it in
    ``control_values``.
    """

    def __init__(self, base_gate, control_values):
        """ Args:
            base_gate: The ternary gate to apply when all controls match.
            control_values: E.g. [(2,), (1,), (0,1)] for a three
            control gate that triggers if the first qutrit is 2, the
            second is 1 and the third is either 0 or 1.
        """
        self.base_gate = base_gate
        self.control_values = control_values

    def default_decompose(self, qutrits):
        # qutrits are control, target or control, control, target.
        # Only the specific control-pattern / base-gate combinations below
        # have known decompositions; any other combination is a hard error.
        assert len(self.control_values) in [1, 2]
        assert len(qutrits) == len(self.control_values) + 1
        if len(self.control_values) == 1:
            if self.control_values == ((2,),) and self.base_gate == common_gates.F01:
                yield C2F01()(*qutrits)
            elif self.control_values == ((1,),) and self.base_gate == common_gates.PlusOne:
                yield C1PlusOne()(*qutrits)
            elif self.control_values == ((1,),) and self.base_gate == common_gates.MinusOne:
                yield C1MinusOne()(*qutrits)
            else:
                assert False, "control_values: %s, base_gate: %s" % (self.control_values, self.base_gate)
        elif len(self.control_values) == 2:
            if self.control_values == ((1,), (1,)) and self.base_gate == common_gates.PlusOne:
                yield C1C1PlusOne()(*qutrits)
            elif self.control_values == ((1,), (1,)) and self.base_gate == common_gates.MinusOne:
                yield C1C1MinusOne()(*qutrits)
            elif self.control_values == ((2,), (2,)) and self.base_gate == common_gates.PlusOne:
                yield C2C2PlusOne()(*qutrits)
            elif self.control_values == ((2,), (2,)) and self.base_gate == common_gates.MinusOne:
                yield C2C2MinusOne()(*qutrits)
            else:
                assert False, "control_values: %s, base_gate: %s" % (self.control_values, self.base_gate)
        else:
            # Unreachable given the first assert; kept as a defensive guard.
            assert False, 'more than two controls'

    @staticmethod
    def all_controlled_gates(base_gate, base_name, max_controls=1,
                             control_types=((0,), (1,), (2,), (0,1), (0,2), (1,2))):
        """Returns a dict of generated gate names to ControlledTernaryGate
        instances, covering every combination of control patterns from
        ``control_types`` with 1 up to ``max_controls`` controls.

        Names are formed by prefixing ``base_name`` with one 'C<values>'
        token per control, e.g. 'C2C01' + base_name.
        """
        gates = {}
        for control_num in range(1, max_controls+1):
            for controls in itertools.product(control_types,
                                              repeat=control_num):
                gate_pre = ''.join('C'+''.join(map(str, (c for c in control)))
                                   for control in controls)
                gate_name = gate_pre + base_name
                gate = ControlledTernaryGate(base_gate, controls)
                gates[gate_name] = gate
        return gates

    def inverse(self):
        """The controlled version of the base gate's inverse, with the
        same control pattern."""
        return ControlledTernaryGate(self.base_gate.inverse(),
                                     self.control_values)

    def _control_symbols(self):
        # One comma-joined symbol per control, e.g. ('2', '0,1').
        return tuple(','.join(map(str, vals))
                     for vals in self.control_values)

    def text_diagram_info(self, args):
        """Prepends one wire symbol per control to the base gate's diagram info."""
        base_info = self.base_gate.text_diagram_info(args)
        c_syms = self._control_symbols()
        return ops.TextDiagramInfo(c_syms+base_info.wire_symbols,
                                   exponent=base_info.exponent,
                                   connected=True)

    def validate_trits(self, trits):
        """Validates via the base class, then delegates the target trits
        to the base gate's validation."""
        super().validate_trits(trits)
        self.base_gate.validate_trits(trits[len(self.control_values):])

    def applied_to_trits(self, trits):
        """Applies the base gate to the target trits iff every control trit
        matches its allowed values; returns the (possibly mutated) trits."""
        controls = trits[:len(self.control_values)]
        control_active = all(
            trit in matches
            for trit, matches in zip(controls, self.control_values)
        )
        if control_active:
            changed_trits = self.base_gate.applied_to_trits(
                trits[len(self.control_values):])
            trits[len(self.control_values):] = changed_trits
        return trits
class C1C1PlusOne(raw_types.TernaryLogicGate,
                  ops.CompositeGate):
    """Decomposition of a doubly-controlled PlusOne gate (both controls == 1)
    into single-qutrit Ry rotations interleaved with controlled flips."""
    def default_decompose(self, qutrits):
        ctrl_a, ctrl_b, target = qutrits[0], qutrits[1], qutrits[2]
        quarter = math.pi / 4
        yield Ry12(-quarter)(target)
        yield C1F12()(ctrl_b, target)
        yield Ry12(-quarter)(target)
        yield C1F12()(ctrl_a, target)
        yield Ry12(quarter)(target)
        yield C1F12()(ctrl_b, target)
        yield Ry12Pi4Ry01Pi4()(target)
        yield C1F01()(ctrl_b, target)
        yield Ry01(quarter)(target)
        yield C1F01()(ctrl_a, target)
        yield Ry01(-quarter)(target)
        yield C1F01()(ctrl_b, target)
        yield Ry01(-quarter)(target)
class C2C2PlusOne(raw_types.TernaryLogicGate,
                  ops.CompositeGate):
    """Decomposition of a doubly-controlled PlusOne gate (both controls == 2)
    into single-qutrit Ry rotations interleaved with controlled flips."""
    def default_decompose(self, qutrits):
        ctrl_a, ctrl_b, target = qutrits[0], qutrits[1], qutrits[2]
        quarter = math.pi / 4
        yield Ry12(-quarter)(target)
        yield C2F12()(ctrl_b, target)
        yield Ry12(-quarter)(target)
        yield C2F12()(ctrl_a, target)
        yield Ry12(quarter)(target)
        yield C2F12()(ctrl_b, target)
        yield Ry12Pi4Ry01Pi4()(target)
        yield C2F01()(ctrl_b, target)
        yield Ry01(quarter)(target)
        yield C2F01()(ctrl_a, target)
        yield Ry01(-quarter)(target)
        yield C2F01()(ctrl_b, target)
        yield Ry01(-quarter)(target)
class C1C1MinusOne(raw_types.TernaryLogicGate,
                  ops.CompositeGate):
    """Decomposition of a doubly-controlled MinusOne gate (both controls == 1)
    into single-qutrit Ry rotations interleaved with controlled flips."""
    def default_decompose(self, qutrits):
        ctrl_a, ctrl_b, target = qutrits[0], qutrits[1], qutrits[2]
        quarter = math.pi / 4
        yield Ry01(quarter)(target)
        yield C1F01()(ctrl_b, target)
        yield Ry01(quarter)(target)
        yield C1F01()(ctrl_a, target)
        yield Ry01(-quarter)(target)
        yield C1F01()(ctrl_b, target)
        yield Ry01MinusPi4Ry12MinusPi4()(target)
        yield C1F12()(ctrl_b, target)
        yield Ry12(-quarter)(target)
        yield C1F12()(ctrl_a, target)
        yield Ry12(quarter)(target)
        yield C1F12()(ctrl_b, target)
        yield Ry12(quarter)(target)
class C2C2MinusOne(raw_types.TernaryLogicGate,
                  ops.CompositeGate):
    """Decomposition of a doubly-controlled MinusOne gate (both controls == 2)
    into single-qutrit Ry rotations interleaved with controlled flips."""
    def default_decompose(self, qutrits):
        ctrl_a, ctrl_b, target = qutrits[0], qutrits[1], qutrits[2]
        quarter = math.pi / 4
        yield Ry01(quarter)(target)
        yield C2F01()(ctrl_b, target)
        yield Ry01(quarter)(target)
        yield C2F01()(ctrl_a, target)
        yield Ry01(-quarter)(target)
        yield C2F01()(ctrl_b, target)
        yield Ry01MinusPi4Ry12MinusPi4()(target)
        yield C2F12()(ctrl_b, target)
        yield Ry12(-quarter)(target)
        yield C2F12()(ctrl_a, target)
        yield Ry12(quarter)(target)
        yield C2F12()(ctrl_b, target)
        yield Ry12(quarter)(target)
class Ry01(raw_types.TernaryLogicGate):
    """Rotation by ``theta`` acting on the 0-1 subspace of a qutrit;
    level 2 is left untouched."""
    def __init__(self, theta):
        self.theta = theta

    def _unitary_(self):
        half = self.theta / 2.0
        c, s = math.cos(half), math.sin(half)
        return np.array([[c, -s, 0],
                         [s,  c, 0],
                         [0,  0, 1]])
class Ry12(raw_types.TernaryLogicGate):
    """Rotation by ``theta`` acting on the 1-2 subspace of a qutrit;
    level 0 is left untouched."""
    def __init__(self, theta):
        self.theta = theta

    def _unitary_(self):
        half = self.theta / 2.0
        c, s = math.cos(half), math.sin(half)
        return np.array([[1, 0,  0],
                         [0, c, -s],
                         [0, s,  c]])
class Ry01MinusPi4Ry12MinusPi4(raw_types.TernaryLogicGate):
    """Ry01(-pi/4) followed by Ry12(-pi/4), as one combined matrix."""
    def _unitary_(self):
        first = Ry01(-math.pi / 4)._unitary_()
        second = Ry12(-math.pi / 4)._unitary_()
        return np.dot(second, first)
class Ry12Pi4Ry01Pi4(raw_types.TernaryLogicGate):
    """Ry12(pi/4) followed by Ry01(pi/4), as one combined matrix."""
    def _unitary_(self):
        first = Ry12(math.pi / 4)._unitary_()
        second = Ry01(math.pi / 4)._unitary_()
        return np.dot(second, first)
class C1F01(raw_types.TernaryLogicGate):
    """Two-qutrit permutation gate: exchanges basis states 3 and 4 of the
    9-dimensional space (control-1 conditioned 0<->1 flip, assuming the
    usual row-major |control, target> ordering)."""
    def _unitary_(self):
        # Identity with rows 3 and 4 swapped.
        mat = np.identity(9, dtype=int)
        mat[[3, 4]] = mat[[4, 3]]
        return mat
class C2F01(raw_types.TernaryLogicGate):
    """Two-qutrit permutation gate: exchanges basis states 6 and 7 of the
    9-dimensional space (control-2 conditioned 0<->1 flip, assuming the
    usual row-major |control, target> ordering)."""
    def _unitary_(self):
        # Identity with rows 6 and 7 swapped.
        mat = np.identity(9, dtype=int)
        mat[[6, 7]] = mat[[7, 6]]
        return mat
class C1F12(raw_types.TernaryLogicGate):
    """Two-qutrit permutation gate: exchanges basis states 4 and 5 of the
    9-dimensional space (control-1 conditioned 1<->2 flip, assuming the
    usual row-major |control, target> ordering)."""
    def _unitary_(self):
        # Identity with rows 4 and 5 swapped.
        mat = np.identity(9, dtype=int)
        mat[[4, 5]] = mat[[5, 4]]
        return mat
class C2F12(raw_types.TernaryLogicGate):
    """Two-qutrit permutation gate: exchanges basis states 7 and 8 of the
    9-dimensional space (control-2 conditioned 1<->2 flip, assuming the
    usual row-major |control, target> ordering)."""
    def _unitary_(self):
        # Identity with rows 7 and 8 swapped.
        mat = np.identity(9, dtype=int)
        mat[[7, 8]] = mat[[8, 7]]
        return mat
class C1PlusOne(raw_types.TernaryLogicGate):
    """Two-qutrit permutation gate: cycles basis states 3 -> 4 -> 5 -> 3 of
    the 9-dimensional space (control-1 conditioned target increment mod 3,
    assuming the usual row-major |control, target> ordering)."""
    def _unitary_(self):
        # Identity with rows (3, 4, 5) replaced by rows (5, 3, 4),
        # i.e. the permutation matrix for the 3-cycle on those states.
        mat = np.identity(9, dtype=int)
        mat[[3, 4, 5]] = mat[[5, 3, 4]]
        return mat
class C1MinusOne(raw_types.TernaryLogicGate):
    """Two-qutrit permutation gate: cycles basis states 3 -> 5 -> 4 -> 3 of
    the 9-dimensional space (control-1 conditioned target decrement mod 3,
    assuming the usual row-major |control, target> ordering); inverse of
    C1PlusOne."""
    def _unitary_(self):
        # Identity with rows (3, 4, 5) replaced by rows (4, 5, 3),
        # i.e. the inverse 3-cycle of C1PlusOne.
        mat = np.identity(9, dtype=int)
        mat[[3, 4, 5]] = mat[[4, 5, 3]]
        return mat
| [
"itertools.product",
"math.cos",
"numpy.array",
"math.sin",
"cirq.ops.TextDiagramInfo"
] | [((3523, 3625), 'cirq.ops.TextDiagramInfo', 'ops.TextDiagramInfo', (['(c_syms + base_info.wire_symbols)'], {'exponent': 'base_info.exponent', 'connected': '(True)'}), '(c_syms + base_info.wire_symbols, exponent=base_info.\n exponent, connected=True)\n', (3542, 3625), False, 'from cirq import ops\n'), ((8244, 8528), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (8252, 8528), True, 'import math, numpy as np\n'), ((8798, 9082), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, \n 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (8806, 9082), True, 'import math, numpy as np\n'), ((9352, 9636), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 
1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0,\n 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (9360, 9636), True, 'import math, numpy as np\n'), ((9906, 10190), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 1, 0]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0,\n 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 1, 0]])\n', (9914, 10190), True, 'import math, numpy as np\n'), ((10464, 10748), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0,\n 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (10472, 10748), True, 'import math, numpy as np\n'), ((11024, 11308), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, \n 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 
0, 0, 0], [0, 0, 0, 0, 0, 1, 0,\n 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0,\n 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]])\n', (11032, 11308), True, 'import math, numpy as np\n'), ((2675, 2727), 'itertools.product', 'itertools.product', (['control_types'], {'repeat': 'control_num'}), '(control_types, repeat=control_num)\n', (2692, 2727), False, 'import itertools\n'), ((7348, 7369), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7356, 7369), False, 'import math, numpy as np\n'), ((7421, 7442), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7429, 7442), False, 'import math, numpy as np\n'), ((7442, 7463), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7450, 7463), False, 'import math, numpy as np\n'), ((7721, 7742), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7729, 7742), False, 'import math, numpy as np\n'), ((7794, 7815), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7802, 7815), False, 'import math, numpy as np\n'), ((7815, 7836), 'math.cos', 'math.cos', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7823, 7836), False, 'import math, numpy as np\n'), ((7370, 7391), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7378, 7391), False, 'import math, numpy as np\n'), ((7743, 7764), 'math.sin', 'math.sin', (['(theta / 2.0)'], {}), '(theta / 2.0)\n', (7751, 7764), False, 'import math, numpy as np\n')] |
"""
@Time : 2021/11/27 16:19
@File : misc.py
@Software: PyCharm
@Desc :
"""
import random
import warnings
import numpy as np
import torch
def setup_manual_seed(seed):
    """Seed all relevant RNGs (torch, numpy, random) for reproducible runs.

    Also forces deterministic CUDNN kernels and seeds every CUDA device
    when a GPU is present.  Emits a warning because determinism can slow
    training and interacts oddly with checkpoint restarts.
    """
    message = (
        f'You have chosen to seed ({seed}) training. This will turn on the CUDNN deterministic setting, '
        f'which can slow down your training considerably! You may see unexpected behavior when restarting '
        f'from checkpoints.'
    )
    warnings.warn(message)
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    if not torch.cuda.is_available():
        return
    torch.backends.cudnn.deterministic = True
    torch.cuda.manual_seed_all(seed)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"random.seed",
"torch.cuda.is_available",
"numpy.random.seed",
"warnings.warn"
] | [((184, 419), 'warnings.warn', 'warnings.warn', (['f"""You have chosen to seed ({seed}) training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints."""'], {}), "(\n f'You have chosen to seed ({seed}) training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.'\n )\n", (197, 419), False, 'import warnings\n'), ((459, 482), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (476, 482), False, 'import torch\n'), ((487, 507), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (501, 507), True, 'import numpy as np\n'), ((512, 529), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (523, 529), False, 'import random\n'), ((538, 563), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (561, 563), False, 'import torch\n'), ((623, 655), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (649, 655), False, 'import torch\n')] |
"""<https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm>"""
import heapq
import sys
import numba as nb
import numpy as np
input = sys.stdin.readline
# NOTE: Dijkstra algorithm with priority queue (this is slow for dense graphs)
# 1. SciPy
# 2. Python
# 3. Numba
def solve1(N, A, B, T):
    """SciPy variant: return the graph radius.

    Runs Dijkstra from every source, takes the maximum shortest-path
    distance (the source's eccentricity), and returns the minimum of
    those, capped at the 1 << 30 sentinel.
    """
    from scipy.sparse import csr_matrix
    from scipy.sparse.csgraph import dijkstra

    graph = csr_matrix((T, (A, B)), shape=(N, N))
    best = 1 << 30
    for source in range(N):
        eccentricity = dijkstra(graph, indices=source).max()
        best = min(best, eccentricity)
    return int(best)
def solve2(N, G):
    """Pure-Python variant: minimum eccentricity over all sources.

    G is an adjacency list: G[u] is a list of (neighbor, cost) pairs.
    Each source runs a binary-heap Dijkstra; the result type follows
    np.max (kept intentionally for parity with the SciPy variant).
    """
    INF = 1 << 30

    def _sssp(src):
        # Single-source shortest paths; stale heap entries are skipped.
        dist = [INF] * N
        dist[src] = 0
        heap = [(0, src)]
        while heap:
            d, node = heapq.heappop(heap)
            if dist[node] < d:
                continue
            for nxt, w in G[node]:
                cand = d + w
                if cand < dist[nxt]:
                    dist[nxt] = cand
                    heapq.heappush(heap, (cand, nxt))
        return dist

    best = 1 << 30
    for src in range(N):
        ecc = np.max(_sssp(src))
        if ecc < best:
            best = ecc
    return best
# Numba variant: same radius computation, JIT-compiled in nopython mode.
# Vertices are 1-indexed here (arrays sized V + 1, slot 0 unused), and each
# adjacency list starts with a (-1, -1) sentinel so Numba can infer the
# element type of the otherwise-empty lists; the sentinel is skipped via [1:].
@nb.njit("i8(i8,i8,i8[:,:])", cache=True)
def solve3(N, M, abt):
    def dijkstra(V, G, s):
        INF = 1 << 30
        dist = [INF] * (V + 1)
        dist[s] = 0
        p_queue = []
        p_queue.append((dist[s], s))
        while p_queue:
            min_dist, u = heapq.heappop(p_queue)
            # Stale entry: a shorter distance was already settled.
            if dist[u] < min_dist:
                continue
            # Skip the (-1, -1) type-inference sentinel at index 0.
            for v, c in G[u][1:]:
                alt = dist[u] + c
                if alt < dist[v]:
                    dist[v] = alt
                    heapq.heappush(p_queue, (alt, v))
        return dist

    G = [[(-1, -1)] for _ in range(N + 1)]
    for i in range(M):
        a, b, t = abt[i]
        # Undirected graph: insert both directions.
        G[a].append((b, t))
        G[b].append((a, t))

    min_time = 1 << 30
    for s in range(1, N + 1):
        # Eccentricity of s, ignoring the unused 0 slot.
        time = max(dijkstra(N, G, s)[1:])
        min_time = min(min_time, time)
    return min_time
def main():
    """Read the graph from stdin and print the minimum eccentricity.

    The local ``solver`` switch selects the implementation:
    1 = SciPy (solve1), 2 = pure Python (solve2), 3 = Numba (solve3).
    """
    # Renamed from ``solve`` to avoid shadowing confusion with solve1/2/3.
    solver = 1
    # 1. SciPy
    if solver == 1:
        N, M = map(int, input().split())
        A = np.zeros(shape=2 * M, dtype=np.int64)
        B = np.zeros(shape=2 * M, dtype=np.int64)
        T = np.zeros(shape=2 * M, dtype=np.int64)
        for i in range(M):
            A[i], B[i], T[i] = map(int, input().split())
        # Mirror every edge so the directed CSR matrix acts undirected.
        T[M:] = T[:M]
        A[M:] = B[:M]
        B[M:] = A[:M]
        A -= 1
        B -= 1
        # BUG FIX: the original called ``solve(N, A, B, T)`` where ``solve``
        # was the int selector (1), raising TypeError. It must call solve1.
        ans = solve1(N, A, B, T)
        print(ans)
    # 2. Python
    elif solver == 2:
        N, M = map(int, input().split())
        G = [[] for _ in range(N)]
        for _ in range(M):
            a, b, t = map(int, input().split())
            a -= 1
            b -= 1
            G[a].append((b, t))
            G[b].append((a, t))
        ans = solve2(N, G)
        print(ans)
    # 3. Numba
    else:
        N, M = map(int, input().split())
        abt = np.zeros(shape=(M, 3), dtype=np.int64)
        for i in range(M):
            abt[i] = input().split()
        ans = solve3(N, M, abt)
        print(ans)


if __name__ == "__main__":
    main()
"""
<https://atcoder.jp/contests/abc012/tasks/abc012_4>
Example for input
5 5
1 2 12
2 3 14
3 4 7
4 5 9
5 1 18
26
"""
| [
"numba.njit",
"scipy.sparse.csgraph.dijkstra",
"numpy.max",
"numpy.zeros",
"heapq.heappop",
"heapq.heappush",
"scipy.sparse.csr_matrix"
] | [((1360, 1400), 'numba.njit', 'nb.njit', (['"""i8(i8,i8,i8[:,:])"""'], {'cache': '(True)'}), "('i8(i8,i8,i8[:,:])', cache=True)\n", (1367, 1400), True, 'import numba as nb\n'), ((393, 430), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(T, (A, B))'], {'shape': '(N, N)'}), '((T, (A, B)), shape=(N, N))\n', (403, 430), False, 'from scipy.sparse import csr_matrix\n'), ((499, 527), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', (['csgraph'], {'indices': 's'}), '(csgraph, indices=s)\n', (507, 527), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((1228, 1245), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', (['N', 'G', 's'], {}), '(N, G, s)\n', (1236, 1245), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((1261, 1280), 'numpy.max', 'np.max', (['dist_matrix'], {}), '(dist_matrix)\n', (1267, 1280), True, 'import numpy as np\n'), ((2347, 2384), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * M)', 'dtype': 'np.int64'}), '(shape=2 * M, dtype=np.int64)\n', (2355, 2384), True, 'import numpy as np\n'), ((2397, 2434), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * M)', 'dtype': 'np.int64'}), '(shape=2 * M, dtype=np.int64)\n', (2405, 2434), True, 'import numpy as np\n'), ((2447, 2484), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * M)', 'dtype': 'np.int64'}), '(shape=2 * M, dtype=np.int64)\n', (2455, 2484), True, 'import numpy as np\n'), ((864, 886), 'heapq.heappop', 'heapq.heappop', (['p_queue'], {}), '(p_queue)\n', (877, 886), False, 'import heapq\n'), ((1632, 1654), 'heapq.heappop', 'heapq.heappop', (['p_queue'], {}), '(p_queue)\n', (1645, 1654), False, 'import heapq\n'), ((3137, 3175), 'numpy.zeros', 'np.zeros', ([], {'shape': '(M, 3)', 'dtype': 'np.int64'}), '(shape=(M, 3), dtype=np.int64)\n', (3145, 3175), True, 'import numpy as np\n'), ((2148, 2165), 'scipy.sparse.csgraph.dijkstra', 'dijkstra', (['N', 'G', 's'], {}), '(N, G, s)\n', (2156, 2165), False, 'from scipy.sparse.csgraph import dijkstra\n'), ((1104, 1137), 'heapq.heappush', 'heapq.heappush', 
(['p_queue', '(alt, v)'], {}), '(p_queue, (alt, v))\n', (1118, 1137), False, 'import heapq\n'), ((1872, 1905), 'heapq.heappush', 'heapq.heappush', (['p_queue', '(alt, v)'], {}), '(p_queue, (alt, v))\n', (1886, 1905), False, 'import heapq\n')] |
import bibtexparser as bp
import Levenshtein as le
import fuzzy as fz
import numpy
import scipy.misc as ch
from sklearn import linear_model
from sklearn.cross_validation import train_test_split
# from scipy import misc as ch
# import gmpy2 as ch
import re, csv, os, threading, logging, sys
from datetime import *
from collections import OrderedDict
from bibtex_merger.core import *
from bibtex_merger.extension import *
logger = logging.getLogger(__name__)
__all__ = [ 'BibTeX_Merger', 'MergerError' ]
class BibTeX_Merger(Core):
	def __init__(self, out=sys.stdout, importDir='.', numFiles=-1, killLevel='warning', shallowDeepCompDiv=3.4, summedPercentErrorDiv=[0.4, 1.0], learningModel='fminunc', doLearning='remakeModel'):
		"""Validate every option, store it on a private attribute, then run the
		full Import -> Bagging -> ShallowCompare pipeline via __run__().

		NOTE(review): summedPercentErrorDiv is a mutable default argument; it
		appears to only be read here, but verify no caller mutates it.
		"""
		super(BibTeX_Merger, self).__init__(ext=self.__initExtensions__(), out=out, killLevel=killLevel)
		self.__initConstants__()
		# Optionally passed flag from command line that specifies how many files to use
		# If set to -1 (default) then all files in the data/0_original/ directory will be used
		if not isinstance(numFiles, int):
			raise ValueError("BibTeX_Merger numFiles argument requires int not ({})".format(type(numFiles)))
		self._numFiles = numFiles
		if not (isinstance(importDir, str) and os.path.isdir(importDir)):
			raise ValueError("BibTeX_Merger importDir argument requires str to a directory not ({} -> {})".format(type(importDir), importDir))
		self._importDir = importDir
		# The manually decided breakpoints
		if not ((isinstance(shallowDeepCompDiv, int) or isinstance(shallowDeepCompDiv, float)) and shallowDeepCompDiv >= 0):
			raise ValueError("BibTeX_Merger shallowDeepCompDiv argument must be int|float > 0 not ({} -> {})".format(type(shallowDeepCompDiv), shallowDeepCompDiv))
		self._shallowDeepCompDiv = shallowDeepCompDiv
		# Lower and Upper breakpoints, everything lower than lower is assumed duplicate,
		# everything greater than upper is assumed unique
		if not (isinstance(summedPercentErrorDiv, list) and all(isinstance(x, int) or isinstance(x, float) for x in summedPercentErrorDiv) and all(x >= 0 for x in summedPercentErrorDiv)):
			raise ValueError("BibTeX_Merger summedPercentErrorDiv argument must be a list of int|float > 0 not ({} -> {})".format(type(summedPercentErrorDiv), summedPercentErrorDiv))
		self._summedPercentErrorDiv = summedPercentErrorDiv
		# Sample fminunc theta
		# self.theta = numpy.array([0.419, 1.036, -3.089, 0.025, -0.387, 0.943, 0.625, -3.326, -2.256, -1.798, 0.536, 1.777, 0.713, -0.026, -1.271, 0.215, 1.444, -12.533, -9.778, -2.590, -1.522])
		# Sample glmfit theta
		self._theta = numpy.array([200.064, 1.192, -3.152, 33.034, 0.000, 0.985, 80.515, -3.527, -2.330, -1.916, 0.006, 1.863, 0.149, -0.108, -1.397, 87.715, 1.519, -13.372, -10.149, -2.609, -1.637])
		# Learning model to use
		# available options: fminunc | glmfit
		if not (isinstance(learningModel, str) and learningModel in self.learningModels):
			raise ValueError("BibTeX_Merger learningModel argument must be {} not ({} -> {})".format("|".join(self.learningModels), type(learningModel), learningModel))
		self._learningModel = self.learningModels[learningModel]
		# What to do in this instance of code execution
		# available options: off | remakeData | remakeModel
		if not (isinstance(doLearning, str) and doLearning in self.doLearnings):
			raise ValueError("BibTeX_Merger doLearning argument must be {} not ({} -> {})".format("|".join(self.doLearnings), type(doLearning), doLearning))
		self._doLearning = self.doLearnings[doLearning]
		self.__run__()
		return
	# Read-only accessors for the validated constructor options.
	@property
	def OUT(self):
		return self._OUT
	@property
	def numFiles(self):
		return self._numFiles
	@property
	def importDir(self):
		return self._importDir
	@property
	def shallowDeepCompDiv(self):
		return self._shallowDeepCompDiv
	@property
	def summedPercentErrorDiv(self):
		return self._summedPercentErrorDiv
	@property
	def theta(self):
		# Pre-trained logistic-regression weights used by DeepCompare.
		return self._theta
	@property
	def learningModel(self):
		return self._learningModel
	@property
	def doLearning(self):
		return self._doLearning
	def __initExtensions__(self):
		"""Build the reader/writer Extension objects (.bib and .csv) that the
		Core base class uses for __read__/__write__ dispatch by file suffix."""
		def bibRead(filename):
			# Parse a BibTeX file with the shared parser; wrap parse failures
			# in a MergerError so the caller can decide to skip the file.
			with open(filename, 'r') as f:
				try:
					return bp.load(f, parser=self.parser)
				except ValueError:
					raise MergerError("This file ({}) has formatting errors and could not be parsed. Skipping.".format(filename))
		bibExt = Extension(ext=r'bib', reader=bibRead)
		def csvRead(filename):
			return [e for e in csv.reader(f)]
		def csvWrite(filename, content):
			# Accepts a list (vector) or list-of-lists (matrix); anything else
			# raises MergerError. Nested guards mirror that precedence.
			if content != None:
				if isinstance(content, list):
					if len(content) > 0:
						with open(filename, 'wb') as f:
							filecontent = csv.writer(f)
							if isinstance(content[0], list):
								# Matrix, List of lists
								filecontent.writerows(content)
							else:
								# Vector, List
								filecontent.writerow(content)
						return
					raise MergerError("CSV content is empty, nothing to write")
				raise MergerError("CSV content is not of matrix or vector format")
			raise MergerError("CSV content is None, write failed")
		csvExt = Extension(ext=r'csv', reader=csvRead, writer=csvWrite)
		return [bibExt, csvExt]
	def __initConstants__(self):
		"""Define lookup tables and constants used throughout the pipeline:
		option-name -> index maps, the BibTeX parser, field-name constants,
		the BibTeX entry-type schema, and the sorted deep-compare key list."""
		# Map option names to integer codes (name -> enumeration index).
		self.doLearnings = ['off', 'remakeData', 'remakeModel']
		self.doLearnings = dict((v,k) for k, v in enumerate(self.doLearnings))
		self.learningModels = ['fminunc', 'glmfit']
		self.learningModels = dict((v,k) for k, v in enumerate(self.learningModels))
		self.parser = bp.bparser.BibTexParser()
		self.parser.customization = self.__customizations__
		self.id = "ID"
		self.author = "author"
		self.title = "title"
		self.key = "key"
		self.label = "label"
		# 256-char translation table: any non-alphanumeric byte becomes '_'.
		self.mapToUnderscore = ''.join(chr(c) if chr(c).isupper() or chr(c).islower() or chr(c).isdigit() else '_' for c in range(256))
		# Static vars
		# Schema of standard BibTeX entry types and their required/optional
		# fields. A nested list (e.g. ["author", "editor"]) means "one of".
		self.entry_types = {	"STRING":		{
								},
								"PREAMBLE":		{
								},
								"ARTICLE":		{	"requires": ["author", "title", "journal", "year"],
													"optional": ["volume", "number", "pages", "month", "note", "key"],
								},
								"BOOK":			{	"requires": [["author", "editor"], "title", "publisher", "year"],
													"optional": ["volume", "series", "address", "edition", "month", "note", "key"],
								},
								"BOOKLET":		{	"requires": ["title"],
													"optional": ["author", "howpublished", "address", "month", "year", "note", "key"],
								},
								"CONFERENCE":	{	"requires": ["author", "title", "booktitle", "year"],
													"optional": ["editor", "pages", "organization", "publisher", "address", "month", "note", "key"],
								},
								"INPROCEEDINGS":	{	"requires": ["author", "title", "booktitle", "year"],
													"optional": ["editor", "pages", "organization", "publisher", "address", "month", "note", "key"],
								},
								"INBOOK":		{	"requires": [["author", "editor"], "title", ["chapter", "pages"], "publisher", "year"],
													"optional": ["volume", "series", "address", "edition", "month", "note", "key"],
								},
								"INCOLLECTION":	{	"requires": ["author", "title", "booktitle", "year"],
													"optional": ["editor", "pages", "organization", "publisher", "address", "month", "note", "key"],
								},
								"MANUAL":		{	"requires": ["title"],
													"optional": ["author", "organization", "address", "edition", "month", "year", "note", "key"],
								},
								"MASTERSTHESIS":	{	"requires": ["author", "title", "school", "year"],
													"optional": ["address", "month", "note", "key"],
								},
								"MISC":			{	"requires": [],
													"optional": ["author", "title", "howpublished", "month", "year", "note", "key"],
								},
								"PHDTHESIS":	{	"requires": ["author", "title", "school", "year"],
													"optional": ["address", "month", "note", "key"],
								},
								"PROCEEDINGS":	{	"requires": ["title", "year"],
													"optional": ["editor", "publisher", "organization", "address", "month", "note", "key"],
								},
								"TECHREPORT":	{	"requires": ["author", "title", "institution", "year"],
													"optional": ["type", "number", "address", "month", "note", "key"],
								},
								# NOTE(review): "required" (sic) differs from the
								# "requires" key used everywhere else; harmless for the
								# union below (which walks all keys), but verify intent.
								"UNPUBLISHED":	{	"required": ["author", "title", "note"],
													"optional": ["month", "year", "key"],
								},
							}
		# Union of every field name appearing anywhere in entry_types; these
		# are the candidate fields compared in DeepCompare.
		self.defaultKeysToDeepComp = set()
		for k, d in self.entry_types.iteritems():
			for k2, l in d.iteritems():
				for i in l:
					if type(i) is type(str()):
						self.defaultKeysToDeepComp = self.defaultKeysToDeepComp.union(set([i]))
					else:
						self.defaultKeysToDeepComp = self.defaultKeysToDeepComp.union(set(i))
		# author/key are handled separately (author drives the bagging).
		self.defaultKeysToDeepComp.remove(self.author)
		self.defaultKeysToDeepComp.remove(self.key)
		# self.defaultKeysToDeepComp.remove(self.id)
		# Deterministic ordering for building feature vectors.
		self.defaultKeysToDeepCompSorted = list(self.defaultKeysToDeepComp)
		self.defaultKeysToDeepCompSorted.sort()
		self.originalDir = "../data/0_original"
		self.learningDir = "../data/2_prelearning"
		self.installDir = "~/.bibtex_merger"
		self.configFile = ".pref.cfg"
		# self.reRemComment = re.compile(r'@COMMENT.*', re.IGNORECASE)
		# self.reSplit=re.compile(r'(?=(?:' + '|'.join(["@" + et for et in self.entry_types.keys()]) + r'))',re.IGNORECASE)
		return
	def __run__(self):
		"""Execute the pipeline stages in their required order."""
		self.Import()
		self.Bagging()
		self.ShallowCompare()
		# if self.doLearning != self.doLearnings['off']:
		# 	self.Learner()
		return
	def __customizations__(self, record):
		"""Per-record normalization hook passed to BibTexParser: normalizes
		the entry type and splits the author field into (first, last) parts."""
		# This is a formating specification of the BibtexParser package
		# see https://bibtexparser.readthedocs.org/en/latest/bibtexparser.html#module-customization
		# record = bp.customization.homogenize_latex_encoding(record)
		record = bp.customization.type(record)
		record = bp.customization.author(record)
		# record = bp.customization.editor(record)
		# record = bp.customization.journal(record)
		# record = bp.customization.keyword(record)
		# record = bp.customization.page_double_hyphen(record)
		return record
	def Import(self):
		"""Read every .bib file in importDir (up to numFiles) into one merged
		BibDatabase, prefixing each entry ID with a per-file unique tag so
		IDs never collide across files. Populates self.db and self.tags."""
		self.__title__("Import")
		importDirFiles = [f for f in os.listdir(self.importDir) if os.path.isfile(os.path.join(self.importDir, f)) and os.path.splitext(f)[1] == ".bib"]
		maxNumFiles = len(importDirFiles)
		if maxNumFiles == 0:
			raise MergerError("No files were imported. Need at least one.")
		# determine whether we are only reading in the first subset or whether we are reading in the maximum
		self._numFiles = maxNumFiles if self.numFiles < 0 else min(self.numFiles, maxNumFiles)
		# self.db = None
		self.db = bp.bibdatabase.BibDatabase()
		lengths = []
		self.tags = []
		for filename in importDirFiles[0:self.numFiles]:
			self.__subtitle__("Importing '{}'".format(filename))
			# pull out the filename w/o the extension
			# map any non-alpha-numeric to '_'
			baseFilename = filename[:filename.index(".")]
			baseFilename = baseFilename.translate(self.mapToUnderscore)
			# find a unique tag (this should rarely occur)
			while baseFilename in self.tags:
				baseFilename += "_"
			# import the specified file and parse
			temp_db = self.__read__("{}/{}".format(self.importDir, filename))
			# append all ids in the entries dictionary with this file's unique tag
			# s.t. all resulting ids are entirely unique w/r to all of the imported files
			for i, e in enumerate(temp_db.entries):
				# is virtually impossible since we are reading in via the bib extension module
				# if self.id not in e.keys():
				# 	raise MergerError("'{}' key not in this entry ({})".format(self.id, e.keys()))
				e[self.id] = "{}_{}".format(baseFilename, e[self.id])
			# append all ids in the string dictionary with this file's unique tag
			# s.t. all resulting ids are entirely unique w/r to all of the imported files
			# temp_strings = OrderedDict()
			# for k, v in temp_db.strings.iteritems():
			# 	newID = "{}_{}".format(baseFilename, k)
			# 	temp_strings[newID] = v
			# temp_db.strings = temp_strings
			self.tags += [baseFilename]
			# merge the following from the current temp_dic to the master self.db
			self.db.entries += temp_db.entries
			# self.db.comments += temp_db.comments
			# self.db.preambles += temp_db.preambles
			# self.db.strings.update(temp_db.strings)
			lengths.append(len(temp_db.entries))
		return
	def Bagging(self):
		"""Partition entries into comparison bags to avoid O(n^2) comparisons:
		first by author count (entries ending in "others" join every bag of
		at least that size), then by an alpha key of author initials.
		Produces self.bag: {num_authors: {alpha_key: [entries]}}."""
		self.__title__("Bagging")
		# Bagging based on initials
		# pull out author entires
		self.static_authors = [e for e in self.db.entries if self.author in e.keys() and not (e[self.author][-1][-1] == "others")]
		self.etal_authors = [e for e in self.db.entries if self.author in e.keys() and (e[self.author][-1][-1] == "others")]
		# pull out non-author entries
		self.no_authors = [e for e in self.db.entries if self.author not in e.keys()]
		self.__info__("""initial
			static_authors:	{:10d}
			etal_authors:	{:10d}
			no_author:	{:10d}	(these are ignored)
			""".format(
			len(self.static_authors),
			len(self.etal_authors),
			len(self.no_authors)))
		best_case = len(self.static_authors) + len(self.etal_authors)
		worst_case = best_case
		self.__info__("""by none split costs
					|	# entries	|	# comparisons
			best_case	|	{:10d}	|	{:10d}
			worst_case	|	{:10d}	|	{:10d}
			""".format(
			best_case, int(ch.comb(best_case, 2)),
			worst_case, int(ch.comb(worst_case, 2))))
		self.bag = {}
		# bag entries by number of authors
		for e in self.static_authors:
			numAuthors = len(e[self.author])
			if numAuthors not in self.bag:
				self.bag[numAuthors] = [e]
			else:
				self.bag[numAuthors].append(e)
		for e in self.etal_authors:
			numAuthors = len(e[self.author])
			for k in self.bag.keys():
				if numAuthors <= k:
					self.bag[k].append(e)
			if numAuthors not in self.bag:
				self.bag[numAuthors] = [e]
			else:
				self.bag[numAuthors].append(e)
		best_case = min([len(e) for i, e in self.bag.iteritems()])
		worst_case = max([len(e) for i, e in self.bag.iteritems()])
		self.__info__("""by # authors split costs
					|	# entries	|	# comparisons
			best_case	|	{:10d}	|	{:10d}
			worst_case	|	{:10d}	|	{:10d}
			""".format(
			best_case, int(ch.comb(best_case, 2)),
			worst_case, int(ch.comb(worst_case, 2))))
		# bag entries by alpha keys
		for num_authors, entries in dict(self.bag).iteritems():
			alpha_bag = {}
			for e in entries:
				# generate the alpha key for this entry
				alpha_key = ""
				for a in e[self.author]:
					# alpha key includes initials of all authors EXCEPT "others"
					if a[-1] != "others":
						alpha_key += a[0][0].lower()
						alpha_key += a[1][0].lower()
				# add this entry to all other alpha keys in this alpha bag that has matching alpha keys
				# this forces all non "others" alpha keys to only be added once
				# all "others" alpha keys can be included multiple times
				added = False
				for key in [key for key in alpha_bag.keys() if key.find(alpha_key) == 0]:
					added = True
					alpha_bag[key].append(e)
				# if a key was not added, this means this is the first instance of that key
				if not added:
					assert alpha_key not in alpha_bag
					alpha_bag[alpha_key] = [e]
			self.bag[num_authors] = alpha_bag
		best_case = min([min([len(e) for a, e in d.iteritems()]) for i, d in self.bag.iteritems()])
		worst_case = max([max([len(e) for a, e in d.iteritems()]) for i, d in self.bag.iteritems()])
		self.__info__("""by # authors and alpha key split costs
					|	# entries	|	# comparisons
			best_case	|	{:10d}	|	{:10d}
			worst_case	|	{:10d}	|	{:10d}
			""".format(
			best_case, int(ch.comb(best_case, 2)),
			worst_case, int(ch.comb(worst_case, 2))))
		return
	def ShallowCompare(self):
		"""Pairwise-compare entries within each bag using cheap author-name
		similarity (Jaro-Winkler edit similarity x Soundex phonetic
		similarity, averaged over the shared author prefix). Pairs whose
		product clears shallowDeepCompDiv are escalated to DeepCompare."""
		self.__title__("Shallow Compare")
		lenSoundex = 10
		soundex = fz.Soundex(lenSoundex)
		combDist = {}
		numComp = {}
		self.deepComp = {}
		self.learning = []
		# self.learningKeys = set()
		self.allPredictions = []
		self.allPredictionsClass = []
		self.shallowCompares = 0
		self.deepCompares = 0
		# Upper bound: every pair inside every bag of size > 1.
		self.maxCompares = sum([sum([ch.comb(len(e), 2) for a, e in d.iteritems() if len(e) > 1]) for i, d in self.bag.iteritems()])
		for lenID, lenDic in self.bag.iteritems():
			numComp[lenID] = {}
			for alphaID, entries in lenDic.iteritems():
				numComp[lenID][alphaID] = 0
				for e1 in xrange(0, len(entries)):
					entry1 = entries[e1]
					# authors1 = [l + ", " + f for f, l in entry1[self.author]]
					authors1 = entry1[self.author]
					lenAuthors1 = len(authors1)
					# "others" is a placeholder, not a comparable author.
					if "others" in authors1[-1]:
						lenAuthors1 -= 1
					for e2 in xrange(e1 + 1, len(entries)):
						self.shallowCompares += 1
						try:
							entry2 = entries[e2]
							# authors2 = [l + ", " + f for f, l in entry2[self.author]]
							authors2 = entry2[self.author]
							lenAuthors2 = len(authors2)
							if "others" in authors2[-1]:
								lenAuthors2 -= 1
							numCompare = min(lenAuthors1, lenAuthors2)
							editDistance = 0
							phonDistance = 0
							for compareIndex in xrange(0, numCompare):
								a1 = authors1[compareIndex]
								a2 = authors2[compareIndex]
								f1 = a1[0]
								l1 = a1[1]
								f2 = a2[0]
								l2 = a2[1]
								if f1[1] == '.' or f2[1] == '.':
									# one of the authors' first name is an abbreviation
									# hence a perfect match
									editDistance += 1
									phonDistance += 1
								else:
									# neither first name is an abbreviation
									# test for similarity
									editDistance += le.jaro_winkler(f1, f2)
									phonDistance += 1.0 - (le.distance(soundex(f1), soundex(f2)) / float(lenSoundex))
								editDistance += le.jaro_winkler(l1, l2)
								phonDistance += 1.0 - (le.distance(soundex(l1), soundex(l2)) / float(lenSoundex))
							editDistance /= numCompare
							phonDistance /= numCompare
							if (editDistance * phonDistance) >= self.shallowDeepCompDiv:
								# self.OUT.write("COMPARE", editDistance, phonDistance, editDistance * phonDistance, authors1, authors2)
								self.DeepCompare(entry1, entry2)
								numComp[lenID][alphaID] += 1
								combDist[editDistance * phonDistance] = [authors1, authors2]
						except UnicodeEncodeError:
							# Soundex/Levenshtein can choke on non-ASCII names; skip pair.
							self.__warn__(MergerError("unable to properly analyze these two entries ({}, {})".format(entry1[self.id], entry2[self.id])))
							# if self.killLevel:
							# 	self.OUT.write("ERROR: skipping")
		best_case = min([min([n for a, n in d.iteritems()]) for l, d in numComp.iteritems()])
		worst_case = max([max([n for a, n in d.iteritems()]) for l, d in numComp.iteritems()])
		self.__info__("""shallow & deep compare complete
					|	# entries
			best_case	|	{:10d}
			worst_case	|	{:10d}
			""".format(
			best_case,
			worst_case))
		self.__info__("""predictions
			# duplicate matches:		{}
			# of deep comparisons:		{}
			# of shallow comparisons:	{}
			max # comparisons:		{}
			""".format(
			sum(self.allPredictionsClass),
			self.deepCompares,
			self.shallowCompares,
			self.maxCompares))
		return
	def DeepCompare(self, entry1, entry2):
		"""Field-by-field comparison of a candidate duplicate pair.

		Builds normalized Levenshtein distances for every shared field. In
		'remakeData' mode the pair is labeled (auto or interactively) and
		stored as training data; in 'off' mode the stored theta weights
		score the pair via logistic regression.
		"""
		# self.__title__("deepCompare")
		self.deepCompares += 1
		try:
			keys1 = entry1.keys()
			keys2 = entry2.keys()
			keysToComp = set(keys1).intersection(set(keys2))
			keysToComp = keysToComp.intersection(self.defaultKeysToDeepComp)
			l = {}
			for k in keysToComp:
				v1 = entry1[k]
				v2 = entry2[k]
				if v1 and v2:
					# Levenshtein distance normalized by the longer string.
					l[k] = le.distance(v1, v2) / float(max(len(v1), len(v2)))
			if self.doLearning == self.doLearnings['remakeData']:
				sv = sum(l.values())
				if sv <= self.summedPercentErrorDiv[0]:
					# assume identical due to very low summed percent error
					l[self.label] = 1
				elif self.summedPercentErrorDiv[1] <= sv:
					# assume unique due to very high summed percent error
					l[self.label] = 0
				else:
					# prompt user to decide this gray area
					label = None
					while not label:
						os.system('clear')
						# progress bar equivalent, print out which comparison we are on
						self.OUT.write("{}/{}".format(self.shallowCompares, self.maxCompares))
						# print out the summed percent error
						self.OUT.write("prediction: {} (0.4 means low error, 1 means high error)".format(sv))
						# display all of the shared fields to manually compare
						# CONSIDER: maybe also outputting non-shared fields is also useful???
						for k in keysToComp:
							self.OUT.write("e1: {}\ne2: {}\n".format(entry1[k], entry2[k]))
						label = raw_input("Are the entries the same? [y, n] ")
						label = str(label).lower()
						if label == "y":
							l[self.label] = 1
						elif label == "n":
							l[self.label] = 0
						else:
							label = None
				self.learning.append(l)
			elif self.doLearning == self.doLearnings['off']:
				# Feature vector: bias term 1, then per-field distance or -1
				# when the field is absent; order fixed by the sorted key list.
				data = [1]
				for k in self.defaultKeysToDeepCompSorted:
					if k in l:
						data.append(l[k])
					else:
						data.append(-1)
				# Logistic regression: sigmoid of theta . data.
				prediction = sum(numpy.array(data) * self.theta)
				prediction = 1/(numpy.exp(-prediction) + 1)
				self.allPredictions.append(prediction)
				if prediction > 0.5:
					self.allPredictionsClass.append(1)
					self.OUT.write("duplicates", entry1[self.id], entry2[self.id])
				else:
					self.allPredictionsClass.append(0)
		except KeyError:
			if self.killLevel:
				self.OUT.write("ERROR: skipping")
		return
	def Learner(self):
		"""Train a logistic-regression duplicate classifier from the labeled
		deep-comparison data (regenerated or loaded from CSV), with a 90/10
		train/test split. Accuracies are computed but only stored locally."""
		self.__title__("Learner")
		self.OUT.write("defaultKeysToDeepCompSorted:", self.defaultKeysToDeepCompSorted)
		self.OUT.write("# defaultKeysToDeepCompSorted:", len(self.defaultKeysToDeepCompSorted))
		dataset = []
		if self.doLearning == self.doLearnings['remakeData']:
			# Row format: [label, dist(field_1), ..., dist(field_n)] with -1
			# marking fields the pair did not share.
			for e in self.learning:
				new_e = [e[self.label]]
				for k in self.defaultKeysToDeepCompSorted:
					if k in e:
						new_e.append(e[k])
					else:
						new_e.append(-1)
				dataset.append(new_e)
			self.__write__(self.learningDir, "deepComparisonLearner.csv", array=dataset)
		else:
			dataset = self.__read__(self.learningDir, "deepComparisonLearner.csv")
		dataset = numpy.array(dataset, dtype=numpy.float)
		# Prepend a bias column of ones; column 0 of the CSV is the label.
		X = numpy.c_[numpy.ones(len(dataset)), dataset[:,1:]]
		y = dataset[:, 0]
		X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=numpy.random.RandomState(2))
		# model = linear_model.Lasso(alpha=0.0000001, max_iter=2000)
		model = linear_model.LogisticRegression()
		model.fit(X_train, y_train)
		y_train_prediction = model.predict(X_train)
		y_test_prediction = model.predict(X_test)
		train_accuracy = float(numpy.mean(y_train_prediction == y_train) * 100)
		test_accuracy = float(numpy.mean(y_test_prediction == y_test) * 100)
		#self.OUT.write(model.coef_)
		# if self.learningModel == self.learningModels['fminunc']:
		# 	os.system("matlab -nodesktop -nodisplay -nosplash -r {}".format(
		# 		"\"cd('fminunc');optimize_fminunc();exit();\""))
		# elif self.learningModel == self.learningModels['glmfit']:
		# 	os.system("matlab -nodesktop -nodisplay -nosplash -r {}".format(
		# 		"\"cd('glmfit');optimize_glmfit();exit();\""))
		# else:
		# 	raise ValueError("ERROR: bad model specified")
		return
class MergerError(CoreError):
	"""Error type for BibTeX_Merger failures (parse errors, bad CSV
	content, missing input files).

	Attributes:
		msg -- the message addressing the error thrown
	"""
	def __init__(self, msg=None):
		# All message handling is delegated to the shared CoreError base.
		super(MergerError, self).__init__(msg)
# CLI entry point: optional args are <importDir> [numFiles].
if __name__ == '__main__':
	# try:
	if len(sys.argv) == 2:
		s = str(sys.argv[1])
		BibTeX_Merger(importDir=s)
	elif len(sys.argv) == 3:
		s = str(sys.argv[1])
		n = int(sys.argv[2])
		BibTeX_Merger(importDir=s, numFiles=n)
	else:
		BibTeX_Merger()
	# except UserError:
	# 	PrintException("UserError")
	# except ProgramError:
	# 	PrintException("ProgramError")
	# except BibTeXParserError:
	# 	PrintException("BibTeXParserError")
"logging.getLogger",
"Levenshtein.jaro_winkler",
"numpy.array",
"numpy.random.RandomState",
"fuzzy.Soundex",
"numpy.mean",
"os.listdir",
"numpy.exp",
"os.path.isdir",
"csv.reader",
"scipy.misc.comb",
"csv.writer",
"os.path.splitext",
"bibtexparser.bparser.BibTexParser",
"Levenshtein.dist... | [((436, 463), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (453, 463), False, 'import re, csv, os, threading, logging, sys\n'), ((2596, 2780), 'numpy.array', 'numpy.array', (['[200.064, 1.192, -3.152, 33.034, 0.0, 0.985, 80.515, -3.527, -2.33, -1.916,\n 0.006, 1.863, 0.149, -0.108, -1.397, 87.715, 1.519, -13.372, -10.149, -\n 2.609, -1.637]'], {}), '([200.064, 1.192, -3.152, 33.034, 0.0, 0.985, 80.515, -3.527, -\n 2.33, -1.916, 0.006, 1.863, 0.149, -0.108, -1.397, 87.715, 1.519, -\n 13.372, -10.149, -2.609, -1.637])\n', (2607, 2780), False, 'import numpy\n'), ((5470, 5495), 'bibtexparser.bparser.BibTexParser', 'bp.bparser.BibTexParser', ([], {}), '()\n', (5493, 5495), True, 'import bibtexparser as bp\n'), ((9821, 9850), 'bibtexparser.customization.type', 'bp.customization.type', (['record'], {}), '(record)\n', (9842, 9850), True, 'import bibtexparser as bp\n'), ((9862, 9893), 'bibtexparser.customization.author', 'bp.customization.author', (['record'], {}), '(record)\n', (9885, 9893), True, 'import bibtexparser as bp\n'), ((10654, 10682), 'bibtexparser.bibdatabase.BibDatabase', 'bp.bibdatabase.BibDatabase', ([], {}), '()\n', (10680, 10682), True, 'import bibtexparser as bp\n'), ((15745, 15767), 'fuzzy.Soundex', 'fz.Soundex', (['lenSoundex'], {}), '(lenSoundex)\n', (15755, 15767), True, 'import fuzzy as fz\n'), ((21890, 21929), 'numpy.array', 'numpy.array', (['dataset'], {'dtype': 'numpy.float'}), '(dataset, dtype=numpy.float)\n', (21901, 21929), False, 'import numpy\n'), ((22201, 22234), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (22232, 22234), False, 'from sklearn import linear_model\n'), ((1237, 1261), 'os.path.isdir', 'os.path.isdir', (['importDir'], {}), '(importDir)\n', (1250, 1261), False, 'import re, csv, os, threading, logging, sys\n'), ((10186, 10212), 'os.listdir', 'os.listdir', (['self.importDir'], {}), '(self.importDir)\n', (10196, 10212), False, 
'import re, csv, os, threading, logging, sys\n'), ((22098, 22125), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(2)'], {}), '(2)\n', (22122, 22125), False, 'import numpy\n'), ((22382, 22423), 'numpy.mean', 'numpy.mean', (['(y_train_prediction == y_train)'], {}), '(y_train_prediction == y_train)\n', (22392, 22423), False, 'import numpy\n'), ((22456, 22495), 'numpy.mean', 'numpy.mean', (['(y_test_prediction == y_test)'], {}), '(y_test_prediction == y_test)\n', (22466, 22495), False, 'import numpy\n'), ((4166, 4196), 'bibtexparser.load', 'bp.load', (['f'], {'parser': 'self.parser'}), '(f, parser=self.parser)\n', (4173, 4196), True, 'import bibtexparser as bp\n'), ((13323, 13344), 'scipy.misc.comb', 'ch.comb', (['best_case', '(2)'], {}), '(best_case, 2)\n', (13330, 13344), True, 'import scipy.misc as ch\n'), ((13364, 13386), 'scipy.misc.comb', 'ch.comb', (['worst_case', '(2)'], {}), '(worst_case, 2)\n', (13371, 13386), True, 'import scipy.misc as ch\n'), ((14175, 14196), 'scipy.misc.comb', 'ch.comb', (['best_case', '(2)'], {}), '(best_case, 2)\n', (14182, 14196), True, 'import scipy.misc as ch\n'), ((14216, 14238), 'scipy.misc.comb', 'ch.comb', (['worst_case', '(2)'], {}), '(worst_case, 2)\n', (14223, 14238), True, 'import scipy.misc as ch\n'), ((15574, 15595), 'scipy.misc.comb', 'ch.comb', (['best_case', '(2)'], {}), '(best_case, 2)\n', (15581, 15595), True, 'import scipy.misc as ch\n'), ((15615, 15637), 'scipy.misc.comb', 'ch.comb', (['worst_case', '(2)'], {}), '(worst_case, 2)\n', (15622, 15637), True, 'import scipy.misc as ch\n'), ((4467, 4480), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4477, 4480), False, 'import re, csv, os, threading, logging, sys\n'), ((10231, 10262), 'os.path.join', 'os.path.join', (['self.importDir', 'f'], {}), '(self.importDir, f)\n', (10243, 10262), False, 'import re, csv, os, threading, logging, sys\n'), ((19327, 19346), 'Levenshtein.distance', 'le.distance', (['v1', 'v2'], {}), '(v1, v2)\n', (19338, 19346), True, 
'import Levenshtein as le\n'), ((4660, 4673), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4670, 4673), False, 'import re, csv, os, threading, logging, sys\n'), ((10268, 10287), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (10284, 10287), False, 'import re, csv, os, threading, logging, sys\n'), ((19820, 19838), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (19829, 19838), False, 'import re, csv, os, threading, logging, sys\n'), ((20818, 20835), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (20829, 20835), False, 'import numpy\n'), ((20870, 20892), 'numpy.exp', 'numpy.exp', (['(-prediction)'], {}), '(-prediction)\n', (20879, 20892), False, 'import numpy\n'), ((17593, 17616), 'Levenshtein.jaro_winkler', 'le.jaro_winkler', (['l1', 'l2'], {}), '(l1, l2)\n', (17608, 17616), True, 'import Levenshtein as le\n'), ((17453, 17476), 'Levenshtein.jaro_winkler', 'le.jaro_winkler', (['f1', 'f2'], {}), '(f1, f2)\n', (17468, 17476), True, 'import Levenshtein as le\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon July 9 22:20:12 2018
@author: Adam
"""
import os
import sqlite3
import numpy as np
import pandas as pd
from datetime import datetime
from emonitor.core import TABLE, DATA_DIRE
from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert
from emonitor.data import EmonitorData
from emonitor import history
# constants
COLUMNS = ('A', 'B', 'C')
TCOL = 'TIMESTAMP'
NAME = '__pytest__.db'
DB = db_path(NAME)
if os.path.isfile(DB):
os.remove(DB)
CONN = sqlite3.connect(DB)
DATA = [('2016-12-09 09:08:13', 1, 34.8, 3),
('2018-12-10 09:08:13', 2, 12, 3),
('2018-12-10 09:10:13', 3, 6.7, 3)]
def test_datadire_exists():
assert os.path.exists(DATA_DIRE)
def test_new_db():
db_init(CONN, TABLE, COLUMNS)
db_check(CONN, TABLE, COLUMNS)
def test_desc():
DESC = ("[(0, 'TIMESTAMP', 'timestamp', 1, 'CURRENT_TIMESTAMP', 0),"
" (1, 'A', 'DOUBLE', 0, 'NULL', 0),"
" (2, 'B', 'DOUBLE', 0, 'NULL', 0),"
" (3, 'C', 'DOUBLE', 0, 'NULL', 0)]")
assert str(db_describe(CONN, TABLE)) == DESC
def test_insert():
cols = (TCOL,) + COLUMNS
for d in DATA:
db_insert(CONN, TABLE, cols, d)
def test_history():
start = datetime(2015, 12, 9, 9, 8, 13)
end = datetime(2018, 12, 11, 9, 8, 13)
df = history(CONN, start, end)
vals = np.array([row[1:] for row in DATA])
assert np.array_equal(df.values, vals)
def test_clean():
CONN.close()
os.remove(DB)
def test_emonitordata():
name = NAME
data = EmonitorData(DATA_DIRE)
data.create(name, columns=[1, 2, 3], quiet=True)
fils = data.show()
assert isinstance(fils, list)
assert name in fils
data.destroy(name, force=True)
fils = data.show()
assert name not in fils | [
"datetime.datetime",
"os.path.exists",
"sqlite3.connect",
"emonitor.tools.db_insert",
"emonitor.tools.db_describe",
"os.path.isfile",
"numpy.array",
"numpy.array_equal",
"emonitor.history",
"emonitor.data.EmonitorData",
"emonitor.tools.db_path",
"emonitor.tools.db_init",
"emonitor.tools.db_c... | [((452, 465), 'emonitor.tools.db_path', 'db_path', (['NAME'], {}), '(NAME)\n', (459, 465), False, 'from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert\n'), ((469, 487), 'os.path.isfile', 'os.path.isfile', (['DB'], {}), '(DB)\n', (483, 487), False, 'import os\n'), ((514, 533), 'sqlite3.connect', 'sqlite3.connect', (['DB'], {}), '(DB)\n', (529, 533), False, 'import sqlite3\n'), ((493, 506), 'os.remove', 'os.remove', (['DB'], {}), '(DB)\n', (502, 506), False, 'import os\n'), ((706, 731), 'os.path.exists', 'os.path.exists', (['DATA_DIRE'], {}), '(DATA_DIRE)\n', (720, 731), False, 'import os\n'), ((756, 785), 'emonitor.tools.db_init', 'db_init', (['CONN', 'TABLE', 'COLUMNS'], {}), '(CONN, TABLE, COLUMNS)\n', (763, 785), False, 'from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert\n'), ((790, 820), 'emonitor.tools.db_check', 'db_check', (['CONN', 'TABLE', 'COLUMNS'], {}), '(CONN, TABLE, COLUMNS)\n', (798, 820), False, 'from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert\n'), ((1250, 1281), 'datetime.datetime', 'datetime', (['(2015)', '(12)', '(9)', '(9)', '(8)', '(13)'], {}), '(2015, 12, 9, 9, 8, 13)\n', (1258, 1281), False, 'from datetime import datetime\n'), ((1292, 1324), 'datetime.datetime', 'datetime', (['(2018)', '(12)', '(11)', '(9)', '(8)', '(13)'], {}), '(2018, 12, 11, 9, 8, 13)\n', (1300, 1324), False, 'from datetime import datetime\n'), ((1334, 1359), 'emonitor.history', 'history', (['CONN', 'start', 'end'], {}), '(CONN, start, end)\n', (1341, 1359), False, 'from emonitor import history\n'), ((1371, 1406), 'numpy.array', 'np.array', (['[row[1:] for row in DATA]'], {}), '([row[1:] for row in DATA])\n', (1379, 1406), True, 'import numpy as np\n'), ((1418, 1449), 'numpy.array_equal', 'np.array_equal', (['df.values', 'vals'], {}), '(df.values, vals)\n', (1432, 1449), True, 'import numpy as np\n'), ((1490, 1503), 'os.remove', 'os.remove', (['DB'], {}), '(DB)\n', 
(1499, 1503), False, 'import os\n'), ((1557, 1580), 'emonitor.data.EmonitorData', 'EmonitorData', (['DATA_DIRE'], {}), '(DATA_DIRE)\n', (1569, 1580), False, 'from emonitor.data import EmonitorData\n'), ((1185, 1216), 'emonitor.tools.db_insert', 'db_insert', (['CONN', 'TABLE', 'cols', 'd'], {}), '(CONN, TABLE, cols, d)\n', (1194, 1216), False, 'from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert\n'), ((1075, 1099), 'emonitor.tools.db_describe', 'db_describe', (['CONN', 'TABLE'], {}), '(CONN, TABLE)\n', (1086, 1099), False, 'from emonitor.tools import db_path, db_init, db_check, db_describe, db_insert\n')] |
#
# Copyright (c) 2016-2021 <NAME>
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
from sklearn.utils import arrayfuncs
from sklearn import datasets
class LarsLasso:
def __init__(self, alpha: float = 1.0):
self.alpha = alpha
def predict(self, X: np.ndarray):
y = np.dot(X, self.coef_) + self.intercept_
return y
def fit(self, X: np.ndarray, y: np.ndarray):
n, p = X.shape
self.intercept_ = y.mean()
y = y - self.intercept_
self.coef_ = np.zeros(p)
active_set = []
inactive_set = list(range(p))
beta = np.zeros(p)
mu = np.zeros(n)
change_sign_flag = False
k = 0
while k != min(p, n - 1):
c = np.dot(X.T, y - mu)
# print(np.sign(c[active_set]) * np.sign(beta[active_set]))
if change_sign_flag:
# remove j from the calculation of the next equiangular direction.
pass
else:
j = inactive_set[np.argmax(np.abs(c[inactive_set]))]
# print(f"add {j=}")
active_set.append(j)
inactive_set.remove(j)
C = np.amax(np.abs(c))
s = np.sign(c[active_set]).reshape((1, len(active_set)))
XA = np.copy(X[:, active_set] * s)
GA = XA.T @ XA
GA_inv = np.linalg.inv(GA)
one = np.ones((len(active_set), 1))
AA = (1. / np.sqrt(one.T @ GA_inv @ one)).flatten()[0]
w = AA * GA_inv @ one
u = XA @ w
a = X.T @ u
d = s.T * w
if k == p - 1:
gamma = C / AA
else:
gamma_candidates = np.zeros((len(inactive_set), 2))
for _j, jj in enumerate(inactive_set):
gamma_candidates[_j] = [(C - c[jj]) / (AA - a[jj]), (C + c[jj]) / (AA + a[jj])]
gamma = arrayfuncs.min_pos(gamma_candidates)
gamma_candidates_tilde = - beta[active_set] / d.flatten()
gamma_tilde = arrayfuncs.min_pos(gamma_candidates_tilde)
change_sign_flag = False
if gamma_tilde < gamma:
gamma = gamma_tilde
j = active_set[list(gamma_candidates_tilde).index(gamma)]
# print(f"remove {j=}")
change_sign_flag = True
new_beta = beta[active_set] + gamma * d.flatten()
idx = 0 if j != 0 else 1
tmp_beta = np.zeros(p)
tmp_beta[active_set] = new_beta.copy()
lambda_ = np.abs(X[:, active_set[idx]] @ (y - X @ tmp_beta)) * 2 / n
if lambda_ < self.alpha:
prev_lambda_ = np.abs(X[:, active_set[idx]] @ (y - X @ self.coef_)) * 2 / n
if len(active_set) < 2 and prev_lambda_ < self.alpha:
break
# print(prev_lambda_, lambda_, self.alpha)
modified_gamma = 0 + (gamma - 0) * (self.alpha - prev_lambda_) / (lambda_ - prev_lambda_)
beta[active_set] += modified_gamma * d.flatten()
mu = mu + modified_gamma * u.flatten()
self.coef_ = beta.copy()
# print(np.abs(X[:, active_set[idx]] @ (y - X @ self.coef_)) * 2 / n)
break
mu = mu + gamma * u.flatten()
beta[active_set] = new_beta.copy()
self.coef_ = beta.copy()
# print(self.coef_)
if change_sign_flag:
active_set.remove(j)
inactive_set.append(j)
k = len(active_set)
return self
if __name__ == "__main__":
dataset = datasets.load_boston()
X = dataset.data
y = dataset.target
X = (X - X.mean(axis=0, keepdims=True)) / X.std(axis=0, keepdims=True)
model = LarsLasso(alpha=1)
model.fit(X, y)
print(model.intercept_)
print(model.coef_)
| [
"numpy.copy",
"numpy.abs",
"numpy.sqrt",
"sklearn.datasets.load_boston",
"numpy.dot",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.sign",
"sklearn.utils.arrayfuncs.min_pos"
] | [((3781, 3803), 'sklearn.datasets.load_boston', 'datasets.load_boston', ([], {}), '()\n', (3801, 3803), False, 'from sklearn import datasets\n'), ((628, 639), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (636, 639), True, 'import numpy as np\n'), ((718, 729), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (726, 729), True, 'import numpy as np\n'), ((743, 754), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (751, 754), True, 'import numpy as np\n'), ((409, 430), 'numpy.dot', 'np.dot', (['X', 'self.coef_'], {}), '(X, self.coef_)\n', (415, 430), True, 'import numpy as np\n'), ((854, 873), 'numpy.dot', 'np.dot', (['X.T', '(y - mu)'], {}), '(X.T, y - mu)\n', (860, 873), True, 'import numpy as np\n'), ((1405, 1434), 'numpy.copy', 'np.copy', (['(X[:, active_set] * s)'], {}), '(X[:, active_set] * s)\n', (1412, 1434), True, 'import numpy as np\n'), ((1484, 1501), 'numpy.linalg.inv', 'np.linalg.inv', (['GA'], {}), '(GA)\n', (1497, 1501), True, 'import numpy as np\n'), ((2183, 2225), 'sklearn.utils.arrayfuncs.min_pos', 'arrayfuncs.min_pos', (['gamma_candidates_tilde'], {}), '(gamma_candidates_tilde)\n', (2201, 2225), False, 'from sklearn.utils import arrayfuncs\n'), ((2613, 2624), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (2621, 2624), True, 'import numpy as np\n'), ((1308, 1317), 'numpy.abs', 'np.abs', (['c'], {}), '(c)\n', (1314, 1317), True, 'import numpy as np\n'), ((2049, 2085), 'sklearn.utils.arrayfuncs.min_pos', 'arrayfuncs.min_pos', (['gamma_candidates'], {}), '(gamma_candidates)\n', (2067, 2085), False, 'from sklearn.utils import arrayfuncs\n'), ((1335, 1357), 'numpy.sign', 'np.sign', (['c[active_set]'], {}), '(c[active_set])\n', (1342, 1357), True, 'import numpy as np\n'), ((2698, 2748), 'numpy.abs', 'np.abs', (['(X[:, active_set[idx]] @ (y - X @ tmp_beta))'], {}), '(X[:, active_set[idx]] @ (y - X @ tmp_beta))\n', (2704, 2748), True, 'import numpy as np\n'), ((1145, 1168), 'numpy.abs', 'np.abs', (['c[inactive_set]'], {}), '(c[inactive_set])\n', 
(1151, 1168), True, 'import numpy as np\n'), ((2826, 2878), 'numpy.abs', 'np.abs', (['(X[:, active_set[idx]] @ (y - X @ self.coef_))'], {}), '(X[:, active_set[idx]] @ (y - X @ self.coef_))\n', (2832, 2878), True, 'import numpy as np\n'), ((1574, 1603), 'numpy.sqrt', 'np.sqrt', (['(one.T @ GA_inv @ one)'], {}), '(one.T @ GA_inv @ one)\n', (1581, 1603), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# <img style="float: left;padding: 1.3em" src="https://indico.in2p3.fr/event/18313/logo-786578160.png">
#
# # Gravitational Wave Open Data Workshop #3
#
#
# ## Tutorial 2.1 PyCBC Tutorial, An introduction to matched-filtering
#
# We will be using the [PyCBC](http://github.com/ligo-cbc/pycbc) library, which is used to study gravitational-wave data, find astrophysical sources due to compact binary mergers, and study their parameters. These are some of the same tools that the LIGO and Virgo collaborations use to find gravitational waves in LIGO/Virgo data
#
# In this tutorial we will walk through how to find a specific signal in LIGO data. We present matched filtering as a cross-correlation, in both the time domain and the frequency domain. In the next tutorial (2.2), we use the method as encoded in PyCBC, which is optimal in the case of Gaussian noise and a known signal model. In reality our noise is not entirely Gaussian, and in practice we use a variety of techniques to separate signals from noise in addition to the use of the matched filter.
#
# [Click this link to view this tutorial in Google Colaboratory](https://colab.research.google.com/github/gw-odw/odw-2020/blob/master/Day_2/Tuto_2.1_Matched_filtering_introduction.ipynb)
#
# Additional [examples](http://pycbc.org/pycbc/latest/html/#library-examples-and-interactive-tutorials) and module level documentation are [here](http://pycbc.org/pycbc/latest/html/py-modindex.html)
# ## Installation (un-comment and execute only if running on a cloud platform!)
# In[1]:
# -- Use the following for Google Colab
#! pip install -q 'lalsuite==6.66' 'PyCBC==1.15.3'
# **Important:** With Google Colab, you may need to restart the runtime after running the cell above.
# ### Matched-filtering: Finding well modelled signals in Gaussian noise
#
# Matched filtering can be shown to be the optimal method for "detecting" signals---when the signal waveform is known---in Gaussian noise. We'll explore those assumptions a little later, but for now let's demonstrate how this works.
#
# Let's assume you have a stretch of noise, white noise to start:
# In[162]:
# Enable inline plotting (only works when running under IPython/Jupyter).
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy
import pylab

# specify the sample rate.
# LIGO raw data is sampled at 16384 Hz (=2^14 samples/second).
# It captures signal frequency content up to f_Nyquist = 8192 Hz.
# Here, we will make the computation faster by sampling at a lower rate.
sample_rate = 1024 # samples per second
data_length = 1024 # seconds

# Generate a long stretch of white (Gaussian, zero-mean, unit-variance) noise:
# the data series and the matching time series (in seconds).
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
# And then let's add a gravitational wave signal to some random part of this data.
# In[163]:
from pycbc.waveform import get_td_waveform

# the "approximant" (jargon for parameterized waveform family).
# IMRPhenomD is defined in the frequency domain, but we'll get it in the time domain (td).
# It runs fast, but it doesn't include effects such as non-aligned component spin, or higher order modes.
apx = 'IMRPhenomD'

# You can specify many parameters,
# https://pycbc.org/pycbc/latest/html/pycbc.waveform.html?highlight=get_td_waveform#pycbc.waveform.waveform.get_td_waveform
# but here, we'll use defaults for everything except the masses.
# It returns both hplus and hcross, but we'll only use hplus for now.
hp1, _ = get_td_waveform(approximant=apx,
                         mass1=10,
                         mass2=10,
                         delta_t=1.0/sample_rate,
                         f_lower=25)

# The amplitude of gravitational-wave signals is normally of order 1E-20. To demonstrate our method
# on white noise with amplitude O(1) we normalize our signal so the cross-correlation of the signal with
# itself will give a value of 1. In this case we can interpret the cross-correlation of the signal with white
# noise as a signal-to-noise ratio.
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5

# note that in this figure, the waveform amplitude is of order 1.
# The duration (for frequency above f_lower=25 Hz) is only 3 or 4 seconds long.
# The waveform is "tapered": slowly ramped up from zero to full strength, over the first second or so.
# It is zero-padded at earlier times.
pylab.figure()
pylab.title("The waveform hp1")
pylab.plot(hp1.sample_times, hp1)
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')

# Shift the waveform to start at a random time in the Gaussian noise data,
# and inject it in place with amplitude 10 (so the expected SNR is ~10).
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()

# The injected stretch of data alone: the signal is invisible by eye.
pylab.figure()
pylab.title("Looks like random noise, right?")
pylab.plot(hp1.sample_times, data[waveform_start:waveform_start+len(hp1)])
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')

# Same stretch with the (scaled) template overlaid, to show where the signal is.
pylab.figure()
pylab.title("Signal in the data")
pylab.plot(hp1.sample_times, data[waveform_start:waveform_start+len(hp1)])
pylab.plot(hp1.sample_times, 10 * hp1)
pylab.xlabel('Time (s)')
pylab.ylabel('Normalized amplitude')
# To search for this signal we can cross-correlate the signal with the entire dataset -> Not in any way optimized at this point, just showing the method.
#
# We will do the cross-correlation in the time domain, once for each time step. It runs slowly...
# In[164]:
# Slide the template across the data, one sample at a time, and record the
# overlap (dot product) at every offset.  Deliberately unoptimized -- this is
# the pedagogical, slow form of the matched filter.
hp1_numpy = hp1.numpy()
template_len = len(hp1_numpy)
n_lags = len(data) - template_len
cross_correlation = numpy.zeros([n_lags])
for lag in range(n_lags):
    segment = data[lag:lag + template_len]
    cross_correlation[lag] = (hp1_numpy * segment).sum()

# Plot the cross-correlation vs time; the dotted red line marks the offset of
# the injected signal, where the peak should appear.
pylab.figure()
times = numpy.arange(n_lags) / float(sample_rate)
pylab.plot(times, cross_correlation)
t_inject = waveform_start / float(sample_rate)
pylab.plot([t_inject, t_inject], [-10, 10], 'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# Here you can see that the largest spike from the cross-correlation comes at the time of the signal. We only really need one more ingredient to describe matched-filtering: "Colored" noise (Gaussian noise but with a frequency-dependent variance; white noise has frequency-independent variance).
#
# Let's repeat the process, but generate a stretch of data colored with LIGO's zero-detuned--high-power noise curve. We'll use a PyCBC library to do this.
# In[165]:
# http://pycbc.org/pycbc/latest/html/noise.html
import pycbc.noise
import pycbc.psd

# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve,
# sampled from flow up to the Nyquist frequency with resolution delta_f.
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

# Generate colored noise with that PSD (fixed seed for reproducibility).
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)

# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)

# plot it: the Welch estimate should track the known analytic PSD.
# NOTE(review): pylab.xlim(xmin=..., xmax=...) relies on keyword names that
# newer matplotlib releases removed (use left=/right=) -- verify the pinned
# matplotlib version before upgrading.
pylab.loglog(estimated_psd.sample_frequencies, estimated_psd, label='estimate')
pylab.loglog(psd.sample_frequencies, psd, linewidth=3, label='known psd')
pylab.xlim(xmin=flow, xmax=512)
pylab.ylim(1e-47, 1e-45)
pylab.legend()
pylab.grid()
pylab.show()

# add the signal, this time, with a "typical" amplitude (strain ~1E-20).
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# Then all we need to do is to "whiten" both the data, and the template waveform. This can be done, in the frequency domain, by dividing by the PSD. This *can* be done in the time domain as well, but it's more intuitive in the frequency domain
# In[166]:
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries

# The PSD, sampled properly for the noisy data
# (frequency resolution 1/data_length, up to Nyquist).
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)

# The PSD, sampled properly for the signal
# (note: delta_f and flen are deliberately rebound here for the template's length).
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)

# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]

# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]

# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
# In[167]:
# Now let's re-do the correlation, in the time domain, but with whitened data and template.
# Repeat the time-domain correlation, now on the WHITENED data and template.
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
n_lags = len(datan) - len(hp1n)
cross_correlation = numpy.zeros([len(data) - len(hp1)])
for lag in range(n_lags):
    cross_correlation[lag] = (hp1n * datan[lag:lag + len(hp1n)]).sum()

# Plot the cross-correlation; the dotted red line marks the injection time.
# The peak is far larger relative to the noise floor than in the unwhitened
# version -- the SNR is much higher.
pylab.figure()
times = numpy.arange(n_lags) / float(sample_rate)
pylab.plot(times, cross_correlation)
t_inject = waveform_start / float(sample_rate)
cc_lo = min(cross_correlation) * 1.1
cc_hi = max(cross_correlation) * 1.1
pylab.plot([t_inject, t_inject], [cc_lo, cc_hi], 'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# # Challenge!
#
# * Histogram the whitened time series. Ignoring the outliers associated with the signal, is it a Gaussian? What is the mean and standard deviation? (We have not been careful in normalizing the whitened data properly).
# * Histogram the above cross-correlation time series. Ignoring the outliers associated with the signal, is it a Gaussian? What is the mean and standard deviation?
# * Find the location of the peak. (Note that here, it can be positive or negative), and the value of the SNR of the signal (which is the absolute value of the peak value, divided by the standard deviation of the cross-correlation time series).
#
# ## Optional challenge question. much harder:
# * Repeat this process, but instead of using a waveform with mass1=mass2=10, try 15, 20, or 25. Plot the SNR vs mass. Careful! Using lower masses (eg, mass1=mass2=1.4 Msun) will not work here. Why?
# In[168]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm

# Histogram the whitened time series and overlay the best-fit Gaussian PDF.
fig, ax = plt.subplots(figsize=(10, 7))
n, bins, patches = ax.hist(data_whitened, bins=75, density=1,
                           range=[-100, 100], color='orange')

# Sample statistics of the whitened data (signal outliers included).
mean = np.mean(data_whitened)
print('mean', mean)
std = np.std(data_whitened)
print('std', std)
median = np.median(data_whitened)
print('median', median)

# Gaussian PDF with the sample mean/std, evaluated at the histogram bin edges.
fit = norm.pdf(bins, mean, std)
ax.plot(bins, fit, '--', color='r', linewidth=3.0)

ax.set_title(r'std.dev = $\sigma$ = {0:.3f}'.format(std), loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}'.format(mean), loc='right')
ax.set_title(r'median = {0:1.3f}'.format(median), loc='center')
ax.set_ylabel('Whitened Data', fontsize=15)
plt.show()
#0:1.2f
# ###### Yes the histogram plot of the whitened data is Gaussian
# In[169]:
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm

# Histogram the cross-correlation time series, overlay a Gaussian fit,
# and compute the matched-filter SNR for the mass1=mass2=10 injection.
fig, ax = plt.subplots(figsize=(10, 7))
n, bins, patches = ax.hist(cross_correlation, bins=275, density=1,
                           range=[-15000, 15000], color='orange')

# Sample statistics of the cross-correlation (signal outliers included).
mean = np.mean(cross_correlation)
print('mean', mean)
std = np.std(cross_correlation)
print('std', std)
median = np.median(cross_correlation)
print('median', median)

# Gaussian PDF with the sample mean/std, evaluated at the histogram bin edges.
fit = norm.pdf(bins, mean, std)
ax.plot(bins, fit, '--', color='r', linewidth=3.0)
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}'.format(std), loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}'.format(mean), loc='right')
ax.set_title(r'median = {0:1.3f}'.format(median), loc='center')
ax.set_ylabel('Cross Correlation', fontsize=15)
plt.show()

# Peak histogram density and its bin index (diagnostic output only).
n_max = n.max()
print(n.max())
bin_nmax = np.argmax(n)
# SNR = |peak of the cross-correlation series| / its standard deviation,
# as defined in the challenge statement above.  (n_max is the peak
# *histogram density*, which is unrelated to the signal peak -- dividing
# it by std does not give the SNR.)
SNR_10 = np.abs(cross_correlation).max() / std
print('The SNR_10 value is', SNR_10)
#print(bin_nmax)
# ### For mass1=mass2=15
# In[170]:
# --- Repeat the full injection/whitening/correlation analysis for mass1 = mass2 = 15 ---
import numpy
import pylab

sample_rate = 1024 # samples per second
data_length = 1024 # seconds

# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)

from pycbc.waveform import get_td_waveform
apx = 'IMRPhenomD'
# Time-domain template for an equal-mass (15+15) binary, starting at 25 Hz.
hp1, _ = get_td_waveform(approximant=apx,
                         mass1=15,
                         mass2=15,
                         delta_t=1.0/sample_rate,
                         f_lower=25)

# Normalize so the template's autocorrelation peak is 1.
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5

# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()

# NOTE(review): the slow unwhitened correlation loop that used to run here was
# pure dead work -- its result was overwritten by the whitened correlation
# below before ever being read -- so the loop has been removed.  The array and
# template copy are kept so that all later names remain defined.
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()

import pycbc.noise
import pycbc.psd

# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)

# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)

# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20

# Generate a PSD for whitening the data
from pycbc.types import TimeSeries

# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)

# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)

# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]

# Whiten: divide data and template by ASD = PSD**0.5 in the frequency domain,
# then return to the time domain.  1E-21 puts the template into realistic
# units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21

# Cross-correlate the whitened data against the whitened template.
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
    cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()

# Plot the whitened cross-correlation; the dotted line marks the injection time.
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
           [(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# In[171]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm

# Histogram the whitened time series (mass-15 run) and overlay a Gaussian fit.
fig, ax = plt.subplots(figsize=(10, 7))
n, bins, patches = ax.hist(data_whitened, bins=75, density=1,
                           range=[-100, 100], color='orange')

# Sample statistics of the whitened data (signal outliers included).
mean = np.mean(data_whitened)
print('mean', mean)
std = np.std(data_whitened)
print('std', std)
median = np.median(data_whitened)
print('median', median)

# Gaussian PDF with the sample mean/std, evaluated at the histogram bin edges.
fit = norm.pdf(bins, mean, std)
ax.plot(bins, fit, '--', color='r', linewidth=3.0)

ax.set_title(r'std.dev = $\sigma$ = {0:.3f}'.format(std), loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}'.format(mean), loc='right')
ax.set_title(r'median = {0:1.3f}'.format(median), loc='center')
ax.set_ylabel('Whitened Data', fontsize=15)
plt.show()
# In[172]:
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm

# Histogram the cross-correlation time series, overlay a Gaussian fit,
# and compute the matched-filter SNR for the mass1=mass2=15 injection.
fig, ax = plt.subplots(figsize=(10, 7))
n, bins, patches = ax.hist(cross_correlation, bins=275, density=1,
                           range=[-15000, 15000], color='orange')

# Sample statistics of the cross-correlation (signal outliers included).
mean = np.mean(cross_correlation)
print('mean', mean)
std = np.std(cross_correlation)
print('std', std)
median = np.median(cross_correlation)
print('median', median)

# Gaussian PDF with the sample mean/std, evaluated at the histogram bin edges.
fit = norm.pdf(bins, mean, std)
ax.plot(bins, fit, '--', color='r', linewidth=3.0)
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}'.format(std), loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}'.format(mean), loc='right')
ax.set_title(r'median = {0:1.3f}'.format(median), loc='center')
ax.set_ylabel('Cross Correlation', fontsize=15)
plt.show()

# Peak histogram density and its bin index (diagnostic output only).
n_max = n.max()
print(n.max())
bin_nmax = np.argmax(n)
# SNR = |peak of the cross-correlation series| / its standard deviation,
# as defined in the challenge statement above.  (n_max is the peak
# *histogram density*, which is unrelated to the signal peak -- dividing
# it by std does not give the SNR.)
SNR_15 = np.abs(cross_correlation).max() / std
print('The SNR_15 value is', SNR_15)
#print(bin_nmax)
#print(bin_nmax)
# ### For mass1=mass2=20
# In[173]:
# --- Repeat the injection/whitening analysis for mass1 = mass2 = 20 ---
import numpy
import pylab

sample_rate = 1024 # samples per second
data_length = 1024 # seconds

# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)

from pycbc.waveform import get_td_waveform
apx = 'IMRPhenomD'
# Time-domain template for an equal-mass (20+20) binary, starting at 25 Hz.
hp1, _ = get_td_waveform(approximant=apx,
                         mass1=20,
                         mass2=20,
                         delta_t=1.0/sample_rate,
                         f_lower=25)

# Normalize so the template's autocorrelation peak is 1.
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5

# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()

# Unwhitened time-domain cross-correlation (very slow pure-Python loop).
# NOTE(review): in the mass-15 section above, this result is overwritten by the
# whitened correlation before being used; presumably the same holds here --
# confirm against the code that follows this section, and remove if dead.
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
    cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()

import pycbc.noise
import pycbc.psd

# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)

# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)

# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)

# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20

# Generate a PSD for whitening the data
from pycbc.types import TimeSeries

# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)

# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)

# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]

# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# plot the cross-correlation in the time domain. Superimpose the location of the end of the signal.
# Note how much bigger the cross-correlation peak is, relative to the noise level,
# compared with the unwhitened version of the same quantity. SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
[(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# In[174]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(data_whitened, bins = 75, density=1, range=[-100,100],color='orange')
mean = np.mean(data_whitened)
print('mean',mean)
std = np.std(data_whitened)
print('std',std)
median = np.median(data_whitened)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Whitened Data',fontsize=15)
plt.show()
# In[175]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(cross_correlation, bins = 275, density=1, range=[-15000,15000],color='orange')
mean = np.mean(cross_correlation)
print('mean',mean)
std = np.std(cross_correlation)
print('std',std)
median = np.median(cross_correlation)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Cross Correlation',fontsize=15)
plt.show()
n_max=n.max()
print(n.max())
bin_nmax = np.argmax(n)
SNR_20=n_max/std
print('The SNR_20 value is',SNR_20)
#print(bin_nmax)
# ### For mass1=mass2=25
# In[176]:
import numpy
import pylab
sample_rate = 1024 # samples per second
data_length = 1024 # seconds
# Generate a long stretch of white noise: the data series and the time series.
data = numpy.random.normal(size=[sample_rate * data_length])
times = numpy.arange(len(data)) / float(sample_rate)
from pycbc.waveform import get_td_waveform
apx = 'IMRPhenomD'
hp1, _ = get_td_waveform(approximant=apx,
mass1=25,
mass2=25,
delta_t=1.0/sample_rate,
f_lower=25)
hp1 = hp1 / max(numpy.correlate(hp1,hp1, mode='full'))**0.5
# Shift the waveform to start at a random time in the Gaussian noise data.
waveform_start = numpy.random.randint(0, len(data) - len(hp1))
data[waveform_start:waveform_start+len(hp1)] += 10 * hp1.numpy()
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1_numpy = hp1.numpy()
for i in range(len(data) - len(hp1_numpy)):
cross_correlation[i] = (hp1_numpy * data[i:i+len(hp1_numpy)]).sum()
import pycbc.noise
import pycbc.psd
# The color of the noise matches a PSD which you provide:
# Generate a PSD matching Advanced LIGO's zero-detuned--high-power noise curve
flow = 10.0
delta_f = 1.0 / 128
flen = int(sample_rate / (2 * delta_f)) + 1
psd = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, flow)
# Generate colored noise
delta_t = 1.0 / sample_rate
ts = pycbc.noise.noise_from_psd(data_length*sample_rate, delta_t, psd, seed=127)
# Estimate the amplitude spectral density (ASD = sqrt(PSD)) for the noisy data
# using the "welch" method. We'll choose 4 seconds PSD samples that are overlapped 50%
seg_len = int(4 / delta_t)
seg_stride = int(seg_len / 2)
estimated_psd = pycbc.psd.welch(ts,seg_len=seg_len,seg_stride=seg_stride)
# add the signal, this time, with a "typical" amplitude.
ts[waveform_start:waveform_start+len(hp1)] += hp1.numpy() * 1E-20
# Generate a PSD for whitening the data
from pycbc.types import TimeSeries
# The PSD, sampled properly for the noisy data
flow = 10.0
delta_f = 1.0 / data_length
flen = int(sample_rate / (2 * delta_f)) + 1
psd_td = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The PSD, sampled properly for the signal
delta_f = sample_rate / float(len(hp1))
flen = int(sample_rate / (2 * delta_f)) + 1
psd_hp1 = pycbc.psd.aLIGOZeroDetHighPower(flen, delta_f, 0)
# The 0th and Nth values are zero. Set them to a nearby value to avoid dividing by zero.
psd_td[0] = psd_td[1]
psd_td[len(psd_td) - 1] = psd_td[len(psd_td) - 2]
# Same, for the PSD sampled for the signal
psd_hp1[0] = psd_hp1[1]
psd_hp1[len(psd_hp1) - 1] = psd_hp1[len(psd_hp1) - 2]
# convert both noisy data and the signal to frequency domain,
# and divide each by ASD=PSD**0.5, then convert back to time domain.
# This "whitens" the data and the signal template.
# Multiplying the signal template by 1E-21 puts it into realistic units of strain.
data_whitened = (ts.to_frequencyseries() / psd_td**0.5).to_timeseries()
hp1_whitened = (hp1.to_frequencyseries() / psd_hp1**0.5).to_timeseries() * 1E-21
cross_correlation = numpy.zeros([len(data)-len(hp1)])
hp1n = hp1_whitened.numpy()
datan = data_whitened.numpy()
for i in range(len(datan) - len(hp1n)):
cross_correlation[i] = (hp1n * datan[i:i+len(hp1n)]).sum()
# plot the cross-correlation in the time domain. Superimpose the location of the end of the signal.
# Note how much bigger the cross-correlation peak is, relative to the noise level,
# compared with the unwhitened version of the same quantity. SNR is much higher!
pylab.figure()
times = numpy.arange(len(datan) - len(hp1n)) / float(sample_rate)
pylab.plot(times, cross_correlation)
pylab.plot([waveform_start/float(sample_rate), waveform_start/float(sample_rate)],
[(min(cross_correlation))*1.1,(max(cross_correlation))*1.1],'r:')
pylab.xlabel('Time (s)')
pylab.ylabel('Cross-correlation')
# In[177]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(data_whitened, bins = 75, density=1, range=[-100,100],color='orange')
mean = np.mean(data_whitened)
print('mean',mean)
std = np.std(data_whitened)
print('std',std)
median = np.median(data_whitened)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Whitened Data',fontsize=15)
plt.show()
# In[178]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
from scipy.stats import norm
fig, ax = plt.subplots(figsize =(10, 7))
n,bins,patches = ax.hist(cross_correlation, bins = 275, density=1, range=[-15000,15000],color='orange')
mean = np.mean(cross_correlation)
print('mean',mean)
std = np.std(cross_correlation)
print('std',std)
median = np.median(cross_correlation)
print('median',median)
fit=norm.pdf(bins,mean,std)
ax.plot(bins,fit,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_title(r'std.dev = $\sigma$ = {0:.3f}' .format(std),loc='left')
ax.set_title(r'mean = $\mu$ = {0:1.3e}' .format(mean),loc='right')
ax.set_title(r'median = {0:1.3f}' .format(median),loc='center')
ax.set_ylabel('Cross Correlation',fontsize=15)
plt.show()
n_max=n.max()
print(n.max())
bin_nmax = np.argmax(n)
SNR_25=n_max/std
print('The SNR_25 value is',SNR_25)
#print(bin_nmax)
# In[184]:
SNR=[SNR_10,SNR_15,SNR_20,SNR_25]
Mass=[10,15,20,25]
fig, ax = plt.subplots(figsize =(10, 7))
ax.plot(Mass,SNR,'--',color='r', linewidth=3.0)
#ax.set_title(r'$\sigma$ = {} and mean = {}' .format(std, mean))
ax.set_xlabel('Mass',fontsize=15)
ax.set_ylabel('SNR',fontsize=15)
#ax.set_yscale('log')
#ax.set_xscale('log')
plt.show()
# ### Optimizing a matched-filter
#
# That's all that a matched-filter is. A cross-correlation of the data with a template waveform performed as a function of time. This cross-correlation walking through the data is a convolution operation. Convolution operations are more optimally performed in the frequency domain, which becomes a `O(N ln N)` operation, as opposed to the `O(N^2)` operation shown here. You can also conveniently vary the phase of the signal in the frequency domain, as we will illustrate in the next tutorial. PyCBC implements a frequency-domain matched-filtering engine, which is much faster than the code we've shown here. Let's move to the next tutorial now, where we will demonstrate its use on real data.
| [
"pylab.title",
"pycbc.waveform.get_td_waveform",
"pylab.xlabel",
"pylab.loglog",
"numpy.mean",
"pylab.ylim",
"pylab.ylabel",
"pylab.plot",
"pylab.xlim",
"numpy.random.normal",
"numpy.argmax",
"pylab.figure",
"numpy.correlate",
"scipy.stats.norm.pdf",
"numpy.std",
"matplotlib.pyplot.sho... | [((2644, 2697), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '[sample_rate * data_length]'}), '(size=[sample_rate * data_length])\n', (2663, 2697), False, 'import numpy\n'), ((3479, 3574), 'pycbc.waveform.get_td_waveform', 'get_td_waveform', ([], {'approximant': 'apx', 'mass1': '(10)', 'mass2': '(10)', 'delta_t': '(1.0 / sample_rate)', 'f_lower': '(25)'}), '(approximant=apx, mass1=10, mass2=10, delta_t=1.0 /\n sample_rate, f_lower=25)\n', (3494, 3574), False, 'from pycbc.waveform import get_td_waveform\n'), ((4370, 4384), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (4382, 4384), False, 'import pylab\n'), ((4385, 4416), 'pylab.title', 'pylab.title', (['"""The waveform hp1"""'], {}), "('The waveform hp1')\n", (4396, 4416), False, 'import pylab\n'), ((4417, 4450), 'pylab.plot', 'pylab.plot', (['hp1.sample_times', 'hp1'], {}), '(hp1.sample_times, hp1)\n', (4427, 4450), False, 'import pylab\n'), ((4451, 4475), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (4463, 4475), False, 'import pylab\n'), ((4476, 4512), 'pylab.ylabel', 'pylab.ylabel', (['"""Normalized amplitude"""'], {}), "('Normalized amplitude')\n", (4488, 4512), False, 'import pylab\n'), ((4718, 4732), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (4730, 4732), False, 'import pylab\n'), ((4733, 4779), 'pylab.title', 'pylab.title', (['"""Looks like random noise, right?"""'], {}), "('Looks like random noise, right?')\n", (4744, 4779), False, 'import pylab\n'), ((4855, 4879), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (4867, 4879), False, 'import pylab\n'), ((4880, 4916), 'pylab.ylabel', 'pylab.ylabel', (['"""Normalized amplitude"""'], {}), "('Normalized amplitude')\n", (4892, 4916), False, 'import pylab\n'), ((4918, 4932), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (4930, 4932), False, 'import pylab\n'), ((4933, 4966), 'pylab.title', 'pylab.title', (['"""Signal in the data"""'], {}), 
"('Signal in the data')\n", (4944, 4966), False, 'import pylab\n'), ((5042, 5080), 'pylab.plot', 'pylab.plot', (['hp1.sample_times', '(10 * hp1)'], {}), '(hp1.sample_times, 10 * hp1)\n', (5052, 5080), False, 'import pylab\n'), ((5081, 5105), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (5093, 5105), False, 'import pylab\n'), ((5106, 5142), 'pylab.ylabel', 'pylab.ylabel', (['"""Normalized amplitude"""'], {}), "('Normalized amplitude')\n", (5118, 5142), False, 'import pylab\n'), ((5766, 5780), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (5778, 5780), False, 'import pylab\n'), ((5851, 5887), 'pylab.plot', 'pylab.plot', (['times', 'cross_correlation'], {}), '(times, cross_correlation)\n', (5861, 5887), False, 'import pylab\n'), ((5986, 6010), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (5998, 6010), False, 'import pylab\n'), ((6011, 6044), 'pylab.ylabel', 'pylab.ylabel', (['"""Cross-correlation"""'], {}), "('Cross-correlation')\n", (6023, 6044), False, 'import pylab\n'), ((7319, 7398), 'pylab.loglog', 'pylab.loglog', (['estimated_psd.sample_frequencies', 'estimated_psd'], {'label': '"""estimate"""'}), "(estimated_psd.sample_frequencies, estimated_psd, label='estimate')\n", (7331, 7398), False, 'import pylab\n'), ((7399, 7472), 'pylab.loglog', 'pylab.loglog', (['psd.sample_frequencies', 'psd'], {'linewidth': '(3)', 'label': '"""known psd"""'}), "(psd.sample_frequencies, psd, linewidth=3, label='known psd')\n", (7411, 7472), False, 'import pylab\n'), ((7473, 7504), 'pylab.xlim', 'pylab.xlim', ([], {'xmin': 'flow', 'xmax': '(512)'}), '(xmin=flow, xmax=512)\n', (7483, 7504), False, 'import pylab\n'), ((7505, 7529), 'pylab.ylim', 'pylab.ylim', (['(1e-47)', '(1e-45)'], {}), '(1e-47, 1e-45)\n', (7515, 7529), False, 'import pylab\n'), ((7530, 7544), 'pylab.legend', 'pylab.legend', ([], {}), '()\n', (7542, 7544), False, 'import pylab\n'), ((7545, 7557), 'pylab.grid', 'pylab.grid', ([], {}), '()\n', (7555, 
7557), False, 'import pylab\n'), ((7558, 7570), 'pylab.show', 'pylab.show', ([], {}), '()\n', (7568, 7570), False, 'import pylab\n'), ((9699, 9713), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (9711, 9713), False, 'import pylab\n'), ((9780, 9816), 'pylab.plot', 'pylab.plot', (['times', 'cross_correlation'], {}), '(times, cross_correlation)\n', (9790, 9816), False, 'import pylab\n'), ((9977, 10001), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (9989, 10001), False, 'import pylab\n'), ((10002, 10035), 'pylab.ylabel', 'pylab.ylabel', (['"""Cross-correlation"""'], {}), "('Cross-correlation')\n", (10014, 10035), False, 'import pylab\n'), ((11068, 11097), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (11080, 11097), True, 'import matplotlib.pyplot as plt\n'), ((11202, 11224), 'numpy.mean', 'np.mean', (['data_whitened'], {}), '(data_whitened)\n', (11209, 11224), True, 'import numpy as np\n'), ((11250, 11271), 'numpy.std', 'np.std', (['data_whitened'], {}), '(data_whitened)\n', (11256, 11271), True, 'import numpy as np\n'), ((11298, 11322), 'numpy.median', 'np.median', (['data_whitened'], {}), '(data_whitened)\n', (11307, 11322), True, 'import numpy as np\n'), ((11350, 11375), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (11358, 11375), False, 'from scipy.stats import norm\n'), ((11733, 11743), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11741, 11743), True, 'import matplotlib.pyplot as plt\n'), ((11955, 11984), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (11967, 11984), True, 'import matplotlib.pyplot as plt\n'), ((12098, 12124), 'numpy.mean', 'np.mean', (['cross_correlation'], {}), '(cross_correlation)\n', (12105, 12124), True, 'import numpy as np\n'), ((12150, 12175), 'numpy.std', 'np.std', (['cross_correlation'], {}), '(cross_correlation)\n', (12156, 12175), True, 
'import numpy as np\n'), ((12202, 12230), 'numpy.median', 'np.median', (['cross_correlation'], {}), '(cross_correlation)\n', (12211, 12230), True, 'import numpy as np\n'), ((12258, 12283), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (12266, 12283), False, 'from scipy.stats import norm\n'), ((12645, 12655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12653, 12655), True, 'import matplotlib.pyplot as plt\n'), ((12696, 12708), 'numpy.argmax', 'np.argmax', (['n'], {}), '(n)\n', (12705, 12708), True, 'import numpy as np\n'), ((13003, 13056), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '[sample_rate * data_length]'}), '(size=[sample_rate * data_length])\n', (13022, 13056), False, 'import numpy\n'), ((13184, 13279), 'pycbc.waveform.get_td_waveform', 'get_td_waveform', ([], {'approximant': 'apx', 'mass1': '(15)', 'mass2': '(15)', 'delta_t': '(1.0 / sample_rate)', 'f_lower': '(25)'}), '(approximant=apx, mass1=15, mass2=15, delta_t=1.0 /\n sample_rate, f_lower=25)\n', (13199, 13279), False, 'from pycbc.waveform import get_td_waveform\n'), ((16349, 16363), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (16361, 16363), False, 'import pylab\n'), ((16430, 16466), 'pylab.plot', 'pylab.plot', (['times', 'cross_correlation'], {}), '(times, cross_correlation)\n', (16440, 16466), False, 'import pylab\n'), ((16627, 16651), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (16639, 16651), False, 'import pylab\n'), ((16652, 16685), 'pylab.ylabel', 'pylab.ylabel', (['"""Cross-correlation"""'], {}), "('Cross-correlation')\n", (16664, 16685), False, 'import pylab\n'), ((16822, 16851), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (16834, 16851), True, 'import matplotlib.pyplot as plt\n'), ((16956, 16978), 'numpy.mean', 'np.mean', (['data_whitened'], {}), '(data_whitened)\n', (16963, 16978), True, 'import numpy as np\n'), ((17004, 
17025), 'numpy.std', 'np.std', (['data_whitened'], {}), '(data_whitened)\n', (17010, 17025), True, 'import numpy as np\n'), ((17052, 17076), 'numpy.median', 'np.median', (['data_whitened'], {}), '(data_whitened)\n', (17061, 17076), True, 'import numpy as np\n'), ((17104, 17129), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (17112, 17129), False, 'from scipy.stats import norm\n'), ((17487, 17497), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17495, 17497), True, 'import matplotlib.pyplot as plt\n'), ((17634, 17663), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (17646, 17663), True, 'import matplotlib.pyplot as plt\n'), ((17777, 17803), 'numpy.mean', 'np.mean', (['cross_correlation'], {}), '(cross_correlation)\n', (17784, 17803), True, 'import numpy as np\n'), ((17829, 17854), 'numpy.std', 'np.std', (['cross_correlation'], {}), '(cross_correlation)\n', (17835, 17854), True, 'import numpy as np\n'), ((17881, 17909), 'numpy.median', 'np.median', (['cross_correlation'], {}), '(cross_correlation)\n', (17890, 17909), True, 'import numpy as np\n'), ((17937, 17962), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (17945, 17962), False, 'from scipy.stats import norm\n'), ((18324, 18334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18332, 18334), True, 'import matplotlib.pyplot as plt\n'), ((18375, 18387), 'numpy.argmax', 'np.argmax', (['n'], {}), '(n)\n', (18384, 18387), True, 'import numpy as np\n'), ((18682, 18735), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '[sample_rate * data_length]'}), '(size=[sample_rate * data_length])\n', (18701, 18735), False, 'import numpy\n'), ((18863, 18958), 'pycbc.waveform.get_td_waveform', 'get_td_waveform', ([], {'approximant': 'apx', 'mass1': '(20)', 'mass2': '(20)', 'delta_t': '(1.0 / sample_rate)', 'f_lower': '(25)'}), '(approximant=apx, mass1=20, mass2=20, 
delta_t=1.0 /\n sample_rate, f_lower=25)\n', (18878, 18958), False, 'from pycbc.waveform import get_td_waveform\n'), ((22028, 22042), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (22040, 22042), False, 'import pylab\n'), ((22109, 22145), 'pylab.plot', 'pylab.plot', (['times', 'cross_correlation'], {}), '(times, cross_correlation)\n', (22119, 22145), False, 'import pylab\n'), ((22306, 22330), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (22318, 22330), False, 'import pylab\n'), ((22331, 22364), 'pylab.ylabel', 'pylab.ylabel', (['"""Cross-correlation"""'], {}), "('Cross-correlation')\n", (22343, 22364), False, 'import pylab\n'), ((22501, 22530), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (22513, 22530), True, 'import matplotlib.pyplot as plt\n'), ((22635, 22657), 'numpy.mean', 'np.mean', (['data_whitened'], {}), '(data_whitened)\n', (22642, 22657), True, 'import numpy as np\n'), ((22683, 22704), 'numpy.std', 'np.std', (['data_whitened'], {}), '(data_whitened)\n', (22689, 22704), True, 'import numpy as np\n'), ((22731, 22755), 'numpy.median', 'np.median', (['data_whitened'], {}), '(data_whitened)\n', (22740, 22755), True, 'import numpy as np\n'), ((22783, 22808), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (22791, 22808), False, 'from scipy.stats import norm\n'), ((23166, 23176), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23174, 23176), True, 'import matplotlib.pyplot as plt\n'), ((23313, 23342), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (23325, 23342), True, 'import matplotlib.pyplot as plt\n'), ((23456, 23482), 'numpy.mean', 'np.mean', (['cross_correlation'], {}), '(cross_correlation)\n', (23463, 23482), True, 'import numpy as np\n'), ((23508, 23533), 'numpy.std', 'np.std', (['cross_correlation'], {}), '(cross_correlation)\n', (23514, 23533), True, 'import 
numpy as np\n'), ((23560, 23588), 'numpy.median', 'np.median', (['cross_correlation'], {}), '(cross_correlation)\n', (23569, 23588), True, 'import numpy as np\n'), ((23616, 23641), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (23624, 23641), False, 'from scipy.stats import norm\n'), ((24003, 24013), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24011, 24013), True, 'import matplotlib.pyplot as plt\n'), ((24054, 24066), 'numpy.argmax', 'np.argmax', (['n'], {}), '(n)\n', (24063, 24066), True, 'import numpy as np\n'), ((24361, 24414), 'numpy.random.normal', 'numpy.random.normal', ([], {'size': '[sample_rate * data_length]'}), '(size=[sample_rate * data_length])\n', (24380, 24414), False, 'import numpy\n'), ((24542, 24637), 'pycbc.waveform.get_td_waveform', 'get_td_waveform', ([], {'approximant': 'apx', 'mass1': '(25)', 'mass2': '(25)', 'delta_t': '(1.0 / sample_rate)', 'f_lower': '(25)'}), '(approximant=apx, mass1=25, mass2=25, delta_t=1.0 /\n sample_rate, f_lower=25)\n', (24557, 24637), False, 'from pycbc.waveform import get_td_waveform\n'), ((27707, 27721), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (27719, 27721), False, 'import pylab\n'), ((27788, 27824), 'pylab.plot', 'pylab.plot', (['times', 'cross_correlation'], {}), '(times, cross_correlation)\n', (27798, 27824), False, 'import pylab\n'), ((27985, 28009), 'pylab.xlabel', 'pylab.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (27997, 28009), False, 'import pylab\n'), ((28010, 28043), 'pylab.ylabel', 'pylab.ylabel', (['"""Cross-correlation"""'], {}), "('Cross-correlation')\n", (28022, 28043), False, 'import pylab\n'), ((28180, 28209), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (28192, 28209), True, 'import matplotlib.pyplot as plt\n'), ((28314, 28336), 'numpy.mean', 'np.mean', (['data_whitened'], {}), '(data_whitened)\n', (28321, 28336), True, 'import numpy as np\n'), ((28362, 28383), 
'numpy.std', 'np.std', (['data_whitened'], {}), '(data_whitened)\n', (28368, 28383), True, 'import numpy as np\n'), ((28410, 28434), 'numpy.median', 'np.median', (['data_whitened'], {}), '(data_whitened)\n', (28419, 28434), True, 'import numpy as np\n'), ((28462, 28487), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (28470, 28487), False, 'from scipy.stats import norm\n'), ((28845, 28855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28853, 28855), True, 'import matplotlib.pyplot as plt\n'), ((28992, 29021), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (29004, 29021), True, 'import matplotlib.pyplot as plt\n'), ((29135, 29161), 'numpy.mean', 'np.mean', (['cross_correlation'], {}), '(cross_correlation)\n', (29142, 29161), True, 'import numpy as np\n'), ((29187, 29212), 'numpy.std', 'np.std', (['cross_correlation'], {}), '(cross_correlation)\n', (29193, 29212), True, 'import numpy as np\n'), ((29239, 29267), 'numpy.median', 'np.median', (['cross_correlation'], {}), '(cross_correlation)\n', (29248, 29267), True, 'import numpy as np\n'), ((29295, 29320), 'scipy.stats.norm.pdf', 'norm.pdf', (['bins', 'mean', 'std'], {}), '(bins, mean, std)\n', (29303, 29320), False, 'from scipy.stats import norm\n'), ((29682, 29692), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29690, 29692), True, 'import matplotlib.pyplot as plt\n'), ((29733, 29745), 'numpy.argmax', 'np.argmax', (['n'], {}), '(n)\n', (29742, 29745), True, 'import numpy as np\n'), ((29894, 29923), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (29906, 29923), True, 'import matplotlib.pyplot as plt\n'), ((30153, 30163), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30161, 30163), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4076), 'numpy.correlate', 'numpy.correlate', (['hp1', 'hp1'], {'mode': '"""full"""'}), "(hp1, hp1, 
mode='full')\n", (4053, 4076), False, 'import numpy\n'), ((13391, 13429), 'numpy.correlate', 'numpy.correlate', (['hp1', 'hp1'], {'mode': '"""full"""'}), "(hp1, hp1, mode='full')\n", (13406, 13429), False, 'import numpy\n'), ((19070, 19108), 'numpy.correlate', 'numpy.correlate', (['hp1', 'hp1'], {'mode': '"""full"""'}), "(hp1, hp1, mode='full')\n", (19085, 19108), False, 'import numpy\n'), ((24749, 24787), 'numpy.correlate', 'numpy.correlate', (['hp1', 'hp1'], {'mode': '"""full"""'}), "(hp1, hp1, mode='full')\n", (24764, 24787), False, 'import numpy\n')] |
import numpy
import cv2
def make2Dcolormap(
colors=(
(1, 1, 0),
(0, 0, 1),
(0, 1, 0),
(1, 0, 0),
), size=20):
######################
colormap = numpy.zeros((2, 2, 3))
colormap[1, 1] = colors[0]
colormap[0, 1] = colors[1]
colormap[0, 0] = colors[2]
colormap[1, 0] = colors[3]
size = size + 1
colormap = cv2.resize(colormap, (size, size))
colormap = numpy.clip(colormap, 0, 1)
return colormap
def flat_combine(lst):
return numpy.concatenate([x.reshape(-1, x.shape[-1]) for x in lst])
| [
"numpy.clip",
"numpy.zeros",
"cv2.resize"
] | [((219, 241), 'numpy.zeros', 'numpy.zeros', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (230, 241), False, 'import numpy\n'), ((401, 435), 'cv2.resize', 'cv2.resize', (['colormap', '(size, size)'], {}), '(colormap, (size, size))\n', (411, 435), False, 'import cv2\n'), ((451, 477), 'numpy.clip', 'numpy.clip', (['colormap', '(0)', '(1)'], {}), '(colormap, 0, 1)\n', (461, 477), False, 'import numpy\n')] |
import numpy as np
from gym_cooking.cooking_world.world_objects import *
from collections import namedtuple
GraphicScaling = namedtuple("GraphicScaling", ["holding_scale", "container_scale"])
class GraphicStore:
OBJECT_PROPERTIES = {Blender: GraphicScaling(None, 0.5)}
def __init__(self, world_height, world_width):
self.scale = 80 # num pixels per tile
self.holding_scale = 0.5
self.container_scale = 0.7
self.width = self.scale * world_width
self.height = self.scale * world_height
self.tile_size = (self.scale, self.scale)
self.holding_size = tuple((self.holding_scale * np.asarray(self.tile_size)).astype(int))
self.container_size = tuple((self.container_scale * np.asarray(self.tile_size)).astype(int))
self.holding_container_size = tuple((self.container_scale * np.asarray(self.holding_size)).astype(int))
| [
"collections.namedtuple",
"numpy.asarray"
] | [((127, 193), 'collections.namedtuple', 'namedtuple', (['"""GraphicScaling"""', "['holding_scale', 'container_scale']"], {}), "('GraphicScaling', ['holding_scale', 'container_scale'])\n", (137, 193), False, 'from collections import namedtuple\n'), ((645, 671), 'numpy.asarray', 'np.asarray', (['self.tile_size'], {}), '(self.tile_size)\n', (655, 671), True, 'import numpy as np\n'), ((746, 772), 'numpy.asarray', 'np.asarray', (['self.tile_size'], {}), '(self.tile_size)\n', (756, 772), True, 'import numpy as np\n'), ((855, 884), 'numpy.asarray', 'np.asarray', (['self.holding_size'], {}), '(self.holding_size)\n', (865, 884), True, 'import numpy as np\n')] |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as pl
from sklearn.metrics import roc_curve
def uim_data(N=20, M=100, sparsity=0.1, frac_test=0.2, show=True, fill_num=0.9,
fs=30):
"""
Show splitting by frac_test using random holdout.
"""
np.random.seed(1234)
tot_size = N * M
fill_size = np.round(tot_size * sparsity)
uim = np.zeros(tot_size) + 0.1
fill_ind = np.random.permutation(tot_size)[:fill_size]
uim[fill_ind] = fill_num
uim = uim.reshape(N, M)
fig = pl.figure(figsize=(20, 4))
cmap = sns.cubehelix_palette(3, as_cmap=True)
sns.heatmap(uim, cbar=False, xticklabels=False,
yticklabels=False)
pl.xlabel('Items', fontsize=fs)
pl.ylabel('Users', fontsize=fs)
if show:
pl.show()
pl.close()
return uim, fill_num
def leave_k_out(uim, fill_num, frac_test=0.2, test_num=0.5, fs=30, show=True):
"""
Show splitting by frac_test using random holdout.
"""
N_user, M_item = uim.shape
uim = uim.ravel()
entries = np.where(uim == fill_num)[0]
N = entries.size
N_test = np.round(frac_test * N)
test_ind = np.random.permutation(N)[:N_test]
uim[entries[test_ind]] = test_num
fig = pl.figure(figsize=(20, 4))
cmap = sns.cubehelix_palette(3, as_cmap=True)
sns.heatmap(uim.reshape(N_user, M_item), cbar=False, xticklabels=False,
yticklabels=False)
pl.xlabel('Items', fontsize=fs)
pl.ylabel('Users', fontsize=fs)
if show:
pl.show()
pl.close()
def M_fold(uim, fill_num, frac=0.5, test_num=0.5, fs=30, show=True):
    """
    Visualize M-fold style splitting: within the trailing `frac` portion of
    the observed entries, mark a random `frac` fraction as test entries and
    plot the matrix.

    Parameters
    ----------
    uim : ndarray (n_users, n_items)
        User-item matrix; entries equal to `fill_num` are observed.
        Modified in place (ravel returns a view for contiguous input).
    fill_num : float
        Value identifying observed entries.
    frac : float
        Fraction used both for the holdout tail and the test subset.
    test_num : float
        Value written into the selected test entries.
    fs : int
        Axis-label font size.
    show : bool
        If True, display the figure.
    """
    N_user, M_item = uim.shape
    uim = uim.ravel()
    entries = np.where(uim == fill_num)[0]
    N = entries.size
    # `np.int` was removed from NumPy (1.24+); the builtin int is correct.
    N_holdout = int(np.round(frac * N))
    N_test = int(np.round(frac * N_holdout))
    # NOTE(review): the holdout tail has N - N_holdout elements, which for
    # odd N can be smaller than N_holdout; the permutation below could then
    # index out of bounds -- confirm intended sizes.
    holdout_ind = entries[N_holdout:]
    test_ind = holdout_ind[np.random.permutation(N_holdout)[:N_test]]
    uim[test_ind] = test_num
    fig = pl.figure(figsize=(20, 4))
    cmap = sns.cubehelix_palette(3, as_cmap=True)
    sns.heatmap(uim.reshape(N_user, M_item), cbar=False, xticklabels=False,
                yticklabels=False)
    pl.xlabel('Items', fontsize=fs)
    pl.ylabel('Users', fontsize=fs)
    if show:
        pl.show()
    pl.close()
def user_item_hists(urm, fs=6, bins=50, a=0.4, log=False, font=25):
    """
    Plot the distribution of the number of ratings across users and items.

    Parameters
    ----------
    urm : ndarray (n_users, n_items)
        User-rating matrix; nonzero entries are observed ratings.
    fs, bins, a, font : numbers
        Figure scale, histogram bins, bar alpha, label font size.
    log : bool
        If True, use a log-scaled frequency axis.

    Returns
    -------
    (user_counts, item_counts) : per-user and per-item rating counts.
    """
    # Count observed (nonzero) entries.  The original summed the rating
    # values instead, contradicting the docstring and leaving this boolean
    # mask unused.
    nonzero_urm = urm > 0
    user_counts = np.sum(nonzero_urm, axis=1)
    item_counts = np.sum(nonzero_urm, axis=0)
    if log:
        ylabel = 'Log(Frequency)'
    else:
        ylabel = 'Frequency'
    pl.figure(figsize=(2 * fs, fs))
    pl.subplot(121)
    # `normed` was removed from matplotlib; `density` is the replacement.
    pl.hist(user_counts, bins=bins, alpha=a, density=True, log=log,
            label='User counts')
    pl.xlabel('Number of ratings', fontsize=font)
    pl.ylabel(ylabel, fontsize=font)
    pl.legend(fontsize=font)
    pl.subplot(122)
    pl.hist(item_counts, bins=bins, alpha=a, density=True, log=log,
            label='Item counts')
    pl.xlabel('Number of ratings', fontsize=font)
    pl.ylabel(ylabel, fontsize=font)
    pl.legend(fontsize=font)
    return user_counts, item_counts
def get_se_dists(test, pred):
    """
    Compute squared-error distributions: global, per-user and per-item.

    Entries with a zero `test` value are treated as unobserved and excluded.
    Users/items with no observed entries fall back to the global mean
    squared error (the per-row/column mean is NaN in that case).

    Parameters
    ----------
    test, pred : ndarray (n_users, n_items)
        Actual and predicted rating matrices.

    Returns
    -------
    (se_glob, se_user, se_item) : flat array of observed squared errors,
        per-user means, per-item means.
    """
    sq_err = (test - pred) ** 2.
    observed = test > 0
    se_glob = sq_err[observed]
    global_mean = np.mean(se_glob)

    n_users, n_items = sq_err.shape
    se_user = np.zeros(n_users)
    for u in range(n_users):
        se_user[u] = np.mean(sq_err[u][observed[u]])
    se_user[np.isnan(se_user)] = global_mean

    se_item = np.zeros(n_items)
    for j in range(n_items):
        se_item[j] = np.mean(sq_err[:, j][observed[:, j]])
    se_item[np.isnan(se_item)] = global_mean
    return se_glob, se_user, se_item
def se_hists(test, pred, fs=6, bins=50, a=0.5, log=False, font=25, max_err=5):
    """
    Plot squared-error distributions: globally, by user, and by item.

    Parameters
    ----------
    test, pred : ndarray (n_users, n_items)
        Actual and predicted rating matrices (zeros in `test` are
        unobserved).
    fs, bins, a, font : numbers
        Figure scale, histogram bins, bar alpha, label font size.
    log : bool
        If True, use a log-scaled frequency axis.
    max_err : number
        Upper x-axis limit.
    """
    se_glob, se_user, se_item = get_se_dists(test, pred)
    if log:
        ylabel = 'Log(Relative Frequency)'
    else:
        ylabel = 'Relative Frequency'
    pl.figure(figsize=(fs, fs))
    # `normed` was removed from matplotlib; `density` is the replacement.
    pl.hist(se_glob, bins=bins, alpha=a*2, density=True, log=log,
            label='Sq. Error')
    pl.hist(se_user, bins=bins, alpha=a, density=True, log=log,
            label='Sq. Error by user')
    pl.hist(se_item, bins=bins, alpha=a, density=True, log=log,
            label='Sq. Error by item')
    pl.xlabel('Squared Error', fontsize=font)
    pl.ylabel(ylabel, fontsize=font)
    pl.xlim(0, max_err)
    pl.legend(fontsize=font/2)
def se_hists_percentile(test, pred, user_counts, item_counts, percentile=10, fs=6, bins=20, a=0.5, log=False, font=25, max_err=10):
    """
    Plot per-user and per-item squared-error distributions, restricted to
    the top and bottom `percentile` of the ratings-count distributions.

    Parameters
    ----------
    test, pred : ndarray (n_users, n_items)
        Actual and predicted rating matrices.
    user_counts, item_counts : ndarray
        Number of ratings per user / per item (used only for ranking).
    percentile : number
        Size of each tail, in percent.
    fs, bins, a, font : numbers
        Figure scale, histogram bins, bar alpha, label font size.
    log : bool
        If True, use a log-scaled frequency axis.
    max_err : number
        Upper x-axis limit.
    """
    se_glob, se_user, se_item = get_se_dists(test, pred)
    user_ind = np.argsort(user_counts)
    item_ind = np.argsort(item_counts)
    # `np.int` was removed from NumPy (1.24+); the builtin int is correct.
    Nuser = int(np.round(0.01 * percentile * user_ind.size))
    Nitem = int(np.round(0.01 * percentile * item_ind.size))
    if log:
        ylabel = 'Log(Relative Frequency)'
    else:
        ylabel = 'Relative Frequency'
    pl.figure(figsize=(2 * fs, fs))
    pl.subplot(121)
    # `normed` was removed from matplotlib; `density` is the replacement.
    pl.hist(se_user[user_ind[-Nuser:]], bins=bins, alpha=a/2, density=True, log=log,
            label='Sq. Error by user', color='g')
    pl.hist(se_item[item_ind[-Nitem:]], bins=bins, alpha=a/2, density=True, log=log,
            label='Sq. Error by item', color='r')
    pl.xlabel('Squared Error', fontsize=font)
    pl.ylabel(ylabel, fontsize=font)
    pl.xlim(0, max_err)
    pl.title('Top %d%%' % percentile, fontsize=font)
    pl.legend(fontsize=font/2)
    pl.subplot(122)
    pl.hist(se_user[user_ind[:Nuser]], bins=bins, alpha=a/2, density=True, log=log,
            label='Sq. Error by user', color='g')
    pl.hist(se_item[item_ind[:Nitem]], bins=bins, alpha=a/2, density=True, log=log,
            label='Sq. Error by item', color='r')
    pl.xlabel('Squared Error', fontsize=font)
    pl.ylabel(ylabel, fontsize=font)
    pl.xlim(0, max_err)
    pl.title('Bottom %d%%' % percentile, fontsize=font)
    pl.legend(fontsize=font/2)
def get_ind_user(nonzero_ind, user_ind):
    """
    Restrict (row, col) index arrays to entries whose row (user) appears
    in `user_ind`, preserving original order.

    Parameters
    ----------
    nonzero_ind : tuple of ndarray
        (user_rows, item_cols), e.g. as returned by np.nonzero.
    user_ind : array-like of int
        User ids to keep.

    Returns
    -------
    (users, items) : tuple of int ndarrays suitable for fancy indexing.
    """
    users = np.asarray(nonzero_ind[0])
    items = np.asarray(nonzero_ind[1])
    # Vectorized membership test replaces the original quadratic
    # per-element `in` loop with np.append; `np.int` is also gone from
    # modern NumPy, so build plain-int arrays instead.
    keep = np.isin(users, user_ind)
    return users[keep].astype(int), items[keep].astype(int)
def get_ind_item(nonzero_ind, item_ind):
    """
    Restrict (row, col) index arrays to entries whose column (item)
    appears in `item_ind`, preserving original order.

    Parameters
    ----------
    nonzero_ind : tuple of ndarray
        (user_rows, item_cols), e.g. as returned by np.nonzero.
    item_ind : array-like of int
        Item ids to keep.

    Returns
    -------
    (users, items) : tuple of int ndarrays suitable for fancy indexing.
    """
    users = np.asarray(nonzero_ind[0])
    items = np.asarray(nonzero_ind[1])
    # Vectorized membership test replaces the original quadratic
    # per-element `in` loop with np.append; `np.int` is also gone from
    # modern NumPy, so build plain-int arrays instead.
    keep = np.isin(items, item_ind)
    return users[keep].astype(int), items[keep].astype(int)
def user_item_rocs(y_true, y_pred, user_counts, item_counts, nonzero_ind,
                   percentile=10, fs=6, font=25, a=0.5):
    """
    Plot ROC curves: overall, and conditioned on users/items in the bottom
    `percentile` of the ratings-count distribution.

    Parameters
    ----------
    y_true, y_pred : ndarray
        Binary relevance labels and prediction scores, indexed via
        `nonzero_ind`.
    user_counts, item_counts : ndarray
        Ratings counts per user / per item (used only for ranking).
    nonzero_ind : tuple of ndarray
        (rows, cols) of the observed entries.
    percentile : number
        Size of the bottom tail, in percent.
    fs, font, a : numbers
        Figure scale (unused), label font size, curve alpha.
    """
    user_ind = np.argsort(user_counts)
    item_ind = np.argsort(item_counts)
    # `np.int` was removed from NumPy (1.24+); the builtin int is correct.
    Nuser = int(np.round(0.01 * percentile * user_ind.size))
    Nitem = int(np.round(0.01 * percentile * item_ind.size))
    pl.figure(figsize=(8, 8))
    fpr, tpr, thresholds = roc_curve(y_true[nonzero_ind], y_pred[nonzero_ind])
    pl.plot(fpr, tpr, lw=2, label='All')
    # users
    ind = get_ind_user(nonzero_ind, user_ind[:Nuser])
    yt, yp = y_true[ind], y_pred[ind]
    fpr, tpr, thresholds = roc_curve(yt, yp)
    pl.plot(fpr, tpr, lw=2, color='g', alpha=a, label='Bottom 10% Users')
    # items
    ind = get_ind_item(nonzero_ind, item_ind[:Nitem])
    yt, yp = y_true[ind], y_pred[ind]
    fpr, tpr, thresholds = roc_curve(yt, yp)
    pl.plot(fpr, tpr, lw=2, color='r', alpha=a, label='Bottom 10% Items')
    pl.plot([0, 1], [0, 1], 'k--')
    pl.xlabel('FPR', fontsize=font)
    pl.ylabel('TPR', fontsize=font)
    pl.legend(loc=2, fontsize=font/2)
def k_DCG(true, pred, user_ids, k):
    """
    Per-user discounted cumulative gains over the top-k predicted items.

    For each distinct user, items are ranked by descending prediction and
    gains 2**relevance are discounted by log2(rank + 1).

    Parameters
    ----------
    true, pred : ndarray
        Actual relevances and predicted scores, aligned with `user_ids`.
    user_ids : ndarray
        User id for each rating.
    k : int
        Truncation depth.

    Returns
    -------
    dcgs : ndarray (n_users, k)
        Positions beyond a user's item count keep the default value 1.
    """
    users = np.unique(user_ids)
    dcgs = np.ones((users.size, k))
    for row, uid in enumerate(users):
        mask = user_ids == uid
        top_k = np.argsort(-pred[mask])[:k]
        relevances = true[mask][top_k]
        discounted = 2. ** relevances / np.log2(np.arange(relevances.size) + 2)
        dcgs[row, :discounted.size] = discounted
    return dcgs
def k_NDCG(true, pred, user_ids, k):
    """
    Not a true NDCG: return the predicted and ideal DCG matrices, each row
    rescaled by its own maximum, for side-by-side visual comparison.

    Parameters
    ----------
    true, pred : ndarray
        Actual relevances and predicted scores, aligned with `user_ids`.
    user_ids : ndarray
        User id for each rating.
    k : int
        Truncation depth.

    Returns
    -------
    (actual_dcgs, best_dcgs) : two (n_users, k) arrays.
    """
    predicted = k_DCG(true, pred, user_ids, k)
    ideal = k_DCG(true, true, user_ids, k)
    predicted = predicted / np.max(predicted, axis=1)[:, None]
    ideal = ideal / np.max(ideal, axis=1)[:, None]
    return predicted, ideal
def ndcg_plot(y_true, y_pred, user_ids, k, N, font=40, fs=6, seed=1234):
    """
    Plot predicted vs. best-possible DCG matrices for three user panels:
    two independent random samples of N users, plus the N users whose
    squared-error spread is smallest.

    Parameters
    ----------
    y_true, y_pred : ndarray
        Actual and predicted ratings, aligned with `user_ids`.
    user_ids : ndarray
        User id for each rating.
    k : int
        DCG truncation depth.
    N : int
        Number of users per panel.
    font, fs : numbers
        Label font size and figure scale.
    seed : int
        RNG seed for the random user samples.
    """
    np.random.seed(seed)
    se = (y_true - y_pred) ** 2.
    unique_users = np.unique(user_ids)
    # NOTE(review): despite the name, this stores the *std* of each user's
    # squared errors, not the MSE -- kept as-is; "best" users below are
    # those with the smallest spread.
    user_mses = np.zeros(unique_users.size)
    for i in range(unique_users.size):
        user_mses[i] = np.std(se[user_ids == unique_users[i]])
    rand_user_ids = unique_users[np.random.permutation(unique_users.size)[:N]]
    # `np.int` was removed from NumPy (1.24+); the builtin int is correct.
    ind = np.array([], dtype=int)
    for i in range(N):
        ind = np.append(ind, np.where(user_ids == rand_user_ids[i])[0])
    a, b = k_NDCG(y_true[ind], y_pred[ind], user_ids[ind], k)
    cmap = sns.cubehelix_palette(as_cmap=True, reverse=True)
    pl.figure(figsize=(3 * fs, 2 * fs))
    ax = pl.subplot(231)
    pl.imshow(a, interpolation='nearest', cmap=cmap)
    ax.text(0.5, 1.05, 'Random %d Users' % N, horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font)
    ax.text(-0.05, 0.5, 'Users', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font/2,
            rotation=90)
    ax.text(-0.15, 0.5, 'Predicted DCGs', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font,
            rotation=90)
    pl.axis('off')
    ax = pl.subplot(234)
    pl.imshow(b, interpolation='nearest', cmap=cmap)
    pl.axis('off')
    ax.text(0.5, -0.05, 'Recommended Items', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font/2)
    ax.text(-0.05, 0.5, 'Users', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font/2,
            rotation=90)
    ax.text(-0.15, 0.5, 'Best Possible DCGs', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font,
            rotation=90)
    # Second, independent random sample of users.
    rand_user_ids = unique_users[np.random.permutation(unique_users.size)[:N]]
    ind = np.array([], dtype=int)
    for i in range(N):
        ind = np.append(ind, np.where(user_ids == rand_user_ids[i])[0])
    a, b = k_NDCG(y_true[ind], y_pred[ind], user_ids[ind], k)
    cmap = sns.cubehelix_palette(as_cmap=True, reverse=True)
    ax = pl.subplot(232)
    pl.imshow(a, interpolation='nearest', cmap=cmap)
    pl.axis('off')
    ax.text(0.5, 1.05, 'Random %d Users' % N, horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font)
    ax = pl.subplot(235)
    pl.imshow(b, interpolation='nearest', cmap=cmap)
    pl.axis('off')
    ax.text(0.5, -0.05, 'Recommended Items', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font/2)
    # Users with the smallest squared-error spread.
    best_user_ids = unique_users[np.argsort(user_mses)[:N]]
    ind = np.array([], dtype=int)
    for i in range(N):
        ind = np.append(ind, np.where(user_ids == best_user_ids[i])[0])
    a, b = k_NDCG(y_true[ind], y_pred[ind], user_ids[ind], k)
    ax = pl.subplot(233)
    pl.imshow(a, interpolation='nearest', cmap=cmap)
    ax.text(0.5, 1.05, 'Best %d Users' % N, horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font)
    pl.axis('off')
    ax = pl.subplot(236)
    pl.imshow(b, interpolation='nearest', cmap=cmap)
    pl.axis('off')
    ax.text(0.5, -0.05, 'Recommended Items', horizontalalignment='center',
            verticalalignment='center', transform=ax.transAxes, fontsize=font/2)
    pl.tight_layout(w_pad=0.1)
if __name__ == '__main__':
    # Demo: build a synthetic user-item matrix without displaying it, then
    # show the two holdout-splitting visualizations on copies (both
    # splitters mutate their input in place).
    uim, fill_num = uim_data(show=False)
    leave_k_out(uim.copy(), fill_num)
    M_fold(uim.copy(), fill_num)
| [
"seaborn.cubehelix_palette",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.argsort",
"numpy.array",
"sklearn.metrics.roc_curve",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matpl... | [((287, 307), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (301, 307), True, 'import numpy as np\n'), ((345, 374), 'numpy.round', 'np.round', (['(tot_size * sparsity)'], {}), '(tot_size * sparsity)\n', (353, 374), True, 'import numpy as np\n'), ((538, 564), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(20, 4)'}), '(figsize=(20, 4))\n', (547, 564), True, 'import matplotlib.pyplot as pl\n'), ((576, 614), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', (['(3)'], {'as_cmap': '(True)'}), '(3, as_cmap=True)\n', (597, 614), True, 'import seaborn as sns\n'), ((619, 685), 'seaborn.heatmap', 'sns.heatmap', (['uim'], {'cbar': '(False)', 'xticklabels': '(False)', 'yticklabels': '(False)'}), '(uim, cbar=False, xticklabels=False, yticklabels=False)\n', (630, 685), True, 'import seaborn as sns\n'), ((723, 754), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Items"""'], {'fontsize': 'fs'}), "('Items', fontsize=fs)\n", (732, 754), True, 'import matplotlib.pyplot as pl\n'), ((759, 790), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Users"""'], {'fontsize': 'fs'}), "('Users', fontsize=fs)\n", (768, 790), True, 'import matplotlib.pyplot as pl\n'), ((1147, 1170), 'numpy.round', 'np.round', (['(frac_test * N)'], {}), '(frac_test * N)\n', (1155, 1170), True, 'import numpy as np\n'), ((1269, 1295), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(20, 4)'}), '(figsize=(20, 4))\n', (1278, 1295), True, 'import matplotlib.pyplot as pl\n'), ((1307, 1345), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', (['(3)'], {'as_cmap': '(True)'}), '(3, as_cmap=True)\n', (1328, 1345), True, 'import seaborn as sns\n'), ((1478, 1509), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Items"""'], {'fontsize': 'fs'}), "('Items', fontsize=fs)\n", (1487, 1509), True, 'import matplotlib.pyplot as pl\n'), ((1514, 1545), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Users"""'], {'fontsize': 'fs'}), "('Users', fontsize=fs)\n", (1523, 1545), 
True, 'import matplotlib.pyplot as pl\n'), ((2093, 2119), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(20, 4)'}), '(figsize=(20, 4))\n', (2102, 2119), True, 'import matplotlib.pyplot as pl\n'), ((2131, 2169), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', (['(3)'], {'as_cmap': '(True)'}), '(3, as_cmap=True)\n', (2152, 2169), True, 'import seaborn as sns\n'), ((2302, 2333), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Items"""'], {'fontsize': 'fs'}), "('Items', fontsize=fs)\n", (2311, 2333), True, 'import matplotlib.pyplot as pl\n'), ((2338, 2369), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Users"""'], {'fontsize': 'fs'}), "('Users', fontsize=fs)\n", (2347, 2369), True, 'import matplotlib.pyplot as pl\n'), ((2626, 2645), 'numpy.sum', 'np.sum', (['urm'], {'axis': '(1)'}), '(urm, axis=1)\n', (2632, 2645), True, 'import numpy as np\n'), ((2664, 2683), 'numpy.sum', 'np.sum', (['urm'], {'axis': '(0)'}), '(urm, axis=0)\n', (2670, 2683), True, 'import numpy as np\n'), ((2775, 2806), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(2 * fs, fs)'}), '(figsize=(2 * fs, fs))\n', (2784, 2806), True, 'import matplotlib.pyplot as pl\n'), ((2811, 2826), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(121)'], {}), '(121)\n', (2821, 2826), True, 'import matplotlib.pyplot as pl\n'), ((2831, 2919), 'matplotlib.pyplot.hist', 'pl.hist', (['user_counts'], {'bins': 'bins', 'alpha': 'a', 'normed': '(True)', 'log': 'log', 'label': '"""User counts"""'}), "(user_counts, bins=bins, alpha=a, normed=True, log=log, label=\n 'User counts')\n", (2838, 2919), True, 'import matplotlib.pyplot as pl\n'), ((2931, 2976), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Number of ratings"""'], {'fontsize': 'font'}), "('Number of ratings', fontsize=font)\n", (2940, 2976), True, 'import matplotlib.pyplot as pl\n'), ((2981, 3013), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['ylabel'], {'fontsize': 'font'}), '(ylabel, fontsize=font)\n', (2990, 3013), True, 'import 
matplotlib.pyplot as pl\n'), ((3018, 3042), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'fontsize': 'font'}), '(fontsize=font)\n', (3027, 3042), True, 'import matplotlib.pyplot as pl\n'), ((3047, 3062), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(122)'], {}), '(122)\n', (3057, 3062), True, 'import matplotlib.pyplot as pl\n'), ((3067, 3155), 'matplotlib.pyplot.hist', 'pl.hist', (['item_counts'], {'bins': 'bins', 'alpha': 'a', 'normed': '(True)', 'log': 'log', 'label': '"""Item counts"""'}), "(item_counts, bins=bins, alpha=a, normed=True, log=log, label=\n 'Item counts')\n", (3074, 3155), True, 'import matplotlib.pyplot as pl\n'), ((3167, 3212), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Number of ratings"""'], {'fontsize': 'font'}), "('Number of ratings', fontsize=font)\n", (3176, 3212), True, 'import matplotlib.pyplot as pl\n'), ((3217, 3249), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['ylabel'], {'fontsize': 'font'}), '(ylabel, fontsize=font)\n', (3226, 3249), True, 'import matplotlib.pyplot as pl\n'), ((3254, 3278), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'fontsize': 'font'}), '(fontsize=font)\n', (3263, 3278), True, 'import matplotlib.pyplot as pl\n'), ((3503, 3524), 'numpy.zeros', 'np.zeros', (['se.shape[0]'], {}), '(se.shape[0])\n', (3511, 3524), True, 'import numpy as np\n'), ((3635, 3651), 'numpy.mean', 'np.mean', (['se_glob'], {}), '(se_glob)\n', (3642, 3651), True, 'import numpy as np\n'), ((3667, 3688), 'numpy.zeros', 'np.zeros', (['se.shape[1]'], {}), '(se.shape[1])\n', (3675, 3688), True, 'import numpy as np\n'), ((3805, 3821), 'numpy.mean', 'np.mean', (['se_glob'], {}), '(se_glob)\n', (3812, 3821), True, 'import numpy as np\n'), ((4192, 4219), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(fs, fs)'}), '(figsize=(fs, fs))\n', (4201, 4219), True, 'import matplotlib.pyplot as pl\n'), ((4224, 4310), 'matplotlib.pyplot.hist', 'pl.hist', (['se_glob'], {'bins': 'bins', 'alpha': '(a * 2)', 'normed': '(True)', 'log': 'log', 
'label': '"""Sq. Error"""'}), "(se_glob, bins=bins, alpha=a * 2, normed=True, log=log, label=\n 'Sq. Error')\n", (4231, 4310), True, 'import matplotlib.pyplot as pl\n'), ((4320, 4410), 'matplotlib.pyplot.hist', 'pl.hist', (['se_user'], {'bins': 'bins', 'alpha': 'a', 'normed': '(True)', 'log': 'log', 'label': '"""Sq. Error by user"""'}), "(se_user, bins=bins, alpha=a, normed=True, log=log, label=\n 'Sq. Error by user')\n", (4327, 4410), True, 'import matplotlib.pyplot as pl\n'), ((4422, 4512), 'matplotlib.pyplot.hist', 'pl.hist', (['se_item'], {'bins': 'bins', 'alpha': 'a', 'normed': '(True)', 'log': 'log', 'label': '"""Sq. Error by item"""'}), "(se_item, bins=bins, alpha=a, normed=True, log=log, label=\n 'Sq. Error by item')\n", (4429, 4512), True, 'import matplotlib.pyplot as pl\n'), ((4524, 4565), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Squared Error"""'], {'fontsize': 'font'}), "('Squared Error', fontsize=font)\n", (4533, 4565), True, 'import matplotlib.pyplot as pl\n'), ((4570, 4602), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['ylabel'], {'fontsize': 'font'}), '(ylabel, fontsize=font)\n', (4579, 4602), True, 'import matplotlib.pyplot as pl\n'), ((4607, 4626), 'matplotlib.pyplot.xlim', 'pl.xlim', (['(0)', 'max_err'], {}), '(0, max_err)\n', (4614, 4626), True, 'import matplotlib.pyplot as pl\n'), ((4631, 4659), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'fontsize': '(font / 2)'}), '(fontsize=font / 2)\n', (4640, 4659), True, 'import matplotlib.pyplot as pl\n'), ((4950, 4973), 'numpy.argsort', 'np.argsort', (['user_counts'], {}), '(user_counts)\n', (4960, 4973), True, 'import numpy as np\n'), ((4989, 5012), 'numpy.argsort', 'np.argsort', (['item_counts'], {}), '(item_counts)\n', (4999, 5012), True, 'import numpy as np\n'), ((5250, 5281), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(2 * fs, fs)'}), '(figsize=(2 * fs, fs))\n', (5259, 5281), True, 'import matplotlib.pyplot as pl\n'), ((5286, 5301), 'matplotlib.pyplot.subplot', 'pl.subplot', 
(['(121)'], {}), '(121)\n', (5296, 5301), True, 'import matplotlib.pyplot as pl\n'), ((5306, 5429), 'matplotlib.pyplot.hist', 'pl.hist', (['se_user[user_ind[-Nuser:]]'], {'bins': 'bins', 'alpha': '(a / 2)', 'normed': '(True)', 'log': 'log', 'label': '"""Sq. Error by user"""', 'color': '"""g"""'}), "(se_user[user_ind[-Nuser:]], bins=bins, alpha=a / 2, normed=True,\n log=log, label='Sq. Error by user', color='g')\n", (5313, 5429), True, 'import matplotlib.pyplot as pl\n'), ((5440, 5563), 'matplotlib.pyplot.hist', 'pl.hist', (['se_item[item_ind[-Nitem:]]'], {'bins': 'bins', 'alpha': '(a / 2)', 'normed': '(True)', 'log': 'log', 'label': '"""Sq. Error by item"""', 'color': '"""r"""'}), "(se_item[item_ind[-Nitem:]], bins=bins, alpha=a / 2, normed=True,\n log=log, label='Sq. Error by item', color='r')\n", (5447, 5563), True, 'import matplotlib.pyplot as pl\n'), ((5574, 5615), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Squared Error"""'], {'fontsize': 'font'}), "('Squared Error', fontsize=font)\n", (5583, 5615), True, 'import matplotlib.pyplot as pl\n'), ((5620, 5652), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['ylabel'], {'fontsize': 'font'}), '(ylabel, fontsize=font)\n', (5629, 5652), True, 'import matplotlib.pyplot as pl\n'), ((5657, 5676), 'matplotlib.pyplot.xlim', 'pl.xlim', (['(0)', 'max_err'], {}), '(0, max_err)\n', (5664, 5676), True, 'import matplotlib.pyplot as pl\n'), ((5681, 5729), 'matplotlib.pyplot.title', 'pl.title', (["('Top %d%%' % percentile)"], {'fontsize': 'font'}), "('Top %d%%' % percentile, fontsize=font)\n", (5689, 5729), True, 'import matplotlib.pyplot as pl\n'), ((5734, 5762), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'fontsize': '(font / 2)'}), '(fontsize=font / 2)\n', (5743, 5762), True, 'import matplotlib.pyplot as pl\n'), ((5766, 5781), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(122)'], {}), '(122)\n', (5776, 5781), True, 'import matplotlib.pyplot as pl\n'), ((5786, 5909), 'matplotlib.pyplot.hist', 'pl.hist', 
(['se_user[user_ind[:Nuser]]'], {'bins': 'bins', 'alpha': '(a / 2)', 'normed': '(True)', 'log': 'log', 'label': '"""Sq. Error by user"""', 'color': '"""g"""'}), "(se_user[user_ind[:Nuser]], bins=bins, alpha=a / 2, normed=True, log\n =log, label='Sq. Error by user', color='g')\n", (5793, 5909), True, 'import matplotlib.pyplot as pl\n'), ((5919, 6042), 'matplotlib.pyplot.hist', 'pl.hist', (['se_item[item_ind[:Nitem]]'], {'bins': 'bins', 'alpha': '(a / 2)', 'normed': '(True)', 'log': 'log', 'label': '"""Sq. Error by item"""', 'color': '"""r"""'}), "(se_item[item_ind[:Nitem]], bins=bins, alpha=a / 2, normed=True, log\n =log, label='Sq. Error by item', color='r')\n", (5926, 6042), True, 'import matplotlib.pyplot as pl\n'), ((6052, 6093), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Squared Error"""'], {'fontsize': 'font'}), "('Squared Error', fontsize=font)\n", (6061, 6093), True, 'import matplotlib.pyplot as pl\n'), ((6098, 6130), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['ylabel'], {'fontsize': 'font'}), '(ylabel, fontsize=font)\n', (6107, 6130), True, 'import matplotlib.pyplot as pl\n'), ((6135, 6154), 'matplotlib.pyplot.xlim', 'pl.xlim', (['(0)', 'max_err'], {}), '(0, max_err)\n', (6142, 6154), True, 'import matplotlib.pyplot as pl\n'), ((6159, 6210), 'matplotlib.pyplot.title', 'pl.title', (["('Bottom %d%%' % percentile)"], {'fontsize': 'font'}), "('Bottom %d%%' % percentile, fontsize=font)\n", (6167, 6210), True, 'import matplotlib.pyplot as pl\n'), ((6215, 6243), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'fontsize': '(font / 2)'}), '(fontsize=font / 2)\n', (6224, 6243), True, 'import matplotlib.pyplot as pl\n'), ((6380, 6406), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (6388, 6406), True, 'import numpy as np\n'), ((6419, 6445), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (6427, 6445), True, 'import numpy as np\n'), ((6805, 6831), 'numpy.array', 'np.array', (['[]'], {'dtype': 
'np.int'}), '([], dtype=np.int)\n', (6813, 6831), True, 'import numpy as np\n'), ((6844, 6870), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (6852, 6870), True, 'import numpy as np\n'), ((7370, 7393), 'numpy.argsort', 'np.argsort', (['user_counts'], {}), '(user_counts)\n', (7380, 7393), True, 'import numpy as np\n'), ((7409, 7432), 'numpy.argsort', 'np.argsort', (['item_counts'], {}), '(item_counts)\n', (7419, 7432), True, 'import numpy as np\n'), ((7567, 7592), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (7576, 7592), True, 'import matplotlib.pyplot as pl\n'), ((7620, 7671), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true[nonzero_ind]', 'y_pred[nonzero_ind]'], {}), '(y_true[nonzero_ind], y_pred[nonzero_ind])\n', (7629, 7671), False, 'from sklearn.metrics import roc_curve\n'), ((7676, 7712), 'matplotlib.pyplot.plot', 'pl.plot', (['fpr', 'tpr'], {'lw': '(2)', 'label': '"""All"""'}), "(fpr, tpr, lw=2, label='All')\n", (7683, 7712), True, 'import matplotlib.pyplot as pl\n'), ((7845, 7862), 'sklearn.metrics.roc_curve', 'roc_curve', (['yt', 'yp'], {}), '(yt, yp)\n', (7854, 7862), False, 'from sklearn.metrics import roc_curve\n'), ((7867, 7936), 'matplotlib.pyplot.plot', 'pl.plot', (['fpr', 'tpr'], {'lw': '(2)', 'color': '"""g"""', 'alpha': 'a', 'label': '"""Bottom 10% Users"""'}), "(fpr, tpr, lw=2, color='g', alpha=a, label='Bottom 10% Users')\n", (7874, 7936), True, 'import matplotlib.pyplot as pl\n'), ((8069, 8086), 'sklearn.metrics.roc_curve', 'roc_curve', (['yt', 'yp'], {}), '(yt, yp)\n', (8078, 8086), False, 'from sklearn.metrics import roc_curve\n'), ((8091, 8160), 'matplotlib.pyplot.plot', 'pl.plot', (['fpr', 'tpr'], {'lw': '(2)', 'color': '"""r"""', 'alpha': 'a', 'label': '"""Bottom 10% Items"""'}), "(fpr, tpr, lw=2, color='r', alpha=a, label='Bottom 10% Items')\n", (8098, 8160), True, 'import matplotlib.pyplot as pl\n'), ((8166, 8196), 'matplotlib.pyplot.plot', 'pl.plot', 
(['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (8173, 8196), True, 'import matplotlib.pyplot as pl\n'), ((8201, 8232), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""FPR"""'], {'fontsize': 'font'}), "('FPR', fontsize=font)\n", (8210, 8232), True, 'import matplotlib.pyplot as pl\n'), ((8237, 8268), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""TPR"""'], {'fontsize': 'font'}), "('TPR', fontsize=font)\n", (8246, 8268), True, 'import matplotlib.pyplot as pl\n'), ((8273, 8308), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'loc': '(2)', 'fontsize': '(font / 2)'}), '(loc=2, fontsize=font / 2)\n', (8282, 8308), True, 'import matplotlib.pyplot as pl\n'), ((8407, 8426), 'numpy.unique', 'np.unique', (['user_ids'], {}), '(user_ids)\n', (8416, 8426), True, 'import numpy as np\n'), ((8438, 8469), 'numpy.ones', 'np.ones', (['(unique_users.size, k)'], {}), '((unique_users.size, k))\n', (8445, 8469), True, 'import numpy as np\n'), ((9454, 9474), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9468, 9474), True, 'import numpy as np\n'), ((9527, 9546), 'numpy.unique', 'np.unique', (['user_ids'], {}), '(user_ids)\n', (9536, 9546), True, 'import numpy as np\n'), ((9563, 9590), 'numpy.zeros', 'np.zeros', (['unique_users.size'], {}), '(unique_users.size)\n', (9571, 9590), True, 'import numpy as np\n'), ((9783, 9803), 'numpy.array', 'np.array', (['[]', 'np.int'], {}), '([], np.int)\n', (9791, 9803), True, 'import numpy as np\n'), ((9973, 10022), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'as_cmap': '(True)', 'reverse': '(True)'}), '(as_cmap=True, reverse=True)\n', (9994, 10022), True, 'import seaborn as sns\n'), ((10027, 10062), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(3 * fs, 2 * fs)'}), '(figsize=(3 * fs, 2 * fs))\n', (10036, 10062), True, 'import matplotlib.pyplot as pl\n'), ((10072, 10087), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(231)'], {}), '(231)\n', (10082, 10087), True, 'import 
matplotlib.pyplot as pl\n'), ((10092, 10140), 'matplotlib.pyplot.imshow', 'pl.imshow', (['a'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(a, interpolation='nearest', cmap=cmap)\n", (10101, 10140), True, 'import matplotlib.pyplot as pl\n'), ((10645, 10659), 'matplotlib.pyplot.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (10652, 10659), True, 'import matplotlib.pyplot as pl\n'), ((10669, 10684), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(234)'], {}), '(234)\n', (10679, 10684), True, 'import matplotlib.pyplot as pl\n'), ((10689, 10737), 'matplotlib.pyplot.imshow', 'pl.imshow', (['b'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(b, interpolation='nearest', cmap=cmap)\n", (10698, 10737), True, 'import matplotlib.pyplot as pl\n'), ((10742, 10756), 'matplotlib.pyplot.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (10749, 10756), True, 'import matplotlib.pyplot as pl\n'), ((11352, 11372), 'numpy.array', 'np.array', (['[]', 'np.int'], {}), '([], np.int)\n', (11360, 11372), True, 'import numpy as np\n'), ((11542, 11591), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'as_cmap': '(True)', 'reverse': '(True)'}), '(as_cmap=True, reverse=True)\n', (11563, 11591), True, 'import seaborn as sns\n'), ((11601, 11616), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(232)'], {}), '(232)\n', (11611, 11616), True, 'import matplotlib.pyplot as pl\n'), ((11621, 11669), 'matplotlib.pyplot.imshow', 'pl.imshow', (['a'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(a, interpolation='nearest', cmap=cmap)\n", (11630, 11669), True, 'import matplotlib.pyplot as pl\n'), ((11674, 11688), 'matplotlib.pyplot.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (11681, 11688), True, 'import matplotlib.pyplot as pl\n'), ((11853, 11868), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(235)'], {}), '(235)\n', (11863, 11868), True, 'import matplotlib.pyplot as pl\n'), ((11873, 11921), 'matplotlib.pyplot.imshow', 'pl.imshow', (['b'], 
{'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(b, interpolation='nearest', cmap=cmap)\n", (11882, 11921), True, 'import matplotlib.pyplot as pl\n'), ((11926, 11940), 'matplotlib.pyplot.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (11933, 11940), True, 'import matplotlib.pyplot as pl\n'), ((12168, 12188), 'numpy.array', 'np.array', (['[]', 'np.int'], {}), '([], np.int)\n', (12176, 12188), True, 'import numpy as np\n'), ((12355, 12370), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(233)'], {}), '(233)\n', (12365, 12370), True, 'import matplotlib.pyplot as pl\n'), ((12375, 12423), 'matplotlib.pyplot.imshow', 'pl.imshow', (['a'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(a, interpolation='nearest', cmap=cmap)\n", (12384, 12423), True, 'import matplotlib.pyplot as pl\n'), ((12581, 12595), 'matplotlib.pyplot.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (12588, 12595), True, 'import matplotlib.pyplot as pl\n'), ((12605, 12620), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(236)'], {}), '(236)\n', (12615, 12620), True, 'import matplotlib.pyplot as pl\n'), ((12625, 12673), 'matplotlib.pyplot.imshow', 'pl.imshow', (['b'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(b, interpolation='nearest', cmap=cmap)\n", (12634, 12673), True, 'import matplotlib.pyplot as pl\n'), ((12678, 12692), 'matplotlib.pyplot.axis', 'pl.axis', (['"""off"""'], {}), "('off')\n", (12685, 12692), True, 'import matplotlib.pyplot as pl\n'), ((12854, 12880), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {'w_pad': '(0.1)'}), '(w_pad=0.1)\n', (12869, 12880), True, 'import matplotlib.pyplot as pl\n'), ((385, 403), 'numpy.zeros', 'np.zeros', (['tot_size'], {}), '(tot_size)\n', (393, 403), True, 'import numpy as np\n'), ((425, 456), 'numpy.random.permutation', 'np.random.permutation', (['tot_size'], {}), '(tot_size)\n', (446, 456), True, 'import numpy as np\n'), ((812, 821), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (819, 821), True, 
'import matplotlib.pyplot as pl\n'), ((830, 840), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (838, 840), True, 'import matplotlib.pyplot as pl\n'), ((1083, 1108), 'numpy.where', 'np.where', (['(uim == fill_num)'], {}), '(uim == fill_num)\n', (1091, 1108), True, 'import numpy as np\n'), ((1186, 1210), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (1207, 1210), True, 'import numpy as np\n'), ((1567, 1576), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (1574, 1576), True, 'import matplotlib.pyplot as pl\n'), ((1585, 1595), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (1593, 1595), True, 'import matplotlib.pyplot as pl\n'), ((1803, 1828), 'numpy.where', 'np.where', (['(uim == fill_num)'], {}), '(uim == fill_num)\n', (1811, 1828), True, 'import numpy as np\n'), ((1877, 1895), 'numpy.round', 'np.round', (['(frac * N)'], {}), '(frac * N)\n', (1885, 1895), True, 'import numpy as np\n'), ((1917, 1943), 'numpy.round', 'np.round', (['(frac * N_holdout)'], {}), '(frac * N_holdout)\n', (1925, 1943), True, 'import numpy as np\n'), ((2391, 2400), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (2398, 2400), True, 'import matplotlib.pyplot as pl\n'), ((2409, 2419), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (2417, 2419), True, 'import matplotlib.pyplot as pl\n'), ((3579, 3601), 'numpy.mean', 'np.mean', (['se[i][ind[i]]'], {}), '(se[i][ind[i]])\n', (3586, 3601), True, 'import numpy as np\n'), ((3614, 3631), 'numpy.isnan', 'np.isnan', (['se_user'], {}), '(se_user)\n', (3622, 3631), True, 'import numpy as np\n'), ((3743, 3771), 'numpy.mean', 'np.mean', (['se[:, i][ind[:, i]]'], {}), '(se[:, i][ind[:, i]])\n', (3750, 3771), True, 'import numpy as np\n'), ((3784, 3801), 'numpy.isnan', 'np.isnan', (['se_item'], {}), '(se_item)\n', (3792, 3801), True, 'import numpy as np\n'), ((5032, 5075), 'numpy.round', 'np.round', (['(0.01 * percentile * user_ind.size)'], {}), '(0.01 * percentile * user_ind.size)\n', 
(5040, 5075), True, 'import numpy as np\n'), ((5096, 5139), 'numpy.round', 'np.round', (['(0.01 * percentile * item_ind.size)'], {}), '(0.01 * percentile * item_ind.size)\n', (5104, 5139), True, 'import numpy as np\n'), ((7452, 7495), 'numpy.round', 'np.round', (['(0.01 * percentile * user_ind.size)'], {}), '(0.01 * percentile * user_ind.size)\n', (7460, 7495), True, 'import numpy as np\n'), ((7516, 7559), 'numpy.round', 'np.round', (['(0.01 * percentile * item_ind.size)'], {}), '(0.01 * percentile * item_ind.size)\n', (7524, 7559), True, 'import numpy as np\n'), ((9132, 9159), 'numpy.max', 'np.max', (['actual_dcgs'], {'axis': '(1)'}), '(actual_dcgs, axis=1)\n', (9138, 9159), True, 'import numpy as np\n'), ((9186, 9211), 'numpy.max', 'np.max', (['best_dcgs'], {'axis': '(1)'}), '(best_dcgs, axis=1)\n', (9192, 9211), True, 'import numpy as np\n'), ((9653, 9692), 'numpy.std', 'np.std', (['se[user_ids == unique_users[i]]'], {}), '(se[user_ids == unique_users[i]])\n', (9659, 9692), True, 'import numpy as np\n'), ((2010, 2042), 'numpy.random.permutation', 'np.random.permutation', (['N_holdout'], {}), '(N_holdout)\n', (2031, 2042), True, 'import numpy as np\n'), ((6549, 6584), 'numpy.append', 'np.append', (['users', 'nonzero_ind[0][i]'], {}), '(users, nonzero_ind[0][i])\n', (6558, 6584), True, 'import numpy as np\n'), ((6605, 6640), 'numpy.append', 'np.append', (['items', 'nonzero_ind[1][i]'], {}), '(items, nonzero_ind[1][i])\n', (6614, 6640), True, 'import numpy as np\n'), ((6974, 7009), 'numpy.append', 'np.append', (['users', 'nonzero_ind[0][i]'], {}), '(users, nonzero_ind[0][i])\n', (6983, 7009), True, 'import numpy as np\n'), ((7030, 7065), 'numpy.append', 'np.append', (['items', 'nonzero_ind[1][i]'], {}), '(items, nonzero_ind[1][i])\n', (7039, 7065), True, 'import numpy as np\n'), ((8647, 8669), 'numpy.argsort', 'np.argsort', (['(-user_pred)'], {}), '(-user_pred)\n', (8657, 8669), True, 'import numpy as np\n'), ((9727, 9767), 'numpy.random.permutation', 
'np.random.permutation', (['unique_users.size'], {}), '(unique_users.size)\n', (9748, 9767), True, 'import numpy as np\n'), ((11296, 11336), 'numpy.random.permutation', 'np.random.permutation', (['unique_users.size'], {}), '(unique_users.size)\n', (11317, 11336), True, 'import numpy as np\n'), ((12131, 12152), 'numpy.argsort', 'np.argsort', (['user_mses'], {}), '(user_mses)\n', (12141, 12152), True, 'import numpy as np\n'), ((8790, 8821), 'numpy.arange', 'np.arange', (['user_relevances.size'], {}), '(user_relevances.size)\n', (8799, 8821), True, 'import numpy as np\n'), ((9856, 9894), 'numpy.where', 'np.where', (['(user_ids == rand_user_ids[i])'], {}), '(user_ids == rand_user_ids[i])\n', (9864, 9894), True, 'import numpy as np\n'), ((11425, 11463), 'numpy.where', 'np.where', (['(user_ids == rand_user_ids[i])'], {}), '(user_ids == rand_user_ids[i])\n', (11433, 11463), True, 'import numpy as np\n'), ((12241, 12279), 'numpy.where', 'np.where', (['(user_ids == best_user_ids[i])'], {}), '(user_ids == best_user_ids[i])\n', (12249, 12279), True, 'import numpy as np\n')] |
# Various functions and methods for preprocessing/metric measurement/plotting etc...
import numpy as np
import matplotlib.pyplot as plt
########################################################################################################################
# Metrics ##############################################################################################################
########################################################################################################################
def f1(pr, tr, class_num):
    """
    Calculate the F1 score for a single class (one-vs-rest).

    :param pr: list of predicted class labels
    :param tr: list of actual (ground-truth) class labels
    :param class_num: the class to score
    :return: F1 score of ``class_num`` for the predictions ``pr`` vs. ``tr``
    """
    # One-vs-rest boolean masks for the requested class.
    pred = [x == class_num for x in pr]
    truth = [x == class_num for x in tr]
    mix = list(zip(pred, truth))
    # BUGFIX (naming): the original code counted (False, True) as "fp" and
    # (True, False) as "fn", which are actually false negatives and false
    # positives respectively.  The returned value was nevertheless correct,
    # because F1 is symmetric under swapping precision and recall; the counts
    # below use the proper names.
    tp = mix.count((True, True))    # predicted class_num and it was class_num
    fp = mix.count((True, False))   # predicted class_num but it was not
    fn = mix.count((False, True))   # missed an actual class_num
    # Guard against empty denominators (class never present / never predicted).
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0
    if recall == 0 and precision == 0:
        return 0
    return 2 * recall * precision / (recall + precision)
def macro_f1(predictions, truth):
    """
    Compute the macro-averaged F1 score over classes 0, 1 and 2, i.e. every
    class contributes with equal weight.

    :param predictions: model output logits, shape (n_samples, n_classes)
    :param truth: array of actual labels
    :return: macro F1 between the argmax of ``predictions`` and ``truth``
    """
    preds = np.argmax(predictions, axis=1).flatten()
    labels = truth.flatten()
    scores = [f1(preds, labels, c) for c in (0, 1, 2)]
    # When class 1 never occurs in the ground truth its score is skipped,
    # so it cannot drag the average down.
    if np.sum([x == 1 for x in labels]) == 0:
        return (scores[0] + scores[2]) / 2
    return sum(scores) / 3
def weighted_f1(predictions, truth):
    """
    Compute the weighted F1 score over classes 0, 1 and 2, where each class
    is weighted by how often it appears in the ground truth.

    :param predictions: model output logits, shape (n_samples, n_classes)
    :param truth: array of actual labels
    :return: weighted F1 between the argmax of ``predictions`` and ``truth``
    """
    preds = np.argmax(predictions, axis=1).flatten()
    labels = truth.flatten()
    total = 0
    for cls in (0, 1, 2):
        weight = np.sum([x == cls for x in truth])
        total += weight * f1(preds, labels, cls)
    return total / len(truth)
def accuracy(predictions, truth):
    """
    Compute the flat (overall) accuracy of the model predictions.

    :param predictions: model output logits, shape (n_samples, n_classes)
    :param truth: array of actual labels
    :return: fraction of samples whose argmax prediction matches the label
    """
    preds = np.argmax(predictions, axis=1).flatten()
    labels = truth.flatten()
    hits = np.sum(preds == labels)
    return hits / len(truth)
########################################################################################################################
# Plotting #############################################################################################################
########################################################################################################################
def plot(path, location, metric_names):
    """
    Load a metric array from ``path`` and save a plot of it to ``location``.

    A 1-D array labelled 'Train Loss' is summed into 8 equal buckets
    (presumably one per epoch -- TODO confirm the fixed epoch count of 8)
    and plotted as a loss curve; a 2-D array is plotted as one labelled
    line per column.

    :param path: location of the saved metric data (loaded via np.load)
    :param location: output image file name
    :param metric_names: list of metric names used as plot labels
    """
    y = np.load(path)
    metric = len(y.shape)
    if metric == 1 and metric_names[0] == 'Train Loss':
        # Sum the per-step losses into 8 equal chunks before plotting.
        plotting_list = []
        x = y.shape[0] / 8
        for i in range(8):
            plotting_list.append(np.sum(y[int(i*x):int((i+1)*x)]))
        plt.plot(plotting_list)
        plt.xlabel("Epoch")
        plt.ylabel(metric_names[0])
    elif metric > 1:
        # One curve per metric column, labelled via metric_names.
        for i in range(y.shape[1]):
            plt.plot(y[:, i], label=metric_names[i])
        plt.legend(loc='lower right')
    plt.savefig(location)
    return 0
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.sum",
"numpy.load",
"matplotlib.pyplot.legend"
] | [((2501, 2534), 'numpy.sum', 'np.sum', (['[(x == 0) for x in truth]'], {}), '([(x == 0) for x in truth])\n', (2507, 2534), True, 'import numpy as np\n'), ((2548, 2581), 'numpy.sum', 'np.sum', (['[(x == 1) for x in truth]'], {}), '([(x == 1) for x in truth])\n', (2554, 2581), True, 'import numpy as np\n'), ((2595, 2628), 'numpy.sum', 'np.sum', (['[(x == 2) for x in truth]'], {}), '([(x == 2) for x in truth])\n', (2601, 2628), True, 'import numpy as np\n'), ((3718, 3731), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (3725, 3731), True, 'import numpy as np\n'), ((4211, 4232), 'matplotlib.pyplot.savefig', 'plt.savefig', (['location'], {}), '(location)\n', (4222, 4232), True, 'import matplotlib.pyplot as plt\n'), ((1954, 1993), 'numpy.sum', 'np.sum', (['[(x == 1) for x in labels_flat]'], {}), '([(x == 1) for x in labels_flat])\n', (1960, 1993), True, 'import numpy as np\n'), ((3087, 3122), 'numpy.sum', 'np.sum', (['(flatten_pred == labels_flat)'], {}), '(flatten_pred == labels_flat)\n', (3093, 3122), True, 'import numpy as np\n'), ((3971, 3994), 'matplotlib.pyplot.plot', 'plt.plot', (['plotting_list'], {}), '(plotting_list)\n', (3979, 3994), True, 'import matplotlib.pyplot as plt\n'), ((4003, 4022), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4013, 4022), True, 'import matplotlib.pyplot as plt\n'), ((4031, 4058), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['metric_names[0]'], {}), '(metric_names[0])\n', (4041, 4058), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1770), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (1749, 1770), True, 'import numpy as np\n'), ((2411, 2441), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (2420, 2441), True, 'import numpy as np\n'), ((3001, 3031), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (3010, 3031), True, 'import numpy as np\n'), ((4177, 4206), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4187, 4206), True, 'import matplotlib.pyplot as plt\n'), ((4128, 4168), 'matplotlib.pyplot.plot', 'plt.plot', (['y[:, i]'], {'label': 'metric_names[i]'}), '(y[:, i], label=metric_names[i])\n', (4136, 4168), True, 'import matplotlib.pyplot as plt\n')] |
import matplotlib as mpl
import os
import numpy as np
def figsize(scale):
    """Return [width, height] in inches for a figure whose width is
    ``scale`` times the document's 510 pt text width."""
    pt_width = 510.
    pt_to_inch = 1.0 / 72.27
    golden = (np.sqrt(5.0) - 1.0) / 2.
    width = pt_width * pt_to_inch * scale
    # NOTE(review): the height is derived from the *full* text width, not the
    # scaled one, so it does not grow with ``scale`` -- presumably intended so
    # all figures share one height; confirm before reusing elsewhere.
    height = pt_width * pt_to_inch * golden * 0.5
    return [width, height]
# Configure matplotlib's pgf backend (LaTeX-rendered figures) before pyplot
# is imported, then load the learned Werner-state distributions, evaluate the
# Bell observable for each noise level and save the summary figure.
mpl.use('pgf')
pgf_with_custom_preamble = {
    "font.family": "sansserif",
    "font.serif": ['Computer Modern Roman'],
    "font.size": 5,
    "figure.figsize": figsize(0.25),
    "text.usetex": True,
    "axes.linewidth": 0.1,
    "pgf.rcfonts": False,
    "pgf.preamble": [
        "\\usepackage[cm]{sfmath}",
        "\\usepackage{units}",
        ]
}
mpl.rcParams.update(pgf_with_custom_preamble)
import matplotlib.pyplot as plt
# define arrays to store mean and standard deviation of Bell observable for different r
Bell_plot = []
Bell_plot_std = []
# loop over all r values, pW is needed for the file names
pW = ['0', '01', '02', '03', '04', '05', '06', '07', '08', '09', '1']
for q in pW:
    # load data (only the last 200 training repetitions are kept)
    p = np.load('../Werner_State_Data/learning_nvis4_nhid20_25reps_{}_p.txt'.format(q))[:, -200:]
    # array to append Bell observables
    Bell = []
    for i in range(np.shape(p)[1]):
        # evaluate correlation in x- and z-basis, which corresponds to Theta=pi/4
        corrx = (8. * p[5,i] - 4. * (p[6,i] + p[7,i] + p[9,i] + p[13,i]) + 2. * (p[10,i] + p[11,i] + p[14,i] + p[15,i]))
        corrz = (9. * p[0,i] - 3. * (p[1,i] + p[2,i] + p[3,i] + p[4,i] + p[8,i] + p[12,i]) + p[5,i] + p[6,i] + p[7,i] + p[9,i] + p[10,i] + p[11,i] + p[13,i] + p[14,i] + p[15,i])
        # append Bell observable
        Bell = np.append(Bell, np.sqrt(2.) * (corrx + corrz))
    Bell_plot = np.append(Bell_plot, np.mean(Bell))
    Bell_plot_std = np.append(Bell_plot_std, np.std(Bell))
######## Set up figure ##########
# Analytic reference line: B = sqrt(2) * 2 * (1 - p).
plt.plot(np.arange(0, 1.1, 0.1), np.sqrt(2.) * np.arange(2., -0.1, -0.2), linewidth=0.7, color='k')
x = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.8, 0.9, 1.0]
# define arrays for blue data points and plot them (index 3, i.e. p=0.3, and
# the last index, p=1, are plotted separately in other colours below)
Bell2 = np.array([Bell_plot[0], Bell_plot[1], Bell_plot[2], Bell_plot[4], Bell_plot[5], Bell_plot[6], Bell_plot[7], Bell_plot[8], Bell_plot[9]])
Bell2_std = np.array([Bell_plot_std[0], Bell_plot_std[1], Bell_plot_std[2], Bell_plot_std[4], Bell_plot_std[5], Bell_plot_std[6], Bell_plot_std[7], Bell_plot_std[8], Bell_plot_std[9]])
l1 = plt.errorbar(x, Bell2[::-1], yerr=Bell2_std[::-1], marker='o', markerfacecolor='none', markeredgecolor='C0', markersize=2., linestyle='None', linewidth=0.7, markeredgewidth=0.5, color='C0')
l1_1 = plt.plot(x, Bell2[::-1], 'o', markerfacecolor='C0', markeredgecolor='none', markersize=2., linewidth=0.3, alpha=0.5)
# plot red data point at r=1
plt.errorbar(0, Bell_plot[-1], yerr=Bell_plot_std[-1], marker='o', markerfacecolor='none', markeredgecolor='C3', markersize=2., linestyle='None', linewidth=0.7, markeredgewidth=0.5, color='C3')
plt.plot(0, Bell_plot[-1], 'o', markerfacecolor='C3', markeredgecolor='none', markersize=2., linewidth=0.3, alpha=0.5)
# plot green data point at r=0.3
plt.errorbar(0.7, Bell_plot[3], yerr=Bell_plot_std[3], marker='o', markerfacecolor='none', markeredgecolor='C2', markersize=2., linestyle='None', linewidth=0.7, markeredgewidth=0.5, color='C2')
plt.plot(0.7, Bell_plot[3], 'o', markerfacecolor='C2', markeredgecolor='none', markersize=2., linewidth=0.3, alpha=0.5)
# Classical bound B = 2, plus shaded regions above/below the r = 2/3 threshold.
plt.hlines(y = 2, xmin = -0.1, xmax=1.1, linestyle='dashed', linewidth=0.5, color='gray')
plt.xlabel(r'$1-p$', labelpad=0.7)
plt.ylabel(r'$\mathcal{B}\left(\pi/4\right)$', labelpad=0.7)
plt.xticks([0,0.2,0.4,0.6,0.8,1])
plt.xlim(-0.05, 1.05)
plt.tick_params(axis="x", direction="in", length=1.5, width=0.3, labelsize=5, pad=1.)
plt.tick_params(axis="y", direction="in", length=1.5, width=0.3, labelsize=5, pad=1.)
plt.axvspan(xmin=2./3., xmax=1.05, facecolor='xkcd:apple green', alpha=0.15)
plt.axvspan(xmin=-0.05, xmax=2./3., facecolor='xkcd:chestnut', alpha=0.15)
plt.savefig('Fig2_c.pdf', bbox_inches='tight')
| [
"numpy.mean",
"numpy.shape",
"matplotlib.pyplot.axvspan",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"matplotlib.rcParams.update",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.tick_params",
... | [((349, 363), 'matplotlib.use', 'mpl.use', (['"""pgf"""'], {}), "('pgf')\n", (356, 363), True, 'import matplotlib as mpl\n'), ((758, 803), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['pgf_with_custom_preamble'], {}), '(pgf_with_custom_preamble)\n', (777, 803), True, 'import matplotlib as mpl\n'), ((2153, 2294), 'numpy.array', 'np.array', (['[Bell_plot[0], Bell_plot[1], Bell_plot[2], Bell_plot[4], Bell_plot[5],\n Bell_plot[6], Bell_plot[7], Bell_plot[8], Bell_plot[9]]'], {}), '([Bell_plot[0], Bell_plot[1], Bell_plot[2], Bell_plot[4], Bell_plot\n [5], Bell_plot[6], Bell_plot[7], Bell_plot[8], Bell_plot[9]])\n', (2161, 2294), True, 'import numpy as np\n'), ((2302, 2482), 'numpy.array', 'np.array', (['[Bell_plot_std[0], Bell_plot_std[1], Bell_plot_std[2], Bell_plot_std[4],\n Bell_plot_std[5], Bell_plot_std[6], Bell_plot_std[7], Bell_plot_std[8],\n Bell_plot_std[9]]'], {}), '([Bell_plot_std[0], Bell_plot_std[1], Bell_plot_std[2],\n Bell_plot_std[4], Bell_plot_std[5], Bell_plot_std[6], Bell_plot_std[7],\n Bell_plot_std[8], Bell_plot_std[9]])\n', (2310, 2482), True, 'import numpy as np\n'), ((2481, 2680), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x', 'Bell2[::-1]'], {'yerr': 'Bell2_std[::-1]', 'marker': '"""o"""', 'markerfacecolor': '"""none"""', 'markeredgecolor': '"""C0"""', 'markersize': '(2.0)', 'linestyle': '"""None"""', 'linewidth': '(0.7)', 'markeredgewidth': '(0.5)', 'color': '"""C0"""'}), "(x, Bell2[::-1], yerr=Bell2_std[::-1], marker='o',\n markerfacecolor='none', markeredgecolor='C0', markersize=2.0, linestyle\n ='None', linewidth=0.7, markeredgewidth=0.5, color='C0')\n", (2493, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2678, 2799), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'Bell2[::-1]', '"""o"""'], {'markerfacecolor': '"""C0"""', 'markeredgecolor': '"""none"""', 'markersize': '(2.0)', 'linewidth': '(0.3)', 'alpha': '(0.5)'}), "(x, Bell2[::-1], 'o', markerfacecolor='C0', markeredgecolor='none',\n markersize=2.0, 
linewidth=0.3, alpha=0.5)\n", (2686, 2799), True, 'import matplotlib.pyplot as plt\n'), ((2825, 3028), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(0)', 'Bell_plot[-1]'], {'yerr': 'Bell_plot_std[-1]', 'marker': '"""o"""', 'markerfacecolor': '"""none"""', 'markeredgecolor': '"""C3"""', 'markersize': '(2.0)', 'linestyle': '"""None"""', 'linewidth': '(0.7)', 'markeredgewidth': '(0.5)', 'color': '"""C3"""'}), "(0, Bell_plot[-1], yerr=Bell_plot_std[-1], marker='o',\n markerfacecolor='none', markeredgecolor='C3', markersize=2.0, linestyle\n ='None', linewidth=0.7, markeredgewidth=0.5, color='C3')\n", (2837, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3143), 'matplotlib.pyplot.plot', 'plt.plot', (['(0)', 'Bell_plot[-1]', '"""o"""'], {'markerfacecolor': '"""C3"""', 'markeredgecolor': '"""none"""', 'markersize': '(2.0)', 'linewidth': '(0.3)', 'alpha': '(0.5)'}), "(0, Bell_plot[-1], 'o', markerfacecolor='C3', markeredgecolor=\n 'none', markersize=2.0, linewidth=0.3, alpha=0.5)\n", (3027, 3143), True, 'import matplotlib.pyplot as plt\n'), ((3172, 3375), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(0.7)', 'Bell_plot[3]'], {'yerr': 'Bell_plot_std[3]', 'marker': '"""o"""', 'markerfacecolor': '"""none"""', 'markeredgecolor': '"""C2"""', 'markersize': '(2.0)', 'linestyle': '"""None"""', 'linewidth': '(0.7)', 'markeredgewidth': '(0.5)', 'color': '"""C2"""'}), "(0.7, Bell_plot[3], yerr=Bell_plot_std[3], marker='o',\n markerfacecolor='none', markeredgecolor='C2', markersize=2.0, linestyle\n ='None', linewidth=0.7, markeredgewidth=0.5, color='C2')\n", (3184, 3375), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3491), 'matplotlib.pyplot.plot', 'plt.plot', (['(0.7)', 'Bell_plot[3]', '"""o"""'], {'markerfacecolor': '"""C2"""', 'markeredgecolor': '"""none"""', 'markersize': '(2.0)', 'linewidth': '(0.3)', 'alpha': '(0.5)'}), "(0.7, Bell_plot[3], 'o', markerfacecolor='C2', markeredgecolor=\n 'none', markersize=2.0, linewidth=0.3, alpha=0.5)\n", (3374, 3491), 
True, 'import matplotlib.pyplot as plt\n'), ((3487, 3576), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': '(2)', 'xmin': '(-0.1)', 'xmax': '(1.1)', 'linestyle': '"""dashed"""', 'linewidth': '(0.5)', 'color': '"""gray"""'}), "(y=2, xmin=-0.1, xmax=1.1, linestyle='dashed', linewidth=0.5,\n color='gray')\n", (3497, 3576), True, 'import matplotlib.pyplot as plt\n'), ((3578, 3611), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$1-p$"""'], {'labelpad': '(0.7)'}), "('$1-p$', labelpad=0.7)\n", (3588, 3611), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3676), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathcal{B}\\\\left(\\\\pi/4\\\\right)$"""'], {'labelpad': '(0.7)'}), "('$\\\\mathcal{B}\\\\left(\\\\pi/4\\\\right)$', labelpad=0.7)\n", (3623, 3676), True, 'import matplotlib.pyplot as plt\n'), ((3675, 3713), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 0.2, 0.4, 0.6, 0.8, 1]'], {}), '([0, 0.2, 0.4, 0.6, 0.8, 1])\n', (3685, 3713), True, 'import matplotlib.pyplot as plt\n'), ((3710, 3731), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.05)', '(1.05)'], {}), '(-0.05, 1.05)\n', (3718, 3731), True, 'import matplotlib.pyplot as plt\n'), ((3733, 3824), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'direction': '"""in"""', 'length': '(1.5)', 'width': '(0.3)', 'labelsize': '(5)', 'pad': '(1.0)'}), "(axis='x', direction='in', length=1.5, width=0.3, labelsize=\n 5, pad=1.0)\n", (3748, 3824), True, 'import matplotlib.pyplot as plt\n'), ((3819, 3910), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'direction': '"""in"""', 'length': '(1.5)', 'width': '(0.3)', 'labelsize': '(5)', 'pad': '(1.0)'}), "(axis='y', direction='in', length=1.5, width=0.3, labelsize=\n 5, pad=1.0)\n", (3834, 3910), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3991), 'matplotlib.pyplot.axvspan', 'plt.axvspan', ([], {'xmin': '(2.0 / 3.0)', 'xmax': '(1.05)', 'facecolor': '"""xkcd:apple green"""', 'alpha': '(0.15)'}), 
"(xmin=2.0 / 3.0, xmax=1.05, facecolor='xkcd:apple green', alpha=0.15\n )\n", (3917, 3991), True, 'import matplotlib.pyplot as plt\n'), ((3983, 4061), 'matplotlib.pyplot.axvspan', 'plt.axvspan', ([], {'xmin': '(-0.05)', 'xmax': '(2.0 / 3.0)', 'facecolor': '"""xkcd:chestnut"""', 'alpha': '(0.15)'}), "(xmin=-0.05, xmax=2.0 / 3.0, facecolor='xkcd:chestnut', alpha=0.15)\n", (3994, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4059, 4105), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Fig2_c.pdf"""'], {'bbox_inches': '"""tight"""'}), "('Fig2_c.pdf', bbox_inches='tight')\n", (4070, 4105), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1973), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (1960, 1973), True, 'import numpy as np\n'), ((1833, 1846), 'numpy.mean', 'np.mean', (['Bell'], {}), '(Bell)\n', (1840, 1846), True, 'import numpy as np\n'), ((1893, 1905), 'numpy.std', 'np.std', (['Bell'], {}), '(Bell)\n', (1899, 1905), True, 'import numpy as np\n'), ((1975, 1987), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1982, 1987), True, 'import numpy as np\n'), ((1989, 2015), 'numpy.arange', 'np.arange', (['(2.0)', '(-0.1)', '(-0.2)'], {}), '(2.0, -0.1, -0.2)\n', (1998, 2015), True, 'import numpy as np\n'), ((148, 160), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (155, 160), True, 'import numpy as np\n'), ((1293, 1304), 'numpy.shape', 'np.shape', (['p'], {}), '(p)\n', (1301, 1304), True, 'import numpy as np\n'), ((1756, 1768), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1763, 1768), True, 'import numpy as np\n')] |
###modified based on centernet###
#MIT License
#Copyright (c) 2019 <NAME>
#All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import datetime
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import cv2
import torch
import torch.utils.data as data
import matplotlib.image as mpimg
import random
import math
from PIL import Image
import re
from torch._six import container_abcs, string_classes, int_classes
def get_dataLoader(opt, args):
    """
    Build the distributed-training DataLoader for the keypoint dataset.

    :param opt: experiment options (seed, master_batch_size, num_workers, ...)
    :param args: distributed-run info; args.group_size is the number of
                 replicas (world size), args.rank this process' rank
    :return: (train_loader, train_sampler) -- the sampler is returned so the
             caller can call set_epoch() each epoch
    """
    torch.manual_seed(opt.seed)
    Dataset = get_dataset()
    train_dataset = Dataset(opt, 'train')
    # use ddp sampler
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=args.group_size, rank=args.rank)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=opt.master_batch_size,
        shuffle=False,  # shuffling is delegated to the DistributedSampler
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True,
        collate_fn=Multiposebatch,  # NOTE(review): Multiposebatch is not defined in this view -- presumably defined elsewhere in the file
        sampler=train_sampler
    )
    return train_loader, train_sampler
def Data_anchor_sample(image, anns):
    """
    Anchor-based random rescale augmentation (as in face detectors such as
    PyramidBox/DSFD-style samplers): pick a random ground-truth face, choose a
    target anchor size no larger than (roughly) its own scale, and rescale the
    whole image so that face lands near the target anchor size.  Boxes and the
    5 facial keypoints in ``anns`` are updated in place to match.

    :param image: HxWx3 image array
    :param anns: COCO-style annotation dicts with 'bbox' [x, y, w, h] and
                 'keypoints' (5 points as x, y, visibility triplets)
    :return: (rescaled image, updated anns)
    """
    maxSize = 12000  # cap on the rescaled image side product (maxSize^2 pixels)
    infDistance = 9999999
    boxes = []
    for ann in anns:
        boxes.append([ann['bbox'][0], ann['bbox'][1], ann['bbox'][0] + ann['bbox'][2], ann['bbox'][1] + ann['bbox'][3]])
    boxes = np.asarray(boxes, dtype=np.float32)
    height, width, _ = image.shape
    random_counter = 0
    # Area of every box; pick one face at random and use the side length
    # (sqrt of area) as its characteristic scale.
    boxArea = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    rand_idx = random.randint(0, len(boxArea) - 1)
    rand_Side = boxArea[rand_idx] ** 0.5
    #anchors = [16, 32, 48, 64, 96, 128, 256, 512] can get what we want sometime, but unstable
    anchors = [16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96, 128, 256, 512]
    distance = infDistance
    anchor_idx = 5
    # Find the anchor closest to the sampled face's side length.
    for i, anchor in enumerate(anchors):
        if abs(anchor - rand_Side) < distance:
            distance = abs(anchor - rand_Side)
            anchor_idx = i
    # Target anchor is drawn from anchors up to the matched one (capped at
    # index 11, i.e. 128), then jittered by a random factor in [0.5, 2].
    target_anchor = random.choice(anchors[0:min(anchor_idx + 1, 11)])#5)])
    ratio = float(target_anchor) / rand_Side
    ratio = ratio * (2 ** random.uniform(-1, 1))
    if int(height * ratio * width * ratio) > maxSize * maxSize:
        ratio = (maxSize * maxSize / (height * width)) ** 0.5
    # Random interpolation method adds further augmentation variety.
    interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4]
    interp_method = random.choice(interp_methods)
    image = cv2.resize(image, None, None, fx=ratio, fy=ratio, interpolation=interp_method)
    # Scale boxes and keypoints by the same ratio.
    boxes[:, 0] *= ratio
    boxes[:, 1] *= ratio
    boxes[:, 2] *= ratio
    boxes[:, 3] *= ratio
    boxes = boxes.tolist()
    for i in range(len(anns)):
        anns[i]['bbox'] = [boxes[i][0], boxes[i][1], boxes[i][2] - boxes[i][0], boxes[i][3] - boxes[i][1]]
        for j in range(5):
            anns[i]['keypoints'][j * 3] *= ratio # [0, 3, 6, ...]
            anns[i]['keypoints'][j * 3 + 1] *= ratio # [1, 4, 7, ...]
    return image, anns
def get_border(border, size):
    """Shrink ``border`` by powers of two until ``size`` exceeds twice the
    shrunk border, i.e. until the margin fits on both ends of the axis."""
    divisor = 1
    while True:
        b = border // divisor
        if size - b > b:  # equivalent to size > 2 * (border // divisor)
            return b
        divisor *= 2
def get_3rd_point(a, b):
    """Return the third corner of the affine-reference triangle: ``b`` plus
    the vector ``a - b`` rotated 90 degrees."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate the 2-D vector ``src_point`` by ``rot_rad`` radians
    (counter-clockwise) and return the result as a plain [x, y] list."""
    sin_r = np.sin(rot_rad)
    cos_r = np.cos(rot_rad)
    x, y = src_point[0], src_point[1]
    return [x * cos_r - y * sin_r, x * sin_r + y * cos_r]
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
    """
    Build the 2x3 affine matrix mapping a (center, scale, rot) crop of the
    source image onto an output_size canvas (or the inverse mapping).

    Three reference points (centre, a rotated "up" direction point, and a
    third point completing a right angle) are matched between source and
    destination and handed to cv2.getAffineTransform.

    :param center: (x, y) crop centre in source-image coordinates
    :param scale: crop size; a scalar is expanded to (scale, scale)
    :param rot: rotation in degrees
    :param output_size: (width, height) of the destination canvas
    :param shift: relative shift of the crop centre, in units of scale
                  (NOTE(review): mutable np.array default -- safe here since
                  it is only read, never mutated)
    :param inv: if truthy, return the inverse (dst -> src) transform
    :return: 2x3 float affine matrix as produced by cv2.getAffineTransform
    """
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale], dtype=np.float32)
    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    # Second reference point: half the crop width "upwards", rotated by rot.
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
    # Third point completes a right-angled triangle on each side.
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
# affine_transform(bbox[:2], trans_output)
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix ``t`` to the 2-D point ``pt`` and return
    the transformed (x, y) as a float32 array."""
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32)
    transformed = np.dot(t, homogeneous)
    return transformed[:2]
def grayscale(image):
    # Convert a BGR (OpenCV channel order) image to single-channel grayscale.
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
    """PCA-based lighting jitter (AlexNet-style): perturb ``image`` in place
    along the colour principal components.  ``eigval``/``eigvec`` are the
    eigenvalues/eigenvectors of the RGB covariance; ``alphastd`` scales the
    random strength drawn from ``data_rng``."""
    strength = data_rng.normal(scale=alphastd, size=(3,))
    colour_shift = eigvec @ (eigval * strength)
    image += colour_shift
def blend_(alpha, image1, image2):
    """In-place blend: image1 <- alpha * image1 + (1 - alpha) * image2.
    NOTE: ``image2`` is also scaled in place by (1 - alpha) as a side effect,
    matching the original implementation."""
    np.multiply(image1, alpha, out=image1)
    np.multiply(image2, 1 - alpha, out=image2)
    np.add(image1, image2, out=image1)
def saturation_(data_rng, image, gs, gs_mean, var):
    """Randomly adjust saturation in place by blending ``image`` with its
    grayscale version ``gs`` (``gs_mean`` is unused here; the signature
    matches the other colour-jitter helpers)."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
    """Randomly scale ``image`` brightness in place by a factor drawn
    uniformly from [1 - var, 1 + var] (``gs``/``gs_mean`` unused; signature
    matches the other colour-jitter helpers)."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    np.multiply(image, factor, out=image)
def contrast_(data_rng, image, gs, gs_mean, var):
    """Randomly adjust contrast in place by blending ``image`` towards the
    scalar grayscale mean ``gs_mean`` (``gs`` unused; signature matches the
    other colour-jitter helpers)."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
    # Full colour augmentation: apply brightness / contrast / saturation
    # jitter (strength 0.4 each) in a random order, then PCA lighting noise
    # (strength 0.1).  ``image`` is modified in place.
    functions = [brightness_, contrast_, saturation_]
    random.shuffle(functions)
    gs = grayscale(image)
    gs_mean = gs.mean()
    for f in functions:
        f(data_rng, image, gs, gs_mean, 0.4)
    lighting_(data_rng, image, 0.1, eig_val, eig_vec)
def coco_box_to_bbox(box):
    """Convert a COCO [x, y, w, h] box into an [x1, y1, x2, y2] float32 array."""
    x, y, w, h = box[0], box[1], box[2], box[3]
    return np.array([x, y, x + w, y + h], dtype=np.float32)
def gaussian_radius(det_size, min_overlap=0.7):
    """
    Compute the largest Gaussian radius such that a corner/centre placed
    anywhere within that radius of the ground truth still yields a box with
    IoU >= ``min_overlap`` against the ground-truth box.

    Three displacement cases are considered; each yields a quadratic
    a*r^2 + b*r + c <= 0 in the radius r, and the smallest admissible root
    wins.

    BUGFIX: the original CornerNet/CenterNet code divided every root by 2
    instead of by (2 * a); for case 1 a == 1, so it was coincidentally
    correct there, but cases 2 and 3 produced radii that were too large
    (see the CornerNet repository discussion of this formula).

    :param det_size: (height, width) of the detection box
    :param min_overlap: minimum IoU to guarantee
    :return: the admissible radius (float)
    """
    height, width = det_size

    # Case 1: one corner fixed, the other shifted.
    a1 = 1
    b1 = (height + width)
    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
    sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
    r1 = (b1 + sq1) / (2 * a1)

    # Case 2: both corners shifted outwards (box grows).
    a2 = 4
    b2 = 2 * (height + width)
    c2 = (1 - min_overlap) * width * height
    sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
    r2 = (b2 + sq2) / (2 * a2)

    # Case 3: both corners shifted inwards (box shrinks).
    a3 = 4 * min_overlap
    b3 = -2 * min_overlap * (height + width)
    c3 = (min_overlap - 1) * width * height
    sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
    r3 = (b3 + sq3) / (2 * a3)
    return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
    """Return an unnormalised 2-D Gaussian kernel of the given (h, w) shape
    with peak value 1 at the centre; underflow-level tail values are zeroed."""
    half_h = (shape[0] - 1.) / 2.
    half_w = (shape[1] - 1.) / 2.
    ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]
    kernel = np.exp(-(xs ** 2 + ys ** 2) / (2. * sigma ** 2))
    # Flush values below machine epsilon (relative to the peak) to exactly 0.
    kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
    return kernel
def draw_umich_gaussian(heatmap, center, radius, k=1):
    # Splat a Gaussian peak of strength k onto ``heatmap`` at ``center``,
    # modifying the heatmap in place (existing values are kept via maximum).
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    x, y = int(center[0]), int(center[1])
    height, width = heatmap.shape[0:2]
    # Clip the stamp region so it never leaves the heatmap bounds.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)
    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
        # Element-wise max writes through the view into ``heatmap`` itself.
        np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
    return heatmap
def get_dataset():
    # Compose the concrete dataset class: FACEHP supplies the COCO-style
    # annotation loading / metadata, MultiPoseDataset supplies __getitem__.
    class Dataset(FACEHP, MultiPoseDataset):
        pass
    return Dataset
class FACEHP(data.Dataset):
    """CenterFace keypoint dataset metadata + COCO annotation loading.

    Holds normalisation constants, the 5 facial keypoints' flip pairs, and
    loads the image id list from a COCO-format annotation file.
    """
    num_classes = 1            # single class: face
    num_joints = 5             # 5 facial landmarks
    default_resolution = [800, 800]
    # Per-channel normalisation statistics (broadcastable over HxWx3 images).
    mean = np.array([0.40789654, 0.44719302, 0.47026115],
                    dtype=np.float32).reshape(1, 1, 3)
    std = np.array([0.28863828, 0.27408164, 0.27809835],
                   dtype=np.float32).reshape(1, 1, 3)
    # Keypoint index pairs swapped under horizontal flip (eyes, mouth corners).
    flip_idx = [[0, 1], [3, 4]]
    def __init__(self, opt, split):
        super(FACEHP, self).__init__()
        # NOTE(review): edges/acc_idxs reference 17 joints and look inherited
        # from the human-pose dataset; with num_joints = 5 they appear unused
        # here -- confirm before relying on them.
        self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
                      [4, 6], [3, 5], [5, 6],
                      [5, 7], [7, 9], [6, 8], [8, 10],
                      [6, 12], [5, 11], [11, 12],
                      [12, 14], [14, 16], [11, 13], [13, 15]]
        self.acc_idxs = [1, 2, 3, 4]
        self.data_dir = opt.data_dir
        # NOTE(review): img_dir is assigned twice; the opt.img_dir value two
        # lines below wins, making this first assignment dead.
        self.img_dir = os.path.join(self.data_dir, 'images')
        _ann_name = {'train': 'train', 'val': 'val'}
        self.img_dir = opt.img_dir
        self.annot_path = opt.annot_path
        self.max_objs = 64  # at most 64 annotated faces per image
        # Fixed-seed RNG + colour-PCA statistics used by color_aug().
        self._data_rng = np.random.RandomState(123)
        self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                                 dtype=np.float32)
        self._eig_vec = np.array([
            [-0.58752847, -0.69563484, 0.41340352],
            [-0.5832747, 0.00994535, -0.81221408],
            [-0.56089297, 0.71832671, 0.41158938]
        ], dtype=np.float32)
        self.split = split
        self.opt = opt
        print('==> initializing centerface key point {} data.'.format(split))
        self.coco = coco.COCO(self.annot_path)
        image_ids = self.coco.getImgIds()
        if split == 'train':
            # Keep only images that actually carry annotations.
            self.images = []
            for img_id in image_ids:
                idxs = self.coco.getAnnIds(imgIds=[img_id])
                if len(idxs) > 0:
                    self.images.append(img_id)
        else:
            self.images = image_ids
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples)) # Loaded train 12671 samples
    def _to_float(self, x):
        # Round to two decimals (used when serialising detection results).
        return float("{:.2f}".format(x))
    def __len__(self):
        return self.num_samples
# Regex over numpy dtype.str kinds (bytes/str/unicode/object); presumably used
# by a custom collate function elsewhere in the file to reject arrays the
# default tensor conversion cannot handle -- TODO confirm against the caller.
np_str_obj_array_pattern = re.compile(r'[SaUO]')
#cv2.setNumThreads(0) # need in some system to speed training
class MultiPoseDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i: # size > 2 * (border // i)
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = len(anns)
if num_objs > self.max_objs:
num_objs = self.max_objs
anns = np.random.choice(anns, num_objs)
img = cv2.imread(img_path)
img, anns = Data_anchor_sample(img, anns)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.0, 0.05)) # for 512 * 512
#s = s * np.random.choice(np.arange(0.8, 1.3, 0.05)) #for 768 * 768
# s = s * np.random.choice(np.arange(0.3, 0.5, 0.1)) # for larger image, but speed down
s = s
_border = s * np.random.choice([0.1, 0.2, 0.25]) #
w_border = self._get_border(_border, img.shape[1]) # w > 2 * w_border
h_border = self._get_border(_border, img.shape[0]) # h > 2 * h_border
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.aug_rot:
rf = self.opt.rotate
rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
if np.random.random() < self.opt.flip: # opt.flip = 0.5
flipped = True
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = get_affine_transform(
c, s, rot, [self.opt.input_res, self.opt.input_res])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_res, self.opt.input_res),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train' and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_res = self.opt.output_res
num_joints = self.num_joints
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
dense_kps = np.zeros((num_joints, 2, output_res, output_res),
dtype=np.float32)
dense_kps_mask = np.zeros((num_joints, output_res, output_res),
dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
wight_mask = np.ones((self.max_objs), dtype=np.float32)
kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)
hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64)
hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)
draw_gaussian = draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(ann['category_id']) - 1
pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) # (x,y,0/1)
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
pts[:, 0] = width - pts[:, 0] - 1
for e in self.flip_idx:
pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox = np.clip(bbox, 0, output_res - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if (h > 0 and w > 0) or (rot != 0):
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
#radius = self.opt.hm_gauss if self.opt.mse_loss else max(0, int(radius))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
# wh[k] = 1. * w, 1. * h
wh[k] = np.log(1. * w / 4), np.log(1. * h / 4)
ind[k] = ct_int[1] * output_res + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
# if w*h <= 20: # can get what we want sometime, but unstable
# wight_mask[k] = 15
if w*h <= 40:
wight_mask[k] = 5
if w*h <= 20:
wight_mask[k] = 10
if w*h <= 10:
wight_mask[k] = 15
if w*h <= 4:
wight_mask[k] = 0.1
num_kpts = pts[:, 2].sum()
if num_kpts == 0:
hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
hp_radius = max(0, int(hp_radius))
for j in range(num_joints):
if pts[j, 2] > 0:
pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)
if pts[j, 0] >= 0 and pts[j, 0] < output_res and \
pts[j, 1] >= 0 and pts[j, 1] < output_res:
kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
kps[k, j * 2: j * 2 + 1] = kps[k, j * 2: j * 2 + 1] / w
kps[k, j * 2 + 1: j * 2 + 2] = kps[k, j * 2 + 1: j * 2 + 2] / h
kps_mask[k, j * 2: j * 2 + 2] = 1
pt_int = pts[j, :2].astype(np.int32)
hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
hp_mask[k * num_joints + j] = 1
if self.opt.dense_hp:
# must be before draw center hm gaussian
draw_dense_reg(dense_kps[j], hm[cls_id], ct_int,
pts[j, :2] - ct_int, radius, is_offset=True)
draw_gaussian(dense_kps_mask[j], ct_int, radius)
draw_gaussian(hm_hp[j], pt_int, hp_radius)
if ann['bbox'][2]*ann['bbox'][3] <= 8.0:
kps_mask[k, j * 2: j * 2 + 2] = 0
draw_gaussian(hm[cls_id], ct_int, radius)
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1] +
pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
if rot != 0:
hm = hm * 0 + 0.9999
reg_mask *= 0
kps_mask *= 0
ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
'landmarks': kps, 'hps_mask': kps_mask, 'wight_mask': wight_mask}
if self.opt.dense_hp:
dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res)
dense_kps_mask = dense_kps_mask.reshape(
num_joints, 1, output_res, output_res)
dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1)
dense_kps_mask = dense_kps_mask.reshape(
num_joints * 2, output_res, output_res)
ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask})
del ret['hps'], ret['hps_mask']
if self.opt.reg_offset:
ret.update({'hm_offset': reg})
if self.opt.hm_hp:
ret.update({'hm_hp': hm_hp})
if self.opt.reg_hp_offset:
ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask})
return ret
_use_shared_memory = False
error_msg_fmt = "batch must contain tensors, numbers, dicts or lists; found {}"
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def default_collate(batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(error_msg_fmt.format(elem.dtype))
return default_collate([torch.from_numpy(b) for b in batch])
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(batch[0], int_classes):
return torch.tensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], container_abcs.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], tuple) and hasattr(batch[0], '_fields'): # namedtuple
return type(batch[0])(*(default_collate(samples) for samples in zip(*batch)))
elif isinstance(batch[0], container_abcs.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError((error_msg_fmt.format(type(batch[0]))))
def multipose_collate(batch):
objects_dims = [d.shape[0] for d in batch]
index = objects_dims.index(max(objects_dims))
# one_dim = True if len(batch[0].shape) == 1 else False
res = []
for i in range(len(batch)):
tres = np.zeros_like(batch[index], dtype=batch[index].dtype)
tres[:batch[i].shape[0]] = batch[i]
res.append(tres)
return res
def Multiposebatch(batch):
sample_batch = {}
for key in batch[0]:
if key in ['hm', 'input']:
sample_batch[key] = default_collate([d[key] for d in batch])
else:
align_batch = multipose_collate([d[key] for d in batch])
sample_batch[key] = default_collate(align_batch)
return sample_batch
| [
"numpy.clip",
"numpy.sqrt",
"re.compile",
"numpy.log",
"torch.from_numpy",
"numpy.array",
"torch.utils.data.distributed.DistributedSampler",
"numpy.sin",
"numpy.random.RandomState",
"numpy.arange",
"numpy.random.random",
"numpy.asarray",
"pycocotools.coco.COCO",
"numpy.exp",
"numpy.dot",... | [((9777, 9797), 're.compile', 're.compile', (['"""[SaUO]"""'], {}), "('[SaUO]')\n", (9787, 9797), False, 'import re\n'), ((601, 628), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (618, 628), False, 'import torch\n'), ((742, 855), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['train_dataset'], {'num_replicas': 'args.group_size', 'rank': 'args.rank'}), '(train_dataset, num_replicas\n =args.group_size, rank=args.rank)\n', (789, 855), False, 'import torch\n'), ((870, 1082), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'opt.master_batch_size', 'shuffle': '(False)', 'num_workers': 'opt.num_workers', 'pin_memory': '(True)', 'drop_last': '(True)', 'collate_fn': 'Multiposebatch', 'sampler': 'train_sampler'}), '(train_dataset, batch_size=opt.master_batch_size,\n shuffle=False, num_workers=opt.num_workers, pin_memory=True, drop_last=\n True, collate_fn=Multiposebatch, sampler=train_sampler)\n', (897, 1082), False, 'import torch\n'), ((1421, 1456), 'numpy.asarray', 'np.asarray', (['boxes'], {'dtype': 'np.float32'}), '(boxes, dtype=np.float32)\n', (1431, 1456), True, 'import numpy as np\n'), ((2498, 2527), 'random.choice', 'random.choice', (['interp_methods'], {}), '(interp_methods)\n', (2511, 2527), False, 'import random\n'), ((2540, 2618), 'cv2.resize', 'cv2.resize', (['image', 'None', 'None'], {'fx': 'ratio', 'fy': 'ratio', 'interpolation': 'interp_method'}), '(image, None, None, fx=ratio, fy=ratio, interpolation=interp_method)\n', (2550, 2618), False, 'import cv2\n'), ((3760, 3794), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float32'}), '([0, 0], dtype=np.float32)\n', (3768, 3794), True, 'import numpy as np\n'), ((4161, 4200), 'numpy.array', 'np.array', (['[0, dst_w * -0.5]', 'np.float32'], {}), '([0, dst_w * -0.5], np.float32)\n', (4169, 4200), True, 'import numpy as np\n'), ((4212, 4246), 'numpy.zeros', 
'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (4220, 4246), True, 'import numpy as np\n'), ((4257, 4291), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (4265, 4291), True, 'import numpy as np\n'), ((4948, 4965), 'numpy.dot', 'np.dot', (['t', 'new_pt'], {}), '(t, new_pt)\n', (4954, 4965), True, 'import numpy as np\n'), ((5022, 5061), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (5034, 5061), False, 'import cv2\n'), ((5190, 5220), 'numpy.dot', 'np.dot', (['eigvec', '(eigval * alpha)'], {}), '(eigvec, eigval * alpha)\n', (5196, 5220), True, 'import numpy as np\n'), ((5846, 5871), 'random.shuffle', 'random.shuffle', (['functions'], {}), '(functions)\n', (5860, 5871), False, 'import random\n'), ((6085, 6163), 'numpy.array', 'np.array', (['[box[0], box[1], box[0] + box[2], box[1] + box[3]]'], {'dtype': 'np.float32'}), '([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n', (6093, 6163), True, 'import numpy as np\n'), ((6363, 6393), 'numpy.sqrt', 'np.sqrt', (['(b1 ** 2 - 4 * a1 * c1)'], {}), '(b1 ** 2 - 4 * a1 * c1)\n', (6370, 6393), True, 'import numpy as np\n'), ((6508, 6538), 'numpy.sqrt', 'np.sqrt', (['(b2 ** 2 - 4 * a2 * c2)'], {}), '(b2 ** 2 - 4 * a2 * c2)\n', (6515, 6538), True, 'import numpy as np\n'), ((6682, 6712), 'numpy.sqrt', 'np.sqrt', (['(b3 ** 2 - 4 * a3 * c3)'], {}), '(b3 ** 2 - 4 * a3 * c3)\n', (6689, 6712), True, 'import numpy as np\n'), ((6882, 6928), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2 * sigma * sigma))\n', (6888, 6928), True, 'import numpy as np\n'), ((3288, 3339), 'numpy.array', 'np.array', (['[-direct[1], direct[0]]'], {'dtype': 'np.float32'}), '([-direct[1], direct[0]], dtype=np.float32)\n', (3296, 3339), True, 'import numpy as np\n'), ((3388, 3403), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (3394, 3403), 
True, 'import numpy as np\n'), ((3405, 3420), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (3411, 3420), True, 'import numpy as np\n'), ((3919, 3961), 'numpy.array', 'np.array', (['[scale, scale]'], {'dtype': 'np.float32'}), '([scale, scale], dtype=np.float32)\n', (3927, 3961), True, 'import numpy as np\n'), ((4447, 4495), 'numpy.array', 'np.array', (['[dst_w * 0.5, dst_h * 0.5]', 'np.float32'], {}), '([dst_w * 0.5, dst_h * 0.5], np.float32)\n', (4455, 4495), True, 'import numpy as np\n'), ((4886, 4933), 'numpy.array', 'np.array', (['[pt[0], pt[1], 1.0]'], {'dtype': 'np.float32'}), '([pt[0], pt[1], 1.0], dtype=np.float32)\n', (4894, 4933), True, 'import numpy as np\n'), ((7592, 7659), 'numpy.maximum', 'np.maximum', (['masked_heatmap', '(masked_gaussian * k)'], {'out': 'masked_heatmap'}), '(masked_heatmap, masked_gaussian * k, out=masked_heatmap)\n', (7602, 7659), True, 'import numpy as np\n'), ((8527, 8564), 'os.path.join', 'os.path.join', (['self.data_dir', '"""images"""'], {}), "(self.data_dir, 'images')\n", (8539, 8564), False, 'import os\n'), ((8729, 8755), 'numpy.random.RandomState', 'np.random.RandomState', (['(123)'], {}), '(123)\n', (8750, 8755), True, 'import numpy as np\n'), ((8776, 8839), 'numpy.array', 'np.array', (['[0.2141788, 0.01817699, 0.00341571]'], {'dtype': 'np.float32'}), '([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)\n', (8784, 8839), True, 'import numpy as np\n'), ((8889, 9040), 'numpy.array', 'np.array', (['[[-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, -\n 0.81221408], [-0.56089297, 0.71832671, 0.41158938]]'], {'dtype': 'np.float32'}), '([[-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, \n -0.81221408], [-0.56089297, 0.71832671, 0.41158938]], dtype=np.float32)\n', (8897, 9040), True, 'import numpy as np\n'), ((9199, 9225), 'pycocotools.coco.COCO', 'coco.COCO', (['self.annot_path'], {}), '(self.annot_path)\n', (9208, 9225), True, 'import pycocotools.coco as coco\n'), ((9947, 10025), 
'numpy.array', 'np.array', (['[box[0], box[1], box[0] + box[2], box[1] + box[3]]'], {'dtype': 'np.float32'}), '([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n', (9955, 10025), True, 'import numpy as np\n'), ((10368, 10405), 'os.path.join', 'os.path.join', (['self.img_dir', 'file_name'], {}), '(self.img_dir, file_name)\n', (10380, 10405), False, 'import os\n'), ((10650, 10670), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (10660, 10670), False, 'import cv2\n'), ((10774, 10842), 'numpy.array', 'np.array', (['[img.shape[1] / 2.0, img.shape[0] / 2.0]'], {'dtype': 'np.float32'}), '([img.shape[1] / 2.0, img.shape[0] / 2.0], dtype=np.float32)\n', (10782, 10842), True, 'import numpy as np\n'), ((12271, 12373), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'trans_input', '(self.opt.input_res, self.opt.input_res)'], {'flags': 'cv2.INTER_LINEAR'}), '(img, trans_input, (self.opt.input_res, self.opt.input_res),\n flags=cv2.INTER_LINEAR)\n', (12285, 12373), False, 'import cv2\n'), ((12900, 12970), 'numpy.zeros', 'np.zeros', (['(self.num_classes, output_res, output_res)'], {'dtype': 'np.float32'}), '((self.num_classes, output_res, output_res), dtype=np.float32)\n', (12908, 12970), True, 'import numpy as np\n'), ((12983, 13047), 'numpy.zeros', 'np.zeros', (['(num_joints, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_joints, output_res, output_res), dtype=np.float32)\n', (12991, 13047), True, 'import numpy as np\n'), ((13064, 13131), 'numpy.zeros', 'np.zeros', (['(num_joints, 2, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_joints, 2, output_res, output_res), dtype=np.float32)\n', (13072, 13131), True, 'import numpy as np\n'), ((13179, 13243), 'numpy.zeros', 'np.zeros', (['(num_joints, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_joints, output_res, output_res), dtype=np.float32)\n', (13187, 13243), True, 'import numpy as np\n'), ((13284, 13330), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], 
{'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (13292, 13330), True, 'import numpy as np\n'), ((13341, 13400), 'numpy.zeros', 'np.zeros', (['(self.max_objs, num_joints * 2)'], {'dtype': 'np.float32'}), '((self.max_objs, num_joints * 2), dtype=np.float32)\n', (13349, 13400), True, 'import numpy as np\n'), ((13411, 13457), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (13419, 13457), True, 'import numpy as np\n'), ((13468, 13507), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (13476, 13507), True, 'import numpy as np\n'), ((13526, 13565), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.uint8'}), '(self.max_objs, dtype=np.uint8)\n', (13534, 13565), True, 'import numpy as np\n'), ((13585, 13625), 'numpy.ones', 'np.ones', (['self.max_objs'], {'dtype': 'np.float32'}), '(self.max_objs, dtype=np.float32)\n', (13592, 13625), True, 'import numpy as np\n'), ((13643, 13705), 'numpy.zeros', 'np.zeros', (['(self.max_objs, self.num_joints * 2)'], {'dtype': 'np.uint8'}), '((self.max_objs, self.num_joints * 2), dtype=np.uint8)\n', (13651, 13705), True, 'import numpy as np\n'), ((13722, 13781), 'numpy.zeros', 'np.zeros', (['(self.max_objs * num_joints, 2)'], {'dtype': 'np.float32'}), '((self.max_objs * num_joints, 2), dtype=np.float32)\n', (13730, 13781), True, 'import numpy as np\n'), ((13795, 13847), 'numpy.zeros', 'np.zeros', (['(self.max_objs * num_joints)'], {'dtype': 'np.int64'}), '(self.max_objs * num_joints, dtype=np.int64)\n', (13803, 13847), True, 'import numpy as np\n'), ((13864, 13916), 'numpy.zeros', 'np.zeros', (['(self.max_objs * num_joints)'], {'dtype': 'np.int64'}), '(self.max_objs * num_joints, dtype=np.int64)\n', (13872, 13916), True, 'import numpy as np\n'), ((19077, 19107), 'torch.stack', 'torch.stack', (['batch', '(0)'], {'out': 'out'}), '(batch, 0, out=out)\n', (19088, 19107), False, 
'import torch\n'), ((20766, 20819), 'numpy.zeros_like', 'np.zeros_like', (['batch[index]'], {'dtype': 'batch[index].dtype'}), '(batch[index], dtype=batch[index].dtype)\n', (20779, 20819), True, 'import numpy as np\n'), ((2215, 2236), 'random.uniform', 'random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (2229, 2236), False, 'import random\n'), ((4665, 4680), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (4675, 4680), True, 'import numpy as np\n'), ((4682, 4697), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (4692, 4697), True, 'import numpy as np\n'), ((4748, 4763), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (4758, 4763), True, 'import numpy as np\n'), ((4765, 4780), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (4775, 4780), True, 'import numpy as np\n'), ((7883, 7947), 'numpy.array', 'np.array', (['[0.40789654, 0.44719302, 0.47026115]'], {'dtype': 'np.float32'}), '([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)\n', (7891, 7947), True, 'import numpy as np\n'), ((7993, 8057), 'numpy.array', 'np.array', (['[0.28863828, 0.27408164, 0.27809835]'], {'dtype': 'np.float32'}), '([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)\n', (8001, 8057), True, 'import numpy as np\n'), ((10606, 10638), 'numpy.random.choice', 'np.random.choice', (['anns', 'num_objs'], {}), '(anns, num_objs)\n', (10622, 10638), True, 'import numpy as np\n'), ((14543, 14575), 'numpy.clip', 'np.clip', (['bbox', '(0)', '(output_res - 1)'], {}), '(bbox, 0, output_res - 1)\n', (14550, 14575), True, 'import numpy as np\n'), ((17638, 17694), 'numpy.concatenate', 'np.concatenate', (['[dense_kps_mask, dense_kps_mask]'], {'axis': '(1)'}), '([dense_kps_mask, dense_kps_mask], axis=1)\n', (17652, 17694), True, 'import numpy as np\n'), ((11479, 11540), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'w_border', 'high': '(img.shape[1] - w_border)'}), '(low=w_border, high=img.shape[1] - w_border)\n', (11496, 11540), True, 'import numpy as 
np\n'), ((11556, 11617), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'h_border', 'high': '(img.shape[0] - h_border)'}), '(low=h_border, high=img.shape[0] - h_border)\n', (11573, 11617), True, 'import numpy as np\n'), ((11887, 11905), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (11903, 11905), True, 'import numpy as np\n'), ((12022, 12040), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (12038, 12040), True, 'import numpy as np\n'), ((14863, 14941), 'numpy.array', 'np.array', (['[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]'], {'dtype': 'np.float32'}), '([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)\n', (14871, 14941), True, 'import numpy as np\n'), ((19813, 19853), 'torch.tensor', 'torch.tensor', (['batch'], {'dtype': 'torch.float64'}), '(batch, dtype=torch.float64)\n', (19825, 19853), False, 'import torch\n'), ((6939, 6956), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (6947, 6956), True, 'import numpy as np\n'), ((11271, 11305), 'numpy.random.choice', 'np.random.choice', (['[0.1, 0.2, 0.25]'], {}), '([0.1, 0.2, 0.25])\n', (11287, 11305), True, 'import numpy as np\n'), ((14131, 14169), 'numpy.array', 'np.array', (["ann['keypoints']", 'np.float32'], {}), "(ann['keypoints'], np.float32)\n", (14139, 14169), True, 'import numpy as np\n'), ((15039, 15058), 'numpy.log', 'np.log', (['(1.0 * w / 4)'], {}), '(1.0 * w / 4)\n', (15045, 15058), True, 'import numpy as np\n'), ((15059, 15078), 'numpy.log', 'np.log', (['(1.0 * h / 4)'], {}), '(1.0 * h / 4)\n', (15065, 15078), True, 'import numpy as np\n'), ((19913, 19932), 'torch.tensor', 'torch.tensor', (['batch'], {}), '(batch)\n', (19925, 19932), False, 'import torch\n'), ((11020, 11045), 'numpy.arange', 'np.arange', (['(0.6)', '(1.0)', '(0.05)'], {}), '(0.6, 1.0, 0.05)\n', (11029, 11045), True, 'import numpy as np\n'), ((11977, 11994), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (11992, 11994), True, 'import numpy as 
np\n'), ((14702, 14714), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (14711, 14714), False, 'import math\n'), ((14716, 14728), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (14725, 14728), False, 'import math\n'), ((15649, 15661), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (15658, 15661), False, 'import math\n'), ((15663, 15675), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (15672, 15675), False, 'import math\n'), ((19529, 19548), 'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (19545, 19548), False, 'import torch\n'), ((11714, 11731), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (11729, 11731), True, 'import numpy as np\n'), ((11777, 11794), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (11792, 11794), True, 'import numpy as np\n'), ((11836, 11853), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (11851, 11853), True, 'import numpy as np\n')] |
import numpy as np
from keras_htr import compute_output_shape
from keras_htr.adapters.base import BatchAdapter
import tensorflow as tf
class CTCAdapter(BatchAdapter):
def compute_input_lengths(self, image_arrays):
batch_size = len(image_arrays)
lstm_input_shapes = [compute_output_shape(a.shape) for a in image_arrays]
widths = [width for width, channels in lstm_input_shapes]
return np.array(widths, dtype=np.int32).reshape(batch_size, 1)
def adapt_batch(self, batch):
image_arrays, labellings = batch
current_batch_size = len(labellings)
target_width = max([a.shape[1] for a in image_arrays])
padded_arrays = self._pad_image_arrays(image_arrays, target_width)
X = np.array(padded_arrays).reshape(current_batch_size, *padded_arrays[0].shape)
target_length = max([len(labels) for labels in labellings])
padded_labellings = self._pad_labellings(labellings, target_length)
labels = np.array(padded_labellings, dtype=np.int32).reshape(current_batch_size, -1)
input_lengths = self.compute_input_lengths(image_arrays)
label_lengths = np.array([len(labelling) for labelling in labellings],
dtype=np.int32).reshape(current_batch_size, 1)
return [X, labels, input_lengths, label_lengths], labels
def adapt_x(self, image):
a = tf.keras.preprocessing.image.img_to_array(image)
x = a / 255.0
X = np.array(x).reshape(1, *x.shape)
input_lengths = self.compute_input_lengths(X)
return X, input_lengths
| [
"numpy.array",
"tensorflow.keras.preprocessing.image.img_to_array",
"keras_htr.compute_output_shape"
] | [((1404, 1452), 'tensorflow.keras.preprocessing.image.img_to_array', 'tf.keras.preprocessing.image.img_to_array', (['image'], {}), '(image)\n', (1445, 1452), True, 'import tensorflow as tf\n'), ((289, 318), 'keras_htr.compute_output_shape', 'compute_output_shape', (['a.shape'], {}), '(a.shape)\n', (309, 318), False, 'from keras_htr import compute_output_shape\n'), ((423, 455), 'numpy.array', 'np.array', (['widths'], {'dtype': 'np.int32'}), '(widths, dtype=np.int32)\n', (431, 455), True, 'import numpy as np\n'), ((753, 776), 'numpy.array', 'np.array', (['padded_arrays'], {}), '(padded_arrays)\n', (761, 776), True, 'import numpy as np\n'), ((993, 1036), 'numpy.array', 'np.array', (['padded_labellings'], {'dtype': 'np.int32'}), '(padded_labellings, dtype=np.int32)\n', (1001, 1036), True, 'import numpy as np\n'), ((1488, 1499), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1496, 1499), True, 'import numpy as np\n')] |
##############################################################
#
# ccm_unred: Deredden a flux vector using the CCM 1989 parameterization
#
# Cardelli_coeff: Calculate a,b and a+b/Rv for the Cardelli dust
# law given a wavelength lam in angstroms
#
# calc_Av_from_Balmer_decrement: derive extinction using Balmer decrement (Ha/Hb)
#
#
##############################################################
def ccm_unred(wave, flux, av, **kwargs):
"""
NAME:
CCM_UNRED
PURPOSE:
Deredden a flux vector using the CCM 1989 parameterization
EXPLANATION:
The reddening curve is that of Cardelli, Clayton, and Mathis (1989 ApJ.
345, 245), including the update for the near-UV given by O'Donnell
(1994, ApJ, 422, 158). Parameterization is valid from the IR to the
far-UV (3.5 microns to 0.1 microns).
Users might wish to consider using the alternate procedure FM_UNRED
which uses the extinction curve of Fitzpatrick (1999).
CALLING SEQUENCE:
ccm_unred(wave, flux, ebv [, R_V = ])
INPUT:
WAVE - wavelength vector (Angstroms)
FLUX - calibrated flux vector, same number of elements as WAVE
If only 3 parameters are supplied, then this vector will
updated on output to contain the dereddened flux.
EBV - color excess E(B-V), scalar. If a negative EBV is supplied,
then fluxes will be reddened rather than deredenned.
OUTPUT:
FUNRED - unreddened flux vector, same units and number of elements
as FLUX
OPTIONAL INPUT KEYWORD
R_V - scalar specifying the ratio of total selective extinction
R(V) = A(V) / E(B - V). If not specified, then R_V = 3.1
Extreme values of R(V) range from 2.75 to 5.3
EXAMPLE:
Determine how a flat spectrum (in wavelength) between 1200 A and 3200 A
is altered by a reddening of E(B-V) = 0.1. Assume an "average"
reddening for the diffuse interstellar medium (R(V) = 3.1)
>>> w = 1200 + arange(40)*50 #Create a wavelength vector
>>> f = w*0 + 1 #Create a "flat" flux vector
>>> fnew = ccm_unred(w, f, -0.1) #Redden (negative E(B-V)) flux vector
>>> plot(w,fnew)
NOTES:
(1) The CCM curve shows good agreement with the Savage & Mathis (1979)
ultraviolet curve shortward of 1400 A, but is probably
preferable between 1200 and 1400 A.
(2) Many sightlines with peculiar ultraviolet interstellar extinction
can be represented with a CCM curve, if the proper value of
R(V) is supplied.
(3) Curve is extrapolated between 912 and 1000 A as suggested by
Longo et al. (1989, ApJ, 339,474)
(4) Use the 4 parameter calling sequence if you wish to save the
original flux vector.
(5) Valencic et al. (2004, ApJ, 616, 912) revise the ultraviolet CCM
curve (3.3 -- 8.0 um-1). But since their revised curve does
not connect smoothly with longer and shorter wavelengths, it is
not included here.
REQUIRED MODULES:
scipy, numpy
REVISION HISTORY:
Written <NAME> Hughes/STX January, 1992
Extrapolate curve for wavelengths between 900 and 1000 A Dec. 1993
Use updated coefficients for near-UV from O'Donnell Feb 1994
Allow 3 parameter calling sequence April 1998
Converted to IDLV5.0 April 1998
Ported to Python <NAME> August 2012
"""
# Import modules
import numpy as n
# Set defaults
R_V = 3.1
for key in kwargs:
if key.lower() == 'r_v':
R_V = kwargs[key]
if isinstance(wave, int) or isinstance(wave, float):
x = 10000. / n.array([wave]) # Convert to inverse microns
else:
x = 10000. / n.array(wave) # Convert to inverse microns
npts = len( x )
a = n.zeros((npts))
b = n.zeros((npts))
###############################
good = n.where( (x > 0.3) & (x < 1.1) ) # Infrared
Ngood = len(x[good])
if Ngood > 0:
a[good] = 0.574 * x[good]**(1.61)
b[good] = -0.527 * x[good]**(1.61)
###############################
good = n.where( (x >= 1.1) & (x < 3.3) ) # Optical/NIR
Ngood = len(good[0])
if Ngood > 0: # Use new constants from O'Donnell (1994)
y = x[good] - 1.82
#c1 = n.array([ 0.32999, -0.77530, 0.01979, 0.72085, # Original
# -0.02427, -0.50447, 0.17699, 1. ]) # coefficients
#c2 = n.array([ -2.09002, 5.30260, -0.62251, -5.38434, # from CCM89
# 1.07233, 2.28305, 1.41338, 0. ])
c1 = n.array([ -0.505 , 1.647, -0.827, -1.718, # New coefficients
1.137, 0.701, -0.609, 0.104, 1. ]) # from O'Donnell
c2 = n.array([ 3.347, -10.805, 5.491, 11.102, # (1994)
-7.985, -3.989, 2.908, 1.952, 0. ])
a[good] = n.polyval(c1, y)
b[good] = n.polyval(c2, y)
###############################
good = n.where( (x >= 3.3) & (x < 8) ) # Mid-UV
Ngood = len(x[good])
if Ngood > 0:
y = x[good]
F_a = n.zeros((Ngood))
F_b = n.zeros((Ngood))
good1 = n.where( (y > 5.9) )
Ngood1 = len(y[good1])
if Ngood1 > 0:
y1 = y[good1] - 5.9
F_a[good1] = -0.04473 * y1**2 - 0.009779 * y1**3
F_b[good1] = 0.2130 * y1**2 + 0.1207 * y1**3
a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a
b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b
###############################
good = n.where( (x >= 8) & (x < 11) ) #Far-UV
Ngood = len(x[good])
if Ngood > 0:
y = x[good] - 8.
c1 = [ -0.070, 0.137, -0.628, -1.073 ]
c2 = [ 0.374, -0.420, 4.257, 13.670 ]
a[good] = n.polyval(c1, y)
b[good] = n.polyval(c2, y)
###############################
# Now apply extinction correction to input flux vector
A_V = av
A_lambda = A_V * (a + b / R_V)
return flux * 10.**(0.4 * A_lambda)
"""
Calculate a,b and a+b/Rv for the Cardelli dust law given a wavelength lam in angstroms
"""
def Cardelli_coeff(lamb,Rv):
import numpy as np
scalar=np.isscalar(lamb)
x=1e4/np.array(lamb,ndmin=1) #CCM x is 1/microns
a,b=np.ndarray(x.shape,x.dtype),np.ndarray(x.shape,x.dtype)
if any((x<0.3)|(10<x)):
raise ValueError('some wavelengths outside CCM 89 extinction curve range')
irs=(0.3 <= x) & (x <= 1.1)
opts = (1.1 <= x) & (x <= 3.3)
nuv1s = (3.3 <= x) & (x <= 5.9)
nuv2s = (5.9 <= x) & (x <= 8)
fuvs = (8 <= x) & (x <= 10)
#CCM Infrared
a[irs]=.574*x[irs]**1.61
b[irs]=-0.527*x[irs]**1.61
#CCM NIR/optical
a[opts]=np.polyval((.32999,-.7753,.01979,.72085,-.02427,-.50447,.17699,1),x[opts]-1.82)
b[opts]=np.polyval((-2.09002,5.3026,-.62251,-5.38434,1.07233,2.28305,1.41338,0),x[opts]-1.82)
#CCM NUV
a[nuv1s]=1.752-.316*x[nuv1s]-0.104/((x[nuv1s]-4.67)**2+.341)
b[nuv1s]=-3.09+1.825*x[nuv1s]+1.206/((x[nuv1s]-4.62)**2+.263)
y=x[nuv2s]-5.9
Fa=-.04473*y**2-.009779*y**3
Fb=-.2130*y**2-.1207*y**3
a[nuv2s]=1.752-.316*x[nuv2s]-0.104/((x[nuv2s]-4.67)**2+.341)+Fa
b[nuv2s]=-3.09+1.825*x[nuv2s]+1.206/((x[nuv2s]-4.62)**2+.263)+Fb
#CCM FUV
a[fuvs]=np.polyval((-.070,.137,-.628,-1.073),x[fuvs]-8)
b[fuvs]=np.polyval((.374,-.42,4.257,13.67),x[fuvs]-8)
AloAv = a+b/Rv
if scalar:
return a[0],b[0],AloAv[0]
else:
return a,b,AloAv
def calc_Av_from_Balmer_decrement(halpha,hbeta,halpha_err=None,hbeta_err=None,recom_ratio=2.86,R_v=3.1):
import numpy as np
halpha_ext_ratio = cardelli(6562.81, R_v)
num = recom_ratio * hbeta / halpha
try:
A_v = 2.5 * math.log10(num) / (halpha_ext_ratio - hbeta_ext_ratio)
except:
A_v = 999.9
if (halpha_err != None) and (hbeta_err != None):
dA_v = abs(2.5 / np.log(10) / (halpha_ext_ratio - hbeta_ext_ratio) * ( (halpha_err/halpha)**2. + (hbeta_err/hbeta)**2.) )**0.5
else:
dA_v = None
return A_v, dA_v
| [
"numpy.isscalar",
"numpy.where",
"numpy.log",
"numpy.array",
"numpy.zeros",
"numpy.polyval",
"numpy.ndarray"
] | [((4029, 4042), 'numpy.zeros', 'n.zeros', (['npts'], {}), '(npts)\n', (4036, 4042), True, 'import numpy as n\n'), ((4055, 4068), 'numpy.zeros', 'n.zeros', (['npts'], {}), '(npts)\n', (4062, 4068), True, 'import numpy as n\n'), ((4124, 4154), 'numpy.where', 'n.where', (['((x > 0.3) & (x < 1.1))'], {}), '((x > 0.3) & (x < 1.1))\n', (4131, 4154), True, 'import numpy as n\n'), ((4352, 4383), 'numpy.where', 'n.where', (['((x >= 1.1) & (x < 3.3))'], {}), '((x >= 1.1) & (x < 3.3))\n', (4359, 4383), True, 'import numpy as n\n'), ((5291, 5320), 'numpy.where', 'n.where', (['((x >= 3.3) & (x < 8))'], {}), '((x >= 3.3) & (x < 8))\n', (5298, 5320), True, 'import numpy as n\n'), ((5932, 5960), 'numpy.where', 'n.where', (['((x >= 8) & (x < 11))'], {}), '((x >= 8) & (x < 11))\n', (5939, 5960), True, 'import numpy as n\n'), ((6571, 6588), 'numpy.isscalar', 'np.isscalar', (['lamb'], {}), '(lamb)\n', (6582, 6588), True, 'import numpy as np\n'), ((7060, 7160), 'numpy.polyval', 'np.polyval', (['(0.32999, -0.7753, 0.01979, 0.72085, -0.02427, -0.50447, 0.17699, 1)', '(x[opts] - 1.82)'], {}), '((0.32999, -0.7753, 0.01979, 0.72085, -0.02427, -0.50447, 0.17699,\n 1), x[opts] - 1.82)\n', (7070, 7160), True, 'import numpy as np\n'), ((7149, 7249), 'numpy.polyval', 'np.polyval', (['(-2.09002, 5.3026, -0.62251, -5.38434, 1.07233, 2.28305, 1.41338, 0)', '(x[opts] - 1.82)'], {}), '((-2.09002, 5.3026, -0.62251, -5.38434, 1.07233, 2.28305, 1.41338,\n 0), x[opts] - 1.82)\n', (7159, 7249), True, 'import numpy as np\n'), ((7596, 7651), 'numpy.polyval', 'np.polyval', (['(-0.07, 0.137, -0.628, -1.073)', '(x[fuvs] - 8)'], {}), '((-0.07, 0.137, -0.628, -1.073), x[fuvs] - 8)\n', (7606, 7651), True, 'import numpy as np\n'), ((7653, 7706), 'numpy.polyval', 'np.polyval', (['(0.374, -0.42, 4.257, 13.67)', '(x[fuvs] - 8)'], {}), '((0.374, -0.42, 4.257, 13.67), x[fuvs] - 8)\n', (7663, 7706), True, 'import numpy as np\n'), ((4876, 4950), 'numpy.array', 'n.array', (['[-0.505, 1.647, -0.827, -1.718, 1.137, 
0.701, -0.609, 0.104, 1.0]'], {}), '([-0.505, 1.647, -0.827, -1.718, 1.137, 0.701, -0.609, 0.104, 1.0])\n', (4883, 4950), True, 'import numpy as n\n'), ((5048, 5123), 'numpy.array', 'n.array', (['[3.347, -10.805, 5.491, 11.102, -7.985, -3.989, 2.908, 1.952, 0.0]'], {}), '([3.347, -10.805, 5.491, 11.102, -7.985, -3.989, 2.908, 1.952, 0.0])\n', (5055, 5123), True, 'import numpy as n\n'), ((5190, 5206), 'numpy.polyval', 'n.polyval', (['c1', 'y'], {}), '(c1, y)\n', (5199, 5206), True, 'import numpy as n\n'), ((5225, 5241), 'numpy.polyval', 'n.polyval', (['c2', 'y'], {}), '(c2, y)\n', (5234, 5241), True, 'import numpy as n\n'), ((5425, 5439), 'numpy.zeros', 'n.zeros', (['Ngood'], {}), '(Ngood)\n', (5432, 5439), True, 'import numpy as n\n'), ((5456, 5470), 'numpy.zeros', 'n.zeros', (['Ngood'], {}), '(Ngood)\n', (5463, 5470), True, 'import numpy as n\n'), ((5489, 5505), 'numpy.where', 'n.where', (['(y > 5.9)'], {}), '(y > 5.9)\n', (5496, 5505), True, 'import numpy as n\n'), ((6173, 6189), 'numpy.polyval', 'n.polyval', (['c1', 'y'], {}), '(c1, y)\n', (6182, 6189), True, 'import numpy as n\n'), ((6208, 6224), 'numpy.polyval', 'n.polyval', (['c2', 'y'], {}), '(c2, y)\n', (6217, 6224), True, 'import numpy as n\n'), ((6596, 6619), 'numpy.array', 'np.array', (['lamb'], {'ndmin': '(1)'}), '(lamb, ndmin=1)\n', (6604, 6619), True, 'import numpy as np\n'), ((6644, 6672), 'numpy.ndarray', 'np.ndarray', (['x.shape', 'x.dtype'], {}), '(x.shape, x.dtype)\n', (6654, 6672), True, 'import numpy as np\n'), ((6672, 6700), 'numpy.ndarray', 'np.ndarray', (['x.shape', 'x.dtype'], {}), '(x.shape, x.dtype)\n', (6682, 6700), True, 'import numpy as np\n'), ((3851, 3866), 'numpy.array', 'n.array', (['[wave]'], {}), '([wave])\n', (3858, 3866), True, 'import numpy as n\n'), ((3940, 3953), 'numpy.array', 'n.array', (['wave'], {}), '(wave)\n', (3947, 3953), True, 'import numpy as n\n'), ((8167, 8177), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (8173, 8177), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Tarea_2.ipynb
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1zWnlDFVNS9UkQ9mCQwPC7u-tTaHEVeox
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# --- Part 1: build and plot a 6x6 node grid over a 0.05 m x 0.05 m plate ---
# data
Lm=0.05 # length in x
Ln=0.05 # length in y
# boundary conditions
# NOTE(review): Ts/Td/Tz/Tn (and dm/dn below) are defined but never used later.
Ts=29
Td=25
Tz=25
Tn=25
# discretize
Nm=6
dm=Lm/(Nm-1)
m=np.linspace(0,Lm,Nm)
Nn=6
dn=Ln/(Nn-1)
n=np.linspace(0,Ln,Nn)
mmesh,nmesh=np.meshgrid(m,n,indexing='ij')
plt.scatter(mmesh,nmesh)
# --- Part 2: iterative relaxation of a two-material ("chocolate"/"mould")
# temperature field on a 10x10 grid. The h*dx/K*Tinf terms below look like
# Robin (convective) boundary conditions -- naming inferred, confirm.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
h=10
Tinf=4
Kc=0.09   # conductivity-like constant for the "chocolate" region
Km=0.20   # conductivity-like constant for the "mould" region
Lx=0.04
Ly=0.04
dx=1
# discretize
N=10 # nodes in x
M=10 # nodes in y
x=np.linspace(0,Lx,N)
y=np.linspace(0,Ly,M)
#dx=Lx/(N-1)
#dy=Ly/(M-1)
xmesh,ymesh=np.meshgrid(x,y)
p=1
T=np.zeros((M,N))
# initial guess: 30 everywhere (boundary nodes are overwritten below)
for i in range(0,N):
  for j in range(0,M):
    T[i][j]=30
# top-right corner node (uses the h*dx/Kc boundary term)
T[0][N-1]=(2*T[1][N-1]+T[0][N-2]+h*dx/Kc*Tinf)/((h*dx)/Kc+3)
# bottom surface held at a fixed temperature of 4
for i in range(0,N):
  T[M-1][i]=4
# left boundary nodes (boundary term uses Km)
for i in range(1,M-1):
  T[i][0]=(T[i-1][0]+2*T[i][1]+T[i+1][0]+2*h*dx*Tinf/Km)/((2*h*dx/Km)+4)
# top boundary nodes (boundary term uses Kc)
for i in range(1,N-1):
  T[0][i]=(T[0][i+1]+T[1][i]+T[0][i-1]+2*h*dx*Tinf/Kc)/((2*h*dx/Kc)+4)
# top-left corner: interface corner mixing both conductivities
T[0][0]=(Kc*T[0][1]+Km*T[1][0]+2*h*dx*Tinf)/(2*h*dx+Kc+Km)
# NOTE(review): p=1 means this relaxation loop runs only twice, which is
# unlikely to reach steady state -- confirm the intended iteration count.
while p>=0:
  # right boundary
  for i in range(1,M-1):
    T[i][N-1]=(T[i-1][N-1]+2*T[i][N-2]+T[i+1][N-1])/4
  # interior nodes: 4-point average, split by the i/j diagonal
  for i in range(1,M-1):
    for j in range(1,N-1):
      # "chocolate" region (i<j)
      if i<j:
        T[i][j]=(T[i][j+1]+T[i+1][j]+T[i][j-1]+T[i-1][j])/4
      # "mould" region (i>j)
      elif i>j:
        T[i][j]=(T[i][j+1]+T[i+1][j]+T[i][j-1]+T[i-1][j])/4
  # material interface along the diagonal: conductivity-weighted average
  for i in range(1,M-1):
    T[i][i]=(Kc*(T[i][i+1]+T[i-1][i])+Km*(T[i][i-1]+T[i+1][i]))/(2*(Kc+Km))
  p-=1
#print(T)
sns.heatmap(T)
| [
"seaborn.set",
"seaborn.heatmap",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.scatter",
"numpy.meshgrid"
] | [((269, 278), 'seaborn.set', 'sns.set', ([], {}), '()\n', (276, 278), True, 'import seaborn as sns\n'), ((418, 440), 'numpy.linspace', 'np.linspace', (['(0)', 'Lm', 'Nm'], {}), '(0, Lm, Nm)\n', (429, 440), True, 'import numpy as np\n'), ((459, 481), 'numpy.linspace', 'np.linspace', (['(0)', 'Ln', 'Nn'], {}), '(0, Ln, Nn)\n', (470, 481), True, 'import numpy as np\n'), ((493, 525), 'numpy.meshgrid', 'np.meshgrid', (['m', 'n'], {'indexing': '"""ij"""'}), "(m, n, indexing='ij')\n", (504, 525), True, 'import numpy as np\n'), ((525, 550), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mmesh', 'nmesh'], {}), '(mmesh, nmesh)\n', (536, 550), True, 'import matplotlib.pyplot as plt\n'), ((742, 763), 'numpy.linspace', 'np.linspace', (['(0)', 'Lx', 'N'], {}), '(0, Lx, N)\n', (753, 763), True, 'import numpy as np\n'), ((764, 785), 'numpy.linspace', 'np.linspace', (['(0)', 'Ly', 'M'], {}), '(0, Ly, M)\n', (775, 785), True, 'import numpy as np\n'), ((824, 841), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (835, 841), True, 'import numpy as np\n'), ((849, 865), 'numpy.zeros', 'np.zeros', (['(M, N)'], {}), '((M, N))\n', (857, 865), True, 'import numpy as np\n'), ((1889, 1903), 'seaborn.heatmap', 'sns.heatmap', (['T'], {}), '(T)\n', (1900, 1903), True, 'import seaborn as sns\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# everything that relates to ProbFuse2006 is in this library.
# Enjoy.
import os
import random
import numpy as np
from itertools import *
import shutil
def clean_out_files(output_folder):
    """Wipe *output_folder* (and everything in it) if it exists, then make
    sure its containing directory exists so later runs can write into it."""
    if os.path.isdir(output_folder):
        # Drop the whole tree in one call instead of deleting file by file.
        shutil.rmtree(output_folder)
        print("Cleaned all files in "+output_folder)
    # Recreate the parent directory; exist_ok makes this idempotent.
    parent_dir = os.path.dirname(output_folder)
    os.makedirs(parent_dir, exist_ok=True)
def check_relevances_exist(path):
    """Sanity-check that *path* is a directory holding exactly the 10
    per-run relevance files (e.g. 'rel1.txt'); raise Exception otherwise."""
    if not os.path.isdir(path):
        raise Exception("Expecting a folder "+path)
    entries = os.listdir(path)
    if len(entries) != 10:
        raise Exception("Expecting 10 files like 'rel1.txt' in the relevances folder, got "+str(len(entries)))
def extract_params(path):
    """Read the two ProbFuse tuning-parameter lists from the file at *path*.

    The file holds two tab-separated lines of the form
        x <TAB> = <TAB> [x1, x2, ..., xn]
        t <TAB> = <TAB> [t1, t2, ..., tn]
    where the x-line lists segment counts (ints) and the t-line lists
    training percentages (floats).

    Returns: (x, t) -- a list of ints and a list of floats, the arrays of
    segments and percentages required by ProbFuse.
    Raises Exception when *path* is not an existing file.
    """
    if not os.path.isfile(path):
        raise Exception("Error: cannot find the param file, you've gave me: "+path)

    def _parse_bracketed_list(line, cast):
        # A line looks like "<name>\t=\t[v1, v2, ..., vn]"; take the third
        # tab-separated field, strip '[' and ']', and cast each element.
        bracketed = line.strip().split('\t')[2]
        return [cast(v.strip()) for v in bracketed[1:-1].split(',')]

    # BUGFIX: the original used a bare open() with a close() at the end,
    # leaking the file handle if parsing raised; `with` always closes it.
    with open(path) as fp:
        x = _parse_bracketed_list(fp.readline(), int)   # first line: x params
        t = _parse_bracketed_list(fp.readline(), float) # second line: t params
    return x, t
def print_scores_to_file(out, scores):
    """Write the fused ranking to *out* in TREC run format.

    scores: {topic: {doc: score}}. Topics are written in ascending order;
    within a topic, documents are written best-first (descending score),
    capped at 1000 per topic. Each line reads:
        <topic> Q0 <doc> <rank> <score> ProbFuse2006
    """
    with open(out, 'w') as writer:
        # Topics must come out ordered (351..400).
        for topic in sorted(scores):
            doc_scores = scores[topic]
            # Highest-scored documents first, at most 1000 per topic.
            ranking = sorted(doc_scores, key=doc_scores.get, reverse=True)[:1000]
            for rank, doc in enumerate(ranking):
                record = " ".join([str(topic), "Q0", doc, str(rank), str(doc_scores[doc]), "ProbFuse2006"])
                writer.write(record.strip()+"\n")
def compute_segment_sizes(n_segments, topic_dim):
    """Split *topic_dim* documents into *n_segments* near-equal segments.

    When topic_dim is not divisible by n_segments, the first
    (topic_dim % n_segments) segments take one extra document, so the sizes
    always sum to topic_dim and differ by at most one. This keeps segments
    balanced and avoids empty segments (pure ceiling) or too few segments
    (pure floor).

    Returns: [size_of_segment_1, ..., size_of_segment_n_segments].
    """
    base_size, n_larger = divmod(topic_dim, n_segments)
    # The remainder is exactly how many segments must be one document larger.
    sizes = [base_size + 1] * n_larger
    sizes += [base_size] * (n_segments - n_larger)
    return sizes
def compute_probabilities(in_path, n_segments, training_topics, judged, n_topics, topic_dim):
    """Estimate P(document is relevant | segment, run) from the training topics.

    in_path: folder holding exactly 10 pre-processed run files named
        '<run_idx>_...' whose lines read '<topic> <doc> <relevance>'.
    n_segments: number of segments each topic's ranking is split into.
    training_topics: topic ids used for training; other topics are skipped.
    judged: True for ProbFuseJudged (unjudged docs excluded from the
        denominator), False for ProbFuseAll (denominator is the segment size).
    n_topics, topic_dim: fixed problem sizes (50 topics of 1000 docs each).

    Returns: {run_idx: {segment (1-based): probability}}.
    Raises Exception on a wrong file count or a malformed line.
    """
    # probability dictionary; shape: {run: {segment: p}},
    # where p is the probability that a document in the segment is relevant (within the run)
    probability_dict = {}
    # os.listdir already returns a list; the original wrapped it in a no-op comprehension.
    file_list = os.listdir(in_path)
    if len(file_list) != 10:
        # BUGFIX: len() must be wrapped in str() -- the original concatenated an
        # int to a str and raised TypeError instead of this intended message.
        raise Exception("Expecting exactly 10 pre-processed files in "+in_path+"/, 1 per run. Got "+str(len(file_list))+".")
    # Segment sizes are identical for every run and topic (always topic_dim/n_segments),
    # so compute them once.
    segment_sizes = compute_segment_sizes(n_segments, topic_dim)
    for file in file_list:
        # The run index (0..9) is encoded as the leading '<idx>_' of the file name.
        run_idx = int(file.strip().split('_')[0])
        probability_dict[run_idx] = {}
        # per-topic counters; shape: {topic: [seg1_count, seg2_count, ...]}
        are_rel = {}
        arent_rel = {}
        file_path = in_path+"/"+file
        # documents scanned so far within the current segment
        i = 0
        segment_idx = 0
        # Count relevant ("1") and not-relevant ("0") documents per segment.
        with open(file_path) as fp:
            for line in fp:
                line = line.strip()
                elements = line.split(' ')
                if not (len(elements) == 3):
                    # BUGFIX: str() around len(elements), same int+str issue as above.
                    raise Exception("Something's wrong in the pre-processed files. I've got a line with "+str(len(elements))+" elements: "+line)
                # Topics lie in [351, 400].
                topic = int(elements[0])
                # Only training topics contribute to the estimates.
                if (topic in training_topics):
                    # Relevance is 1 (relevant), 0 (not relevant) or -1 (unjudged).
                    rel_score = int(elements[2])
                    # First line of a new topic: reset counters and position.
                    if not topic in are_rel:
                        are_rel[topic] = np.zeros(n_segments)
                        arent_rel[topic] = np.zeros(n_segments)
                        i = 0
                        segment_idx = 0
                    if (rel_score == 1):
                        are_rel[topic][segment_idx] += 1
                    if (rel_score == 0):
                        arent_rel[topic][segment_idx] += 1
                    i += 1
                    # Segment full: advance to the next one.
                    if (i >= segment_sizes[segment_idx]):
                        segment_idx += 1
                        i = 0
        # Turn the counters into per-segment probabilities averaged over the
        # training topics.
        for seg in range(n_segments):
            s = 0
            if (judged):
                # ProbFuseJudged: only judged documents enter the denominator.
                for t in training_topics:
                    # Guard against segments where every document was unjudged
                    # (more likely with high n_segments / few training topics).
                    if not (are_rel[t][seg] + arent_rel[t][seg] == 0):
                        s += are_rel[t][seg]/(are_rel[t][seg]+arent_rel[t][seg])
            else:
                # ProbFuseAll: denominator is the full segment size.
                for t in training_topics:
                    s += are_rel[t][seg]/(segment_sizes[seg])
            # Keys are 1-based so the dict reads {run: {seg 1: p, seg 2: p, ...}}.
            probability_dict[run_idx][seg+1] = s/len(training_topics)
    return probability_dict
def score_evaluate(in_path, probabilities, training_topics, n_segments, topic_dim):
    """Score every document of the non-training topics with ProbFuse.

    Each document accumulates, across the 10 runs, the probability of its
    segment divided by the (1-based) segment index, so documents retrieved
    early contribute more. Segment membership is tracked automatically while
    streaming each run file.

    in_path: folder with the 10 pre-processed run files ('<run_idx>_...').
    probabilities: {run_idx: {segment (1-based): P(relevant | segment, run)}}.
    training_topics: topics used for training; they are not scored.
    n_segments, topic_dim: segmentation parameters (see compute_segment_sizes).

    Returns: {topic: {doc: score}}.
    """
    # os.listdir already returns a list; dropped the original's no-op comprehension.
    # We assume 10 well-formed files here; if the input were broken we would
    # have noticed earlier (compute_probabilities validates it).
    file_list = os.listdir(in_path)
    scores = {}
    segment_sizes = compute_segment_sizes(n_segments, topic_dim)
    for file in file_list:
        # The run index (0..9) is encoded as the leading '<idx>_' of the file name.
        run_idx = int(file.strip().split('_')[0])
        file_path = in_path+"/"+file
        # documents seen so far within the current segment
        i = 0
        # segment index is 1-based here: it keys directly into `probabilities`
        segment_idx = 1
        # topics start at 351; used to detect topic boundaries
        current_topic = 351
        with open(file_path) as fp:
            for line in fp:
                line = line.strip()
                elements = line.split(' ')
                topic = int(elements[0])
                # Training topics were consumed for training; skip them.
                if not (topic in training_topics):
                    doc = elements[1]
                    if not topic in scores:
                        scores[topic] = {}
                    if not doc in scores[topic]:
                        scores[topic][doc] = 0
                    # On a topic boundary, restart the segment bookkeeping.
                    if (topic != current_topic):
                        i = 0
                        segment_idx = 1
                        current_topic = topic
                    scores[topic][doc] += probabilities[run_idx][segment_idx]/segment_idx
                    i += 1
                    # segment_sizes is 0-based while segment_idx is 1-based,
                    # hence the -1 when checking whether the segment is full.
                    if (i >= segment_sizes[segment_idx-1]):
                        segment_idx += 1
                        i = 0
    return scores
def prob_fuse(in_path, out_path, n_segments, training_perc, judged=True, n_topics=50, topic_dim = 1000):
    """Run the full ProbFuse2006 pipeline: train, score, and write a fused run.

    in_path: folder with the 10 pre-processed run files.
    out_path: file that receives the fused run in TREC format
        (<topic> Q0 <doc> <rank> <score> ProbFuse2006).
    n_segments: number of segments each topic's ranking is split into.
    training_perc: fraction of the n_topics topics sampled for training.
    judged: True runs ProbFuseJudged, False runs ProbFuseAll.
    n_topics / topic_dim: fixed problem sizes (50 topics x 1000 documents);
        changing them makes no sense for this application.
    """
    # Topics are numbered 351..400; sample the training subset at random.
    topic_pool = range(351, 400+1)
    n_train = int(n_topics*training_perc)
    train_topics = random.sample(topic_pool, n_train)
    # Estimate P(relevant | segment, run) on the training topics;
    # shape: {run: {segment: probability}}.
    seg_probs = compute_probabilities(in_path, n_segments, train_topics, judged, n_topics, topic_dim)
    # Score the remaining topics' documents; shape: {topic: {doc: score}}.
    fused = score_evaluate(in_path, seg_probs, train_topics, n_segments, topic_dim)
    # Persist the fused ranking.
    print_scores_to_file(out_path, fused)
"random.sample",
"os.listdir",
"os.path.isfile",
"os.path.dirname",
"numpy.zeros",
"os.path.isdir",
"shutil.rmtree"
] | [((327, 355), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (340, 355), False, 'import os\n'), ((15150, 15199), 'random.sample', 'random.sample', (['possible_topics', 'n_training_topics'], {}), '(possible_topics, n_training_topics)\n', (15163, 15199), False, 'import random\n'), ((385, 413), 'shutil.rmtree', 'shutil.rmtree', (['output_folder'], {}), '(output_folder)\n', (398, 413), False, 'import shutil\n'), ((549, 579), 'os.path.dirname', 'os.path.dirname', (['output_folder'], {}), '(output_folder)\n', (564, 579), False, 'import os\n'), ((639, 658), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (652, 658), False, 'import os\n'), ((721, 737), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (731, 737), False, 'import os\n'), ((1148, 1168), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1162, 1168), False, 'import os\n'), ((7106, 7125), 'os.listdir', 'os.listdir', (['in_path'], {}), '(in_path)\n', (7116, 7125), False, 'import os\n'), ((11547, 11566), 'os.listdir', 'os.listdir', (['in_path'], {}), '(in_path)\n', (11557, 11566), False, 'import os\n'), ((9200, 9220), 'numpy.zeros', 'np.zeros', (['n_segments'], {}), '(n_segments)\n', (9208, 9220), True, 'import numpy as np\n'), ((9250, 9270), 'numpy.zeros', 'np.zeros', (['n_segments'], {}), '(n_segments)\n', (9258, 9270), True, 'import numpy as np\n')] |
# Demo of numpy.char.isnumeric: True only when every character is numeric.
import numpy as np

# Neither sample qualifies (the first has no digits, the second mixes a
# digit with letters), so both calls print False.
for sample in ('arfyslowy', 'kloter2surga'):
    print(np.char.isnumeric(sample))
"numpy.char.isnumeric"
] | [((83, 113), 'numpy.char.isnumeric', 'np.char.isnumeric', (['"""arfyslowy"""'], {}), "('arfyslowy')\n", (100, 113), True, 'import numpy as np\n'), ((146, 179), 'numpy.char.isnumeric', 'np.char.isnumeric', (['"""kloter2surga"""'], {}), "('kloter2surga')\n", (163, 179), True, 'import numpy as np\n')] |
"""Tests for `fake_data_for_learning` package."""
import pytest
import numpy as np
from sklearn.preprocessing import LabelEncoder
from fake_data_for_learning.fake_data_for_learning import (
BayesianNodeRV, SampleValue
)
# (Conditional) probability distributions
@pytest.fixture
def binary_pt():
    """Marginal distribution of a binary variable: P(0)=0.1, P(1)=0.9."""
    probabilities = [0.1, 0.9]
    return np.array(probabilities)
@pytest.fixture
def binary_cpt():
    """Conditional table for a binary child: row i is P(child | parent=i)."""
    rows = [[0.2, 0.8],
            [0.7, 0.3]]
    return np.array(rows)
def test_init(binary_pt, binary_cpt):
    """Constructor accepts valid name/table/values/parents combinations and
    rejects malformed parent specifications."""
    # Successful initialization
    BayesianNodeRV('X0', binary_pt)
    BayesianNodeRV('X1', binary_cpt, parent_names=['X0'])
    # Non-binary node with explicit string outcome values and one parent.
    BayesianNodeRV(
        'profession',
        np.array([
            [0.3, 0.4, 0.2, 0.1],
            [0.05, 0.15, 0.3, 0.5],
            [0.15, 0.05, 0.2, 0.6]
        ]),
        values=('salaried', 'self-employed', 'student', 'unemployed'),
        parent_names=['tertiary-rv']
    )
    # Failing initialization: parent names must be a list, not a bare string
    with pytest.raises(TypeError):
        BayesianNodeRV('X0', binary_cpt, parent_names='X1')
    # Number of parent names must be compatible with shape of cpt
    with pytest.raises(ValueError):
        # 2-d table but no parents declared
        BayesianNodeRV('X1', binary_cpt)
    with pytest.raises(ValueError):
        # 2-d table but two parents declared
        BayesianNodeRV('X2', binary_cpt, parent_names=['X0', 'X1'])
def test_encoding(binary_pt):
    """Outcome values default to 0..n-1 and may be overridden with custom labels."""
    # Default values: integer codes 0 and 1.
    binary_rv = BayesianNodeRV('X0', binary_pt)
    np.testing.assert_equal(
        binary_rv.values,
        np.array([0, 1])
    )
    # Non-default values are stored as a unicode numpy array.
    binary_rv_nondef = BayesianNodeRV(
        'X0', binary_pt, ['down', 'up']
    )
    np.testing.assert_equal(
        binary_rv_nondef.values,
        np.array(['down', 'up']).astype('U')
    )
    # Unsorted custom values are rejected.
    with pytest.raises(ValueError):
        BayesianNodeRV('X0', binary_pt, ['b', 'a'])
    # Duplicate custom values are rejected.
    with pytest.raises(ValueError):
        BayesianNodeRV('X0', binary_pt, values=['a', 'a'])
def test_bnrv_equality(binary_pt, binary_cpt):
    """Equality requires matching name, outcome values and probability table."""
    reference = BayesianNodeRV('X0', binary_pt)
    # Reflexive equality.
    assert reference == reference
    # Changing any single attribute breaks equality.
    different_name = BayesianNodeRV('X1', binary_pt)
    different_values = BayesianNodeRV('X0', binary_pt, values=['down', 'up'])
    different_table = BayesianNodeRV('X1', binary_cpt, parent_names=['X0'])
    assert reference != different_name
    assert reference != different_values
    assert reference != different_table
def test_sample_value():
    """SampleValue accepts non-negative ints by default; other value types
    require a fitted label encoder."""
    # Only non-negative integers are valid without an encoder.
    assert SampleValue.possible_default_value(1)
    assert not SampleValue.possible_default_value(-1)
    assert not SampleValue.possible_default_value(1.)
    assert not SampleValue.possible_default_value('alice')
    # Check instantiation
    SampleValue(1)
    le = LabelEncoder()
    le.fit(['alice'])
    SampleValue('alice', label_encoder=le)
    # Passing a value not in the label encoder's classes should raise an error
    with pytest.raises(ValueError):
        SampleValue('bob', label_encoder=le)
def test_get_probability_table(binary_cpt):
    """get_probability_table returns the conditional-distribution slice
    selected by the given parent values."""
    rv1c0 = BayesianNodeRV('X1', binary_cpt, parent_names=['X0'])
    # One parent: X0=1 selects row 1 of the table.
    np.testing.assert_equal(
        rv1c0.get_probability_table(parent_values={'X0': SampleValue(1)}),
        binary_cpt[1, :]
    )
    # X2 | X0, X1
    pt_X2cX0X1 = np.array([
        [
            [0., 1.],
            [0.5, 0.5],
        ],
        [
            [0.9, 0.1],
            [0.3, 0.7]
        ]
    ])
    rv2c01 = BayesianNodeRV('X2', pt_X2cX0X1, parent_names=['X0', 'X1'])
    # Two parents: (X0=0, X1=1) indexes axis 0 then axis 1.
    np.testing.assert_equal(
        rv2c01.get_probability_table(
            parent_values={'X0': SampleValue(0), 'X1': SampleValue(1)}
        ),
        pt_X2cX0X1[0, 1, :]
    )
def test_get_pmf(binary_pt):
    """pmf returns table entries for valid outcomes and rejects anything else."""
    rv = BayesianNodeRV('X0', binary_pt)
    # Valid outcomes map straight to their table entries.
    for outcome in (0, 1):
        assert rv.pmf(outcome) == binary_pt[outcome]
    # Out-of-range outcome, unknown label, and unexpected parent all raise.
    with pytest.raises(ValueError):
        rv.pmf(2)
    with pytest.raises(ValueError):
        rv.pmf('alice')
    with pytest.raises(ValueError):
        rv.pmf(1, parent_values={'Z': SampleValue(0)})
def test_get_pmf_w_parents():
    """pmf of a conditional node requires parent values, which may be
    label-encoded."""
    rv = BayesianNodeRV(
        'Y',
        np.array([
            [0.2, 0.8],
            [0.7, 0.3]
        ]),
        parent_names=['X']
    )
    # P(Y=0 | X=1) is row 1, column 0 of the table.
    assert rv.pmf(0, parent_values={'X': SampleValue(1)}) == 0.7
    # Omitting the parent value is an error for a conditional node.
    with pytest.raises(ValueError):
        rv.pmf(1)
    le = LabelEncoder()
    le.fit(['alice', 'bob'])
    # 'alice' encodes to 0, so this reads row 0, column 1 (= 0.8).
    assert rv.pmf(
        1,
        parent_values={'X': SampleValue('alice', label_encoder=le)}
    ) == 0.8
    # A parent value outside the encoder's classes is rejected.
    with pytest.raises(ValueError):
        rv.pmf(1, parent_values={'X': SampleValue('terry', label_encoder=le)})
def test_rvs(binary_pt, binary_cpt):
    """rvs draws seeded, reproducible samples from the node's distribution."""
    rv = BayesianNodeRV('X0', binary_pt)
    # Default outcome values are integer-encoded, so draws are numpy int64.
    assert isinstance(rv.rvs(seed=42)[0], np.int64)
    rv1c0 = BayesianNodeRV('X1', binary_cpt, parent_names=['X0'])
    draw = rv1c0.rvs(parent_values={'X0': SampleValue(1)}, seed=42)[0]
    assert draw in rv1c0.values
    # size=10 yields 10 draws.
    draws = rv1c0.rvs(size=10, parent_values={'X0': SampleValue(1)}, seed=42)
    assert len(draws) == 10
    # Test handling of extraneous parent values, needed for ancestral sampling:
    # an irrelevant parent ('X2') must not change the seeded draw.
    assert (
        rv1c0.rvs(parent_values={'X0': SampleValue(1)}, seed=42)[0] ==
        rv1c0.rvs(
            parent_values={'X0': SampleValue(1), 'X2': SampleValue(42)}, seed=42
        )[0]
    )
    # Test non-default (string-labelled) value sampling
    rv_nondef = BayesianNodeRV('X0', binary_pt, values=['down', 'up'])
    assert rv_nondef.rvs(seed=42)[0] in rv_nondef.values
| [
"sklearn.preprocessing.LabelEncoder",
"fake_data_for_learning.fake_data_for_learning.SampleValue",
"numpy.array",
"pytest.raises",
"fake_data_for_learning.fake_data_for_learning.BayesianNodeRV",
"fake_data_for_learning.fake_data_for_learning.SampleValue.possible_default_value"
] | [((314, 334), 'numpy.array', 'np.array', (['[0.1, 0.9]'], {}), '([0.1, 0.9])\n', (322, 334), True, 'import numpy as np\n'), ((382, 416), 'numpy.array', 'np.array', (['[[0.2, 0.8], [0.7, 0.3]]'], {}), '([[0.2, 0.8], [0.7, 0.3]])\n', (390, 416), True, 'import numpy as np\n'), ((516, 547), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {}), "('X0', binary_pt)\n", (530, 547), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((552, 605), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X1"""', 'binary_cpt'], {'parent_names': "['X0']"}), "('X1', binary_cpt, parent_names=['X0'])\n", (566, 605), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1369, 1400), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {}), "('X0', binary_pt)\n", (1383, 1400), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1536, 1583), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt', "['down', 'up']"], {}), "('X0', binary_pt, ['down', 'up'])\n", (1550, 1583), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1954, 1985), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {}), "('X0', binary_pt)\n", (1968, 1985), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2239, 2276), 'fake_data_for_learning.fake_data_for_learning.SampleValue.possible_default_value', 'SampleValue.possible_default_value', (['(1)'], {}), '(1)\n', (2273, 2276), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2474, 2488), 
'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (2485, 2488), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2499, 2513), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2511, 2513), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2540, 2578), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['"""alice"""'], {'label_encoder': 'le'}), "('alice', label_encoder=le)\n", (2551, 2578), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2790, 2843), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X1"""', 'binary_cpt'], {'parent_names': "['X0']"}), "('X1', binary_cpt, parent_names=['X0'])\n", (2804, 2843), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3015, 3077), 'numpy.array', 'np.array', (['[[[0.0, 1.0], [0.5, 0.5]], [[0.9, 0.1], [0.3, 0.7]]]'], {}), '([[[0.0, 1.0], [0.5, 0.5]], [[0.9, 0.1], [0.3, 0.7]]])\n', (3023, 3077), True, 'import numpy as np\n'), ((3180, 3239), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X2"""', 'pt_X2cX0X1'], {'parent_names': "['X0', 'X1']"}), "('X2', pt_X2cX0X1, parent_names=['X0', 'X1'])\n", (3194, 3239), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3463, 3494), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {}), "('X0', binary_pt)\n", (3477, 3494), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4088, 4102), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4100, 4102), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4407, 4438), 
'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {}), "('X0', binary_pt)\n", (4421, 4438), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4504, 4557), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X1"""', 'binary_cpt'], {'parent_names': "['X0']"}), "('X1', binary_cpt, parent_names=['X0'])\n", (4518, 4557), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((5106, 5160), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {'values': "['down', 'up']"}), "('X0', binary_pt, values=['down', 'up'])\n", (5120, 5160), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((656, 741), 'numpy.array', 'np.array', (['[[0.3, 0.4, 0.2, 0.1], [0.05, 0.15, 0.3, 0.5], [0.15, 0.05, 0.2, 0.6]]'], {}), '([[0.3, 0.4, 0.2, 0.1], [0.05, 0.15, 0.3, 0.5], [0.15, 0.05, 0.2, 0.6]]\n )\n', (664, 741), True, 'import numpy as np\n'), ((965, 989), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (978, 989), False, 'import pytest\n'), ((999, 1050), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_cpt'], {'parent_names': '"""X1"""'}), "('X0', binary_cpt, parent_names='X1')\n", (1013, 1050), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1127, 1152), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1140, 1152), False, 'import pytest\n'), ((1162, 1194), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X1"""', 'binary_cpt'], {}), "('X1', binary_cpt)\n", (1176, 1194), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1205, 1230), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1218, 1230), False, 'import pytest\n'), ((1240, 1299), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X2"""', 'binary_cpt'], {'parent_names': "['X0', 'X1']"}), "('X2', binary_cpt, parent_names=['X0', 'X1'])\n", (1254, 1299), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1464, 1480), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1472, 1480), True, 'import numpy as np\n'), ((1721, 1746), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1734, 1746), False, 'import pytest\n'), ((1756, 1799), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt', "['b', 'a']"], {}), "('X0', binary_pt, ['b', 'a'])\n", (1770, 1799), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((1810, 1835), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1823, 1835), False, 'import pytest\n'), ((1845, 1895), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {'values': "['a', 'a']"}), "('X0', binary_pt, values=['a', 'a'])\n", (1859, 1895), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2024, 2055), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X1"""', 'binary_pt'], {}), "('X1', binary_pt)\n", (2038, 2055), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2074, 2128), 'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X0"""', 'binary_pt'], {'values': "['down', 'up']"}), "('X0', binary_pt, values=['down', 'up'])\n", (2088, 2128), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2147, 2200), 
'fake_data_for_learning.fake_data_for_learning.BayesianNodeRV', 'BayesianNodeRV', (['"""X1"""', 'binary_cpt'], {'parent_names': "['X0']"}), "('X1', binary_cpt, parent_names=['X0'])\n", (2161, 2200), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2292, 2330), 'fake_data_for_learning.fake_data_for_learning.SampleValue.possible_default_value', 'SampleValue.possible_default_value', (['(-1)'], {}), '(-1)\n', (2326, 2330), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2346, 2385), 'fake_data_for_learning.fake_data_for_learning.SampleValue.possible_default_value', 'SampleValue.possible_default_value', (['(1.0)'], {}), '(1.0)\n', (2380, 2385), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2400, 2443), 'fake_data_for_learning.fake_data_for_learning.SampleValue.possible_default_value', 'SampleValue.possible_default_value', (['"""alice"""'], {}), "('alice')\n", (2434, 2443), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2660, 2685), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2673, 2685), False, 'import pytest\n'), ((2695, 2731), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['"""bob"""'], {'label_encoder': 'le'}), "('bob', label_encoder=le)\n", (2706, 2731), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3579, 3604), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3592, 3604), False, 'import pytest\n'), ((3634, 3659), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3647, 3659), False, 'import pytest\n'), ((3695, 3720), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3708, 3720), False, 'import pytest\n'), ((3855, 3889), 'numpy.array', 'np.array', (['[[0.2, 0.8], 
[0.7, 0.3]]'], {}), '([[0.2, 0.8], [0.7, 0.3]])\n', (3863, 3889), True, 'import numpy as np\n'), ((4033, 4058), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4046, 4058), False, 'import pytest\n'), ((4253, 4278), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4266, 4278), False, 'import pytest\n'), ((1668, 1692), 'numpy.array', 'np.array', (["['down', 'up']"], {}), "(['down', 'up'])\n", (1676, 1692), True, 'import numpy as np\n'), ((4714, 4728), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (4725, 4728), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((2930, 2944), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (2941, 2944), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3340, 3354), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(0)'], {}), '(0)\n', (3351, 3354), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3362, 3376), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (3373, 3376), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3760, 3774), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(0)'], {}), '(0)\n', (3771, 3774), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((3999, 4013), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (4010, 4013), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4190, 4228), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['"""alice"""'], {'label_encoder': 'le'}), 
"('alice', label_encoder=le)\n", (4201, 4228), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4318, 4356), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['"""terry"""'], {'label_encoder': 'le'}), "('terry', label_encoder=le)\n", (4329, 4356), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4600, 4614), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (4611, 4614), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4900, 4914), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (4911, 4914), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((4984, 4998), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(1)'], {}), '(1)\n', (4995, 4998), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n'), ((5006, 5021), 'fake_data_for_learning.fake_data_for_learning.SampleValue', 'SampleValue', (['(42)'], {}), '(42)\n', (5017, 5021), False, 'from fake_data_for_learning.fake_data_for_learning import BayesianNodeRV, SampleValue\n')] |
#!python
#
#Calculate the lattice constant and elastic constant of refractory HEAs
import os
import re
import shutil
import operator
from itertools import combinations
from pymatgen.core.periodic_table import Element
import scipy.constants
from pyemto.latticeinputs.batch import batch_head
from pyemto.utilities import distort
from monty.serialization import loadfn
import numpy as np
from pyemto.EOS import EOS
from pyemto.emto_parser import EMTOPARSER
import json
from pyemto.examples.emto_input_generator import EMTO
import math
def input_gen_eqv(jobname="NbTiVZr", emtopath="./", latname="bcc", sws0=3.0, sws_percent=10.0, sws_step=11,
        concs=[[0.25, 0.25, 0.25, 0.25]], species = [['Nb','Ti','V','Zr']]):
    """
    Generate EMTO input files and batch files for an equilibrium-volume scan.

    A range of Wigner-Seitz radii around ``sws0`` (+/- ``sws_percent`` %) is
    sampled at ``sws_step`` points so the equation of state can be fitted by
    the generated post-processing script afterwards.

    Parameter
        jobname: str
            The jobname prefix; the lattice name is appended to it.
        latname: str
            The lattice name ("bcc", "fcc" or "hcp").
        sws0: float
            The central Wigner-Seitz radius of the scan (presumably in Bohr,
            the EMTO convention -- TODO confirm against pyemto docs).
        sws_percent: float
            Half-width of the scan window, in percent of sws0.
        sws_step: int
            The number of sws sample points.
        emtopath: str
            The working path where input files are written.
        concs: list-2D
            The concentrations (one sublist per sublattice).
        species: list-2D
            The species (one sublist per sublattice).
    Return
        None
    """
    find_primitive = False
    make_supercell = None
    coords_are_cartesian = True
    runtime = "12:00:00"
    # k-point mesh for the SCF calculation.
    nkx = 21
    nky = 21
    nkz = 21
    ncpu = 1
    # One zero spin-splitting entry per alloy component (non-magnetic setup).
    splts = [[0]*len(species[0])]
    latpath = emtopath
    # Scan window: sws0 * (1 +/- sws_percent/100).
    swsmin = sws0 * (1.0 - sws_percent/100.0)
    swsmax = sws0 * (1.0 + sws_percent/100.0)
    if latname == "bcc":
        # Primitive bcc lattice vectors (units of the lattice constant).
        prims0 = np.array([
            [-0.5,0.5,0.5],
            [0.5,-0.5,0.5],
            [0.5,0.5,-0.5]
            ])
        basis0 = np.array([
            [0.0,0.0,0.0]
            ])
    elif latname == "fcc":
        # Primitive fcc lattice vectors.
        prims0 = np.array([
            [0.5, 0.5, 0.0],
            [0.5, 0.0, 0.5],
            [0.0, 0.5, 0.5]
            ])
        basis0 = np.array([
            [0.0,0.0,0.0]
            ])
    elif latname == "hcp":
        # Hexagonal cell with the two-atom hcp basis.
        prims0 = np.array([
            [0.5, -math.sqrt(3.)/2., 0.0],
            [0.5, math.sqrt(3.)/2., 0.0],
            [0.0, 0.0,-1.0]
            ])
        basis0 = np.array([
            [1./3., 2./3., 0.25],
            [2./3., 1./3., 0.75]
            ])
    else:
        raise ValueError("Current lattice({}) is not supported".format(latname))
    # NOTE(review): EMTOdir is a hard-coded, user-specific installation path.
    input_creator = EMTO(folder=emtopath, EMTOdir='/storage/home/mjl6505/bin')
    jobnamei = jobname + "_" + latname
    input_creator.prepare_input_files(latpath=latpath, jobname=jobnamei, species=species, splts=splts, concs=concs,
        prims=prims0, basis=basis0, find_primitive=find_primitive, coords_are_cartesian=coords_are_cartesian,
        latname=latname, ncpa=15, sofc='Y', nkx=nkx, nky=nky, nkz=nkz, ncpu=ncpu, parallel=False, alpcpa=0.6,
        runtime=runtime, KGRN_file_type='scf', KFCD_file_type='fcd', amix=0.01, tole=1e-6, tolef=1e-6, iex=4,
        niter=200, kgrn_nfi=91, make_supercell=make_supercell)
    sws_range = np.linspace(swsmin, swsmax, sws_step)
    input_creator.write_bmdl_kstr_shape_input()
    input_creator.write_kgrn_kfcd_swsrange(sws=sws_range)
def input_gen_elastic(jobname="NbTiVZr", emtopath="./", latname="bcc", sws0=3.0, delta_max=0.05, delta_step=6,
        concs=[[0.25, 0.25, 0.25, 0.25]], species = [['Nb','Ti','V','Zr']]):
    """
    Generate EMTO input files for elastic-constant distortion calculations.

    For cubic lattices two volume-conserving distortions are generated
    (orthorhombic for C' and monoclinic for C44); the bulk modulus is obtained
    separately from the EOS fit of the equilibrium-volume run.

    Parameter
        jobname: str
            The jobname prefix.
        emtopath: str
            The working path where input files are written.
        latname: str
            The lattice name ("bcc", "fcc" or "hcp").
        sws0: float, int, str or list
            Equilibrium Wigner-Seitz radius; a scalar is wrapped into a list.
        delta_max: float
            Maximum distortion amplitude.
        delta_step: int
            Number of distortion amplitudes.
        concs: list-2D
            The concentrations.
        species: list-2D
            The species.
    Return
        None
    """
    find_primitive = False
    make_supercell = None
    coords_are_cartesian = True
    ncpu = 1
    runtime = "12:00:00"
    # One zero spin-splitting entry per alloy component. Previously this was
    # hard-coded to 4 components ([[0]*4]), which silently mismatched any
    # non-quaternary alloy; derive it from species as input_gen_eqv does.
    splts = [[0]*len(species[0])]
    if isinstance(sws0, (float, int, str)):
        sws0 = [float(sws0)]
    latpath = emtopath
    deltas = np.linspace(0, delta_max, delta_step)
    if latname == "bcc":
        prims0 = np.array([
            [-0.5,0.5,0.5],
            [0.5,-0.5,0.5],
            [0.5,0.5,-0.5]
            ])
        basis0 = np.array([
            [0.0,0.0,0.0]
            ])
    elif latname == "fcc":
        prims0 = np.array([
            [0.5, 0.5, 0.0],
            [0.5, 0.0, 0.5],
            [0.0, 0.5, 0.5]
            ])
        basis0 = np.array([
            [0.0,0.0,0.0]
            ])
    elif latname == "hcp":
        prims0 = np.array([
            [0.5, -math.sqrt(3.)/2., 0.0],
            [0.5, math.sqrt(3.)/2., 0.0],
            [0.0, 0.0,-1.0]
            ])
        basis0 = np.array([
            [1./3., 2./3., 0.25],
            [2./3., 1./3., 0.75]
            ])
    else:
        raise ValueError("Current lattice({}) is not supported".format(latname))
    # We need to use a non-zero value for the first delta to break the symmetry of the structure.
    deltas[0] = 0.001
    # Only two distortions for cubic (third one is bulk modulus EOS fit)
    distortions = ['Cprime', 'C44']
    for i, distortion in enumerate(distortions):
        print('#'*100)
        print('distortion = ',distortion)
        print('#'*100)
        for delta in deltas:
            print('#'*100)
            print('delta = ',delta)
            print('#'*100)
            # These distortion matrices are from the EMTO book.
            if distortion == 'Cprime':
                dist_matrix = np.array([
                    [1+delta,0,0],
                    [0,1-delta,0],
                    [0,0,1/(1-delta**2)]
                    ])
            elif distortion == 'C44':
                dist_matrix = np.array([
                    [1, delta, 0],
                    [delta ,1, 0],
                    [0, 0, 1/(1-delta**2)]
                    ])
            # Calculate new lattice vectors and atomic positions
            prims = distort(dist_matrix, prims0)
            basis = distort(dist_matrix, basis0)
            # Each different distortion might need different set of nkx, nky, nkz
            if distortion == 'Cprime':
                nkx = 21; nky = 21; nkz = 21
            elif distortion == 'C44':
                nkx = 20; nky = 20; nkz = 25
            # NOTE(review): EMTOdir is a hard-coded, user-specific path.
            input_creator = EMTO(folder=emtopath, EMTOdir='/storage/home/mjl6505/bin')
            latnamei = latname + '{0}_{1:4.2f}'.format(i+1, delta)
            jobnamei = jobname + "_" + latnamei
            input_creator.prepare_input_files(latpath=latpath, jobname=jobnamei, species=species, splts=splts,
                concs=concs, prims=prims, basis=basis, find_primitive=find_primitive, latname=latnamei,
                coords_are_cartesian=coords_are_cartesian, ncpa=15, sofc='Y', nkx=nkx, nky=nky, nkz=nkz,
                ncpu=ncpu, parallel=False, alpcpa=0.6, runtime=runtime, KGRN_file_type='scf', KFCD_file_type='fcd',
                amix=0.01, tole=1e-6, tolef=1e-6, iex=4, niter=200, kgrn_nfi=91, make_supercell=make_supercell)
            sws_range = np.array(sws0)
            input_creator.write_bmdl_kstr_shape_input()
            input_creator.write_kgrn_kfcd_swsrange(sws=sws_range)
def write_eqv_post(jobname="NbTiVZr", folder="./", lat="bcc", concs=[[0.25, 0.25, 0.25, 0.25]],
        species=[['Nb','Ti','V','Zr']], DLM=False):
    """
    Write a stand-alone post-processing script (<jobname>_eqv_post.py) that
    fits an EOS to the equilibrium-volume results and dumps <jobname>-eqv.json.

    The script body below is emitted verbatim; the concrete parameter values
    of this function are appended as plain assignments at the end of the
    generated file.

    Parameter
        jobname: str
            The jobname prefix; also used in the output file names.
        folder: str
            Folder containing the kgrn/kfcd output (and target of the json).
        lat: str
            The lattice name ("bcc", "fcc" or "hcp").
        concs: list-2D
            The concentrations (embedded into the generated script).
        species: list-2D
            The species (embedded into the generated script).
        DLM: bool
            NOTE(review): not forwarded into the generated script, which
            always uses its own DLM=False default -- confirm intent.
    Return
        None
    """
    # Template of the generated post-processing script (kept verbatim).
    lines = """#!python
import numpy as np
from pyemto.EOS import EOS
from pyemto.emto_parser import EMTOPARSER
import json
import os
def equilv_result(jobname="test", folder="./", lat="bcc", concs=[[0.25, 0.25, 0.25, 0.25]],
        species = [['Nb','Ti','V','Zr']], DLM=False):
    if lat == 'bcc':
        Natom = 2
    elif lat == 'fcc':
        Natom = 2
    elif lat == 'hcp':
        Natom = 2
    else:
        raise ValueError("Not supported")
    ry2J = 2.1798741e-18
    A2m = 1e-10
    bohr2A = 0.529177249
    # Equilibrium lattice constant output files
    kgrn_path = os.path.join(folder, "kgrn", "*")
    kfcd_path = os.path.join(folder, "kfcd", "*")
    vol_data = EMTOPARSER(kgrn_path, kfcd_path, suffix='prn', DLM=DLM)
    vol_data.create_df()
    vol_df = vol_data.main_df
    eos = EOS('test', method='morse')
    indMin = 0
    indMax = len(vol_df.SWS)
    SWS0, E0, B0, grun0, error0 = eos.fit(vol_df.SWS[indMin:indMax], vol_df.EPBE[indMin:indMax], show_plot=False)
    vol0 = 4./3*np.pi*(SWS0*bohr2A)**3
    a = (vol0 * Natom) ** (1./3.)
    eos_result = {"sws": SWS0, "vol": vol0 , "a": a, "E0": E0, "B0": B0, "grun": grun0,
        "error": error0, "jobname": jobname, "latname": lat, "concs": concs, "species": species}
    with open(os.path.join(folder, jobname+"-eqv.json"), 'w+') as f:
        json.dump(eos_result, f, indent=4)
"""
    # Append the concrete parameter values as assignments, then the call.
    lines += "\njobname = '{}'\n".format(jobname)
    lines += "folder = '{}'\n".format(folder)
    lines += "lat = '{}'\n".format(lat)
    concs_str = [str(coni) for coni in concs[0]]
    lines += "concs = [[{}]]\n".format(", ".join(concs_str))
    lines += "species = [['{}']]\n".format("', '".join(species[0]))
    lines += "\nequilv_result(jobname=jobname, folder=folder, lat=lat, concs=concs, species=species)\n"
    with open(jobname + "_eqv_post.py", "w+") as fid:
        fid.write(lines)
def write_elastic_post(jobname="NbTiVZr", folder="./", latname="bcc", sws0=3.0, delta_max=0.05, delta_step=6,
        B=267.0, concs=[[0.25, 0.25, 0.25, 0.25]], species=[['Nb','Ti','V','Zr']], DLM=False):
    """
    Write a stand-alone post-processing script (<jobname>_elastic_post.py) that
    fits the C' and C44 distortion energies and dumps <jobname>-elastic.json.

    The script body below is emitted verbatim; the concrete parameter values
    of this function are appended as plain assignments at the end of the
    generated file.

    Parameter
        jobname: str
            The jobname prefix; also used in the output file names.
        folder: str
            Folder containing the kgrn/kfcd output (and target of the json).
        latname: str
            The lattice name ("bcc", "fcc" or "hcp").
        sws0: float
            Equilibrium Wigner-Seitz radius used for the unit conversion.
        delta_max: float
            Maximum distortion amplitude (must match input_gen_elastic).
        delta_step: int
            Number of distortion amplitudes (must match input_gen_elastic).
        B: float
            Bulk modulus in GPa from the EOS fit.
        concs: list-2D
        species: list-2D
        DLM: bool
            NOTE(review): not forwarded into the generated script, which
            always uses its own DLM=False default -- confirm intent.
    Return
        None
    """
    # Template of the generated post-processing script (kept verbatim).
    lines = """#!python
import numpy as np
from pyemto.EOS import EOS
from pyemto.emto_parser import EMTOPARSER
import json
import os
def elastic_result(jobname="NbTiVZr", folder="./", latname="bcc", sws0=3.0, delta_max=0.05, delta_step=6,
        B=267.0, concs=[[0.25, 0.25, 0.25, 0.25]], species = [['Nb','Ti','V','Zr']], DLM=False):
    ry2J = 2.1798741e-18
    A2m = 1e-10
    bohr2A = 0.529177249
    deltas = np.linspace(0, delta_max, delta_step)
    deltas[0] = 0.001
    # Elastic constants output files
    elastic_kgrn_path = os.path.join(folder, "kgrn", "*")
    elastic_kfcd_path = os.path.join(folder, "kfcd", "*")
    ec_data = EMTOPARSER(elastic_kgrn_path, elastic_kfcd_path, suffix='prn', DLM=DLM)
    ec_data.create_df()
    ec_df = ec_data.main_df
    eos = EOS('test', method='morse')
    # d1 is the c' distortion
    d1 = np.asarray(ec_df[ec_df.Struc.str.contains(latname + '1')].EPBE)
    # d2 is the c44 distortion
    d2 = np.asarray(ec_df[ec_df.Struc.str.contains(latname + '2')].EPBE)
    cprime_coeffs, cprime_error = eos.distortion_fit(deltas, d1, num=1)
    c44_coeffs, c44_error = eos.distortion_fit(deltas, d2, num=1)
    # Change units to GPa
    vol0 = 4./3*np.pi*(sws0*bohr2A)**3
    cprime_coeffs = cprime_coeffs[0] * ry2J / (vol0*A2m**3) / 2 / 1e9
    c44_coeffs = c44_coeffs[0] * ry2J / (vol0*A2m**3) / 2 / 1e9
    def calc_CIJ(B, Cprime, C44):
        C11 = (3.*B + 4.*Cprime)/3.
        C12 = (3.*B - 2.*Cprime)/3.
        CIJ = [[C11, C12, C12, 0, 0, 0],
               [C12, C11, C12, 0, 0, 0],
               [C12, C12, C11, 0, 0, 0],
               [0, 0, 0, C44, 0, 0],
               [0, 0, 0, 0, C44, 0],
               [0, 0, 0, 0, 0, C44]]
        return CIJ
    CIJ = calc_CIJ(B, cprime_coeffs, c44_coeffs)
    elastic_result = {"vol": vol0, "B": B, "jobname": jobname, "latname": latname,
        "concs": concs, "species": species,"delta": deltas.tolist(), "CIJ": CIJ, "data_Cprime": d1.tolist(),
        "fit_Cprime": cprime_coeffs.tolist(), "fit_err_Cprime": cprime_error.tolist(),
        "data_C44": d2.tolist(), "fit_C44": c44_coeffs.tolist(), "fit_err_C44": c44_error.tolist()}
    with open(os.path.join(folder, jobname+"-elastic.json"), 'w+') as f:
        json.dump(elastic_result, f, indent=4)
"""
    # Append the concrete parameter values as assignments, then the call.
    lines += "\njobname = '{}'\n".format(jobname)
    lines += "folder = '{}'\n".format(folder)
    lines += "latname = '{}'\n".format(latname)
    concs_str = [str(coni) for coni in concs[0]]
    lines += "concs = [[{}]]\n".format(", ".join(concs_str))
    lines += "species = [['{}']]\n".format("', '".join(species[0]))
    lines += "sws0 = {}\n".format(str(float(sws0)))
    lines += "delta_max = {}\n".format(str(float(delta_max)))
    lines += "delta_step = {}\n".format(str(int(delta_step)))
    lines += "B = {}\n".format(str(float(B)))
    lines += "\nelastic_result(jobname=jobname, folder=folder, latname=latname, " + \
        "sws0=sws0, delta_max=delta_max, delta_step=delta_step, B=B, " + \
        "concs=concs, species=species)\n"
    with open(jobname + "_elastic_post.py", "w+") as fid:
        fid.write(lines)
def parase_pbs_script(filename = "emtojob.pbs"):
    """
    Parse a PBS batch script into a dict of scheduler settings and commands.

    Recognized content:
      * ``#PBS -l walltime=...`` resource lines (stored under "walltime"),
      * ``#PBS -l nodes=N:ppn=M:pmem=...`` resource lines (stored under the
        readable keys "node", "core", "pmem"),
      * ``#PBS -q/-A/-N/-V/-G`` option lines (queue, account, job_name, env,
        group_name),
      * ``module load ...`` lines (module names collected under "module"),
      * every other non-empty line that is not a comment or a ``cd $...``
        line is kept verbatim, newline-terminated, under "cmds".

    Parameter
        filename: str (filename-like)
            The filename of the pbs script
    Return
        param_dict: dict
            The dict of parameters.
    """
    # Map PBS single-letter options to descriptive keys.
    s = {"-q": "queue", "-A": "account", "-N": "job_name", "-V": "env",
         "-G": "group_name"}
    # Map resource-list tokens (from "-l nodes=...:ppn=...:pmem=...") to keys.
    submit_s = {"nodes": "node", "ppn": "core", "pmem": "pmem"}
    param_dict = {"module": [], "cmds": []}
    with open(filename, "r") as fid:
        for eachline in fid:
            eachline = eachline.strip()
            if eachline.startswith("#PBS"):
                # Raw string fixes the invalid "\s" escape (SyntaxWarning).
                line_list = re.split(r"\s+", eachline)
                if line_list[1] == "-l":
                    if line_list[2].startswith("walltime"):
                        # The walltime value itself contains ':', so only
                        # split on '=' here.
                        param_dict["walltime"] = line_list[2].split("=")[1]
                    else:
                        for item in line_list[2].split(":"):
                            # partition() tolerates tokens without '='
                            # (the old split("=")[1] raised IndexError).
                            key, _, value = item.partition("=")
                            if key in submit_s:
                                param_dict[submit_s[key]] = value
                else:
                    if line_list[1] in s:
                        param_dict[s[line_list[1]]] = line_list[2]
            elif eachline.startswith("module"):
                # "module load a b" -> ["a", "b"]
                for module in eachline.split()[2:]:
                    param_dict["module"].append(module)
            elif eachline.startswith(("cd $", "#")) or (not eachline):
                # Skip 'cd $PBS_O_WORKDIR', comments and blank lines.
                pass
            else:
                param_dict["cmds"].append(eachline + "\n")
    return param_dict
def parse_queue_script(template="emtojob.pbs", queue_type="pbs"):
    """
    Dispatch queue-script parsing according to the scheduler type.

    Parameter
        template: str (filename-like)
            The filename of the queue script. Default: vaspjob.pbs
        queue_type: str
            The type of queue system; only "pbs" is supported for now.
    Return
        dict of parsed scheduler parameters (see parase_pbs_script).
    """
    # Guard clause: reject unsupported schedulers up front.
    if queue_type != "pbs":
        raise ValueError("Only PBS is supported now. Other system will coming soon...")
    return parase_pbs_script(filename=template)
def merge_batchfile(batchfiles, latpath="./", jobname="test", queue_type="pbs"):
    """
    Merge batch files into a single command payload plus one batch header.

    Each existing file in *batchfiles* is parsed; its command lines are
    concatenated into ``cmd_lines`` and a header is (re)built from its
    walltime/account settings -- so the returned header reflects the LAST
    existing file.

    Parameter
        batchfiles: str or list of str
            One or more batch-script paths; missing files are skipped.
        latpath: str
            Lattice path forwarded to batch_head.
        jobname: str
            Job name forwarded to batch_head.
        queue_type: str
            Queue system type ("pbs").
    Return
        (cmd_lines, head_lines): tuple of str
            Both are empty strings when none of the files exist (previously
            this case raised UnboundLocalError on head_lines).
    """
    cmd_lines = ""
    # Initialize so the function is well-defined when no file exists.
    head_lines = ""
    if isinstance(batchfiles, str):
        batchfiles = [batchfiles]
    for file in batchfiles:
        if os.path.isfile(file):
            param_dict = parse_queue_script(template=file, queue_type=queue_type)
            runtime = param_dict["walltime"]
            account = param_dict["account"]
            head_lines = batch_head(jobname, latpath=latpath, runtime=runtime, account=account,
                queue_type=queue_type, queue_options=param_dict)
            cmd_lines += "".join(param_dict["cmds"])
    return cmd_lines, head_lines
def evaluate_V0(alloy={"Nb": 1, "Ti": 1, "V": 1, "Zr": 1}, norm=True):
    '''
    Calculate the volume of alloys using the rule of mixtures
    (volume conservation).

    Each element contributes its elemental molar volume (atomic mass over
    elemental solid density) weighted by its -- optionally normalized --
    amount.

    Parameter
        alloy: dict
            The alloy composition, element symbol -> amount. The dict is NOT
            modified; the previous implementation normalized it in place,
            which also corrupted the shared mutable default argument.
        norm: bool
            If True, amounts are normalized to sum to one, so the result is
            the average volume per atom.
    Return
        v_alloy: float, unit: angstrom^3
            The volume of the alloy; with norm=True it is the volume per atom.
    '''
    Avogadro = scipy.constants.Avogadro
    # Normalization divisor; 1.0 leaves the raw amounts untouched.
    total_amount = sum(alloy.values()) if norm else 1.0
    v_alloy = 0.0
    for ele, amount in alloy.items():
        # Elemental solid density converted to g/cm^3 -- presumably pymatgen
        # reports kg/m^3 here; TODO confirm against pymatgen docs.
        density = float(Element(ele).density_of_solid)/1000.
        mass = float(Element(ele).atomic_mass)
        # Molar volume (cm^3/mol) weighted by the (normalized) amount.
        v_alloy = v_alloy + mass/density * (amount / total_amount)
    # Convert cm^3/mol to angstrom^3 per atom/formula unit (1 cm^3 = 1e24 A^3).
    return 1e24/Avogadro * v_alloy
def creat_folders(folder):
    """
    Create *folder* (including parents) unless it already exists.

    An existing path is left untouched and a warning is printed instead.
    """
    if not os.path.exists(folder):
        os.makedirs(folder)
        return
    print("WARNING: " + folder + " exists!")
def issublist(listi, listAll):
    """
    Test whether the 1-D list *listi* equals any element of the 2-D
    list *listAll*.

    Parameter
        listi: list (1D)
        listAll: list (2D)
    Return
        flag: bool
            True as soon as an equal sublist is found, otherwise False.
    """
    for candidate in listAll:
        if listi == candidate:
            return True
    return False
def creat_alloy(sys_alloy=('Nb', 'Ti'), n_point=10):
    """
    Create the non-redundant set of Ax(BC...)1-x style alloys for a system
    by interpolating compositions.

    E.g. with n_point = 10:
      * binary:  AB9, A2B8, A3B7, ...
      * ternary: A(BC)9, A2(BC)8, ..., B(AC)9, B2(AC)8, ...
    Compositions that already occurred (e.g. A5B5 vs B5A5) are skipped.

    Parameter
        sys_alloy: list or tuple
            The alloy system (element symbols).
        n_point: int
            Number of interpolation intervals.
    Return
        alloy_dict: dict
            Alloy name -> composition list normalized to sum to one.
    """
    sys_alloy = list(sys_alloy)
    n_ele = len(sys_alloy)
    seen = []
    alloy_dict = {}
    for idx, main_ele in enumerate(sys_alloy):
        # All remaining elements, in order, form the "(BC...)" group.
        others = [e for j, e in enumerate(sys_alloy) if j != idx]
        for step in range(1, n_point):
            # Suppress the digit "1" in names (A1B9 -> AB9).
            left = "" if step == 1 else str(step)
            right = "" if n_point - step == 1 else str(n_point - step)
            if n_ele == 2:
                name = main_ele + left + "".join(others) + right
            else:
                name = main_ele + left + "(" + "".join(others) + ")" + right
            # Equal share of the remainder for every non-main element.
            minor = round((1. - float(step)/float(n_point))/float(n_ele - 1), 3)
            major = round(1 - (n_ele - 1) * minor, 3)
            comp = [major if j == idx else minor for j in range(n_ele)]
            if comp not in seen:
                seen.append(comp)
                alloy_dict[name] = comp
    return alloy_dict
def find_pbs_script(folder="./", jobname="NbTiVZr", latname="bcc", ext="sh"):
    """
    Collect queue-script names for the given lattice label(s).

    For each lattice label the canonical "<lat>.<ext>" name is listed first
    (whether or not it exists on disk), followed by every file in *folder*
    whose name ends with *ext*, contains the label, and is not the canonical
    name itself.

    Note: *jobname* is currently unused; kept for interface stability.
    """
    lat_labels = [latname] if isinstance(latname, str) else latname
    entries = os.listdir(folder)
    scripts = []
    for lat in lat_labels:
        canonical = lat + "." + ext
        scripts.append(canonical)
        scripts.extend(name for name in entries
                       if name.endswith(ext) and lat in name and name != canonical)
    return scripts
def wflow_eqv(eqv_folder="eqv", jobname="NbTiVZr", latname="bcc", queue_type="pbs",
        emtopath="./", sws0=3.0, sws_percent=10.0, sws_step=11,
        concs=[[0.25, 0.25, 0.25, 0.25]], species=[['Nb','Ti','V','Zr']]):
    """
    Workflow for the equilibrium-volume calculation.

    Creates *eqv_folder*, generates the EMTO inputs and the post-processing
    script inside it, then merges the per-job batch scripts into a single
    submission script "<jobname>_<eqv_folder>.<queue_type>" written in the
    parent directory. Note: this function temporarily chdir's into
    *eqv_folder* and back.

    Parameter
        eqv_folder: str
            The folder name for the inputs and results.
        jobname: str
            The jobname prefix.
        latname: str
            The lattice name ("bcc", "fcc" or "hcp").
        queue_type: str
            Queue system type (only "pbs" is supported downstream).
        emtopath: str
            Working path for input generation (assumed "./" relative to
            eqv_folder after the chdir).
        sws0, sws_percent, sws_step:
            Volume-scan parameters, see input_gen_eqv.
        concs: list-2D
        species: list-2D
    Return
        None
    """
    creat_folders(eqv_folder)
    os.chdir(eqv_folder)
    input_gen_eqv(jobname=jobname, emtopath=emtopath, latname=latname, sws0=sws0, sws_percent=sws_percent,
        sws_step=sws_step, concs=concs, species = species)
    write_eqv_post(jobname=jobname, folder=emtopath, lat=latname, concs=concs, species=species, DLM=False)
    # Collect the batch scripts pyemto generated and merge their commands.
    pbs_scripts = find_pbs_script(folder=emtopath, jobname=jobname, latname=latname, ext=queue_type)
    cmd_lines, head_lines = merge_batchfile(pbs_scripts, latpath=emtopath, jobname=jobname, queue_type=queue_type)
    # Assemble one submission script: header, cd into the folder, all job
    # commands, then the post-processing step.
    script_lines = head_lines
    script_lines += "\n#Change to " + eqv_folder + " folder.\n"
    script_lines += "cd " + eqv_folder + "\n"
    script_lines += cmd_lines
    script_lines += "\npython " + jobname + "_eqv_post.py\n"
    script_lines += "cd .."
    os.chdir("..")
    with open(jobname + "_" + eqv_folder + "." + queue_type, "w+") as fid:
        fid.write(script_lines)
def wflow_elastic(elastic_folder="elastic", jobname="NbTiVZr", latname="bcc", queue_type="pbs",
        emtopath="./", sws0=3.06, B=115.86, delta_max=0.05, delta_step=6,
        concs=[[0.25, 0.25, 0.25, 0.25]], species=[['Nb','Ti','V','Zr']]):
    """
    Workflow for the elastic-constant calculation at a fixed volume.

    Creates *elastic_folder*, generates the distorted-lattice EMTO inputs and
    the post-processing script inside it, then merges the per-distortion
    batch scripts into one submission script
    "<jobname>_<elastic_folder>.<queue_type>" written in the parent
    directory. Note: this function temporarily chdir's into *elastic_folder*
    and back.

    Parameter
        elastic_folder: str
        jobname: str
        latname: str
            The lattice name ("bcc", "fcc" or "hcp").
        queue_type: str
        emtopath: str
        sws0: float
            Equilibrium Wigner-Seitz radius (from the eqv workflow).
        B: float
            Bulk modulus in GPa (from the eqv workflow's EOS fit).
        delta_max, delta_step:
            Distortion parameters, see input_gen_elastic.
        concs: list-2D
        species: list-2D
    Return
        None
    """
    creat_folders(elastic_folder)
    os.chdir(elastic_folder)
    input_gen_elastic(jobname=jobname, emtopath=emtopath, latname=latname, sws0=sws0, delta_max=delta_max,
        delta_step=delta_step, concs=concs, species=species)
    write_elastic_post(jobname=jobname, folder=emtopath, latname=latname, sws0=sws0, delta_max=delta_max,
        delta_step=delta_step, B=B, concs=concs, species=species, DLM=False)
    # Reconstruct the distorted-lattice labels (same deltas as the input
    # generator, first delta bumped to 0.001).
    deltas = np.linspace(0, delta_max, delta_step)
    deltas[0] = 0.001
    lat_name = []
    for i in range(2):
        for delta in deltas:
            # NOTE(review): input_gen_elastic names lattices without the
            # extra underscore (latname + '{0}_{1:4.2f}'), so these labels
            # may not match the generated script names -- verify.
            lat_name.append(latname + "_" + '{0}_{1:4.2f}'.format(i+1, delta))
    pbs_scripts = find_pbs_script(folder=emtopath, jobname=jobname, latname=lat_name, ext=queue_type)
    cmd_lines, head_lines = merge_batchfile(pbs_scripts, latpath=emtopath, jobname=jobname, queue_type=queue_type)
    # Assemble one submission script: header, cd into the folder, all job
    # commands, then the post-processing step.
    script_lines = head_lines
    script_lines += "\n#Change to " + elastic_folder + " folder.\n"
    script_lines += "cd " + elastic_folder + "\n"
    script_lines += cmd_lines
    script_lines += "\npython " + jobname + "_elastic_post.py\n"
    script_lines += "cd .."
    os.chdir("..")
    with open(jobname + "_" + elastic_folder + "." + queue_type, "w+") as fid:
        fid.write(script_lines)
def wflow_eqv_elastic(jobname="NbTiVZr", latname="bcc", queue_type="pbs", emtopath="./", sws0=3.0,
        sws_percent=10.0, sws_step=11, delta_max=0.05, delta_step=6,
        concs=[[0.25, 0.25, 0.25, 0.25]], species=[['Nb','Ti','V','Zr']]):
    """
    Combined workflow: equilibrium volume first, then elastic constants.

    Runs the eqv workflow, executes its merged script's commands locally via
    os.system (skipping 'cd' lines, since we chdir in-process instead), reads
    the resulting <jobname>-eqv.json for the fitted sws and bulk modulus, and
    feeds those into the elastic workflow, which is then executed the same
    way. Assumes emtopath == "./" (results are located relative to the cwd).

    Parameter
        jobname, latname, queue_type, emtopath:
            See wflow_eqv / wflow_elastic.
        sws0, sws_percent, sws_step:
            Volume-scan parameters for the eqv stage.
        delta_max, delta_step:
            Distortion parameters for the elastic stage.
        concs: list-2D
        species: list-2D
    Return
        None
    """
    elastic_folder = "elastic"
    eqv_folder = "eqv"
    wflow_eqv(eqv_folder=eqv_folder, jobname=jobname, latname=latname, queue_type=queue_type,
        emtopath=emtopath, sws0=sws0, sws_percent=sws_percent, sws_step=sws_step,
        concs=concs, species=species)
    # Re-parse the merged submission script and run its payload locally.
    param_dict = parse_queue_script(template=jobname + "_" + eqv_folder + "." + queue_type, queue_type="pbs")
    os.chdir(eqv_folder)
    for cmd in param_dict["cmds"]:
        if cmd.startswith("cd"):
            pass
        else:
            os.system(cmd.strip())
    os.chdir("..")
    #Note: only work in current, the emtopath="./"
    eqv_result = loadfn(os.path.join(eqv_folder, jobname+"-eqv.json"))
    wflow_elastic(elastic_folder=elastic_folder, jobname=jobname, latname=latname, queue_type=queue_type,
        emtopath=emtopath, sws0=eqv_result["sws"], B=eqv_result["B0"], delta_max=delta_max, delta_step=delta_step,
        concs=concs, species=species)
    # Same local execution for the elastic stage.
    param_dict = parse_queue_script(template=jobname + "_" + elastic_folder + "." + queue_type, queue_type="pbs")
    os.chdir(elastic_folder)
    for cmd in param_dict["cmds"]:
        if cmd.startswith("cd"):
            pass
        else:
            os.system(cmd.strip())
    os.chdir("..")
def write_input(**kwargs):
    """Serialize the given keyword arguments to ./input.json, pretty-printed."""
    serialized = json.dumps(kwargs, indent=4)
    with open("input.json", "w+") as handle:
        handle.write(serialized)
def batch_run_emto(files_to_copy=['run_emto_elastic.py', ' NbTiVZr.pbs']):
    """
    Set up a directory tree of EMTO runs for all element combinations.

    For each system size between NEleMin and NEleMax (exclusive) and each
    element combination, creates <unary|binary|ternary>/<system>/<alloy>/
    folders, estimates the starting Wigner-Seitz radius from the rule of
    mixtures, writes an input.json, and copies the helper files into each
    alloy folder. Unary systems are currently skipped (placeholder branch).

    Parameter
        files_to_copy: list of str
            Files copied into every alloy folder (resolved to absolute paths
            first). NOTE(review): the default ' NbTiVZr.pbs' has a leading
            space -- probably a typo; confirm the intended file name.
    Return
        None
    """
    phase = 'bcc'
    #The number of elements is range from (NEleMin, NEleMax), 0 for unary, 1 for binary
    NEleMin = 0
    NEleMax = 3
    # Resolve before any chdir so the copies still find the sources.
    files_to_copy = [os.path.abspath(filei) for filei in files_to_copy]
    eles = ['Al', 'Ti', 'V', 'Cr', 'Zr', 'Nb', 'Mo', 'Hf', 'Ta', 'W']
    folderName = ['unary', 'binary', 'ternary', 'quaternary', 'quinary']
    # Interpolation intervals per system size (index = n_elements - 1).
    n_point = [1, 10, 6, 4, 5]
    bohr2A = 0.529177249
    if phase == 'bcc':
        Natom = 2
    elif phase == 'fcc':
        Natom = 4
    elif phase == 'hcp':
        Natom = 2
    else:
        raise Exception('Current {phase} is not supported.'.format(phase=phase))
    # NOTE(review): Natom is computed but never used below (v0 is already
    # per atom) -- confirm whether it was meant to enter the sws0 formula.
    for i in range(NEleMin, NEleMax):
        creat_folders(folderName[i])
        os.chdir(folderName[i])
        ele_comb = list(combinations(eles, i+1))
        for sys_alloy in ele_comb:
            subFolderName = "".join(sys_alloy)
            creat_folders(subFolderName)
            if i > 0:
                os.chdir(subFolderName)
                alloy_dict = creat_alloy(sys_alloy=sys_alloy, n_point=n_point[i])
                for alloy in alloy_dict:
                    creat_folders(alloy)
                    os.chdir(alloy)
                    alloy_i = {sys_alloy[i]: alloy_dict[alloy][i] for i in range(len(sys_alloy))}
                    # Per-atom volume estimate -> sws0 in Bohr via
                    # (4/3) pi sws^3 = v0.
                    v0 = evaluate_V0(alloy=alloy_i, norm=True)
                    sws0 = math.pow(3.*v0 / (4.*np.pi), 1./3.)/bohr2A
                    write_input(jobname=alloy, latname="bcc", queue_type="pbs", emtopath="./", sws0=sws0,
                        sws_percent=10.0, sws_step=11, delta_max=0.05, delta_step=6,
                        concs=[alloy_dict[alloy]], species=[sys_alloy])
                    for filei in files_to_copy:
                        shutil.copyfile(filei, os.path.join("./", os.path.basename(filei)))
                    os.chdir("..")
                os.chdir("..")
            else:
                #unary
                pass
        os.chdir("..")
#print(straini)
#wflow_eqv()
#batch_run_emto() | [
"math.sqrt",
"numpy.array",
"pyemto.utilities.distort",
"os.path.exists",
"re.split",
"os.listdir",
"pymatgen.core.periodic_table.Element",
"numpy.linspace",
"os.path.isfile",
"operator.eq",
"pyemto.latticeinputs.batch.batch_head",
"os.makedirs",
"math.pow",
"os.path.join",
"os.chdir",
... | [((2387, 2445), 'pyemto.examples.emto_input_generator.EMTO', 'EMTO', ([], {'folder': 'emtopath', 'EMTOdir': '"""/storage/home/mjl6505/bin"""'}), "(folder=emtopath, EMTOdir='/storage/home/mjl6505/bin')\n", (2391, 2445), False, 'from pyemto.examples.emto_input_generator import EMTO\n'), ((3022, 3059), 'numpy.linspace', 'np.linspace', (['swsmin', 'swsmax', 'sws_step'], {}), '(swsmin, swsmax, sws_step)\n', (3033, 3059), True, 'import numpy as np\n'), ((3610, 3647), 'numpy.linspace', 'np.linspace', (['(0)', 'delta_max', 'delta_step'], {}), '(0, delta_max, delta_step)\n', (3621, 3647), True, 'import numpy as np\n'), ((16160, 16182), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (16174, 16182), False, 'import os\n'), ((18592, 18610), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (18602, 18610), False, 'import os\n'), ((19723, 19743), 'os.chdir', 'os.chdir', (['eqv_folder'], {}), '(eqv_folder)\n', (19731, 19743), False, 'import os\n'), ((20498, 20512), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (20506, 20512), False, 'import os\n'), ((21289, 21313), 'os.chdir', 'os.chdir', (['elastic_folder'], {}), '(elastic_folder)\n', (21297, 21313), False, 'import os\n'), ((21681, 21718), 'numpy.linspace', 'np.linspace', (['(0)', 'delta_max', 'delta_step'], {}), '(0, delta_max, delta_step)\n', (21692, 21718), True, 'import numpy as np\n'), ((22382, 22396), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (22390, 22396), False, 'import os\n'), ((23126, 23146), 'os.chdir', 'os.chdir', (['eqv_folder'], {}), '(eqv_folder)\n', (23134, 23146), False, 'import os\n'), ((23285, 23299), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (23293, 23299), False, 'import os\n'), ((23799, 23823), 'os.chdir', 'os.chdir', (['elastic_folder'], {}), '(elastic_folder)\n', (23807, 23823), False, 'import os\n'), ((23962, 23976), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (23970, 23976), False, 'import os\n'), ((1603, 1667), 
'numpy.array', 'np.array', (['[[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]]'], {}), '([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]])\n', (1611, 1667), True, 'import numpy as np\n'), ((1725, 1752), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (1733, 1752), True, 'import numpy as np\n'), ((3691, 3755), 'numpy.array', 'np.array', (['[[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]]'], {}), '([[-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0.5, 0.5, -0.5]])\n', (3699, 3755), True, 'import numpy as np\n'), ((3813, 3840), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (3821, 3840), True, 'import numpy as np\n'), ((14825, 14845), 'os.path.isfile', 'os.path.isfile', (['file'], {}), '(file)\n', (14839, 14845), False, 'import os\n'), ((15031, 15154), 'pyemto.latticeinputs.batch.batch_head', 'batch_head', (['jobname'], {'latpath': 'latpath', 'runtime': 'runtime', 'account': 'account', 'queue_type': 'queue_type', 'queue_options': 'param_dict'}), '(jobname, latpath=latpath, runtime=runtime, account=account,\n queue_type=queue_type, queue_options=param_dict)\n', (15041, 15154), False, 'from pyemto.latticeinputs.batch import batch_head\n'), ((16251, 16270), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (16262, 16270), False, 'import os\n'), ((16586, 16611), 'operator.eq', 'operator.eq', (['listi', 'listj'], {}), '(listi, listj)\n', (16597, 16611), False, 'import operator\n'), ((23375, 23422), 'os.path.join', 'os.path.join', (['eqv_folder', "(jobname + '-eqv.json')"], {}), "(eqv_folder, jobname + '-eqv.json')\n", (23387, 23422), False, 'import os\n'), ((24055, 24087), 'json.dump', 'json.dump', (['kwargs', 'fid'], {'indent': '(4)'}), '(kwargs, fid, indent=4)\n', (24064, 24087), False, 'import json\n'), ((24324, 24346), 'os.path.abspath', 'os.path.abspath', (['filei'], {}), '(filei)\n', (24339, 24346), False, 'import os\n'), ((24878, 24901), 'os.chdir', 'os.chdir', (['folderName[i]'], 
{}), '(folderName[i])\n', (24886, 24901), False, 'import os\n'), ((26106, 26120), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (26114, 26120), False, 'import os\n'), ((1817, 1878), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]]'], {}), '([[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]])\n', (1825, 1878), True, 'import numpy as np\n'), ((1942, 1969), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (1950, 1969), True, 'import numpy as np\n'), ((3905, 3966), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]]'], {}), '([[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]])\n', (3913, 3966), True, 'import numpy as np\n'), ((4030, 4057), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (4038, 4057), True, 'import numpy as np\n'), ((5564, 5592), 'pyemto.utilities.distort', 'distort', (['dist_matrix', 'prims0'], {}), '(dist_matrix, prims0)\n', (5571, 5592), False, 'from pyemto.utilities import distort\n'), ((5613, 5641), 'pyemto.utilities.distort', 'distort', (['dist_matrix', 'basis0'], {}), '(dist_matrix, basis0)\n', (5620, 5641), False, 'from pyemto.utilities import distort\n'), ((5921, 5979), 'pyemto.examples.emto_input_generator.EMTO', 'EMTO', ([], {'folder': 'emtopath', 'EMTOdir': '"""/storage/home/mjl6505/bin"""'}), "(folder=emtopath, EMTOdir='/storage/home/mjl6505/bin')\n", (5925, 5979), False, 'from pyemto.examples.emto_input_generator import EMTO\n'), ((6684, 6698), 'numpy.array', 'np.array', (['sws0'], {}), '(sws0)\n', (6692, 6698), True, 'import numpy as np\n'), ((24926, 24951), 'itertools.combinations', 'combinations', (['eles', '(i + 1)'], {}), '(eles, i + 1)\n', (24938, 24951), False, 'from itertools import combinations\n'), ((2187, 2257), 'numpy.array', 'np.array', (['[[1.0 / 3.0, 2.0 / 3.0, 0.25], [2.0 / 3.0, 1.0 / 3.0, 0.75]]'], {}), '([[1.0 / 3.0, 2.0 / 3.0, 0.25], [2.0 / 3.0, 1.0 / 3.0, 0.75]])\n', (2195, 2257), 
True, 'import numpy as np\n'), ((4275, 4345), 'numpy.array', 'np.array', (['[[1.0 / 3.0, 2.0 / 3.0, 0.25], [2.0 / 3.0, 1.0 / 3.0, 0.75]]'], {}), '([[1.0 / 3.0, 2.0 / 3.0, 0.25], [2.0 / 3.0, 1.0 / 3.0, 0.75]])\n', (4283, 4345), True, 'import numpy as np\n'), ((5086, 5164), 'numpy.array', 'np.array', (['[[1 + delta, 0, 0], [0, 1 - delta, 0], [0, 0, 1 / (1 - delta ** 2)]]'], {}), '([[1 + delta, 0, 0], [0, 1 - delta, 0], [0, 0, 1 / (1 - delta ** 2)]])\n', (5094, 5164), True, 'import numpy as np\n'), ((12813, 12839), 're.split', 're.split', (['"""\\\\s+"""', 'eachline'], {}), "('\\\\s+', eachline)\n", (12821, 12839), False, 'import re\n'), ((15874, 15886), 'pymatgen.core.periodic_table.Element', 'Element', (['ele'], {}), '(ele)\n', (15881, 15886), False, 'from pymatgen.core.periodic_table import Element\n'), ((25112, 25135), 'os.chdir', 'os.chdir', (['subFolderName'], {}), '(subFolderName)\n', (25120, 25135), False, 'import os\n'), ((26021, 26035), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (26029, 26035), False, 'import os\n'), ((5315, 5385), 'numpy.array', 'np.array', (['[[1, delta, 0], [delta, 1, 0], [0, 0, 1 / (1 - delta ** 2)]]'], {}), '([[1, delta, 0], [delta, 1, 0], [0, 0, 1 / (1 - delta ** 2)]])\n', (5323, 5385), True, 'import numpy as np\n'), ((15816, 15828), 'pymatgen.core.periodic_table.Element', 'Element', (['ele'], {}), '(ele)\n', (15823, 15828), False, 'from pymatgen.core.periodic_table import Element\n'), ((25320, 25335), 'os.chdir', 'os.chdir', (['alloy'], {}), '(alloy)\n', (25328, 25335), False, 'import os\n'), ((25990, 26004), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (25998, 26004), False, 'import os\n'), ((25524, 25569), 'math.pow', 'math.pow', (['(3.0 * v0 / (4.0 * np.pi))', '(1.0 / 3.0)'], {}), '(3.0 * v0 / (4.0 * np.pi), 1.0 / 3.0)\n', (25532, 25569), False, 'import math\n'), ((2107, 2121), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (2116, 2121), False, 'import math\n'), ((4195, 4209), 'math.sqrt', 
'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (4204, 4209), False, 'import math\n'), ((25944, 25967), 'os.path.basename', 'os.path.basename', (['filei'], {}), '(filei)\n', (25960, 25967), False, 'import os\n'), ((2064, 2078), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (2073, 2078), False, 'import math\n'), ((4152, 4166), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (4161, 4166), False, 'import math\n')] |
import os
import numpy as np
import pandas as pd
from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
def tabulate_events(dir_path):
summary_iterators = [EventAccumulator(os.path.join(dir_path, dname)).Reload() for dname in os.listdir(dir_path)]
tags = summary_iterators[0].Tags()['scalars']
for it in summary_iterators:
assert it.Tags()['scalars'] == tags
out = defaultdict(list)
steps = []
for tag in tags:
steps = [e.step for e in summary_iterators[0].Scalars(tag)]
wall_times = [e.wall_time for e in summary_iterators[0].Scalars(tag)]
for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
assert len(set(e.step for e in events)) == 1
out[tag].append([e.value for e in events])
return out, steps, wall_times
def to_csv(log_dir_path, csv_dir_path):
dirs = os.listdir(log_dir_path)
d, steps, wall_times = tabulate_events(log_dir_path)
tags, values = zip(*d.items())
np_values = np.array(values)
csv_columns = ['step', 'wall_time']
csv_columns.extend(dirs)
print('extend', ['step', 'wall_time'].extend(dirs))
print('csv_columns', csv_columns)
for index, tag in enumerate(tags):
# df = pd.DataFrame(np_values[index], index=steps, columns=dirs)
df = pd.DataFrame(np.vstack((steps, wall_times, np_values[index].T)).T, columns=csv_columns)
df.to_csv(get_csv_file_path(csv_dir_path, tag), index=False)
def get_csv_file_path(csv_dir_path, tag):
file_name = tag.replace("/", "_") + '.csv'
folder_path = os.path.join(csv_dir_path, 'csv')
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return os.path.join(folder_path, file_name)
if __name__ == '__main__':
# example
train_id = 'SR_1D_CNN_SAMPLE-TRAIN'
log_dir_path = "/var/tensorflow/tsp/sample/logs/{}/".format(train_id)
csv_dir_path = "/var/tensorflow/tsp/sample/history/{}/".format(train_id)
to_csv(log_dir_path, csv_dir_path) | [
"os.path.exists",
"os.listdir",
"os.makedirs",
"os.path.join",
"numpy.array",
"collections.defaultdict",
"numpy.vstack"
] | [((460, 477), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (471, 477), False, 'from collections import defaultdict\n'), ((940, 964), 'os.listdir', 'os.listdir', (['log_dir_path'], {}), '(log_dir_path)\n', (950, 964), False, 'import os\n'), ((1074, 1090), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (1082, 1090), True, 'import numpy as np\n'), ((1646, 1679), 'os.path.join', 'os.path.join', (['csv_dir_path', '"""csv"""'], {}), "(csv_dir_path, 'csv')\n", (1658, 1679), False, 'import os\n'), ((1764, 1800), 'os.path.join', 'os.path.join', (['folder_path', 'file_name'], {}), '(folder_path, file_name)\n', (1776, 1800), False, 'import os\n'), ((1691, 1718), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (1705, 1718), False, 'import os\n'), ((1728, 1752), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (1739, 1752), False, 'import os\n'), ((298, 318), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (308, 318), False, 'import os\n'), ((1393, 1443), 'numpy.vstack', 'np.vstack', (['(steps, wall_times, np_values[index].T)'], {}), '((steps, wall_times, np_values[index].T))\n', (1402, 1443), True, 'import numpy as np\n'), ((245, 274), 'os.path.join', 'os.path.join', (['dir_path', 'dname'], {}), '(dir_path, dname)\n', (257, 274), False, 'import os\n')] |
#!/usr/bin/env python3
"""
Module to take in .mat MatLab files and generate spectrogram images via Short Time Fourier Transform
---------- ------------------------------ --------------------
| Data.mat | -> | Short-Time Fourier Transform | -> | Spectrogram Images |
---------- ------------------------------ --------------------
"""
from scipy import signal # imports to make spectrogram images
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import shutil
import numpy as np
import os
import scipy.io
import argparse
import glob
import math
np.seterr(divide='raise')
KEYS = ['id', 'tag', 'nS', 'sampFreq', 'marker', 'timestamp', 'data', 'trials']
CWD = os.path.dirname(os.path.realpath(__file__))
# Ranges of data points representing a certain mental state e.g. focused, unfocused or drowsy
FOCUSED_DATA = [0, 76801]
UNFOCUSED_DATA = [76801, 153600]
DROWSY_DATA = [153601, 230400]
DATA_FILES_PATH = os.path.join(CWD, 'data') # constant representing directory path to data files
STATE_DATA_OUTPUT = os.path.join(CWD, 'state-data')
CHANNELS = [4, 5, 8, 9, 10, 11, 16]
MAT = '.mat' # suffix of input files
FREQUENCY = 128 # frequency rate is 128Hz
M = 64
MAX_AMP = 2 # Max amplitude for short-time fourier transform graph
def handle_arguments():
"""
Function used to set the arguments that can be passed to the script
:return: the Parsed arguments
"""
parser = argparse.ArgumentParser(description='Split EEG data preprocess and create spectrograms')
parser.add_argument('-s', '--split', action='store_true', default=False, dest='split_data',
help='Flag used to split the data: Focused, Unfocused, and Drowsy data sets')
parser.add_argument('-i', '--images', dest='state', choices=['FOCUSED', 'UNFOCUSED', 'DROWSY', 'ALL'],
help='Flag used to determine what mental state we want to create spectrogram images for')
return parser.parse_args()
def handle_split_data(input_files, channels):
"""
Function used to handle the split of data by mental state
:return:
"""
# create directory where we will output split data
create_output_directory(STATE_DATA_OUTPUT)
for data_file in input_files:
# data from a single file
data = load_data_from_file(data_file)
# name of the output image file
output_basename = os.path.basename(data_file)
output_basename = output_basename.split('.')[0]
# full path location of directory we want to create for data file we are analyzing
output_dirpath = os.path.join(STATE_DATA_OUTPUT, output_basename)
# make a directory for data file being analyzed in order to generate images for all channels of data file.
# e.g. ./output/eeg_record2/
os.mkdir(output_dirpath)
for channel in channels:
channel_dir = os.path.join(output_dirpath, str(channel))
os.mkdir(channel_dir)
output_data_to_csv(channel_dir, data[:, channel], FOCUSED_DATA, 'FOCUSED')
output_data_to_csv(channel_dir, data[:, channel], UNFOCUSED_DATA, 'UNFOCUSED')
output_data_to_csv(channel_dir, data[:, channel], DROWSY_DATA, 'DROWSY')
def handle_create_spectrograms(state):
"""
Function used to determine what what state (e.g., FOCUSED, UNFOCUSED, DROWSY, or ALL) spectrogram
images to create
:param state:
:return None:
"""
states = []
if state == 'ALL':
states = ['FOCUSED', 'UNFOCUSED', 'DROWSY']
else:
states = [state]
# need to check if state-data directory exists in path
if not os.path.isdir(STATE_DATA_OUTPUT):
print('Error: Directory \'{0}\' with raw input data doesnt exists!'.format(STATE_DATA_OUTPUT))
exit(1)
# iterate through states that we need to generate spectrogram images for
for curr_state in states:
output_root = os.path.join(CWD, curr_state)
create_output_directory(output_root)
path_to_search = os.path.join(STATE_DATA_OUTPUT, '**', curr_state)
state_data_files = glob.glob(path_to_search, recursive=True)
for filename in state_data_files:
output_subpath = filename.replace(STATE_DATA_OUTPUT, '')
output_subpath = output_subpath.replace(curr_state, '')
output_filepath = '{0}{1}'.format(output_root, output_subpath)
os.makedirs(output_filepath)
# need to get data from file
data = load_raw_state_data(filename)
output_image = os.path.join(output_filepath, curr_state)
# 128, 256, 10mins, ./FOCUSED/eeg_record7/10/FOCUSED
interate_data(FREQUENCY, M, data, output_image)
def get_all_data_files():
"""
Function used to get string values of all files in a directory e.g.
'/create-spectrograms/data/eeg_record1.mat',
'/create-spectrograms/data/eeg_record2.mat', etc.
:return all_files: list of string values of all files in a directory
"""
all_files = []
for dirname, _, filenames in os.walk(DATA_FILES_PATH):
for filename in filenames:
# ignore anything that is not a .mat file
if MAT in filename:
# Example: complete_path_to_file = /create-spectrograms/data/eeg_record1.mat
complete_path_to_file = os.path.join(dirname, filename)
all_files.append(complete_path_to_file)
return all_files
def load_raw_state_data(path_to_file):
"""
Function to load raw state data from a csv file
:param path_to_file: the path to file we want to read
:return data: raw data from file
"""
data = np.genfromtxt(path_to_file)
return data
def load_data_from_file(path_to_file):
"""
Function used to get data from a .mat file
:param path_to_file: path to file we want to read e.g. /create-spectrograms/data/eeg_record2.mat
:return data: numpy 2-D array 25x308868 to represent all data points gathered in 25 channels
"""
raw_file = scipy.io.loadmat(path_to_file)
raw_data = raw_file['o'][0, 0]
data = raw_data[6]
return data
def generate_stft_from_data(channel, fs, m, max_amp, data, output_filepath):
"""
Function used to generate the Fast-Time Fourier Transform (stft) from data
:param channel: which channel of the data we are analyzing. Integer value between 0 - 24
:param fs: frequency sample rate e.g. 128 Hz
:param m: total number of points in window e.g. 1920
:param max_amp: max amplitude for stft plot
:param data: complete dataset from input file
:param output_filepath: path to export file of short time fourier transform plot of data
:return None:
"""
f, t, Zxx = signal.stft(data[:, channel], fs, window='blackman', nperseg=m)
plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=max_amp)
plt.title('STFT Magnitude')
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.savefig(output_filepath)
def generate_spectrogram_from_data(fs, m, data, output_filepath):
"""
Function used to generate Spectrogram images
:param fs: frequency sample rate e.g. 128 Hz
:param m: total number of points in window e.g. 128
:param data: complete dataset from an input file
:param output_filepath: path to export file of spectrogram
:return None:
"""
overlap = math.floor(m * 0.9)
f, t, Sxx = signal.spectrogram(data, fs, noverlap=overlap, window=signal.tukey(m, 0.25))
try:
plt.pcolormesh(t, f, np.log10(Sxx))
plt.set_cmap('jet')
plt.axis('off')
plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, dpi=35)
plt.clf()
except FloatingPointError as e:
print('Caught divide by 0 error: {0}'.format(output_filepath))
return
def generate_graph_from_data(channel, data, output_filepath):
"""
Function used to generate time domain graph from channel data
:param channel: specific channel lead we are analyzing
:param data: complete dataset from an input file
:param output_filepath: path to export file of time domain data
:return None:
"""
x = np.linspace(0, len(data[:, channel]) / 512., len(data[:, channel]))
y = data[:, channel]
plt.plot(x, y, color='blue')
plt.title('Lead: {}'.format(str(channel)))
plt.xlabel('Time [secs]')
plt.ylabel('MicroVolts [muV]')
plt.savefig(output_filepath)
def interate_data(fs, m, data, output_file):
"""
Function used to interate through data and generate spectrogram images
:param fs:
:param m:
:param data:
:param output_file:
:return:
"""
move = 128
i = 0
j = 256
counter = 1
while j < len(data):
sub_data = data[i:j]
# FOCUSED/eeg_record7/10/FOCUSED_1
sub_output_file = '{0}_{1}'.format(output_file, str(counter))
generate_spectrogram_from_data(fs, m, sub_data, sub_output_file)
i += move
j += move
counter += 1
def create_output_directory(output_path):
"""
Function used to create the output directory for Short-Time Fourier Transform
images created for all input files and each channel of an input file.
if output directory exists, we delete it and recreate it.
:param output_path: path of the output files we want to create e.g. './output'
:return None:
"""
if os.path.isdir(output_path):
shutil.rmtree(output_path, ignore_errors=True)
os.mkdir(output_path)
def output_data_to_csv(output_dir, data, state, filename):
"""
Function used to parse out focused data and output it into csv files
:param output_dir: directory to output data
:param data: to output to csv
:param state: state we are outputting to csv e.g., focused, unfocused or drowsy
:param filename: name of file we are writing data to
:return None:
"""
output_path = os.path.join(output_dir, filename)
try:
parsed_data = np.array(data[range(state[0], state[1])])
except IndexError as e:
print('File: {0}'.format(output_path))
print('Size: {0}'.format(len(data)))
return
np.savetxt(output_path, parsed_data, delimiter=',')
def main():
"""
Main Entrance of program
:return None:
"""
args = handle_arguments()
all_files = get_all_data_files()
if args.split_data:
handle_split_data(all_files, CHANNELS)
if args.state:
handle_create_spectrograms(args.state)
if __name__ == '__main__':
main()
| [
"numpy.log10",
"matplotlib.pyplot.ylabel",
"math.floor",
"numpy.genfromtxt",
"os.walk",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.isdir",
"os.mkdir",
"matplotlib.pyplot.axis",
"glob.glob",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotl... | [((489, 510), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (503, 510), False, 'import matplotlib\n'), ((643, 668), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""raise"""'}), "(divide='raise')\n", (652, 668), True, 'import numpy as np\n'), ((1004, 1029), 'os.path.join', 'os.path.join', (['CWD', '"""data"""'], {}), "(CWD, 'data')\n", (1016, 1029), False, 'import os\n'), ((1104, 1135), 'os.path.join', 'os.path.join', (['CWD', '"""state-data"""'], {}), "(CWD, 'state-data')\n", (1116, 1135), False, 'import os\n'), ((772, 798), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (788, 798), False, 'import os\n'), ((1490, 1583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Split EEG data preprocess and create spectrograms"""'}), "(description=\n 'Split EEG data preprocess and create spectrograms')\n", (1513, 1583), False, 'import argparse\n'), ((5139, 5163), 'os.walk', 'os.walk', (['DATA_FILES_PATH'], {}), '(DATA_FILES_PATH)\n', (5146, 5163), False, 'import os\n'), ((5745, 5772), 'numpy.genfromtxt', 'np.genfromtxt', (['path_to_file'], {}), '(path_to_file)\n', (5758, 5772), True, 'import numpy as np\n'), ((6812, 6875), 'scipy.signal.stft', 'signal.stft', (['data[:, channel]', 'fs'], {'window': '"""blackman"""', 'nperseg': 'm'}), "(data[:, channel], fs, window='blackman', nperseg=m)\n", (6823, 6875), False, 'from scipy import signal\n'), ((6941, 6968), 'matplotlib.pyplot.title', 'plt.title', (['"""STFT Magnitude"""'], {}), "('STFT Magnitude')\n", (6950, 6968), True, 'import matplotlib.pyplot as plt\n'), ((6973, 7001), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (6983, 7001), True, 'import matplotlib.pyplot as plt\n'), ((7006, 7030), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [sec]"""'], {}), "('Time [sec]')\n", (7016, 7030), True, 'import matplotlib.pyplot as plt\n'), ((7036, 7064), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['output_filepath'], {}), '(output_filepath)\n', (7047, 7064), True, 'import matplotlib.pyplot as plt\n'), ((7451, 7470), 'math.floor', 'math.floor', (['(m * 0.9)'], {}), '(m * 0.9)\n', (7461, 7470), False, 'import math\n'), ((8342, 8370), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""blue"""'}), "(x, y, color='blue')\n", (8350, 8370), True, 'import matplotlib.pyplot as plt\n'), ((8422, 8447), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [secs]"""'], {}), "('Time [secs]')\n", (8432, 8447), True, 'import matplotlib.pyplot as plt\n'), ((8452, 8482), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MicroVolts [muV]"""'], {}), "('MicroVolts [muV]')\n", (8462, 8482), True, 'import matplotlib.pyplot as plt\n'), ((8488, 8516), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filepath'], {}), '(output_filepath)\n', (8499, 8516), True, 'import matplotlib.pyplot as plt\n'), ((9478, 9504), 'os.path.isdir', 'os.path.isdir', (['output_path'], {}), '(output_path)\n', (9491, 9504), False, 'import os\n'), ((9566, 9587), 'os.mkdir', 'os.mkdir', (['output_path'], {}), '(output_path)\n', (9574, 9587), False, 'import os\n'), ((9998, 10032), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (10010, 10032), False, 'import os\n'), ((10247, 10298), 'numpy.savetxt', 'np.savetxt', (['output_path', 'parsed_data'], {'delimiter': '""","""'}), "(output_path, parsed_data, delimiter=',')\n", (10257, 10298), True, 'import numpy as np\n'), ((2455, 2482), 'os.path.basename', 'os.path.basename', (['data_file'], {}), '(data_file)\n', (2471, 2482), False, 'import os\n'), ((2656, 2704), 'os.path.join', 'os.path.join', (['STATE_DATA_OUTPUT', 'output_basename'], {}), '(STATE_DATA_OUTPUT, output_basename)\n', (2668, 2704), False, 'import os\n'), ((2866, 2890), 'os.mkdir', 'os.mkdir', (['output_dirpath'], {}), '(output_dirpath)\n', (2874, 2890), False, 'import os\n'), ((3706, 3738), 'os.path.isdir', 'os.path.isdir', 
(['STATE_DATA_OUTPUT'], {}), '(STATE_DATA_OUTPUT)\n', (3719, 3738), False, 'import os\n'), ((3989, 4018), 'os.path.join', 'os.path.join', (['CWD', 'curr_state'], {}), '(CWD, curr_state)\n', (4001, 4018), False, 'import os\n'), ((4091, 4140), 'os.path.join', 'os.path.join', (['STATE_DATA_OUTPUT', '"""**"""', 'curr_state'], {}), "(STATE_DATA_OUTPUT, '**', curr_state)\n", (4103, 4140), False, 'import os\n'), ((4168, 4209), 'glob.glob', 'glob.glob', (['path_to_search'], {'recursive': '(True)'}), '(path_to_search, recursive=True)\n', (4177, 4209), False, 'import glob\n'), ((6902, 6913), 'numpy.abs', 'np.abs', (['Zxx'], {}), '(Zxx)\n', (6908, 6913), True, 'import numpy as np\n'), ((7627, 7646), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""jet"""'], {}), "('jet')\n", (7639, 7646), True, 'import matplotlib.pyplot as plt\n'), ((7655, 7670), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7663, 7670), True, 'import matplotlib.pyplot as plt\n'), ((7680, 7751), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_filepath'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0)', 'dpi': '(35)'}), "(output_filepath, bbox_inches='tight', pad_inches=0, dpi=35)\n", (7691, 7751), True, 'import matplotlib.pyplot as plt\n'), ((7760, 7769), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7767, 7769), True, 'import matplotlib.pyplot as plt\n'), ((9514, 9560), 'shutil.rmtree', 'shutil.rmtree', (['output_path'], {'ignore_errors': '(True)'}), '(output_path, ignore_errors=True)\n', (9527, 9560), False, 'import shutil\n'), ((3006, 3027), 'os.mkdir', 'os.mkdir', (['channel_dir'], {}), '(channel_dir)\n', (3014, 3027), False, 'import os\n'), ((4478, 4506), 'os.makedirs', 'os.makedirs', (['output_filepath'], {}), '(output_filepath)\n', (4489, 4506), False, 'import os\n'), ((4626, 4667), 'os.path.join', 'os.path.join', (['output_filepath', 'curr_state'], {}), '(output_filepath, curr_state)\n', (4638, 4667), False, 'import os\n'), ((7542, 7563), 
'scipy.signal.tukey', 'signal.tukey', (['m', '(0.25)'], {}), '(m, 0.25)\n', (7554, 7563), False, 'from scipy import signal\n'), ((7604, 7617), 'numpy.log10', 'np.log10', (['Sxx'], {}), '(Sxx)\n', (7612, 7617), True, 'import numpy as np\n'), ((5420, 5451), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (5432, 5451), False, 'import os\n')] |
import logging
import cv2
import numpy as np
from image_segmentation.extended_image import ExtendedImage
from image_segmentation.line import Line
LOGGER = logging.getLogger()
class Picture(ExtendedImage):
INDENTATION_THRESHOLD = 50
ARTIFACT_PERCENTAGE_THRESHOLD = 0.08
MINIMUM_LINE_OVERLAP = 0.25
def __init__(self, image, x_axis, y_axis, width, height, preferences=None):
super().__init__(image, x_axis, y_axis, width, height, preferences)
self.lines = []
self.indentation_threshold = self.INDENTATION_THRESHOLD
if self.preferences and self.preferences.show_pic:
cv2.imshow("Full picture", image)
cv2.waitKey(0)
def get_line(self, n):
if 0 >= n or n > len(self.lines):
return None
return self.lines[n - 1]
def get_line_coordinates(self, n):
line = self.get_line(n)
if line:
return line.get_bounding_box()
else:
return {}
def get_character_coordinates(self, n, p):
line = self.get_line(n)
if line:
return line.get_character_coordinates(p)
else:
return {}
def get_segments(self):
self.lines = self._segment_image(self.get_image())
LOGGER.debug("Getting code for the %d lines detected.", len(self.lines))
return self.lines
def get_indentation_threshold(self):
return self.indentation_threshold
def _segment_image(self, gray_image):
lines = []
img = self.get_contoured(gray_image)
sorted_ctrs = self._find_contours(img)
sorted_ctrs = self._merge_subcontours(sorted_ctrs)
if not sorted_ctrs:
return []
# Get average height and width of all lines
average_width = sum(cv2.boundingRect(ctr)[2] for i, ctr in enumerate(sorted_ctrs)) / len(sorted_ctrs)
average_height = sum(cv2.boundingRect(ctr)[3] for i, ctr in enumerate(sorted_ctrs)) / len(sorted_ctrs)
for i, ctr in enumerate(sorted_ctrs):
# Get bounding box
x_axis, y_axis, width, height = cv2.boundingRect(ctr)
# Discard lines which have a very small width or height (based on the threshold)
if width < (average_width * self.ARTIFACT_PERCENTAGE_THRESHOLD) or \
height < (average_height * self.ARTIFACT_PERCENTAGE_THRESHOLD):
continue
roi = gray_image[y_axis:y_axis + height, x_axis:x_axis + width]
mask = self._get_mask(img, sorted_ctrs, i)[y_axis:y_axis + height, x_axis:x_axis + width]
result = cv2.bitwise_and(roi, roi, mask=mask)
if len(self._find_contours(result)) >= 2:
lines.append(Line(result, x_axis, y_axis, width, height, self.preferences))
# Sort lines based on y offset
lines = sorted(lines, key=lambda line: line.get_y())
LOGGER.debug("%d lines detected.", len(lines))
return lines
def _get_mask(self, img, contours, contour_index):
mask = np.zeros_like(img)
cv2.drawContours(mask, contours, contour_index, 255, -1)
return mask
def get_contoured(self, gray_image):
img = np.copy(gray_image)
points, used_contours = self.get_center_points(gray_image)
average_distance, standard_deviation = self.average_node_distance(points)
self.indentation_threshold = average_distance
horizontal_distance = int(1.5 * average_distance + 2 * standard_deviation)
for ctr, point in zip(used_contours, points):
x_axis, y_axis, width, height = cv2.boundingRect(ctr)
x_center, y_center = point[0], point[1]
minimum_height = round(0.9 * min(y_center - y_axis, y_axis + height - y_center))
cv2.rectangle(
img,
(x_center - horizontal_distance, y_center - minimum_height),
(x_center + horizontal_distance, y_center + minimum_height),
(255, 255, 255),
-1
)
return img
def _merge_subcontours(self, sorted_ctrs):
merged = []
for i, ctr in enumerate(sorted_ctrs):
x1, y1, width1, height1 = cv2.boundingRect(ctr)
remove = None
add = True
for merged_ctr in merged:
x2, y2, width2, height2 = cv2.boundingRect(merged_ctr)
if (x1 <= x2 and y1 <= y2 and x1 + width1 >= x2 + width2 and y1 + height1 >= y2 + height2) or \
(y1 < y2 < y1 + height1 and (y1 + height1 - y2) / height1 > self.MINIMUM_LINE_OVERLAP):
merged.append(np.concatenate((ctr, merged_ctr), axis=0))
remove = merged_ctr
add = False
break
if add:
merged.append(ctr)
else:
merged = [x for x in merged if x.shape != remove.shape or not np.equal(x, remove).all()]
return merged
| [
"logging.getLogger",
"numpy.copy",
"cv2.rectangle",
"cv2.drawContours",
"cv2.bitwise_and",
"numpy.equal",
"cv2.imshow",
"cv2.waitKey",
"image_segmentation.line.Line",
"numpy.concatenate",
"numpy.zeros_like",
"cv2.boundingRect"
] | [((158, 177), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (175, 177), False, 'import logging\n'), ((3051, 3069), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (3064, 3069), True, 'import numpy as np\n'), ((3078, 3134), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contours', 'contour_index', '(255)', '(-1)'], {}), '(mask, contours, contour_index, 255, -1)\n', (3094, 3134), False, 'import cv2\n'), ((3211, 3230), 'numpy.copy', 'np.copy', (['gray_image'], {}), '(gray_image)\n', (3218, 3230), True, 'import numpy as np\n'), ((632, 665), 'cv2.imshow', 'cv2.imshow', (['"""Full picture"""', 'image'], {}), "('Full picture', image)\n", (642, 665), False, 'import cv2\n'), ((678, 692), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (689, 692), False, 'import cv2\n'), ((2117, 2138), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (2133, 2138), False, 'import cv2\n'), ((2619, 2655), 'cv2.bitwise_and', 'cv2.bitwise_and', (['roi', 'roi'], {'mask': 'mask'}), '(roi, roi, mask=mask)\n', (2634, 2655), False, 'import cv2\n'), ((3618, 3639), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (3634, 3639), False, 'import cv2\n'), ((3799, 3968), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x_center - horizontal_distance, y_center - minimum_height)', '(x_center + horizontal_distance, y_center + minimum_height)', '(255, 255, 255)', '(-1)'], {}), '(img, (x_center - horizontal_distance, y_center -\n minimum_height), (x_center + horizontal_distance, y_center +\n minimum_height), (255, 255, 255), -1)\n', (3812, 3968), False, 'import cv2\n'), ((4227, 4248), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (4243, 4248), False, 'import cv2\n'), ((4380, 4408), 'cv2.boundingRect', 'cv2.boundingRect', (['merged_ctr'], {}), '(merged_ctr)\n', (4396, 4408), False, 'import cv2\n'), ((2740, 2801), 'image_segmentation.line.Line', 'Line', (['result', 'x_axis', 'y_axis', 'width', 'height', 'self.preferences'], 
{}), '(result, x_axis, y_axis, width, height, self.preferences)\n', (2744, 2801), False, 'from image_segmentation.line import Line\n'), ((1802, 1823), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (1818, 1823), False, 'import cv2\n'), ((1913, 1934), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (1929, 1934), False, 'import cv2\n'), ((4668, 4709), 'numpy.concatenate', 'np.concatenate', (['(ctr, merged_ctr)'], {'axis': '(0)'}), '((ctr, merged_ctr), axis=0)\n', (4682, 4709), True, 'import numpy as np\n'), ((4961, 4980), 'numpy.equal', 'np.equal', (['x', 'remove'], {}), '(x, remove)\n', (4969, 4980), True, 'import numpy as np\n')] |
# train logistic regression on mnist dataest
import numpy as np
import theano.tensor as T
import theano as K
import gzip, cPickle
import matplotlib.pyplot as plt
from random import sample, seed
import os, sys
os.chdir('data/sparse_lstm')
print(os.getcwd())
from sparse_lstm import Sparse_LSTM_wo_O_Gate_v2
from keras.models import Model, Sequential
from keras.layers import Input, Dense
from keras import regularizers
from keras.callbacks import Callback
from keras.engine import Layer
from keras.optimizers import Adadelta
import matplotlib.pyplot as plt
output_dir = __file__[:-3]
if not os.path.exists(output_dir):
os.mkdir(output_dir)
sys.setrecursionlimit(10000)
# load and normalize data
with gzip.open("data/mnist.pkl.gz",'rb') as f:
train_set_mnist, valid_set_mnist, test_set_mnist = cPickle.load(f)
train_set_mnist_img, train_set_mnist_label = train_set_mnist
test_set_mnist_img, test_set_mnist_label = test_set_mnist
print('Original train set of mnist: ' + str(train_set_mnist_img.shape))
print('Original test set of mnist: ' + str(test_set_mnist_img.shape))
train_set_mnist_mean = train_set_mnist_img.mean(axis=0)
train_set_mnist_std = train_set_mnist_img.std(axis=0)
train_set_mnist_img -= train_set_mnist_mean
train_set_mnist_img /= train_set_mnist_std + 1e-10
test_set_mnist_img -= train_set_mnist_mean
test_set_mnist_img /= train_set_mnist_std + 1e-10
# shuffle data
total_img = np.vstack([train_set_mnist_img, test_set_mnist_img])
total_label = np.hstack([train_set_mnist_label, test_set_mnist_label])
np.random.seed(10023)
np.random.shuffle(total_img)
np.random.seed(10023)
np.random.shuffle(total_label)
n = total_img.shape[0]
train_set_mnist_img = total_img[:n//2]
train_set_mnist_label = total_label[:n//2]
test_set_mnist_img = total_img[n//2:]
test_set_mnist_label = total_label[n//2:]
# logistic classifier
def get_logistic_classifier(input_dim, output_dim, C=0.01):
model = Sequential()
model.add(Dense(output_dim, input_dim=input_dim, activation='softmax', W_regularizer=regularizers.l2(C)))
model.compile(optimizer='adadelta', loss='categorical_crossentropy',
metrics=['accuracy'])
return model
# batch generator
def batch_generator(batch_size=512, img=None, label=None, n_classes=10):
img = img.astype(np.float32)
n_total = img.shape[0]
while True:
index = np.array(sample(xrange(n_total), batch_size), dtype=np.int)
img_batch = img[index]
y = np.zeros((batch_size, n_classes), dtype=np.float32)
y[np.arange(batch_size), label[index]] = 1.0
yield img_batch.copy(), y
def get_1hot_lab(label, n_classes):
y = np.zeros((label.shape[0], n_classes), dtype=np.float32)
y[np.arange(label.shape[0]), label] = 1.0
return y
train_set_mnist_label_1hot = get_1hot_lab(train_set_mnist_label, 10)
test_set_mnist_label_1hot = get_1hot_lab(test_set_mnist_label, 10)
class Eval_Callback(Callback):
def __init__(self):
super(Eval_Callback, self).__init__()
self.acc_history_train = []
self.acc_history_test = []
def on_epoch_end(self, epoch, logs={}):
out_train = self.model.evaluate(train_set_mnist_img, train_set_mnist_label_1hot, verbose=0)
out_test = self.model.evaluate(test_set_mnist_img, test_set_mnist_label_1hot, verbose=0)
print('Train => %f, test => %f' % (out_train[1], out_test[1]))
self.acc_history_train.append(out_train)
self.acc_history_test.append(out_test)
callback = Eval_Callback()
model = get_logistic_classifier(28*28, 10)
history = model.fit(train_set_mnist_img, train_set_mnist_label_1hot, batch_size=256, nb_epoch=30, verbose=1, callbacks=[callback])
plt.figure()
plt.plot([x[1] for x in callback.acc_history_train])
plt.plot([x[1] for x in callback.acc_history_test])
plt.legend(['train', 'test'])
plt.xlabel('epoch number')
plt.ylabel('accuracy')
plt.savefig('logistic_reg_mnist.png')
plt.show() | [
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"gzip.open",
"numpy.arange",
"os.path.exists",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"numpy.vstack",
"os.mkdir",
"sys.setrecursionlimit",
"matplotlib.pyplot.savefig",
"keras.models.Sequential",
"keras.regulari... | [((210, 238), 'os.chdir', 'os.chdir', (['"""data/sparse_lstm"""'], {}), "('data/sparse_lstm')\n", (218, 238), False, 'import os, sys\n'), ((648, 676), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (669, 676), False, 'import os, sys\n'), ((1411, 1463), 'numpy.vstack', 'np.vstack', (['[train_set_mnist_img, test_set_mnist_img]'], {}), '([train_set_mnist_img, test_set_mnist_img])\n', (1420, 1463), True, 'import numpy as np\n'), ((1478, 1534), 'numpy.hstack', 'np.hstack', (['[train_set_mnist_label, test_set_mnist_label]'], {}), '([train_set_mnist_label, test_set_mnist_label])\n', (1487, 1534), True, 'import numpy as np\n'), ((1535, 1556), 'numpy.random.seed', 'np.random.seed', (['(10023)'], {}), '(10023)\n', (1549, 1556), True, 'import numpy as np\n'), ((1557, 1585), 'numpy.random.shuffle', 'np.random.shuffle', (['total_img'], {}), '(total_img)\n', (1574, 1585), True, 'import numpy as np\n'), ((1586, 1607), 'numpy.random.seed', 'np.random.seed', (['(10023)'], {}), '(10023)\n', (1600, 1607), True, 'import numpy as np\n'), ((1608, 1638), 'numpy.random.shuffle', 'np.random.shuffle', (['total_label'], {}), '(total_label)\n', (1625, 1638), True, 'import numpy as np\n'), ((3684, 3696), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3694, 3696), True, 'import matplotlib.pyplot as plt\n'), ((3697, 3749), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[1] for x in callback.acc_history_train]'], {}), '([x[1] for x in callback.acc_history_train])\n', (3705, 3749), True, 'import matplotlib.pyplot as plt\n'), ((3750, 3801), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[1] for x in callback.acc_history_test]'], {}), '([x[1] for x in callback.acc_history_test])\n', (3758, 3801), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3831), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {}), "(['train', 'test'])\n", (3812, 3831), True, 'import matplotlib.pyplot as plt\n'), ((3832, 3858), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch number"""'], {}), "('epoch number')\n", (3842, 3858), True, 'import matplotlib.pyplot as plt\n'), ((3859, 3881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (3869, 3881), True, 'import matplotlib.pyplot as plt\n'), ((3882, 3919), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""logistic_reg_mnist.png"""'], {}), "('logistic_reg_mnist.png')\n", (3893, 3919), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3930), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3928, 3930), True, 'import matplotlib.pyplot as plt\n'), ((245, 256), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (254, 256), False, 'import os, sys\n'), ((595, 621), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (609, 621), False, 'import os, sys\n'), ((627, 647), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (635, 647), False, 'import os, sys\n'), ((711, 747), 'gzip.open', 'gzip.open', (['"""data/mnist.pkl.gz"""', '"""rb"""'], {}), "('data/mnist.pkl.gz', 'rb')\n", (720, 747), False, 'import gzip, cPickle\n'), ((808, 823), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (820, 823), False, 'import gzip, cPickle\n'), ((1921, 1933), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1931, 1933), False, 'from keras.models import Model, Sequential\n'), ((2645, 2700), 'numpy.zeros', 'np.zeros', (['(label.shape[0], n_classes)'], {'dtype': 'np.float32'}), '((label.shape[0], n_classes), dtype=np.float32)\n', (2653, 2700), True, 'import numpy as np\n'), ((2461, 2512), 'numpy.zeros', 'np.zeros', (['(batch_size, n_classes)'], {'dtype': 'np.float32'}), '((batch_size, n_classes), dtype=np.float32)\n', (2469, 2512), True, 'import numpy as np\n'), ((2707, 2732), 'numpy.arange', 'np.arange', (['label.shape[0]'], {}), '(label.shape[0])\n', (2716, 2732), True, 'import numpy as np\n'), ((2023, 2041), 'keras.regularizers.l2', 'regularizers.l2', (['C'], {}), 
'(C)\n', (2038, 2041), False, 'from keras import regularizers\n'), ((2523, 2544), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (2532, 2544), True, 'import numpy as np\n')] |
# coding=utf-8
# Parameter estimation for a structural equation model (SEM).
from __future__ import division, print_function, unicode_literals
from psy import sem, data
import numpy as np
# example dataset bundled with the psy package
data_ = data['ex5.11.dat']
# path coefficients among the latent variables on the y side
# (lower-triangular pattern: second factor regressed on the first)
beta = np.array([
    [0, 0],
    [1, 0]
])
# path coefficients from the x-side latent variables into the structural equations
gamma = np.array([
    [1, 1],
    [0, 0]
])
# column indices of the observed indicators on the x side
x = [0, 1, 2, 3, 4, 5]
# loading pattern for the x-side indicators: two factors, three indicators each
lam_x = np.array([
    [1, 0],
    [1, 0],
    [1, 0],
    [0, 1],
    [0, 1],
    [0, 1],
])
# column indices of the observed indicators on the y side
y = [6, 7, 8, 9, 10, 11]
# loading pattern for the y-side indicators: two factors, three indicators each
lam_y = np.array([
    [1, 0],
    [1, 0],
    [1, 0],
    [0, 1],
    [0, 1],
    [0, 1],
])
# fit the model; returns estimated loadings, latent covariance, path
# coefficients and the three error-variance matrices
lam_x, lam_y, phi_x, beta, gamma, var_e, var_e_x, var_e_y = sem(data_, y, x, lam_x, lam_y, beta, gamma)
print('==========内源变量因子载荷=========')
print(lam_x)
print('=========外源变量因子载荷==========')
print(lam_y)
print('===========内源潜变量协方差矩阵=========')
print(phi_x)
print('============路径方程外源变量系数=========')
print(beta)
print('============路径方程内源变量系数=======')
print(gamma)
print('=============路径方程误差方差========')
print(np.diag(var_e))
print('============内源变量误差方差======')
print(np.diag(var_e_x))
print('=============外源变量误差方差=========')
print(np.diag(var_e_y))
| [
"numpy.array",
"numpy.diag",
"psy.sem"
] | [((176, 202), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (184, 202), True, 'import numpy as np\n'), ((222, 248), 'numpy.array', 'np.array', (['[[1, 1], [0, 0]]'], {}), '([[1, 1], [0, 0]])\n', (230, 248), True, 'import numpy as np\n'), ((292, 350), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]'], {}), '([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])\n', (300, 350), True, 'import numpy as np\n'), ((413, 471), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]]'], {}), '([[1, 0], [1, 0], [1, 0], [0, 1], [0, 1], [0, 1]])\n', (421, 471), True, 'import numpy as np\n'), ((560, 603), 'psy.sem', 'sem', (['data_', 'y', 'x', 'lam_x', 'lam_y', 'beta', 'gamma'], {}), '(data_, y, x, lam_x, lam_y, beta, gamma)\n', (563, 603), False, 'from psy import sem, data\n'), ((908, 922), 'numpy.diag', 'np.diag', (['var_e'], {}), '(var_e)\n', (915, 922), True, 'import numpy as np\n'), ((966, 982), 'numpy.diag', 'np.diag', (['var_e_x'], {}), '(var_e_x)\n', (973, 982), True, 'import numpy as np\n'), ((1030, 1046), 'numpy.diag', 'np.diag', (['var_e_y'], {}), '(var_e_y)\n', (1037, 1046), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: fornax
"""
from __future__ import print_function, division
import os
import numpy as np
import pandas as pd
# Move to this script's directory so relative paths resolve, and make the
# parent directory importable (prepare_data1 lives one level up).
os.chdir(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.append(os.path.dirname(os.getcwd()))
import prepare_data1 as prep

DATA_PATH = os.path.join('..', prep.DATA_PATH)

# load EVTF
print('Loading EVTF...')
evtf = pd.read_csv(os.path.join(DATA_PATH, 'evtf.csv'))

###############################################################################
############################# OCCULTATIONS ####################################
###############################################################################
# Occultation/eclipse event types found in the EVTF descriptions.
feats_mars_occultations = ['OCC_PHOBOS', 'PHO_PENUMBRA', 'PHO_UMBRA',
               'MAR_PENUMBRA', 'MAR_UMBRA', 'OCC_MARS_200KM', 'OCC_MARS',
               'OCC_DEIMOS', 'DEI_PENUMBRA']
#, 'DEI_UMBRA'] # DEI_UMBRA has only 2 occurences and they don't match in the
# data well (end->start->end->start)

# one binary indicator column per event type
for feat in feats_mars_occultations:
    evtf[feat] = 0

for feat in feats_mars_occultations:
    print('Processing %s' % feat)
    if feat == 'OCC_MARS':
        # OCC_MARS_200KM descriptions also contain the substring OCC_MARS,
        # so they have to be excluded explicitly here.
        rule_start = lambda x: feat in x and 'START' in x and 'OCC_MARS_200KM' not in x
        rule_end = lambda x: feat in x and 'END' in x and 'OCC_MARS_200KM' not in x
    else:
        rule_start = lambda x: feat in x and 'START' in x
        rule_end = lambda x: feat in x and 'END' in x

    # map() is wrapped in list() so this also works on Python 3, where
    # np.where() cannot consume a lazy map object.
    starts = np.where(list(map(rule_start, evtf.description.values)))[0]
    ends = np.where(list(map(rule_end, evtf.description.values)))[0]

    assert(len(starts) == len(ends))
    assert(starts[0] < ends[0])

    # indicate ongoing events; .loc replaces the removed DataFrame.ix —
    # with the default RangeIndex both slice endpoints are inclusive,
    # matching the old behaviour
    for start, end in zip(starts, ends):
        evtf.loc[start:end, feat] = 1
###### ALL OCCULTATIONS COMBINED ######
def merge_embedded_occ(occ_idx_list):
    """Merge overlapping/embedded (start, end) index intervals.

    ``occ_idx_list`` must be a non-empty sequence of (start, end) pairs
    sorted by start. Returns a list of disjoint intervals covering the
    union of the inputs.
    """
    merged = []
    cur_start, cur_end = occ_idx_list[0]
    for start, end in occ_idx_list:
        if start > cur_end:
            # gap before this interval -> close the current merged one
            merged.append((cur_start, cur_end))
            cur_start, cur_end = start, end
        else:
            # Overlapping or embedded interval: extend, but never shrink.
            # (The original `cur_end = end` truncated the merged interval
            # whenever an event was fully embedded in the previous one.)
            cur_end = max(cur_end, end)
    merged.append((cur_start, cur_end))
    return merged
print('Processing all occultations')
# combined indicator: 1 while ANY occultation/eclipse event is ongoing
evtf['OCC'] = 0
rule_start = lambda x: any(map(lambda y: y in x, feats_mars_occultations)) and 'START' in x
rule_end = lambda x: any(map(lambda y: y in x, feats_mars_occultations)) and 'END' in x
# list() wrappers make the lazy Python-3 map/zip objects usable by
# np.where() and by merge_embedded_occ() (which indexes its argument)
starts = np.where(list(map(rule_start, evtf.description.values)))[0]
ends = np.where(list(map(rule_end, evtf.description.values)))[0]
assert(len(starts) == len(ends))
assert(starts[0] < ends[0])
# collapse overlapping/embedded events into disjoint intervals
new_list = merge_embedded_occ(list(zip(starts, ends)))
starts, ends = zip(*new_list)
for start, end in zip(starts, ends):
    # .loc replaces the removed DataFrame.ix (inclusive slice on RangeIndex)
    evtf.loc[start:end, 'OCC'] = 1
###############################################################################
############################# X/Y POINTING ####################################
###############################################################################
'''
Types NPSS and NPNS indicate the times in the mission, when the pointing
of the x axis has to switch from North to South (NPSS) or from South to North
(NPNS) in order to avoid Sun incidence on the S/C -x face in nadir pointing
mode around Mars.
In nadir pointing mode, with the x axis perpendicular to the ground track, the
angle between the S/C -x axis and the Sun direction varies around the peri-
centre by some degrees (e.g. at the switching time around mid March 2004
about 5 degrees). This means that there is not a single date and time to
switch to the correct x axis pointing or, conversely, depending on the duration
of the nadir pointing, it might therefore not be possible, to avoid Sun incidence
on the S/C -x face during a complete pericentre passage in nadir pointing
mode (neither with North nor with South pointing option). Instead, the dura-
tion of the nadir pointing has to be reduced or a small Sun incidence must be
tolerated.
'''
# NOTE: the original list was missing a comma after the second element, so
# implicit string concatenation silently fused two event names into one.
feats_pos_changes = ['NADIR_POINTING_X_N_TO_S_SWITCH', 'NADIR_POINTING_X_S_TO_N_SWITCH',
                     'EARTH_POINTING_Y_N_TO_S_SWITCH', 'EARTH_POINTING_Y_S_TO_N_SWITCH']

evtf['NADIR_POINTING_X'] = 0
evtf['EARTH_POINTING_Y'] = 0
for feat in ['NADIR_POINTING_X', 'EARTH_POINTING_Y']:
    print('Processing %s' % feat)
    # list() so np.where() also works on Python 3 (map is lazy there)
    changes = np.where(list(map(lambda x: feat in x, evtf.description.values)))[0]
    # each switch event sets the orientation until the next switch:
    # +1 after an N_TO_S switch, -1 after an S_TO_N switch
    for start, end in zip(changes, np.concatenate([changes[1:], [len(evtf)]])):
        evtf.loc[start:end, feat] = 1 if 'N_TO_S' in evtf.description.values[start] else -1
    # before the first recorded switch, the orientation is the opposite
    # of the state set by that first switch (.loc replaces removed .ix)
    evtf.loc[0:changes[0], feat] = evtf.loc[changes[0], feat] * -1
###############################################################################
########################## TRAJECTORY EVENTS ##################################
###############################################################################
'''
'x km descend’ and ‘x km ascend’, refer to the event
when the height of the S/C position above the Mars reference ellipsoid drops
below or rises above x km.
'''
# list() so np.unique() also works on Python 3 (filter is lazy there)
feats_trajectory = np.unique(
    list(filter(lambda x: x.endswith('SCEND'),
                np.unique(evtf.description))))

evtf['trajectory_position_above_reference'] = 0
evtf['trajectory_direction'] = 0
# list() so np.where() also works on Python 3 (map is lazy there)
changes_trajectory = np.where(list(map(lambda x: x.endswith('SCEND'), evtf.description.values)))[0]

print('Processing trajectory changes')
# each ASCEND/DESCEND event holds until the next one (or end of file)
for start, end in zip(changes_trajectory, np.concatenate([changes_trajectory[1:], [len(evtf)]])):
    splits = evtf.description.iloc[start].split('_')
    pos = int(splits[0])                        # altitude threshold in km
    updown = 1 if splits[-1] == 'ASCEND' else -1
    # .loc replaces the removed DataFrame.ix (inclusive slice on RangeIndex)
    evtf.loc[start:end, 'trajectory_position_above_reference'] = pos
    evtf.loc[start:end, 'trajectory_direction'] = updown

###############################################################################
################################ SAVING #######################################
###############################################################################
evtf.drop(['description'], axis=1, inplace=True)

filename = 'evtf_processed'
savepath = os.path.join(DATA_PATH, filename + '.csv')
print('Saving to %s' % savepath)
evtf.to_csv(savepath, index=False)
| [
"os.path.abspath",
"os.path.join",
"numpy.unique",
"os.getcwd"
] | [((289, 323), 'os.path.join', 'os.path.join', (['""".."""', 'prep.DATA_PATH'], {}), "('..', prep.DATA_PATH)\n", (301, 323), False, 'import os\n'), ((6104, 6146), 'os.path.join', 'os.path.join', (['DATA_PATH', "(filename + '.csv')"], {}), "(DATA_PATH, filename + '.csv')\n", (6116, 6146), False, 'import os\n'), ((381, 416), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""evtf.csv"""'], {}), "(DATA_PATH, 'evtf.csv')\n", (393, 416), False, 'import os\n'), ((171, 196), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (186, 196), False, 'import os\n'), ((234, 245), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (243, 245), False, 'import os\n'), ((5180, 5207), 'numpy.unique', 'np.unique', (['evtf.description'], {}), '(evtf.description)\n', (5189, 5207), True, 'import numpy as np\n')] |
""" This module contains auxiliary functions to plot some information on the
RESTUD economy.
"""
# standard library
import matplotlib.pylab as plt
import numpy as np
import shutil
import shlex
import os
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import FuncFormatter
from matplotlib import cm
# Evaluation points: schooling years (EDU) and the fixed experience levels
# in occupations A and B used when varying the other covariates.
EDU, EXP_A, EXP_B = 10.00, 5, 5
""" Auxiliary function
"""
def prepare_directories(SPECS):
    """Recreate an empty ``data_<spec>`` directory for each entry in SPECS."""
    for spec in SPECS:
        target = 'data_' + spec
        # Wipe leftovers from a previous run; a missing directory is fine.
        try:
            shutil.rmtree(target)
        except OSError:
            pass
        os.mkdir(target)
def wage_function(edu, exp_A, exp_B, coeffs):
    """Expected wage for an agent with the given covariates.

    ``coeffs`` holds the log-wage coefficients in the order
    (intercept, edu, exp_A, exp_A^2, exp_B, exp_B^2); the linear
    combination is exponentiated before returning.
    """
    log_wage = (coeffs[0]
                + coeffs[1] * edu
                + coeffs[2] * exp_A
                + coeffs[3] * exp_A ** 2
                + coeffs[4] * exp_B
                + coeffs[5] * exp_B ** 2)
    return np.exp(log_wage)
def return_to_experience(exp_A, exp_B, coeffs, which):
    """ Wrapper to evaluate the wage function for varying levels of experience.
    """
    # schooling is fixed at the module-level evaluation point EDU
    return wage_function(EDU, exp_A, exp_B, coeffs[which])
# Auxiliary function
def return_to_education(edu, coeffs, which):
    """ Wrapper to evaluate the wage function for varying levels of education.
    """
    # experience is fixed at the module-level evaluation points EXP_A / EXP_B
    return wage_function(edu, EXP_A, EXP_B, coeffs[which])
""" Plotting functions
"""
def get_choice_probabilities(fname):
    """Parse the per-period choice probabilities from a results file.

    Reads *fname* line by line and collects the numeric columns of every
    line whose first token is an integer (a period index), stopping as
    soon as the 'Outcomes' section is reached.

    Returns a float array of shape (num_periods, 4); shape (0, 4) when no
    matching lines are found.
    """
    rows = []
    with open(fname) as in_file:
        for line in in_file.readlines():
            # Split line
            list_ = shlex.split(line)
            # Skip empty lines
            if not list_:
                continue
            # If OUTCOMES is reached, then we are done for good.
            if list_[0] == 'Outcomes':
                break
            # Any lines that do not have an integer as their first element
            # are not of interest.
            try:
                int(list_[0])
            except ValueError:
                continue
            # All lines that make it down here are relevant.
            rows.append([float(x) for x in list_[1:]])
    # Build the array once at the end instead of np.vstack-ing inside the
    # loop (which is quadratic); keep the original (0, 4) shape when empty.
    if not rows:
        return np.tile(np.nan, (0, 4))
    return np.array(rows)
def plot_return_experience(x, y, z, spec):
    """ Function to produce plot for the return to experience.

    Draws two 3D wage surfaces (occupations 'a' and 'b') over the
    experience grids x, y and saves the figure to
    data_<spec>/returns_experience.png (the directory must already exist).
    NOTE(review): mutates ``z`` in place (values rescaled to $1,000 units),
    so callers keep the scaled dict after this returns.
    """
    def _beautify_subplot(subplot, zlim):
        # shared cosmetic setup for both 3D panels
        subplot.view_init(azim=180 + 40)
        subplot.set_ylabel('Experience A')
        subplot.set_xlabel('Experience B')
        subplot.set_zlabel('Wages')
        subplot.zaxis.set_rotate_label(False)
        # second call overrides the plain 'Wages' label set above
        subplot.set_zlabel(r'Wages (in \$1,000)', rotation=90)
        subplot.zaxis.get_major_ticks()[0].set_visible(False)
        # Background Color (higher numbers are lighter)
        subplot.w_xaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
        subplot.w_yaxis.set_pane_color((0.6, 0.6, 0.6, 1.0))
        subplot.w_zaxis.set_pane_color((0.68, 0.68, 0.68, 1.0))
        # NOTE(review): uses the closed-over ``ax`` rather than the
        # ``subplot`` parameter — works because the caller passes ax anyway.
        ax.set_zlim(zlim)

    # Scaling (in-place: converts wages to $1,000 units)
    z['a'] = z['a'] / 1000
    z['b'] = z['b'] / 1000

    # z-axis limits per specification
    # NOTE(review): zlim is undefined for any spec outside one/two/three.
    if spec == 'one':
        zlim = [10, 35]
    elif spec == 'two':
        zlim = [0, 55]
    elif spec == 'three':
        zlim = [0, 55]

    fig = plt.figure(figsize=(16, 8))

    # left panel: occupation A surface
    ax = fig.add_subplot(121, projection='3d')
    ax.plot_surface(x, y, z['a'], rstride=1, cstride=1, cmap=cm.jet,
        linewidth=0, antialiased=False, alpha=0.8)
    _beautify_subplot(ax, zlim)

    # right panel: occupation B surface
    ax = fig.add_subplot(122, projection='3d')
    ax.plot_surface(x, y, z['b'], rstride=1, cstride=1, cmap=cm.jet,
        linewidth=0, antialiased=False, alpha=0.8)
    _beautify_subplot(ax, zlim)

    # Write out to
    plt.savefig('data_' + spec.lower() + '/returns_experience.png', bbox_inches='tight', format='png')
def plot_return_education(xvals, yvals, spec):
    """ Function to produce plot for the return to education.

    Plots wages against years of schooling for both occupations and saves
    the figure to data_<spec>/returns_schooling.png (directory must exist).
    NOTE(review): mutates ``yvals`` in place (rescaled to $1,000 units).
    """
    # NOTE(review): ``labels`` is unused — the legend entries come from the
    # label= keywords of the plot calls below.
    labels = ['Occupation A', 'Occupation B']
    # Initialize plot
    ax = plt.figure(figsize=(12, 8)).add_subplot(111)

    # Scaling (in-place: converts wages to $1,000 units)
    for occu in ['a', 'b']:
        for i, _ in enumerate(xvals):
            yvals[occu][i] = yvals[occu][i] / 1000

    # Draw lines
    ax.plot(xvals, yvals['a'], '-k', label='Occupation A', linewidth=5,
        color='red', alpha=0.8)
    ax.plot(xvals, yvals['b'], '-k', label='Occupation B', linewidth=5,
        color='orange', alpha=0.8)

    # Both axes
    ax.tick_params(labelsize=16, direction='out', axis='both', top='off',
        right='off')

    # x-axis
    ax.set_xticklabels(ax.get_xticks().astype(int))
    ax.set_xlabel('Years of Schooling', fontsize=16)

    # y-axis: thousands separators on the wage ticks
    yticks = ['{:,.0f}'.format(y) for y in ax.get_yticks().astype(int)]
    ax.set_yticklabels(yticks, fontsize=16)
    ax.set_ylabel(r'Wages (in \$1,000)', fontsize=16)
    ax.yaxis.get_major_ticks()[0].set_visible(False)

    # Set up legend (below the plot area)
    ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.10),
        fancybox=False, frameon=False, shadow=False, ncol=2, fontsize=20)

    # Write out to
    plt.savefig('data_' + spec.lower() + '/returns_schooling.png',
        bbox_inches='tight', format='png')
def plot_choice_patterns(choice_probabilities, task):
    """ Function to produce plot for choice patterns.

    Draws a stacked bar chart of the population shares choosing Home /
    School / Occupation A / Occupation B over 40 periods and saves it to
    choices_<task>.png.

    choice_probabilities: array indexed as [period, choice]; the column
    order assumed here is 0=Occ A, 1=Occ B, 2=School, 3=Home (stacked
    bottom-up as Home, School, A, B via the [3, 2, 0, 1] ordering below).
    """
    deciles = range(40)
    colors = ['blue', 'yellow', 'orange', 'red']
    width = 0.9
    # Plotting
    bottom = [0] * 40
    # Initialize plot
    ax = plt.figure(figsize=(12, 8)).add_subplot(111)
    labels = ['Home', 'School', 'Occupation A', 'Occupation B']
    for j, i in enumerate([3, 2, 0, 1]):
        heights = choice_probabilities[:, i]
        plt.bar(deciles, heights, width, bottom=bottom, color=colors[j],
            alpha=0.70)
        # accumulate the stack base for the next choice category
        # (the comprehension's ``i`` is local to it on Python 3)
        bottom = [heights[i] + bottom[i] for i in range(40)]
    # Both Axes
    ax.tick_params(labelsize=16, direction='out', axis='both',
        top='off',
        right='off')
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    # X axis
    ax.set_xlabel('Period', fontsize=16)
    ax.set_xlim([0, 40])
    # Y axis
    ax.set_ylabel('Share of Population', fontsize=16)
    ax.yaxis.get_major_ticks()[0].set_visible(False)
    # Legend (below the plot area, one column per choice)
    plt.legend(labels, loc='upper center', bbox_to_anchor=(0.5, -0.10),
        fancybox=False, frameon=False, shadow=False, ncol=4,
        fontsize=20)
    # Write out to
    plt.savefig('choices_' + task + '.png', bbox_inches='tight',
        format='png')
| [
"numpy.tile",
"matplotlib.pylab.savefig",
"matplotlib.pylab.figure",
"shlex.split",
"matplotlib.pylab.legend",
"numpy.exp",
"os.mkdir",
"shutil.rmtree",
"matplotlib.pylab.bar"
] | [((1060, 1072), 'numpy.exp', 'np.exp', (['wage'], {}), '(wage)\n', (1066, 1072), True, 'import numpy as np\n'), ((1772, 1795), 'numpy.tile', 'np.tile', (['np.nan', '(0, 4)'], {}), '(np.nan, (0, 4))\n', (1779, 1795), True, 'import numpy as np\n'), ((3513, 3540), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(16, 8)'}), '(figsize=(16, 8))\n', (3523, 3540), True, 'import matplotlib.pylab as plt\n'), ((6631, 6768), 'matplotlib.pylab.legend', 'plt.legend', (['labels'], {'loc': '"""upper center"""', 'bbox_to_anchor': '(0.5, -0.1)', 'fancybox': '(False)', 'frameon': '(False)', 'shadow': '(False)', 'ncol': '(4)', 'fontsize': '(20)'}), "(labels, loc='upper center', bbox_to_anchor=(0.5, -0.1), fancybox\n =False, frameon=False, shadow=False, ncol=4, fontsize=20)\n", (6641, 6768), True, 'import matplotlib.pylab as plt\n'), ((6819, 6893), 'matplotlib.pylab.savefig', 'plt.savefig', (["('choices_' + task + '.png')"], {'bbox_inches': '"""tight"""', 'format': '"""png"""'}), "('choices_' + task + '.png', bbox_inches='tight', format='png')\n", (6830, 6893), True, 'import matplotlib.pylab as plt\n'), ((584, 601), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (592, 601), False, 'import os\n'), ((5949, 6024), 'matplotlib.pylab.bar', 'plt.bar', (['deciles', 'heights', 'width'], {'bottom': 'bottom', 'color': 'colors[j]', 'alpha': '(0.7)'}), '(deciles, heights, width, bottom=bottom, color=colors[j], alpha=0.7)\n', (5956, 6024), True, 'import matplotlib.pylab as plt\n'), ((511, 533), 'shutil.rmtree', 'shutil.rmtree', (['dirname'], {}), '(dirname)\n', (524, 533), False, 'import shutil\n'), ((1918, 1935), 'shlex.split', 'shlex.split', (['line'], {}), '(line)\n', (1929, 1935), False, 'import shlex\n'), ((4289, 4316), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (4299, 4316), True, 'import matplotlib.pylab as plt\n'), ((5746, 5773), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 
8))\n', (5756, 5773), True, 'import matplotlib.pylab as plt\n')] |
from legendre import legendre
import seidel
import matrix
import numpy as np
from math import sqrt, pi, e
def quadrature(k):
    """Exact integral of x**k over [-1, 1]: 0 for odd k, 2/(k+1) for even k."""
    return 0 if k % 2 else 2 / (k + 1)
def integrate(a, b, n, f):
    """Gauss-Legendre quadrature of f over [a, b] with n nodes.

    Builds the moment system A w = B, where A[k, i] = l_i**k for the
    Legendre nodes l_i and B[k] is the exact moment of x**k over [-1, 1]
    (see quadrature()), solves it with the project's matrix routines to get
    the weights, and maps the nodes from [-1, 1] onto [a, b].
    """
    nodes = legendre(n)
    A = np.zeros((n, n))
    B = np.zeros((n, 1))
    for k in range(n):
        for i in range(n):
            A[k, i] = nodes[i] ** k
        B[k] = quadrature(k)
    # weights of the quadrature rule: w = A^-1 B
    D = matrix.inv(A)
    Ai = matrix.multi(D, B)
    return (b - a) / 2 * sum(Ai[i] * f((b - a) / 2 * nodes[i] + (a + b) / 2)
                             for i in range(n))
def nparray_to_list(a):
    """Convert a 2-d array (or sequence of row sequences) into a list of lists."""
    a = list(a)
    for i in range(len(a)):
        a[i] = list(a[i])
    return a | [
"numpy.zeros",
"matrix.multi",
"matrix.inv",
"legendre.legendre"
] | [((231, 242), 'legendre.legendre', 'legendre', (['n'], {}), '(n)\n', (239, 242), False, 'from legendre import legendre\n'), ((252, 268), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (260, 268), True, 'import numpy as np\n'), ((277, 293), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (285, 293), True, 'import numpy as np\n'), ((580, 593), 'matrix.inv', 'matrix.inv', (['A'], {}), '(A)\n', (590, 593), False, 'import matrix\n'), ((603, 621), 'matrix.multi', 'matrix.multi', (['D', 'B'], {}), '(D, B)\n', (615, 621), False, 'import matrix\n')] |
import configparser
import numpy
import sys
import time
import random
import math
import os
from copy import deepcopy
import json
from numpy.linalg import norm
from numpy import dot
import numpy as np
import codecs
from scipy.stats import spearmanr
import tensorflow as tf
import torch
import torch.nn as nn
from torch.autograd import Variable
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
# Device selection: GPU 0 when CUDA is actually available, else CPU (-1).
# NOTE: the original tested the *function object* torch.cuda.is_available
# (always truthy) instead of calling it, so CUDA was unconditionally 0 and
# no CPU fallback value was ever defined.
if torch.cuda.is_available():
    CUDA = 0
else:
    CUDA = -1
def FloatTensorWrapper(tensor, cuda=0):
    """Build a FloatTensor, moving it onto GPU ``cuda`` when cuda >= 0."""
    result = torch.FloatTensor(tensor)
    if cuda >= 0:
        result = result.cuda(cuda)
    return result
def LongTensorWrapper(tensor, cuda=0):
    """Build a LongTensor, moving it onto GPU ``cuda`` when cuda >= 0."""
    result = torch.LongTensor(tensor)
    if cuda >= 0:
        result = result.cuda(cuda)
    return result
def DoubleTensorWrapper(tensor, cuda=0):
    """Build a DoubleTensor, moving it onto GPU ``cuda`` when cuda >= 0."""
    result = torch.DoubleTensor(tensor)
    if cuda >= 0:
        result = result.cuda(cuda)
    return result
def l2_loss(input_tensor, target_tensor):
    """Sum of squared differences divided by 2 (TensorFlow-style l2_loss)."""
    # reduction='none' replaces the long-deprecated reduce=False flag;
    # it yields the same element-wise squared-error matrix.
    loss_matrix = nn.functional.mse_loss(input_tensor, target_tensor, reduction='none')
    return torch.sum(loss_matrix) / 2
class PytorchModel(torch.nn.Module):
    """Attract-Repel model: a frozen copy of the initial embeddings (init_W)
    used for L2 regularisation, and a trainable copy (dynamic_W) that is
    pushed towards synonyms (attract) and away from antonyms (repel)."""
    def __init__(self, W, attract_margin_value=1.0, repel_margin_value=0.0, l2_reg_constant=1e-9):
        """W: initial embedding matrix of shape (vocab_size, embedding_dim)."""
        super(PytorchModel, self).__init__()
        self.attract_margin = attract_margin_value
        self.repel_margin = repel_margin_value
        self.regularisation_constant = l2_reg_constant
        # frozen snapshot of the original vectors (regularisation target)
        self.init_W = nn.Embedding(W.shape[0], W.shape[1])
        self.init_W.weight = nn.Parameter(torch.DoubleTensor(W), requires_grad=False)
        # the vectors actually being optimised
        self.dynamic_W = nn.Embedding(W.shape[0], W.shape[1])
        self.dynamic_W.weight = nn.Parameter(torch.DoubleTensor(W), requires_grad=True)
    def attract_cost(self, attract_examples, negative_examples_attract):
        """Max-margin attract cost for a batch of synonym pairs.

        attract_examples / negative_examples_attract: sequences of
        (left_index, right_index) word-index pairs of equal length.
        Returns a per-example cost vector (hinge losses plus L2
        regularisation towards the initial vectors).
        """
        np_attract_examples = np.array(attract_examples)
        np_negative_examples_attract = np.array(negative_examples_attract)
        # L2-normalised dynamic vectors for both words of each pair
        attract_examples_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_attract_examples[:,0], CUDA))))
        attract_examples_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_attract_examples[:,1], CUDA))))
        negative_examples_attract_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_attract[:,0], CUDA))))
        negative_examples_attract_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_attract[:,1], CUDA))))
        # dot product between the example pairs.
        attract_similarity_between_examples = torch.sum(torch.mul(attract_examples_left, attract_examples_right), 1)
        # dot product of each word in the example with its negative example.
        attract_similarity_to_negatives_left = torch.sum(torch.mul(attract_examples_left, negative_examples_attract_left), 1)
        attract_similarity_to_negatives_right = torch.sum(torch.mul(attract_examples_right, negative_examples_attract_right), 1)
        # hinge: each word should be closer to its pair partner than to its
        # negative example, by at least the attract margin
        attract_cost = nn.functional.relu(self.attract_margin + attract_similarity_to_negatives_left - attract_similarity_between_examples) + \
                       nn.functional.relu(self.attract_margin + attract_similarity_to_negatives_right - attract_similarity_between_examples)
        # load the original (frozen) distributional vectors for the pairs
        original_attract_examples_left = self.init_W(LongTensorWrapper(np_attract_examples[:,0], CUDA))
        original_attract_examples_right = self.init_W(LongTensorWrapper(np_attract_examples[:,1], CUDA))
        original_negative_examples_attract_left = self.init_W(LongTensorWrapper(np_negative_examples_attract[:,0], CUDA))
        original_negative_examples_attract_right = self.init_W(LongTensorWrapper(np_negative_examples_attract[:,1], CUDA))
        # and then define the respective regularisation costs:
        regularisation_cost_attract = self.regularisation_constant * (l2_loss(original_attract_examples_left, attract_examples_left) + l2_loss(original_attract_examples_right, attract_examples_right))
        attract_cost += regularisation_cost_attract
        return attract_cost
    def repel_cost(self, repel_examples, negative_examples_repel):
        """Max-margin repel cost for a batch of antonym pairs.

        Mirrors attract_cost, but penalises pair members that are MORE
        similar to each other than to their negative examples (repel
        margin). Returns a per-example cost vector including the L2
        regularisation towards the initial vectors.
        """
        np_repel_examples = np.array(repel_examples)
        np_negative_examples_repel = np.array(negative_examples_repel)
        # L2-normalised dynamic vectors for both words of each pair
        repel_examples_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_repel_examples[:,0], CUDA))))
        repel_examples_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_repel_examples[:,1], CUDA))))
        negative_examples_repel_left = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_repel[:,0], CUDA))))
        negative_examples_repel_right = nn.functional.normalize(self.dynamic_W(Variable(LongTensorWrapper(np_negative_examples_repel[:,1], CUDA))))
        # dot product between the example pairs.
        repel_similarity_between_examples = torch.sum(torch.mul(repel_examples_left, repel_examples_right), 1)
        # dot product of each word in the example with its negative example.
        repel_similarity_to_negatives_left = torch.sum(torch.mul(repel_examples_left, negative_examples_repel_left), 1)
        repel_similarity_to_negatives_right = torch.sum(torch.mul(repel_examples_right, negative_examples_repel_right), 1)
        # hinge: each word should be closer to its negative example than to
        # its (antonymous) pair partner, by at least the repel margin
        repel_cost = nn.functional.relu(self.repel_margin + repel_similarity_to_negatives_left - repel_similarity_between_examples) + \
                     nn.functional.relu(self.repel_margin + repel_similarity_to_negatives_right - repel_similarity_between_examples)
        # load the original distributional vectors for the example pairs:
        original_repel_examples_left = self.init_W(LongTensorWrapper(np_repel_examples[:,0], CUDA))
        original_repel_examples_right = self.init_W(LongTensorWrapper(np_repel_examples[:,1], CUDA))
        original_negative_examples_repel_left = self.init_W(LongTensorWrapper(np_negative_examples_repel[:,0], CUDA))
        original_negative_examples_repel_right = self.init_W(LongTensorWrapper(np_negative_examples_repel[:,1], CUDA))
        # and then define the respective regularisation costs:
        regularisation_cost_repel = self.regularisation_constant * (l2_loss(original_repel_examples_left, repel_examples_left) + l2_loss(original_repel_examples_right, repel_examples_right))
        repel_cost += regularisation_cost_repel
        return repel_cost
class ExperimentRun:
"""
This class stores all of the data and hyperparameters required for an Attract-Repel run.
"""
    def __init__(self, config_filepath):
        """
        To initialise the class, we need to supply the config file, which contains the location of
        the pretrained (distributional) word vectors, the location of (potentially more than one)
        collections of linguistic constraints (one pair per line), as well as the
        hyperparameters of the Attract-Repel procedure (as detailed in the TACL paper).
        """
        self.config = configparser.RawConfigParser()
        # NOTE(review): bare except — swallows any read error, then the
        # attribute accesses below will fail later with missing sections.
        try:
            self.config.read(config_filepath)
        except:
            print("Couldn't read config file from", config_filepath)
            return None
        distributional_vectors_filepath = self.config.get("data", "distributional_vectors")
        # optional output path; default when not present in the config
        try:
            self.output_filepath = self.config.get("data", "output_filepath")
        except:
            self.output_filepath = "results/final_vectors.txt"
        # load initial distributional word vectors.
        distributional_vectors = load_word_vectors(distributional_vectors_filepath)
        if not distributional_vectors:
            return
        print("SimLex score (Spearman's rho coefficient) of initial vectors is:\n")
        simlex_scores(distributional_vectors)
        self.vocabulary = set(distributional_vectors.keys())
        # this will be used to load constraints
        # word -> row index and its inverse (row index -> word)
        self.vocab_index = {}
        self.inverted_index = {}
        for idx, word in enumerate(self.vocabulary):
            self.vocab_index[word] = idx
            self.inverted_index[idx] = word
        # load list of filenames for synonyms and antonyms.
        # config values look like "[file1, file2]" — strip brackets/spaces
        # and split on commas
        synonym_list = self.config.get("data", "synonyms").replace("[","").replace("]", "").replace(" ", "").split(",")
        antonym_list = self.config.get("data", "antonyms").replace("[","").replace("]", "").replace(" ", "").split(",")
        self.synonyms = set()
        self.antonyms = set()
        # NOTE(review): split() always returns a list, so `!= ""` is always
        # true; the inner `if syn_filepath != ""` does the real filtering.
        if synonym_list != "":
            # and we then have all the information to load all linguistic constraints
            for syn_filepath in synonym_list:
                if syn_filepath != "":
                    self.synonyms = self.synonyms | self.load_constraints(syn_filepath)
        else:
            self.synonyms = set()
        if antonym_list != "":
            for ant_filepath in antonym_list:
                if ant_filepath != "":
                    self.antonyms = self.antonyms | self.load_constraints(ant_filepath)
        else:
            self.antonyms = set()
        # finally, load the experiment hyperparameters:
        self.load_experiment_hyperparameters()
        # infer dimensionality from an arbitrary vector
        self.embedding_size = random.choice(list(distributional_vectors.values())).shape[0]
        self.vocabulary_size = len(self.vocabulary)
        # Next, prepare the matrix of initial vectors and initialise the model.
        numpy_embedding = numpy.zeros((self.vocabulary_size, self.embedding_size), dtype="float64")
        for idx in range(0, self.vocabulary_size):
            numpy_embedding[idx, :] = distributional_vectors[self.inverted_index[idx]]
        self.model = PytorchModel(numpy_embedding,
                                  attract_margin_value=self.attract_margin_value,
                                  repel_margin_value=self.repel_margin_value,
                                  l2_reg_constant=self.regularisation_constant_value)
        # move the model to the selected GPU (CUDA >= 0), else stay on CPU
        if CUDA >= 0:
            self.model.cuda(CUDA)
def load_constraints(self, constraints_filepath):
"""
This methods reads a collection of constraints from the specified file, and returns a set with
all constraints for which both of their constituent words are in the specified vocabulary.
"""
constraints_filepath.strip()
constraints = set()
with codecs.open(constraints_filepath, "r", "utf-8") as f:
for line in f:
word_pair = line.split()
if word_pair[0] in self.vocabulary and word_pair[1] in self.vocabulary and word_pair[0] != word_pair[1]:
constraints |= {(self.vocab_index[word_pair[0]], self.vocab_index[word_pair[1]])}
return constraints
def load_experiment_hyperparameters(self):
"""
This method loads/sets the hyperparameters of the procedure as specified in the paper.
"""
self.attract_margin_value = self.config.getfloat("hyperparameters", "attract_margin")
self.repel_margin_value = self.config.getfloat("hyperparameters", "repel_margin")
self.batch_size = int(self.config.getfloat("hyperparameters", "batch_size"))
self.regularisation_constant_value = self.config.getfloat("hyperparameters", "l2_reg_constant")
self.max_iter = self.config.getfloat("hyperparameters", "max_iter")
self.log_scores_over_time = self.config.get("experiment", "log_scores_over_time")
self.print_simlex = self.config.get("experiment", "print_simlex")
if self.log_scores_over_time in ["True", "true"]:
self.log_scores_over_time = True
else:
self.log_scores_over_time = False
if self.print_simlex in ["True", "true"]:
self.print_simlex = True
else:
self.print_simlex = False
print("\nExperiment hyperparameters (attract_margin, repel_margin, batch_size, l2_reg_constant, max_iter):", \
self.attract_margin_value, self.repel_margin_value, self.batch_size, self.regularisation_constant_value, self.max_iter)
    def extract_negative_examples(self, list_minibatch, attract_batch = True):
        """
        For each example in the minibatch, this method returns the closest vector which is not
        in each words example pair.

        list_minibatch: sequence of (left_index, right_index) word-index pairs.
        attract_batch: True -> pick the NEAREST other vector (for synonym
        batches); False -> pick the FURTHEST (for antonym batches).
        Returns a list of (negative_left, negative_right) index pairs.
        """
        np_list_minibatch = np.array(list_minibatch)
        list_of_representations = []
        list_of_indices = []
        lefts = Variable(LongTensorWrapper(np_list_minibatch[:, 0], CUDA))
        rights = Variable(LongTensorWrapper(np_list_minibatch[:, 1], CUDA))
        # L2-normalised dynamic vectors for both columns of the batch
        representations = [nn.functional.normalize(self.model.dynamic_W(lefts)).data.cpu().numpy(), nn.functional.normalize(self.model.dynamic_W(rights)).data.cpu().numpy()]
        # interleave: positions 2i / 2i+1 hold the left / right word of pair i
        for idx, (example_left, example_right) in enumerate(list_minibatch):
            list_of_representations.append(representations[0][idx])
            list_of_representations.append(representations[1][idx])
            list_of_indices.append(example_left)
            list_of_indices.append(example_right)
        # pairwise cosine distances between all 2*batch vectors
        condensed_distance_list = pdist(list_of_representations, 'cosine')
        square_distance_list = squareform(condensed_distance_list)
        if attract_batch:
            default_value = 2.0 # value to set for given attract/repel pair, so that it can not be found as closest or furthest away.
        else:
            default_value = 0.0 # for antonyms, we want the opposite value from the synonym one. Cosine Distance is [0,2].
        # mask the diagonal and each word's own pair partner so neither can
        # be selected as its negative example
        for i in range(len(square_distance_list)):
            square_distance_list[i,i]=default_value
            if i % 2 == 0:
                square_distance_list[i,i+1] = default_value
            else:
                square_distance_list[i,i-1] = default_value
        if attract_batch:
            negative_example_indices = numpy.argmin(square_distance_list,axis=1) # for each element, finds the index with minimal cosine distance (i.e. most similar).
        else:
            negative_example_indices = numpy.argmax(square_distance_list, axis=1) # for antonyms, find the least similar one.
        negative_examples = []
        for idx in range(len(list_minibatch)):
            negative_example_left = list_of_indices[negative_example_indices[2 * idx]]
            negative_example_right = list_of_indices[negative_example_indices[2 * idx + 1]]
            negative_examples.append((negative_example_left, negative_example_right))
        # post-process via mix_sampling (defined elsewhere in this module)
        negative_examples = mix_sampling(list_minibatch, negative_examples)
        return negative_examples
def attract_repel(self):
"""
This method repeatedly applies optimisation steps to fit the word vectors to the provided linguistic constraints.
"""
current_iteration = 0
# Post-processing: remove synonym pairs which are deemed to be both synonyms and antonyms:
for antonym_pair in self.antonyms:
if antonym_pair in self.synonyms:
self.synonyms.remove(antonym_pair)
self.synonyms = list(self.synonyms)
self.antonyms = list(self.antonyms)
self.syn_count = len(self.synonyms)
self.ant_count = len(self.antonyms)
print("\nAntonym pairs:", len(self.antonyms), "Synonym pairs:", len(self.synonyms))
list_of_simlex = []
list_of_wordsim = []
syn_batches = int(self.syn_count / self.batch_size)
ant_batches = int(self.ant_count / self.batch_size)
batches_per_epoch = syn_batches + ant_batches
print("\nRunning the optimisation procedure for", self.max_iter, "iterations...")
last_time = time.time()
if self.log_scores_over_time:
fwrite_simlex = open("results/simlex_scores.txt", "w")
fwrite_wordsim = open("results/wordsim_scores.txt", "w")
# set optimizer
attract_optimizer = torch.optim.Adagrad(self.model.dynamic_W.parameters(), lr=0.01)
repel_optimizer = torch.optim.Adagrad(self.model.dynamic_W.parameters(), lr=0.01)
while current_iteration < self.max_iter:
# how many attract/repel batches we've done in this epoch so far.
antonym_counter = 0
synonym_counter = 0
order_of_synonyms = [i for i in range(0, self.syn_count)]
order_of_antonyms = [i for i in range(0, self.ant_count)]
random.shuffle(order_of_synonyms)
random.shuffle(order_of_antonyms)
# list of 0 where we run synonym batch, 1 where we run antonym batch
list_of_batch_types = [0] * batches_per_epoch
list_of_batch_types[syn_batches:] = [1] * ant_batches # all antonym batches to 1
random.shuffle(list_of_batch_types)
if current_iteration == 0:
print("\nStarting epoch:", current_iteration+1, "\n")
else:
print("\nStarting epoch:", current_iteration+1, "Last epoch took:", round(time.time() - last_time, 1), "seconds. \n")
last_time = time.time()
for batch_index in range(0, batches_per_epoch):
# we can Log SimLex / WordSim scores
if self.log_scores_over_time and (batch_index % (batches_per_epoch/20) == 0):
(simlex_score, wordsim_score) = self.create_vector_dictionary()
list_of_simlex.append(simlex_score)
list_of_wordsim.append(wordsim_score)
fwrite_simlex.write(len(list_of_simlex)+1, simlex_score)
fwrite_wordsim.write(len(list_of_simlex)+1, wordsim_score)
syn_or_ant_batch = list_of_batch_types[batch_index]
if syn_or_ant_batch == 0:
# do one synonymy batch:
synonymy_examples = [self.synonyms[order_of_synonyms[x]] for x in range(synonym_counter * self.batch_size, (synonym_counter+1) * self.batch_size)]
current_negatives = self.extract_negative_examples(synonymy_examples, attract_batch=True)
attract_cost = self.model.attract_cost(synonymy_examples, current_negatives)
# apply gradients
self.model.zero_grad()
torch.sum(attract_cost).backward()
self.model.dynamic_W.weight.grad.data.clamp_(-2.0, 2.0)
attract_optimizer.step()
synonym_counter += 1
else:
antonymy_examples = [self.antonyms[order_of_antonyms[x]] for x in range(antonym_counter * self.batch_size, (antonym_counter+1) * self.batch_size)]
current_negatives = self.extract_negative_examples(antonymy_examples, attract_batch=False)
repel_cost = self.model.repel_cost(antonymy_examples, current_negatives)
# apply gradients
self.model.zero_grad()
torch.sum(repel_cost).backward()
self.model.dynamic_W.weight.grad.data.clamp_(-2.0, 2.0)
repel_optimizer.step()
antonym_counter += 1
current_iteration += 1
self.create_vector_dictionary() # whether to print SimLex score at the end of each epoch
def create_vector_dictionary(self):
"""
Extracts the current word vectors from TensorFlow embeddings and (if print_simlex=True) prints their SimLex scores.
"""
log_time = time.time()
current_vectors = self.model.dynamic_W.weight.data.cpu().numpy()
self.word_vectors = {}
for idx in range(0, self.vocabulary_size):
self.word_vectors[self.inverted_index[idx]] = normalise_vector(current_vectors[idx, :])
if self.log_scores_over_time or self.print_simlex:
(score_simlex, score_wordsim) = simlex_scores(self.word_vectors, self.print_simlex)
return (score_simlex, score_wordsim)
return (1.0, 1.0)
def random_different_from(top_range, number_to_not_repeat):
    """Draw a uniform random index in [0, top_range) that differs from the given one."""
    while True:
        candidate = random.randint(0, top_range - 1)
        if candidate != number_to_not_repeat:
            return candidate
def mix_sampling(list_of_examples, negative_examples):
    """
    Converts half of the negative examples to random words from the batch (that are not in the given example pair).
    """
    minibatch_size = len(list_of_examples)
    resampled = []
    for position, (neg_left, neg_right) in enumerate(negative_examples):
        # With probability 0.5, replace each side with a random word taken
        # from another example pair in the same minibatch.
        if random.random() >= 0.5:
            neg_left = list_of_examples[random_different_from(minibatch_size, position)][random.randint(0, 1)]
        if random.random() >= 0.5:
            neg_right = list_of_examples[random_different_from(minibatch_size, position)][random.randint(0, 1)]
        resampled.append((neg_left, neg_right))
    return resampled
def normalise_word_vectors(word_vectors, norm=1.0):
    """
    This method normalises the collection of word vectors provided in the word_vectors dictionary.

    Each vector is scaled in place to (approximately) the requested L2 norm;
    the same dictionary object is returned for convenience.
    """
    for key in word_vectors:
        # The small epsilon keeps the division finite for an all-zero vector.
        scale = math.sqrt((word_vectors[key] ** 2).sum() + 1e-6)
        word_vectors[key] /= scale
        word_vectors[key] = word_vectors[key] * norm
    return word_vectors
def load_word_vectors(fname, isBinary=False):
    """
    Loads 300x1 word vecs from Google (Mikolov) word2vec

    Parameters:
        fname: path to the vector file.
        isBinary: True for word2vec binary format, False for the plain
            text "word v1 v2 ..." format with a one-line header.

    Returns a dict mapping word -> numpy float64 vector.
    """
    print("Loading pretrained word vectors from", fname)
    word_vecs = {}
    if isBinary:
        with open(fname, "rb") as f:
            header = f.readline()
            vocab_size, layer1_size = map(int, header.split())
            binary_len = numpy.dtype('float64').itemsize * layer1_size
            for _ in range(vocab_size):
                # Read the word byte-by-byte up to the separating space.
                word = b""
                while True:
                    ch = f.read(1)
                    if ch == b' ':
                        break
                    if ch != b'\n':
                        word += ch
                # frombuffer replaces the deprecated numpy.fromstring binary mode;
                # .copy() keeps the array writable like fromstring's result was.
                word_vecs[word.decode()] = numpy.frombuffer(f.read(binary_len), dtype='float64').copy()
    else:
        # BUGFIX: the original codecs handle was never closed; the context
        # manager closes it even if a line fails to parse.
        with codecs.open(fname, 'r', 'utf-8') as f:
            f.readline()  # skip the "<vocab_size> <dimension>" header line
            for line in f:
                parts = line.split(" ", 1)
                key = parts[0].lower()
                # Equivalent to the deprecated numpy.fromstring(..., sep=" ").
                word_vecs[key] = numpy.array(parts[1].split(), dtype="float64")
    print(len(word_vecs), "vectors loaded from", fname)
    return word_vecs
def print_word_vectors(word_vectors, write_path):
    """
    This function prints the collection of word vectors to file, in a plain textual format.

    Each vector is written on its own line as "<word> <v_1> ... <v_d>", with
    components rounded to 6 decimals.
    """
    # BUGFIX: the original never closed the handle (so buffered output could be
    # lost) and wrote no newline between vectors, producing one unreadable line.
    with codecs.open(write_path, 'w', 'utf-8') as f_write:
        for key in word_vectors:
            f_write.write(key + " " + " ".join(map(str, numpy.round(word_vectors[key], decimals=6))) + "\n")
    print("Printed", len(word_vectors), "word vectors to:", write_path)
def simlex_analysis(word_vectors, language="english", source="simlex"):
    """
    This method computes the Spearman's rho correlation (with p-value) of the supplied word vectors.

    Scores `word_vectors` against one of the similarity benchmarks
    (SimLex, old SimLex, SimVerb or WordSim-353) and returns
    (rounded Spearman's rho, number of covered pairs).
    """
    pair_list = []
    # Select the benchmark file; paths are relative to the working directory.
    # NOTE(review): an unrecognised `source` leaves fread_simlex undefined and
    # the loop below raises NameError; the file handles are also never closed.
    if source == "simlex":
        fread_simlex=codecs.open("evaluation/simlex-" + language + ".txt", 'r', 'utf-8')
    elif source == "simlex-old":
        fread_simlex=codecs.open("evaluation/simlex-english-old.txt", 'r', 'utf-8')
    elif source == "simverb":
        fread_simlex=codecs.open("evaluation/simverb.txt", 'r', 'utf-8')
    elif source == "wordsim":
        fread_simlex=codecs.open("evaluation/ws-353/wordsim353-" + language + ".txt", 'r', 'utf-8') # specify english, english-rel, etc.
    line_number = 0
    # Keep only the (word_i, word_j) pairs whose words are in the vocabulary.
    for line in fread_simlex:
        if line_number > 0:  # skip the header row
            tokens = line.split()
            word_i = tokens[0].lower()
            word_j = tokens[1].lower()
            score = float(tokens[2])
            if word_i in word_vectors and word_j in word_vectors:
                pair_list.append( ((word_i, word_j), score) )
            else:
                pass
        line_number += 1
    if not pair_list:
        return (0.0, 0)
    # Gold ranking: sort pairs by descending human similarity score.
    pair_list.sort(key=lambda x: - x[1])
    coverage = len(pair_list)
    extracted_list = []
    extracted_scores = {}
    # Model ranking: cosine distance between the two vectors of each pair.
    for (x,y) in pair_list:
        (word_i, word_j) = x
        current_distance = distance(word_vectors[word_i], word_vectors[word_j])
        extracted_scores[(word_i, word_j)] = current_distance
        extracted_list.append(((word_i, word_j), current_distance))
    extracted_list.sort(key=lambda x: x[1])
    # Build rank lists for the two orderings and correlate them.
    spearman_original_list = []
    spearman_target_list = []
    for position_1, (word_pair, score_1) in enumerate(pair_list):
        score_2 = extracted_scores[word_pair]
        position_2 = extracted_list.index((word_pair, score_2))
        spearman_original_list.append(position_1)
        spearman_target_list.append(position_2)
    spearman_rho = spearmanr(spearman_original_list, spearman_target_list)
    return round(spearman_rho[0], 3), coverage
def normalise_vector(v1):
    """Return *v1* rescaled to unit L2 length."""
    magnitude = norm(v1)
    return v1 / magnitude
def distance(v1, v2, normalised_vectors=False):
    """
    Returns the cosine distance between two vectors.
    If the vectors are normalised, there is no need for the denominator, which is always one.
    """
    similarity = dot(v1, v2)
    if not normalised_vectors:
        similarity = similarity / (norm(v1) * norm(v2))
    return 1 - similarity
def simlex_scores(word_vectors, print_simlex=True):
    """Evaluate *word_vectors* on SimLex-999, WordSim-353, SimVerb and the old
    SimLex split, print the scores, and return (simlex_score, wordsim_score)."""
    rho_simlex, cov_simlex = simlex_analysis(word_vectors, "english")
    rho_ws, cov_ws = simlex_analysis(word_vectors, "english", source="wordsim")
    rho_simverb, cov_simverb = simlex_analysis(word_vectors, "english", source="simverb")
    rho_simlex_old, _cov_old = simlex_analysis(word_vectors, "english", source="simlex-old")
    print("SimLex score for", "english", "is:", rho_simlex, "Original SimLex score is:", rho_simlex_old, "coverage:", cov_simlex, "/ 999")
    print("SimVerb score for", "english", "is:", rho_simverb, "coverage:", cov_simverb, "/ 3500")
    print("WordSim score for", "english", "is:", rho_ws, "coverage:", cov_ws, "/ 353\n")
    return rho_simlex, rho_ws
def run_experiment(config_filepath):
    """
    This method runs the counterfitting experiment, printing the SimLex-999 score of the initial
    vectors, then counter-fitting them using the supplied linguistic constraints.
    We then print the SimLex-999 score of the final vectors, and save them to a .txt file in the
    results directory.
    """
    current_experiment = ExperimentRun(config_filepath)
    current_experiment.attract_repel()
    print("\nSimLex score (Spearman's rho coefficient) of the final vectors is:")
    # simlex_scores prints the detailed scores itself; the original's trailing
    # `, "\n"` built a discarded tuple and has been removed as dead code.
    simlex_scores(current_experiment.word_vectors)
    # Portable replacement for os.system("mkdir -p results") - no shell needed.
    os.makedirs("results", exist_ok=True)
    print_word_vectors(current_experiment.word_vectors, current_experiment.output_filepath)
def main():
    """
    The user can provide the location of the config file as an argument.
    If no location is specified, the default config file (experiment_parameters.cfg) is used.
    """
    try:
        config_filepath = sys.argv[1]
    except IndexError:  # narrowed from a bare except: only a missing argv entry is expected
        print("\nUsing the default config file: experiment_parameters.cfg\n")
        config_filepath = "experiment_parameters.cfg"
    run_experiment(config_filepath)


if __name__ == '__main__':
    main()
| [
"torch.mul",
"torch.LongTensor",
"numpy.array",
"torch.sum",
"numpy.linalg.norm",
"torch.DoubleTensor",
"numpy.dot",
"numpy.argmin",
"scipy.stats.spearmanr",
"numpy.fromstring",
"numpy.dtype",
"random.randint",
"torch.nn.Embedding",
"numpy.round",
"torch.nn.functional.mse_loss",
"scipy... | [((1087, 1152), 'torch.nn.functional.mse_loss', 'nn.functional.mse_loss', (['input_tensor', 'target_tensor'], {'reduce': '(False)'}), '(input_tensor, target_tensor, reduce=False)\n', (1109, 1152), True, 'import torch.nn as nn\n'), ((20439, 20471), 'random.randint', 'random.randint', (['(0)', '(top_range - 1)'], {}), '(0, top_range - 1)\n', (20453, 20471), False, 'import random\n'), ((23014, 23051), 'codecs.open', 'codecs.open', (['write_path', '"""w"""', '"""utf-8"""'], {}), "(write_path, 'w', 'utf-8')\n", (23025, 23051), False, 'import codecs\n'), ((25231, 25286), 'scipy.stats.spearmanr', 'spearmanr', (['spearman_original_list', 'spearman_target_list'], {}), '(spearman_original_list, spearman_target_list)\n', (25240, 25286), False, 'from scipy.stats import spearmanr\n'), ((27198, 27227), 'os.system', 'os.system', (['"""mkdir -p results"""'], {}), "('mkdir -p results')\n", (27207, 27227), False, 'import os\n'), ((614, 639), 'torch.FloatTensor', 'torch.FloatTensor', (['tensor'], {}), '(tensor)\n', (631, 639), False, 'import torch\n'), ((796, 820), 'torch.LongTensor', 'torch.LongTensor', (['tensor'], {}), '(tensor)\n', (812, 820), False, 'import torch\n'), ((981, 1007), 'torch.DoubleTensor', 'torch.DoubleTensor', (['tensor'], {}), '(tensor)\n', (999, 1007), False, 'import torch\n'), ((1164, 1186), 'torch.sum', 'torch.sum', (['loss_matrix'], {}), '(loss_matrix)\n', (1173, 1186), False, 'import torch\n'), ((1555, 1591), 'torch.nn.Embedding', 'nn.Embedding', (['W.shape[0]', 'W.shape[1]'], {}), '(W.shape[0], W.shape[1])\n', (1567, 1591), True, 'import torch.nn as nn\n'), ((1703, 1739), 'torch.nn.Embedding', 'nn.Embedding', (['W.shape[0]', 'W.shape[1]'], {}), '(W.shape[0], W.shape[1])\n', (1715, 1739), True, 'import torch.nn as nn\n'), ((1936, 1962), 'numpy.array', 'np.array', (['attract_examples'], {}), '(attract_examples)\n', (1944, 1962), True, 'import numpy as np\n'), ((2002, 2037), 'numpy.array', 'np.array', (['negative_examples_attract'], {}), 
'(negative_examples_attract)\n', (2010, 2037), True, 'import numpy as np\n'), ((4337, 4361), 'numpy.array', 'np.array', (['repel_examples'], {}), '(repel_examples)\n', (4345, 4361), True, 'import numpy as np\n'), ((4399, 4432), 'numpy.array', 'np.array', (['negative_examples_repel'], {}), '(negative_examples_repel)\n', (4407, 4432), True, 'import numpy as np\n'), ((7203, 7233), 'configparser.RawConfigParser', 'configparser.RawConfigParser', ([], {}), '()\n', (7231, 7233), False, 'import configparser\n'), ((9628, 9701), 'numpy.zeros', 'numpy.zeros', (['(self.vocabulary_size, self.embedding_size)'], {'dtype': '"""float64"""'}), "((self.vocabulary_size, self.embedding_size), dtype='float64')\n", (9639, 9701), False, 'import numpy\n'), ((12549, 12573), 'numpy.array', 'np.array', (['list_minibatch'], {}), '(list_minibatch)\n', (12557, 12573), True, 'import numpy as np\n'), ((13316, 13356), 'scipy.spatial.distance.pdist', 'pdist', (['list_of_representations', '"""cosine"""'], {}), "(list_of_representations, 'cosine')\n", (13321, 13356), False, 'from scipy.spatial.distance import pdist\n'), ((13388, 13423), 'scipy.spatial.distance.squareform', 'squareform', (['condensed_distance_list'], {}), '(condensed_distance_list)\n', (13398, 13423), False, 'from scipy.spatial.distance import squareform\n'), ((15955, 15966), 'time.time', 'time.time', ([], {}), '()\n', (15964, 15966), False, 'import time\n'), ((19863, 19874), 'time.time', 'time.time', ([], {}), '()\n', (19872, 19874), False, 'import time\n'), ((20529, 20561), 'random.randint', 'random.randint', (['(0)', '(top_range - 1)'], {}), '(0, top_range - 1)\n', (20543, 20561), False, 'import random\n'), ((22526, 22558), 'codecs.open', 'codecs.open', (['fname', '"""r"""', '"""utf-8"""'], {}), "(fname, 'r', 'utf-8')\n", (22537, 22558), False, 'import codecs\n'), ((23508, 23575), 'codecs.open', 'codecs.open', (["('evaluation/simlex-' + language + '.txt')", '"""r"""', '"""utf-8"""'], {}), "('evaluation/simlex-' + language + '.txt', 
'r', 'utf-8')\n", (23519, 23575), False, 'import codecs\n'), ((25378, 25386), 'numpy.linalg.norm', 'norm', (['v1'], {}), '(v1)\n', (25382, 25386), False, 'from numpy.linalg import norm\n'), ((1634, 1655), 'torch.DoubleTensor', 'torch.DoubleTensor', (['W'], {}), '(W)\n', (1652, 1655), False, 'import torch\n'), ((1785, 1806), 'torch.DoubleTensor', 'torch.DoubleTensor', (['W'], {}), '(W)\n', (1803, 1806), False, 'import torch\n'), ((2732, 2788), 'torch.mul', 'torch.mul', (['attract_examples_left', 'attract_examples_right'], {}), '(attract_examples_left, attract_examples_right)\n', (2741, 2788), False, 'import torch\n'), ((2937, 3001), 'torch.mul', 'torch.mul', (['attract_examples_left', 'negative_examples_attract_left'], {}), '(attract_examples_left, negative_examples_attract_left)\n', (2946, 3001), False, 'import torch\n'), ((3065, 3131), 'torch.mul', 'torch.mul', (['attract_examples_right', 'negative_examples_attract_right'], {}), '(attract_examples_right, negative_examples_attract_right)\n', (3074, 3131), False, 'import torch\n'), ((3160, 3280), 'torch.nn.functional.relu', 'nn.functional.relu', (['(self.attract_margin + attract_similarity_to_negatives_left -\n attract_similarity_between_examples)'], {}), '(self.attract_margin +\n attract_similarity_to_negatives_left - attract_similarity_between_examples)\n', (3178, 3280), True, 'import torch.nn as nn\n'), ((3304, 3430), 'torch.nn.functional.relu', 'nn.functional.relu', (['(self.attract_margin + attract_similarity_to_negatives_right -\n attract_similarity_between_examples)'], {}), '(self.attract_margin +\n attract_similarity_to_negatives_right - attract_similarity_between_examples\n )\n', (3322, 3430), True, 'import torch.nn as nn\n'), ((5101, 5153), 'torch.mul', 'torch.mul', (['repel_examples_left', 'repel_examples_right'], {}), '(repel_examples_left, repel_examples_right)\n', (5110, 5153), False, 'import torch\n'), ((5300, 5360), 'torch.mul', 'torch.mul', (['repel_examples_left', 'negative_examples_repel_left'], 
{}), '(repel_examples_left, negative_examples_repel_left)\n', (5309, 5360), False, 'import torch\n'), ((5422, 5484), 'torch.mul', 'torch.mul', (['repel_examples_right', 'negative_examples_repel_right'], {}), '(repel_examples_right, negative_examples_repel_right)\n', (5431, 5484), False, 'import torch\n'), ((5511, 5625), 'torch.nn.functional.relu', 'nn.functional.relu', (['(self.repel_margin + repel_similarity_to_negatives_left -\n repel_similarity_between_examples)'], {}), '(self.repel_margin + repel_similarity_to_negatives_left -\n repel_similarity_between_examples)\n', (5529, 5625), True, 'import torch.nn as nn\n'), ((5649, 5764), 'torch.nn.functional.relu', 'nn.functional.relu', (['(self.repel_margin + repel_similarity_to_negatives_right -\n repel_similarity_between_examples)'], {}), '(self.repel_margin + repel_similarity_to_negatives_right -\n repel_similarity_between_examples)\n', (5667, 5764), True, 'import torch.nn as nn\n'), ((10556, 10603), 'codecs.open', 'codecs.open', (['constraints_filepath', '"""r"""', '"""utf-8"""'], {}), "(constraints_filepath, 'r', 'utf-8')\n", (10567, 10603), False, 'import codecs\n'), ((14079, 14121), 'numpy.argmin', 'numpy.argmin', (['square_distance_list'], {'axis': '(1)'}), '(square_distance_list, axis=1)\n', (14091, 14121), False, 'import numpy\n'), ((14282, 14324), 'numpy.argmax', 'numpy.argmax', (['square_distance_list'], {'axis': '(1)'}), '(square_distance_list, axis=1)\n', (14294, 14324), False, 'import numpy\n'), ((16705, 16738), 'random.shuffle', 'random.shuffle', (['order_of_synonyms'], {}), '(order_of_synonyms)\n', (16719, 16738), False, 'import random\n'), ((16751, 16784), 'random.shuffle', 'random.shuffle', (['order_of_antonyms'], {}), '(order_of_antonyms)\n', (16765, 16784), False, 'import random\n'), ((17030, 17065), 'random.shuffle', 'random.shuffle', (['list_of_batch_types'], {}), '(list_of_batch_types)\n', (17044, 17065), False, 'import random\n'), ((20982, 20997), 'random.random', 'random.random', ([], {}), 
'()\n', (20995, 20997), False, 'import random\n'), ((21128, 21143), 'random.random', 'random.random', ([], {}), '()\n', (21141, 21143), False, 'import random\n'), ((22704, 22755), 'numpy.fromstring', 'numpy.fromstring', (['line[1]'], {'dtype': '"""float64"""', 'sep': '""" """'}), "(line[1], dtype='float64', sep=' ')\n", (22720, 22755), False, 'import numpy\n'), ((23630, 23692), 'codecs.open', 'codecs.open', (['"""evaluation/simlex-english-old.txt"""', '"""r"""', '"""utf-8"""'], {}), "('evaluation/simlex-english-old.txt', 'r', 'utf-8')\n", (23641, 23692), False, 'import codecs\n'), ((25648, 25659), 'numpy.dot', 'dot', (['v1', 'v2'], {}), '(v1, v2)\n', (25651, 25659), False, 'from numpy import dot\n'), ((550, 575), 'torch.FloatTensor', 'torch.FloatTensor', (['tensor'], {}), '(tensor)\n', (567, 575), False, 'import torch\n'), ((733, 757), 'torch.LongTensor', 'torch.LongTensor', (['tensor'], {}), '(tensor)\n', (749, 757), False, 'import torch\n'), ((916, 942), 'torch.DoubleTensor', 'torch.DoubleTensor', (['tensor'], {}), '(tensor)\n', (934, 942), False, 'import torch\n'), ((17356, 17367), 'time.time', 'time.time', ([], {}), '()\n', (17365, 17367), False, 'import time\n'), ((21086, 21106), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (21100, 21106), False, 'import random\n'), ((21233, 21253), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (21247, 21253), False, 'import random\n'), ((23744, 23795), 'codecs.open', 'codecs.open', (['"""evaluation/simverb.txt"""', '"""r"""', '"""utf-8"""'], {}), "('evaluation/simverb.txt', 'r', 'utf-8')\n", (23755, 23795), False, 'import codecs\n'), ((25689, 25700), 'numpy.dot', 'dot', (['v1', 'v2'], {}), '(v1, v2)\n', (25692, 25700), False, 'from numpy import dot\n'), ((22088, 22110), 'numpy.dtype', 'numpy.dtype', (['"""float64"""'], {}), "('float64')\n", (22099, 22110), False, 'import numpy\n'), ((23847, 23925), 'codecs.open', 'codecs.open', (["('evaluation/ws-353/wordsim353-' + language + 
'.txt')", '"""r"""', '"""utf-8"""'], {}), "('evaluation/ws-353/wordsim353-' + language + '.txt', 'r', 'utf-8')\n", (23858, 23925), False, 'import codecs\n'), ((25705, 25713), 'numpy.linalg.norm', 'norm', (['v1'], {}), '(v1)\n', (25709, 25713), False, 'from numpy.linalg import norm\n'), ((25716, 25724), 'numpy.linalg.norm', 'norm', (['v2'], {}), '(v2)\n', (25720, 25724), False, 'from numpy.linalg import norm\n'), ((23130, 23172), 'numpy.round', 'numpy.round', (['word_vectors[key]'], {'decimals': '(6)'}), '(word_vectors[key], decimals=6)\n', (23141, 23172), False, 'import numpy\n'), ((17284, 17295), 'time.time', 'time.time', ([], {}), '()\n', (17293, 17295), False, 'import time\n'), ((18587, 18610), 'torch.sum', 'torch.sum', (['attract_cost'], {}), '(attract_cost)\n', (18596, 18610), False, 'import torch\n'), ((19301, 19322), 'torch.sum', 'torch.sum', (['repel_cost'], {}), '(repel_cost)\n', (19310, 19322), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Plot sensitivity and false positive rate for output of "core_and_accessory_results.py"
"""
import glob
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
# All per-gene classification result CSVs written by the upstream analysis.
tenthousand = glob.glob("cluster_results/core/*.csv")
files_dict = []
kmer_dict = {}
# Sweep every evaluated k-mer length; for each, compute sensitivity and
# false-positive rate at each score threshold (the strings "0".."10" label
# the thresholds' "Result_*" columns in the CSVs).
for kmer in tqdm([1,2,4,6,8,10]):
    sens_fpr_dict = {}
    for threshold in tqdm(["0","05","1","15","2","25", "3","35","4","45", "5","55","6","65", "7", "75", "8","85", "9","95", "10"]):
        Result = []
        print("importing files...")
        for x in tenthousand:
            #try:
            df = pd.read_csv(x, engine= "python")
            # Skip unnamed ("group_") genes - only named genes are scored here.
            if "group_" in df["Gene Name"][0]:
                continue
            #except:
                #continue
            # The CSV row index corresponds to the k-mer length being processed.
            if kmer == 1:
                Result.append(df["Result_"+threshold][0])
            elif kmer == 2:
                Result.append(df["Result_"+threshold][1])
            elif kmer == 4:
                Result.append(df["Result_"+threshold][2])
            elif kmer == 6:
                Result.append(df["Result_"+threshold][3])
            elif kmer == 8:
                Result.append(df["Result_"+threshold][4])
            elif kmer == 10:
                Result.append(df["Result_"+threshold][5])
        print("calculating sensitivity and specificity...")
        # Sensitivity = TP / (TP + FN); guarded because the denominator can be zero.
        try:
            kmer_sens = (Result.count("TP")) / (Result.count("TP") + Result.count("FN"))
        except:
            kmer_sens = 0
        # NOTE(review): FP + TN == 0 would raise ZeroDivisionError here - unguarded.
        kmer_fpr = (Result.count("FP")) / (Result.count("FP") + Result.count("TN"))
        sens_fpr_local = {"sensitivity" : kmer_sens, "false positive rate" : kmer_fpr}
        sens_fpr_dict.update({threshold: sens_fpr_local})
    kmer_dict.update({kmer : sens_fpr_dict})
    # NOTE(review): the same kmer_dict object is appended on every iteration,
    # so files_dict ends up holding repeated references to a single dict.
    files_dict.append(kmer_dict)
print("plotting...")
to_plot_files = files_dict[0]
one_kmers = to_plot_files[1]
two_kmers =to_plot_files[2]
four_kmers =to_plot_files[4]
six_kmers =to_plot_files[6]
eight_kmers =to_plot_files[8]
ten_kmers =to_plot_files[10]
fig, ax = plt.subplots()
# NOTE(review): fig_2/ax_2 are created but never used below.
fig_2, ax_2 = plt.subplots()
kmer_list_dict = {1:one_kmers,2:two_kmers,4:four_kmers, 6:six_kmers,8:eight_kmers,10:ten_kmers}#,20:twenty_kmers}
#kmer_list_dict = {10:ten_kmers}#,20:twenty_kmers}
# Draw one ROC-style curve per k-mer length.
for kmer_length, kmer in kmer_list_dict.items():
    sense = []
    fpr = []
    # NOTE(review): thresh_list is assigned but never used.
    thresh_list = [0,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1]
    for key, value in kmer.items():
        for k, v in value.items():
            if k == "sensitivity":
                sense.append(v)
            else:
                fpr.append(v)
    ax.plot(fpr, sense, label = str(kmer_length))
    ax.set_xlim([0.2,0.5])
    ax.set_ylim([0.4,1])
    ax.scatter(fpr, sense, s=15)
#ax.legend(title="k-mer length", loc = 'lower right')
ax.set_xlabel("false positive rate")
ax.set_ylabel("sensitivity")
import numpy as np
x = np.linspace(-5,5,100)
# Diagonal reference line (random-classifier baseline).
ax.plot(x, x, '-k', linestyle='--', linewidth = 0.7)
fig.savefig('centroid_core_zoomed.pdf')
| [
"pandas.read_csv",
"tqdm.tqdm",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"glob.glob"
] | [((220, 259), 'glob.glob', 'glob.glob', (['"""cluster_results/core/*.csv"""'], {}), "('cluster_results/core/*.csv')\n", (229, 259), False, 'import glob\n'), ((305, 330), 'tqdm.tqdm', 'tqdm', (['[1, 2, 4, 6, 8, 10]'], {}), '([1, 2, 4, 6, 8, 10])\n', (309, 330), False, 'from tqdm import tqdm\n'), ((2063, 2077), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2075, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2092, 2106), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2104, 2106), True, 'import matplotlib.pyplot as plt\n'), ((2932, 2955), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2943, 2955), True, 'import numpy as np\n'), ((371, 497), 'tqdm.tqdm', 'tqdm', (["['0', '05', '1', '15', '2', '25', '3', '35', '4', '45', '5', '55', '6',\n '65', '7', '75', '8', '85', '9', '95', '10']"], {}), "(['0', '05', '1', '15', '2', '25', '3', '35', '4', '45', '5', '55', '6',\n '65', '7', '75', '8', '85', '9', '95', '10'])\n", (375, 497), False, 'from tqdm import tqdm\n'), ((603, 634), 'pandas.read_csv', 'pd.read_csv', (['x'], {'engine': '"""python"""'}), "(x, engine='python')\n", (614, 634), True, 'import pandas as pd\n')] |
# Demonstration plots of local vs. global minima of two objective functions.
import numpy as np
import matplotlib.pyplot as plt

plt.style.use('seaborn')

## Example 1: a 1-D objective with many local minima.
x = np.linspace(-3, 3, 100)
obj_fun = np.cos(14.5 * x - 0.3) + x * (x + 0.2) + 1.01
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
ax.plot(x, obj_fun)
# Mark the sampled global minimum with a dashed vertical line.
ax.axvline(x=x[np.argmin(obj_fun)], color='r', ls='--')
ax.set_ylabel(r'$f(x)$')
ax.set_xlabel(r'$x$')
plt.savefig('local_global_objective_function.png', dpi=300, bbox_inches='tight')
plt.close('all')

## Example 2 Surface plot
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on older matplotlib
x2 = np.linspace(-6, 6, 100)
y2 = np.linspace(-6, 6, 100)
X, Y = np.meshgrid(x2, y2)
obj_fun2 = ((X + 0.5)**4 - 30 * X**2 - 20 * X + (Y + 0.5)**4 - 30 * Y**2 - 20 * Y) / 100
fig = plt.figure(figsize=(5, 5))
# BUGFIX: Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and
# removed in 3.6; add_subplot is the supported way to request 3-D axes.
ax = fig.add_subplot(projection='3d')
# Plot the surface.
surf = ax.plot_surface(X, Y, obj_fun2, cmap=cm.jet, linewidth=0, antialiased=False)
# NOTE(review): the axis labels look swapped (ylabel=x, xlabel=y) - kept as-is.
ax.set_ylabel(r'$x$')
ax.set_xlabel(r'$y$')
plt.savefig('local_global_objective_function3D.png', dpi=300, bbox_inches='tight')
plt.close('all')

## Example 2 contour plot
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
cp = ax.contourf(X, Y, obj_fun2, 20, cmap='jet')
plt.colorbar(cp)
# ax.clabel(cp, inline=True, fontsize=10)
ax.set_ylabel(r'$y$')
ax.set_xlabel(r'$x$')
plt.savefig('local_global_objective_function_contour.png', dpi=300, bbox_inches='tight')
plt.close('all') | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.argmin",
"numpy.meshgrid",
"matplotlib.pyplot.subplots"
] | [((51, 75), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (64, 75), True, 'import matplotlib.pyplot as plt\n'), ((96, 119), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(100)'], {}), '(-3, 3, 100)\n', (107, 119), True, 'import numpy as np\n'), ((183, 218), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (195, 218), True, 'import matplotlib.pyplot as plt\n'), ((338, 423), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""local_global_objective_function.png"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('local_global_objective_function.png', dpi=300, bbox_inches='tight'\n )\n", (349, 423), True, 'import matplotlib.pyplot as plt\n'), ((417, 433), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (426, 433), True, 'import matplotlib.pyplot as plt\n'), ((535, 558), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(100)'], {}), '(-6, 6, 100)\n', (546, 558), True, 'import numpy as np\n'), ((562, 585), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(100)'], {}), '(-6, 6, 100)\n', (573, 585), True, 'import numpy as np\n'), ((590, 609), 'numpy.meshgrid', 'np.meshgrid', (['x2', 'y2'], {}), '(x2, y2)\n', (601, 609), True, 'import numpy as np\n'), ((690, 716), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (700, 716), True, 'import matplotlib.pyplot as plt\n'), ((893, 980), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""local_global_objective_function3D.png"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('local_global_objective_function3D.png', dpi=300, bbox_inches=\n 'tight')\n", (904, 980), True, 'import matplotlib.pyplot as plt\n'), ((974, 990), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (983, 990), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1065), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], 
{'figsize': '(6, 5)'}), '(1, 1, figsize=(6, 5))\n', (1043, 1065), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1128), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cp'], {}), '(cp)\n', (1124, 1128), True, 'import matplotlib.pyplot as plt\n'), ((1216, 1308), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""local_global_objective_function_contour.png"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('local_global_objective_function_contour.png', dpi=300,\n bbox_inches='tight')\n", (1227, 1308), True, 'import matplotlib.pyplot as plt\n'), ((1303, 1319), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1312, 1319), True, 'import matplotlib.pyplot as plt\n'), ((128, 150), 'numpy.cos', 'np.cos', (['(14.5 * x - 0.3)'], {}), '(14.5 * x - 0.3)\n', (134, 150), True, 'import numpy as np\n'), ((252, 270), 'numpy.argmin', 'np.argmin', (['obj_fun'], {}), '(obj_fun)\n', (261, 270), True, 'import numpy as np\n')] |
import numpy as np
import pyFAI
import h5py
import fabio
### This function integrates a 2D image using integrate2D pyfai's function and save the results in a h5file named Results_name of the h5 file containing the image
### 1) It looks at the image in the h5 file
### 2) creates a mask based on the int_max and int_min the user gives
### 3) Integrate the 2D image in number of given sectors (npt_azim) and with a given number of points in the radial direction (npt_rad)
### 4) saves the results in a h5file named Results_name of the h5 file containing the image
### Definition of the function inputs:
### root_data: the path of the file where the h5 file is saved
### h5file: the name of the h5 file where the images are saved
### scan: The name of the group (in h5file) on which the concerned measurement are saved
### detector name: name of the used detector
### poni_file: the path of the poni file which define the experimental geometry (ai)
### int_min: minimum intensity below which the pixels have to be masked (deleted)
### int_max: maximum intensity above which the pixels have to be masked (deleted)
### npt_rad: number of points in the radial direction (number of 2theta points)
### npt_azim: number of sectors for the integration in the azimutha direction (around the ring)
### x_unit: the unit of the x axis (2th_deg or 2th_rad or q_A^-1...)
### im_dark: the name of the dark image (if no dark image exist please give 0 as argument)
### im_mask: the name of the mask image (if no mask image exist please give 0 as argument)
def _build_mask(frame, im_mask):
    # Return the detector mask for one 2D frame: the image named by ``im_mask``
    # when one is given, otherwise an all-zero (nothing masked) array.
    if im_mask == "0":
        mask_mat = np.zeros((np.shape(frame)[0], np.shape(frame)[1]), float)
        print("### No mask was used for the integration")
    else:
        mask_mat = fabio.open(im_mask).data
        print("### The image: " + im_mask + " " + "was used as mask")
    return mask_mat


def _integrate_frame(ai, frame, npt_rad, npt_azim, x_unit, mask_mat, im_dark):
    # Run pyFAI's 2D (caked) integration on a single frame and return
    # (intensity, radial axis, azimuthal axis).
    print(
        "### Integration started with a"
        + " "
        + npt_rad
        + " "
        + "steps in tth and with a"
        + " "
        + npt_azim
        + " "
        + "sectors"
    )
    kwargs = {
        "correctSolidAngle": True,
        "polarization_factor": 0.95,
        "unit": x_unit,
        "method": "splitpixel",
        "mask": mask_mat,
    }
    if im_dark != "0":
        # NOTE(review): ``im_dark`` is forwarded unchanged, exactly as in the
        # original code. pyFAI expects an array for ``dark``; if callers pass
        # a file name this probably needs ``fabio.open(im_dark).data`` --
        # confirm with the callers before changing.
        kwargs["dark"] = im_dark
    cts, tth, chi = ai.integrate2d(frame, int(npt_rad), int(npt_azim), **kwargs)
    print("### Integration completed")
    return cts, tth, chi


def _save_frame(parent_group, group_name, cts, tth, chi, npt_rad, npt_azim):
    # Store one frame's caked intensities plus its tth/chi axes under
    # ``parent_group/group_name``. Column 0 of ``tth_vs_cts`` is the tth axis,
    # the remaining columns are one intensity profile per azimuthal sector.
    print("### Saving in h5file started")
    rslt_matrix_cts = np.zeros((int(npt_rad), int(npt_azim) + 1), float)
    rslt_matrix_cts[:, 0] = tth[:]
    for ichi in range(np.shape(chi)[0]):
        rslt_matrix_cts[:, ichi + 1] = cts[ichi, :]
    grp = parent_group.create_group(group_name)
    grp.create_dataset("tth_vs_cts", dtype="f", data=rslt_matrix_cts)
    grp.create_dataset("chi", dtype="f", data=chi)
    grp.create_dataset("tth", dtype="f", data=tth)
    print("### Saving in h5file completed")


def integration_2D(
    root_data,
    h5file,
    scan,
    detector_name,
    poni_file,
    npt_rad,
    npt_azim,
    x_unit,
    im_dark,
    im_mask,
):
    """Azimuthally integrate the 2D image(s) of ``scan`` and store the results.

    Every frame found under ``/<scan>/measurement/<detector_name>`` in
    ``root_data/h5file`` is cake-integrated with pyFAI and written to
    ``root_data/Results_<h5file>`` under ``<scan>/raw_integration_2D/image_XXXXX``.

    Parameters
    ----------
    root_data : str -- directory containing the input HDF5 file.
    h5file : str -- name of the HDF5 file holding the raw images.
    scan : str -- group name of the measurement inside ``h5file``.
    detector_name : str -- dataset name of the detector images.
    poni_file : str -- pyFAI PONI file describing the experimental geometry.
    npt_rad, npt_azim : str -- number of radial points / azimuthal sectors.
    x_unit : str -- radial unit, e.g. ``"2th_deg"`` or ``"q_A^-1"``.
    im_dark : str -- dark image ("0" when none).
    im_mask : str -- mask image file name ("0" when none).

    Fixes relative to the original implementation:
    * both HDF5 files are now closed on all paths (they previously leaked);
    * the zero-mask for an image *stack* is built from the frame shape
      (a single 2D frame) instead of ``shape[0] x shape[1]`` of the stack;
    * the hand-rolled zero-padding if-chain is replaced by ``"image_%05d"``,
      which produces identical names and also works beyond 100000 frames.
    """
    print(im_dark)  # debug echo of the dark-image argument (kept on purpose)
    fh5_save = h5py.File(
        root_data + "/" + "Results" + "_" + h5file, "a"
    )  # results file
    try:
        level_1 = fh5_save.create_group(scan)
        level_1_subg_1 = level_1.create_group("raw_integration_2D")
        level_1_subg_3 = level_1_subg_1.create_group("Integration_parameter")
        level_1_subg_3.create_dataset("npt_azim", dtype="f", data=int(npt_azim))
        level_1_subg_3.create_dataset("npt_rad", dtype="f", data=int(npt_rad))
        ai = pyFAI.load(poni_file)  # integration geometry
        r_h5file = h5py.File(root_data + "/" + h5file, "r")
        try:
            image_nc = r_h5file["/" + scan + "/measurement/" + detector_name]
            image = np.float64(image_nc)  # int32 -> float copy
            print("### The matrix image is a " + str(np.ndim(image)) + "D matrix")
            if np.ndim(image) == 2:
                # Single frame.
                mask_mat = _build_mask(image, im_mask)
                cts, tth, chi = _integrate_frame(
                    ai, image, npt_rad, npt_azim, x_unit, mask_mat, im_dark
                )
                _save_frame(
                    level_1_subg_1, "image_00000", cts, tth, chi, npt_rad, npt_azim
                )
                print("### Hope to see you again")
            else:
                # Stack of frames: build the mask once from the first frame.
                mask_mat = _build_mask(image[0], im_mask)
                for i in range(np.shape(image)[0]):
                    print("*#*#*#*#* Processing of image_" + "%.0f" % i + "*#*#*#*#*")
                    cts, tth, chi = _integrate_frame(
                        ai, image[i], npt_rad, npt_azim, x_unit, mask_mat, im_dark
                    )
                    _save_frame(
                        level_1_subg_1, "image_%05d" % i, cts, tth, chi,
                        npt_rad, npt_azim
                    )
        finally:
            r_h5file.close()
    finally:
        fh5_save.close()
    return
| [
"pyFAI.load",
"numpy.float64",
"numpy.ndim",
"h5py.File",
"fabio.open",
"numpy.shape"
] | [((1771, 1829), 'h5py.File', 'h5py.File', (["(root_data + '/' + 'Results' + '_' + h5file)", '"""a"""'], {}), "(root_data + '/' + 'Results' + '_' + h5file, 'a')\n", (1780, 1829), False, 'import h5py\n'), ((2353, 2374), 'pyFAI.load', 'pyFAI.load', (['poni_file'], {}), '(poni_file)\n', (2363, 2374), False, 'import pyFAI\n'), ((2467, 2507), 'h5py.File', 'h5py.File', (["(root_data + '/' + h5file)", '"""r"""'], {}), "(root_data + '/' + h5file, 'r')\n", (2476, 2507), False, 'import h5py\n'), ((2660, 2680), 'numpy.float64', 'np.float64', (['image_nc'], {}), '(image_nc)\n', (2670, 2680), True, 'import numpy as np\n'), ((2813, 2827), 'numpy.ndim', 'np.ndim', (['image'], {}), '(image)\n', (2820, 2827), True, 'import numpy as np\n'), ((3390, 3409), 'fabio.open', 'fabio.open', (['im_mask'], {}), '(im_mask)\n', (3400, 3409), False, 'import fabio\n'), ((5297, 5310), 'numpy.shape', 'np.shape', (['chi'], {}), '(chi)\n', (5305, 5310), True, 'import numpy as np\n'), ((6178, 6197), 'fabio.open', 'fabio.open', (['im_mask'], {}), '(im_mask)\n', (6188, 6197), False, 'import fabio\n'), ((6302, 6317), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (6310, 6317), True, 'import numpy as np\n'), ((2775, 2789), 'numpy.ndim', 'np.ndim', (['image'], {}), '(image)\n', (2782, 2789), True, 'import numpy as np\n'), ((8458, 8471), 'numpy.shape', 'np.shape', (['chi'], {}), '(chi)\n', (8466, 8471), True, 'import numpy as np\n'), ((3161, 3176), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3169, 3176), True, 'import numpy as np\n'), ((3181, 3196), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (3189, 3196), True, 'import numpy as np\n'), ((5953, 5968), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5961, 5968), True, 'import numpy as np\n'), ((5973, 5988), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (5981, 5988), True, 'import numpy as np\n')] |
# -*- coding:utf-8 -*-
import unittest
import nose
import dmr
import os
import numpy as np
from tests.settings import (DMR_DOC_FILEPATH, DMR_VEC_FILEPATH,
K, BETA, SIGMA, L, mk_dmr_dat, count_word_freq)
class DMRTestCase(unittest.TestCase):
    """Unit tests for the dmr.MDMR topic model.

    Fix: ``assertAlmostEquals`` is a deprecated alias (removed in
    Python 3.12); all uses are replaced by ``assertAlmostEqual``.
    """

    # Upper bound (exclusive) on the number of vectors drawn per document.
    NUM_VECS = 10

    def setUp(self):
        """Seed numpy's RNG for reproducibility and build the corpora once."""
        np.random.seed(0)
        if not os.path.exists(DMR_DOC_FILEPATH)\
            or not os.path.exists(DMR_VEC_FILEPATH):
            mk_dmr_dat()

    def _init_mdmr(self):
        """Return ``(vocabulary, docs, vecs, model)`` built from the corpora."""
        corpus = dmr.Corpus.read(DMR_DOC_FILEPATH)
        vcorpus = dmr.Corpus.read(DMR_VEC_FILEPATH, dtype=float)
        # Generate 1..NUM_VECS-1 weighted vecs per doc with mean=vec
        vecs = [[(1.0, np.random.normal(loc=vec))
                 for i in range(np.random.randint(1, self.NUM_VECS))]
                for vec in vcorpus]
        voca = dmr.Vocabulary()
        docs = voca.read_corpus(corpus)
        lda = dmr.MDMR(K, SIGMA, BETA, docs, vecs, voca.size())
        return voca, docs, vecs, lda

    def test_mdmr___init__(self):
        '''
        MDMR.__init__: counter matrices must be mutually consistent.
        '''
        voca, docs, vecs, lda = self._init_mdmr()
        # L
        self.assertEqual(lda.L, L)
        # n_m_z: per-document topic counts sum to the document length
        self.assertAlmostEqual(np.sum(lda.n_m_z[0]), 10)
        self.assertAlmostEqual(np.sum(lda.n_m_z[1]), 10)
        # n_z_w: per-word counts sum to word frequency plus smoothing
        wfreq = count_word_freq(docs)
        self.assertAlmostEqual(np.sum(lda.n_z_w[:, 0]),
                               wfreq[0] + K * BETA)
        self.assertAlmostEqual(np.sum(lda.n_z_w[:, 1]),
                               wfreq[1] + K * BETA)
        # n_z: per-topic totals match the row sums of n_z_w
        self.assertAlmostEqual(lda.n_z[0], np.sum(lda.n_z_w[0, :]))
        self.assertAlmostEqual(lda.n_z[1], np.sum(lda.n_z_w[1, :]))
        # z_m_n: topic assignments agree with the per-document counters
        self.assertAlmostEqual(list(lda.z_m_n[0]).count(0), lda.n_m_z[0, 0])
        self.assertAlmostEqual(list(lda.z_m_n[0]).count(1), lda.n_m_z[0, 1])

    def test_mdmr_inference(self):
        '''
        MDMR.inference: one Gibbs sweep must preserve all counter totals.
        '''
        voca, docs, vecs, lda = self._init_mdmr()
        n_m_z_0 = np.sum(lda.n_m_z[0])
        n_m_z_1 = np.sum(lda.n_m_z[1])
        n_z_w_0 = np.sum(lda.n_z_w[:, 0])
        n_z_w_1 = np.sum(lda.n_z_w[:, 1])
        lda.inference()
        self.assertAlmostEqual(np.sum(lda.n_m_z[0]), n_m_z_0)
        self.assertAlmostEqual(np.sum(lda.n_m_z[1]), n_m_z_1)
        self.assertAlmostEqual(np.sum(lda.n_z_w[:, 0]), n_z_w_0)
        self.assertAlmostEqual(np.sum(lda.n_z_w[:, 1]), n_z_w_1)

    def test_mdmr_learning(self):
        '''
        MDMR.learning: after training, each document still holds L topics.
        '''
        voca, docs, vecs, lda = self._init_mdmr()
        lda.learning(1, voca)
        np.testing.assert_almost_equal([L]*len(lda.docs), np.sum(lda.n_m_z, axis=1))

    def test_mdmr_get_alpha(self):
        '''
        MDMR.get_alpha: alpha is the length-weighted mean of exp(vec . Lambda).
        '''
        voca, docs, vecs, lda = self._init_mdmr()
        lda.vecs = [
            [np.array([0.1, 0.1]), np.array([0.2, 0.2])],
            [np.array([1.0, 1.0]), np.array([0.5, 0.5])],
        ]
        lda.lens = [np.array([0.5, 0.5]), np.array([0.8, 0.2])]
        lda.Lambda = np.array([
            [1.0, 1.0],
            [2.0, 0.5]
        ])
        alpha = lda.get_alpha(lda.Lambda)
        self.assertAlmostEqual(alpha[0, 0],
                               0.5 * np.exp(0.1+0.1) + 0.5 * np.exp(0.2+0.2))
        self.assertAlmostEqual(alpha[0, 1],
                               0.5 * np.exp(0.2+0.05) + 0.5 * np.exp(0.4+0.1))
        self.assertAlmostEqual(alpha[1, 0],
                               0.8 * np.exp(1.0+1.0) + 0.2 * np.exp(0.5+0.5))
        self.assertAlmostEqual(alpha[1, 1],
                               0.8 * np.exp(2.0+0.5) + 0.2 * np.exp(1.0+0.25))

    def test_mdmr__dll_common(self):
        '''
        MDMR._dll_common: the vec-weighted counterpart of get_alpha.
        '''
        voca, docs, vecs, lda = self._init_mdmr()
        lda.vecs = [
            [np.array([0.1, 0.1]), np.array([0.2, 0.2])],
            [np.array([1.0, 1.0]), np.array([0.5, 0.5])],
        ]
        lda.lens = [np.array([0.5, 0.5]), np.array([0.8, 0.2])]
        lda.Lambda = np.array([
            [1.0, 1.0],
            [2.0, 0.5]
        ])
        common = lda._dll_common(lda.Lambda)
        self.assertAlmostEqual(common[0, 0, 0],
            0.5 * 0.1 * np.exp(0.1+0.1) + 0.5 * 0.2 * np.exp(0.2+0.2))
        self.assertAlmostEqual(common[0, 0, 1],
            0.5 * 0.1 * np.exp(0.1+0.1) + 0.5 * 0.2 * np.exp(0.2+0.2))
        self.assertAlmostEqual(common[0, 1, 0],
            0.5 * 0.1 * np.exp(0.2+0.05) + 0.5 * 0.2 * np.exp(0.4+0.1))
        self.assertAlmostEqual(common[0, 1, 1],
            0.5 * 0.1 * np.exp(0.2+0.05) + 0.5 * 0.2 * np.exp(0.4+0.1))
        self.assertAlmostEqual(common[1, 0, 0],
            0.8 * 1.0 * np.exp(1.0+1.0) + 0.2 * 0.5 * np.exp(0.5+0.5))
        self.assertAlmostEqual(common[1, 0, 1],
            0.8 * 1.0 * np.exp(1.0+1.0) + 0.2 * 0.5 * np.exp(0.5+0.5))
        self.assertAlmostEqual(common[1, 1, 0],
            0.8 * 1.0 * np.exp(2.0+0.5) + 0.2 * 0.5 * np.exp(1.0+0.25))
        self.assertAlmostEqual(common[1, 1, 1],
            0.8 * 1.0 * np.exp(2.0+0.5) + 0.2 * 0.5 * np.exp(1.0+0.25))
if __name__ == '__main__':
    # Run this module's tests verbosely through the (legacy) nose runner.
    nose.main(argv=['nose', '-v'])
| [
"numpy.random.normal",
"os.path.exists",
"tests.settings.count_word_freq",
"dmr.Vocabulary",
"dmr.Corpus.read",
"tests.settings.mk_dmr_dat",
"numpy.exp",
"numpy.sum",
"numpy.array",
"numpy.random.randint",
"numpy.random.seed",
"nose.main"
] | [((5021, 5051), 'nose.main', 'nose.main', ([], {'argv': "['nose', '-v']"}), "(argv=['nose', '-v'])\n", (5030, 5051), False, 'import nose\n'), ((293, 310), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (307, 310), True, 'import numpy as np\n'), ((482, 515), 'dmr.Corpus.read', 'dmr.Corpus.read', (['DMR_DOC_FILEPATH'], {}), '(DMR_DOC_FILEPATH)\n', (497, 515), False, 'import dmr\n'), ((534, 580), 'dmr.Corpus.read', 'dmr.Corpus.read', (['DMR_VEC_FILEPATH'], {'dtype': 'float'}), '(DMR_VEC_FILEPATH, dtype=float)\n', (549, 580), False, 'import dmr\n'), ((805, 821), 'dmr.Vocabulary', 'dmr.Vocabulary', ([], {}), '()\n', (819, 821), False, 'import dmr\n'), ((1306, 1327), 'tests.settings.count_word_freq', 'count_word_freq', (['docs'], {}), '(docs)\n', (1321, 1327), False, 'from tests.settings import DMR_DOC_FILEPATH, DMR_VEC_FILEPATH, K, BETA, SIGMA, L, mk_dmr_dat, count_word_freq\n'), ((1980, 2000), 'numpy.sum', 'np.sum', (['lda.n_m_z[0]'], {}), '(lda.n_m_z[0])\n', (1986, 2000), True, 'import numpy as np\n'), ((2019, 2039), 'numpy.sum', 'np.sum', (['lda.n_m_z[1]'], {}), '(lda.n_m_z[1])\n', (2025, 2039), True, 'import numpy as np\n'), ((2058, 2081), 'numpy.sum', 'np.sum', (['lda.n_z_w[:, 0]'], {}), '(lda.n_z_w[:, 0])\n', (2064, 2081), True, 'import numpy as np\n'), ((2100, 2123), 'numpy.sum', 'np.sum', (['lda.n_z_w[:, 1]'], {}), '(lda.n_z_w[:, 1])\n', (2106, 2123), True, 'import numpy as np\n'), ((3021, 3055), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 0.5]]'], {}), '([[1.0, 1.0], [2.0, 0.5]])\n', (3029, 3055), True, 'import numpy as np\n'), ((3917, 3951), 'numpy.array', 'np.array', (['[[1.0, 1.0], [2.0, 0.5]]'], {}), '([[1.0, 1.0], [2.0, 0.5]])\n', (3925, 3951), True, 'import numpy as np\n'), ((425, 437), 'tests.settings.mk_dmr_dat', 'mk_dmr_dat', ([], {}), '()\n', (435, 437), False, 'from tests.settings import DMR_DOC_FILEPATH, DMR_VEC_FILEPATH, K, BETA, SIGMA, L, mk_dmr_dat, count_word_freq\n'), ((1190, 1210), 'numpy.sum', 'np.sum', 
(['lda.n_m_z[0]'], {}), '(lda.n_m_z[0])\n', (1196, 1210), True, 'import numpy as np\n'), ((1247, 1267), 'numpy.sum', 'np.sum', (['lda.n_m_z[1]'], {}), '(lda.n_m_z[1])\n', (1253, 1267), True, 'import numpy as np\n'), ((1359, 1382), 'numpy.sum', 'np.sum', (['lda.n_z_w[:, 0]'], {}), '(lda.n_z_w[:, 0])\n', (1365, 1382), True, 'import numpy as np\n'), ((1448, 1471), 'numpy.sum', 'np.sum', (['lda.n_z_w[:, 1]'], {}), '(lda.n_z_w[:, 1])\n', (1454, 1471), True, 'import numpy as np\n'), ((1564, 1587), 'numpy.sum', 'np.sum', (['lda.n_z_w[0, :]'], {}), '(lda.n_z_w[0, :])\n', (1570, 1587), True, 'import numpy as np\n'), ((1632, 1655), 'numpy.sum', 'np.sum', (['lda.n_z_w[1, :]'], {}), '(lda.n_z_w[1, :])\n', (1638, 1655), True, 'import numpy as np\n'), ((2182, 2202), 'numpy.sum', 'np.sum', (['lda.n_m_z[0]'], {}), '(lda.n_m_z[0])\n', (2188, 2202), True, 'import numpy as np\n'), ((2245, 2265), 'numpy.sum', 'np.sum', (['lda.n_m_z[1]'], {}), '(lda.n_m_z[1])\n', (2251, 2265), True, 'import numpy as np\n'), ((2308, 2331), 'numpy.sum', 'np.sum', (['lda.n_z_w[:, 0]'], {}), '(lda.n_z_w[:, 0])\n', (2314, 2331), True, 'import numpy as np\n'), ((2374, 2397), 'numpy.sum', 'np.sum', (['lda.n_z_w[:, 1]'], {}), '(lda.n_z_w[:, 1])\n', (2380, 2397), True, 'import numpy as np\n'), ((2628, 2653), 'numpy.sum', 'np.sum', (['lda.n_m_z'], {'axis': '(1)'}), '(lda.n_m_z, axis=1)\n', (2634, 2653), True, 'import numpy as np\n'), ((2956, 2976), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (2964, 2976), True, 'import numpy as np\n'), ((2978, 2998), 'numpy.array', 'np.array', (['[0.8, 0.2]'], {}), '([0.8, 0.2])\n', (2986, 2998), True, 'import numpy as np\n'), ((3852, 3872), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3860, 3872), True, 'import numpy as np\n'), ((3874, 3894), 'numpy.array', 'np.array', (['[0.8, 0.2]'], {}), '([0.8, 0.2])\n', (3882, 3894), True, 'import numpy as np\n'), ((326, 358), 'os.path.exists', 'os.path.exists', (['DMR_DOC_FILEPATH'], {}), 
'(DMR_DOC_FILEPATH)\n', (340, 358), False, 'import os\n'), ((379, 411), 'os.path.exists', 'os.path.exists', (['DMR_VEC_FILEPATH'], {}), '(DMR_VEC_FILEPATH)\n', (393, 411), False, 'import os\n'), ((2823, 2843), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (2831, 2843), True, 'import numpy as np\n'), ((2845, 2865), 'numpy.array', 'np.array', (['[0.2, 0.2]'], {}), '([0.2, 0.2])\n', (2853, 2865), True, 'import numpy as np\n'), ((2881, 2901), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2889, 2901), True, 'import numpy as np\n'), ((2903, 2923), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (2911, 2923), True, 'import numpy as np\n'), ((3719, 3739), 'numpy.array', 'np.array', (['[0.1, 0.1]'], {}), '([0.1, 0.1])\n', (3727, 3739), True, 'import numpy as np\n'), ((3741, 3761), 'numpy.array', 'np.array', (['[0.2, 0.2]'], {}), '([0.2, 0.2])\n', (3749, 3761), True, 'import numpy as np\n'), ((3777, 3797), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (3785, 3797), True, 'import numpy as np\n'), ((3799, 3819), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (3807, 3819), True, 'import numpy as np\n'), ((661, 686), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'vec'}), '(loc=vec)\n', (677, 686), True, 'import numpy as np\n'), ((3195, 3212), 'numpy.exp', 'np.exp', (['(0.1 + 0.1)'], {}), '(0.1 + 0.1)\n', (3201, 3212), True, 'import numpy as np\n'), ((3219, 3236), 'numpy.exp', 'np.exp', (['(0.2 + 0.2)'], {}), '(0.2 + 0.2)\n', (3225, 3236), True, 'import numpy as np\n'), ((3298, 3316), 'numpy.exp', 'np.exp', (['(0.2 + 0.05)'], {}), '(0.2 + 0.05)\n', (3304, 3316), True, 'import numpy as np\n'), ((3323, 3340), 'numpy.exp', 'np.exp', (['(0.4 + 0.1)'], {}), '(0.4 + 0.1)\n', (3329, 3340), True, 'import numpy as np\n'), ((3402, 3419), 'numpy.exp', 'np.exp', (['(1.0 + 1.0)'], {}), '(1.0 + 1.0)\n', (3408, 3419), True, 'import numpy as np\n'), ((3426, 3443), 'numpy.exp', 
'np.exp', (['(0.5 + 0.5)'], {}), '(0.5 + 0.5)\n', (3432, 3443), True, 'import numpy as np\n'), ((3505, 3522), 'numpy.exp', 'np.exp', (['(2.0 + 0.5)'], {}), '(2.0 + 0.5)\n', (3511, 3522), True, 'import numpy as np\n'), ((3529, 3547), 'numpy.exp', 'np.exp', (['(1.0 + 0.25)'], {}), '(1.0 + 0.25)\n', (3535, 3547), True, 'import numpy as np\n'), ((4104, 4121), 'numpy.exp', 'np.exp', (['(0.1 + 0.1)'], {}), '(0.1 + 0.1)\n', (4110, 4121), True, 'import numpy as np\n'), ((4134, 4151), 'numpy.exp', 'np.exp', (['(0.2 + 0.2)'], {}), '(0.2 + 0.2)\n', (4140, 4151), True, 'import numpy as np\n'), ((4223, 4240), 'numpy.exp', 'np.exp', (['(0.1 + 0.1)'], {}), '(0.1 + 0.1)\n', (4229, 4240), True, 'import numpy as np\n'), ((4253, 4270), 'numpy.exp', 'np.exp', (['(0.2 + 0.2)'], {}), '(0.2 + 0.2)\n', (4259, 4270), True, 'import numpy as np\n'), ((4342, 4360), 'numpy.exp', 'np.exp', (['(0.2 + 0.05)'], {}), '(0.2 + 0.05)\n', (4348, 4360), True, 'import numpy as np\n'), ((4373, 4390), 'numpy.exp', 'np.exp', (['(0.4 + 0.1)'], {}), '(0.4 + 0.1)\n', (4379, 4390), True, 'import numpy as np\n'), ((4462, 4480), 'numpy.exp', 'np.exp', (['(0.2 + 0.05)'], {}), '(0.2 + 0.05)\n', (4468, 4480), True, 'import numpy as np\n'), ((4493, 4510), 'numpy.exp', 'np.exp', (['(0.4 + 0.1)'], {}), '(0.4 + 0.1)\n', (4499, 4510), True, 'import numpy as np\n'), ((4582, 4599), 'numpy.exp', 'np.exp', (['(1.0 + 1.0)'], {}), '(1.0 + 1.0)\n', (4588, 4599), True, 'import numpy as np\n'), ((4612, 4629), 'numpy.exp', 'np.exp', (['(0.5 + 0.5)'], {}), '(0.5 + 0.5)\n', (4618, 4629), True, 'import numpy as np\n'), ((4701, 4718), 'numpy.exp', 'np.exp', (['(1.0 + 1.0)'], {}), '(1.0 + 1.0)\n', (4707, 4718), True, 'import numpy as np\n'), ((4731, 4748), 'numpy.exp', 'np.exp', (['(0.5 + 0.5)'], {}), '(0.5 + 0.5)\n', (4737, 4748), True, 'import numpy as np\n'), ((4820, 4837), 'numpy.exp', 'np.exp', (['(2.0 + 0.5)'], {}), '(2.0 + 0.5)\n', (4826, 4837), True, 'import numpy as np\n'), ((4850, 4868), 'numpy.exp', 'np.exp', (['(1.0 + 
0.25)'], {}), '(1.0 + 0.25)\n', (4856, 4868), True, 'import numpy as np\n'), ((4940, 4957), 'numpy.exp', 'np.exp', (['(2.0 + 0.5)'], {}), '(2.0 + 0.5)\n', (4946, 4957), True, 'import numpy as np\n'), ((4970, 4988), 'numpy.exp', 'np.exp', (['(1.0 + 0.25)'], {}), '(1.0 + 0.25)\n', (4976, 4988), True, 'import numpy as np\n'), ((715, 750), 'numpy.random.randint', 'np.random.randint', (['(1)', 'self.NUM_VECS'], {}), '(1, self.NUM_VECS)\n', (732, 750), True, 'import numpy as np\n')] |
# Copyright 2020, General Electric Company. All rights reserved. See https://github.com/xcist/code/blob/master/LICENSE
import numpy as np
from catsim.GetMu import GetMu
# Exercise GetMu with every supported type for the energy argument;
# output is identical to the previous append-based version.
Mu = [
    GetMu('water', 70),              # int
    GetMu('water', 70.0),            # float
    GetMu('bone', (30, 50, 70)),     # tuple
    GetMu('bone', [30, 50, 70]),     # list
    GetMu('al', range(10, 60, 10)),  # range
]
# Argument types exercised above (kept for reference; not printed).
types = ['int', 'float', 'tuple', 'list', 'range']
for mu in Mu:
    print()
    # Iterate the values directly instead of indexing with range(len(mu)).
    for value in mu:
        print(value)
    print()
# A numpy array of energies is accepted as well.
Evec = np.array([(20, 30, 40), (50, 60, 70)], dtype=np.single)
mu = GetMu('water', Evec)
print(mu, type(mu))
print(mu,type(mu)) | [
"numpy.array",
"catsim.GetMu.GetMu"
] | [((519, 574), 'numpy.array', 'np.array', (['[(20, 30, 40), (50, 60, 70)]'], {'dtype': 'np.single'}), '([(20, 30, 40), (50, 60, 70)], dtype=np.single)\n', (527, 574), True, 'import numpy as np\n'), ((580, 600), 'catsim.GetMu.GetMu', 'GetMu', (['"""water"""', 'Evec'], {}), "('water', Evec)\n", (585, 600), False, 'from catsim.GetMu import GetMu\n'), ((190, 208), 'catsim.GetMu.GetMu', 'GetMu', (['"""water"""', '(70)'], {}), "('water', 70)\n", (195, 208), False, 'from catsim.GetMu import GetMu\n'), ((220, 240), 'catsim.GetMu.GetMu', 'GetMu', (['"""water"""', '(70.0)'], {}), "('water', 70.0)\n", (225, 240), False, 'from catsim.GetMu import GetMu\n'), ((252, 279), 'catsim.GetMu.GetMu', 'GetMu', (['"""bone"""', '(30, 50, 70)'], {}), "('bone', (30, 50, 70))\n", (257, 279), False, 'from catsim.GetMu import GetMu\n'), ((291, 318), 'catsim.GetMu.GetMu', 'GetMu', (['"""bone"""', '[30, 50, 70]'], {}), "('bone', [30, 50, 70])\n", (296, 318), False, 'from catsim.GetMu import GetMu\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 10:30:25 2018
Try to predict in which lab an animal was trained based on its behavior
@author: guido
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from os.path import join
import seaborn as sns
import datajoint as dj
from ibl_pipeline import subject, acquisition, action, behavior, reference
from ibl_pipeline.analyses import behavior as behavior_analysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
# Settings
path = '/home/guido/Figures/Behavior/'  # output directory for the figures
iterations = 5 # how often to decode
num_splits = 3 # n in n-fold cross validation
# Behavioral metrics used as classifier features; the control set additionally
# includes the lab's time zone (a positive control for lab decodability).
decoding_metrics = ['perf_easy','n_trials','threshold','bias','reaction_time','training_time']
decoding_metrics_control = ['perf_easy','n_trials','threshold','bias','reaction_time','training_time','time_zone']
# Decoding function with n-fold cross validation
def decoding(resp, labels, clf, num_splits):
kf = KFold(n_splits=num_splits, shuffle=True)
y_pred = np.array([])
y_true = np.array([])
for train_index, test_index in kf.split(resp):
train_resp = resp[train_index]
test_resp = resp[test_index]
clf.fit(train_resp, [labels[j] for j in train_index])
y_pred = np.append(y_pred, clf.predict(test_resp))
y_true = np.append(y_true, [labels[j] for j in test_index])
f1 = f1_score(y_true, y_pred, labels=np.unique(labels), average='micro')
return f1
# Query list of subjects
# Restrict to mice born after Sep 2018 that are C57BL/6J (or have no line set).
all_sub = subject.Subject * subject.SubjectLab & 'subject_birth_date > "2018-09-01"' & 'subject_line IS NULL OR subject_line="C57BL/6J"'
subjects = all_sub.fetch('subject_nickname')
# Create dataframe with behavioral metrics of all mice
learning = pd.DataFrame(columns=['mouse','lab','time_zone','learned','date_learned','training_time','perf_easy','n_trials','threshold','bias','reaction_time','lapse_low','lapse_high'])
# Per-subject loop: fetch behavioral summaries and fill one row of `learning`.
for i, nickname in enumerate(subjects):
    if np.mod(i+1,10) == 0:
        print('Loading data of subject %d of %d'%(i+1,len(subjects)))
    # Gather behavioral data for subject
    subj = subject.Subject * subject.SubjectLab & 'subject_nickname="%s"'%nickname
    behav = pd.DataFrame((behavior_analysis.BehavioralSummaryByDate * subject.Subject * subject.SubjectLab &
                           'subject_nickname="%s"'%nickname).proj('session_date', 'performance_easy').fetch(as_dict=True, order_by='session_date'))
    rt = pd.DataFrame(((behavior_analysis.BehavioralSummaryByDate.ReactionTimeByDate * subject.Subject * subject.SubjectLab &
                           'subject_nickname="%s"'%nickname)).proj('session_date', 'median_reaction_time').fetch(as_dict=True, order_by='session_date'))
    psych = pd.DataFrame(((behavior_analysis.BehavioralSummaryByDate.PsychResults * subject.Subject * subject.SubjectLab &
                           'subject_nickname="%s"'%nickname)).proj('session_date', 'n_trials_stim','threshold','bias','lapse_low','lapse_high').fetch(as_dict=True, order_by='session_date'))
    # Find first session in which mouse is trained
    first_trained_session = subj.aggr(behavior_analysis.SessionTrainingStatus & 'training_status="trained"', first_trained='min(session_start_time)')
    untrainable_session = subj.aggr(behavior_analysis.SessionTrainingStatus & 'training_status="untrainable"', first_trained='min(session_start_time)')
    # BUGFIX: the original tested `len(a) == 0 & len(b) == 0`. Because `&`
    # binds tighter than `==`, that parsed as the chained comparison
    # `len(a) == (0 & len(b)) == 0`, i.e. just `len(a) == 0`, so the
    # "untrainable" branch could never fire. Use logical `and`.
    if len(first_trained_session) == 0 and len(untrainable_session) == 0:
        learning.loc[i,'learned'] = 'in training'
        learning.loc[i,'training_time'] = len(behav)
    elif len(first_trained_session) == 0 and len(untrainable_session) == 1:
        learning.loc[i,'learned'] = 'untrainable'
        learning.loc[i,'training_time'] = len(behav)
    else:
        first_trained_session_datetime = first_trained_session.fetch1('first_trained')
        first_trained_session_date = first_trained_session_datetime.date()
        learning.loc[i,'learned'] = 'trained'
        learning.loc[i,'date_learned'] = first_trained_session_date
        learning.loc[i,'training_time'] = sum(behav.session_date < first_trained_session_date)
        learning.loc[i,'perf_easy'] = float(behav.performance_easy[behav.session_date == first_trained_session_date])*100
        psych['n_trials'] = n_trials = [sum(s) for s in psych.n_trials_stim]
        learning.loc[i,'n_trials'] = float(psych.n_trials[psych.session_date == first_trained_session_date])
        learning.loc[i,'threshold'] = float(psych.threshold[psych.session_date == first_trained_session_date])
        learning.loc[i,'bias'] = float(psych.bias[psych.session_date == first_trained_session_date])
        learning.loc[i,'lapse_low'] = float(psych.lapse_low[psych.session_date == first_trained_session_date])
        learning.loc[i,'lapse_high'] = float(psych.lapse_high[psych.session_date == first_trained_session_date])
        # Fall back to the session closest in time when no reaction-time entry
        # exists for the exact training-criterion date.
        if sum(rt.session_date == first_trained_session_date) == 0:
            learning.loc[i,'reaction_time'] = float(rt.median_reaction_time[np.argmin(np.array(abs(rt.session_date - first_trained_session_date)))])*1000
        else:
            learning.loc[i,'reaction_time'] = float(rt.median_reaction_time[rt.session_date == first_trained_session_date])*1000
    # Add mouse and lab info to dataframe
    learning.loc[i,'mouse'] = nickname
    lab_name = subj.fetch1('lab_name')
    learning.loc[i,'lab'] = lab_name
    lab_time = reference.Lab * reference.LabLocation & 'lab_name="%s"'%lab_name
    time_zone = lab_time.fetch('time_zone')[0]
    # BUGFIX: the original compared `time_zone == ('Europe/Lisbon' or
    # 'Europe/London')`, which only ever tests against 'Europe/Lisbon'
    # (`or` short-circuits), so London labs fell through and
    # `time_zone_number` could carry a stale value from the previous
    # iteration (or be unbound). Use a membership test and a safe default.
    if time_zone in ('Europe/Lisbon', 'Europe/London'):
        time_zone_number = 0
    elif time_zone == 'America/New_York':
        time_zone_number = -5
    elif time_zone == 'America/Los_Angeles':
        time_zone_number = -7
    else:
        time_zone_number = 0  # unrecognized zone: treat as GMT
    learning.loc[i,'time_zone'] = time_zone_number
# Select mice that learned
learned = learning[learning['learned'] == 'trained']
# Merge some labs (zadorlab -> churchlandlab, mrsicflogellab -> cortexlab)
pd.options.mode.chained_assignment = None # deactivate warning
learned.loc[learned['lab'] == 'zadorlab','lab'] = 'churchlandlab'
learned.loc[learned['lab'] == 'mrsicflogellab','lab'] = 'cortexlab'
# Add (n = x) to lab names
for i in learned.index.values:
    learned.loc[i,'lab_n'] = learned.loc[i,'lab'] + ' (n=' + str(sum(learned['lab'] == learned.loc[i,'lab'])) + ')'
# Initialize decoders (one classifier instance shared across iterations)
print('\nDecoding of lab membership..')
decod = learned
clf_rf = RandomForestClassifier(n_estimators=100)
clf_nb = GaussianNB()
clf_lr = LogisticRegression(solver='liblinear', multi_class='auto', max_iter=500)
# Perform decoding of lab membership
decoding_result = pd.DataFrame(columns=['random_forest','naive_bayes','log_res','rf_shuf','nb_shuf','lr_shuf'])
decoding_control = pd.DataFrame(columns=['random_forest','naive_bayes','log_res','rf_shuf','nb_shuf','lr_shuf'])
decoding_set = decod[decoding_metrics].values
control_set = decod[decoding_metrics_control].values
# Shuffled labels (sample(frac=1)) give the chance-level baseline for each
# decoder; the "control" set repeats everything with the time-zone feature.
for i in range(iterations):
    if np.mod(i+1,100) == 0:  # progress message (never fires for iterations < 100)
        print('Iteration %d of %d'%(i+1,iterations))
    # Original dataset
    decoding_result.loc[i,'random_forest'] = decoding(decoding_set, list(decod['lab']), clf_rf, num_splits)
    decoding_result.loc[i,'naive_bayes'] = decoding(decoding_set, list(decod['lab']), clf_nb, num_splits)
    decoding_result.loc[i,'log_res'] = decoding(decoding_set, list(decod['lab']), clf_lr, num_splits)
    # Shuffled dataset
    decoding_result.loc[i,'rf_shuf'] = decoding(decoding_set, list(decod['lab'].sample(frac=1)), clf_rf, num_splits)
    decoding_result.loc[i,'nb_shuf'] = decoding(decoding_set, list(decod['lab'].sample(frac=1)), clf_nb, num_splits)
    decoding_result.loc[i,'lr_shuf'] = decoding(decoding_set, list(decod['lab'].sample(frac=1)), clf_lr, num_splits)
    # Positive control dataset
    decoding_control.loc[i,'random_forest'] = decoding(control_set, list(decod['lab']), clf_rf, num_splits)
    decoding_control.loc[i,'naive_bayes'] = decoding(control_set, list(decod['lab']), clf_nb, num_splits)
    decoding_control.loc[i,'log_res'] = decoding(control_set, list(decod['lab']), clf_lr, num_splits)
    # Positive control
    decoding_control.loc[i,'rf_shuf'] = decoding(control_set, list(decod['lab'].sample(frac=1)), clf_rf, num_splits)
    decoding_control.loc[i,'nb_shuf'] = decoding(control_set, list(decod['lab'].sample(frac=1)), clf_nb, num_splits)
    decoding_control.loc[i,'lr_shuf'] = decoding(control_set, list(decod['lab'].sample(frac=1)), clf_lr, num_splits)
# Calculate if any decoders perform above chance (positive values in perc indicate above chance-level performance)
#perc = [np.percentile(logres-np.mean(shuf_lr),5), np.percentile(bayes-np.mean(shuf_nb),5), np.percentile(random_forest-np.mean(shuf_rf),5)]
# Plot decoding results
# Left panel: behavioral metrics only; right panel: with the time-zone
# positive control. Each violin is decoder performance minus its own
# shuffled-label baseline, so 0 (dashed red line) marks chance level.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
sns.violinplot(data=pd.concat([decoding_result['random_forest']-decoding_result['rf_shuf'],
                               decoding_result['naive_bayes']-decoding_result['nb_shuf'],
                               decoding_result['log_res']-decoding_result['lr_shuf']], axis=1), color=[0.6,0.6,0.6], ax=ax1)
ax1.plot([-1,3],[0,0],'r--')
ax1.set(ylabel='Decoding performance over chance level\n(F1 score)', title='Decoding of lab membership',
        xticklabels=['Random\nForest','Naive\nBayes','Logistic\nRegression'], ylim=[-0.2, 0.5])
plt.setp(ax1.xaxis.get_majorticklabels(), rotation=40)
sns.violinplot(data=pd.concat([decoding_control['random_forest']-decoding_control['rf_shuf'],
                               decoding_control['naive_bayes']-decoding_control['nb_shuf'],
                               decoding_control['log_res']-decoding_control['lr_shuf']], axis=1), color=[0.6,0.6,0.6], ax=ax2)
ax2.plot([-1,3],[0,0],'r--')
ax2.set(ylabel='Decoding performance over chance level\n(F1 score)', title='Decoding of lab membership including time zone',
        xticklabels=['Random\nForest','Naive\nBayes','Logistic\nRegression'], ylim=[-0.2, 0.5])
plt.setp(ax2.xaxis.get_majorticklabels(), rotation=40)
plt.tight_layout(pad = 2)
fig.set_size_inches((5, 5), forward=False)
plt.savefig(join(path,'decoding_lab_membership.pdf'), dpi=300)
plt.savefig(join(path,'decoding_lab_membership.png'), dpi=300)
| [
"numpy.unique",
"os.path.join",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"numpy.append",
"numpy.array",
"pandas.concat",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"sklearn.naive_bayes.GaussianNB",
"sklearn.model_selection.KFold",
"numpy.... | [((1951, 2144), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['mouse', 'lab', 'time_zone', 'learned', 'date_learned', 'training_time',\n 'perf_easy', 'n_trials', 'threshold', 'bias', 'reaction_time',\n 'lapse_low', 'lapse_high']"}), "(columns=['mouse', 'lab', 'time_zone', 'learned',\n 'date_learned', 'training_time', 'perf_easy', 'n_trials', 'threshold',\n 'bias', 'reaction_time', 'lapse_low', 'lapse_high'])\n", (1963, 2144), True, 'import pandas as pd\n'), ((6530, 6570), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (6552, 6570), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6580, 6592), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (6590, 6592), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((6602, 6674), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""auto"""', 'max_iter': '(500)'}), "(solver='liblinear', multi_class='auto', max_iter=500)\n", (6620, 6674), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6731, 6833), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['random_forest', 'naive_bayes', 'log_res', 'rf_shuf', 'nb_shuf', 'lr_shuf']"}), "(columns=['random_forest', 'naive_bayes', 'log_res', 'rf_shuf',\n 'nb_shuf', 'lr_shuf'])\n", (6743, 6833), True, 'import pandas as pd\n'), ((6844, 6946), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['random_forest', 'naive_bayes', 'log_res', 'rf_shuf', 'nb_shuf', 'lr_shuf']"}), "(columns=['random_forest', 'naive_bayes', 'log_res', 'rf_shuf',\n 'nb_shuf', 'lr_shuf'])\n", (6856, 6946), True, 'import pandas as pd\n'), ((8893, 8928), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (8905, 8928), True, 'import matplotlib.pyplot as plt\n'), ((10145, 10168), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {'pad': '(2)'}), '(pad=2)\n', (10161, 10168), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1208), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'num_splits', 'shuffle': '(True)'}), '(n_splits=num_splits, shuffle=True)\n', (1173, 1208), False, 'from sklearn.model_selection import KFold\n'), ((1222, 1234), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1230, 1234), True, 'import numpy as np\n'), ((1248, 1260), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1256, 1260), True, 'import numpy as np\n'), ((10227, 10268), 'os.path.join', 'join', (['path', '"""decoding_lab_membership.pdf"""'], {}), "(path, 'decoding_lab_membership.pdf')\n", (10231, 10268), False, 'from os.path import join\n'), ((10290, 10331), 'os.path.join', 'join', (['path', '"""decoding_lab_membership.png"""'], {}), "(path, 'decoding_lab_membership.png')\n", (10294, 10331), False, 'from os.path import join\n'), ((1526, 1576), 'numpy.append', 'np.append', (['y_true', '[labels[j] for j in test_index]'], {}), '(y_true, [labels[j] for j in test_index])\n', (1535, 1576), True, 'import numpy as np\n'), ((2172, 2189), 'numpy.mod', 'np.mod', (['(i + 1)', '(10)'], {}), '(i + 1, 10)\n', (2178, 2189), True, 'import numpy as np\n'), ((7072, 7090), 'numpy.mod', 'np.mod', (['(i + 1)', '(100)'], {}), '(i + 1, 100)\n', (7078, 7090), True, 'import numpy as np\n'), ((8949, 9159), 'pandas.concat', 'pd.concat', (["[decoding_result['random_forest'] - decoding_result['rf_shuf'], \n decoding_result['naive_bayes'] - decoding_result['nb_shuf'], \n decoding_result['log_res'] - decoding_result['lr_shuf']]"], {'axis': '(1)'}), "([decoding_result['random_forest'] - decoding_result['rf_shuf'], \n decoding_result['naive_bayes'] - decoding_result['nb_shuf'], \n decoding_result['log_res'] - decoding_result['lr_shuf']], axis=1)\n", (8958, 9159), True, 'import pandas as pd\n'), ((9544, 9759), 'pandas.concat', 'pd.concat', (["[decoding_control['random_forest'] - 
decoding_control['rf_shuf'], \n decoding_control['naive_bayes'] - decoding_control['nb_shuf'], \n decoding_control['log_res'] - decoding_control['lr_shuf']]"], {'axis': '(1)'}), "([decoding_control['random_forest'] - decoding_control['rf_shuf'],\n decoding_control['naive_bayes'] - decoding_control['nb_shuf'], \n decoding_control['log_res'] - decoding_control['lr_shuf']], axis=1)\n", (9553, 9759), True, 'import pandas as pd\n'), ((1618, 1635), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1627, 1635), True, 'import numpy as np\n')] |
import argparse
import numpy as np
from pyimzml.ImzMLWriter import ImzMLWriter
from pyImagingMSpec.inMemoryIMS import inMemoryIMS
from scipy.optimize import least_squares
from pyimzml.ImzMLParser import ImzMLParser
from pyimzml.ImzMLWriter import ImzMLWriter
from scipy.signal import medfilt2d
import logging
def fit_fun(x, t):
    """Evaluate the polynomial with coefficients ``x`` at points ``t``.

    The power-law alternative (x[0] + x[1] * t ** x[2]) was abandoned
    in favour of a plain polynomial model.
    """
    return np.polyval(x, t)
def fun(x, t, y):
    """Residuals of the polynomial model ``fit_fun(x, t)`` against data ``y``."""
    return fit_fun(x, t) - y
def find_nearest(v, t):
    """Return the index of the element of ``v`` closest to ``t``.

    Parameters
    ----------
    v : array-like, assumed sorted ascending (searchsorted is used)
    t : target value

    Returns
    -------
    int : index into ``v`` of the nearest element
    """
    v = np.asarray(v)
    ix = np.searchsorted(v, t)
    if ix == 0:
        # target at or below the first element
        return 0
    if ix == len(v):
        # BUG FIX: previously returned len(v), an out-of-range index;
        # the nearest element to a target above the range is the last one.
        return len(v) - 1
    # compare the two neighbouring elements around the insertion point
    return ix - 1 + np.argmin(np.abs(v[[ix - 1, ix]] - t))
def find_max_of_nearest(v, i, t, n):
    """Index of the most intense peak among the ``n`` neighbours of ``t``.

    v : sorted vector of values
    i : matching vector of intensities
    t : target value
    n : half-width (in number of peaks) of the neighbourhood

    Returns None when the neighbourhood is empty.
    """
    assert(v.shape==i.shape)
    values = np.asarray(v)
    centre = np.searchsorted(values, t)
    lo = np.max([0, centre - n])
    hi = np.min([values.shape[0], centre + n])
    window_v = values[lo:hi]
    window_i = i[lo:hi]
    candidates = np.arange(lo, hi, dtype=int)
    if window_v.shape[0] == 0:
        return None
    return candidates[np.argmax(window_i)]
def find_max_in_range(v, i, t, r):
    """Index of the most intense peak within ``t`` +/- ``r``.

    v : sorted vector of values
    i : matching vector of intensities
    t : target value
    r : half-width of the search window

    Returns None when no peak falls inside the window.
    """
    assert(v.shape==i.shape)
    lo = np.searchsorted(v, t - r)
    hi = np.searchsorted(v, t + r)
    # keep only peaks strictly inside [t - r, t + r)
    in_window = (v[lo:hi] >= t - r) & (v[lo:hi] < t + r)
    window_i = i[lo:hi][in_window]
    candidates = np.arange(lo, hi)[in_window]
    if window_i.shape[0] == 0:
        return None
    return candidates[np.argmax(window_i)]
def get_delta(ref_mzs, mzs, intensities, max_delta_ppm=20.):
    """Per-reference ppm mass error of the closest observed peak.

    For every reference m/z, the most intense observed peak within
    ``max_delta_ppm`` is located; references with no matching peak
    get NaN.

    Returns
    -------
    np.ndarray of shape (len(ref_mzs),) with signed ppm errors.
    """
    delta = []
    for mz in ref_mzs:
        delta_mz = 1e-6*mz*max_delta_ppm
        # ix = find_max_of_nearest(mzs, intensities, mz, n=3)
        ix = find_max_in_range(mzs, intensities, mz, delta_mz)
        # BUG FIX: find_max_in_range signals "no match" with None; testing
        # plain truthiness also discarded legitimate matches at index 0.
        if ix is not None:
            delta.append(1e6 * (mzs[ix] - mz) / mz)
        else:
            delta.append(np.nan)
    return np.asarray(delta)
def generate_data(t, x, noise=0, n_outliers=0, random_state=30):
    """Evaluate the model at ``t`` and optionally add noise and outliers.

    A fixed ``random_state`` keeps the synthetic errors reproducible.
    """
    clean = fit_fun(x, t)
    rng = np.random.RandomState(random_state)
    error = noise * rng.randn(t.size)
    outlier_ix = rng.randint(0, t.size, n_outliers)
    # inflate a handful of errors to act as outliers
    error[outlier_ix] *= 35
    return clean + error
def fit_spectrum(mzs, intensities, ref_mzs, ref_pcts, max_delta_ppm, mz_min, mz_max, x0=[1, 1, 1],
                 weight_by_occurance=True, stabilise=True, intensity_threshold=0):
    """Fit a polynomial ppm-error model to a single spectrum.

    Matches the reference masses against the observed peaks, then fits
    polynomial coefficients (initialised at ``x0``) to the observed ppm
    errors via robust least squares with a Huber loss.

    Returns the scipy ``OptimizeResult`` together with the
    (mass, ppm-error) points that were fitted.
    """
    # drop peaks at or below the intensity threshold
    mzs, intensities = map(lambda x: x[intensities>intensity_threshold], [mzs, intensities])
    delta = get_delta(ref_mzs, mzs, intensities, max_delta_ppm=max_delta_ppm)
    # keep only references that were actually matched (non-NaN error)
    ref_mzs, ref_pcts, delta = map(lambda x:x[~np.isnan(delta)], [ref_mzs, ref_pcts, delta])
    #print(delta)
    if stabilise:
        # anchor the fit with zero-error points at both ends of the mass range
        _x = [mz_min, mz_max]
        _y = [0, 0]
    else:
        _x = []
        _y = []
    for ref_mz, ref_pct, dt in zip(ref_mzs, ref_pcts.astype('int'), delta):
        if not weight_by_occurance:
            ref_pct = 1
        # repeat each point ref_pct times so frequent formulas weigh more
        for ii in np.arange(ref_pct):
            _x.append(ref_mz)
            _y.append(dt)
    _x, _y = map(np.asarray, (_x, _y))
    _r = least_squares(fun, x0, loss='huber', f_scale=0.1, args=(_x, _y))
    return _r, (_x, _y)
def recalibrate_spectrum(mzs, r):
    """Shift every m/z by the ppm error predicted by the fitted model ``r``."""
    ppm_error = generate_data(mzs, r)
    mzs = mzs + 1e-6 * ppm_error * mzs
    return mzs
def calculate_mass_deviation(input_filename, known_peaks):
    """Measure the ppm mass error of each known peak in every spectrum.

    Returns an array of shape (n_spectra, n_known_peaks); references
    without a matching peak are NaN (see get_delta).
    """
    ims_dataset = inMemoryIMS(input_filename)
    delta = np.zeros([len(ims_dataset.coords), len(known_peaks)])
    for ii in range(len(ims_dataset.coords)):
        # spec is a (mzs, intensities) pair for the centroided spectrum
        spec = ims_dataset.get_spectrum(ii).get_spectrum(source='centroids')
        delta[ii,:] = get_delta(known_peaks, spec[0], spec[1])
    return delta
def poly_from_deltas(known_mzs, delta, max_ppm=100, polyorder=3):
    """Fit a polynomial to the median ppm error at each reference mass.

    Errors with magnitude >= ``max_ppm`` are treated as outliers and
    excluded from the per-mass median before fitting.
    """
    robust_median = lambda col: np.median(col[np.abs(col) < max_ppm])
    median_delta = [robust_median(delta[:, col]) for col in range(len(known_mzs))]
    coeffs = np.polyfit(known_mzs, median_delta, polyorder)
    return np.poly1d(coeffs)
def do_recalibration(input_filename, output_filename, p):
    """Apply the ppm-correction polynomial ``p`` to every spectrum.

    Each m/z is shifted by its predicted ppm error and the corrected
    spectra are written to ``output_filename``.
    """
    ims_dataset = inMemoryIMS(input_filename)
    with ImzMLWriter(output_filename) as file_out:
        for ii in range(len(ims_dataset.coords)):
            spec = ims_dataset.get_spectrum(ii).get_spectrum(source='centroids')
            mzs = spec[0]
            # subtract the predicted ppm deviation from each mass
            mzs_recal = [m-(1e-6)*m*p(m) for m in mzs]
            # coords are reordered [1,0,2] -- presumably an (x, y) swap for
            # the writer; TODO confirm against ImzMLWriter's convention
            file_out.addSpectrum(mzs_recal, spec[1], ims_dataset.coords[ii,[1,0,2]])
def recalibrate_dataset(input_filename, output_filename, known_peaks):
    """End-to-end recalibration: measure per-spectrum mass deviations,
    fit a correction polynomial, and write the corrected dataset."""
    deviations = calculate_mass_deviation(input_filename, known_peaks)
    correction = poly_from_deltas(known_peaks, deviations)
    do_recalibration(input_filename, output_filename, correction)
def fit_dataset(imzml, ref_formula, x0=[1,1,1], max_delta_ppm=3., mz_min=200, mz_max=1000):
    """Fit per-spectrum recalibration parameters for a whole dataset.

    Parameters
    ----------
    imzml : parsed imzML dataset (pyimzml ImzMLParser)
    ref_formula : object with ``mz`` and ``percent`` arrays of reference peaks
    x0 : initial polynomial coefficients for the fit
    max_delta_ppm : matching window for reference peaks (ppm)
    mz_min, mz_max : mass range used to stabilise the fit

    Returns
    -------
    list of fitted coefficient vectors, one per spectrum.
    """
    fit = []
    for spec_ix, coords in enumerate(imzml.coordinates):
        if spec_ix%500==0:
            # log progress as a fraction of spectra processed
            logging.debug(spec_ix/float(len(imzml.coordinates)))
        spec = imzml.getspectrum(index=spec_ix)
        mzs, intensities = np.asarray(spec[0]), np.asarray(spec[1])
        # BUG FIX: fit_spectrum returns (OptimizeResult, (x, y)); the result
        # object must be unpacked before accessing its fitted parameters,
        # otherwise `.x` is looked up on a tuple and raises AttributeError.
        result, _points = fit_spectrum(mzs, intensities, ref_formula.mz, ref_formula.percent,
                                      max_delta_ppm, mz_min, mz_max, x0)
        fit.append(result.x)
    return fit
def recal(imzml_out_fn, imzml, fit, m=3):
    """Spatially smooth the per-spectrum fit parameters and write the
    recalibrated dataset.

    Each fit coefficient is arranged as an image over the pixel grid,
    median-filtered with kernel size ``m``, and the smoothed coefficients
    are then used to recalibrate every spectrum.
    """
    # Write recalibrated dataset
    # spatial smoothing on recal params
    im3 = []
    for ii in range(len(fit[0])):
        # initialise with the dataset-wide mean so unvisited pixels get a sane value
        im = np.mean([fit[spec_ix][ii] for spec_ix in range(len(imzml.coordinates))]) + np.zeros((imzml.imzmldict["max count of pixels y"], imzml.imzmldict["max count of pixels x"]))
        for spec_ix, (x,y,z) in enumerate(imzml.coordinates):
            # imzML coordinates are 1-based
            im[y - 1, x - 1] = fit[spec_ix][ii]
        im = medfilt2d(im, m)
        im3.append(im)
    im3 = np.dstack(im3)
    # recal and write
    with ImzMLWriter(imzml_out_fn) as imzml_out:
        for spec_ix, coords in enumerate(imzml.coordinates):
            if spec_ix%500==0:
                # log progress as a fraction of spectra processed
                logging.debug(spec_ix/float(len(imzml.coordinates)))
            mzs, intensities = imzml.getspectrum(index=spec_ix)
            mzs = np.asarray(mzs)
            # use the smoothed coefficients at this spectrum's pixel
            mzs = recalibrate_spectrum(mzs, im3[coords[1]-1, coords[0]-1, :])
            imzml_out.addSpectrum(mzs, intensities, coords)
def robust_recalibration(imzml_fn, imzml_fn_r, ref_formula, numpeaks, smoothing, x0=[1, 1]):
    """Recalibrate ``imzml_fn`` and write the result to ``imzml_fn_r``.

    Parameters
    ----------
    imzml_fn : input imzML filename
    imzml_fn_r : output (recalibrated) imzML filename
    ref_formula : object with ``mz`` and ``percent`` reference peak arrays
    numpeaks : forwarded to fit_dataset as max_delta_ppm
               (NOTE(review): the name suggests a peak count -- confirm intent)
    smoothing : median-filter kernel size for spatial smoothing of fit params
    x0 : initial polynomial coefficients

    Returns the list of per-spectrum fit parameter vectors.
    """
    # (removed an unused local `import os`)
    imzml = ImzMLParser(imzml_fn)
    # calculate fit parameters with varying numbers of peaks
    fit = fit_dataset(imzml, ref_formula, x0=x0, max_delta_ppm=numpeaks)
    # do fit with different spatial smoothing
    recal(imzml_fn_r, imzml, fit, m=smoothing)
    return fit
if __name__ == '__main__':
    # Command-line entry point: recalibrate a centroided imzML file against
    # a list of known reference peak masses.
    parser = argparse.ArgumentParser(description="recalibrate centroided imaging MS file")
    parser.add_argument('input', type=str, help="input imaging MS file")
    parser.add_argument('output', type=str, help="output filename")
    # BUG FIX: the help text was a leftover from the argparse documentation
    # example ('an integer for the accumulator'); describe the actual argument.
    parser.add_argument('known_peaks', metavar='N', type=float, nargs='+',
                        help='known reference peak m/z values used for recalibration')
    args = parser.parse_args()
    recalibrate_dataset(args.input, args.output, args.known_peaks)
| [
"numpy.polyfit",
"pyimzml.ImzMLWriter.ImzMLWriter",
"numpy.poly1d",
"numpy.random.RandomState",
"numpy.arange",
"scipy.optimize.least_squares",
"argparse.ArgumentParser",
"numpy.searchsorted",
"numpy.asarray",
"numpy.max",
"numpy.polyval",
"numpy.min",
"numpy.abs",
"scipy.signal.medfilt2d"... | [((337, 353), 'numpy.polyval', 'np.polyval', (['x', 't'], {}), '(x, t)\n', (347, 353), True, 'import numpy as np\n'), ((544, 557), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (554, 557), True, 'import numpy as np\n'), ((567, 588), 'numpy.searchsorted', 'np.searchsorted', (['v', 't'], {}), '(v, t)\n', (582, 588), True, 'import numpy as np\n'), ((827, 840), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (837, 840), True, 'import numpy as np\n'), ((850, 871), 'numpy.searchsorted', 'np.searchsorted', (['v', 't'], {}), '(v, t)\n', (865, 871), True, 'import numpy as np\n'), ((882, 901), 'numpy.max', 'np.max', (['[0, ix - n]'], {}), '([0, ix - n])\n', (888, 901), True, 'import numpy as np\n'), ((911, 939), 'numpy.min', 'np.min', (['[v.shape[0], ix + n]'], {}), '([v.shape[0], ix + n])\n', (917, 939), True, 'import numpy as np\n'), ((991, 1023), 'numpy.arange', 'np.arange', (['ix_l', 'ix_u'], {'dtype': 'int'}), '(ix_l, ix_u, dtype=int)\n', (1000, 1023), True, 'import numpy as np\n'), ((1272, 1297), 'numpy.searchsorted', 'np.searchsorted', (['v', '(t - r)'], {}), '(v, t - r)\n', (1287, 1297), True, 'import numpy as np\n'), ((1307, 1332), 'numpy.searchsorted', 'np.searchsorted', (['v', '(t + r)'], {}), '(v, t + r)\n', (1322, 1332), True, 'import numpy as np\n'), ((1959, 1976), 'numpy.asarray', 'np.asarray', (['delta'], {}), '(delta)\n', (1969, 1976), True, 'import numpy as np\n'), ((2076, 2111), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (2097, 2111), True, 'import numpy as np\n'), ((3103, 3167), 'scipy.optimize.least_squares', 'least_squares', (['fun', 'x0'], {'loss': '"""huber"""', 'f_scale': '(0.1)', 'args': '(_x, _y)'}), "(fun, x0, loss='huber', f_scale=0.1, args=(_x, _y))\n", (3116, 3167), False, 'from scipy.optimize import least_squares\n'), ((3399, 3426), 'pyImagingMSpec.inMemoryIMS.inMemoryIMS', 'inMemoryIMS', (['input_filename'], {}), '(input_filename)\n', (3410, 3426), 
False, 'from pyImagingMSpec.inMemoryIMS import inMemoryIMS\n'), ((3893, 3939), 'numpy.polyfit', 'np.polyfit', (['known_mzs', 'median_delta', 'polyorder'], {}), '(known_mzs, median_delta, polyorder)\n', (3903, 3939), True, 'import numpy as np\n'), ((3948, 3960), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (3957, 3960), True, 'import numpy as np\n'), ((4052, 4079), 'pyImagingMSpec.inMemoryIMS.inMemoryIMS', 'inMemoryIMS', (['input_filename'], {}), '(input_filename)\n', (4063, 4079), False, 'from pyImagingMSpec.inMemoryIMS import inMemoryIMS\n'), ((5719, 5733), 'numpy.dstack', 'np.dstack', (['im3'], {}), '(im3)\n', (5728, 5733), True, 'import numpy as np\n'), ((6322, 6343), 'pyimzml.ImzMLParser.ImzMLParser', 'ImzMLParser', (['imzml_fn'], {}), '(imzml_fn)\n', (6333, 6343), False, 'from pyimzml.ImzMLParser import ImzMLParser\n'), ((6627, 6704), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""recalibrate centroided imaging MS file"""'}), "(description='recalibrate centroided imaging MS file')\n", (6650, 6704), False, 'import argparse\n'), ((1446, 1467), 'numpy.arange', 'np.arange', (['ix_l', 'ix_u'], {}), '(ix_l, ix_u)\n', (1455, 1467), True, 'import numpy as np\n'), ((2979, 2997), 'numpy.arange', 'np.arange', (['ref_pct'], {}), '(ref_pct)\n', (2988, 2997), True, 'import numpy as np\n'), ((4089, 4117), 'pyimzml.ImzMLWriter.ImzMLWriter', 'ImzMLWriter', (['output_filename'], {}), '(output_filename)\n', (4100, 4117), False, 'from pyimzml.ImzMLWriter import ImzMLWriter\n'), ((5669, 5685), 'scipy.signal.medfilt2d', 'medfilt2d', (['im', 'm'], {}), '(im, m)\n', (5678, 5685), False, 'from scipy.signal import medfilt2d\n'), ((5765, 5790), 'pyimzml.ImzMLWriter.ImzMLWriter', 'ImzMLWriter', (['imzml_out_fn'], {}), '(imzml_out_fn)\n', (5776, 5790), False, 'from pyimzml.ImzMLWriter import ImzMLWriter\n'), ((669, 696), 'numpy.abs', 'np.abs', (['(v[[ix - 1, ix]] - t)'], {}), '(v[[ix - 1, ix]] - t)\n', (675, 696), True, 'import numpy as np\n'), 
((1038, 1051), 'numpy.argmax', 'np.argmax', (['_i'], {}), '(_i)\n', (1047, 1051), True, 'import numpy as np\n'), ((1486, 1499), 'numpy.argmax', 'np.argmax', (['_i'], {}), '(_i)\n', (1495, 1499), True, 'import numpy as np\n'), ((5002, 5021), 'numpy.asarray', 'np.asarray', (['spec[0]'], {}), '(spec[0])\n', (5012, 5021), True, 'import numpy as np\n'), ((5023, 5042), 'numpy.asarray', 'np.asarray', (['spec[1]'], {}), '(spec[1])\n', (5033, 5042), True, 'import numpy as np\n'), ((5451, 5550), 'numpy.zeros', 'np.zeros', (["(imzml.imzmldict['max count of pixels y'], imzml.imzmldict[\n 'max count of pixels x'])"], {}), "((imzml.imzmldict['max count of pixels y'], imzml.imzmldict[\n 'max count of pixels x']))\n", (5459, 5550), True, 'import numpy as np\n'), ((6048, 6063), 'numpy.asarray', 'np.asarray', (['mzs'], {}), '(mzs)\n', (6058, 6063), True, 'import numpy as np\n'), ((2651, 2666), 'numpy.isnan', 'np.isnan', (['delta'], {}), '(delta)\n', (2659, 2666), True, 'import numpy as np\n'), ((3794, 3803), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3800, 3803), True, 'import numpy as np\n')] |
import os
import numpy as np
import pydub as pd
from abc import ABC, abstractmethod
class Messenger(ABC):
    """Base class for two-channel (left/right) audio messages.

    Subclasses decide how a single file is decoded into a pair of
    per-channel sample arrays via ``_getMessageFromFile``; this class
    handles collecting and concatenating those arrays.
    """

    def __init__(self, files_path=None):
        if files_path is None:
            # start with empty channels when no source is given
            self.message_left, self.message_right = np.array([]), np.array([])
        else:
            self.message_left, self.message_right = self._getMessage(files_path)

    @abstractmethod
    def __add__(self, other):
        """Combine this message with another message."""
        pass

    @abstractmethod
    def __repr__(self):
        """Return a printable representation of the message."""
        pass

    def __len__(self) -> 'int':
        """Number of samples in the left channel."""
        return self.message_left.shape[0]

    def __getitem__(self, key) -> 'list[int]':
        """Return the (left, right) samples at index ``key``."""
        return (self.message_left[key], self.message_right[key])

    def __iter__(self) -> 'list[np.ndarray]':
        """Return a pair of per-channel iterators."""
        return (self.message_left.__iter__(), self.message_right.__iter__())

    def __next__(self) -> int:
        """Advance both channel iterators and return their next items."""
        return (self.message_left.__next__(), self.message_right.__next__())

    def _getMessage(self, files_path) -> 'list[np.ndarray]':
        """Decode ``files_path`` into concatenated left/right channels.

        Accepts either a directory path (str) or a list of file paths;
        anything else raises TypeError.
        """
        if isinstance(files_path, str):
            left_parts, right_parts = self._getMessageFromDirectory(files_path)
        elif isinstance(files_path, list):
            left_parts, right_parts = self._getMessageFromFiles(files_path)
        else:
            raise TypeError("files_path must be string or list")
        return self._concatenateMessages(left_parts), self._concatenateMessages(right_parts)

    def _getMessageFromDirectory(self, files_path) -> 'list[np.ndarray]':
        """Decode every file found in the directory ``files_path``."""
        return self._getMessageFromFiles(self._getMp3FilesInDirectory(files_path))

    def _getMp3FilesInDirectory(self, files_path) -> list:
        """Return the full paths of the entries in ``files_path``.

        NOTE(review): despite the name, no '.mp3' filtering is applied --
        every directory entry is returned.
        """
        return [files_path + '/' + entry for entry in os.listdir(files_path)]

    def _getMessageFromFiles(self, files) -> list:
        """Decode each file; return ([left arrays], [right arrays])."""
        left_parts, right_parts = [], []
        for path in files:
            left, right = self._getMessageFromFile(path)
            left_parts.append(left)
            right_parts.append(right)
        return left_parts, right_parts

    @abstractmethod
    def _getMessageFromFile(self, file) -> np.ndarray:
        """Decode one file into a (left, right) pair of sample arrays."""
        pass

    def _concatenateMessages(self, messages) -> np.ndarray:
        """Join a list of sample arrays into a single array."""
        return np.concatenate(messages)
| [
"numpy.array",
"os.listdir",
"numpy.concatenate"
] | [((3270, 3292), 'os.listdir', 'os.listdir', (['files_path'], {}), '(files_path)\n', (3280, 3292), False, 'import os\n'), ((4319, 4343), 'numpy.concatenate', 'np.concatenate', (['messages'], {}), '(messages)\n', (4333, 4343), True, 'import numpy as np\n'), ((262, 274), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (270, 274), True, 'import numpy as np\n'), ((276, 288), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (284, 288), True, 'import numpy as np\n')] |
# 2021.03.20
# @yifan
#
import numpy as np
from skimage.util import view_as_windows
from scipy.fftpack import dct, idct
def Shrink(X, win):
    """Split X (n, H, W, C) into non-overlapping win x win patches and
    flatten each patch into the trailing axis."""
    patches = view_as_windows(X, (1, win, win, 1), (1, win, win, 1))
    return patches.reshape(patches.shape[0], patches.shape[1], patches.shape[2], -1)
def invShrink(X, win):
    """Inverse of Shrink: fold flattened win x win patches back into the
    spatial dimensions."""
    n, h, w = X.shape[0], X.shape[1], X.shape[2]
    patches = X.reshape(n, h, w, -1, 1, win, win, 1)
    patches = np.moveaxis(patches, 5, 2)
    patches = np.moveaxis(patches, 6, 4)
    return patches.reshape(n, win * h, win * w, -1)
class DCT():
    """Blockwise orthonormal 2-D DCT over (N, P) patches."""

    def __init__(self, N=8, P=8):
        # patch height / width used by (inverse_)transform
        self.N = N
        self.P = P
        # NOTE(review): W and H are set but never read in this class
        self.W = 8
        self.H = 8

    def transform(self, a):
        """Apply an orthonormal 2-D DCT to every N x P patch of ``a``."""
        original_shape = list(a.shape)
        patches = a.reshape(-1, self.N, self.P, 1)
        coeffs = dct(dct(patches, axis=1, norm='ortho'), axis=2, norm='ortho')
        return coeffs.reshape(original_shape)

    def inverse_transform(self, a):
        """Invert transform(): orthonormal 2-D inverse DCT per patch."""
        original_shape = list(a.shape)
        patches = a.reshape(-1, self.N, self.P, 1)
        recon = idct(idct(patches, axis=1, norm='ortho'), axis=2, norm='ortho')
        return recon.reshape(original_shape)

    def ML_inverse_transform(self, Xraw, X):
        """Learn a least-squares mapping from features X back to raw data."""
        regressor = LLSR(onehot=False)
        regressor.fit(X.reshape(-1, X.shape[-1]), Xraw.reshape(-1, X.shape[-1]))
        original_shape = X.shape
        return regressor.predict_proba(X.reshape(-1, X.shape[-1])).reshape(original_shape)
class ZigZag():
    """Reorders flattened N x N blocks into JPEG-style zig-zag scan order."""

    def __init__(self):
        # scan-order indices computed lazily by (inverse_)transform
        self.idx = []

    def zig_zag(self, i, j, n):
        """Return the zig-zag scan position of cell (i, j) in an n x n grid."""
        if i + j >= n:
            # the lower-right triangle mirrors the upper-left one
            return n * n - 1 - self.zig_zag(n - 1 - i, n - 1 - j, n)
        k = (i + j) * (i + j + 1) // 2
        return k + i if (i + j) & 1 else k + j

    def zig_zag_getIdx(self, N):
        """Return the zig-zag position of every cell, flattened row-major."""
        positions = [self.zig_zag(i, j, N) for i in range(N) for j in range(N)]
        return np.array(positions, dtype=float)

    def transform(self, X):
        """Reorder the last axis of ``X`` into zig-zag scan order."""
        N = int(np.sqrt(X.shape[-1]))
        self.idx = self.zig_zag_getIdx(N).astype('int32')
        original_shape = list(X.shape)
        flat = X.reshape(-1, X.shape[-1])
        return flat[:, np.argsort(self.idx)].reshape(original_shape)

    def inverse_transform(self, X):
        """Undo transform(): restore the original row-major ordering."""
        N = int(np.sqrt(X.shape[-1]))
        self.idx = self.zig_zag_getIdx(N).astype('int32')
        original_shape = list(X.shape)
        flat = X.reshape(-1, X.shape[-1])
        return flat[:, self.idx].reshape(original_shape)
class LLSR():
    """Linear least-squares regression / classifier.

    With ``onehot=True`` integer labels are one-hot encoded before
    fitting, turning the regression into a linear classifier whose
    prediction is the argmax over class scores.  A bias column is
    appended to the features automatically.
    """
    def __init__(self, onehot=True, normalize=False):
        self.onehot = onehot        # one-hot encode labels in fit()
        self.normalize = normalize  # rescale scores in predict_proba()
        self.weight = []

    def fit(self, X, Y):
        """Solve the least-squares problem [X | 1] @ W = Y."""
        if self.onehot == True:
            Y = np.eye(len(np.unique(Y)))[Y.reshape(-1)]
        A = np.ones((X.shape[0], 1))
        X = np.concatenate((X, A), axis=1)
        self.weight, _, _, _ = np.linalg.lstsq(X, Y, rcond=None)
        return self

    def predict(self, X):
        """Return the index of the highest-scoring output per sample."""
        pred = self.predict_proba(X)
        return np.argmax(pred, axis=1)

    def predict_proba(self, X):
        """Return raw (or min-shifted, sum-normalised) linear scores."""
        A = np.ones((X.shape[0], 1))
        X = np.concatenate((X, A), axis=1)
        pred = np.matmul(X, self.weight)
        if self.normalize == True:
            pred = (pred - np.min(pred, axis=1, keepdims=True))/ np.sum((pred - np.min(pred, axis=1, keepdims=True) + 1e-15), axis=1, keepdims=True)
        return pred

    def score(self, X, Y):
        """Mean accuracy of predict(X) against integer labels Y.

        BUG FIX: previously called ``accuracy_score``, which was never
        imported anywhere in this module (no sklearn import), so this
        method always raised NameError.  Computed with numpy instead,
        matching sklearn's accuracy (fraction of correct predictions).
        """
        pred = self.predict(X)
        return np.mean(pred == np.asarray(Y))
"numpy.sqrt",
"numpy.ones",
"numpy.unique",
"numpy.argmax",
"scipy.fftpack.idct",
"numpy.min",
"numpy.argsort",
"numpy.zeros",
"scipy.fftpack.dct",
"numpy.matmul",
"numpy.concatenate",
"numpy.linalg.lstsq",
"numpy.moveaxis",
"skimage.util.view_as_windows"
] | [((149, 203), 'skimage.util.view_as_windows', 'view_as_windows', (['X', '(1, win, win, 1)', '(1, win, win, 1)'], {}), '(X, (1, win, win, 1), (1, win, win, 1))\n', (164, 203), False, 'from skimage.util import view_as_windows\n'), ((363, 383), 'numpy.moveaxis', 'np.moveaxis', (['X', '(5)', '(2)'], {}), '(X, 5, 2)\n', (374, 383), True, 'import numpy as np\n'), ((392, 412), 'numpy.moveaxis', 'np.moveaxis', (['X', '(6)', '(4)'], {}), '(X, 6, 4)\n', (403, 412), True, 'import numpy as np\n'), ((1591, 1607), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (1599, 1607), True, 'import numpy as np\n'), ((2506, 2530), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2513, 2530), True, 'import numpy as np\n'), ((2543, 2573), 'numpy.concatenate', 'np.concatenate', (['(X, A)'], {'axis': '(1)'}), '((X, A), axis=1)\n', (2557, 2573), True, 'import numpy as np\n'), ((2605, 2638), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['X', 'Y'], {'rcond': 'None'}), '(X, Y, rcond=None)\n', (2620, 2638), True, 'import numpy as np\n'), ((2738, 2761), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (2747, 2761), True, 'import numpy as np\n'), ((2807, 2831), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2814, 2831), True, 'import numpy as np\n'), ((2844, 2874), 'numpy.concatenate', 'np.concatenate', (['(X, A)'], {'axis': '(1)'}), '((X, A), axis=1)\n', (2858, 2874), True, 'import numpy as np\n'), ((2890, 2915), 'numpy.matmul', 'np.matmul', (['X', 'self.weight'], {}), '(X, self.weight)\n', (2899, 2915), True, 'import numpy as np\n'), ((708, 736), 'scipy.fftpack.dct', 'dct', (['a'], {'axis': '(1)', 'norm': '"""ortho"""'}), "(a, axis=1, norm='ortho')\n", (711, 736), False, 'from scipy.fftpack import dct, idct\n'), ((913, 942), 'scipy.fftpack.idct', 'idct', (['a'], {'axis': '(1)', 'norm': '"""ortho"""'}), "(a, axis=1, norm='ortho')\n", (917, 942), False, 'from scipy.fftpack import dct, idct\n'), ((2978, 
3013), 'numpy.min', 'np.min', (['pred'], {'axis': '(1)', 'keepdims': '(True)'}), '(pred, axis=1, keepdims=True)\n', (2984, 3013), True, 'import numpy as np\n'), ((1825, 1845), 'numpy.sqrt', 'np.sqrt', (['X.shape[-1]'], {}), '(X.shape[-1])\n', (1832, 1845), True, 'import numpy as np\n'), ((1949, 1969), 'numpy.argsort', 'np.argsort', (['self.idx'], {}), '(self.idx)\n', (1959, 1969), True, 'import numpy as np\n'), ((2068, 2088), 'numpy.sqrt', 'np.sqrt', (['X.shape[-1]'], {}), '(X.shape[-1])\n', (2075, 2088), True, 'import numpy as np\n'), ((2464, 2476), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (2473, 2476), True, 'import numpy as np\n'), ((3031, 3066), 'numpy.min', 'np.min', (['pred'], {'axis': '(1)', 'keepdims': '(True)'}), '(pred, axis=1, keepdims=True)\n', (3037, 3066), True, 'import numpy as np\n')] |
import numpy as np
import utils

# Pre-allocated containers: greyscale L-channel inputs (256x256x1),
# downsampled ab-channel ground truth (64x64x2) and full-colour
# reference images (256x256x3) for each split.
test_np = np.ndarray(shape=(100, 256, 256, 1))
train_np = np.ndarray(shape=(800, 256, 256, 1))
valid_np = np.ndarray(shape=(100, 256, 256, 1))

train_np_gt = np.ndarray(shape=(800, 64, 64, 2))
valid_np_gt = np.ndarray(shape=(100, 64, 64, 2))

train_np_real = np.ndarray(shape=(800, 256, 256, 3))
valid_np_real = np.ndarray(shape=(100, 256, 256, 3))


def _save(path, array):
    """Persist ``array`` to ``path`` as a .npy file."""
    with open(path, 'wb') as file:
        np.save(file, array)


def _load_l_channel(path):
    """Read an image and return its Lab L channel as int8 (256, 256, 1)."""
    img = utils.read_image(path).astype(np.uint8)
    img, _ = utils.cvt2Lab(img)
    return img.reshape(256, 256, 1).astype(np.int8)


def _fill_split(list_file, gray_dir, out, out_gt=None, out_real=None):
    """Fill the pre-allocated arrays with every image named in ``list_file``.

    This replaces three near-identical copy-pasted loops (train / valid /
    test) from the original script.

    out      : receives the greyscale L channel of each image
    out_gt   : optional, receives the int8 ab channels of '../color_64/' images
    out_real : optional, receives the raw uint8 pixels of '../color_256/' images
    """
    index = 0
    with open(list_file, 'r') as file:
        for line in file.readlines():
            if line.strip():
                name = line.split('\n')[0]
                out[index] = _load_l_channel(gray_dir + name)
                if out_gt is not None:
                    img_gt = utils.read_image('../color_64/' + name).astype(np.uint8)
                    _, img_gt = utils.cvt2Lab(img_gt)
                    out_gt[index] = img_gt.astype(np.int8)
                if out_real is not None:
                    out_real[index] = utils.read_image('../color_256/' + name).astype(np.uint8)
                index = index + 1


_fill_split('train.txt', '../gray/', train_np, train_np_gt, train_np_real)
_save('../train.npy', train_np)
_save('../train_gt.npy', train_np_gt)
_save('../train_real.npy', train_np_real)

_fill_split('valid.txt', '../gray/', valid_np, valid_np_gt, valid_np_real)
_save('../valid.npy', valid_np)
_save('../valid_gt.npy', valid_np_gt)
_save('../valid_real.npy', valid_np_real)

_fill_split('test.txt', '../test_gray/', test_np)
_save('../test.npy', test_np)
"utils.cvt2Lab",
"utils.read_image",
"numpy.ndarray",
"numpy.save"
] | [((43, 79), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(100, 256, 256, 1)'}), '(shape=(100, 256, 256, 1))\n', (53, 79), True, 'import numpy as np\n'), ((92, 128), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(800, 256, 256, 1)'}), '(shape=(800, 256, 256, 1))\n', (102, 128), True, 'import numpy as np\n'), ((140, 176), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(100, 256, 256, 1)'}), '(shape=(100, 256, 256, 1))\n', (150, 176), True, 'import numpy as np\n'), ((192, 226), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(800, 64, 64, 2)'}), '(shape=(800, 64, 64, 2))\n', (202, 226), True, 'import numpy as np\n'), ((241, 275), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(100, 64, 64, 2)'}), '(shape=(100, 64, 64, 2))\n', (251, 275), True, 'import numpy as np\n'), ((293, 329), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(800, 256, 256, 3)'}), '(shape=(800, 256, 256, 3))\n', (303, 329), True, 'import numpy as np\n'), ((346, 382), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(100, 256, 256, 3)'}), '(shape=(100, 256, 256, 3))\n', (356, 382), True, 'import numpy as np\n'), ((1169, 1192), 'numpy.save', 'np.save', (['file', 'train_np'], {}), '(file, train_np)\n', (1176, 1192), True, 'import numpy as np\n'), ((1242, 1268), 'numpy.save', 'np.save', (['file', 'train_np_gt'], {}), '(file, train_np_gt)\n', (1249, 1268), True, 'import numpy as np\n'), ((1320, 1348), 'numpy.save', 'np.save', (['file', 'train_np_real'], {}), '(file, train_np_real)\n', (1327, 1348), True, 'import numpy as np\n'), ((2136, 2159), 'numpy.save', 'np.save', (['file', 'valid_np'], {}), '(file, valid_np)\n', (2143, 2159), True, 'import numpy as np\n'), ((2209, 2235), 'numpy.save', 'np.save', (['file', 'valid_np_gt'], {}), '(file, valid_np_gt)\n', (2216, 2235), True, 'import numpy as np\n'), ((2287, 2315), 'numpy.save', 'np.save', (['file', 'valid_np_real'], {}), '(file, valid_np_real)\n', (2294, 2315), True, 'import numpy as np\n'), ((2764, 2786), 'numpy.save', 'np.save', (['file', 
'test_np'], {}), '(file, test_np)\n', (2771, 2786), True, 'import numpy as np\n'), ((639, 657), 'utils.cvt2Lab', 'utils.cvt2Lab', (['img'], {}), '(img)\n', (652, 657), False, 'import utils\n'), ((823, 844), 'utils.cvt2Lab', 'utils.cvt2Lab', (['img_gt'], {}), '(img_gt)\n', (836, 844), False, 'import utils\n'), ((1606, 1624), 'utils.cvt2Lab', 'utils.cvt2Lab', (['img'], {}), '(img)\n', (1619, 1624), False, 'import utils\n'), ((1790, 1811), 'utils.cvt2Lab', 'utils.cvt2Lab', (['img_gt'], {}), '(img_gt)\n', (1803, 1811), False, 'import utils\n'), ((2577, 2595), 'utils.cvt2Lab', 'utils.cvt2Lab', (['img'], {}), '(img)\n', (2590, 2595), False, 'import utils\n'), ((562, 600), 'utils.read_image', 'utils.read_image', (["('../gray/' + line[0])"], {}), "('../gray/' + line[0])\n", (578, 600), False, 'import utils\n'), ((739, 781), 'utils.read_image', 'utils.read_image', (["('../color_64/' + line[0])"], {}), "('../color_64/' + line[0])\n", (755, 781), False, 'import utils\n'), ((913, 956), 'utils.read_image', 'utils.read_image', (["('../color_256/' + line[0])"], {}), "('../color_256/' + line[0])\n", (929, 956), False, 'import utils\n'), ((1529, 1567), 'utils.read_image', 'utils.read_image', (["('../gray/' + line[0])"], {}), "('../gray/' + line[0])\n", (1545, 1567), False, 'import utils\n'), ((1706, 1748), 'utils.read_image', 'utils.read_image', (["('../color_64/' + line[0])"], {}), "('../color_64/' + line[0])\n", (1722, 1748), False, 'import utils\n'), ((1880, 1923), 'utils.read_image', 'utils.read_image', (["('../color_256/' + line[0])"], {}), "('../color_256/' + line[0])\n", (1896, 1923), False, 'import utils\n'), ((2495, 2538), 'utils.read_image', 'utils.read_image', (["('../test_gray/' + line[0])"], {}), "('../test_gray/' + line[0])\n", (2511, 2538), False, 'import utils\n')] |
import numpy as np
def get_ranks(array):
    """Return the rank (0-based position in an ascending sort) of each element.

    Computed as the inverse permutation of ``np.argsort(array)``, which is
    itself obtained by argsorting the sort order.
    """
    order = np.argsort(array)
    return np.argsort(order)
| [
"numpy.argsort",
"numpy.empty_like"
] | [((58, 75), 'numpy.argsort', 'np.argsort', (['array'], {}), '(array)\n', (68, 75), True, 'import numpy as np\n'), ((87, 110), 'numpy.empty_like', 'np.empty_like', (['args_tmp'], {}), '(args_tmp)\n', (100, 110), True, 'import numpy as np\n')] |
"""Tests for NoiseTable."""
import numpy as np
from src.utils.noise_table import NoiseTable
def test_mirrored_sample():
table = NoiseTable(size=1000)
rng = np.random.default_rng()
vec = table.sample_index_vec(rng, 100, None)
noise = table.get_vec(vec)
vec.mirror = True
mirrored_noise = table.get_vec(vec)
assert (noise == -mirrored_noise).all()
| [
"src.utils.noise_table.NoiseTable",
"numpy.random.default_rng"
] | [((135, 156), 'src.utils.noise_table.NoiseTable', 'NoiseTable', ([], {'size': '(1000)'}), '(size=1000)\n', (145, 156), False, 'from src.utils.noise_table import NoiseTable\n'), ((167, 190), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (188, 190), True, 'import numpy as np\n')] |
import numpy as np
import pickle
from pathlib import Path
source_path = "./data/unirep/stability"
for path in Path(source_path).rglob('*.npz'):
pickle_file = str(path).replace('npz', 'p')
seq_dict = dict()
data = np.load(path, allow_pickle=True)
print(path)
dups = set()
dup_count = 0
for id in data.files:
if id in dups:
dup_count += 1
continue
if id in seq_dict:
del seq_dict[id]
dups.add(id)
dup_count += 1
continue
seq = data[id].item()['seq']
if seq.shape[0] > 1000:
print(seq.shape)
continue
seq_dict[id] = seq
print("dups: ", dup_count)
with open(pickle_file, 'wb') as f:
pickle.dump(seq_dict, f)
| [
"numpy.load",
"pickle.dump",
"pathlib.Path"
] | [((228, 260), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (235, 260), True, 'import numpy as np\n'), ((111, 128), 'pathlib.Path', 'Path', (['source_path'], {}), '(source_path)\n', (115, 128), False, 'from pathlib import Path\n'), ((764, 788), 'pickle.dump', 'pickle.dump', (['seq_dict', 'f'], {}), '(seq_dict, f)\n', (775, 788), False, 'import pickle\n')] |
"""
A set of large-scale tests which test code updates against previously-run "golden"
results.
The idea here is that any new updates (except for major versions) should be non-breaking;
firstly, they should not break the API, so that the tests should run without crashing without
being changed.
Secondly, the actual results of running the basic functions should remain the same for
the same input code, except for potential bug-fixes. In these cases, these tests should
pick these changes up. The test data should then be changed to reflect the new gold
standard, and if applicable, a new test should be written that reflects the previous
broken code.
Thirdly, it enforces that new features, where possible, are added in such a way as to
keep the default behaviour constant. That is, the tests here should *not* run the added
feature, and therefore should continue to produce the same test results regardless of
the new feature added. The new feature should be accompanied by its own tests, whether
in this or another test module. If a new feature *must* be included by default, then
it must be implemented in a new major version of the code, at which point the test data
is able to be updated.
Comparison tests here are meant to be as small as possible while attempting to form
a reasonable test: they should be of reduced data such as power spectra or global xHI
measurements, and they should be generated with small simulations.
"""
import pytest
import h5py
import logging
import matplotlib as mpl
import numpy as np
from py21cmfast import config, global_params
from . import produce_integration_test_data as prd
logger = logging.getLogger("21cmFAST")
logger.setLevel(logging.INFO)
options = list(prd.OPTIONS.keys())
options_pt = list(prd.OPTIONS_PT.keys())
options_halo = list(prd.OPTIONS_HALO.keys())
@pytest.mark.parametrize("name", options)
def test_power_spectra_coeval(name, module_direc, plt):
redshift, kwargs = prd.OPTIONS[name]
print(f"Options used for the test at z={redshift}: ", kwargs)
# First get pre-made data
with h5py.File(prd.get_filename("power_spectra", name), "r") as fl:
true_powers = {
"_".join(key.split("_")[1:]): value[...]
for key, value in fl["coeval"].items()
if key.startswith("power_")
}
# Now compute the Coeval object
with config.use(direc=module_direc, regenerate=False, write=True):
with global_params.use(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR):
# Note that if zprime_step_factor is set in kwargs, it will over-ride this.
test_k, test_powers, _ = prd.produce_coeval_power_spectra(
redshift, **kwargs
)
if plt == mpl.pyplot:
make_coeval_comparison_plot(test_k, true_powers, test_powers, plt)
for key, value in true_powers.items():
print(f"Testing {key}")
assert np.sum(~np.isclose(value, test_powers[key], atol=0, rtol=1e-2)) < 10
np.testing.assert_allclose(value, test_powers[key], atol=0, rtol=1e-1)
@pytest.mark.parametrize("name", options)
def test_power_spectra_lightcone(name, module_direc, plt):
redshift, kwargs = prd.OPTIONS[name]
print(f"Options used for the test at z={redshift}: ", kwargs)
# First get pre-made data
with h5py.File(prd.get_filename("power_spectra", name), "r") as fl:
true_powers = {}
true_global = {}
for key in fl["lightcone"].keys():
if key.startswith("power_"):
true_powers["_".join(key.split("_")[1:])] = fl["lightcone"][key][...]
elif key.startswith("global_"):
true_global[key] = fl["lightcone"][key][...]
# Now compute the lightcone
with config.use(direc=module_direc, regenerate=False, write=True):
with global_params.use(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR):
# Note that if zprime_step_factor is set in kwargs, it will over-ride this.
test_k, test_powers, lc = prd.produce_lc_power_spectra(redshift, **kwargs)
if plt == mpl.pyplot:
make_lightcone_comparison_plot(
test_k, lc.node_redshifts, true_powers, true_global, test_powers, lc, plt
)
for key, value in true_powers.items():
print(f"Testing {key}")
# Ensure all but 10 of the values is within 1%, and none of the values
# is outside 10%
assert np.sum(~np.isclose(value, test_powers[key], atol=0, rtol=1e-2)) < 10
assert np.allclose(value, test_powers[key], atol=0, rtol=1e-1)
for key, value in true_global.items():
print(f"Testing Global {key}")
assert np.allclose(value, getattr(lc, key), atol=0, rtol=1e-3)
def make_lightcone_comparison_plot(
k, z, true_powers, true_global, test_powers, lc, plt
):
n = len(true_global) + len(true_powers)
fig, ax = plt.subplots(2, n, figsize=(3 * n, 5))
for i, (key, val) in enumerate(true_powers.items()):
make_comparison_plot(
k, val, test_powers[key], ax[:, i], xlab="k", ylab=f"{key} Power"
)
for i, (key, val) in enumerate(true_global.items(), start=i + 1):
make_comparison_plot(
z, val, getattr(lc, key), ax[:, i], xlab="z", ylab=f"{key}"
)
def make_coeval_comparison_plot(k, true_powers, test_powers, plt):
fig, ax = plt.subplots(
2, len(true_powers), figsize=(3 * len(true_powers), 6), sharex=True
)
for i, (key, val) in enumerate(true_powers.items()):
make_comparison_plot(
k, val, test_powers[key], ax[:, i], xlab="k", ylab=f"{key} Power"
)
def make_comparison_plot(x, true, test, ax, logx=True, logy=True, xlab=None, ylab=None):
ax[0].plot(x, true, label="True")
ax[0].plot(x, test, label="Test")
if logx:
ax[0].set_xscale("log")
if logy:
ax[0].set_yscale("log")
if xlab:
ax[0].set_xlabel(xlab)
if ylab:
ax[0].set_ylabel(ylab)
ax[0].legend()
ax[1].plot(x, (test - true) / true)
ax[1].set_ylabel("Fractional Difference")
@pytest.mark.parametrize("name", options_pt)
def test_perturb_field_data(name):
redshift, kwargs = prd.OPTIONS_PT[name]
print("Options used for the test: ", kwargs)
# First get pre-made data
with h5py.File(prd.get_filename("perturb_field_data", name), "r") as f:
power_dens = f["power_dens"][...]
power_vel = f["power_vel"][...]
pdf_dens = f["pdf_dens"][...]
pdf_vel = f["pdf_vel"][...]
with global_params.use(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR):
# Note that if zprime_step_factor is set in kwargs, it will over-ride this.
(
k_dens,
p_dens,
k_vel,
p_vel,
x_dens,
y_dens,
x_vel,
y_vel,
ic,
) = prd.produce_perturb_field_data(redshift, **kwargs)
assert np.allclose(power_dens, p_dens, atol=5e-3, rtol=1e-3)
assert np.allclose(power_vel, p_vel, atol=5e-3, rtol=1e-3)
assert np.allclose(pdf_dens, y_dens, atol=5e-3, rtol=1e-3)
assert np.allclose(pdf_vel, y_vel, atol=5e-3, rtol=1e-3)
@pytest.mark.parametrize("name", options_halo)
def test_halo_field_data(name):
redshift, kwargs = prd.OPTIONS_HALO[name]
print("Options used for the test: ", kwargs)
# First get pre-made data
with h5py.File(prd.get_filename("halo_field_data", name), "r") as f:
n_pt_halos = f["n_pt_halos"][...]
pt_halo_masses = f["pt_halo_masses"][...]
with global_params.use(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR):
# Note that if zprime_step_factor is set in kwargs, it will over-ride this.
pt_halos = prd.produce_halo_field_data(redshift, **kwargs)
assert np.allclose(n_pt_halos, pt_halos.n_halos, atol=5e-3, rtol=1e-3)
assert np.allclose(
np.sum(pt_halo_masses), np.sum(pt_halos.halo_masses), atol=5e-3, rtol=1e-3
)
| [
"logging.getLogger",
"numpy.allclose",
"py21cmfast.global_params.use",
"py21cmfast.config.use",
"numpy.isclose",
"numpy.testing.assert_allclose",
"pytest.mark.parametrize",
"numpy.sum"
] | [((1632, 1661), 'logging.getLogger', 'logging.getLogger', (['"""21cmFAST"""'], {}), "('21cmFAST')\n", (1649, 1661), False, 'import logging\n'), ((1818, 1858), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', 'options'], {}), "('name', options)\n", (1841, 1858), False, 'import pytest\n'), ((3046, 3086), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', 'options'], {}), "('name', options)\n", (3069, 3086), False, 'import pytest\n'), ((6060, 6103), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', 'options_pt'], {}), "('name', options_pt)\n", (6083, 6103), False, 'import pytest\n'), ((7160, 7205), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', 'options_halo'], {}), "('name', options_halo)\n", (7183, 7205), False, 'import pytest\n'), ((6916, 6971), 'numpy.allclose', 'np.allclose', (['power_dens', 'p_dens'], {'atol': '(0.005)', 'rtol': '(0.001)'}), '(power_dens, p_dens, atol=0.005, rtol=0.001)\n', (6927, 6971), True, 'import numpy as np\n'), ((6981, 7034), 'numpy.allclose', 'np.allclose', (['power_vel', 'p_vel'], {'atol': '(0.005)', 'rtol': '(0.001)'}), '(power_vel, p_vel, atol=0.005, rtol=0.001)\n', (6992, 7034), True, 'import numpy as np\n'), ((7044, 7097), 'numpy.allclose', 'np.allclose', (['pdf_dens', 'y_dens'], {'atol': '(0.005)', 'rtol': '(0.001)'}), '(pdf_dens, y_dens, atol=0.005, rtol=0.001)\n', (7055, 7097), True, 'import numpy as np\n'), ((7107, 7158), 'numpy.allclose', 'np.allclose', (['pdf_vel', 'y_vel'], {'atol': '(0.005)', 'rtol': '(0.001)'}), '(pdf_vel, y_vel, atol=0.005, rtol=0.001)\n', (7118, 7158), True, 'import numpy as np\n'), ((7772, 7837), 'numpy.allclose', 'np.allclose', (['n_pt_halos', 'pt_halos.n_halos'], {'atol': '(0.005)', 'rtol': '(0.001)'}), '(n_pt_halos, pt_halos.n_halos, atol=0.005, rtol=0.001)\n', (7783, 7837), True, 'import numpy as np\n'), ((2349, 2409), 'py21cmfast.config.use', 'config.use', ([], {'direc': 'module_direc', 'regenerate': '(False)', 
'write': '(True)'}), '(direc=module_direc, regenerate=False, write=True)\n', (2359, 2409), False, 'from py21cmfast import config, global_params\n'), ((2972, 3041), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['value', 'test_powers[key]'], {'atol': '(0)', 'rtol': '(0.1)'}), '(value, test_powers[key], atol=0, rtol=0.1)\n', (2998, 3041), True, 'import numpy as np\n'), ((3723, 3783), 'py21cmfast.config.use', 'config.use', ([], {'direc': 'module_direc', 'regenerate': '(False)', 'write': '(True)'}), '(direc=module_direc, regenerate=False, write=True)\n', (3733, 3783), False, 'from py21cmfast import config, global_params\n'), ((4485, 4539), 'numpy.allclose', 'np.allclose', (['value', 'test_powers[key]'], {'atol': '(0)', 'rtol': '(0.1)'}), '(value, test_powers[key], atol=0, rtol=0.1)\n', (4496, 4539), True, 'import numpy as np\n'), ((6505, 6573), 'py21cmfast.global_params.use', 'global_params.use', ([], {'zprime_step_factor': 'prd.DEFAULT_ZPRIME_STEP_FACTOR'}), '(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR)\n', (6522, 6573), False, 'from py21cmfast import config, global_params\n'), ((7539, 7607), 'py21cmfast.global_params.use', 'global_params.use', ([], {'zprime_step_factor': 'prd.DEFAULT_ZPRIME_STEP_FACTOR'}), '(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR)\n', (7556, 7607), False, 'from py21cmfast import config, global_params\n'), ((7868, 7890), 'numpy.sum', 'np.sum', (['pt_halo_masses'], {}), '(pt_halo_masses)\n', (7874, 7890), True, 'import numpy as np\n'), ((7892, 7920), 'numpy.sum', 'np.sum', (['pt_halos.halo_masses'], {}), '(pt_halos.halo_masses)\n', (7898, 7920), True, 'import numpy as np\n'), ((2424, 2492), 'py21cmfast.global_params.use', 'global_params.use', ([], {'zprime_step_factor': 'prd.DEFAULT_ZPRIME_STEP_FACTOR'}), '(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR)\n', (2441, 2492), False, 'from py21cmfast import config, global_params\n'), ((3798, 3866), 'py21cmfast.global_params.use', 'global_params.use', ([], 
{'zprime_step_factor': 'prd.DEFAULT_ZPRIME_STEP_FACTOR'}), '(zprime_step_factor=prd.DEFAULT_ZPRIME_STEP_FACTOR)\n', (3815, 3866), False, 'from py21cmfast import config, global_params\n'), ((2903, 2957), 'numpy.isclose', 'np.isclose', (['value', 'test_powers[key]'], {'atol': '(0)', 'rtol': '(0.01)'}), '(value, test_powers[key], atol=0, rtol=0.01)\n', (2913, 2957), True, 'import numpy as np\n'), ((4409, 4463), 'numpy.isclose', 'np.isclose', (['value', 'test_powers[key]'], {'atol': '(0)', 'rtol': '(0.01)'}), '(value, test_powers[key], atol=0, rtol=0.01)\n', (4419, 4463), True, 'import numpy as np\n')] |
# Preppin' Data 2021 Week 01
import os
import pandas
import numpy
# Load csv
data = pandas.read_csv('unprepped_data\\PD 2021 Wk 1 Input - Bike Sales.csv')
# Split the 'Store-Bike' into 'Store' and 'Bike'
data[['Store','Bike']] = data['Store - Bike'].str.split(' - ', expand=True)
# Clean up the 'Bike' field to: Mountain, Gravel, Road
data['Bike'] = data['Bike'].str.lower()
data['Bike'] = data['Bike'].str[0]
data['Bike'] = numpy.where(data['Bike']=='m','Mountain',numpy.where(data['Bike']=='r','Road','Gravel'))
# Create a 'Quarter' and 'Day of Month' fields
data['Date'] = pandas.to_datetime(data['Date'])
data['Quarter'] = data['Date'].dt.quarter
data['Day of Month'] = data['Date'].dt.day
# Remove the first 10 orders
data = data.loc[(data['Order ID'] >= 11)]
# Output the data as a csv
data = data.drop(['Store - Bike','Date'], axis=1)
data.to_csv('prepped_data\\PD 2021 Wk 1 Input - Bike Sales.csv', index=False)
print("data prepped!")
| [
"numpy.where",
"pandas.to_datetime",
"pandas.read_csv"
] | [((87, 157), 'pandas.read_csv', 'pandas.read_csv', (['"""unprepped_data\\\\PD 2021 Wk 1 Input - Bike Sales.csv"""'], {}), "('unprepped_data\\\\PD 2021 Wk 1 Input - Bike Sales.csv')\n", (102, 157), False, 'import pandas\n'), ((582, 614), 'pandas.to_datetime', 'pandas.to_datetime', (["data['Date']"], {}), "(data['Date'])\n", (600, 614), False, 'import pandas\n'), ((471, 521), 'numpy.where', 'numpy.where', (["(data['Bike'] == 'r')", '"""Road"""', '"""Gravel"""'], {}), "(data['Bike'] == 'r', 'Road', 'Gravel')\n", (482, 521), False, 'import numpy\n')] |
import copy
import numpy as np
import imageio
import torch
import torch.nn.functional as F
from models.rendering import get_rays_tourism, sample_points, volume_render
def test_time_optimize(args, model, meta_state_dict, tto_view):
"""
quicky optimize the meta trained model to a target appearance
and return the corresponding network weights
"""
model.load_state_dict(meta_state_dict)
optim = torch.optim.SGD(model.parameters(), args.tto_lr)
pixels = tto_view['img'].reshape(-1, 3)
rays_o, rays_d = get_rays_tourism(tto_view['H'], tto_view['W'],
tto_view['kinv'], tto_view['pose'])
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
num_rays = rays_d.shape[0]
for step in range(args.tto_steps):
indices = torch.randint(num_rays, size=[args.tto_batchsize])
raybatch_o, raybatch_d = rays_o[indices], rays_d[indices]
pixelbatch = pixels[indices]
t_vals, xyz = sample_points(raybatch_o, raybatch_d,
tto_view['bound'][0], tto_view['bound'][1],
args.num_samples, perturb=True)
optim.zero_grad()
rgbs, sigmas = model(xyz)
colors = volume_render(rgbs, sigmas, t_vals)
loss = F.mse_loss(colors, pixelbatch)
loss.backward()
optim.step()
state_dict = copy.deepcopy(model.state_dict())
return state_dict
def synthesize_view(args, model, H, W, kinv, pose, bound):
"""
given camera intrinsics and camera pose, synthesize a novel view
"""
rays_o, rays_d = get_rays_tourism(H, W, kinv, pose)
rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
t_vals, xyz = sample_points(rays_o, rays_d, bound[0], bound[1],
args.num_samples, perturb=False)
synth = []
num_rays = rays_d.shape[0]
with torch.no_grad():
for i in range(0, num_rays, args.test_batchsize):
rgbs_batch, sigmas_batch = model(xyz[i:i+args.test_batchsize])
color_batch = volume_render(rgbs_batch, sigmas_batch, t_vals[i:i+args.test_batchsize])
synth.append(color_batch)
synth = torch.cat(synth, dim=0).reshape(H, W, 3)
return synth
def interpolate_views(args, model, prev, next):
"""
generate new views by interpolating between aspect ratio,
focal length, camera pose and scene appearance of two views
"""
synth_views = []
for t in np.linspace(0, 1, 60):
H = int(prev['H']*(1-t) + next['H']*t)
W = int(prev['W']*(1-t) + next['W']*t)
focal = prev['focal']*(1-t) + next['focal']*t
kinv = torch.as_tensor([[1/focal, 0, -0.5*W/focal],
[0, -1/focal, 0.5*H/focal],
[0, 0, -1]], device=focal.device)
pose = prev['pose']*(1-t) + next['pose']*t
bound = prev['bound']*(1-t) + next['bound']*t
state_dict = {name:
prev['state_dict'][name]*(1-t) + next['state_dict'][name]*t
for name in next['state_dict']
}
model.load_state_dict(state_dict)
view = synthesize_view(args, model, H, W, kinv, pose, bound)
synth_views.append(view.cpu().numpy())
return synth_views
def resize_video(video_frames, max_H, max_W):
"""
downsample the frames by two and pad the frame boundaries
to have the same shape
Note: downsampling is done without anti-aliasing
"""
max_H = max_H//2 + 5 # add a five pixel margin
max_W = max_W//2 + 5
padded_frames = []
for frame in video_frames:
frame = frame[::2, ::2]
H, W, C = frame.shape
# create a white canvas
canvas = np.ones([max_H, max_W, C])
# calculate center offset
off_H, off_W = (max_H - H)//2, (max_W - W)//2
# paste the frame in the center
canvas[off_H:off_H+H, off_W:off_W+W] = frame
padded_frames.append(canvas)
return padded_frames
def create_interpolation_video(args, model, meta_state_dict, test_set, device):
"""
create interpolation video like the original project demo
https://www.matthewtancik.com/learnit
"""
heights = []
widths = []
view_idx = args.interpolate[0]
img, pose, kinv, bound = test_set[view_idx]
prev = {}
prev['view_idx'] = view_idx
prev['H'], prev['W'] = img.shape[:2]
prev['img'], prev['kinv'] = img.to(device), kinv.to(device)
prev['pose'], prev['bound'] = pose.to(device), bound.to(device)
prev['focal'] = 1/prev['kinv'][0][0]
prev['state_dict'] = test_time_optimize(args, model, meta_state_dict, prev)
heights.append(prev['H'])
widths.append(prev['W'])
video_frames = []
for view_idx in args.interpolate[1:]:
img, pose, kinv, bound = test_set[view_idx]
next = {}
next['view_idx'] = view_idx
next['H'], next['W'] = img.shape[:2]
next['img'], next['kinv'] = img.to(device), kinv.to(device)
next['pose'], next['bound'] = pose.to(device), bound.to(device)
next['focal'] = 1/next['kinv'][0][0]
next['state_dict'] = test_time_optimize(args, model, meta_state_dict, next)
synth_views = interpolate_views(args, model, prev, next)
video_frames.extend(synth_views)
logging.info(f"test view {prev['view_idx']} and {next['view_idx']} interpolated")
prev = next
heights.append(prev['H'])
widths.append(prev['W'])
# resize frames
max_H = max(heights)
max_W = max(widths)
video_frames = resize_video(video_frames, max_H, max_W)
video_frames = np.stack(video_frames, axis=0)
video_frames = (video_frames*255).astype(np.uint8)
imageio.mimwrite("interpolation.mp4", video_frames, fps=30)
return None
| [
"torch.nn.functional.mse_loss",
"torch.as_tensor",
"numpy.ones",
"models.rendering.get_rays_tourism",
"imageio.mimwrite",
"models.rendering.sample_points",
"numpy.stack",
"numpy.linspace",
"torch.randint",
"torch.no_grad",
"models.rendering.volume_render",
"torch.cat"
] | [((543, 630), 'models.rendering.get_rays_tourism', 'get_rays_tourism', (["tto_view['H']", "tto_view['W']", "tto_view['kinv']", "tto_view['pose']"], {}), "(tto_view['H'], tto_view['W'], tto_view['kinv'], tto_view[\n 'pose'])\n", (559, 630), False, 'from models.rendering import get_rays_tourism, sample_points, volume_render\n'), ((1649, 1683), 'models.rendering.get_rays_tourism', 'get_rays_tourism', (['H', 'W', 'kinv', 'pose'], {}), '(H, W, kinv, pose)\n', (1665, 1683), False, 'from models.rendering import get_rays_tourism, sample_points, volume_render\n'), ((1769, 1856), 'models.rendering.sample_points', 'sample_points', (['rays_o', 'rays_d', 'bound[0]', 'bound[1]', 'args.num_samples'], {'perturb': '(False)'}), '(rays_o, rays_d, bound[0], bound[1], args.num_samples, perturb\n =False)\n', (1782, 1856), False, 'from models.rendering import get_rays_tourism, sample_points, volume_render\n'), ((2532, 2553), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(60)'], {}), '(0, 1, 60)\n', (2543, 2553), True, 'import numpy as np\n'), ((5750, 5780), 'numpy.stack', 'np.stack', (['video_frames'], {'axis': '(0)'}), '(video_frames, axis=0)\n', (5758, 5780), True, 'import numpy as np\n'), ((5840, 5899), 'imageio.mimwrite', 'imageio.mimwrite', (['"""interpolation.mp4"""', 'video_frames'], {'fps': '(30)'}), "('interpolation.mp4', video_frames, fps=30)\n", (5856, 5899), False, 'import imageio\n'), ((823, 873), 'torch.randint', 'torch.randint', (['num_rays'], {'size': '[args.tto_batchsize]'}), '(num_rays, size=[args.tto_batchsize])\n', (836, 873), False, 'import torch\n'), ((1000, 1118), 'models.rendering.sample_points', 'sample_points', (['raybatch_o', 'raybatch_d', "tto_view['bound'][0]", "tto_view['bound'][1]", 'args.num_samples'], {'perturb': '(True)'}), "(raybatch_o, raybatch_d, tto_view['bound'][0], tto_view[\n 'bound'][1], args.num_samples, perturb=True)\n", (1013, 1118), False, 'from models.rendering import get_rays_tourism, sample_points, volume_render\n'), ((1273, 1308), 
'models.rendering.volume_render', 'volume_render', (['rgbs', 'sigmas', 't_vals'], {}), '(rgbs, sigmas, t_vals)\n', (1286, 1308), False, 'from models.rendering import get_rays_tourism, sample_points, volume_render\n'), ((1324, 1354), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['colors', 'pixelbatch'], {}), '(colors, pixelbatch)\n', (1334, 1354), True, 'import torch.nn.functional as F\n'), ((1944, 1959), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1957, 1959), False, 'import torch\n'), ((2718, 2840), 'torch.as_tensor', 'torch.as_tensor', (['[[1 / focal, 0, -0.5 * W / focal], [0, -1 / focal, 0.5 * H / focal], [0, 0, -1]\n ]'], {'device': 'focal.device'}), '([[1 / focal, 0, -0.5 * W / focal], [0, -1 / focal, 0.5 * H /\n focal], [0, 0, -1]], device=focal.device)\n', (2733, 2840), False, 'import torch\n'), ((3796, 3822), 'numpy.ones', 'np.ones', (['[max_H, max_W, C]'], {}), '([max_H, max_W, C])\n', (3803, 3822), True, 'import numpy as np\n'), ((2120, 2194), 'models.rendering.volume_render', 'volume_render', (['rgbs_batch', 'sigmas_batch', 't_vals[i:i + args.test_batchsize]'], {}), '(rgbs_batch, sigmas_batch, t_vals[i:i + args.test_batchsize])\n', (2133, 2194), False, 'from models.rendering import get_rays_tourism, sample_points, volume_render\n'), ((2247, 2270), 'torch.cat', 'torch.cat', (['synth'], {'dim': '(0)'}), '(synth, dim=0)\n', (2256, 2270), False, 'import torch\n')] |
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from grid.clients.keras import KerasClient
from grid.workers.compute import GridCompute
import time
from threading import Thread
import pytest
client = None
compute_id = None
@pytest.fixture
def client():
client = KerasClient()
return client
def wait_for_discovery(client):
ipfs_id = client.id.split(":")
compute_id = None
time.sleep(30)
print(client.stats)
for stats in client.stats:
if ipfs_id[1] in stats['id']:
compute_id = stats['id']
if compute_id is None:
time.sleep(15)
else:
assert(compute_id is not None)
# TODO probably shouldn't have to try again
for stats in client.stats:
if ipfs_id[1] in stats['id']:
compute_id = stats['id']
return compute_id
def test_integration(client):
compute_id = wait_for_discovery(client)
assert(compute_id is not None)
input = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
target = np.array([[0], [1], [1], [0]])
model = Sequential()
model.add(Dense(8, input_dim=2))
model.add(Activation('tanh'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
sgd = SGD(lr=0.1)
model.compile(loss='binary_crossentropy', optimizer=sgd)
model, train_spec = client.fit(
model,
input,
target,
epochs=20,
log_interval=100,
preferred_node=f"{compute_id}")
| [
"keras.layers.core.Activation",
"grid.clients.keras.KerasClient",
"time.sleep",
"keras.models.Sequential",
"numpy.array",
"keras.optimizers.SGD",
"keras.layers.core.Dense"
] | [((357, 370), 'grid.clients.keras.KerasClient', 'KerasClient', ([], {}), '()\n', (368, 370), False, 'from grid.clients.keras import KerasClient\n'), ((485, 499), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (495, 499), False, 'import time\n'), ((1034, 1076), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (1042, 1076), True, 'import numpy as np\n'), ((1090, 1120), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (1098, 1120), True, 'import numpy as np\n'), ((1134, 1146), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1144, 1146), False, 'from keras.models import Sequential\n'), ((1290, 1301), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.1)'}), '(lr=0.1)\n', (1293, 1301), False, 'from keras.optimizers import SGD\n'), ((667, 681), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (677, 681), False, 'import time\n'), ((1161, 1182), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'input_dim': '(2)'}), '(8, input_dim=2)\n', (1166, 1182), False, 'from keras.layers.core import Dense, Activation\n'), ((1198, 1216), 'keras.layers.core.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (1208, 1216), False, 'from keras.layers.core import Dense, Activation\n'), ((1232, 1240), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1237, 1240), False, 'from keras.layers.core import Dense, Activation\n'), ((1256, 1277), 'keras.layers.core.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (1266, 1277), False, 'from keras.layers.core import Dense, Activation\n')] |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from ...hypothesiser.probability import PDAHypothesiser
from ...hypothesiser.distance import DistanceHypothesiser
from ...measures import Mahalanobis
from ...models.measurement.linear import LinearGaussian
from ...types.array import CovarianceMatrix
from ...models.transition.linear import ConstantVelocity, CombinedLinearGaussianTransitionModel
from ...predictor.kalman import KalmanPredictor
from ...updater.kalman import ExtendedKalmanUpdater
@pytest.fixture()
def measurement_model():
return LinearGaussian(ndim_state=4, mapping=[0, 2],
noise_covar=CovarianceMatrix(np.diag([1, 1])))
@pytest.fixture()
def predictor():
transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(1),
ConstantVelocity(1)])
return KalmanPredictor(transition_model)
@pytest.fixture()
def updater(measurement_model):
return ExtendedKalmanUpdater(measurement_model)
@pytest.fixture()
def probability_hypothesiser(predictor, updater):
return PDAHypothesiser(predictor, updater,
clutter_spatial_density=1.2e-2,
prob_detect=0.9, prob_gate=0.99)
@pytest.fixture()
def distance_hypothesiser(predictor, updater):
return DistanceHypothesiser(predictor, updater, Mahalanobis(), 10)
| [
"pytest.fixture",
"numpy.diag"
] | [((507, 523), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (521, 523), False, 'import pytest\n'), ((681, 697), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (695, 697), False, 'import pytest\n'), ((930, 946), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (944, 946), False, 'import pytest\n'), ((1034, 1050), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1048, 1050), False, 'import pytest\n'), ((1270, 1286), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1284, 1286), False, 'import pytest\n'), ((660, 675), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (667, 675), True, 'import numpy as np\n')] |
#!/usr/bin/env python
__author__ = '<NAME>'
#============================================================================
import os
import sys
import time
import uuid
import shutil
import importlib
import subprocess
import numpy as np
from Utils.utils import Printer, ParserJSON
#============================================================================
home = os.getcwd()
sys.path.append('%s/ParamGenerator/Spearmint/' % home)
sys.path.append('%s/ParamGenerator/Spearmint/spearmint' % home)
for directory in ['kernels', 'models', 'sampling', 'schedulers', 'transformations', 'utils']:
sys.path.append('%s/ParamGenerator/Spearmint/spearmint/%s' % (home, directory))
sys.path.append('%s/ParamGenerator/Spearmint/spearmint/utils/database' % home)
from main import get_options, get_suggestion, parse_db_address, save_job
from spearmint.utils.database.mongodb import MongoDB
from spearmint.resources.resource import parse_resources_from_config
#============================================================================
class Spearmint(Printer):
def __init__(self, config_file, work_dir):
Printer.__init__(self, 'Spearmint', color = 'grey')
self.work_dir = work_dir
print(config_file)
self._parse_config_file(config_file)
try:
self.batch_size = self.param_dict['resources']['my-machine']['max-concurrent']
# self.num_batches = self.param_dict['general']['batches_per_round']
except KeyError:
# self.num_batches = 1
self.batch_size = 1
self.all_params, self.all_losses = [], []
def rand_gens(self, var_type = 'float', size = 1):
if var_type == 'float':
return np.random.uniform(low = 0, high = 1, size = size)
else:
raise NotImplementedError
def _parse_config_file(self, config_file):
self.json_parser = ParserJSON(file_name = config_file)
self.json_parser.parse()
self.param_dict = self.json_parser.param_dict
# now get the total number of variables
# and create a dictionary with the size of each variable
self.total_size = 0
self.var_sizes = []
self.var_names = []
for var_name, var_dict in self.param_dict['variables'].items():
self.total_size += var_dict['size']
self.var_sizes.append(int(var_dict['size']))
self.var_names.append(var_name)
# self.total_size += var_dict[list(var_dict)[0]]['size']
# self.var_sizes.append(int(var_dict[list(var_dict)[0]]['size']))
# self.var_names.append(list(var_dict)[0])
#
def _generate_uniform(self, num_samples = 10):
self.container, self.sampled_params = {}, {}
values = []
for var_index, var_name in enumerate(self.var_names):
sampled_values = self.rand_gens(var_type = self.param_dict['variables'][var_name]['type'], size = (self.param_dict['variables'][var_name]['size'], num_samples))
values.extend(sampled_values)
self.container[var_name] = sampled_values
values = np.array(values)
self.proposed = values.transpose()
def _parse_observations(self, observations):
all_params, all_losses = [], []
for observation in observations:
params = []
for var_name in self.var_names:
params.extend(observation[var_name]['samples'])
if len(self.all_params) > 0:
if np.amin([np.linalg.norm(params - old_param) for old_param in self.all_params]) > 1e-6:
all_losses.append(observation['loss'])
all_params.append(params)
else:
all_losses.append(observation['loss'])
all_params.append(params)
for index, element in enumerate(all_params):
self.all_params.append(element)
self.all_losses.append(all_losses[index])
return all_params, all_losses
def _create_mongo_instance(self):
self.db_path = '%s/db_%s/' % (self.work_dir, self.param_dict['experiment-name'])
print(self.db_path)
try:
shutil.rmtree(self.db_path)
except:
pass
os.mkdir(self.db_path)
subprocess.call('mongod --fork --logpath %s/mongodb.log --dbpath %s' % (self.db_path, self.db_path), shell = True)
	def _create_spearmint_parameters(self):
		"""Boot MongoDB and wire up Spearmint's options, resources, chooser and DB handle.

		Side effects: sets self.options, self.exp_dir, self.resources,
		self.chooser_module, self.chooser, self.experiment_name,
		self.db_address and self.db.
		"""
		self._create_mongo_instance()
		self.options, self.exp_dir = get_options(self.work_dir)
		self.resources = parse_resources_from_config(self.options)
		# dynamically load the chooser implementation named in the config
		self.chooser_module = importlib.import_module('spearmint.choosers.' + self.options['chooser'])
		self.chooser = self.chooser_module.init(self.options)
		self.experiment_name = self.options.get('experiment-name', 'unnamed_experiment')
		self.db_address = self.options['database']['address']
		self.db = MongoDB(database_address = self.db_address)
	def _sample_parameter_sets(self, num_samples, observations):
		"""Feed past observations to Spearmint and collect its next suggestion.

		Stores each observation as a completed 'job' in MongoDB, asks Spearmint
		for a suggestion per resource, replicates that suggestion num_samples
		times into self.proposed, and finally shuts the mongod instance down.

		Args:
			num_samples: number of copies of the suggested vector to propose.
			observations: past evaluations, in the format accepted by
				self._parse_observations.
		"""
		all_params, all_losses = self._parse_observations(observations)
		self._create_spearmint_parameters()
		# dump all observations in database
		for index, param in enumerate(all_params):
			print('PARAM', param, all_losses[index])
			params = {}
			start_index = 0
			# split the flat vector back into per-variable slices
			for var_index, var_name in enumerate(self.var_names):
				var_dict = self.param_dict['variables'][var_name]
				params[var_name] = {'type': var_dict['type'], 'values': np.array(param[start_index : start_index + var_dict['size']])}
				start_index += var_dict['size']
			job = {'id': index + 1, 'expt_dir': self.work_dir, 'tasks': ['main'], 'resource': 'my-machine', 'main-file': 'main_file.py',
				   'language': 'PYTHON', 'status': 'new', 'submit time': time.time(), 'start time': time.time(), 'end time': None,
				   'params': params}
			time.sleep(0.1)
			# mark the job as already evaluated with the observed loss
			job['values'] = {'main': all_losses[index]}
			job['status'] = 'complete'
			job['end time'] = time.time()
			self.db.save(job, self.experiment_name, 'jobs', {'id': job['id']})
		self.proposed = []
		for resource_name, resource in self.resources.items():
			print('RUNNING SPEARMINT')
			suggested_job = get_suggestion(self.chooser, resource.tasks, self.db, self.exp_dir, self.options, resource_name)
			print('DONE')
			# flatten the suggested per-variable values into one vector
			vector = []
			for var_name in self.var_names:
				vector.extend(suggested_job['params'][var_name]['values'])
			vector = np.array(vector)
			for index in range(num_samples):
				self.proposed.append(vector)
		print('PROPOSED', self.proposed)
		# stop the mongod instance started in _create_spearmint_parameters
		subprocess.call('mongod --shutdown --logpath %s/mongodb.log --dbpath %s' % (self.db_path, self.db_path), shell = True)
def choose(self, num_samples = None, observations = None):
current_dir = os.getcwd()
os.chdir(self.work_dir)
if not num_samples:
num_samples = self.batch_size
if observations:
self._print('proposing samples')
self._sample_parameter_sets(num_samples, observations)
else:
self._print('choosing uniformly')
self._generate_uniform(1)
os.chdir(current_dir)
# print('SHAPE', self.proposed.shape)
return self.proposed
| [
"main.get_suggestion",
"importlib.import_module",
"spearmint.resources.resource.parse_resources_from_config",
"time.time",
"numpy.linalg.norm",
"time.sleep",
"os.getcwd",
"os.chdir",
"Utils.utils.ParserJSON",
"numpy.array",
"shutil.rmtree",
"os.mkdir",
"subprocess.call",
"numpy.random.unif... | [((373, 384), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (382, 384), False, 'import os\n'), ((385, 439), 'sys.path.append', 'sys.path.append', (["('%s/ParamGenerator/Spearmint/' % home)"], {}), "('%s/ParamGenerator/Spearmint/' % home)\n", (400, 439), False, 'import sys\n'), ((440, 503), 'sys.path.append', 'sys.path.append', (["('%s/ParamGenerator/Spearmint/spearmint' % home)"], {}), "('%s/ParamGenerator/Spearmint/spearmint' % home)\n", (455, 503), False, 'import sys\n'), ((679, 757), 'sys.path.append', 'sys.path.append', (["('%s/ParamGenerator/Spearmint/spearmint/utils/database' % home)"], {}), "('%s/ParamGenerator/Spearmint/spearmint/utils/database' % home)\n", (694, 757), False, 'import sys\n'), ((599, 678), 'sys.path.append', 'sys.path.append', (["('%s/ParamGenerator/Spearmint/spearmint/%s' % (home, directory))"], {}), "('%s/ParamGenerator/Spearmint/spearmint/%s' % (home, directory))\n", (614, 678), False, 'import sys\n'), ((1107, 1156), 'Utils.utils.Printer.__init__', 'Printer.__init__', (['self', '"""Spearmint"""'], {'color': '"""grey"""'}), "(self, 'Spearmint', color='grey')\n", (1123, 1156), False, 'from Utils.utils import Printer, ParserJSON\n'), ((1764, 1797), 'Utils.utils.ParserJSON', 'ParserJSON', ([], {'file_name': 'config_file'}), '(file_name=config_file)\n', (1774, 1797), False, 'from Utils.utils import Printer, ParserJSON\n'), ((2829, 2845), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2837, 2845), True, 'import numpy as np\n'), ((3744, 3766), 'os.mkdir', 'os.mkdir', (['self.db_path'], {}), '(self.db_path)\n', (3752, 3766), False, 'import os\n'), ((3769, 3886), 'subprocess.call', 'subprocess.call', (["('mongod --fork --logpath %s/mongodb.log --dbpath %s' % (self.db_path, self\n .db_path))"], {'shell': '(True)'}), "('mongod --fork --logpath %s/mongodb.log --dbpath %s' % (\n self.db_path, self.db_path), shell=True)\n", (3784, 3886), False, 'import subprocess\n'), ((3990, 4016), 'main.get_options', 'get_options', 
(['self.work_dir'], {}), '(self.work_dir)\n', (4001, 4016), False, 'from main import get_options, get_suggestion, parse_db_address, save_job\n'), ((4043, 4084), 'spearmint.resources.resource.parse_resources_from_config', 'parse_resources_from_config', (['self.options'], {}), '(self.options)\n', (4070, 4084), False, 'from spearmint.resources.resource import parse_resources_from_config\n'), ((4111, 4183), 'importlib.import_module', 'importlib.import_module', (["('spearmint.choosers.' + self.options['chooser'])"], {}), "('spearmint.choosers.' + self.options['chooser'])\n", (4134, 4183), False, 'import importlib\n'), ((4402, 4443), 'spearmint.utils.database.mongodb.MongoDB', 'MongoDB', ([], {'database_address': 'self.db_address'}), '(database_address=self.db_address)\n', (4409, 4443), False, 'from spearmint.utils.database.mongodb import MongoDB\n'), ((6071, 6191), 'subprocess.call', 'subprocess.call', (["('mongod --shutdown --logpath %s/mongodb.log --dbpath %s' % (self.db_path,\n self.db_path))"], {'shell': '(True)'}), "('mongod --shutdown --logpath %s/mongodb.log --dbpath %s' %\n (self.db_path, self.db_path), shell=True)\n", (6086, 6191), False, 'import subprocess\n'), ((6270, 6281), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6279, 6281), False, 'import os\n'), ((6284, 6307), 'os.chdir', 'os.chdir', (['self.work_dir'], {}), '(self.work_dir)\n', (6292, 6307), False, 'import os\n'), ((6555, 6576), 'os.chdir', 'os.chdir', (['current_dir'], {}), '(current_dir)\n', (6563, 6576), False, 'import os\n'), ((1610, 1653), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0)', 'high': '(1)', 'size': 'size'}), '(low=0, high=1, size=size)\n', (1627, 1653), True, 'import numpy as np\n'), ((3696, 3723), 'shutil.rmtree', 'shutil.rmtree', (['self.db_path'], {}), '(self.db_path)\n', (3709, 3723), False, 'import shutil\n'), ((5322, 5337), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (5332, 5337), False, 'import time\n'), ((5436, 5447), 'time.time', 'time.time', 
([], {}), '()\n', (5445, 5447), False, 'import time\n'), ((5708, 5809), 'main.get_suggestion', 'get_suggestion', (['self.chooser', 'resource.tasks', 'self.db', 'self.exp_dir', 'self.options', 'resource_name'], {}), '(self.chooser, resource.tasks, self.db, self.exp_dir, self.\n options, resource_name)\n', (5722, 5809), False, 'from main import get_options, get_suggestion, parse_db_address, save_job\n'), ((5947, 5963), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (5955, 5963), True, 'import numpy as np\n'), ((5236, 5247), 'time.time', 'time.time', ([], {}), '()\n', (5245, 5247), False, 'import time\n'), ((5263, 5274), 'time.time', 'time.time', ([], {}), '()\n', (5272, 5274), False, 'import time\n'), ((4948, 5007), 'numpy.array', 'np.array', (["param[start_index:start_index + var_dict['size']]"], {}), "(param[start_index:start_index + var_dict['size']])\n", (4956, 5007), True, 'import numpy as np\n'), ((3150, 3184), 'numpy.linalg.norm', 'np.linalg.norm', (['(params - old_param)'], {}), '(params - old_param)\n', (3164, 3184), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import random
# s: vertex ids usable as start terminals, one integer id per line
s=[]
with open('dirstart.txt','r') as fp:
    for line in fp:
        s.append(int(line))
# g: vertex ids usable as goal/destination terminals
g=[]
with open('dirdest.txt','r') as fp:
    for line in fp:
        g.append(int(line))
# v: vertex id -> coordinate list, parsed from "id x,y,..." lines
v={}
with open('dirvertices.txt','r') as vfp:
    for line in vfp:
        l=line.split(' ')
        v[int(l[0])]=[int(f) for f in l[1].split(',')]
def dist(a,b):
    """Euclidean distance between two coordinate sequences."""
    import numpy as np
    diff = np.array(a) - np.array(b)
    return np.linalg.norm(diff)
def vdist(a,b):
    """Euclidean distance between the coordinates of vertex ids a and b."""
    pa, pb = v[a], v[b]
    return dist(pa, pb)
def pick(l):
    """Return a uniformly random element of the non-empty sequence l."""
    # random.choice is the idiomatic equivalent of indexing with randint
    return random.choice(l)
def pickneither(k,l):
    """Pick a random vertex id that appears in neither collection k nor l."""
    ids = [vv for vv in v.keys()]
    while True:
        cand = pick(ids)
        if cand not in k and cand not in l:
            return cand
def picknot(l):
    """Pick a random vertex id that does not appear in collection l."""
    ids = [vv for vv in v.keys()]
    while True:
        cand = pick(ids)
        if cand not in l:
            return cand
# minimum pairwise separation between instance starts (and between goals)
radius=35
# random to/from terminal: build 25 instance files, each with 30 (start, goal)
# pairs; each pair either starts at a terminal or ends at a terminal
for num in range(1,26):
    print('snapshot',num)
    inst=[]
    while len(inst)<30:
        if random.randint(0,1):
            #start at terminal
            # rejection-sample a start terminal far from all chosen starts
            while True:
                st=pick(s)
                ok=True
                for ss in inst:
                    if vdist(st,ss[0]) <= radius:
                        ok=False
                        break
                if ok:
                    break
            # rejection-sample a non-terminal goal far from all chosen goals
            while True:
                en=picknot(s)
                ok=True
                for ss in inst:
                    if vdist(en,ss[1]) <= radius:
                        ok=False
                        break
                if ok:
                    break
            print('add',st,en)
            inst.append((st,en))
        else:
            #start somewhere else
            # rejection-sample a non-goal start far from all chosen starts
            while True:
                st=picknot(g)
                ok=True
                for ss in inst:
                    if vdist(st,ss[0]) <= radius:
                        ok=False
                        break
                if ok:
                    break
            # rejection-sample a goal terminal far from all chosen goals
            while True:
                en=pick(g)
                ok=True
                for ss in inst:
                    if vdist(en,ss[1]) <= radius:
                        ok=False
                        break
                if ok:
                    break
            print('add',st,en)
            inst.append((st,en))
    # check: every pair of instances respects the separation radius
    for i in range(len(inst)-1):
        for j in range(i+1,len(inst)):
            assert(vdist(inst[i][0],inst[j][0])>radius)
            assert(vdist(inst[i][1],inst[j][1])>radius)
    # write one "start goal" line per instance
    with open("snapshot-%d"%num,'w') as fp:
        for i in inst:
            fp.write("%d %d\n"%i)
# terminal to terminal: build 25 instance files with 20 pairs each, where every
# pair starts at a start terminal and ends at a goal terminal
for num in range(1,26):
    print('term2term',num)
    inst=[]
    while len(inst)<20:
        #start at terminal
        # rejection-sample a start terminal far from all chosen starts
        while True:
            st=pick(s)
            ok=True
            for ss in inst:
                if vdist(st,ss[0]) <= radius:
                    ok=False
                    break
            if ok:
                break
        # rejection-sample a goal terminal far from all chosen goals
        while True:
            en=pick(g)
            ok=True
            for ss in inst:
                if vdist(en,ss[1]) <= radius:
                    ok=False
                    break
            if ok:
                break
        print('add',st,en)
        inst.append((st,en))
    # check: every pair of instances respects the separation radius
    for i in range(len(inst)-1):
        for j in range(i+1,len(inst)):
            assert(vdist(inst[i][0],inst[j][0])>radius)
            assert(vdist(inst[i][1],inst[j][1])>radius)
    # write one "start goal" line per instance
    with open("term2term-%d"%num,'w') as fp:
        for i in inst:
            fp.write("%d %d\n"%i)
| [
"numpy.array",
"random.randint"
] | [((870, 890), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (884, 890), False, 'import random\n'), ((402, 413), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (410, 413), True, 'import numpy as np\n'), ((414, 425), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (422, 425), True, 'import numpy as np\n')] |
# AntiBiofilm Peptide Research
# Department of Computer Science and Engineering, Santa Clara University
# Author: <NAME>
# A python script that performs forward selection and hyperparameter optimization
# in order to find the best performing SVR model for the MBEC peptides
# Loss function used is RMSE
# The script dumps the features found from forward selection along with the
# average RMSE calculated with cross validation
# ------------------------------------------------------------------------------
# Libraries
# ------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import json
import sys
import copy
import warnings
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.utils.validation import column_or_1d
from sklearn.svm import SVR
from sklearn.feature_selection import RFE
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import RepeatedKFold
# silence warnings (presumably sklearn convergence/deprecation noise) so the
# single-line progress output below stays readable -- TODO confirm
warnings.filterwarnings("ignore")
# ------------------------------------------------------------------------------
# Variables
# ------------------------------------------------------------------------------
# maximum number of forward-selection rounds (one feature added per round)
num_features = 200
# SVR hyperparameter grids: regularization strength C and RBF kernel width gamma
C = [0.001, 0.01, 0.1, 1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 100, 1000]
gamma = [0.001, 0.01, 0.1, 1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 100]
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
def main():
    """Greedy forward feature selection for an SVR model on the MBEC data.

    Each round tentatively adds every not-yet-selected feature to the current
    set, grid-searches an RBF-kernel SVR over C and gamma with repeated 5-fold
    cross validation (RMSE loss), keeps the best candidate set, and dumps the
    running feature list and its RMSE to disk.
    """
    feat_list = []
    # Forward selection loop: one new feature is added per round
    for n in range(0, num_features + 1):
        print('\nFeatures: ' + str(n))
        peptides = pd.read_csv('../../data/mbec_training_data.csv')
        peptides = peptides.drop(columns=['Name', 'Seq'])
        labels = peptides.columns.values.tolist()
        labels.remove('MBEC(uM)')
        feat_RMSE = []
        top_feat_list = []
        # Evaluate each candidate feature added to the current selection
        for i, l in enumerate(labels):
            small_feat_list = copy.deepcopy(feat_list)
            if l in feat_list:
                continue
            small_feat_list.append(l)
            peptides_small = peptides.copy()
            for f in labels:
                if f not in small_feat_list:
                    peptides_small = peptides_small.drop(columns=[f])
            y = peptides_small['MBEC(uM)'].to_numpy()
            peptides_small = peptides_small.drop(columns=['MBEC(uM)'])
            num_feat = peptides_small.shape
            # Normalize features to [0, 1]
            min_max_scaler = preprocessing.MinMaxScaler()
            X_norm = min_max_scaler.fit_transform(peptides_small)
            pca_features = list(range(1, num_feat[1] + 1))
            avg_RMSE = []
            # Loops through each hyperparameter combination.
            # BUG FIX: the inner loop variable used to be named 'n', shadowing the
            # outer round counter, so mbec_fs_rmse.txt recorded the last PCA size
            # instead of the round number.
            # NOTE(review): PCA is never actually applied to X_norm, so iterating
            # over component counts repeats identical fits -- confirm intended.
            for n_comp in pca_features:
                for c in C:
                    sys.stdout.write('Feature Loop: %s PCA comp: %s c: %s \r' % (i, n_comp, c))
                    sys.stdout.flush()
                    for g in gamma:
                        SVR_rbf = SVR(kernel='rbf', C=c, gamma=g)
                        RMSE = []
                        kf = RepeatedKFold(n_splits=5, n_repeats = 20)
                        # Cross validation
                        for train_index, test_index in kf.split(X_norm):
                            X_train, X_test = X_norm[train_index], X_norm[test_index]
                            y_train, y_test = y[train_index], y[test_index]
                            y_train = y_train.reshape(-1,1)
                            y_train = column_or_1d(y_train, warn=False)
                            model = SVR_rbf.fit(X_train, y_train)
                            y_pred = model.predict(X_test)
                            test_error = mean_squared_error(y_test, y_pred)
                            RMSE.append(np.sqrt(test_error))
                        avg_RMSE.append(np.mean(RMSE))
            # Take minimum RMSE across all runs for this candidate feature
            feat_RMSE.append(np.min(avg_RMSE))
            top_feat_list.append(copy.deepcopy(small_feat_list))
        # Keep the candidate set with the lowest RMSE and persist progress
        idx = np.argmin(feat_RMSE)
        feat_list = copy.deepcopy(top_feat_list[idx])
        with open('mbec_fs_rmse.txt', 'a', encoding="utf-8") as f:
            f.write(str(n) + '\t' + str(np.around(feat_RMSE[idx], 3)) + '\n')
        with open('mbec_forward_selection_features.json', 'w') as f:
            json.dump(feat_list, f)
if __name__ == "__main__":
main() | [
"sklearn.model_selection.RepeatedKFold",
"numpy.mean",
"numpy.sqrt",
"pandas.read_csv",
"numpy.min",
"sklearn.metrics.mean_squared_error",
"numpy.around",
"sklearn.utils.validation.column_or_1d",
"copy.deepcopy",
"numpy.argmin",
"sys.stdout.flush",
"sklearn.svm.SVR",
"sklearn.preprocessing.M... | [((1012, 1045), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1035, 1045), False, 'import warnings\n'), ((1785, 1833), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/mbec_training_data.csv"""'], {}), "('../../data/mbec_training_data.csv')\n", (1796, 1833), True, 'import pandas as pd\n'), ((4430, 4450), 'numpy.argmin', 'np.argmin', (['feat_RMSE'], {}), '(feat_RMSE)\n', (4439, 4450), True, 'import numpy as np\n'), ((4471, 4504), 'copy.deepcopy', 'copy.deepcopy', (['top_feat_list[idx]'], {}), '(top_feat_list[idx])\n', (4484, 4504), False, 'import copy\n'), ((2156, 2180), 'copy.deepcopy', 'copy.deepcopy', (['feat_list'], {}), '(feat_list)\n', (2169, 2180), False, 'import copy\n'), ((2697, 2725), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (2723, 2725), False, 'from sklearn import preprocessing\n'), ((4741, 4764), 'json.dump', 'json.dump', (['feat_list', 'f'], {}), '(feat_list, f)\n', (4750, 4764), False, 'import json\n'), ((4262, 4278), 'numpy.min', 'np.min', (['avg_RMSE'], {}), '(avg_RMSE)\n', (4268, 4278), True, 'import numpy as np\n'), ((4313, 4343), 'copy.deepcopy', 'copy.deepcopy', (['small_feat_list'], {}), '(small_feat_list)\n', (4326, 4343), False, 'import copy\n'), ((3094, 3164), 'sys.stdout.write', 'sys.stdout.write', (["('Feature Loop: %s PCA comp: %s c: %s \\r' % (i, n, c))"], {}), "('Feature Loop: %s PCA comp: %s c: %s \\r' % (i, n, c))\n", (3110, 3164), False, 'import sys\n'), ((3183, 3201), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3199, 3201), False, 'import sys\n'), ((3272, 3303), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'C': 'c', 'gamma': 'g'}), "(kernel='rbf', C=c, gamma=g)\n", (3275, 3303), False, 'from sklearn.svm import SVR\n'), ((3368, 3407), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': '(5)', 'n_repeats': '(20)'}), '(n_splits=5, n_repeats=20)\n', (3381, 3407), 
False, 'from sklearn.model_selection import RepeatedKFold\n'), ((3787, 3820), 'sklearn.utils.validation.column_or_1d', 'column_or_1d', (['y_train'], {'warn': '(False)'}), '(y_train, warn=False)\n', (3799, 3820), False, 'from sklearn.utils.validation import column_or_1d\n'), ((3989, 4023), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4007, 4023), False, 'from sklearn.metrics import mean_squared_error\n'), ((4126, 4139), 'numpy.mean', 'np.mean', (['RMSE'], {}), '(RMSE)\n', (4133, 4139), True, 'import numpy as np\n'), ((4621, 4649), 'numpy.around', 'np.around', (['feat_RMSE[idx]', '(3)'], {}), '(feat_RMSE[idx], 3)\n', (4630, 4649), True, 'import numpy as np\n'), ((4064, 4083), 'numpy.sqrt', 'np.sqrt', (['test_error'], {}), '(test_error)\n', (4071, 4083), True, 'import numpy as np\n')] |
"""
Scripts to parse the gender data for the PhD recipients.
genderize.io
gender-api.com
"""
import urllib
import json
import yaml
import glob
import numpy as np
import astropy
from astropy.io import ascii
import requests
import os
### gender-api.com
# BUG FIX: os.getenv takes the environment-variable *name* as a string; the
# original passed the (undefined) variable itself, raising NameError at import.
GENDER_API_KEY = os.getenv('GENDER_API_KEY')
### genederize.io
GENDERIZE_API_KEY = os.getenv('GENDERIZE_API_KEY')
def querry_apis(in_file = '../data/tmp_names.dat', out_file='../data/tmp_names_with_gender.dat'):
    """
    Take a list of unique first names and run them through:
    gender-api.com
    genederize.io
    Write out a table with the responses.

    Args:
        in_file: path to a file with one first name per line.
        out_file: path of the tab-separated results table to write.
    """
    # context managers ensure both files are closed even if a request fails
    # (the original left out_file open on error and shadowed the parameter)
    with open(out_file, 'w') as f_out:
        f_out.write('name gender1 accuracy1 samples1 gender2 accuracy2 samples2\n')
        with open(in_file) as f:
            for line in f:
                name = line.strip()
                print(name)
                # gender-api.com lookup; parse the JSON payload once
                url1 = 'https://gender-api.com/get?name={}&key={}'.format(name, GENDER_API_KEY)
                result1 = requests.get(url1).json()
                if (result1['gender'] == 'unknown') and (result1['accuracy'] == 0):
                    f_out.write('{0}\t{1}\t{2}\t{2}\t'.format(name, 'None', np.nan))
                else:
                    f_out.write('{0}\t{1}\t{2}\t{3}\t'.format(name, result1['gender'], result1['accuracy'], result1['samples']))
                # genderize.io lookup
                url2 = 'https://api.genderize.io/?name={}'.format(name)
                result2 = requests.get(url2).json()
                print(result2)
                if result2['gender'] is None:
                    f_out.write('{0}\t{1}\t{1}\n'.format('None', np.nan))
                else:
                    f_out.write('{}\t{:5.0f}\t{}\n'.format(result2['gender'], result2['probability'] * 100, result2['count']))
def read_name_genders(file='../data/ads_unique_names_with_gender.dat'):
    """
    Build a name -> gender dictionary from the API results table.
    Names whose gender-api accuracy is not above 50% map to 'None'.
    """
    table = ascii.read(file)
    mapping = {}
    for row in table:
        if float(row['accuracy1']) > 50.:
            mapping[row['name']] = row['gender1']
        else:
            mapping[row['name']] = 'None'
    return mapping
def parse_names(in_file = '../data/ads_first_names.dat'):
    """
    Split the input into full first names and bare initials (e.g. 'J.'),
    then write the unique full names to ../data/ads_unique_names.dat.
    """
    names, first_initials = [], []
    with open(in_file, 'r') as f:
        for raw in f:
            entry = raw.strip()
            # a two-character token ending in '.' is just an initial
            if len(entry) == 2 and entry.endswith('.'):
                first_initials.append(entry)
            else:
                names.append(entry)
    unq_names = np.unique(names)
    print("There are {} names and {} unique names.".format(len(names), len(unq_names)))
    print("There are {} entries with only a first initial.".format(len(first_initials)))
    with open('../data/ads_unique_names.dat', 'w') as f_out:
        for name in unq_names:
            f_out.write(name)
            f_out.write('\n')
    print("Output file is ads_unique_names.dat")
def gender_breakdown():
    """Tally gender assignments of PhD recipients per year (1970-2018).

    Reads ../data/ads_<year>_parse.dat tables, looks each first name up in the
    name->gender mapping, and appends per-year counts to
    ../data/gender_breakdown.dat.
    """
    gender_names = read_name_genders()
    years = np.arange(1970, 2019, 1)
    # start from a clean output file (append mode below)
    os.system('rm ../data/gender_breakdown.dat')
    file_out = open('../data/gender_breakdown.dat','a')
    file_out.write('# year all women men nodata\n')
    for year in years:
        file = '../data/ads_{}_parse.dat'.format(year)
        gn = []
        print(file)
        data = ascii.read(file, delimiter='|', names=['n','name','school','year','foo'])
        for i in range(len(data)):
            try:
                # names are "Last, First ..." -- take the part after the comma
                tmp_name = data[i]['name'].split(',')[1].split()
                # "J. Robert"-style entries: use the second token as the name
                if (len(tmp_name[0]) == 2) and (tmp_name[0][-1] == '.') and (len(tmp_name) > 1) and (tmp_name[1][0] != '-'):
                    name = tmp_name[1]
                    name = name.replace('.','').replace('(','').replace(')','')
                    if name.startswith('-'):
                        name = name[1:]
                    try:
                        gn.append(gender_names[name])
                    except:
                        # name not in the mapping -> no gender data
                        gn.append('None')
                elif (len(tmp_name[0]) == 2) and (tmp_name[0][-1] == '.') and (len(tmp_name) == 1):
                    # only an initial -- gender cannot be assigned
                    gn.append('None')
                else:
                    name = tmp_name[0]
                    name = name.replace('.','').replace('(','').replace(')','')
                    if name.startswith('-'):
                        name = name[1:]
                    try:
                        gn.append(gender_names[name])
                    except:
                        gn.append('None')
                    print(name)
            except:
                # malformed row (e.g. no comma in the name field) -- just log it
                print(data[i]['name'])
        gn = np.array(gn, dtype='str')
        file_out.write("{}\t{}\t{}\t{}\t{}\n".format(year, len(gn), np.sum(gn == 'female'), np.sum(gn == 'male'), np.sum(gn == 'None')))
        print("{}\t{}\t{}\t{}\t{}\n".format(year, len(gn), np.sum(gn == 'female'), np.sum(gn == 'male'), np.sum(gn == 'None')))
    print("Output file is ../data/gender_breakdown.dat")
    file_out.close()
def compare_gender_assignments():
    """Print the rows where the two gender APIs disagree with each other."""
    out_file = '../data/ads_unique_names_with_gender.dat'
    count_file = ascii.read(out_file)
    # gender-api says female, genderize says male
    fm_mask = ((count_file['gender1'] == 'female') & (count_file['gender2']=='male'))
    print(count_file[fm_mask])
    # gender-api says male, genderize says female
    mf_mask = ((count_file['gender1'] == 'male') & (count_file['gender2']=='female'))
    print(count_file[mf_mask])
"numpy.unique",
"os.getenv",
"requests.get",
"numpy.array",
"numpy.sum",
"os.system",
"astropy.io.ascii.read",
"numpy.arange"
] | [((271, 296), 'os.getenv', 'os.getenv', (['GENDER_API_KEY'], {}), '(GENDER_API_KEY)\n', (280, 296), False, 'import os\n'), ((336, 364), 'os.getenv', 'os.getenv', (['GENDERIZE_API_KEY'], {}), '(GENDERIZE_API_KEY)\n', (345, 364), False, 'import os\n'), ((2254, 2270), 'astropy.io.ascii.read', 'ascii.read', (['file'], {}), '(file)\n', (2264, 2270), False, 'from astropy.io import ascii\n'), ((2741, 2757), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (2750, 2757), True, 'import numpy as np\n'), ((3208, 3232), 'numpy.arange', 'np.arange', (['(1970)', '(2019)', '(1)'], {}), '(1970, 2019, 1)\n', (3217, 3232), True, 'import numpy as np\n'), ((3235, 3279), 'os.system', 'os.system', (['"""rm ../data/gender_breakdown.dat"""'], {}), "('rm ../data/gender_breakdown.dat')\n", (3244, 3279), False, 'import os\n'), ((4864, 4884), 'astropy.io.ascii.read', 'ascii.read', (['out_file'], {}), '(out_file)\n', (4874, 4884), False, 'from astropy.io import ascii\n'), ((3489, 3566), 'astropy.io.ascii.read', 'ascii.read', (['file'], {'delimiter': '"""|"""', 'names': "['n', 'name', 'school', 'year', 'foo']"}), "(file, delimiter='|', names=['n', 'name', 'school', 'year', 'foo'])\n", (3499, 3566), False, 'from astropy.io import ascii\n'), ((4400, 4425), 'numpy.array', 'np.array', (['gn'], {'dtype': '"""str"""'}), "(gn, dtype='str')\n", (4408, 4425), True, 'import numpy as np\n'), ((938, 956), 'requests.get', 'requests.get', (['url1'], {}), '(url1)\n', (950, 956), False, 'import requests\n'), ((1369, 1387), 'requests.get', 'requests.get', (['url2'], {}), '(url2)\n', (1381, 1387), False, 'import requests\n'), ((4491, 4513), 'numpy.sum', 'np.sum', (["(gn == 'female')"], {}), "(gn == 'female')\n", (4497, 4513), True, 'import numpy as np\n'), ((4515, 4535), 'numpy.sum', 'np.sum', (["(gn == 'male')"], {}), "(gn == 'male')\n", (4521, 4535), True, 'import numpy as np\n'), ((4537, 4557), 'numpy.sum', 'np.sum', (["(gn == 'None')"], {}), "(gn == 'None')\n", (4543, 4557), True, 'import numpy 
as np\n'), ((4616, 4638), 'numpy.sum', 'np.sum', (["(gn == 'female')"], {}), "(gn == 'female')\n", (4622, 4638), True, 'import numpy as np\n'), ((4640, 4660), 'numpy.sum', 'np.sum', (["(gn == 'male')"], {}), "(gn == 'male')\n", (4646, 4660), True, 'import numpy as np\n'), ((4662, 4682), 'numpy.sum', 'np.sum', (["(gn == 'None')"], {}), "(gn == 'None')\n", (4668, 4682), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from tqdm import tqdm
from joblib import Parallel, delayed
import os
# fingerprint bit width and number of samples stored in the memmapped file
bitsize = 1024
total_sample = 110913349
data_save_folder = './data'
file = './data/%s_%s.npy' % (total_sample, bitsize)
# memory-map the (total_sample x bitsize) boolean matrix instead of loading it
# NOTE(review): np.bool is deprecated/removed in recent NumPy -- confirm the
# pinned NumPy version, or switch to np.bool_ / bool
f = np.memmap(file, dtype = np.bool, shape = (total_sample, bitsize))
def _sum(memmap, x):
    # number of set bits in row x of the memmap
    return memmap[x, ].sum()
P = Parallel(n_jobs=16)  # 16 worker processes
# per-row popcounts, computed in parallel with a progress bar
res = P(delayed(_sum)(f, i) for i in tqdm(range(total_sample)))
pd.Series(res).to_pickle('./data/%s_%s_NumOnBits.pkl' % (total_sample, bitsize))
print('Done!') | [
"joblib.Parallel",
"joblib.delayed",
"numpy.memmap",
"pandas.Series"
] | [((236, 297), 'numpy.memmap', 'np.memmap', (['file'], {'dtype': 'np.bool', 'shape': '(total_sample, bitsize)'}), '(file, dtype=np.bool, shape=(total_sample, bitsize))\n', (245, 297), True, 'import numpy as np\n'), ((360, 379), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(16)'}), '(n_jobs=16)\n', (368, 379), False, 'from joblib import Parallel, delayed\n'), ((448, 462), 'pandas.Series', 'pd.Series', (['res'], {}), '(res)\n', (457, 462), True, 'import pandas as pd\n'), ((390, 403), 'joblib.delayed', 'delayed', (['_sum'], {}), '(_sum)\n', (397, 403), False, 'from joblib import Parallel, delayed\n')] |
"""
---------------------------------------------------------------------
-- Author: <NAME>
---------------------------------------------------------------------
Util functions for partitioning input data
"""
import numpy as np
def partition_train_val(x_train, y_train, proportion, num_classes, shuffle=True):
    """
    Split data into train and validation sets with a per-class (stratified) ratio.
    Args:
        x_train: (array) input data
        y_train: (array) labels of the input data
        proportion: (float) fraction of each class assigned to the train set (0.0-1.0)
        num_classes: (int) number of labels
        shuffle: (bool) whether to shuffle the concatenated index arrays
    Returns:
        (train_data, train_labels, val_data, val_labels)
    """
    train_idx = np.array([], dtype=np.int32)
    val_idx = np.array([], dtype=np.int32)
    for label in range(0, num_classes):
        # indices of all samples carrying this label
        class_idx = np.where(y_train == label)[0]
        n_class = class_idx.shape[0]
        # random split of this class into train/val parts
        perm = np.random.permutation(n_class)
        n_train = int(proportion * n_class)
        train_idx = np.hstack([train_idx, class_idx[perm[:n_train]]])
        val_idx = np.hstack([val_idx, class_idx[perm[n_train:]]])
    if shuffle:
        np.random.shuffle(train_idx)
        np.random.shuffle(val_idx)
    return (x_train[train_idx], y_train[train_idx],
            x_train[val_idx], y_train[val_idx])
return train_data, train_labels, val_data, val_labels
def create_semisupervised_dataset(x, y, num_classes, num_labeled=100):
    """
    Partition data into labeled and unlabeled data given labeled size per class
    Args:
        x: (array) corresponding array containing the input data
        y: (array) corresponding array containing the labels of the input data
        num_classes: (int) number of classes
        num_labeled: (int) total number of labeled examples (balanced over classes)
    Returns:
        x_labeled: (array) corresponding array containing partitioned labeled data
        y_labeled: (array) corresponding array containing partitioned labels of labeled data
        x_unlabeled: (array) corresponding array containing partitioned unlabeled data
        y_unlabeled: (array) corresponding array containing partitioned labels of unlabeled data
    Raises:
        ValueError: if num_labeled is not divisible by num_classes
    """
    if num_labeled % num_classes != 0:
        # BUG FIX: the original raised a plain string, which is itself a
        # TypeError in Python 3; raise a proper exception instead
        raise ValueError("wished number of labeled samples not divisible by number of classes")
    # get number of labeled data per class (balanced partition)
    labeled_per_class = num_labeled//num_classes
    x_labeled = [0] * num_classes
    y_labeled = [0] * num_classes
    x_unlabeled = [0] * num_classes
    y_unlabeled = [0] * num_classes
    # iterate over the number of classes
    for i in range(num_classes):
        # get indices of a specific class
        indices = np.where(y == i)[0]
        # randomly partition the indices: first labeled_per_class are labeled
        indices = np.random.permutation(indices)
        x_labeled[i] = x[indices[:labeled_per_class]]
        y_labeled[i] = y[indices[:labeled_per_class]]
        x_unlabeled[i] = x[indices[labeled_per_class:]]
        y_unlabeled[i] = y[indices[labeled_per_class:]]
    return np.vstack(x_labeled), np.hstack(y_labeled), np.vstack(x_unlabeled), np.hstack(y_unlabeled)
def flatten_array(x):
    """Collapse every dimension of *x* after the first into one axis.
    Args:
        x: (array) input of shape (N, d1, d2, ...)
    Returns:
        (array) reshaped data of shape (N, d1*d2*...)
    """
    flat_len = np.prod(x.shape[1:])
    return x.reshape(-1, flat_len)
| [
"numpy.prod",
"numpy.hstack",
"numpy.where",
"numpy.random.permutation",
"numpy.array",
"numpy.vstack",
"numpy.random.shuffle"
] | [((1105, 1133), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (1113, 1133), True, 'import numpy as np\n'), ((1157, 1185), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (1165, 1185), True, 'import numpy as np\n'), ((4392, 4412), 'numpy.prod', 'np.prod', (['x.shape[1:]'], {}), '(x.shape[1:])\n', (4399, 4412), True, 'import numpy as np\n'), ((1478, 1512), 'numpy.random.permutation', 'np.random.permutation', (['num_samples'], {}), '(num_samples)\n', (1499, 1512), True, 'import numpy as np\n'), ((1834, 1880), 'numpy.hstack', 'np.hstack', (['[train_data_indices, train_subdata]'], {}), '([train_data_indices, train_subdata])\n', (1843, 1880), True, 'import numpy as np\n'), ((1908, 1950), 'numpy.hstack', 'np.hstack', (['[val_data_indices, val_subdata]'], {}), '([val_data_indices, val_subdata])\n', (1917, 1950), True, 'import numpy as np\n'), ((1974, 2011), 'numpy.random.shuffle', 'np.random.shuffle', (['train_data_indices'], {}), '(train_data_indices)\n', (1991, 2011), True, 'import numpy as np\n'), ((2018, 2053), 'numpy.random.shuffle', 'np.random.shuffle', (['val_data_indices'], {}), '(val_data_indices)\n', (2035, 2053), True, 'import numpy as np\n'), ((3802, 3832), 'numpy.random.permutation', 'np.random.permutation', (['indices'], {}), '(indices)\n', (3823, 3832), True, 'import numpy as np\n'), ((4070, 4090), 'numpy.vstack', 'np.vstack', (['x_labeled'], {}), '(x_labeled)\n', (4079, 4090), True, 'import numpy as np\n'), ((4092, 4112), 'numpy.hstack', 'np.hstack', (['y_labeled'], {}), '(y_labeled)\n', (4101, 4112), True, 'import numpy as np\n'), ((4114, 4136), 'numpy.vstack', 'np.vstack', (['x_unlabeled'], {}), '(x_unlabeled)\n', (4123, 4136), True, 'import numpy as np\n'), ((4138, 4160), 'numpy.hstack', 'np.hstack', (['y_unlabeled'], {}), '(y_unlabeled)\n', (4147, 4160), True, 'import numpy as np\n'), ((1324, 1346), 'numpy.where', 'np.where', (['(y_train == i)'], {}), '(y_train == i)\n', 
(1332, 1346), True, 'import numpy as np\n'), ((3693, 3709), 'numpy.where', 'np.where', (['(y == i)'], {}), '(y == i)\n', (3701, 3709), True, 'import numpy as np\n')] |
# Face-region name -> list of landmark indices used to outline that region.
# NOTE(review): the indices appear to be MediaPipe Face Mesh landmark IDs
# (468-point topology) — confirm against the keypoint detector in use.
annotations_dic = \
{"lipsUpperOuter": [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291,78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308],
"lipsLowerOuter": [146, 91, 181, 84, 17, 314, 405, 321, 375, 291,78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308],
"lipsUpperInner": [78, 191, 80, 81, 82, 13, 312, 311, 310, 415, 308],
"lipsLowerInner": [78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308],
"rightEyeUpper0": [246, 161, 160, 159, 158, 157, 173],
"rightEyeLower0": [33, 7, 163, 144, 145, 153, 154, 155, 133],
"rightEyeUpper1": [247, 30, 29, 27, 28, 56, 190],
"rightEyeLower1": [130, 25, 110, 24, 23, 22, 26, 112, 243],
"rightEyeUpper2": [113, 225, 224, 223, 222, 221, 189],
"rightEyeLower2": [113, 225, 224, 223, 222, 221, 189,226, 31, 228, 229, 230, 231, 232, 233, 244],
"rightEyeLower3": [143, 111, 117, 118, 119, 120, 121, 128, 245],
"rightEyebrowUpper": [35, 124, 46, 53, 52, 65,156, 70, 63, 105, 66, 107, 55, 193],
"rightEyebrowLower": [35, 124, 46, 53, 52, 65],
"leftEyeUpper0": [466, 388, 387, 386, 385, 384, 398],
"leftEyeLower0": [263, 249, 390, 373, 374, 380, 381, 382, 362],
"leftEyeUpper1": [467, 260, 259, 257, 258, 286, 414],
"leftEyeLower1": [359, 255, 339, 254, 253, 252, 256, 341, 463],
"leftEyeUpper2": [342, 445, 444, 443, 442, 441, 413],
"leftEyeLower2": [446, 261, 448, 449, 450, 451, 452, 453, 464],
"leftEyeLower3": [372, 340, 346, 347, 348, 349, 350, 357, 465],
"leftEyebrowUpper": [383, 300, 293, 334, 296, 336, 285, 417,265, 353, 276, 283, 282, 295],
"leftEyebrowLower": [265, 353, 276, 283, 282, 295],
"midwayBetweenEyes": [168],
"noseTip": [1],
"noseBottom": [2],
"noseRightCorner": [98],
"noseLeftCorner": [327],
"rightCheek": [50,101,36,206,207,187],
#[117,118,101,36,205,187,137,234,117]
"leftCheek": [330,266,426,427,411,280]
}
from .custompoly import customfillpoly
import cv2
import numpy as np
# import matplotlib.pyplot as plt
# from blendtest import *
def sort_poly_fill_list(chk):
    """Reorder polygon points so they can be filled as a closed contour.

    Walks the point list until the x coordinate first decreases; the points
    up to and including that position are kept in order, and the remaining
    points are appended sorted by (x, y) in descending order.

    Args:
        chk: list of (x, y) tuples.

    Returns:
        A new, reordered list of (x, y) tuples; *chk* is not modified.
    """
    # BUG FIX: with fewer than two points the loop body never runs and `i`
    # is unbound (NameError in the original); short lists need no sorting.
    if len(chk) < 2:
        return list(chk)
    for i in range(len(chk)-1):
        if(chk[i+1][0] < chk[i][0]):
            break
    chkfirst = chk[:i+1]
    chksecond = sorted(chk[i+1:], key=lambda k: [k[0], k[1]], reverse=True)
    # (leftover debug print of chkfirst/chksecond removed)
    chkfirst.extend(chksecond)
    return chkfirst
def fill_part(image, keypoints, part, setcolor):
    """Fill one annotated face region of *image* with *setcolor*.

    Args:
        image: image array (numpy, HxWxC) to draw on.
        keypoints: indexable collection of dicts with 'X'/'Y' pixel
            coordinates, indexed by the landmark ids in ``annotations_dic``.
        part: region name; must be a key of ``annotations_dic``.
        setcolor: fill color passed to ``cv2.fillPoly`` / the blend.

    Returns:
        The image with the region filled.
    """
    temp = []
    for index in annotations_dic[part]:
        temp.append((int(keypoints[index]['X']), int(keypoints[index]['Y'])))
    # BUG FIX: the original condition was `part == "leftCheek" or
    # "rightCheek"`, which is always truthy (a non-empty string), so the
    # else branch was unreachable; test membership instead.
    if part in ("leftCheek", "rightCheek"):
        mask = np.zeros((image.shape[0], image.shape[1], 3))
        temp = sort_poly_fill_list(temp)
        mask = cv2.fillPoly(mask, np.int32([temp]), (255, 255, 255), lineType=cv2.LINE_AA)
        # NOTE(review): do_blending appears to come from the commented-out
        # `from blendtest import *` above — confirm it is in scope at runtime.
        image = do_blending(image, setcolor, mask)
        cv2.imshow("region mask" + str(part), mask)
        cv2.imwrite("mask" + str(part) + ".png", mask)
    else:
        mask = np.zeros((image.shape[0], image.shape[1], 3))
        temp = sort_poly_fill_list(temp)
        # draw the region directly on the image for non-cheek parts
        cv2.fillPoly(image, np.int32([temp]), setcolor, lineType=cv2.LINE_AA)
        mask = cv2.fillPoly(mask, np.int32([temp]), (255, 255, 255), lineType=cv2.LINE_AA)
        cv2.imshow("region mask" + str(part), mask)
        cv2.imwrite("mask" + str(part) + ".png", mask)
    return image
#
# chk = [(206, 325),
# (212, 337),
# (221, 348),
# (234, 355),
# (250, 358),
# (266, 355),
# (278, 347),
# (287, 335),
# (292, 323),
# (296, 311),
# (209, 314),
# (215, 326),
# (220, 332),
# (228, 338),
# (237, 342),
# (250, 344),
# (262, 342),
# (272, 337),
# (279, 330),
# (283, 324),
# (289, 312),
# ]
#
# mask = np.zeros((400,400))
# chkfirst = sort_poly_fill_list(chk)
# mask = cv2.fillPoly(mask, np.int32([chkfirst]), (255, 255, 255))
# # mask = customfillpoly(mask,chk)
#
# mask2 = np.zeros((400,400))
# for x,y in chk:
# cv2.circle(mask2,(x,y),1,255,1)
#
# cv2.imshow("mask1",mask)
# cv2.imshow("mask2",mask2) | [
"numpy.zeros",
"numpy.int32"
] | [((2534, 2579), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1], 3)'], {}), '((image.shape[0], image.shape[1], 3))\n', (2542, 2579), True, 'import numpy as np\n'), ((2975, 3020), 'numpy.zeros', 'np.zeros', (['(image.shape[0], image.shape[1], 3)'], {}), '((image.shape[0], image.shape[1], 3))\n', (2983, 3020), True, 'import numpy as np\n'), ((2763, 2779), 'numpy.int32', 'np.int32', (['[temp]'], {}), '([temp])\n', (2771, 2779), True, 'import numpy as np\n'), ((3122, 3138), 'numpy.int32', 'np.int32', (['[temp]'], {}), '([temp])\n', (3130, 3138), True, 'import numpy as np\n'), ((3202, 3218), 'numpy.int32', 'np.int32', (['[temp]'], {}), '([temp])\n', (3210, 3218), True, 'import numpy as np\n')] |
__copyright__ = "Copyright (c) Microsoft Corporation and Mila - Quebec AI Institute"
__license__ = "MIT"
"""Metrics for MDPs
"""
import numpy as np
import ot
from segar.factors.number_factors import NumericFactor
from segar.factors.bools import BooleanFactor
from segar.factors.arrays import VectorFactor
from segar.metrics import wasserstein_distance
def task_set_init_dist(tasks1: list, tasks2: list) -> float:
    """Distance between the initializations of two sets of tasks.

    Computes a W-2 distance so that sets of factors stay concordant within
    thing and task membership; the underlying cost is squared euclidean.
    This brings distributional assumptions that may not reflect the true
    sampling distribution of the factors.

    Note: if the tasks have not been sampled from, this returns 0.

    :param tasks1: First set of tasks (must have been sampled from).
    :param tasks2: Second set of tasks (must have been sampled from).
    :return: W-2 distance between the task sets.
    """
    # Initial states for every task on both sides.
    init_states_1 = [t.initial_state for t in tasks1]
    init_states_2 = [t.initial_state for t in tasks2]

    # Things carry different factor sets, so collect every distinct ordered
    # set of comparable factor types that appears on any thing.
    factor_sets = []
    for state in init_states_1 + init_states_2:
        for thing in state:
            fs = [
                f
                for f in thing.keys()
                if issubclass(f, (NumericFactor, BooleanFactor, VectorFactor))
            ]
            if fs not in factor_sets:
                factor_sets.append(fs)

    def _set_distance(group_a, group_b, fs) -> float:
        """W-distance between the things of two tasks restricted to one
        factor set."""
        subset_a = [
            thing
            for thing in group_a
            if all(thing.has_factor(f) for f in fs)
        ]
        subset_b = [
            thing
            for thing in group_b
            if all(thing.has_factor(f) for f in fs)
        ]
        if len(subset_a) == 0 and len(subset_b) == 0:
            return 0.0
        if len(subset_a) == 0 or len(subset_b) == 0:
            return 100.0  # large penalty when only one side has this set
        mat_a = np.concatenate(
            [thing.to_numpy(fs)[None, :] for thing in subset_a]
        )
        mat_b = np.concatenate(
            [thing.to_numpy(fs)[None, :] for thing in subset_b]
        )
        return wasserstein_distance(mat_a, mat_b)

    # Pairwise squared distances between every pair of tasks: this is the
    # cost matrix for the outer OT problem (cost is squared euclidean).
    n1 = len(init_states_1)
    n2 = len(init_states_2)
    cost = np.zeros((n1, n2))
    for i, state_i in enumerate(init_states_1):
        for j, state_j in enumerate(init_states_2):
            cost[i, j] = sum(
                _set_distance(state_i, state_j, fs) ** 2
                for fs in factor_sets
            )

    # Uniform weights over tasks; solve the OT problem and take the root.
    uniform_1 = np.ones((n1,)) / float(n1)
    uniform_2 = np.ones((n2,)) / float(n2)
    return np.sqrt(ot.emd2(uniform_1, uniform_2, cost, numItermax=100000))
| [
"ot.emd2",
"numpy.zeros",
"segar.metrics.wasserstein_distance",
"numpy.ones"
] | [((1981, 1999), 'numpy.zeros', 'np.zeros', (['(n1, n2)'], {}), '((n1, n2))\n', (1989, 1999), True, 'import numpy as np\n'), ((2821, 2849), 'segar.metrics.wasserstein_distance', 'wasserstein_distance', (['s1', 's2'], {}), '(s1, s2)\n', (2841, 2849), False, 'from segar.metrics import wasserstein_distance\n'), ((3377, 3391), 'numpy.ones', 'np.ones', (['(n1,)'], {}), '((n1,))\n', (3384, 3391), True, 'import numpy as np\n'), ((3413, 3427), 'numpy.ones', 'np.ones', (['(n2,)'], {}), '((n2,))\n', (3420, 3427), True, 'import numpy as np\n'), ((3459, 3513), 'ot.emd2', 'ot.emd2', (['w1', 'w2', 'pairwise_distances'], {'numItermax': '(100000)'}), '(w1, w2, pairwise_distances, numItermax=100000)\n', (3466, 3513), False, 'import ot\n')] |
import sys, os
from read_struc import read_struc
from math import sin, cos
import numpy as np
def euler2rotmat(phi, ssi, rot):
    """Convert Euler angles (phi, ssi, rot), in radians, into a 3x3
    rotation matrix returned as a tuple of three row tuples."""
    cos_phi, sin_phi = cos(phi), sin(phi)
    cos_ssi, sin_ssi = cos(ssi), sin(ssi)
    cos_rot, sin_rot = cos(rot), sin(rot)
    # shared pairwise products (same association order as the reference
    # formulation, so results are bit-identical)
    cscp = cos_ssi * cos_phi
    cssp = cos_ssi * sin_phi
    sscp = sin_ssi * cos_phi
    sssp = sin_ssi * sin_phi
    row1 = (cos_rot * cscp + sin_rot * sin_phi,
            sin_rot * cscp - cos_rot * sin_phi,
            sscp)
    row2 = (cos_rot * cssp - sin_rot * cos_phi,
            sin_rot * cssp + cos_rot * cos_phi,
            sssp)
    row3 = (-cos_rot * sin_ssi,
            -sin_rot * sin_ssi,
            cos_ssi)
    return (row1, row2, row3)
# Script: convert a structure .dat file (argv[1]) into per-structure lists of
# 4x4 homogeneous transform matrices, printed as JSON on stdout.
datfile = sys.argv[1]
header, strucs = read_struc(open(datfile))
strucs = list(strucs)
# Parse one "#pivot <n> <x> <y> <z>" header line per molecule; the asserts
# enforce the expected file format (no "auto" pivots, non-centered input).
pivots = []
for h in header:
    if not h.startswith("#pivot"):
        h = h.rstrip()
        if h.startswith("#centered"): assert h.endswith(" false"), h
        continue
    assert not h.startswith("#pivot auto"), h
    hh = h.split()
    # pivots must appear in order: the index field matches the running count
    assert hh[1] == str(len(pivots)+1), h
    assert len(hh) == 5, h
    pivot = [float(v) for v in hh[2:5]]
    pivots.append(np.array(pivot))
results = []
for struc in strucs:
    result_struc = []
    for lnr, l in enumerate(struc[1]):
        # each line: 3 Euler angles followed by a 3-vector translation
        ll = [float(v) for v in l.split()]
        assert len(ll) == 6 #no ensembles
        rotmat = euler2rotmat(*ll[:3])
        rotmat = np.array(rotmat)
        trans = np.array(ll[3:6])
        p = pivots[lnr]
        # shift the translation so rotation is applied about the pivot point
        pp = (-p * rotmat).sum(axis=1) + p
        trans += pp
        # assemble the 4x4 homogeneous transform
        result = np.eye(4)
        result[:3,:3] = rotmat
        result[:3,3] = trans
        result[3][3] = 1
        result_struc.append(result.tolist())
    results.append(result_struc)
import json
print(json.dumps(results, indent=2))
| [
"numpy.eye",
"json.dumps",
"math.cos",
"numpy.array",
"math.sin"
] | [((133, 141), 'math.cos', 'cos', (['ssi'], {}), '(ssi)\n', (136, 141), False, 'from math import sin, cos\n'), ((149, 157), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (152, 157), False, 'from math import sin, cos\n'), ((165, 173), 'math.sin', 'sin', (['ssi'], {}), '(ssi)\n', (168, 173), False, 'from math import sin, cos\n'), ((181, 189), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (184, 189), False, 'from math import sin, cos\n'), ((259, 267), 'math.cos', 'cos', (['rot'], {}), '(rot)\n', (262, 267), False, 'from math import sin, cos\n'), ((277, 285), 'math.sin', 'sin', (['rot'], {}), '(rot)\n', (280, 285), False, 'from math import sin, cos\n'), ((1601, 1630), 'json.dumps', 'json.dumps', (['results'], {'indent': '(2)'}), '(results, indent=2)\n', (1611, 1630), False, 'import json\n'), ((1001, 1016), 'numpy.array', 'np.array', (['pivot'], {}), '(pivot)\n', (1009, 1016), True, 'import numpy as np\n'), ((1255, 1271), 'numpy.array', 'np.array', (['rotmat'], {}), '(rotmat)\n', (1263, 1271), True, 'import numpy as np\n'), ((1288, 1305), 'numpy.array', 'np.array', (['ll[3:6]'], {}), '(ll[3:6])\n', (1296, 1305), True, 'import numpy as np\n'), ((1410, 1419), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1416, 1419), True, 'import numpy as np\n')] |
# Script: run a saved Keras sound-classification model over a CSV recording,
# printing per-channel class probabilities and a JSON summary.
import numpy as np
from loadsounds import parse_file, load_data_definition, reshape_dataset
data_def_file = 'sounddata-csv.yml'
datafile = 'cherry-sound-20200218113643319806.csv'
# data_chunk: chunk size/definition from the YAML data description
data_chunk = load_data_definition(data_def_file)
# csv_dataset: (count, per-channel data) — parsed with by_channel grouping
csv_dataset = parse_file(datafile,np.array([]),data_chunk, by_channel=True)
#sound_dataset = np.zeros((csv_dataset[0], 1, 1, data_chunk))
#index = 0
#for d in csv_dataset[1]:
#    sound_dataset[index][0][0] = d
#    index = index + 1
import datetime
import json
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
print('tensorflow version - '+tf.__version__)
model_file_path ='sound-classification-csv-model.h5'
# TODO: derive the model file name from configuration instead of hard-coding
model = models.load_model(model_file_path)
# channel -> list of [prob_class0, prob_class1] prediction rows
predicted_results ={}
for channel, csvds in enumerate(csv_dataset[1]):
    sound_dataset = reshape_dataset(csvds, data_chunk)
    predicted = model.predict(sound_dataset)
    print('channel:{}'.format(channel))
    result = predicted.tolist()
    for r in result:
        print('{0}<->{1}'.format(r[0],r[1]))
    predicted_results[channel] = result
# assemble a timestamped JSON message of all per-channel predictions
outputMessageJson = {'timestamp':'{0:%Y/%m/%dT%H:%M:%S.%f}'.format(datetime.datetime.now()),'predicted': []}
for p in predicted_results.keys():
    outputMessageJson['predicted'].append({'channel':str(p), 'predicted':predicted_results[p]})
print('result - {}'.format(json.dumps(outputMessageJson)))
"loadsounds.reshape_dataset",
"loadsounds.load_data_definition",
"json.dumps",
"numpy.array",
"datetime.datetime.now",
"tensorflow.keras.models.load_model"
] | [((193, 228), 'loadsounds.load_data_definition', 'load_data_definition', (['data_def_file'], {}), '(data_def_file)\n', (213, 228), False, 'from loadsounds import parse_file, load_data_definition, reshape_dataset\n'), ((719, 753), 'tensorflow.keras.models.load_model', 'models.load_model', (['model_file_path'], {}), '(model_file_path)\n', (736, 753), False, 'from tensorflow.keras import datasets, layers, models\n'), ((263, 275), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (271, 275), True, 'import numpy as np\n'), ((847, 881), 'loadsounds.reshape_dataset', 'reshape_dataset', (['csvds', 'data_chunk'], {}), '(csvds, data_chunk)\n', (862, 881), False, 'from loadsounds import parse_file, load_data_definition, reshape_dataset\n'), ((1173, 1196), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1194, 1196), False, 'import datetime\n'), ((1374, 1403), 'json.dumps', 'json.dumps', (['outputMessageJson'], {}), '(outputMessageJson)\n', (1384, 1403), False, 'import json\n')] |
import logging
import sys
from time import time
import numpy as np
import itertools as it
import csv
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from zensols.actioncli import persisted
from zensols.dltools import CudaConfig
from zensols.dlqaclass import (
QADataLoader,
Net,
)
logger = logging.getLogger(__name__)
class IrDataset(object):
    """Adapts a list of (paragraph, question, label) triples to the torch
    dataset indexing protocol (``__getitem__``/``__len__``)."""

    def __init__(self, data, cuda):
        self.data = data
        self.cuda = cuda

    def __getitem__(self, i):
        # each data point is (paragraph, question, label, ...)
        point = self.data[i]
        para, ques, label = point[0], point[1], point[2]
        label_tensor = self.cuda.singleton(label, dtype=torch.long)
        para_id = self.cuda.singleton(para.id, dtype=torch.long)
        matrices = (para.matrix, ques.matrix,
                    para.feature_matrix, ques.feature_matrix,
                    para.common_matrix(ques))
        return (matrices, label_tensor, para_id,)

    def __len__(self):
        return len(self.data)
class QAModelManager(object):
    """Builds, trains, evaluates and applies the QA paragraph-ranking
    network (:class:`Net`), driven by the ``nn_model`` configuration
    section.
    """
    # configuration section name read for all model options/paths
    SECTION = 'nn_model'
    def __init__(self, config):
        """Initialize from an application config.

        :param config: application configuration providing ``populate`` and
                       ``get_option_path`` (zensols config object)
        """
        self.config = config
        # binary classification
        self.n_labels = 2
        # CUDA configuration resource
        self.cuda = CudaConfig()
        # model parameters
        self.cnf = config.populate(section=self.SECTION)
        # whether or not to debug the network
        self.debug = self.cnf.debug
        # location of where to store and load the model
        self.model_path = config.get_option_path('model_path', self.SECTION)
        # results paths
        self.validation_path = config.get_option_path('validation_path', self.SECTION)
        self.test_path = config.get_option_path('test_path', self.SECTION)
        self.pred_path = config.get_option_path('pred_path', self.SECTION)
        if self.debug:
            logger.setLevel(logging.DEBUG)
    @property
    @persisted('_data_loader')
    def data_loader(self):
        """The corpus loader, memoized across calls via ``@persisted``."""
        return QADataLoader(self.config)
    @property
    def dataset(self):
        """Tuple of (train, test) datasets from the corpus loader."""
        loader = self.data_loader
        return tuple(map(lambda x: loader.get_dataset(x)[0],
                         'train test'.split()))
    @property
    def ir_datasets(self):
        """Tuple of (train, test) :class:`IrDataset` wrappers; the wrapped
        tensors stay on CPU (``use_cuda=False``) and are moved per batch."""
        cuda = CudaConfig(use_cuda=False)
        logger.debug(f'creating dataset...')
        train, test = self.dataset
        train, test = IrDataset(train, cuda), IrDataset(test, cuda)
        logger.debug(f'created datasets')
        return train, test
    @property
    def dataloaders(self):
        """Tuple of (train, valid, test) torch ``DataLoader`` instances.

        Splits the training set into train/validation by shuffled index
        (proportion ``cnf.valid_size``).  Side effect: sets
        ``self.train_dataset`` and ``self.test_dataset``.
        """
        train, test = self.ir_datasets
        self.train_dataset = train
        self.test_dataset = test
        # obtain training indices that will be used for validation
        num_train = len(train)
        indices = list(range(num_train))
        np.random.shuffle(indices)
        split = int(np.floor(self.cnf.valid_size * num_train))
        train_idx, valid_idx = indices[split:], indices[:split]
        # define samplers for obtaining training and validation batches
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        logger.debug(f'preparing data loaders')
        # prepare data loaders
        train_loader = DataLoader(
            train, batch_size=self.cnf.batch_size, sampler=train_sampler)
        valid_loader = DataLoader(
            train, batch_size=self.cnf.batch_size, sampler=valid_sampler)
        test_loader = DataLoader(
            test, batch_size=self.cnf.batch_size)
        logger.debug(f'created loaders')
        return train_loader, valid_loader, test_loader
    def create_model(self):
        """Create the :class:`Net` model, sizing its layers from the
        matrix shapes of the first available data point, and move it to
        the configured device."""
        ds = self.dataset
        # fall back to the test split if the train split is empty
        ds_idx = 1 if len(ds[0]) == 0 else 0
        point = ds[ds_idx][0]
        logger.debug(f'ds: {type(point[0])}')
        logger.debug(f'ds: {type(point[1])}')
        para_shape = point[0].matrix.shape
        ques_shape = point[1].matrix.shape
        para_f_shape = point[0].feature_matrix.shape
        ques_f_shape = point[1].feature_matrix.shape
        logger.debug(f'shapes: paragraph {para_shape}, question: {ques_shape}')
        model = Net(para_shape, ques_shape, para_f_shape, ques_f_shape,
                    self.n_labels, self.cnf, self.debug)
        return self.cuda.to(model)
    def create_optimizer_criterion(self, model):
        """Return an (optimizer, loss criterion) pair for *model*: Adam
        with the configured learning rate and cross-entropy loss."""
        # opt = torch.optim.SGD(model.parameters(), lr=self.cnf.learning_rate)
        opt = optim.Adam(model.parameters(), lr=self.cnf.learning_rate)
        loss = nn.CrossEntropyLoss()
        #loss = nn.NLLLoss()
        return opt, loss
    def _write_validation(self, train_loss, validation_loss,
                          decreased, mode='a'):
        """Append one (train loss, validation loss, decreased) CSV row to
        ``validation_path`` (creating parent directories as needed)."""
        if self.validation_path is not None:
            self.validation_path.parent.mkdir(parents=True, exist_ok=True)
            with open(self.validation_path, mode) as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow((train_loss, validation_loss, decreased))
    def train(self):
        """Train the model, validating each epoch and checkpointing to
        ``model_path`` whenever validation loss improves.

        Stops early after the first non-improving epoch
        (``bail_on_early_stop``).  In debug mode runs a single truncated
        epoch with no validation.

        :return: the trained model, or ``None`` if a batch raised
        """
        logger.info('training...')
        bail_on_early_stop = True
        logger.debug(f'loading corpus')
        train, valid, test = self.dataloaders
        logger.debug(f'created all three dataloaders')
        if self.debug:
            epochs = 1
            max_training = 1
            do_validate = False
        else:
            epochs = self.cnf.epochs
            max_training = sys.maxsize
            do_validate = True
        logger.debug('creating model')
        model = self.create_model()
        if model is None:
            return
        logger.debug('creating optimizer and criterion')
        optimizer, criterion = self.create_optimizer_criterion(model)
        # set initial "min" to infinity
        valid_loss_min = np.Inf
        self._write_validation(*'train validation decreased'.split(), mode='w')
        t0 = time()
        logger.debug(f'training with {epochs} epochs')
        for epoch in range(epochs):
            logger.debug(f'starting epoc: {epoch}')
            # keep track of training and validation loss
            train_loss = 0.0
            valid_loss = 0.0
            dl_data = it.islice(enumerate(train), max_training)
            for i, (mats, labels, pid) in dl_data:
                logger.debug(f'data: {len(mats)} {mats[0].shape}')
                mats = tuple(map(self.cuda.to, mats))
                optimizer.zero_grad()
                # forward pass, get our log probs
                try:
                    output = model(mats)
                    # calculate the loss with the logps and the labels
                    loss = criterion(output, labels)
                except Exception as e:
                    print(e)
                    return
                loss.backward()
                # update/iterate over the error surface
                optimizer.step()
                train_loss += loss.item() * labels.size(0)
                logger.debug(f'loss ({i}): {train_loss}')
                if i > 0 and ((i % 30) == 0):
                    logger.info(f'{i}; train loss={train_loss / i}')
                #gc.collect()
            if do_validate:
                # prep model for evaluation
                model.eval()
                for mats, labels, pid in valid:
                    logger.debug(f'data: {len(mats)} {mats[0].shape}')
                    mats = tuple(map(self.cuda.to, mats))
                    # forward pass: compute predicted outputs by passing
                    # inputs to the model
                    output = model(mats)
                    # calculate the loss
                    loss = criterion(output, labels)
                    # update running validation loss
                    valid_loss += loss.item() * labels.size(0)
                # calculate average loss over an epoch
                train_loss = train_loss / len(train)
                valid_loss = valid_loss / len(valid)
                decrease = valid_loss <= valid_loss_min
                logger.info(f'epoch: {epoch+1}, training loss: {train_loss:.6f}, ' +
                            f'validation Loss: {valid_loss:.6f}')
                self._write_validation(train_loss, valid_loss, str(decrease).lower())
                # save model if validation loss has decreased
                if decrease:
                    logger.info(f'validation loss decreased ' +
                                f'({valid_loss_min:.6f} --> {valid_loss:.6f})')
                    logger.info(f'saving model to {self.model_path}')
                    model_file = str(self.model_path.absolute())
                    self.model_path.parent.mkdir(parents=True, exist_ok=True)
                    torch.save(model.state_dict(), model_file)
                    valid_loss_min = valid_loss
                elif bail_on_early_stop:
                    break
        logger.info(f'trained in {(time() - t0):.3f}s')
        self.cuda.empty_cache()
        return model
    def test(self, model, train_start=None, writer=sys.stdout):
        """Evaluate *model* on the test split, writing per-label and
        overall accuracy plus timings to *writer*.

        :param model: the trained model to evaluate
        :param train_start: epoch start time used to report total time;
                            defaults to the test start time
        :param writer: file-like destination for the report
        """
        logger.info('testing...')
        test_start = time()
        if train_start is None:
            train_start = test_start
        train, valid, test = self.dataloaders
        # initialize lists to monitor test loss and accuracy
        test_loss = 0.0
        class_correct = list(0. for i in range(self.n_labels))
        class_total = list(0. for i in range(self.n_labels))
        optimizer, criterion = self.create_optimizer_criterion(model)
        # prep model for evaluation
        model.eval()
        for mats, labels, pid in test:
            logger.debug(f'data: {len(mats)} {mats[0].shape}')
            mats = tuple(map(self.cuda.to, mats))
            # forward pass: compute predicted outputs by passing inputs
            # to the model
            with torch.no_grad():
                output = model(mats)
            # calculate the loss
            loss = criterion(output, labels)
            # update test loss
            test_loss += loss.item() * labels.size(0)
            # convert output probabilities to predicted class
            _, pred = torch.max(output, 1)
            # compare predictions to true label
            correct = np.squeeze(pred.eq(labels.data.view_as(pred)))
            logger.debug(f'labels: {labels}')
            logger.debug(f'correct: {correct}')
            # calculate test accuracy for each object class
            for i in range(len(labels)):
                label = labels.data[i]
                logger.debug(f'label: {label}, {class_correct[label]}')
                class_correct[label] += correct[i].item()
                class_total[label] += 1
        # calculate and print avg test loss
        test_loss = test_loss / len(test.dataset)
        logger.info(f'test Loss: {test_loss:.6f}')
        for i in range(self.n_labels):
            if class_total[i] > 0:
                acc = 100 * class_correct[i] / class_total[i]
                writer.write(f'test accuracy of label {i}: {acc:2.0f}% ' +
                             f'({np.sum(class_correct[i]):2.0f}/' +
                             f'{np.sum(class_total[i]):2.0f})\n')
            else:
                writer.write(f'test accuracy of {i}: no training examples\n')
        acc = 100. * np.sum(class_correct) / np.sum(class_total)
        writer.write(f'test accuracy (overall): {acc:2.0f}% ' +
                     f'({np.sum(class_correct):2.0f}/' +
                     f'{np.sum(class_total):2.0f})\n')
        time_train = test_start - train_start
        time_test = time() - test_start
        time_all = time() - train_start
        writer.write(f'time: train: {time_train:.1f}s, ' +
                     f'test: {time_test:.1f}s, ' +
                     f'all: {time_all:.1f}s\n')
        self.cuda.empty_cache()
    def load_model(self):
        """Create a fresh model and load its weights from ``model_path``."""
        model_file = str(self.model_path.absolute())
        logger.info(f'loading model from {model_file}')
        state = torch.load(model_file)
        model = self.create_model()
        model.load_state_dict(state)
        return model
    def predict(self, model):
        """Run *model* on the test split and write per-row predictions as
        CSV to ``pred_path``; prints paragraph and article accuracies.

        The CSV columns are: probability of the positive class, gold
        label, correct flag, paragraph id, article id, matching-article
        flag.
        """
        train, valid, test = self.dataloaders
        # initialize lists to monitor test loss and accuracy
        test_loss = 0.0
        optimizer, criterion = self.create_optimizer_criterion(model)
        # prep model for evaluation
        model.eval()
        preds = []
        for data, labels, pids in test:
            # forward pass: compute predicted outputs by passing inputs
            # to the model
            with torch.no_grad():
                output = model(data)
            # calculate the loss
            loss = criterion(output, labels)
            # update test loss
            test_loss += loss.item() * data.size(0)
            # convert output probabilities to predicted class
            _, pred = torch.max(output, 1)
            # compare predictions to true label
            correct = np.squeeze(pred.eq(labels.data.view_as(pred)))
            # NOTE(review): exp of the output implies the model returns log
            # probabilities — confirm Net's final layer
            proba = torch.exp(output[:, 1])
            pred_data = torch.stack((proba, labels.float(), correct.float(), pids.float()))
            pred_data = pred_data.transpose(0, 1)
            preds.append(pred_data)
        preds = torch.cat(preds)
        preds = preds.cpu().clone().detach()
        td = self.ir_datasets[1].data
        # by dataset ID, get the paragraph and section (article) IDs
        pdat = map(lambda i: (td[i][0].id, td[i][0].tid, td[i][0].tid == td[i][1].paragraph.tid),
                   preds[:, 3].int())
        pdat = torch.tensor(tuple(pdat), dtype=self.cuda.data_type)
        preds = torch.cat((preds[:, :3], pdat), 1)
        para_acc = preds[:, 2].sum() / preds.shape[0]
        # filter by matching article
        match_articles = preds[preds[:, 5].nonzero().squeeze()]
        # filter on positive labels (matching sections should predict true)
        article_acc = match_articles[:, 1].sum() / match_articles.shape[0]
        print(f'paragraph accuracy: {para_acc}, article accuracy: {article_acc}')
        self.pred_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.pred_path, 'w') as f:
            f.write(f'Probability,Label,Correct,Paragraph ID,Article ID,Matching Article\n')
            np.savetxt(f, preds.numpy(), fmt='%2.2f', delimiter=',')
    def rank_question(self, model, ques, paras, limit=sys.maxsize):
        """Rank the gold paragraph of *ques* against distractors in *paras*.

        Builds a dataset of (paragraph, question) pairs — all non-gold
        paragraphs labeled false plus the gold paragraph labeled true —
        scores them with *model* and sorts by positive-class probability.

        :param model: the trained model used for scoring
        :param ques: the question whose gold paragraph is ranked
        :param paras: candidate paragraphs (the gold one is excluded here
                      and re-appended with a true label)
        :param limit: maximum number of distractor paragraphs to score
        :return: dict with the gold paragraph's rank, ids, candidate count
                 and the sorted prediction tensor
        """
        #eps = 1e-6
        eps = None
        logger.info(f'ranking {ques}')
        gold_para = ques.paragraph
        ds = map(lambda p: (p, ques, False),
                 filter(lambda p: p.id != gold_para.id, paras))
        ds = list(it.islice(ds, limit))
        ds.append((gold_para, ques, True))
        dl = DataLoader(IrDataset(ds, CudaConfig(use_cuda=False)),
                        batch_size=self.cnf.batch_size)
        optimizer, criterion = self.create_optimizer_criterion(model)
        # prep model for evaluation
        model.eval()
        preds = []
        # initialize lists to monitor test loss and accuracy
        test_loss = 0.0
        t0 = time()
        for mats, labels, pids in dl:
            logger.debug(f'data: {len(mats)} {mats[0].shape}')
            mats = tuple(map(self.cuda.to, mats))
            # forward pass: compute predicted outputs by passing inputs
            # to the model
            with torch.no_grad():
                output = model(mats)
            # calculate the loss
            loss = criterion(output, labels)
            # update test loss
            test_loss += loss.item() * labels.size(0)
            # convert output probabilities to predicted class
            _, pred = torch.max(output, 1)
            # compare predictions to true label
            correct = np.squeeze(pred.eq(labels.data.view_as(pred)))
            probs = torch.exp(output)
            proba = probs[:, 1]
            # optional sanity check that per-row probabilities sum to ~1
            if eps is not None:
                ps = probs.sum(dim=1)
                tol = ps[torch.nonzero(abs(1.0 - ps) > eps)]
                if tol.shape[0] > 0:
                    logger.warning(f'N predictions of binary probabilities ' +
                                   f'not in error ({eps}): {tol.shape[0]}')
            pred_data = torch.stack(
                (proba, labels.float(), correct.float(), pids.float()))
            pred_data = pred_data.transpose(0, 1)
            preds.append(pred_data.cpu())
        preds = torch.cat(preds)
        # sort all rows by descending positive-class probability (column 0)
        _, indicies = torch.sort(preds, 0, descending=True)
        preds = preds[indicies[:, 0]]
        logger.debug(f'gold paragraph: {gold_para.id}')
        rank_row = torch.nonzero(preds[:, 3] == gold_para.id)
        rank_idx = int(rank_row[0][0])
        logger.info(f'calc rank: {rank_idx} in {time()-t0:.2f}s')
        return {'rank': rank_idx,
                'paragraph_id': gold_para.id,
                'question_id': ques.id,
                'n_paragraphs': len(ds),
                'preds': preds}
    def tmp(self):
        # scratch entry point: toggle the `if 0` to retrain instead of
        # loading the saved model before testing
        if 0:
            model = self.train()
        else:
            model = self.load_model()
            self.test(model)
| [
"logging.getLogger",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.exp",
"zensols.actioncli.persisted",
"zensols.dlqaclass.Net",
"torch.utils.data.sampler.SubsetRandomSampler",
"torch.sort",
"csv.writer",
"numpy.floor",
"zensols.dlqaclass.QADataLoader",
"time.time",
"torch.cat",
"iterto... | [((406, 433), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (423, 433), False, 'import logging\n'), ((1892, 1917), 'zensols.actioncli.persisted', 'persisted', (['"""_data_loader"""'], {}), "('_data_loader')\n", (1901, 1917), False, 'from zensols.actioncli import persisted\n'), ((1233, 1245), 'zensols.dltools.CudaConfig', 'CudaConfig', ([], {}), '()\n', (1243, 1245), False, 'from zensols.dltools import CudaConfig\n'), ((1960, 1985), 'zensols.dlqaclass.QADataLoader', 'QADataLoader', (['self.config'], {}), '(self.config)\n', (1972, 1985), False, 'from zensols.dlqaclass import QADataLoader, Net\n'), ((2224, 2250), 'zensols.dltools.CudaConfig', 'CudaConfig', ([], {'use_cuda': '(False)'}), '(use_cuda=False)\n', (2234, 2250), False, 'from zensols.dltools import CudaConfig\n'), ((2764, 2790), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2781, 2790), True, 'import numpy as np\n'), ((3014, 3044), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), '(train_idx)\n', (3033, 3044), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3069, 3099), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['valid_idx'], {}), '(valid_idx)\n', (3088, 3099), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((3202, 3274), 'torch.utils.data.DataLoader', 'DataLoader', (['train'], {'batch_size': 'self.cnf.batch_size', 'sampler': 'train_sampler'}), '(train, batch_size=self.cnf.batch_size, sampler=train_sampler)\n', (3212, 3274), False, 'from torch.utils.data import DataLoader\n'), ((3311, 3383), 'torch.utils.data.DataLoader', 'DataLoader', (['train'], {'batch_size': 'self.cnf.batch_size', 'sampler': 'valid_sampler'}), '(train, batch_size=self.cnf.batch_size, sampler=valid_sampler)\n', (3321, 3383), False, 'from torch.utils.data import DataLoader\n'), ((3419, 3467), 'torch.utils.data.DataLoader', 'DataLoader', 
(['test'], {'batch_size': 'self.cnf.batch_size'}), '(test, batch_size=self.cnf.batch_size)\n', (3429, 3467), False, 'from torch.utils.data import DataLoader\n'), ((4087, 4184), 'zensols.dlqaclass.Net', 'Net', (['para_shape', 'ques_shape', 'para_f_shape', 'ques_f_shape', 'self.n_labels', 'self.cnf', 'self.debug'], {}), '(para_shape, ques_shape, para_f_shape, ques_f_shape, self.n_labels, self\n .cnf, self.debug)\n', (4090, 4184), False, 'from zensols.dlqaclass import QADataLoader, Net\n'), ((4451, 4472), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4470, 4472), False, 'from torch import nn\n'), ((5810, 5816), 'time.time', 'time', ([], {}), '()\n', (5814, 5816), False, 'from time import time\n'), ((9020, 9026), 'time.time', 'time', ([], {}), '()\n', (9024, 9026), False, 'from time import time\n'), ((11884, 11906), 'torch.load', 'torch.load', (['model_file'], {}), '(model_file)\n', (11894, 11906), False, 'import torch\n'), ((13140, 13156), 'torch.cat', 'torch.cat', (['preds'], {}), '(preds)\n', (13149, 13156), False, 'import torch\n'), ((13529, 13563), 'torch.cat', 'torch.cat', (['(preds[:, :3], pdat)', '(1)'], {}), '((preds[:, :3], pdat), 1)\n', (13538, 13563), False, 'import torch\n'), ((14965, 14971), 'time.time', 'time', ([], {}), '()\n', (14969, 14971), False, 'from time import time\n'), ((16288, 16304), 'torch.cat', 'torch.cat', (['preds'], {}), '(preds)\n', (16297, 16304), False, 'import torch\n'), ((16327, 16364), 'torch.sort', 'torch.sort', (['preds', '(0)'], {'descending': '(True)'}), '(preds, 0, descending=True)\n', (16337, 16364), False, 'import torch\n'), ((16478, 16520), 'torch.nonzero', 'torch.nonzero', (['(preds[:, 3] == gold_para.id)'], {}), '(preds[:, 3] == gold_para.id)\n', (16491, 16520), False, 'import torch\n'), ((2811, 2852), 'numpy.floor', 'np.floor', (['(self.cnf.valid_size * num_train)'], {}), '(self.cnf.valid_size * num_train)\n', (2819, 2852), True, 'import numpy as np\n'), ((10047, 10067), 'torch.max', 'torch.max', 
(['output', '(1)'], {}), '(output, 1)\n', (10056, 10067), False, 'import torch\n'), ((11220, 11239), 'numpy.sum', 'np.sum', (['class_total'], {}), '(class_total)\n', (11226, 11239), True, 'import numpy as np\n'), ((11482, 11488), 'time.time', 'time', ([], {}), '()\n', (11486, 11488), False, 'from time import time\n'), ((11521, 11527), 'time.time', 'time', ([], {}), '()\n', (11525, 11527), False, 'from time import time\n'), ((12764, 12784), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (12773, 12784), False, 'import torch\n'), ((12922, 12945), 'torch.exp', 'torch.exp', (['output[:, 1]'], {}), '(output[:, 1])\n', (12931, 12945), False, 'import torch\n'), ((14533, 14553), 'itertools.islice', 'it.islice', (['ds', 'limit'], {}), '(ds, limit)\n', (14542, 14553), True, 'import itertools as it\n'), ((15540, 15560), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (15549, 15560), False, 'import torch\n'), ((15698, 15715), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (15707, 15715), False, 'import torch\n'), ((4844, 4863), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4854, 4863), False, 'import csv\n'), ((9746, 9761), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9759, 9761), False, 'import torch\n'), ((11196, 11217), 'numpy.sum', 'np.sum', (['class_correct'], {}), '(class_correct)\n', (11202, 11217), True, 'import numpy as np\n'), ((12465, 12480), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12478, 12480), False, 'import torch\n'), ((14636, 14662), 'zensols.dltools.CudaConfig', 'CudaConfig', ([], {'use_cuda': '(False)'}), '(use_cuda=False)\n', (14646, 14662), False, 'from zensols.dltools import CudaConfig\n'), ((15239, 15254), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15252, 15254), False, 'import torch\n'), ((8826, 8832), 'time.time', 'time', ([], {}), '()\n', (8830, 8832), False, 'from time import time\n'), ((11385, 11404), 'numpy.sum', 'np.sum', (['class_total'], {}), 
'(class_total)\n', (11391, 11404), True, 'import numpy as np\n'), ((16608, 16614), 'time.time', 'time', ([], {}), '()\n', (16612, 16614), False, 'from time import time\n'), ((11329, 11350), 'numpy.sum', 'np.sum', (['class_correct'], {}), '(class_correct)\n', (11335, 11350), True, 'import numpy as np\n'), ((11045, 11067), 'numpy.sum', 'np.sum', (['class_total[i]'], {}), '(class_total[i])\n', (11051, 11067), True, 'import numpy as np\n'), ((10978, 11002), 'numpy.sum', 'np.sum', (['class_correct[i]'], {}), '(class_correct[i])\n', (10984, 11002), True, 'import numpy as np\n')] |
import pandas as pd
import os
import numpy as np
import glob
inf = glob.glob('/home/wequ0318/sleep/data/eeg_fpz_cz/*.npz')
for _f in inf:
with np.load(_f) as f:
data = f["x"]
labels = f["y"]
sampling_rate = f["fs"]
df_data = pd.DataFrame(np.squeeze(data))
df_label = pd.DataFrame(labels)
fname = os.path.basename(_f)
num = df_data.shape[0]
df_epoch = pd.DataFrame(np.arange(num))
df_name = pd.DataFrame(np.repeat(fname[:-4], num))
df = pd.concat([df_data, df_label, df_epoch, df_name], axis=1)
df.to_csv(fname.replace('npz', 'csv'), sep=',', index=False)
| [
"numpy.repeat",
"numpy.arange",
"numpy.squeeze",
"os.path.basename",
"pandas.DataFrame",
"numpy.load",
"pandas.concat",
"glob.glob"
] | [((67, 122), 'glob.glob', 'glob.glob', (['"""/home/wequ0318/sleep/data/eeg_fpz_cz/*.npz"""'], {}), "('/home/wequ0318/sleep/data/eeg_fpz_cz/*.npz')\n", (76, 122), False, 'import glob\n'), ((147, 158), 'numpy.load', 'np.load', (['_f'], {}), '(_f)\n', (154, 158), True, 'import numpy as np\n'), ((311, 331), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {}), '(labels)\n', (323, 331), True, 'import pandas as pd\n'), ((348, 368), 'os.path.basename', 'os.path.basename', (['_f'], {}), '(_f)\n', (364, 368), False, 'import os\n'), ((520, 577), 'pandas.concat', 'pd.concat', (['[df_data, df_label, df_epoch, df_name]'], {'axis': '(1)'}), '([df_data, df_label, df_epoch, df_name], axis=1)\n', (529, 577), True, 'import pandas as pd\n'), ((274, 290), 'numpy.squeeze', 'np.squeeze', (['data'], {}), '(data)\n', (284, 290), True, 'import numpy as np\n'), ((432, 446), 'numpy.arange', 'np.arange', (['num'], {}), '(num)\n', (441, 446), True, 'import numpy as np\n'), ((479, 505), 'numpy.repeat', 'np.repeat', (['fname[:-4]', 'num'], {}), '(fname[:-4], num)\n', (488, 505), True, 'import numpy as np\n')] |
import functools
import numpy as np
from estimagic.batch_evaluators import joblib_batch_evaluator
from src.manfred.minimize_manfred import minimize_manfred
def minimize_manfred_estimagic(
internal_criterion_and_derivative,
x,
lower_bounds,
upper_bounds,
convergence_relative_params_tolerance=0.001,
convergence_direct_search_mode="fast",
max_criterion_evaluations=100_000,
step_sizes=None,
max_step_sizes=None,
direction_window=3,
gradient_weight=0.5,
momentum=0.05,
linesearch_active=True,
linesearch_frequency=3,
linesearch_n_points=5,
noise_seed=0,
noise_n_evaluations_per_x=1,
batch_evaluator=joblib_batch_evaluator,
batch_evaluator_options=None,
):
"""MANFRED algorithm with internal estimagic optimizer interface.
MANFRED stands for Monotone Approximate Noise resistent algorithm For Robust
optimization without Exact Derivatives
It combines a very robust direct search step with an efficient line search step
based on a search direction that is a byproduct of the direct search.
It is meant for optimization problems that fulfill the following conditions:
- A low number of parameters (10 is already a lot)
- Presence of substantial true noise, i.e. the criterion function is stochastic and
the noise is large enough to introduce local minima.
- Bounds on all parameters are known
Despite being able to handle small local minima introduced by noise, MANFRED is a
local optimization algorithm. If you need a global solution in the presence of
multiple minima you need to run it from several starting points.
MANFRED has the following features:
- Highly parallelizable: You can scale MANFRED to up to 2 ** n_params *
n_evaluations_per_x cores for a non parallelized criterion function.
- Monotone: Only function values that actually lead to an improvement are used.
Args:
convergence_relative_params_tolerance (float): Maximal change in parameter
vectors between two iterations to declare convergence.
convergence_direct_search_mode (str): One of "fast", "thorough". If thorough,
convergence is only declared if a two sided search for all parameters
does not yield any improvement.
max_criterion_evaluations (int): Maximal number of criterion evaluations. This
The actual number of evaluations might be higher, because we only check
at the start of each iteration if the maximum is reached.
step_sizes (float or list): Step size or list of step sizes for the direct
search step of the optimization. This step size refers to a rescaled
parameter vector where all lower bounds are 0 and all upper bounds are 1. It
is thus a relative step size.
This is also the step size used to calculate an approximated gradient via
finite differences because the approximated gradient is a free by product
of the direct search. If a list of step sizes is provided, the algorithm is
run with each step size in the list until convergence. Especially for noisy
problems it is good to use a list of decreasing step sizes.
max_step_sizes (float or list): Maximum step size that can be taken in any
direction during the line search step. This step size also refers to the
rescaled parameter vector. It needs to be a float or a list of
the same length as step_sizes. A large max_step_size can lead to a fast
convergence if the search direction is good. This is especially helpful at
the beginning. Later, a small max_step limits the search space for the line
search and can thus increase precision.
direction_window (int): How many accepted parameters are used to
determine if a parameter has momentum and we can thus switch
to one-sided search for that parameter.
gradient_weight (float): The search direction for the line search step is a
weighted average of the negative gradient and direction taken in the last
successful direct search step (both normalized to unit length).
gradient_weight determines the weight of the gradient in this combination.
Since the gradient contains quantitative information on the steepness in
each direction it can lead to very fast convergence but is more sensitive
to noise. Moreover, it only contains first order information.
The direction from the direct search contains some second order information
and is more robust to noise because it only uses the ordering of function
values and not their size.
momentum (float): The search direction is momentum * past search direction +
(1 - momentum) * momentum free current search direction. More momentum can
help to break out of local minima and to average out noise in the
search direction calculation.
linesearch_active (bool): Whether line search is used.
linesearch_frequency (int or list): If linesearch_active is True this number
specifies every how many iterations we do a line search step after
the direct search step.Line search steps can lead to fast progress
and/or refined solutions and the number of required function
evaluations does not depend on the dimensionality. The disadvantage
is that they make caching more inefficient by leaving the
grid and that they make it harder to check convergence of the
direct search with a given step size. 3 seems to be a sweet spot. Can be a
list with the same length as step_sizes.
linesearch_n_points (int): At how many points the function is evaluated during
the line search. More points mean higher precision but also more sensitivity
to noise.
noise_seed (int): Starting point of a seed sequence.
noise_n_evaluations_per_x (int): How often the criterion function is evaluated
at each parameter vector in order to average out noise.
batch_evaluator (callable): An estimagic batch evaluator.
batch_evaluator_options (dict): Keyword arguments for the batch evaluator.
"""
algo_info = {
"primary_criterion_entry": "root_contributions",
"parallelizes": True,
"needs_scaling": False,
"name": "manfred",
}
if batch_evaluator_options is None:
batch_evaluator_options = {}
criterion = functools.partial(
internal_criterion_and_derivative, algorithm_info=algo_info, task="criterion"
)
options = {
"step_sizes": step_sizes,
"max_fun": max_criterion_evaluations,
"convergence_direct_search_mode": convergence_direct_search_mode,
"xtol": convergence_relative_params_tolerance,
"direction_window": direction_window,
"xtol": convergence_relative_params_tolerance,
"linesearch_active": linesearch_active,
"linesearch_frequency": linesearch_frequency,
"linesearch_n_points": linesearch_n_points,
"max_step_sizes": max_step_sizes,
"n_evaluations_per_x": noise_n_evaluations_per_x,
"seed": noise_seed,
"gradient_weight": gradient_weight,
"momentum": momentum,
"batch_evaluator": batch_evaluator,
"batch_evaluator_options": batch_evaluator_options,
}
unit_x = _x_to_unit_cube(x, lower_bounds, upper_bounds)
def func(x, seed, lower_bounds, upper_bounds):
x = _x_from_unit_cube(x, lower_bounds, upper_bounds)
np.random.seed(seed)
residuals = criterion(x)
return {"root_contributions": residuals, "value": residuals @ residuals}
partialed_func = functools.partial(
func, lower_bounds=lower_bounds, upper_bounds=upper_bounds
)
res = minimize_manfred(
func=partialed_func,
x=unit_x,
lower_bounds=np.zeros(len(x)),
upper_bounds=np.ones(len(x)),
**options,
)
return res
def _x_to_unit_cube(x, lower_bounds, upper_bounds):
return (x - lower_bounds) / (upper_bounds - lower_bounds)
def _x_from_unit_cube(unit_x, lower_bounds, upper_bounds):
return unit_x * (upper_bounds - lower_bounds) + lower_bounds
| [
"functools.partial",
"numpy.random.seed"
] | [((6673, 6774), 'functools.partial', 'functools.partial', (['internal_criterion_and_derivative'], {'algorithm_info': 'algo_info', 'task': '"""criterion"""'}), "(internal_criterion_and_derivative, algorithm_info=\n algo_info, task='criterion')\n", (6690, 6774), False, 'import functools\n'), ((7916, 7993), 'functools.partial', 'functools.partial', (['func'], {'lower_bounds': 'lower_bounds', 'upper_bounds': 'upper_bounds'}), '(func, lower_bounds=lower_bounds, upper_bounds=upper_bounds)\n', (7933, 7993), False, 'import functools\n'), ((7759, 7779), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7773, 7779), True, 'import numpy as np\n')] |
from __future__ import print_function
from cepbp.common.input_testing import TestInputs
from cepbp.common.logs import Logs
from cepbp.common.custom_error_handler import CustomError
from cepbp.areas import Areas
from cepbp.perimeters import Perimeters
import configparser
import logging
import sys
import numpy as np
class ShapeName(object):
def __init__(self):
pass
def print_shape(self, shape):
print("The shape is a %s" % self.shape)
class DimensionCalculator(ShapeName, Perimeters, Areas, TestInputs):
"""
General class to calculate the area of various shapes. Only the rectangles and circles are supported for perimeters and areas.
Parameters
----------
*args: integer
Only the first element in args is used and corresponds to the user's group ID
**kwargs: dictionary
Only the key 'submitted_by' is used and corresponds to the user's name
Example
----------
from cepbp.dimension_calculator import DimensionCalculator
dc = DimensionCalculator(1, submitted_by='<NAME>')
dc.output_dimension_value((3, 4), 'rectangle', 'areas')
>>> Request submitted by <NAME>
>>> Request submitted by group 1
>>> rectangle area: 12.000
"""
def __init__(self, *args, **kwargs):
TestInputs.__init__(self)
self.group_nb = ''
if args:
self.group_nb = str(args[0])
self.submitted_by = str(kwargs.get('submitted_by', ''))
self.logger = Logs()
self.logger.get_logger(name='logs_shape_dimension')
def output_dimension_value(self, measurements, shape, dimension):
"""
General function to handle all the steps to calcuate the dimension for a shape and measurement set.
The result of the calculations are printed and logged.
The inputs are checked for basic errors.
Parameters
----------
measurements: tuple of integers
Tuple object containing the measurement of the object of interest
shape: string
'rectangle' for example
dimension: string
'perimeter' for example
"""
if self.submitted_by:
self.logger.print_and_log('Request submitted by %s' % self.submitted_by)
if self.group_nb:
self.logger.print_and_log('Request submitted by group %s' % self.group_nb)
if (isinstance(measurements, float) | isinstance(measurements, int)):
measurements = [measurements]
try:
errors = self.run_input_tests(shape, dimension, measurements)
if errors:
msg = '\n'.join(errors)
raise CustomError(msg, (measurements, shape, dimension))
except CustomError as e:
self.logger.print_and_log('Error: {}\nData: \n{}'.format(e.msg, e.data), 'error')
return None
if dimension == 'perimeter':
dimension_value = self.perimeter_value(measurements, shape)
if dimension == 'area':
dimension_value = self.area_value(measurements, shape)
try:
if np.isinf(dimension_value):
msg = 'The dimension value is infinite. Please check inputs'
raise CustomError(msg, (measurements, shape, dimension))
except Exception as e:
self.logger.print_and_log('Error: {}\nData: \n{}'.format(e.msg, e.data), 'error')
return None
self.logger.print_and_log('%s %s: %0.3f' % (shape, dimension, dimension_value))
return dimension_value
| [
"cepbp.common.input_testing.TestInputs.__init__",
"cepbp.common.logs.Logs",
"cepbp.common.custom_error_handler.CustomError",
"numpy.isinf"
] | [((1278, 1303), 'cepbp.common.input_testing.TestInputs.__init__', 'TestInputs.__init__', (['self'], {}), '(self)\n', (1297, 1303), False, 'from cepbp.common.input_testing import TestInputs\n'), ((1476, 1482), 'cepbp.common.logs.Logs', 'Logs', ([], {}), '()\n', (1480, 1482), False, 'from cepbp.common.logs import Logs\n'), ((3091, 3116), 'numpy.isinf', 'np.isinf', (['dimension_value'], {}), '(dimension_value)\n', (3099, 3116), True, 'import numpy as np\n'), ((2651, 2701), 'cepbp.common.custom_error_handler.CustomError', 'CustomError', (['msg', '(measurements, shape, dimension)'], {}), '(msg, (measurements, shape, dimension))\n', (2662, 2701), False, 'from cepbp.common.custom_error_handler import CustomError\n'), ((3217, 3267), 'cepbp.common.custom_error_handler.CustomError', 'CustomError', (['msg', '(measurements, shape, dimension)'], {}), '(msg, (measurements, shape, dimension))\n', (3228, 3267), False, 'from cepbp.common.custom_error_handler import CustomError\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.