"""A module for reading, parsing, and preprocessing trodes data
collected during robot calibration routines.
"""
import numpy as np
import pandas as pd
from scipy import ndimage
from . import readTrodesExtractedDataFile3 as read_trodes
def get_trodes_files(data_dir, trodes_name):
"""Generate names of all the trodes files from a calibration recording.
Assumes data is saved in the default trodes filesystem and channels are
named appropriately in the trodes configuration file.
Parameters
----------
data_dir : str
Parent directory where the trodes data lives
trodes_name : str
Name of original *.rec trodes file
Returns
-------
trodes_files : dict
The file names for each channel of a calibration recording. More
specifically, `x_push_file` is the *.dat file for the `x` actuator
`push` valve command recording. Similarly, `y_pot_file` is the
        *.dat file for the `y` actuator potentiometer recording.
"""
    trodes_files = {
        'time_file': data_dir + '/%s.analog/%s.timestamps.dat' % (trodes_name, trodes_name),
        'x_push_file': data_dir + '/%s.DIO/%s.dio_xPush.dat' % (trodes_name, trodes_name),
        'x_pull_file': data_dir + '/%s.DIO/%s.dio_xPull.dat' % (trodes_name, trodes_name),
        'y_push_file': data_dir + '/%s.DIO/%s.dio_yPush.dat' % (trodes_name, trodes_name),
        'y_pull_file': data_dir + '/%s.DIO/%s.dio_yPull.dat' % (trodes_name, trodes_name),
        'z_push_file': data_dir + '/%s.DIO/%s.dio_zPush.dat' % (trodes_name, trodes_name),
        'z_pull_file': data_dir + '/%s.DIO/%s.dio_zPull.dat' % (trodes_name, trodes_name),
        'x_pot_file': data_dir + '/%s.analog/%s.analog_potX.dat' % (trodes_name, trodes_name),
        'y_pot_file': data_dir + '/%s.analog/%s.analog_potY.dat' % (trodes_name, trodes_name),
        'z_pot_file': data_dir + '/%s.analog/%s.analog_potZ.dat' % (trodes_name, trodes_name)
    }
return trodes_files
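# A hypothetical usage sketch (directory and recording name are made-up
# placeholders; any names following the default trodes layout behave the same):
#   files = get_trodes_files('/data/robot_cal', 'calib_rec')
#   files['x_pot_file']
#   # -> '/data/robot_cal/calib_rec.analog/calib_rec.analog_potX.dat'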
def read_data(trodes_files, sampling_rate=3000):
"""Read all the trodes file data using the SpikeGadgets
`readTrodesExtractedDataFile` script.
Parameters
----------
trodes_files : dict
The file names for each channel of a calibration recording. For
example, as returned by get_trodes_files().
sampling_rate : int
Specifying a rate (Hz) lower than the SpikeGadgets MCU clock rate of
30 kHz will downsample the data to speed up parsing.
Returns
-------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording.
"""
# clockrate = np.float_(read_trodes.readTrodesExtractedDataFile(trodes_files['time_file'])['clock rate'])
clockrate = 30000
ds = int(clockrate / sampling_rate)
calibration_data = {
'clockrate': clockrate,
'sampling_rate': sampling_rate,
'time': {
'units': 'samples',
'time': read_trodes.readTrodesExtractedDataFile(trodes_files['time_file'])['data'][0:-1:ds]
},
'DIO': {
'x_push': read_trodes.readTrodesExtractedDataFile(trodes_files['x_push_file'])['data'],
'x_pull': read_trodes.readTrodesExtractedDataFile(trodes_files['x_pull_file'])['data'],
'y_push': read_trodes.readTrodesExtractedDataFile(trodes_files['y_push_file'])['data'],
'y_pull': read_trodes.readTrodesExtractedDataFile(trodes_files['y_pull_file'])['data'],
'z_push': read_trodes.readTrodesExtractedDataFile(trodes_files['z_push_file'])['data'],
'z_pull': read_trodes.readTrodesExtractedDataFile(trodes_files['z_pull_file'])['data']
},
'analog': {
'x_pot': read_trodes.readTrodesExtractedDataFile(trodes_files['x_pot_file'])['data']['voltage'][0:-1:ds],
'y_pot': read_trodes.readTrodesExtractedDataFile(trodes_files['y_pot_file'])['data']['voltage'][0:-1:ds],
'z_pot': read_trodes.readTrodesExtractedDataFile(trodes_files['z_pot_file'])['data']['voltage'][0:-1:ds]
}
}
return calibration_data
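# Downsampling sketch: with the 30 kHz MCU clock and the default
# sampling_rate=3000, ds = int(30000 / 3000) = 10, so the slices above keep
# every 10th time and potentiometer sample (the DIO transition records are
# kept in full).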
def to_numpy(calibration_data):
"""Convert the calibration data to numpy arrays
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
Returns
-------
calibration_data : dict
Numpy-converted calibration data.
"""
calibration_data['time']['time'] = np.array(
[t[0] for t in calibration_data['time']['time']],
        dtype='float64'
)
for key in calibration_data['DIO'].keys():
calibration_data['DIO'][key] = np.array(
[i[0] for i in calibration_data['DIO'][key]],
            dtype='float64'
)
return calibration_data
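# Each record returned by readTrodesExtractedDataFile is a structured row;
# t[0] / i[0] above keep only its first field, assumed here to be the sample
# timestamp (for DIO channels, the time of each state transition).
# Conceptually: [(120, 1), (150, 0)] -> np.array([120., 150.])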
def to_seconds(calibration_data, start_at_zero=True):
"""Convert the calibration data time units to seconds.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
start_at_zero : bool
If True, the start time will be set to 0.
Returns
-------
calibration_data : dict
Seconds-converted calibration data
"""
    if calibration_data['time']['units'] != 'seconds':
if start_at_zero:
for key in calibration_data['DIO'].keys():
calibration_data['DIO'][key] = (
calibration_data['DIO'][key] - calibration_data['time']['time'][
0]
) / calibration_data['clockrate']
calibration_data['time']['time'] = (
calibration_data['time']['time'] -
calibration_data['time']['time'][0]
) / calibration_data['clockrate']
else:
for key in calibration_data['DIO'].keys():
calibration_data['DIO'][key] = calibration_data['DIO'][key] / calibration_data['clockrate']
calibration_data['time']['time'] = calibration_data['time']['time'] / calibration_data['clockrate']
return calibration_data
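# Worked example, assuming the 30 kHz clockrate above and start_at_zero=True:
# a DIO transition stamped at sample 60000, with a first time sample of 0,
# becomes (60000 - 0) / 30000 = 2.0 seconds.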
def pots_to_cm(calibration_data, supply_voltage=3.3, pot_range=5.0):
"""Convert the potentiometer data units to cm.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
supply_voltage : float
Maximum voltage for the potentiometers
pot_range : float
Potentiometer maximum travel range in cm
Returns
-------
calibration_data : dict
Calibration data with potentiometer data convert to cm
"""
trodes_max_bits = 32767.0
trodes_max_volts = 10.0
for key in calibration_data['analog'].keys():
calibration_data['analog'][key] = (
calibration_data['analog'][key] / trodes_max_bits * trodes_max_volts / supply_voltage * pot_range
)
return calibration_data
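# Worked example with the defaults (supply_voltage=3.3 V, pot_range=5.0 cm):
# a raw reading of 16384 trodes bits maps to
#   16384 / 32767 * 10.0 ≈ 5.0 V  ->  5.0 / 3.3 * 5.0 ≈ 7.58 cm
# (values can exceed pot_range because trodes digitizes a +/-10 V range while
# the pots only swing up to the supply voltage).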
def median_filter_pots(calibration_data, width):
"""Apply a median filter to the potentiometer series.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
width : int
Width (in samples) of window used for median filter. Should be odd.
If not, one is added.
Returns
-------
calibration_data : dict
Calibration data with median-filtered potentiometers
"""
    # the median filter window must be odd; widen even widths by one sample
    if width > 0:
        if (width % 2) == 0:
            width += 1
        for key in calibration_data['analog'].keys():
            calibration_data['analog'][key] = ndimage.median_filter(
                calibration_data['analog'][key], size=width
            )
return calibration_data
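# Filtering sketch: with width=11 each sample is replaced by the median of
# the 11 samples centered on it, which suppresses single-sample glitches
# without smearing step edges, e.g.
#   ndimage.median_filter(np.array([0., 0., 9., 0., 0.]), size=3)
#   # -> array([0., 0., 0., 0., 0.])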
def pots_to_volts(calibration_data):
"""Convert the potentiometer data units to volts.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
Returns
-------
calibration_data : dict
Calibration data with potentiometer data convert to volts
"""
trodes_max_bits = 32767.0
trodes_max_volts = 10.0
for key in calibration_data['analog'].keys():
calibration_data['analog'][key] = (
calibration_data['analog'][key] / trodes_max_bits * trodes_max_volts
)
return calibration_data
def pots_to_bits(calibration_data, supply_voltage=3.3, controller_max_bits=1023):
"""Convert the potentiometer data units to microcontroller bits.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
supply_voltage : float
Maximum voltage for the potentiometers
controller_max_bits : int
Maximum bits for the microcontroller
Returns
-------
calibration_data : dict
Calibration data with potentiometer data convert to microcontroller
bits
"""
trodes_max_bits = 32767.0
trodes_max_volts = 10.0
for key in calibration_data['analog'].keys():
calibration_data['analog'][key] = np.round(
calibration_data['analog'][key] / trodes_max_bits * trodes_max_volts /
supply_voltage * controller_max_bits
)
return calibration_data
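# Worked example with the defaults (supply_voltage=3.3, controller_max_bits=1023):
# a raw reading of 16384 trodes bits maps to
#   round(16384 / 32767 * 10.0 / 3.3 * 1023) ≈ 1550 controller bits.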
def get_valve_transitions(calibration_data):
"""Get the valve start and stop times.
Parameters
----------
calibration_data : dict
All of the digital (DIO) and analog data corresponding to the trodes
        files for a calibration recording. For example, as returned by
read_data().
Returns
-------
start_times : dict
Times at which each of the valves transitioned from closed to open
stop_times : dict
Times at which each of the valves transitioned from open to closed
"""
start_times = {
'x_push': calibration_data['DIO']['x_push'][1::2],
'x_pull': calibration_data['DIO']['x_pull'][1::2],
'y_push': calibration_data['DIO']['y_push'][1::2],
'y_pull': calibration_data['DIO']['y_pull'][1::2],
'z_push': calibration_data['DIO']['z_push'][1::2],
'z_pull': calibration_data['DIO']['z_pull'][1::2]
}
stop_times = {
'x_push': calibration_data['DIO']['x_push'][2::2],
'x_pull': calibration_data['DIO']['x_pull'][2::2],
'y_push': calibration_data['DIO']['y_push'][2::2],
'y_pull': calibration_data['DIO']['y_pull'][2::2],
'z_push': calibration_data['DIO']['z_push'][2::2],
'z_pull': calibration_data['DIO']['z_pull'][2::2]
}
return start_times, stop_times
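# Indexing sketch: after to_numpy(), each DIO channel is a vector of state
# transition times. Assuming each recording begins with the valve closed
# (entry 0 holds the initial state), odd entries are closed->open and even
# entries from 2 on are open->closed transitions:
#   times = [t_init, t_open1, t_close1, t_open2, t_close2, ...]
#   times[1::2]  # -> start_times: [t_open1, t_open2, ...]
#   times[2::2]  # -> stop_times:  [t_close1, t_close2, ...]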
def get_calibration_frame(
data_dir,
trodes_name,
sampling_rate=3000,
medfilter_width=11,
pot_units='cm'
):
"""Generate a data frame for estimating robot calibration parameters.
State variables include the starting positions and valve open
durations for each actuator. The response variable is displacement.
Durations and displacements are assumed to be negative for the pull
valves by convention.
Parameters
----------
data_dir : str
Parent directory where the trodes data lives
trodes_name : str
Name of original .rec trodes file
sampling_rate : int
Specifying a rate (Hz) lower than the SpikeGadgets MCU clock rate of
30 kHz will downsample the data and speed up the parsing.
medfilter_width : int
        Width, in samples, of the median filter applied to the potentiometer
recordings.
pot_units : str
Units to return potentiometer recordings. Can be `cm`, `volts`, or
`bits`.
Returns
-------
data_frame : pandas.core.frame.DataFrame
A pandas data frame with columns `start_time`, `x_position`,
`x_duration`, `x_displacement`, `y_position`, `y_duration`,
`y_displacement`, `z_position`, `z_duration`, and `z_displacement`.
"""
trodes_files = get_trodes_files(data_dir, trodes_name)
calibration_data = read_data(trodes_files, sampling_rate)
calibration_data = to_numpy(calibration_data)
calibration_data = median_filter_pots(calibration_data, width=medfilter_width)
calibration_data = to_seconds(calibration_data)
if pot_units == 'cm':
calibration_data = pots_to_cm(calibration_data)
elif pot_units == 'volts':
calibration_data = pots_to_volts(calibration_data)
elif pot_units == 'bits':
calibration_data = pots_to_bits(calibration_data)
start_times, stop_times = get_valve_transitions(calibration_data)
num_events = start_times['x_push'].size + start_times['x_pull'].size
# sort start times
x_start_times = np.concatenate([start_times['x_push'], start_times['x_pull']])
y_start_times = np.concatenate([start_times['y_push'], start_times['y_pull']])
z_start_times = np.concatenate([start_times['z_push'], start_times['z_pull']])
x_order = np.argsort(x_start_times)
y_order = np.argsort(y_start_times)
z_order = np.argsort(z_start_times)
# estimate valve period
valve_period = np.median(np.diff(x_start_times[x_order]))
# match start times to position indices
start_indices = np.searchsorted(calibration_data['time']['time'], x_start_times[x_order])
stop_indices = start_indices + int(valve_period * sampling_rate)
# make data frame
data_frame = pd.DataFrame(
data={
'start_time': x_start_times[x_order],
'x_position': calibration_data['analog']['x_pot'][start_indices],
'x_duration': np.concatenate([
stop_times['x_push'] - start_times['x_push'],
                start_times['x_pull'] - stop_times['x_pull']  # negative for pull valves by convention
])[x_order],
'x_displacement': (
calibration_data['analog']['x_pot'][stop_indices] -
calibration_data['analog']['x_pot'][start_indices]
),
'y_position': calibration_data['analog']['y_pot'][start_indices],
'y_duration': np.concatenate([
stop_times['y_push'] - start_times['y_push'],
                start_times['y_pull'] - stop_times['y_pull']  # negative for pull valves by convention
])[y_order],
'y_displacement': (
calibration_data['analog']['y_pot'][stop_indices] -
calibration_data['analog']['y_pot'][start_indices]
),
'z_position': calibration_data['analog']['z_pot'][start_indices],
'z_duration': np.concatenate([
stop_times['z_push'] - start_times['z_push'],
                start_times['z_pull'] - stop_times['z_pull']  # negative for pull valves by convention
])[z_order],
'z_displacement': (
calibration_data['analog']['z_pot'][stop_indices] -
calibration_data['analog']['z_pot'][start_indices]
)
}
)
return data_frame
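# Hypothetical end-to-end usage (path and recording name are placeholders):
#   df = get_calibration_frame('/data/robot_cal', 'calib_rec', pot_units='cm')
#   df[['x_duration', 'x_displacement']].head()
# Pull events appear with negative durations (and typically negative
# displacements), matching the sign convention in the docstring.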
def get_traces_frame(
data_dir,
trodes_name,
sampling_rate=3000,
medfilter_width=11,
pot_units='cm'
):
"""Generate a data frame containing the position trajectories of
each actuator in response to each of the valve duration commands.
Similar to get_calibration_frame(), but returns the entire
trajectory instead of just the starting positions and
displacements.
Parameters
----------
data_dir : str
Parent directory where the trodes data lives
trodes_name : str
Name of original .rec trodes file
sampling_rate : int
Specifying a rate (Hz) lower than the SpikeGadgets MCU clock rate of
30 kHz will downsample the data, speed up parsing, and return
a smaller data frame.
medfilter_width : int
        Width, in samples, of the median filter applied to the potentiometer
recordings.
pot_units : str
Units to return potentiometer recordings. Can be `cm`, `volts`, or
`bits`.
Returns
-------
data_frame : pandas.core.frame.DataFrame
A pandas data frame with columns `trace_time`, `start_time`,
`x_start_position`, `x_duration`, `x_displacement`,
`y_start_position`, `y_duration`, `y_displacement`,
`z_start_position`, `z_duration`, and `z_displacement`.
"""
trodes_files = get_trodes_files(data_dir, trodes_name)
calibration_data = read_data(trodes_files, sampling_rate)
calibration_data = to_numpy(calibration_data)
calibration_data = median_filter_pots(calibration_data, width=medfilter_width)
calibration_data = to_seconds(calibration_data)
if pot_units == 'cm':
calibration_data = pots_to_cm(calibration_data)
elif pot_units == 'volts':
calibration_data = pots_to_volts(calibration_data)
elif pot_units == 'bits':
calibration_data = pots_to_bits(calibration_data)
start_times, stop_times = get_valve_transitions(calibration_data)
num_events = start_times['x_push'].size + start_times['x_pull'].size
# sort start times
x_start_times = np.concatenate([start_times['x_push'], start_times['x_pull']])
y_start_times = np.concatenate([start_times['y_push'], start_times['y_pull']])
z_start_times = np.concatenate([start_times['z_push'], start_times['z_pull']])
x_order = np.argsort(x_start_times)
y_order = np.argsort(y_start_times)
z_order = np.argsort(z_start_times)
# estimate valve period
valve_period = np.median(np.diff(x_start_times[x_order]))
# estimate trace start times
start_indices = np.searchsorted(calibration_data['time']['time'], x_start_times[x_order])
trace_start_times = calibration_data['time']['time'][start_indices]
# estimate trace durations
x_durations = np.concatenate([
stop_times['x_push'] - start_times['x_push'],
        start_times['x_pull'] - stop_times['x_pull']  # negative for pull valves by convention
])[x_order]
y_durations = np.concatenate([
stop_times['y_push'] - start_times['y_push'],
        start_times['y_pull'] - stop_times['y_pull']  # negative for pull valves by convention
])[y_order]
z_durations = np.concatenate([
stop_times['z_push'] - start_times['z_push'],
        start_times['z_pull'] - stop_times['z_pull']  # negative for pull valves by convention
])[z_order]
# initialize data frame
num_rows = start_indices[-1] + int(valve_period * sampling_rate) - start_indices[0]
time_window = np.arange(start_indices[0], start_indices[-1] + int(valve_period * sampling_rate))
data_frame = pd.DataFrame(
data={
'trace_time': calibration_data['time']['time'][time_window],
'start_time': np.zeros(num_rows),
'x_start_position': np.zeros(num_rows),
'y_start_position': np.zeros(num_rows),
'z_start_position': np.zeros(num_rows),
'x_duration': np.zeros(num_rows),
'y_duration': np.zeros(num_rows),
'z_duration': np.zeros(num_rows),
'x_displacement': calibration_data['analog']['x_pot'][time_window],
'y_displacement': calibration_data['analog']['y_pot'][time_window],
'z_displacement': calibration_data['analog']['z_pot'][time_window]
}
)
# fill in data frame an event at a time
# currently slow, need to vectorize/optimize
start_indices = start_indices - start_indices[0]
for event in range(num_events - 1):
#print(str(event) + ' of ' + str(num_events))
data_frame['start_time'][start_indices[event]:start_indices[event + 1]] = (
trace_start_times[event]
)
data_frame['x_duration'][start_indices[event]:start_indices[event + 1]] = (
x_durations[event]
)
data_frame['x_start_position'][start_indices[event]:start_indices[event + 1]] = (
data_frame['x_displacement'][start_indices[event]]
)
data_frame['y_duration'][start_indices[event]:start_indices[event + 1]] = (
y_durations[event]
)
data_frame['y_start_position'][start_indices[event]:start_indices[event + 1]] = (
data_frame['y_displacement'][start_indices[event]]
)
data_frame['z_duration'][start_indices[event]:start_indices[event + 1]] = (
z_durations[event]
)
data_frame['z_start_position'][start_indices[event]:start_indices[event + 1]] = (
data_frame['z_displacement'][start_indices[event]]
)
data_frame['start_time'][start_indices[-1]:-1] = (
trace_start_times[-1]
)
data_frame['x_duration'][start_indices[-1]:-1] = (
x_durations[-1]
)
data_frame['x_start_position'][start_indices[-1]:-1] = (
data_frame['x_displacement'][start_indices[-1]]
)
data_frame['y_duration'][start_indices[-1]:-1] = (
y_durations[-1]
)
data_frame['y_start_position'][start_indices[-1]:-1] = (
data_frame['y_displacement'][start_indices[-1]]
)
data_frame['z_duration'][start_indices[-1]:-1] = (
z_durations[-1]
)
data_frame['z_start_position'][start_indices[-1]:-1] = (
data_frame['z_displacement'][start_indices[-1]]
)
# adjust trace time
data_frame['trace_time'] -= data_frame['start_time']
# adjust displacements
data_frame['x_displacement'] -= data_frame['x_start_position']
data_frame['y_displacement'] -= data_frame['y_start_position']
data_frame['z_displacement'] -= data_frame['z_start_position']
return data_frame
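# Hypothetical usage (placeholder names); each row is one downsampled time
# step, with trace_time re-zeroed at the start of its valve event:
#   traces = get_traces_frame('/data/robot_cal', 'calib_rec', pot_units='cm')
#   first = traces[traces['start_time'] == traces['start_time'].iloc[0]]
#   first.plot(x='trace_time', y='x_displacement')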
'''
Script integrating detumble with orbit/magnetic field knowledge
'''
# from detumble.py_funcs import detumble_B_cross,detumble_B_dot,get_B_dot, detumble_B_dot_bang_bang
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import numpy as np
import scipy.integrate as integrate
from orbit_propagation import get_orbit_pos, get_B_field_at_point
# from GNC.cmake_build_debug import SGP4_cpp as SGP4
# from util_funcs.py_funcs.frame_conversions import eci2ecef
import time_functions_cpp as tfcpp
import frame_conversions_cpp as fccpp
import detumble_cpp as dcpp
import time
import euler_cpp as ecpp
# clear figures
plt.close('all')
pi = math.pi
#--------------------Values from FSW sim----------------------------------------
# # Seed Initial Position/Velocity with TLE - BEESAT-1
# # (optional) - can instead replace this with r_i, v_i as np.array(3)
# line1 = ('1 35933U 09051C 19315.45643387 .00000096 00000-0 32767-4 0 9991')
# line2 = ('2 35933 98.6009 127.6424 0006914 92.0098 268.1890 14.56411486538102')
#
# # Simulation Parameters
# tstart = datetime(2019, 12, 30, 00, 00, 00)
# tstep = .1 # [sec] - 1 Hz
#
# # Initial Spacecraft Attitude
# q_i = np.array([1, 0, 0, 0]) # quaternion
# w_i = np.array([.01, .05, -.03]) # radians/sec
#
# # Spacecraft Properties
# I = np.array([[17,0,0],[0,18,0],[0,0,22]])
# mass = 1.0 # kg
#--------------------End Values from FSW sim---------------------------------------
# inertia properties (add real later)
Ixx = 0.34375
Iyy = 0.34375
Izz = 0.34375
I = np.array([[Ixx, 0.0, 0.0],[0.0, Iyy, 0.0], [0.0, 0.0, Izz]])
max_dipoles = np.array([[8.8e-3], [1.373e-2], [8.2e-3]])
# initial attitude conditions, radians & rad/s
q_0 = np.array([[1.0],[0.0],[0.0],[0.0]])      # initial quaternion (identity), scalar first
w_0 = np.array([[.01],[.05],[-.03]]) # initial rotation rate, rad/s
# initial state: quaternion, rotation rate
x_0 = np.squeeze(np.concatenate((q_0,w_0)))
# initial orbit state conditions, TLE+epoch
epoch = '2019-12-30T00:00:00.00'
line1 = ('1 35933U 09051C 19315.45643387 .00000096 00000-0 32767-4 0 9991')
line2 = ('2 35933 98.6009 127.6424 0006914 92.0098 268.1890 14.56411486538102')
TLE = {'line1': line1, 'line2': line2}
# initial orbit time (fair warning, this is the time for PyCubed, not Orsted)
MJD = 58847.0
GMST_0 = tfcpp.MJD2GMST(MJD)
mean_motion = 14.46/(24*3600)*2*math.pi # mean motion, radians/second
period = 2*pi/mean_motion # Period, seconds
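# Sanity check on the numbers above: 14.46 rev/day gives
# mean_motion ≈ 1.05e-3 rad/s, so period = 2*pi/mean_motion ≈ 5975 s (~99.6 min).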
# feed in a vector of times and plot orbit
t0 = 0.0
tf = 600
tstep = .1
times = np.arange(t0,tf,tstep)
n = len(times)
# preallocate position storage matrix
positions_ECI = np.zeros((n-1,3))
positions_ECEF = np.zeros((n-1,3))
B_field_body = np.zeros((n-1,3))
B_field_ECI_vec = np.zeros((n-1,3))
B_field_NED_vec = np.zeros((n-1,3))
B_dot_body = np.zeros((n-1,3))
w_vec = np.zeros((n-1,3))
q_vec = np.zeros((n-1,4))
M_vec = np.zeros((n-1,3))
# Define function for calculating full state derivative
t = time.time()
# extract position info at all times (from Python)
x = x_0
for i in range(len(times)-1):
# Get GMST at this time
GMST = tfcpp.MJD2GMST(MJD + times[i] / 60.0 / 60.0 / 24.0)
positions_ECI[i,:] = get_orbit_pos(TLE, epoch, times[i])
# convert to ECEF
R_ECI2ECEF = fccpp.eci2ecef(GMST)
positions_ECEF[i,:] = np.transpose(R_ECI2ECEF @ np.transpose(positions_ECI[i,:]))
lat, lon, alt = fccpp.ecef2lla(np.transpose(positions_ECEF[i,:]))
R_ECEF2ENU = fccpp.ecef2enu(lat, lon)
# get magnetic field at position
B_field_NED = get_B_field_at_point(positions_ECEF[i,:]) # North, East, Down
B_field_NED_vec[i,:] = np.transpose(B_field_NED) # store for later analysis
B_field_ENU = np.array([[B_field_NED[1]],[B_field_NED[0]],[-B_field_NED[2]]]) # north, east, down to east, north, up
# get magnetic field in body frame (for detumble algorithm)
# R_body2ECI = quat2DCM(np.transpose(x[0:4]))
B_field_ECEF = np.transpose(R_ECEF2ENU) @ B_field_ENU
B_field_ECI = np.transpose(R_ECI2ECEF) @ B_field_ECEF
# Correct to max expected value of Earth's magnetic field if pyIGRF throws a huge value
# if i > 0:
# if np.linalg.norm(B_field_ECI) > 7e-08:
# B_field_ECI = B_field_ECI/np.linalg.norm(B_field_ECI)*np.linalg.norm(B_field_ECI_vec[i-1,:])#* 7e-08
B_field_ECI_vec[i,:] = np.transpose(B_field_ECI)
q_ECI2body = ecpp.get_inverse_quaternion(x[0:4])
B_field_body[i,:] = ecpp.rotate_vec(B_field_ECI, q_ECI2body)
# Get B_dot based on previous measurement
if i>0:
B_1 = np.transpose(B_field_body[i-1,:])
B_2 = np.transpose(B_field_body[i,:])
B_dot = dcpp.get_B_dot(B_1,B_2,tstep)
B_dot_body[i,:] = np.transpose(B_dot)
# Validate B_dot algorithm
# k_B_dot = -5e-6
# dipole = dcpp.detumble_B_dot(np.transpose(B_field_body[i,:]),B_dot, k_B_dot)
# Torque on spacecraft
# M = np.cross(dipole, np.transpose(B_field_body[i, :]))
# Validate B_dot bang bang control law:
# include 1e-9 factor to get nanoTesla back into SI units
dipole = dcpp.detumble_B_dot_bang_bang(np.transpose(B_dot_body[i,:]),max_dipoles)
bang_bang_gain = 1e-9 # 5e-6
M = np.cross(np.squeeze(dipole), bang_bang_gain*np.transpose(B_field_body[i, :]))
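        # A minimal sketch of what the C++ bang-bang call above presumably
        # computes (inferred from its name and arguments, not verified here):
        # each dipole component saturates against the sign of B_dot,
        #   dipole_i = -max_dipole_i * sign(B_dot_i)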
# # Validate B_cross_c++
# k_B_cross = 4.0*math.pi/period*(2)*Ixx*2e-1
# M = dcpp.detumble_B_cross(np.transpose(x[4:7]),np.transpose(B_field_body[i,:]),k_B_cross)
# # Validate B_cross_python
# k_B_cross = 4.0*math.pi/period*(2)*Ixx*1.0e-1
# M = detumble_B_cross(np.transpose(x[4:7]), np.transpose(B_field_body[i, :]), k_B_cross)
else:
M = np.zeros((3,1))
# store for plotting
M_vec[i,:] = np.transpose(M)
# Propagate dynamics/kinematics forward using commanded moment
y = integrate.odeint(ecpp.get_attitude_derivative, x, (times[i],times[i+1]), (M, I), tfirst=True)
# Store angular velocity
w_vec[i,:] = y[-1,4:7]
q_vec[i,:] = y[-1,0:4]
# Update full attitude state
x = y[-1,:]
elapsed = time.time() - t
print(elapsed)
# need tfirst = true for t,y ordered inputs. Include parameters/extra arguments as tuple.
#
# extract position info from C++
# typerun - type of run verification 'v', catalog 'c', manual 'm'
# typeinput - type of manual input mfe 'm', epoch 'e', dayofyr 'd'
# opsmode - mode of operation afspc or improved 'a', 'i'
# whichconst - which set of constants to use 72, 84
# get gravity constants first
# wgs72 = SGP4.get_gravconsttype(72)
#satrec = SGP4.twoline2rv_wrapper(line1, line2, 72)
#satrec_ptr = SGP4.get_new_satrec()
# # plot trajectory
# fig = plt.figure()
# ax = plt.axes(projection='3d')
# ax.plot3D(positions_ECI[:,0],positions_ECI[:,1],positions_ECI[:,2])
# ax.set_title('Orbit, ECI')
# # # Esoteric plotting function
# with plt.rc_context(rc={'interactive': False}):
# plt.show()
# # plt.show(block = True)
# plot angular velocity over time
fig2 = plt.figure()
plt.plot(w_vec[:,0])
plt.plot(w_vec[:,1])
plt.plot(w_vec[:,2])
plt.title('Angular velocity components')
# # Plot trajectory in ECEF
# fig3 = plt.figure()
# ax = plt.axes(projection='3d')
# ax.plot3D(positions_ECEF[:,0],positions_ECEF[:,1],positions_ECEF[:,2])
# ax.set_title('Orbit, ECEF')
# # # Esoteric plotting function
# with plt.rc_context(rc={'interactive': False}):
# plt.show()
# plot B field components as a function of time
fig4 = plt.figure()
plt.plot(B_field_body[:,0])
plt.plot(B_field_body[:,1])
plt.plot(B_field_body[:,2])
plt.title('Magnetic field components, B')
plt.show()
# plot B_dot components as a function of time
fig5 = plt.figure()
plt.plot(B_dot_body[:,0])
plt.plot(B_dot_body[:,1])
plt.plot(B_dot_body[:,2])
plt.title('Magnetic field rate of change, B_dot')
plt.show()
# plot B field ECI components as a function of time
fig5 = plt.figure()
plt.plot(B_field_ECI_vec[:,0])
plt.plot(B_field_ECI_vec[:,1])
plt.plot(B_field_ECI_vec[:,2])
plt.title('Magnetic field ECI')
plt.show()
# plot quaternion over time
fig6 = plt.figure()
plt.plot(q_vec[:,0])
plt.plot(q_vec[:,1])
plt.plot(q_vec[:,2])
plt.plot(q_vec[:,3])
plt.title('quaternion components')
plt.show()
# plot Moment over time
fig7 = plt.figure()
plt.plot(M_vec[:,0])
plt.plot(M_vec[:,1])
plt.plot(M_vec[:,2])
plt.title('Moment components')
plt.show()
# plot norm of velocity vector over time
fig8 = plt.figure()
plt.plot(times[0:n-1]/period,np.linalg.norm(w_vec,axis=1))
plt.title('B_dot convergence')
plt.xlabel('Period')
plt.ylabel('Norm of angular rate, [rad/s]')
plt.show()
# # Plot North, East, Down (directly from IGRF) to see if singularities coming from pyIGRF or a coordinate transformation
# fig9 = plt.figure()
# plt.plot(B_field_NED_vec[:,0])
# plt.plot(B_field_NED_vec[:,1])
# plt.plot(B_field_NED_vec[:,2])
# plt.title('Components of B field in NED (from pyIGRF)')
# plt.show() | [
"matplotlib.pyplot.ylabel",
"euler_cpp.rotate_vec",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"detumble_cpp.get_B_dot",
"time_functions_cpp.MJD2GMST",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"orbit_propagation.get_orbit_pos",
"matplotlib.pyplot.close",
"numpy.concatenat... | [((647, 663), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (656, 663), True, 'import matplotlib.pyplot as plt\n'), ((1578, 1639), 'numpy.array', 'np.array', (['[[Ixx, 0.0, 0.0], [0.0, Iyy, 0.0], [0.0, 0.0, Izz]]'], {}), '([[Ixx, 0.0, 0.0], [0.0, Iyy, 0.0], [0.0, 0.0, Izz]])\n', (1586, 1639), True, 'import numpy as np\n'), ((1653, 1694), 'numpy.array', 'np.array', (['[[0.0088], [0.01373], [0.0082]]'], {}), '([[0.0088], [0.01373], [0.0082]])\n', (1661, 1694), True, 'import numpy as np\n'), ((1750, 1788), 'numpy.array', 'np.array', (['[[1.0], [0.0], [0.0], [0.0]]'], {}), '([[1.0], [0.0], [0.0], [0.0]])\n', (1758, 1788), True, 'import numpy as np\n'), ((1846, 1881), 'numpy.array', 'np.array', (['[[0.01], [0.05], [-0.03]]'], {}), '([[0.01], [0.05], [-0.03]])\n', (1854, 1881), True, 'import numpy as np\n'), ((2379, 2398), 'time_functions_cpp.MJD2GMST', 'tfcpp.MJD2GMST', (['MJD'], {}), '(MJD)\n', (2393, 2398), True, 'import time_functions_cpp as tfcpp\n'), ((2616, 2640), 'numpy.arange', 'np.arange', (['t0', 'tf', 'tstep'], {}), '(t0, tf, tstep)\n', (2625, 2640), True, 'import numpy as np\n'), ((2709, 2729), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2717, 2729), True, 'import numpy as np\n'), ((2744, 2764), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2752, 2764), True, 'import numpy as np\n'), ((2777, 2797), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2785, 2797), True, 'import numpy as np\n'), ((2813, 2833), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2821, 2833), True, 'import numpy as np\n'), ((2849, 2869), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2857, 2869), True, 'import numpy as np\n'), ((2880, 2900), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2888, 2900), True, 'import numpy as np\n'), ((2906, 2926), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2914, 2926), True, 'import numpy as np\n'), ((2932, 2952), 'numpy.zeros', 'np.zeros', (['(n - 1, 4)'], {}), '((n - 1, 4))\n', (2940, 2952), True, 'import numpy as np\n'), ((2958, 2978), 'numpy.zeros', 'np.zeros', (['(n - 1, 3)'], {}), '((n - 1, 3))\n', (2966, 2978), True, 'import numpy as np\n'), ((3038, 3049), 'time.time', 'time.time', ([], {}), '()\n', (3047, 3049), False, 'import time\n'), ((7144, 7156), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7154, 7156), True, 'import matplotlib.pyplot as plt\n'), ((7157, 7178), 'matplotlib.pyplot.plot', 'plt.plot', (['w_vec[:, 0]'], {}), '(w_vec[:, 0])\n', (7165, 7178), True, 'import matplotlib.pyplot as plt\n'), ((7178, 7199), 'matplotlib.pyplot.plot', 'plt.plot', (['w_vec[:, 1]'], {}), '(w_vec[:, 1])\n', (7186, 7199), True, 'import matplotlib.pyplot as plt\n'), ((7199, 7220), 'matplotlib.pyplot.plot', 'plt.plot', (['w_vec[:, 2]'], {}), '(w_vec[:, 2])\n', (7207, 7220), True, 'import matplotlib.pyplot as plt\n'), ((7220, 7260), 'matplotlib.pyplot.title', 'plt.title', (['"""Angular velocity components"""'], {}), "('Angular velocity components')\n", (7229, 7260), True, 'import matplotlib.pyplot as plt\n'), ((7605, 7617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7615, 7617), True, 'import matplotlib.pyplot as plt\n'), ((7618, 7646), 'matplotlib.pyplot.plot', 'plt.plot', (['B_field_body[:, 0]'], {}), '(B_field_body[:, 0])\n', (7626, 7646), True, 'import matplotlib.pyplot as plt\n'), ((7646, 7674), 'matplotlib.pyplot.plot', 'plt.plot', (['B_field_body[:, 
1]'], {}), '(B_field_body[:, 1])\n', (7654, 7674), True, 'import matplotlib.pyplot as plt\n'), ((7674, 7702), 'matplotlib.pyplot.plot', 'plt.plot', (['B_field_body[:, 2]'], {}), '(B_field_body[:, 2])\n', (7682, 7702), True, 'import matplotlib.pyplot as plt\n'), ((7702, 7743), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetic field components, B"""'], {}), "('Magnetic field components, B')\n", (7711, 7743), True, 'import matplotlib.pyplot as plt\n'), ((7744, 7754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7752, 7754), True, 'import matplotlib.pyplot as plt\n'), ((7808, 7820), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7818, 7820), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7847), 'matplotlib.pyplot.plot', 'plt.plot', (['B_dot_body[:, 0]'], {}), '(B_dot_body[:, 0])\n', (7829, 7847), True, 'import matplotlib.pyplot as plt\n'), ((7847, 7873), 'matplotlib.pyplot.plot', 'plt.plot', (['B_dot_body[:, 1]'], {}), '(B_dot_body[:, 1])\n', (7855, 7873), True, 'import matplotlib.pyplot as plt\n'), ((7873, 7899), 'matplotlib.pyplot.plot', 'plt.plot', (['B_dot_body[:, 2]'], {}), '(B_dot_body[:, 2])\n', (7881, 7899), True, 'import matplotlib.pyplot as plt\n'), ((7899, 7948), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetic field rate of change, B_dot"""'], {}), "('Magnetic field rate of change, B_dot')\n", (7908, 7948), True, 'import matplotlib.pyplot as plt\n'), ((7949, 7959), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7957, 7959), True, 'import matplotlib.pyplot as plt\n'), ((8014, 8026), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8024, 8026), True, 'import matplotlib.pyplot as plt\n'), ((8027, 8058), 'matplotlib.pyplot.plot', 'plt.plot', (['B_field_ECI_vec[:, 0]'], {}), '(B_field_ECI_vec[:, 0])\n', (8035, 8058), True, 'import matplotlib.pyplot as plt\n'), ((8058, 8089), 'matplotlib.pyplot.plot', 'plt.plot', (['B_field_ECI_vec[:, 1]'], {}), '(B_field_ECI_vec[:, 1])\n', (8066, 8089), True, 'import matplotlib.pyplot as plt\n'), ((8089, 8120), 'matplotlib.pyplot.plot', 'plt.plot', (['B_field_ECI_vec[:, 2]'], {}), '(B_field_ECI_vec[:, 2])\n', (8097, 8120), True, 'import matplotlib.pyplot as plt\n'), ((8120, 8151), 'matplotlib.pyplot.title', 'plt.title', (['"""Magnetic field ECI"""'], {}), "('Magnetic field ECI')\n", (8129, 8151), True, 'import matplotlib.pyplot as plt\n'), ((8152, 8162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8160, 8162), True, 'import matplotlib.pyplot as plt\n'), ((8199, 8211), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8209, 8211), True, 'import matplotlib.pyplot as plt\n'), ((8212, 8233), 'matplotlib.pyplot.plot', 'plt.plot', (['q_vec[:, 0]'], {}), '(q_vec[:, 0])\n', (8220, 8233), True, 'import matplotlib.pyplot as plt\n'), ((8233, 8254), 'matplotlib.pyplot.plot', 'plt.plot', (['q_vec[:, 1]'], {}), '(q_vec[:, 1])\n', (8241, 8254), True, 'import matplotlib.pyplot as plt\n'), ((8254, 8275), 'matplotlib.pyplot.plot', 'plt.plot', (['q_vec[:, 2]'], {}), '(q_vec[:, 2])\n', (8262, 8275), True, 'import matplotlib.pyplot as plt\n'), ((8275, 8296), 'matplotlib.pyplot.plot', 'plt.plot', (['q_vec[:, 3]'], {}), '(q_vec[:, 3])\n', (8283, 8296), True, 'import matplotlib.pyplot as plt\n'), ((8296, 8330), 'matplotlib.pyplot.title', 'plt.title', (['"""quaternion components"""'], {}), "('quaternion components')\n", (8305, 8330), True, 'import matplotlib.pyplot as plt\n'), ((8331, 8341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8339, 8341), True, 'import matplotlib.pyplot 
as plt\n'), ((8373, 8385), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8383, 8385), True, 'import matplotlib.pyplot as plt\n'), ((8386, 8407), 'matplotlib.pyplot.plot', 'plt.plot', (['M_vec[:, 0]'], {}), '(M_vec[:, 0])\n', (8394, 8407), True, 'import matplotlib.pyplot as plt\n'), ((8407, 8428), 'matplotlib.pyplot.plot', 'plt.plot', (['M_vec[:, 1]'], {}), '(M_vec[:, 1])\n', (8415, 8428), True, 'import matplotlib.pyplot as plt\n'), ((8428, 8449), 'matplotlib.pyplot.plot', 'plt.plot', (['M_vec[:, 2]'], {}), '(M_vec[:, 2])\n', (8436, 8449), True, 'import matplotlib.pyplot as plt\n'), ((8449, 8479), 'matplotlib.pyplot.title', 'plt.title', (['"""Moment components"""'], {}), "('Moment components')\n", (8458, 8479), True, 'import matplotlib.pyplot as plt\n'), ((8480, 8490), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8488, 8490), True, 'import matplotlib.pyplot as plt\n'), ((8539, 8551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8549, 8551), True, 'import matplotlib.pyplot as plt\n'), ((8611, 8641), 'matplotlib.pyplot.title', 'plt.title', (['"""B_dot convergence"""'], {}), "('B_dot convergence')\n", (8620, 8641), True, 'import matplotlib.pyplot as plt\n'), ((8642, 8662), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Period"""'], {}), "('Period')\n", (8652, 8662), True, 'import matplotlib.pyplot as plt\n'), ((8663, 8706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Norm of angular rate, [rad/s]"""'], {}), "('Norm of angular rate, [rad/s]')\n", (8673, 8706), True, 'import matplotlib.pyplot as plt\n'), ((8707, 8717), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8715, 8717), True, 'import matplotlib.pyplot as plt\n'), ((1969, 1995), 'numpy.concatenate', 'np.concatenate', (['(q_0, w_0)'], {}), '((q_0, w_0))\n', (1983, 1995), True, 'import numpy as np\n'), ((3180, 3231), 'time_functions_cpp.MJD2GMST', 'tfcpp.MJD2GMST', (['(MJD + times[i] / 60.0 / 60.0 / 24.0)'], {}), '(MJD + times[i] / 60.0 / 60.0 / 24.0)\n', (3194, 3231), True, 'import time_functions_cpp as tfcpp\n'), ((3258, 3293), 'orbit_propagation.get_orbit_pos', 'get_orbit_pos', (['TLE', 'epoch', 'times[i]'], {}), '(TLE, epoch, times[i])\n', (3271, 3293), False, 'from orbit_propagation import get_orbit_pos, get_B_field_at_point\n'), ((3334, 3354), 'frame_conversions_cpp.eci2ecef', 'fccpp.eci2ecef', (['GMST'], {}), '(GMST)\n', (3348, 3354), True, 'import frame_conversions_cpp as fccpp\n'), ((3529, 3553), 'frame_conversions_cpp.ecef2enu', 'fccpp.ecef2enu', (['lat', 'lon'], {}), '(lat, lon)\n', (3543, 3553), True, 'import frame_conversions_cpp as fccpp\n'), ((3610, 3652), 'orbit_propagation.get_B_field_at_point', 'get_B_field_at_point', (['positions_ECEF[i, :]'], {}), '(positions_ECEF[i, :])\n', (3630, 3652), False, 'from orbit_propagation import get_orbit_pos, get_B_field_at_point\n'), ((3699, 3724), 'numpy.transpose', 'np.transpose', (['B_field_NED'], {}), '(B_field_NED)\n', (3711, 3724), True, 'import numpy as np\n'), ((3777, 3842), 'numpy.array', 'np.array', (['[[B_field_NED[1]], [B_field_NED[0]], [-B_field_NED[2]]]'], {}), '([[B_field_NED[1]], [B_field_NED[0]], [-B_field_NED[2]]])\n', (3785, 3842), True, 'import numpy as np\n'), ((4417, 4442), 'numpy.transpose', 'np.transpose', (['B_field_ECI'], {}), '(B_field_ECI)\n', (4429, 4442), True, 'import numpy as np\n'), ((4460, 4495), 'euler_cpp.get_inverse_quaternion', 'ecpp.get_inverse_quaternion', (['x[0:4]'], {}), '(x[0:4])\n', (4487, 4495), True, 'import euler_cpp as ecpp\n'), ((4520, 4560), 'euler_cpp.rotate_vec', 
'ecpp.rotate_vec', (['B_field_ECI', 'q_ECI2body'], {}), '(B_field_ECI, q_ECI2body)\n', (4535, 4560), True, 'import euler_cpp as ecpp\n'), ((5844, 5859), 'numpy.transpose', 'np.transpose', (['M'], {}), '(M)\n', (5856, 5859), True, 'import numpy as np\n'), ((5936, 6036), 'scipy.integrate.odeint', 'integrate.odeint', (['ecpp.get_attitude_derivative', 'x', '(times[i], times[i + 1])', '(M, I)'], {'tfirst': '(True)'}), '(ecpp.get_attitude_derivative, x, (times[i], times[i + 1]),\n (M, I), tfirst=True)\n', (5952, 6036), True, 'import scipy.integrate as integrate\n'), ((6175, 6186), 'time.time', 'time.time', ([], {}), '()\n', (6184, 6186), False, 'import time\n'), ((8581, 8610), 'numpy.linalg.norm', 'np.linalg.norm', (['w_vec'], {'axis': '(1)'}), '(w_vec, axis=1)\n', (8595, 8610), True, 'import numpy as np\n'), ((3477, 3511), 'numpy.transpose', 'np.transpose', (['positions_ECEF[i, :]'], {}), '(positions_ECEF[i, :])\n', (3489, 3511), True, 'import numpy as np\n'), ((4017, 4041), 'numpy.transpose', 'np.transpose', (['R_ECEF2ENU'], {}), '(R_ECEF2ENU)\n', (4029, 4041), True, 'import numpy as np\n'), ((4074, 4098), 'numpy.transpose', 'np.transpose', (['R_ECI2ECEF'], {}), '(R_ECI2ECEF)\n', (4086, 4098), True, 'import numpy as np\n'), ((4634, 4670), 'numpy.transpose', 'np.transpose', (['B_field_body[i - 1, :]'], {}), '(B_field_body[i - 1, :])\n', (4646, 4670), True, 'import numpy as np\n'), ((4682, 4714), 'numpy.transpose', 'np.transpose', (['B_field_body[i, :]'], {}), '(B_field_body[i, :])\n', (4694, 4714), True, 'import numpy as np\n'), ((4730, 4761), 'detumble_cpp.get_B_dot', 'dcpp.get_B_dot', (['B_1', 'B_2', 'tstep'], {}), '(B_1, B_2, tstep)\n', (4744, 4761), True, 'import detumble_cpp as dcpp\n'), ((4786, 4805), 'numpy.transpose', 'np.transpose', (['B_dot'], {}), '(B_dot)\n', (4798, 4805), True, 'import numpy as np\n'), ((5785, 5801), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (5793, 5801), True, 'import numpy as np\n'), ((3408, 3441), 'numpy.transpose', 'np.transpose', (['positions_ECI[i, :]'], {}), '(positions_ECI[i, :])\n', (3420, 3441), True, 'import numpy as np\n'), ((5213, 5243), 'numpy.transpose', 'np.transpose', (['B_dot_body[i, :]'], {}), '(B_dot_body[i, :])\n', (5225, 5243), True, 'import numpy as np\n'), ((5314, 5332), 'numpy.squeeze', 'np.squeeze', (['dipole'], {}), '(dipole)\n', (5324, 5332), True, 'import numpy as np\n'), ((5349, 5381), 'numpy.transpose', 'np.transpose', (['B_field_body[i, :]'], {}), '(B_field_body[i, :])\n', (5361, 5381), True, 'import numpy as np\n')] |
#! /usr/bin/python3
# <NAME> 2019
import tkinter as tk
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import re
# Importing mathematical functions and constants individually for UX
from numpy import sin, cos, tan, arcsin, arccos, arctan, log as ln, log10 as log, e, pi
# Converts user input into a mathematical function
def func_parser(f):
    func_str = f.replace(' ', '')  # clean spaces
func_str = func_str.replace('^','**') # clean exponents
func_str = func_str.replace(')(', ')*(') # clean parenthetical multiplication
    for match in reversed(re.compile(r'\d[(\w]|[\w)]\d|\)\w|[xie]\(').findall(func_str)):
i=func_str.find(match)
func_str = func_str[:i+1] + '*' + func_str[i+1:]
if func_str.find('x') == -1:
func_str+='+0*x'
    code = compile(func_str, '<string>', 'eval')  # the parser module was removed in Python 3.10
return lambda x : eval(code)
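# Parsing sketch: the regex pass inserts the multiplication signs users tend
# to omit, so for example
#   field text '2x^2(x+1)'  ->  compiled source '2*x**2*(x+1)'
# and the returned lambda evaluates that expression for scalar or numpy x.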
def graph_sum():
# Reset right frame and get input
global g_frame
g_frame.destroy()
g_frame = tk.Frame(root)
g_frame.pack(side=tk.RIGHT)
b = float(upr_bnd_fld.get())
a = float(lwr_bnd_fld.get())
n = int(sub_int_fld.get())
f = func_parser(func_fld.get())
# Preps data
x_dscr = np.linspace(a,b,n+1)
y_dscr = f(x_dscr)
axis_pad = abs(int((a-b)/8))
x_cont = np.linspace(a - axis_pad, b + axis_pad, 100)
y_cont = f(x_cont)
hg = hg_mtd.get()
dx = (b-a)/n
alignment = 'edge'
method = 'Left'
if hg == 0: # Left Riemann
x_dscr = x_dscr[:-1]
y_dscr = y_dscr[:-1]
elif hg == 1: # Right Riemann
x_dscr = x_dscr[1:]
y_dscr = y_dscr[1:]
method = "Right"
dx = -(b-a)/n
elif hg == 2: # Midpoint
x_dscr = (x_dscr[:-1] + x_dscr[1:])/2
y_dscr = f(x_dscr)
alignment='center'
method = "Midpoint"
# Calculates sum and reports it to user
sum_text.set('Sum = ' + str(sum(map(lambda x : float(x)*abs(dx), y_dscr))))
# Creates figure and sets color
figure = plt.Figure(figsize=(4,5), dpi=125)
figure.patch.set_facecolor(WINDOW_BG)
graph = figure.add_subplot(111)
graph.set_facecolor(FIELD_BG)
# Plots the function, the height generators, and the rectangle estimations
graph.plot(x_cont, y_cont, FUNC_CURVE)
graph.plot(x_cont, [0 for _ in range(100)], 'black')
if a - axis_pad < 0 and b + axis_pad > 0:
graph.plot([0 for _ in range(100)], y_cont, 'black')
graph.plot(x_dscr, y_dscr, '.b', markersize=8)
graph.bar(x_dscr, y_dscr, width=dx, alpha=0.2, align=alignment,edgecolor=RECTANGLES)
chart_type = FigureCanvasTkAgg(figure, g_frame)
chart_type.get_tk_widget().pack(ipadx=50,ipady=200)
graph.set_title(method + ' Riemann Sum with {} Sub-Intervals'.format(n))
# Color Constants
LABEL_TEXT = '#191919'
FIELD_TEXT = '#373737'
FIELD_BG = '#FFFAFF'
FUNC_CURVE = '#0A2463'
RECTANGLES = '#3E92CC'
WINDOW_BG = '#E8EBF0'
# Size Constants
FIELD_WIDTH = 40
PADX = 20
# Font Constants
LABEL_FONT=('MS Sans Serif', '14')
INPUT_FONT=('MS Sans Serif', '12')
# Configure Window
root = tk.Tk()
root.title("Riemann Sum Graphing Calculator")
root.geometry('960x540')
root.pack_propagate(0)
root.configure(background=WINDOW_BG)
dash = tk.Frame(root, bg=WINDOW_BG)
dash.pack(side=tk.LEFT)
g_frame = tk.Frame(root)
g_frame.pack(side=tk.RIGHT)
# Function
tk.Label(dash, text="Function (in terms of x):", fg=LABEL_TEXT, bg=WINDOW_BG, font=LABEL_FONT).pack(anchor=tk.W, padx=PADX)
func_fld = tk.Entry(dash, fg=FIELD_TEXT, bg=FIELD_BG, font=INPUT_FONT, width=FIELD_WIDTH, justify='center')
func_fld.pack(anchor=tk.W, padx=PADX)
# Lower Bound
tk.Label(dash, text="Lower Bound:", fg=LABEL_TEXT, bg=WINDOW_BG, font=LABEL_FONT).pack(anchor=tk.W, padx=PADX)
lwr_bnd_fld = tk.Entry(dash, fg=FIELD_TEXT, bg=FIELD_BG, font=INPUT_FONT, width=FIELD_WIDTH, justify='center')
lwr_bnd_fld.pack(anchor=tk.W, padx=PADX)
# Upper Bound
tk.Label(dash, text="Upper Bound:", fg=LABEL_TEXT, bg=WINDOW_BG, font=LABEL_FONT).pack(anchor=tk.W, padx=PADX)
upr_bnd_fld = tk.Entry(dash, fg=FIELD_TEXT, bg=FIELD_BG, font=INPUT_FONT, width=FIELD_WIDTH, justify='center')
upr_bnd_fld.pack(anchor=tk.W, padx=PADX)
# Sub-Intervals
tk.Label(dash, text="Sub-Intervals (n):", fg=LABEL_TEXT, bg=WINDOW_BG, font=LABEL_FONT).pack(anchor=tk.W, padx=PADX)
sub_int_fld = tk.Entry(dash, fg=FIELD_TEXT, bg=FIELD_BG, font=INPUT_FONT, width=FIELD_WIDTH, justify='center')
sub_int_fld.pack(anchor=tk.W, padx=PADX)
# Height Generation Method
hg_mtd = tk.IntVar()
hg_mtd.set(0)
hg_mtds = ["Left", "Right", "Midpoint"]
tk.Label(dash,
text="Select a height generation method:", fg=LABEL_TEXT, bg=WINDOW_BG, font=LABEL_FONT).pack(anchor=tk.W, padx=PADX)
for i, mtd in enumerate(hg_mtds):
tk.Radiobutton(dash,
text=mtd,
variable=hg_mtd,
value=i,
fg=LABEL_TEXT,
bg=WINDOW_BG,
font=INPUT_FONT).pack(anchor=tk.W, padx=PADX)
# Graph It! Button
tk.Button(dash, text="Graph It!", command=graph_sum, fg=FIELD_BG, bg=FUNC_CURVE, font=LABEL_FONT, width=int(FIELD_WIDTH/2)+2).pack(pady=25)
# Sum
sum_text = tk.StringVar()
sum_text.set('Sum = ')
sum_field = tk.Label(dash, textvariable=sum_text, fg=RECTANGLES, bg=WINDOW_BG, font=LABEL_FONT)
sum_field.pack(anchor=tk.W, padx=PADX)
# Error field
# tk.Label(dash, text="Please enter the required information.").pack()
root.mainloop()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import fire
import logging
from collections import Counter
import pathlib
import pickle
import json
import glob
#import dask.dataframe as dd
from tqdm.auto import tqdm
tqdm.pandas()
#ProgressBar().register()  # ProgressBar comes from dask.diagnostics; disabled along with the dask import above
from util import Helper
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
import os
class cell_migration(Helper):
eps = 0.01
    def __init__(self, config_path=os.path.abspath(os.getcwd())+'/config.json'):
        # NOTE: the model parameters used below (L_, H, W, M, L, Dc, Dn, Qn, C0,
        # Uc, A0, Qcb0, Qcd0, N0, dx, dt, plot_conc, ...) are never assigned in
        # this file; they are assumed to be loaded from config.json, possibly
        # via the Helper base class. A minimal stand-in:
        with open(config_path) as f:
            for key, value in json.load(f).items():
                setattr(self, key, value)
self.V = self.L_*self.H*self.W
self.L1= self.V/(self.M*self.W*self.H)
self.d1 = self.Dc/self.Dn
self.d2 = 0
self.d3 = self.Qn*self.C0*self.L**2/self.Dn
self.e1 = self.Uc*self.L/self.Dc
self.e2 = self.A0*self.N0/self.Dc
self.e3 = self.Qcb0*self.N0*self.C0*self.L**2/self.Dc
self.e4 = self.Qcd0*self.L**2/(self.Dc*self.N0)
self.l_ = self.L/self.L_ #L = L^
self.l1 = self.L1/self.L_
        self.a = int((self.l_+self.l1)/self.dx)  # end of the real tube
        self.b = int(1/self.dt)  # number of time steps for the iteration
        self.e = int(self.l_/self.dx)  # end of the experiment: real + imaginary tube
#concentration of cell
self.c = pd.DataFrame(np.zeros([self.a+1, self.b+1]))
self.c.iloc[:,0] = 0
self.c.iloc[0,1:] = 1
#concentration of nutrient
self.n = pd.DataFrame(np.zeros([self.a+1, self.b+1]))
self.n.iloc[:int(1/self.dx),0] = 0
self.n.iloc[0,:] = 0
self.n.iloc[int(1/self.dx):,:] = 1
def f1(self,i):
f = self.e1*self.dt/(2*self.dx) - self.dt/self.dx**2 - self.e2*self.dt/(4*self.dx**2) \
*(self.n.iloc[:,i].shift(-1) - self.n.iloc[:,i].shift(1))
return f
def g1(self,i):
g = (1+2*self.dt/self.dx**2 - self.e2*self.dt/self.dx**2 * \
(self.n.iloc[:,i].shift(-1) -2*self.n.iloc[:,i] + self.n.iloc[:,i].shift(1)) \
             - self.e3*self.dt*self.n.iloc[:,i]*(1-self.c.iloc[:,i]) + self.e4*self.dt/(self.n.iloc[:,i]+self.eps))
return g
def k1(self,i):
k = (-self.e1*self.dt/(2*self.dx) -self.dt/self.dx**2 + self.e2*self.dt/(4*self.dx**2)\
*(self.n.iloc[:,i].shift(-1) - self.n.iloc[:,i].shift(1)))
return k
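    # f1/g1/k1 above are, respectively, the super-, main- and sub-diagonal
    # coefficient vectors of the tridiagonal system assembled in new_c() below
    # (U = diag(g) + diag(k, -1) + diag(f, 1)), i.e. one implicit time step of
    # the finite-difference scheme for the cell concentration c.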
# x => 1
def f2(self,i):
f =self.e1*self.dt/(2*self.dx) - self.dt/self.dx**2
return f
def g2(self,i):
        f = 1 + 2*self.dt/self.dx**2 + self.e3*self.dt*(1-self.c.iloc[self.e+1:,i]) + self.e4*self.dt/(1+self.eps)
return f
def k2(self,i):
f = -self.e1*self.dt/(2*self.dx) - self.dt/self.dx**2
return f
def n_new(self,i):
phi = self.d3 * self.dx**2 * self.c.values[1:self.e+1,i] + 2
A = (-np.diag(phi) + np.diag(np.ones(self.e-1),1) + np.diag(np.ones(self.e-1),-1))
A[-1] = np.append(np.zeros(self.e-1),1)
return np.linalg.solve(A, np.append(np.zeros(self.e-1),1))
def n_new2(self,i):
phi = self.d3 * self.dx**2 * self.c + 2
A = (-np.diag(phi) + np.diag(np.ones(self.e-1),1) + np.diag(np.ones(self.e-1),-1))
A[-1] = np.append(np.zeros(self.e-1),1)
return np.linalg.solve(A, np.append(np.zeros(self.e-1),1))
def n_new3(self,i):
phi = self.d3 * self.dx**2 * self.c.values[1:self.e+1,i] + 2
A = (-np.diag(phi) + np.diag(np.ones(self.e-1),1) + np.diag(np.ones(self.e-1),-1))
A[-1] = np.append(np.zeros(self.e-1),1)
return A
def new_c(self,j):
f_diag = self.f1(j)
f_diag[self.e] = (self.e1*self.dt/(2*self.dx) - self.dt/self.dx**2 - self.e2*self.dt/(4*self.dx**2)*(self.n.iloc[self.e+1,j] - self.n.iloc[self.e-1,j]))
f_diag[self.e+1:] = self.f2(j)
#g1
g_diag = self.g1(j)
g_diag[self.e] = (1+2*self.dt/self.dx**2 - self.e2*self.dt/self.dx**2\
*(self.n.iloc[self.e+1,j] - 2*self.n.iloc[self.e,j] + self.n.iloc[self.e-1,j]) \
                  - self.e3*self.dt*self.n.iloc[self.e,j]*(1-self.c.iloc[self.e,j]) + self.e4*self.dt/(self.n.iloc[self.e,j]+self.eps))
g_diag[self.e+1:] = self.g2(j)
g_diag[self.a+1] = 1
#k1
k_diag = self.k1(j).shift(1)
k_diag[self.e] = (-self.e1*self.dt/(2*self.dx) -self.dt/self.dx**2 + self.e2*self.dt/(4*self.dx**2)*(self.n.iloc[self.e+1,j] - self.n.iloc[self.e-1,j]))
k_diag[self.e+1:] = self.k2(j)
k_diag[self.a+1] = 0
c_df_test = pd.DataFrame(np.zeros(self.c.shape))
c_df_test = c_df_test + self.c.values
c_test = c_df_test.iloc[1:,j-1].values
c_test[0] = c_test[0] - self.k2(j)
c_test = np.append(c_test,0)
U = np.diag(g_diag.dropna()) + np.diag(k_diag.dropna(),-1) + np.diag(f_diag.dropna(),1)
U[self.a, self.a-2] = -1
return np.linalg.solve(U, c_test)[:-1]
def compute_all(self):
for cq in range(0,self.b):
self.n.iloc[1:self.e+1,cq+1] = self.n_new(cq)[:]
self.c.iloc[1:,cq+1] = self.new_c(cq)[:]
    def compute_all_all(self):
        # run the full simulation, then return the total cell concentration
        self.compute_all()
        return self.c.values.sum()
def avg_channel(self):
return self.c.values[1:self.e,1:self.a].sum() / (self.e*(self.a))
def avg_entering(self):
return self.c.values[self.e,1:self.a].sum() / (self.a)
def plotting_conc(self):
        fig_n = sns.lineplot(x = np.tile(np.arange(0,self.a+1),self.b+1), y = pd.melt(self.n).value, hue = np.repeat(np.arange(0,self.a+1),self.b+1),palette = "Blues")
        fig_c = sns.lineplot(x = np.tile(np.arange(0,self.a+1),self.b+1), y = pd.melt(self.c).value, hue = np.repeat(np.arange(0,self.a+1),self.b+1),palette = "Blues")
plt.xlabel("x")
plt.ylabel("concentration")
plt.title("Cell & Nutrient Concentration")
fig_n.legend_.remove()
plt.plot(np.arange(self.a), np.zeros(self.a)+self.avg_channel(), linestyle='dashed')
plt.plot(np.arange(self.a), np.zeros(self.a)+self.avg_entering(), linestyle='-.')
#plt.text(self.a+self.b-9,self.avg_channel()-0.1, 'Avg # of Cells in a Channel')
#plt.text(self.a+self.b-9,self.avg_entering()-0.1, 'Avg # of Cells entering')
plt.savefig(self.plot_conc)
if __name__ == "__main__":
fire.Fire(cell_migration)
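# Illustrative CLI usage via python-fire, assuming this file is saved as
# cell_migration.py and config.json provides the model parameters:
#   python cell_migration.py compute_all
#   python cell_migration.py avg_channel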
| [
"numpy.linalg.solve",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"fire.Fire",
"matplotlib.pyplot.xlabel",
"os.getcwd",
"numpy.append",
"numpy.diag",
"numpy.zeros",
"matplotlib.pyplot.title",
"tqdm.auto.tqdm.pandas",
"pandas.melt",
"numpy.arange"
] | [((249, 262), 'tqdm.auto.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (260, 262), False, 'from tqdm.auto import tqdm\n'), ((6402, 6427), 'fire.Fire', 'fire.Fire', (['cell_migration'], {}), '(cell_migration)\n', (6411, 6427), False, 'import fire\n'), ((4783, 4803), 'numpy.append', 'np.append', (['c_test', '(0)'], {}), '(c_test, 0)\n', (4792, 4803), True, 'import numpy as np\n'), ((5824, 5839), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (5834, 5839), True, 'import matplotlib.pyplot as plt\n'), ((5848, 5875), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""concentration"""'], {}), "('concentration')\n", (5858, 5875), True, 'import matplotlib.pyplot as plt\n'), ((5884, 5926), 'matplotlib.pyplot.title', 'plt.title', (['"""Cell & Nutrient Concentration"""'], {}), "('Cell & Nutrient Concentration')\n", (5893, 5926), True, 'import matplotlib.pyplot as plt\n'), ((6342, 6369), 'matplotlib.pyplot.savefig', 'plt.savefig', (['self.plot_conc'], {}), '(self.plot_conc)\n', (6353, 6369), True, 'import matplotlib.pyplot as plt\n'), ((1373, 1407), 'numpy.zeros', 'np.zeros', (['[self.a + 1, self.b + 1]'], {}), '([self.a + 1, self.b + 1])\n', (1381, 1407), True, 'import numpy as np\n'), ((1538, 1572), 'numpy.zeros', 'np.zeros', (['[self.a + 1, self.b + 1]'], {}), '([self.a + 1, self.b + 1])\n', (1546, 1572), True, 'import numpy as np\n'), ((2989, 3009), 'numpy.zeros', 'np.zeros', (['(self.e - 1)'], {}), '(self.e - 1)\n', (2997, 3009), True, 'import numpy as np\n'), ((3268, 3288), 'numpy.zeros', 'np.zeros', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3276, 3288), True, 'import numpy as np\n'), ((3568, 3588), 'numpy.zeros', 'np.zeros', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3576, 3588), True, 'import numpy as np\n'), ((4606, 4628), 'numpy.zeros', 'np.zeros', (['self.c.shape'], {}), '(self.c.shape)\n', (4614, 4628), True, 'import numpy as np\n'), ((4949, 4975), 'numpy.linalg.solve', 'np.linalg.solve', (['U', 'c_test'], {}), '(U, c_test)\n', (4964, 4975), True, 'import numpy as np\n'), ((5984, 6001), 'numpy.arange', 'np.arange', (['self.a'], {}), '(self.a)\n', (5993, 6001), True, 'import numpy as np\n'), ((6077, 6094), 'numpy.arange', 'np.arange', (['self.a'], {}), '(self.a)\n', (6086, 6094), True, 'import numpy as np\n'), ((547, 558), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (556, 558), False, 'import os\n'), ((2940, 2959), 'numpy.ones', 'np.ones', (['(self.e - 1)'], {}), '(self.e - 1)\n', (2947, 2959), True, 'import numpy as np\n'), ((3055, 3075), 'numpy.zeros', 'np.zeros', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3063, 3075), True, 'import numpy as np\n'), ((3219, 3238), 'numpy.ones', 'np.ones', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3226, 3238), True, 'import numpy as np\n'), ((3334, 3354), 'numpy.zeros', 'np.zeros', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3342, 3354), True, 'import numpy as np\n'), ((3519, 3538), 'numpy.ones', 'np.ones', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3526, 3538), True, 'import numpy as np\n'), ((6003, 6019), 'numpy.zeros', 'np.zeros', (['self.a'], {}), '(self.a)\n', (6011, 6019), True, 'import numpy as np\n'), ((6096, 6112), 'numpy.zeros', 'np.zeros', (['self.a'], {}), '(self.a)\n', (6104, 6112), True, 'import numpy as np\n'), ((2886, 2898), 'numpy.diag', 'np.diag', (['phi'], {}), '(phi)\n', (2893, 2898), True, 'import numpy as np\n'), ((2909, 2928), 'numpy.ones', 'np.ones', (['(self.e - 1)'], {}), '(self.e - 1)\n', (2916, 2928), True, 'import numpy as np\n'), ((3165, 3177), 'numpy.diag', 'np.diag', (['phi'], {}), '(phi)\n', 
(3172, 3177), True, 'import numpy as np\n'), ((3188, 3207), 'numpy.ones', 'np.ones', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3195, 3207), True, 'import numpy as np\n'), ((3465, 3477), 'numpy.diag', 'np.diag', (['phi'], {}), '(phi)\n', (3472, 3477), True, 'import numpy as np\n'), ((3488, 3507), 'numpy.ones', 'np.ones', (['(self.e - 1)'], {}), '(self.e - 1)\n', (3495, 3507), True, 'import numpy as np\n'), ((5539, 5561), 'numpy.arange', 'np.arange', (['(0)', '(cm.a + 1)'], {}), '(0, cm.a + 1)\n', (5548, 5561), True, 'import numpy as np\n'), ((5572, 5585), 'pandas.melt', 'pd.melt', (['cm.n'], {}), '(cm.n)\n', (5579, 5585), True, 'import pandas as pd\n'), ((5609, 5631), 'numpy.arange', 'np.arange', (['(0)', '(cm.a + 1)'], {}), '(0, cm.a + 1)\n', (5618, 5631), True, 'import numpy as np\n'), ((5698, 5720), 'numpy.arange', 'np.arange', (['(0)', '(cm.a + 1)'], {}), '(0, cm.a + 1)\n', (5707, 5720), True, 'import numpy as np\n'), ((5731, 5744), 'pandas.melt', 'pd.melt', (['cm.c'], {}), '(cm.c)\n', (5738, 5744), True, 'import pandas as pd\n'), ((5768, 5790), 'numpy.arange', 'np.arange', (['(0)', '(cm.a + 1)'], {}), '(0, cm.a + 1)\n', (5777, 5790), True, 'import numpy as np\n')] |
"""
Misc utilities
"""
from datetime import datetime
import logging
import os
import subprocess
import sys
import coloredlogs
import git
import numpy as np
_logger = logging.getLogger()
def print_info(opt, log_dir=None):
""" Logs source code configuration
"""
_logger.info('Command: {}'.format(' '.join(sys.argv)))
# Print commit ID
try:
repo = git.Repo(search_parent_directories=True)
git_sha = repo.head.object.hexsha
git_date = datetime.fromtimestamp(repo.head.object.committed_date).strftime('%Y-%m-%d')
git_message = repo.head.object.message
_logger.info('Source is from Commit {} ({}): {}'.format(git_sha[:8], git_date, git_message.strip()))
# Also create diff file in the log directory
if log_dir is not None:
with open(os.path.join(log_dir, 'compareHead.diff'), 'w') as fid:
subprocess.run(['git', 'diff'], stdout=fid)
except git.exc.InvalidGitRepositoryError:
pass
    # Arguments (accept either a plain dict or an argparse.Namespace-like object)
    opt_items = opt.items() if isinstance(opt, dict) else vars(opt).items()
    arg_str = ['{}: {}'.format(key, value) for key, value in opt_items]
arg_str = ', '.join(arg_str)
_logger.info('Arguments: {}'.format(arg_str))
def prepare_logger(config: dict, output_to_file=True):
"""Creates logging directory, and installs colorlogs
Args:
config (dict): Program configuration, should include 'log_path' field.
output_to_file (bool): Whether to write log to file also
Returns:
logger (logging.Logger)
"""
fmt = '%(asctime)s [%(levelname)s] %(name)s - %(message)s'
datefmt = '%m/%d %H:%M:%S'
logger = logging.getLogger()
coloredlogs.install(level='INFO', logger=logger, fmt=fmt, datefmt=datefmt)
if output_to_file:
log_path = config['log_path']
os.makedirs(log_path, exist_ok=True)
log_formatter = logging.Formatter(fmt, datefmt=datefmt)
file_handler = logging.FileHandler('{}/log.txt'.format(log_path))
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
logger.info('Output and logs will be saved to {}'.format(log_path))
print_info(config, log_path)
else:
print_info(config)
return logger
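# Illustrative usage (assumes a config dict with a 'log_path' entry):
#   logger = prepare_logger({'log_path': './logs'})
#   logger.info('Training started')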
class ReservoirSampler(object):
def __init__(self, max_size):
self._size = max_size
self._all_data = None
self._n = 0
def update(self, *args):
items = args
if self._all_data is None:
self._all_data = [[] for _ in range(len(items))]
elif len(args) != len(self._all_data):
raise AssertionError('Number of items must be consistent with previous calls')
k = list(items[0].keys())[0]
batch_size = items[0][k].shape[0]
# Save images from a random data batch using reservoir sampling
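        # Classic reservoir sampling: the first `max_size` items fill the
        # reservoir; afterwards item n replaces a uniformly chosen slot with
        # probability max_size/n, so every item seen so far is retained with
        # equal probability.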
for b in range(batch_size):
self._n += 1
if self._n <= self._size:
for i in range(len(items)):
self._all_data[i].append({k: items[i][k][b] for k in items[i]})
else:
r = np.random.randint(self._n)
if r < self._size:
for i in range(len(items)):
self._all_data[i][r] = {k: items[i][k][b] for k in items[i]}
def get_samples(self):
samples = []
for data in self._all_data:
samples.append({k: [d[k] for d in data] for k in data[0].keys()})
return samples | [
"logging.getLogger",
"datetime.datetime.fromtimestamp",
"os.makedirs",
"coloredlogs.install",
"logging.Formatter",
"subprocess.run",
"os.path.join",
"numpy.random.randint",
"git.Repo"
] | [((200, 219), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (217, 219), False, 'import logging\n'), ((1641, 1660), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1658, 1660), False, 'import logging\n'), ((1665, 1739), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""', 'logger': 'logger', 'fmt': 'fmt', 'datefmt': 'datefmt'}), "(level='INFO', logger=logger, fmt=fmt, datefmt=datefmt)\n", (1684, 1739), False, 'import coloredlogs\n'), ((410, 450), 'git.Repo', 'git.Repo', ([], {'search_parent_directories': '(True)'}), '(search_parent_directories=True)\n', (418, 450), False, 'import git\n'), ((1809, 1845), 'os.makedirs', 'os.makedirs', (['log_path'], {'exist_ok': '(True)'}), '(log_path, exist_ok=True)\n', (1820, 1845), False, 'import os\n'), ((1870, 1909), 'logging.Formatter', 'logging.Formatter', (['fmt'], {'datefmt': 'datefmt'}), '(fmt, datefmt=datefmt)\n', (1887, 1909), False, 'import logging\n'), ((512, 567), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['repo.head.object.committed_date'], {}), '(repo.head.object.committed_date)\n', (534, 567), False, 'from datetime import datetime\n'), ((925, 968), 'subprocess.run', 'subprocess.run', (["['git', 'diff']"], {'stdout': 'fid'}), "(['git', 'diff'], stdout=fid)\n", (939, 968), False, 'import subprocess\n'), ((3094, 3120), 'numpy.random.randint', 'np.random.randint', (['self._n'], {}), '(self._n)\n', (3111, 3120), True, 'import numpy as np\n'), ((853, 894), 'os.path.join', 'os.path.join', (['log_dir', '"""compareHead.diff"""'], {}), "(log_dir, 'compareHead.diff')\n", (865, 894), False, 'import os\n')] |
import os
import h5py
import pandas as pd
import numpy as np
import torchvision.transforms as transforms
import torch
from torch.utils.data import Dataset
from .word_utils import Corpus
from PIL import Image
import torch.nn.functional as F
from collections.abc import Iterable
from torch.autograd import Variable
class ResizeAnnotation:
"""Resize the largest of the sides of the annotation to a given size"""
def __init__(self, size):
if not isinstance(size, (int, Iterable)):
raise TypeError("Got inappropriate size arg: {}".format(size))
self.size = size
def __call__(self, img):
im_h, im_w = img.shape[-2:]
scale_h, scale_w = self.size / im_h, self.size / im_w
resized_h = int(np.round(im_h * scale_h))
resized_w = int(np.round(im_w * scale_w))
out = (
F.interpolate(
Variable(img).unsqueeze(0).unsqueeze(0),
size=(resized_h, resized_w),
mode="bilinear",
align_corners=True,
)
.squeeze()
.data
)
return out
def load_rgb_frames(image_dir, vid, start, num, margin=1, im_size=512, tv_transform=None):
frames = []
for i in range(start, start+num, margin):
img_path = os.path.join(image_dir, vid, '{:0>5d}.jpg'.format(i))
img = Image.open(img_path).convert('RGB')
img = tv_transform(img) # [3, h, w]
frames.append(img.numpy())
frames_array = np.asarray(frames, dtype=np.float32) # [nf, 3, h, w]
return torch.from_numpy(frames_array.transpose([1, 0, 2, 3]))
class PairData():
def __init__(self, pd_data):
self.frame_path = pd_data[0]
self.size = pd_data[1:3].astype(int)
self.instance_id = int(pd_data[3])
self.video_id, self.frame_id = self.frame_path.split('/')
self.frame_id = int(self.frame_id)
self.txt = pd_data[4]
class VideoTextDataset(Dataset):
def __init__(self, opt, mode='train'):
super(VideoTextDataset, self).__init__()
print('loading dataset')
if mode == 'train':
fr_name = pd.read_csv('{}/datasets/{}/preprocessed/train.txt'.format(opt.project_root, opt.dataset), header=None).T
# (n, 2)
else:
fr_name = pd.read_csv('{}/datasets/{}/preprocessed/test.txt'.format(opt.project_root, opt.dataset), header=None).T
# (n, 2)
# parsing pd data into PairData List
self._parse_list(fr_name)
self.corpus = Corpus('{}/word_embedding'.format(opt.project_root))
# change frame_root here
self.frame_root = 'A2D_path/Release/frames_cv2'
self.mask_root = 'A2D_path/a2d_annotation_with_instances'
self.im_size = opt.resize
self.opt = opt
to_tensor = transforms.ToTensor()
resize = transforms.Resize((opt.resize, opt.resize))
self.transform = transforms.Compose([resize, to_tensor])
self.transform_mask = transforms.Compose([ResizeAnnotation(opt.resize)])
def _parse_list(self, pd_data):
self.pair_list = []
for col in pd_data:
self.pair_list.append(PairData(pd_data[col].values))
def load_image(self, item, im_size, single=False):
record = self.pair_list[item]
video_size = torch.from_numpy(record.size)
video_id = record.video_id
frame_id = record.frame_id
if single:
frames = load_rgb_frames(self.frame_root, video_id, frame_id, 1, 1, im_size, self.transform)
return frames, video_size
start_idx = max(1, frame_id - 8)
# frames: [3, nf, h, w]
frames = load_rgb_frames(self.frame_root, video_id, start_idx, 16, self.opt.data_margin, im_size, self.transform)
return frames, video_size
def load_mask(self, item):
record = self.pair_list[item]
video_id = record.video_id
frame_id = record.frame_id
instance_id = record.instance_id
with h5py.File(os.path.join(self.mask_root, video_id, '{:0>5d}.h5'.format(frame_id)), 'r') as f:
instances = f['instance'][()]
idx = np.where(instances==instance_id)[0][0]
if instances.shape[0] == 1:
mask = f['reMask'][()].transpose(1, 0)
else:
mask = f['reMask'][()][idx].transpose(1, 0)
mask = self.transform_mask(torch.from_numpy(mask).float())
mask[mask > 0] = 1
return mask
def __getitem__(self, item):
video, video_size = self.load_image(item, self.im_size, single=self.opt.single_im)
mask = self.load_mask(item)
txt, txt_mask = self.corpus.tokenize(self.pair_list[item].txt, self.opt.sentence_length)
return video_size, video, txt, txt_mask, mask
def __len__(self):
return len(self.pair_list)
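# Illustrative usage (assumes an `opt` namespace with the fields referenced
# above: project_root, dataset, resize, data_margin, sentence_length, single_im):
#   dataset = VideoTextDataset(opt, mode='train')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)
#   video_size, video, txt, txt_mask, mask = next(iter(loader))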
| [
"PIL.Image.open",
"numpy.where",
"numpy.asarray",
"torch.from_numpy",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torchvision.transforms.Compose",
"numpy.round"
] | [((1498, 1534), 'numpy.asarray', 'np.asarray', (['frames'], {'dtype': 'np.float32'}), '(frames, dtype=np.float32)\n', (1508, 1534), True, 'import numpy as np\n'), ((2823, 2844), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2842, 2844), True, 'import torchvision.transforms as transforms\n'), ((2862, 2905), 'torchvision.transforms.Resize', 'transforms.Resize', (['(opt.resize, opt.resize)'], {}), '((opt.resize, opt.resize))\n', (2879, 2905), True, 'import torchvision.transforms as transforms\n'), ((2932, 2971), 'torchvision.transforms.Compose', 'transforms.Compose', (['[resize, to_tensor]'], {}), '([resize, to_tensor])\n', (2950, 2971), True, 'import torchvision.transforms as transforms\n'), ((3326, 3355), 'torch.from_numpy', 'torch.from_numpy', (['record.size'], {}), '(record.size)\n', (3342, 3355), False, 'import torch\n'), ((747, 771), 'numpy.round', 'np.round', (['(im_h * scale_h)'], {}), '(im_h * scale_h)\n', (755, 771), True, 'import numpy as np\n'), ((797, 821), 'numpy.round', 'np.round', (['(im_w * scale_w)'], {}), '(im_w * scale_w)\n', (805, 821), True, 'import numpy as np\n'), ((1363, 1383), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1373, 1383), False, 'from PIL import Image\n'), ((4163, 4197), 'numpy.where', 'np.where', (['(instances == instance_id)'], {}), '(instances == instance_id)\n', (4171, 4197), True, 'import numpy as np\n'), ((4410, 4432), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (4426, 4432), False, 'import torch\n'), ((882, 895), 'torch.autograd.Variable', 'Variable', (['img'], {}), '(img)\n', (890, 895), False, 'from torch.autograd import Variable\n')] |
import json
import backoff
import numpy as np
import requests
from aws_xray_sdk.core import xray_recorder
from ..config import config
from ..result import Result
from ..tasks import Task
class GeneExpression(Task):
def _format_result(self, result):
        # Wrap the computed dict in a Result object.
return Result(result)
@xray_recorder.capture("GeneExpression.compute")
@backoff.on_exception(
backoff.expo, requests.exceptions.RequestException, max_time=30
)
def compute(self):
# the genes to get expression data for
genes = self.task_def["genes"]
# whether to perform feature scaling (defaults to True)
# In r we currently use the data matrix of the Seurat object.
# scale = self.task_def.get("scale", True)
request = {"genes": genes}
r = requests.post(
f"{config.R_WORKER_URL}/v0/runExpression",
headers={"content-type": "application/json"},
data=json.dumps(request),
)
# raise an exception if an HTTPError if one occurred because otherwise r.json() will fail
r.raise_for_status()
resultR = r.json()
truncatedR = resultR["truncatedExpression"]
resultR = resultR["rawExpression"]
result = {}
if not len(resultR):
result[genes[0]] = {
"error": 404,
"message": "Gene {} not found!".format(genes[0]),
}
else:
for gene in resultR.keys():
view = resultR[gene]
# can't do summary stats on list with None's
# casting to np array replaces None with np.nan
                viewnp = np.array(view, dtype=float)
# This is not necessary and is also costly, but I leave it commented as a reminder
# that this object has integer zeros and floating point for n!=0.
# expression = [float(item) for item in view]
mean = float(np.nanmean(viewnp))
stdev = float(np.nanstd(viewnp))
result[gene] = {"truncatedExpression": {}, "rawExpression": {}}
result[gene]["rawExpression"] = {
"mean": mean,
"stdev": stdev,
"expression": view,
}
viewTr = truncatedR[gene]
                viewnpTr = np.array(viewTr, dtype=float)
minimum = float(np.nanmin(viewnpTr))
maximum = float(np.nanmax(viewnpTr))
result[gene]["truncatedExpression"] = {
"min": minimum,
"max": maximum,
"expression": viewTr,
}
return self._format_result(result)
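# Illustrative shape of the computed result, per requested gene (field names
# as constructed above):
#   {"GENE": {"rawExpression": {"mean": ..., "stdev": ..., "expression": [...]},
#             "truncatedExpression": {"min": ..., "max": ..., "expression": [...]}}}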
| [
"numpy.nanstd",
"json.dumps",
"backoff.on_exception",
"aws_xray_sdk.core.xray_recorder.capture",
"numpy.array",
"numpy.nanmean",
"numpy.nanmax",
"numpy.nanmin"
] | [((339, 386), 'aws_xray_sdk.core.xray_recorder.capture', 'xray_recorder.capture', (['"""GeneExpression.compute"""'], {}), "('GeneExpression.compute')\n", (360, 386), False, 'from aws_xray_sdk.core import xray_recorder\n'), ((392, 481), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', 'requests.exceptions.RequestException'], {'max_time': '(30)'}), '(backoff.expo, requests.exceptions.RequestException,\n max_time=30)\n', (412, 481), False, 'import backoff\n'), ((978, 997), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (988, 997), False, 'import json\n'), ((1693, 1723), 'numpy.array', 'np.array', (['view'], {'dtype': 'np.float'}), '(view, dtype=np.float)\n', (1701, 1723), True, 'import numpy as np\n'), ((2393, 2425), 'numpy.array', 'np.array', (['viewTr'], {'dtype': 'np.float'}), '(viewTr, dtype=np.float)\n', (2401, 2425), True, 'import numpy as np\n'), ((1996, 2014), 'numpy.nanmean', 'np.nanmean', (['viewnp'], {}), '(viewnp)\n', (2006, 2014), True, 'import numpy as np\n'), ((2046, 2063), 'numpy.nanstd', 'np.nanstd', (['viewnp'], {}), '(viewnp)\n', (2055, 2063), True, 'import numpy as np\n'), ((2458, 2477), 'numpy.nanmin', 'np.nanmin', (['viewnpTr'], {}), '(viewnpTr)\n', (2467, 2477), True, 'import numpy as np\n'), ((2511, 2530), 'numpy.nanmax', 'np.nanmax', (['viewnpTr'], {}), '(viewnpTr)\n', (2520, 2530), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.utils.data import Dataset
class DatasetNiftySampler(Dataset):
"""
A simple adapter
converting NiftyNet sampler's output into PyTorch Dataset properties
"""
def __init__(self, sampler):
super(DatasetNiftySampler, self).__init__()
self.sampler = sampler
def __getitem__(self, index):
data = self.sampler(idx=index)
# Transpose to PyTorch format
image = np.transpose(data['image'], (0, 5, 1, 2, 3, 4))
label = np.transpose(data['label'], (0, 5, 1, 2, 3, 4))
image = torch.from_numpy(image).float()
label = torch.from_numpy(label).float()
return image, label
def __len__(self):
return len(self.sampler.reader.output_list)
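# Illustrative usage (assumes a configured NiftyNet window sampler):
#   dataset = DatasetNiftySampler(sampler)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True)
#   image, label = dataset[0]  # float tensors with the channel axis moved to dim 1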
| [
"numpy.transpose",
"torch.from_numpy"
] | [((462, 509), 'numpy.transpose', 'np.transpose', (["data['image']", '(0, 5, 1, 2, 3, 4)'], {}), "(data['image'], (0, 5, 1, 2, 3, 4))\n", (474, 509), True, 'import numpy as np\n'), ((526, 573), 'numpy.transpose', 'np.transpose', (["data['label']", '(0, 5, 1, 2, 3, 4)'], {}), "(data['label'], (0, 5, 1, 2, 3, 4))\n", (538, 573), True, 'import numpy as np\n'), ((591, 614), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (607, 614), False, 'import torch\n'), ((639, 662), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (655, 662), False, 'import torch\n')] |
import numpy as np
def select_new_feature_indices(random_state, x, n_features):
if random_state is None:
lf_idxs = np.random.permutation(x.shape[1])[:n_features]
rf_idxs = np.random.permutation(x.shape[1])[:n_features]
else:
lf_idxs = np.random.RandomState(seed=random_state).permutation(x.shape[1])[:n_features]
rf_idxs = np.random.RandomState(seed=random_state).permutation(x.shape[1])[:n_features]
return lf_idxs, rf_idxs
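# Example (illustrative): draw 3 reproducible feature indices per side from a
# matrix with 10 columns:
#   lf, rf = select_new_feature_indices(random_state=42, x=np.zeros((100, 10)), n_features=3)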
| [
"numpy.random.RandomState",
"numpy.random.permutation"
] | [((130, 163), 'numpy.random.permutation', 'np.random.permutation', (['x.shape[1]'], {}), '(x.shape[1])\n', (151, 163), True, 'import numpy as np\n'), ((195, 228), 'numpy.random.permutation', 'np.random.permutation', (['x.shape[1]'], {}), '(x.shape[1])\n', (216, 228), True, 'import numpy as np\n'), ((270, 310), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (291, 310), True, 'import numpy as np\n'), ((366, 406), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (387, 406), True, 'import numpy as np\n')] |
import json
import pyautogui
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from skimage.metrics import structural_similarity as ssim
# for 1920,1080 resolution:
# first
# 479,927
# 672,927
# 479,1071
# 672,1071
# second
# 680,927
# 873,927
# 680,1071
# 873,1071
# third
# 881,927
# 1074,927
# 881,1071
# 1074,1071
# fourth
# 1083,927
# 1276,927
# 1083,1071
# 1276,1071
# fifth
# 1284,927
# 1477,927
# 1284,1071
# 1477,1071
def showimg(img):
imageasarray = np.array(img)
first = imageasarray[927:1071, 479:672, :]
firstasimage = cv.cvtColor(first, cv.COLOR_RGB2BGR)
second = imageasarray[927:1071, 680:873, :]
secondasimage = cv.cvtColor(second, cv.COLOR_RGB2BGR)
third = imageasarray[927:1071, 881:1074, :]
thirdasimage = cv.cvtColor(third, cv.COLOR_RGB2BGR)
fourth = imageasarray[927:1071, 1083:1276, :]
fourthasimage = cv.cvtColor(fourth, cv.COLOR_RGB2BGR)
fifth = imageasarray[927:1071, 1284:1477, :]
fifthasimage = cv.cvtColor(fifth, cv.COLOR_RGB2BGR)
DATA = '../TFT/DATA2/'
# cv.imshow('sliced image', firstasimage)
cv.imwrite(DATA + 'firstchamp.png', firstasimage)
# cv.waitKey()
# cv.imshow('sliced image', secondasimage)
cv.imwrite(DATA + 'secondchamp.png', secondasimage)
# cv.waitKey()
# cv.imshow('sliced image', thirdasimage)
cv.imwrite(DATA + 'thirdchamp.png', thirdasimage)
# cv.waitKey()
# cv.imshow('sliced image', fourthasimage)
cv.imwrite(DATA + 'fourthchamp.png', fourthasimage)
# cv.waitKey()
# cv.imshow('sliced image', fifthasimage)
cv.imwrite(DATA + 'fifthchamp.png', fifthasimage)
# cv.waitKey()
#features1 = get_feature_points(firstasimage)
return [first, second, third, fourth, fifth]
def imagetoData():
# take a screenshot of the screen and store it in memory, then
# convert the PIL/Pillow image to an OpenCV compatible NumPy array
# and finally write the image to disk
image = pyautogui.screenshot()
print(np.array(image).shape)
return showimg(image)
# image = cv.cvtColor(np.array(image), cv.COLOR_RGB2BGR)
# print(image)
# return image
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
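# Worked example (illustrative): two 2x2 grayscale images that differ by 1 at
# every pixel give err = (4 * 1**2) / (2 * 2) = 1.0; identical images give 0.0.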
def compare_images(imageA, imageB, title):
# compute the mean squared error and structural similarity
# index for the images
m = mse(imageA, imageB)
    s = ssim(imageA, imageB, multichannel=True)
# setup the figure
fig = plt.figure(title)
plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
# show first image
ax = fig.add_subplot(1, 2, 1)
plt.imshow(imageA, cmap=plt.cm.gray)
plt.axis("off")
# show the second image
ax = fig.add_subplot(1, 2, 2)
plt.imshow(imageB, cmap=plt.cm.gray)
plt.axis("off")
# show the images
plt.show()
if __name__ == "__main__":
images = imagetoData()
image2 = cv.imread("../TFT/DATA/all/shen.png")
for img in images:
compare_images(cv.cvtColor(img, cv.COLOR_RGB2BGR), image2, "")
| [
"matplotlib.pyplot.imshow",
"cv2.imwrite",
"skimage.metrics.structural_similarity",
"pyautogui.screenshot",
"numpy.array",
"matplotlib.pyplot.figure",
"cv2.cvtColor",
"matplotlib.pyplot.axis",
"cv2.imread",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show"
] | [((534, 547), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (542, 547), True, 'import numpy as np\n'), ((615, 651), 'cv2.cvtColor', 'cv.cvtColor', (['first', 'cv.COLOR_RGB2BGR'], {}), '(first, cv.COLOR_RGB2BGR)\n', (626, 651), True, 'import cv2 as cv\n'), ((720, 757), 'cv2.cvtColor', 'cv.cvtColor', (['second', 'cv.COLOR_RGB2BGR'], {}), '(second, cv.COLOR_RGB2BGR)\n', (731, 757), True, 'import cv2 as cv\n'), ((825, 861), 'cv2.cvtColor', 'cv.cvtColor', (['third', 'cv.COLOR_RGB2BGR'], {}), '(third, cv.COLOR_RGB2BGR)\n', (836, 861), True, 'import cv2 as cv\n'), ((932, 969), 'cv2.cvtColor', 'cv.cvtColor', (['fourth', 'cv.COLOR_RGB2BGR'], {}), '(fourth, cv.COLOR_RGB2BGR)\n', (943, 969), True, 'import cv2 as cv\n'), ((1038, 1074), 'cv2.cvtColor', 'cv.cvtColor', (['fifth', 'cv.COLOR_RGB2BGR'], {}), '(fifth, cv.COLOR_RGB2BGR)\n', (1049, 1074), True, 'import cv2 as cv\n'), ((1153, 1202), 'cv2.imwrite', 'cv.imwrite', (["(DATA + 'firstchamp.png')", 'firstasimage'], {}), "(DATA + 'firstchamp.png', firstasimage)\n", (1163, 1202), True, 'import cv2 as cv\n'), ((1273, 1324), 'cv2.imwrite', 'cv.imwrite', (["(DATA + 'secondchamp.png')", 'secondasimage'], {}), "(DATA + 'secondchamp.png', secondasimage)\n", (1283, 1324), True, 'import cv2 as cv\n'), ((1394, 1443), 'cv2.imwrite', 'cv.imwrite', (["(DATA + 'thirdchamp.png')", 'thirdasimage'], {}), "(DATA + 'thirdchamp.png', thirdasimage)\n", (1404, 1443), True, 'import cv2 as cv\n'), ((1514, 1565), 'cv2.imwrite', 'cv.imwrite', (["(DATA + 'fourthchamp.png')", 'fourthasimage'], {}), "(DATA + 'fourthchamp.png', fourthasimage)\n", (1524, 1565), True, 'import cv2 as cv\n'), ((1635, 1684), 'cv2.imwrite', 'cv.imwrite', (["(DATA + 'fifthchamp.png')", 'fifthasimage'], {}), "(DATA + 'fifthchamp.png', fifthasimage)\n", (1645, 1684), True, 'import cv2 as cv\n'), ((2017, 2039), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {}), '()\n', (2037, 2039), False, 'import pyautogui\n'), ((2801, 2840), 'skimage.metrics.structural_similarity', 'ssim', (['imageA', 'imageB'], {'multichannel': '(True)'}), '(imageA, imageB, multichannel=True)\n', (2805, 2840), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((2873, 2890), 'matplotlib.pyplot.figure', 'plt.figure', (['title'], {}), '(title)\n', (2883, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2895, 2941), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('MSE: %.2f, SSIM: %.2f' % (m, s))"], {}), "('MSE: %.2f, SSIM: %.2f' % (m, s))\n", (2907, 2941), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3039), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imageA'], {'cmap': 'plt.cm.gray'}), '(imageA, cmap=plt.cm.gray)\n', (3013, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3059), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3052, 3059), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3162), 'matplotlib.pyplot.imshow', 'plt.imshow', (['imageB'], {'cmap': 'plt.cm.gray'}), '(imageB, cmap=plt.cm.gray)\n', (3136, 3162), True, 'import matplotlib.pyplot as plt\n'), ((3167, 3182), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3175, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3209, 3219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3217, 3219), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3325), 'cv2.imread', 'cv.imread', (['"""../TFT/DATA/all/shen.png"""'], {}), "('../TFT/DATA/all/shen.png')\n", (3297, 3325), True, 'import cv2 as cv\n'), ((2050, 2065), 'numpy.array', 'np.array', (['image'], {}), 
'(image)\n', (2058, 2065), True, 'import numpy as np\n'), ((3373, 3407), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2BGR'], {}), '(img, cv.COLOR_RGB2BGR)\n', (3384, 3407), True, 'import cv2 as cv\n')] |
import pandas as pd
import numpy as np
import os
from PyQt5.QtCore import QAbstractTableModel, Qt, QVariant
import cubetools.config as cfg
class Model():
'''Creates data for the UI and export'''
def __init__(self):
super().__init__()
self.valid_cncfilelist = self.check_cnc_filepaths()
    def check_cnc_filepaths(self) -> dict:
'''Returns checked filepaths from config.py
Returns:
dict: {NAME:PATH} format of the checked machine entries from cfg'''
filelist = {}
if cfg.path_to_cnc:
for name, dir_path in cfg.path_to_cnc.items():
dir_path_string = str(dir_path)
if (os.path.isfile(dir_path_string + '/tool.t') and
os.path.isfile(dir_path_string + '/tool_p.tch')):
filelist[name] = dir_path_string
return filelist
    def parse_headers(self, header_line: str) -> dict:
'''Gets headers indexes to define column widths
Parameters:
header_line(str): raw readline from file
Returns:
colspecs_dict(dict): columns {<COL_NAME>: (i_start, i_end)} '''
colspecs_dict = {}
column_names = header_line.split()
col_idx = []
prev_i = 0
if set(["T"]).issubset(column_names):
for i, k in enumerate(header_line):
if k != " ":
if (i != prev_i+1):
col_idx.append(i)
prev_i = i
col_idx.append(len(header_line)+1)
colspecs_dict = {name: (col_idx[i], col_idx[i+1])
for (name, i)
in zip(column_names, range(len(col_idx)-1))}
return colspecs_dict
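    # Illustrative example: for the header line "T  NAME      L" the parser
    # yields fixed-width column spans such as
    #   {"T": (0, 3), "NAME": (3, 13), "L": (13, 15)}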
    def read_tooltable(self, toolt_cncfile: str) -> pd.DataFrame:
'''Reads tool-file into pandas-dataframe
Parameters:
toolt_cncfile(file): full path fwf(fixed-width-field) file
Returns:
dftools(dataframe): pandas dataframe with all cols/rows'''
with open(toolt_cncfile) as data_toolt:
table_toolt = data_toolt.readlines()
header_line = table_toolt[1]
headers = self.parse_headers(header_line)
if headers != dict():
dftools = pd.read_fwf(toolt_cncfile,
skiprows=2, skipfooter=1,
names=headers.keys(),
colspecs=list(headers.values()),
index_col=None)
dftools = dftools.dropna(subset=["T"])
dftools = dftools.astype({"T": int})
return dftools
else:
return pd.DataFrame()
def export_tooltable(self,
machines_selected: list,
fileformats_selected: set,
path_field: str):
'''Exports pandas-tables in various formats
Parameters:
path_field(str): path for the exported files
machines_selected(list): list of names to search for in the cfg
fileformats_selected(set): set of extensions to export
Returns:
saved files in formats from [fileformats_selected] in folder [pathfield]'''
self.fileformats_allowed = {'xlsx', 'csv', 'json'}
self.fileformats = (fileformats_selected & self.fileformats_allowed)
self.machines_selected = machines_selected
self.ui_path_field = str(path_field)
self.machinelist = dict([(name, path) for name, path
in self.valid_cncfilelist.items()
if name in self.machines_selected])
for mach_name, dir_path in self.machinelist.items():
toolt = self.read_tooltable(dir_path + "/tool.t")
toolpt = self.read_tooltable(dir_path + "/tool_p.tch")
file_to_save = self.ui_path_field + "/" + mach_name
for ext in self.fileformats:
if ext == "xlsx":
toolt.to_excel(file_to_save + '.xlsx',
index=False)
toolpt.to_excel(file_to_save + '_magazine.xlsx',
index=False)
if ext == "csv":
toolt.to_csv(file_to_save + '.csv',
index=False)
toolpt.to_csv(file_to_save + '_magazine.csv',
index=False)
if ext == "json":
toolt.to_json(file_to_save + '.json')
toolpt.to_json(file_to_save + '_magazine.json')
class ToolSummaryTable(QAbstractTableModel, Model):
'''Provides datatable for the preview'''
def __init__(self, machine_selected):
super().__init__()
mainmodel = Model()
self.machine_name = machine_selected
if self.machine_name in self.valid_cncfilelist.keys():
self.tool_file = self.valid_cncfilelist[self.machine_name]
            self.tooldf = mainmodel.read_tooltable(self.tool_file + "/tool.t")
            self.magazindf = mainmodel.read_tooltable(self.tool_file + "/tool_p.tch")
self.summarydf = self.tooldf.loc[self.tooldf['T'].isin(self.magazindf['T'])]
self.summarydf = self.summarydf[['T', 'NAME', 'DOC', "L"]]
self.summarydf = self.assign_toolstatus(self.summarydf)
def rowCount(self, index):
return self.summarydf.shape[0]
def columnCount(self, index):
return self.summarydf.shape[1]
def data(self, index, role):
if role != Qt.DisplayRole:
return QVariant()
return str(self.summarydf.iloc[index.row(), index.column()])
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole or orientation != Qt.Horizontal:
return QVariant()
return self.summarydf.columns[section]
def assign_toolstatus(self, summarydf):
refdb_df = cfg.refdb_sample
summarydf['L_NOM'] = summarydf['T'].map(refdb_df.set_index('T')['L_NOM'])
summarydf['Status'] = np.where(summarydf['L'] > summarydf['L_NOM'],
'Check is failed',
'Tool is OK')
        summarydf.loc[pd.isna(summarydf['L_NOM']), 'Status'] = \
'Not checked'
return summarydf
| [
"PyQt5.QtCore.QVariant",
"numpy.where",
"os.path.isfile",
"cubetools.config.path_to_cnc.items",
"pandas.DataFrame",
"pandas.isna"
] | [((1816, 1830), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1828, 1830), True, 'import pandas as pd\n'), ((6133, 6211), 'numpy.where', 'np.where', (["(summarydf['L'] > summarydf['L_NOM'])", '"""Check is failed"""', '"""Tool is OK"""'], {}), "(summarydf['L'] > summarydf['L_NOM'], 'Check is failed', 'Tool is OK')\n", (6141, 6211), True, 'import numpy as np\n'), ((589, 612), 'cubetools.config.path_to_cnc.items', 'cfg.path_to_cnc.items', ([], {}), '()\n', (610, 612), True, 'import cubetools.config as cfg\n'), ((2722, 2736), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2734, 2736), True, 'import pandas as pd\n'), ((5661, 5671), 'PyQt5.QtCore.QVariant', 'QVariant', ([], {}), '()\n', (5669, 5671), False, 'from PyQt5.QtCore import QAbstractTableModel, Qt, QVariant\n'), ((5882, 5892), 'PyQt5.QtCore.QVariant', 'QVariant', ([], {}), '()\n', (5890, 5892), False, 'from PyQt5.QtCore import QAbstractTableModel, Qt, QVariant\n'), ((682, 725), 'os.path.isfile', 'os.path.isfile', (["(dir_path_string + '/tool.t')"], {}), "(dir_path_string + '/tool.t')\n", (696, 725), False, 'import os\n'), ((749, 796), 'os.path.isfile', 'os.path.isfile', (["(dir_path_string + '/tool_p.tch')"], {}), "(dir_path_string + '/tool_p.tch')\n", (763, 796), False, 'import os\n'), ((6312, 6339), 'pandas.isna', 'pd.isna', (["summarydf['L_NOM']"], {}), "(summarydf['L_NOM'])\n", (6319, 6339), True, 'import pandas as pd\n')] |
import logging
import os
import sys
import time
import sklearn.cluster
import cv2
import numpy as np
import itertools
import random
from threading import Thread
from config.config import *
from sensors.pipeline import Pipeline
from utils.functions import overrides, get_class_name, current_time_millis, deprecated
from scipy.interpolate import interp1d
if os.uname().machine == 'armv7l':  # probably running on a Raspberry Pi
import picamera
import picamera.array
else:
picamera = None
# create and setup the camera object
if picamera is None or USE_USB_CAMERA:
    camera = cv2.VideoCapture(0)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_RESOLUTION[0])
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_RESOLUTION[1])
# camera.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0)
# camera.set(cv2.CAP_PROP_EXPOSURE, .0001)
# camera.set(cv2.CAP_PROP_AUTOFOCUS, 0)
# camera.set(cv2.CAP_PROP_GAIN, 1)
# camera.set(cv2.CAP_PROP_BACKLIGHT, 100)
# camera.set(cv2.CAP_PROP_SETTINGS, 1)
picamera = None
else:
camera = picamera.PiCamera()
# camera.resolution = PYCAMERA_RESOLUTION
camera.framerate = 32
camera.exposure_mode = "antishake"
class _ReadCameraPipeline(Pipeline):
    def __init__(self):
        Pipeline.__init__(self)
        self.__last_success = False
        self.__last_capture = None
        # daemon thread: the endless reader loop must not block interpreter exit
        Thread(target=self.__read, daemon=True).start()
        time.sleep(2)
    def __read(self):
        while True:
            if picamera is None:
                self.__last_success, self.__last_capture = camera.read()
            else:
                array = picamera.array.PiRGBArray(camera, size=CAMERA_RESOLUTION)
                camera.capture(array, format='bgr', resize=CAMERA_RESOLUTION, use_video_port=True)
                self.__last_capture = array.array
                self.__last_success = True
    def _execute(self, inp):
        return self.__last_success and self.__last_capture is not None, self.__last_capture
class ConvertColorspacePipeline(Pipeline):
""" Converts an image to a target colorspace. The input image is assumed to be in BGR. """
def __init__(self, to='hsv'):
Pipeline.__init__(self)
self.__target_colorspace = to
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: BGR-image (np.array)
:return: image in the target color space (np.array)
"""
if self.__target_colorspace == "hsv":
return True, cv2.cvtColor(inp, cv2.COLOR_BGR2HSV)
elif self.__target_colorspace == "grayscale":
return True, cv2.cvtColor(inp, cv2.COLOR_BGR2GRAY)
else:
            logging.warning('Unsupported color space: %s', self.__target_colorspace)
return False, None
class ColorThresholdPipeline(Pipeline):
""" Creates a binary image where white pixels are in the given color threshold """
def __init__(self, color):
Pipeline.__init__(self)
if type(color) == str:
if color == 'red':
self.threshold_lower = np.array([140, 50, 50])
self.threshold_upper = np.array([160, 255, 255])
elif color == 'yellow':
self.threshold_lower = np.array([30, 50, 50])
self.threshold_upper = np.array([70, 255, 255])
elif color == 'orange':
self.threshold_lower = np.array([15, 50, 50])
self.threshold_upper = np.array([25, 255, 255])
elif color == 'magenta':
self.threshold_lower = np.array([interp1d([0, 360], [0, 180])(300),
interp1d([0, 100], [0, 255])(10),
interp1d([0, 100], [0, 255])(10)])
self.threshold_upper = np.array([interp1d([0, 360], [0, 180])(330),
interp1d([0, 100], [0, 255])(100),
interp1d([0, 100], [0, 255])(100)])
else:
raise ValueError('Unsupported color', color)
elif type(color) == tuple:
self.threshold_lower, self.threshold_upper = color
else:
raise ValueError('Unsupported argument type', type(color), '(must be str or tuple)')
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: An image (np.array)
:return: A binary image (np.array)
"""
colmask = cv2.inRange(inp, self.threshold_lower, self.threshold_upper)
return True, colmask
class ErodeDilatePipeline(Pipeline):
""" Applies an erode and dilate filter on an image """
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: an image (np.array)
:return: the filtered image (np.array)
"""
x = cv2.erode(inp, None, iterations=2)
x = cv2.dilate(x, None, iterations=2)
return True, x
class GetLargestContourPipeline(Pipeline):
""" Finds the largest contour in the image and returns its bounding box"""
def __init__(self, min_contour_size=DETECTION_SIZE_THRESHOLD):
Pipeline.__init__(self)
self.__min_contour_size = min_contour_size
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: a binary image (np.array)
:return: a bounding box (tuple (x, y, w, h) )
"""
_, cnts, _ = cv2.findContours(inp, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
# only proceed if at least one contour was found
if len(cnts) > 0:
largest_contour = max(cnts, key=cv2.contourArea)
if cv2.contourArea(largest_contour) > inp.shape[0] * inp.shape[1] * self.__min_contour_size:
bbox = tuple(cv2.boundingRect(largest_contour))
return True, bbox
return False, None
class FastColorDetectionPipeline(Pipeline):
""" A heuristical color detection approach. Not actually fast. """
def __init__(self, color):
Pipeline.__init__(self)
if type(color) == str:
if color == 'red':
self.threshold_lower = np.array([140, 50, 50])
self.threshold_upper = np.array([160, 255, 255])
elif color == 'yellow':
self.threshold_lower = np.array([30, 50, 50])
self.threshold_upper = np.array([70, 255, 255])
elif color == 'orange':
self.threshold_lower = np.array([15, 50, 50])
self.threshold_upper = np.array([25, 255, 255])
elif color == 'magenta':
self.threshold_lower = np.array([interp1d([0, 360], [0, 180])(300),
interp1d([0, 100], [0, 255])(10),
interp1d([0, 100], [0, 255])(10)])
self.threshold_upper = np.array([interp1d([0, 360], [0, 180])(330),
interp1d([0, 100], [0, 255])(100),
interp1d([0, 100], [0, 255])(100)])
else:
raise ValueError('Unsupported color', color)
elif type(color) == tuple:
self.threshold_lower, self.threshold_upper = color
else:
raise ValueError('Unsupported argument type', type(color), '(must be str or tuple)')
def _execute(self, inp):
height, width = inp.shape
startt = current_time_millis()
horizontal_segments = []
for y in range(0, height, SCANLINE_DISTANCE):
start = None
for x in range(0, width):
if self.__pixel_in_range(inp[y, x]):
if start is None:
start = x
elif start is not None and x - start > SCANLINE_DISTANCE:
horizontal_segments.append((y, start, x))
start = None
print("HSCAN", current_time_millis() - startt)
startt = current_time_millis()
vertical_segments = []
for x in range(0, width, SCANLINE_DISTANCE):
start = None
for y in range(0, height):
if self.__pixel_in_range(inp[y, x]):
if start is None:
start = y
elif start is not None and y - start > SCANLINE_DISTANCE:
vertical_segments.append((x, start, y))
start = None
print("VSCAN", current_time_millis() - startt)
startt = current_time_millis()
largest_bbox = None
largest_area = 0
for (hy, hx1, hx2), (vx, vy1, vy2) in itertools.product(horizontal_segments, vertical_segments):
if vy1 <= hy <= vy2 and hx1 <= vx <= hx2:
w, h = hx2 - hx1, vy2 - vy1
if w*h > largest_area:
largest_bbox = (hx1, vy1, w, h)
print("BBOX", current_time_millis() - startt)
        return largest_bbox is not None, largest_bbox
def __pixel_in_range(self, pixel):
return pixel > 0
#return np.all(pixel >= self.threshold_lower) and np.all(pixel <= self.threshold_upper)
class TrackBBOXPipeline(Pipeline):
""" Tracks an initial bbox over several frames. """
    supported_tracking_algorithms = ['MIL', 'BOOSTING', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
def __init__(self, initial_frame, initial_bbox, tracking_algorithm='MIL'):
Pipeline.__init__(self)
if tracking_algorithm not in TrackBBOXPipeline.supported_tracking_algorithms:
raise ValueError('Invalid tracking algorithm', tracking_algorithm)
self.__tracker = cv2.Tracker_create(tracking_algorithm)
self.__tracker.init(initial_frame, initial_bbox)
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: an image (np.array)
:return: the bounding box as it was tracked in the image ( tuple (x, y, w, h) )
"""
return self.__tracker.update(inp)
class FindYDeviationPipeline(Pipeline):
""" Finds the deviation of a bounding box on the x-axis"""
def __init__(self):
Pipeline.__init__(self)
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: a bounding box and the image coordinates (tuple)
:return: the deviation of the bounding box along the x axis (float in [-1, 1])
"""
(left_x, _, width, _), (_, image_width, _) = inp
mid_x = image_width / 2
max_dev = mid_x
left_x -= mid_x
dev = (left_x + width/2) / max_dev
return True, dev
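    # Worked example (illustrative): with image_width=640 (mid_x=320), a box at
    # left_x=200 with width=100 has centre 250, so dev = (250 - 320) / 320
    # = -0.21875 (negative means left of centre; +/-1 are the image edges).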
class GetImageDimensionsPipeline(Pipeline):
""" Returns the dimensions of the image """
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: an image (np.array)
:return: the dimensions of the image ( tuple (h, w) )
"""
return True, inp.shape
class EdgeDetectionPipeline(Pipeline):
""" Applies canny edge detection on an image """
def __init__(self, threshold_lower=100, threshold_upper=200):
Pipeline.__init__(self)
self.threshold_lower = threshold_lower
self.threshold_upper = threshold_upper
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: an image (np.array)
:return: an image containing the edges of the input image (np.array)
"""
return True, cv2.Canny(inp, self.threshold_lower, self.threshold_upper)
class HaarcascadePipeline(Pipeline):
""" Applies HAAR Cascade detection on an input image """
def __init__(self, haarfile):
Pipeline.__init__(self)
self.detector = cv2.CascadeClassifier(haarfile)
@overrides(Pipeline)
def _execute(self, inp):
"""
:param inp: an image (np.array)
:return: a list of bounding boxes for the found object (list)
"""
return True, self.detector.detectMultiScale(inp)
@deprecated
class DBSCANPipeline(Pipeline):
def __init__(self, eps, min_neighs):
Pipeline.__init__(self)
self.__eps = eps
self.__min_neighs = min_neighs
@overrides(Pipeline)
def _execute(self, inp):
        # cluster only the foreground (non-zero) pixels of the binary mask
        points = []
        height, width = inp.shape
        for y in range(0, height):
            for x in range(0, width):
                if inp[y, x] > 0:
                    points.append(np.array([x, y]))
        points = np.array(points)
        dbscan = sklearn.cluster.DBSCAN(eps=self.__eps, min_samples=self.__min_neighs)
        labels = dbscan.fit_predict(points)
        unique, counts = np.unique(labels, return_counts=True)
        l_max = unique[np.argmax(counts)]
        largest_cluster = [p for i, p in enumerate(points) if labels[i] == l_max]
        # bounding box of the largest cluster
        min_x = min(p[0] for p in largest_cluster)
        max_x = max(p[0] for p in largest_cluster)
        min_y = min(p[1] for p in largest_cluster)
        max_y = max(p[1] for p in largest_cluster)
        return True, (min_x, min_y, max_x - min_x, max_y - min_y)
@deprecated
class FindLegsPipeline(Pipeline):
def __init__(self):
Pipeline.__init__(self)
@overrides(Pipeline)
def _execute(self, inp):
result = np.zeros(inp.shape)
height, width = inp.shape
segment_towers = []
last_segments = []
this_segments = []
for y in range(int(height/3), height, 10):
edge_points = []
last_segments, this_segments = this_segments, []
for x in range(0, width):
if inp[y, x] > 0:
edge_points.append(x)
for i in range(1, len(edge_points)):
x1, x2 = edge_points[i-1], edge_points[i]
if 40 < x2 - x1 < 100:
this_tower_idx = None
found_upper = False
for ly, lx1, lx2, tower_idx in last_segments:
ix1, ix2 = max(x1, lx1), min(x2, lx2)
if ix2 <= ix1:
continue # empty intersection
                        elif ix2 - ix1 > 0.75 * (x2 - x1):
segment_towers[tower_idx].append((y, x1, x2))
this_tower_idx = tower_idx
found_upper = True
if not found_upper:
this_tower_idx = len(segment_towers)
segment_towers.append([(y, x1, x2)])
this_segments.append((y, x1, x2, this_tower_idx))
leg_candidates = []
for tower in segment_towers:
if len(tower) > 1:
(top_y, top_x1, top_x2), (bot_y, bot_x1, bot_x2) = tower[0], tower[-1]
leg_candidates.append([(int(top_x1 + (top_x2-top_x1)/2), top_y),
                                       (int(bot_x1 + (bot_x2 - bot_x1)/2), bot_y)])
for y, x1, x2 in tower:
for x in range(x1, x2):
result[y, x] = 255
for yy in range(max(0, y-10), min(height-1, y+10)):
result[yy, x1] = 255
result[yy, x2] = 255
return True, (result, leg_candidates)
class KalmanFilterPipeline(Pipeline):
""" Applies a Kalman Filter on an input signal"""
def __init__(self, process_noise=.001, sensor_noise=.4):
Pipeline.__init__(self)
self.__state = 0
self.__error = 1
self.__process_noise = process_noise
self.__sensor_noise = sensor_noise
self.__kalman_gain = 1
def _execute(self, inp):
"""
:param inp: a signal (float)
:return: the filtered signal (float)
"""
        # predict: process noise inflates the error covariance
        self.__error = self.__error + self.__process_noise
        # update: K = P / (P + R), with R the sensor (measurement) noise
        self.__kalman_gain = self.__error / (self.__error + self.__sensor_noise)
        self.__state = self.__state + self.__kalman_gain * (inp - self.__state)
        self.__error = (1 - self.__kalman_gain) * self.__error
        return True, self.__state
READ_CAMERA_PIPELINE = _ReadCameraPipeline()
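# Illustrative manual invocation of two stages (the composition machinery lives
# in sensors.pipeline and is not shown here; `_execute` is the hook each stage
# implements):
#   ok, frame = READ_CAMERA_PIPELINE._execute(None)
#   if ok:
#       ok, hsv = ConvertColorspacePipeline(to='hsv')._execute(frame)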
| [
"time.sleep",
"scipy.interpolate.interp1d",
"numpy.array",
"cv2.CascadeClassifier",
"cv2.erode",
"cv2.Tracker_create",
"itertools.product",
"cv2.contourArea",
"picamera.array.PiRGBArray",
"utils.functions.overrides",
"logging.warning",
"picamera.PiCamera",
"numpy.argmax",
"utils.functions.... | [((572, 618), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0 if picamera is None else 0)'], {}), '(0 if picamera is None else 0)\n', (588, 618), False, 'import cv2\n'), ((1053, 1072), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (1070, 1072), False, 'import picamera\n'), ((2238, 2257), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (2247, 2257), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((4307, 4326), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (4316, 4326), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((4675, 4694), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (4684, 4694), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((5233, 5252), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (5242, 5252), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((9773, 9792), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (9782, 9792), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((10184, 10203), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (10193, 10203), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((10719, 10738), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (10728, 10738), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((11219, 11238), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (11228, 11238), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((11718, 11737), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (11727, 11737), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((12149, 12168), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (12158, 12168), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((13131, 13150), 'utils.functions.overrides', 'overrides', (['Pipeline'], {}), '(Pipeline)\n', (13140, 13150), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((1256, 1279), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (1273, 1279), False, 'from sensors.pipeline import Pipeline\n'), ((1403, 1416), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1413, 1416), False, 'import time\n'), ((2169, 2192), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (2186, 2192), False, 'from sensors.pipeline import Pipeline\n'), ((2933, 2956), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (2950, 2956), False, 'from sensors.pipeline import Pipeline\n'), ((4481, 4541), 'cv2.inRange', 'cv2.inRange', (['inp', 'self.threshold_lower', 'self.threshold_upper'], {}), '(inp, self.threshold_lower, self.threshold_upper)\n', (4492, 4541), False, 'import cv2\n'), ((4847, 4881), 'cv2.erode', 'cv2.erode', (['inp', 'None'], {'iterations': '(2)'}), '(inp, None, iterations=2)\n', (4856, 4881), False, 
'import cv2\n'), ((4894, 4927), 'cv2.dilate', 'cv2.dilate', (['x', 'None'], {'iterations': '(2)'}), '(x, None, iterations=2)\n', (4904, 4927), False, 'import cv2\n'), ((5151, 5174), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (5168, 5174), False, 'from sensors.pipeline import Pipeline\n'), ((5427, 5486), 'cv2.findContours', 'cv2.findContours', (['inp', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_NONE'], {}), '(inp, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n', (5443, 5486), False, 'import cv2\n'), ((6021, 6044), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (6038, 6044), False, 'from sensors.pipeline import Pipeline\n'), ((7471, 7492), 'utils.functions.current_time_millis', 'current_time_millis', ([], {}), '()\n', (7490, 7492), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((8010, 8031), 'utils.functions.current_time_millis', 'current_time_millis', ([], {}), '()\n', (8029, 8031), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((8545, 8566), 'utils.functions.current_time_millis', 'current_time_millis', ([], {}), '()\n', (8564, 8566), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((8666, 8723), 'itertools.product', 'itertools.product', (['horizontal_segments', 'vertical_segments'], {}), '(horizontal_segments, vertical_segments)\n', (8683, 8723), False, 'import itertools\n'), ((9455, 9478), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (9472, 9478), False, 'from sensors.pipeline import Pipeline\n'), ((9671, 9709), 'cv2.Tracker_create', 'cv2.Tracker_create', (['tracking_algorithm'], {}), '(tracking_algorithm)\n', (9689, 9709), False, 'import cv2\n'), ((10154, 10177), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (10171, 10177), False, 'from sensors.pipeline import Pipeline\n'), ((11094, 11117), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (11111, 11117), False, 'from sensors.pipeline import Pipeline\n'), ((11632, 11655), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (11649, 11655), False, 'from sensors.pipeline import Pipeline\n'), ((11680, 11711), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['haarfile'], {}), '(haarfile)\n', (11701, 11711), False, 'import cv2\n'), ((12054, 12077), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (12071, 12077), False, 'from sensors.pipeline import Pipeline\n'), ((12390, 12406), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (12398, 12406), True, 'import numpy as np\n'), ((12565, 12602), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (12574, 12602), True, 'import numpy as np\n'), ((13101, 13124), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (13118, 13124), False, 'from sensors.pipeline import Pipeline\n'), ((13197, 13216), 'numpy.zeros', 'np.zeros', (['inp.shape'], {}), '(inp.shape)\n', (13205, 13216), True, 'import numpy as np\n'), ((15364, 15387), 'sensors.pipeline.Pipeline.__init__', 'Pipeline.__init__', (['self'], {}), '(self)\n', (15381, 15387), False, 'from sensors.pipeline import Pipeline\n'), ((11430, 11488), 'cv2.Canny', 'cv2.Canny', (['inp', 'self.threshold_lower', 'self.threshold_upper'], {}), '(inp, 
self.threshold_lower, self.threshold_upper)\n', (11439, 11488), False, 'import cv2\n'), ((12626, 12643), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (12635, 12643), True, 'import numpy as np\n'), ((1360, 1386), 'threading.Thread', 'Thread', ([], {'target': 'self.__read'}), '(target=self.__read)\n', (1366, 1386), False, 'from threading import Thread\n'), ((1615, 1672), 'picamera.array.PiRGBArray', 'picamera.array.PiRGBArray', (['camera'], {'size': 'CAMERA_RESOLUTION'}), '(camera, size=CAMERA_RESOLUTION)\n', (1640, 1672), False, 'import picamera\n'), ((2483, 2519), 'cv2.cvtColor', 'cv2.cvtColor', (['inp', 'cv2.COLOR_BGR2HSV'], {}), '(inp, cv2.COLOR_BGR2HSV)\n', (2495, 2519), False, 'import cv2\n'), ((2663, 2731), 'logging.warning', 'logging.warning', (['"""Unsupported color space"""', 'self.__target_colorspace'], {}), "('Unsupported color space', self.__target_colorspace)\n", (2678, 2731), False, 'import logging\n'), ((3059, 3082), 'numpy.array', 'np.array', (['[140, 50, 50]'], {}), '([140, 50, 50])\n', (3067, 3082), True, 'import numpy as np\n'), ((3122, 3147), 'numpy.array', 'np.array', (['[160, 255, 255]'], {}), '([160, 255, 255])\n', (3130, 3147), True, 'import numpy as np\n'), ((5648, 5680), 'cv2.contourArea', 'cv2.contourArea', (['largest_contour'], {}), '(largest_contour)\n', (5663, 5680), False, 'import cv2\n'), ((6147, 6170), 'numpy.array', 'np.array', (['[140, 50, 50]'], {}), '([140, 50, 50])\n', (6155, 6170), True, 'import numpy as np\n'), ((6210, 6235), 'numpy.array', 'np.array', (['[160, 255, 255]'], {}), '([160, 255, 255])\n', (6218, 6235), True, 'import numpy as np\n'), ((7960, 7981), 'utils.functions.current_time_millis', 'current_time_millis', ([], {}), '()\n', (7979, 7981), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((8495, 8516), 'utils.functions.current_time_millis', 'current_time_millis', ([], {}), '()\n', (8514, 8516), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((8936, 8957), 'utils.functions.current_time_millis', 'current_time_millis', ([], {}), '()\n', (8955, 8957), False, 'from utils.functions import overrides, get_class_name, current_time_millis, deprecated\n'), ((2599, 2636), 'cv2.cvtColor', 'cv2.cvtColor', (['inp', 'cv2.COLOR_BGR2GRAY'], {}), '(inp, cv2.COLOR_BGR2GRAY)\n', (2611, 2636), False, 'import cv2\n'), ((3223, 3245), 'numpy.array', 'np.array', (['[30, 50, 50]'], {}), '([30, 50, 50])\n', (3231, 3245), True, 'import numpy as np\n'), ((3285, 3309), 'numpy.array', 'np.array', (['[70, 255, 255]'], {}), '([70, 255, 255])\n', (3293, 3309), True, 'import numpy as np\n'), ((5767, 5800), 'cv2.boundingRect', 'cv2.boundingRect', (['largest_contour'], {}), '(largest_contour)\n', (5783, 5800), False, 'import cv2\n'), ((6311, 6333), 'numpy.array', 'np.array', (['[30, 50, 50]'], {}), '([30, 50, 50])\n', (6319, 6333), True, 'import numpy as np\n'), ((6373, 6397), 'numpy.array', 'np.array', (['[70, 255, 255]'], {}), '([70, 255, 255])\n', (6381, 6397), True, 'import numpy as np\n'), ((12355, 12371), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (12363, 12371), True, 'import numpy as np\n'), ((3385, 3407), 'numpy.array', 'np.array', (['[15, 50, 50]'], {}), '([15, 50, 50])\n', (3393, 3407), True, 'import numpy as np\n'), ((3447, 3471), 'numpy.array', 'np.array', (['[25, 255, 255]'], {}), '([25, 255, 255])\n', (3455, 3471), True, 'import numpy as np\n'), ((6473, 6495), 'numpy.array', 'np.array', (['[15, 50, 50]'], {}), '([15, 50, 
50])\n', (6481, 6495), True, 'import numpy as np\n'), ((6535, 6559), 'numpy.array', 'np.array', (['[25, 255, 255]'], {}), '([25, 255, 255])\n', (6543, 6559), True, 'import numpy as np\n'), ((3558, 3586), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 360]', '[0, 180]'], {}), '([0, 360], [0, 180])\n', (3566, 3586), False, 'from scipy.interpolate import interp1d\n'), ((3642, 3670), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (3650, 3670), False, 'from scipy.interpolate import interp1d\n'), ((3725, 3753), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (3733, 3753), False, 'from scipy.interpolate import interp1d\n'), ((3809, 3837), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 360]', '[0, 180]'], {}), '([0, 360], [0, 180])\n', (3817, 3837), False, 'from scipy.interpolate import interp1d\n'), ((3893, 3921), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (3901, 3921), False, 'from scipy.interpolate import interp1d\n'), ((3977, 4005), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (3985, 4005), False, 'from scipy.interpolate import interp1d\n'), ((6646, 6674), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 360]', '[0, 180]'], {}), '([0, 360], [0, 180])\n', (6654, 6674), False, 'from scipy.interpolate import interp1d\n'), ((6730, 6758), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (6738, 6758), False, 'from scipy.interpolate import interp1d\n'), ((6813, 6841), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (6821, 6841), False, 'from scipy.interpolate import interp1d\n'), ((6897, 6925), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 360]', '[0, 180]'], {}), '([0, 360], [0, 180])\n', (6905, 6925), False, 'from scipy.interpolate import interp1d\n'), ((6981, 7009), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (6989, 7009), False, 'from scipy.interpolate import interp1d\n'), ((7065, 7093), 'scipy.interpolate.interp1d', 'interp1d', (['[0, 100]', '[0, 255]'], {}), '([0, 100], [0, 255])\n', (7073, 7093), False, 'from scipy.interpolate import interp1d\n')] |
# -*- coding: utf-8 -*-
"""
=== DFT_pyaudio_basics.py =================================================
Demo for frame-based DFT processing.
An audio file is read in blocks and converted to numpy arrays; the DFT of
the left and right channel is computed, a subset of the DFT bins is set to
zero, and the inverse DFT is taken of the result. The result is written
back out as an audio stream.
===========================================================================
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import os
from numpy import (pi, log10, exp, sqrt, sin, cos, tan, angle, arange,
linspace, array, zeros, ones)
from numpy.fft import fft, ifft, fftshift, ifftshift, fftfreq
import matplotlib.pyplot as plt
from matplotlib.pyplot import (figure, plot, stem, grid, xlabel, ylabel,
subplot, title, clf, xlim, ylim)
import pyaudio
import wave
np_type = np.int16 # data type for audio samples
CHUNK = 256 # number of samples in one frame
# path = '/home/muenker/Daten/share/Musi/wav/'
path = '../_media/'
# filename = 'Ole_16bit.wav'
filename = 'SpaceRipple.wav'
wf = wave.open(os.path.join(path, filename))
n_chan = wf.getnchannels() # number of channels in wav-file
w_samp = wf.getsampwidth() # wordlength of samples
rate_in = wf.getframerate() # samplerate in wav-file
print("Channels:", n_chan, "\nSample width:",w_samp,"bytes\nSample rate:",rate_in)
wf = wave.open(os.path.join(path, filename))  # re-open to rewind to frame 0
p = pyaudio.PyAudio() # instantiate PyAudio + setup PortAudio system
# open a stream on the desired device with the desired audio parameters
# for reading or writing
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# initialize arrays for samples
samples_in = samples_out = zeros(CHUNK*2, dtype=np_type) # stereo
samples_l = samples_r = zeros(CHUNK, dtype=np_type) # mono
data_out = 'dummy'
while data_out:
# read CHUNK frames to string, convert to numpy array and split in R / L chan.:
# R / L samples are interleaved, each sample is 16 bit = 2 Bytes
    samples_in = np.frombuffer(wf.readframes(CHUNK), dtype=np_type)  # np.fromstring is deprecated for binary data
## Example for dtype = np.int8 (8 bits) = 1 ndarray element,
## two consecutive bytes / ndarray elements = 1 sample
## Split signal into L and R channel:
# samples_l[0::2] = samples_in[0::4]
# samples_l[1::2] = samples_in[1::4]
# samples_r[0::2] = samples_in[2::4]
# samples_r[1::2] = samples_in[3::4]
#
## Do some numpy magic with samples_l and samples_r
# ...
#
# And re-combine L and R channel:
# samples_out[0::4] = samples_l[0::2]
# samples_out[1::4] = samples_l[1::2]
# samples_out[2::4] = samples_r[0::2]
# samples_out[3::4] = samples_r[1::2]
#---------------------------------------------------------------------------
## dtype = np.int16 (16 bits): 1 ndarray element = 1 sample :
samples_l = samples_in[0::2]
samples_r = samples_in[1::2]
if len(samples_r) < 2:
break # break out of the while loop when out of data
# Check whether there was enough data for a full frame
if len(samples_r) < CHUNK: # check whether frame has full length
        samples_out = samples_np = zeros(len(samples_in), dtype=np_type)
        samples_l = samples_r = zeros(len(samples_in)//2, dtype=np_type)
# ---- Numpy Magic happens here (suppress all FFT bins > 63) ---------------
fft_l = fft(samples_in[0::2]) # convert to frequency domain
fft_r = fft(samples_in[1::2])
    fft_l[64:-64] = 0 # zero the bins 64 ... NFFT-65
    fft_r[64:-64] = 0 # keeps the spectrum conjugate-symmetric -> real-valued time signal
    ifft_l = ifft(fft_l).real.astype(np_type) # take the real part; abs() would
    ifft_r = ifft(fft_r).real.astype(np_type) # distort bipolar signals
samples_out[0::2] = ifft_l # convert back to time domain
samples_out[1::2] = ifft_r
# data_out = np.chararray.tostring(samples_np.astype(np_type)) # convert back to string
    data_out = np.chararray.tostring(samples_out) # convert back to bytes (tobytes() is the modern spelling)
stream.write(data_out) # play audio by writing audio data to the stream (blocking)
stream.stop_stream() # pause audio stream
stream.close() # close audio stream
p.terminate() # close PyAudio & terminate PortAudio system
# see: http://stackoverflow.com/questions/23370556/recording-24-bit-audio-with-pyaudio
# http://stackoverflow.com/questions/16767248/how-do-i-write-a-24-bit-wav-file-in-python? | [
"numpy.fft.fft",
"os.path.join",
"numpy.chararray.tostring",
"numpy.zeros",
"pyaudio.PyAudio",
"numpy.fft.ifft"
] | [((1544, 1561), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1559, 1561), False, 'import pyaudio\n'), ((1959, 1990), 'numpy.zeros', 'zeros', (['(CHUNK * 2)'], {'dtype': 'np_type'}), '(CHUNK * 2, dtype=np_type)\n', (1964, 1990), False, 'from numpy import pi, log10, exp, sqrt, sin, cos, tan, angle, arange, linspace, array, zeros, ones\n'), ((2024, 2051), 'numpy.zeros', 'zeros', (['CHUNK'], {'dtype': 'np_type'}), '(CHUNK, dtype=np_type)\n', (2029, 2051), False, 'from numpy import pi, log10, exp, sqrt, sin, cos, tan, angle, arange, linspace, array, zeros, ones\n'), ((1208, 1236), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1220, 1236), False, 'import os\n'), ((1509, 1537), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (1521, 1537), False, 'import os\n'), ((3617, 3638), 'numpy.fft.fft', 'fft', (['samples_in[0::2]'], {}), '(samples_in[0::2])\n', (3620, 3638), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fftfreq\n'), ((3682, 3703), 'numpy.fft.fft', 'fft', (['samples_in[1::2]'], {}), '(samples_in[1::2])\n', (3685, 3703), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fftfreq\n'), ((4242, 4276), 'numpy.chararray.tostring', 'np.chararray.tostring', (['samples_out'], {}), '(samples_out)\n', (4263, 4276), True, 'import numpy as np\n'), ((3874, 3885), 'numpy.fft.ifft', 'ifft', (['fft_l'], {}), '(fft_l)\n', (3878, 3885), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fftfreq\n'), ((3960, 3971), 'numpy.fft.ifft', 'ifft', (['fft_r'], {}), '(fft_r)\n', (3964, 3971), False, 'from numpy.fft import fft, ifft, fftshift, ifftshift, fftfreq\n')] |
#!/usr/bin/env python
from auvlib.data_tools import std_data
from auvlib.bathy_maps import mesh_map
import numpy as np
''' Mix std pings from the mid and low runs and make a mesh for draping '''
low_pings = std_data.mbes_ping.read_data("Data/EM2040/low/pings_centered.cereal")
mid_pings = std_data.mbes_ping.read_data("Data/EM2040/mid/pings_centered.cereal")
chosen_pings = low_pings + mid_pings
V, F, bounds = mesh_map.mesh_from_pings(chosen_pings, 0.5)
mesh_map.show_mesh(V, F)
np.savez("Data/EM2040/mesh.npz", V= V, F=F, bounds=bounds)
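# The saved arrays can later be restored with, e.g. (illustrative):
#     data = np.load("Data/EM2040/mesh.npz")
#     V, F, bounds = data["V"], data["F"], data["bounds"]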
print("???") | [
"numpy.savez",
"auvlib.data_tools.std_data.mbes_ping.read_data",
"auvlib.bathy_maps.mesh_map.show_mesh",
"auvlib.bathy_maps.mesh_map.mesh_from_pings"
] | [((209, 278), 'auvlib.data_tools.std_data.mbes_ping.read_data', 'std_data.mbes_ping.read_data', (['"""Data/EM2040/low/pings_centered.cereal"""'], {}), "('Data/EM2040/low/pings_centered.cereal')\n", (237, 278), False, 'from auvlib.data_tools import std_data\n'), ((291, 360), 'auvlib.data_tools.std_data.mbes_ping.read_data', 'std_data.mbes_ping.read_data', (['"""Data/EM2040/mid/pings_centered.cereal"""'], {}), "('Data/EM2040/mid/pings_centered.cereal')\n", (319, 360), False, 'from auvlib.data_tools import std_data\n'), ((415, 459), 'auvlib.bathy_maps.mesh_map.mesh_from_pings', 'mesh_map.mesh_from_pings', (['choosen_pings', '(0.5)'], {}), '(choosen_pings, 0.5)\n', (439, 459), False, 'from auvlib.bathy_maps import mesh_map\n'), ((460, 484), 'auvlib.bathy_maps.mesh_map.show_mesh', 'mesh_map.show_mesh', (['V', 'F'], {}), '(V, F)\n', (478, 484), False, 'from auvlib.bathy_maps import mesh_map\n'), ((486, 543), 'numpy.savez', 'np.savez', (['"""Data/EM2040/mesh.npz"""'], {'V': 'V', 'F': 'F', 'bounds': 'bounds'}), "('Data/EM2040/mesh.npz', V=V, F=F, bounds=bounds)\n", (494, 543), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import rospy
import time
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import OccupancyGrid
import tf
# p(x) = 1 - \frac{1}{1 + e^{l(x)}}
def l2p(l):
return 1 - (1/(1+np.exp(l)))
# l(x) = log(\frac{p(x)}{1 - p(x)})
def p2l(p):
return np.log(p/(1-p))
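# Quick sanity check, following directly from the two definitions above:
#     p2l(0.5) == 0.0   # probability 0.5 <-> log-odds 0
#     l2p(0.0) == 0.5
# so a map initialized with p2l(p_prior) at p_prior = 0.5 encodes "unknown".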
class GridMapping:
def __init__(self, map_center_x, map_center_y, map_size_x, map_size_y, map_resolution, laser_min_angle, laser_max_angle, laser_resolution, laser_max_dist, sensor_model_p_occ, sensor_model_p_free, sensor_model_p_prior):
self.map_center_x = map_center_x #meter
self.map_center_y = map_center_y #meter
self.map_size_x = map_size_x #meter
self.map_size_y = map_size_y #meter
self.map_resolution = map_resolution #meter/cell
self.laser_min_angle = laser_min_angle #radian
self.laser_max_angle = laser_max_angle #radian
self.laser_resolution = laser_resolution #radian
self.laser_max_dist = laser_max_dist #meter
self.sensor_model_l_occ = p2l(sensor_model_p_occ)
self.sensor_model_l_free = p2l(sensor_model_p_free)
self.sensor_model_l_prior = p2l(sensor_model_p_prior)
map_rows = int(map_size_y / map_resolution)
map_cols = int(map_size_x / map_resolution)
self.gridmap = self.sensor_model_l_prior * np.ones((map_rows, map_cols))
def to_xy (self, i, j):
x = j * self.map_resolution + self.map_center_x
y = i * self.map_resolution + self.map_center_y
return x, y
def to_ij (self, x, y):
i = (y-self.map_center_y) / self.map_resolution
j = (x-self.map_center_x) / self.map_resolution
return i, j
def is_inside (self, i, j):
return i<self.gridmap.shape[0] and j<self.gridmap.shape[1] and i>=0 and j>=0
def raycast_update(self, x0, y0, theta, d):
# see: https://www.ros.org/reps/rep-0117.html
# Detections that are too close to the sensor to quantify shall be represented by -Inf.
# Erroneous detections shall be represented by quiet (non-signaling) NaNs.
# Finally, out of range detections will be represented by +Inf.
if np.isinf(d) and np.sign(d) == +1:
d = self.laser_max_dist
elif np.isinf(d) or np.isnan(d):
return
x1 = x0 + d*np.cos(theta)
y1 = y0 + d*np.sin(theta)
i0, j0 = self.to_ij(x0, y0)
i1, j1 = self.to_ij(x1, y1)
d_cells = d / self.map_resolution
ip, jp, is_hit = self.bresenham(i0, j0, i1, j1, d_cells)
if not np.isnan(d) and d != self.laser_max_dist and self.is_inside(int(ip),int(jp)):
# Hit!
self.gridmap[int(ip),int(jp)] += self.sensor_model_l_occ - self.sensor_model_l_prior
return
    # Bresenham line rasterization: walk the grid cells between (i0, j0) and (i1, j1)
    def bresenham (self, i0, j0, i1, j1, d, debug=False): # i0, j0 (starting point)
dx = np.absolute(j1-j0)
dy = -1 * np.absolute(i1-i0)
sx = -1
if j0<j1:
sx = 1
sy = -1
if i0<i1:
sy = 1
jp, ip = j0, i0
err = dx+dy # error value e_xy
while True: # loop
if (jp == j1 and ip == i1) or (np.sqrt((jp-j0)**2+(ip-i0)**2) >= d) or not self.is_inside(ip, jp):
return ip, jp, False
elif self.gridmap[int(ip),int(jp)]==100:
return ip, jp, True
if self.is_inside(ip, jp):
# miss:
self.gridmap[int(ip),int(jp)] += self.sensor_model_l_free - self.sensor_model_l_prior
e2 = 2*err
if e2 >= dy: # e_xy+e_x > 0
err += dy
jp += sx
if e2 <= dx: # e_xy+e_y < 0
err += dx
ip += sy
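    # Illustrative trace (hypothetical numbers): starting from cell (0, 0)
    # towards (3, 5) with a large enough range d, the loop above updates the
    # cells closest to the ideal straight line with the "free" log-odds and
    # returns the last visited cell together with a hit flag.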
def update(self, x, y, theta, scan):
# test by printing robot trajectory
#i,j = self.to_ij(x,y)
#self.gridmap[int(i), int(j)] = 100
for i, z in enumerate(scan):
self.raycast_update(x, y, (theta + self.laser_min_angle + i*self.laser_resolution), z)
return self.gridmap
class GridMappingROS:
def __init__(self):
rospy.init_node('RosGridMapping', anonymous=True)
self.is_gridmapping_initialized = False
self.map_last_publish = rospy.Time()
self.prev_robot_x = -99999999
self.prev_robot_y = -99999999
self.sensor_model_p_occ = rospy.get_param('~sensor_model_p_occ', 0.75)
self.sensor_model_p_free = rospy.get_param('~sensor_model_p_free', 0.45)
self.sensor_model_p_prior = rospy.get_param('~sensor_model_p_prior', 0.5)
self.robot_frame = rospy.get_param('~robot_frame', 'base_link')
self.map_frame = rospy.get_param('~map_frame', 'map')
self.map_center_x = rospy.get_param('~map_center_x', -1.0)
self.map_center_y = rospy.get_param('~map_center_y', -1.0)
self.map_size_x = rospy.get_param('~map_size_x', 32.0)
self.map_size_y = rospy.get_param('~map_size_y', 12.0)
self.map_resolution = rospy.get_param('~map_resolution', 0.1)
self.map_publish_freq = rospy.get_param('~map_publish_freq', 1.0)
self.update_movement = rospy.get_param('~update_movement', 0.1)
        # Create an OccupancyGrid message template
self.map_msg = OccupancyGrid()
self.map_msg.header.frame_id = self.map_frame
self.map_msg.info.resolution = self.map_resolution
self.map_msg.info.width = int(self.map_size_x / self.map_resolution)
self.map_msg.info.height = int(self.map_size_y / self.map_resolution)
self.map_msg.info.origin.position.x = self.map_center_x
self.map_msg.info.origin.position.y = self.map_center_y
self.laser_sub = rospy.Subscriber("scan", LaserScan, self.laserscan_callback, queue_size=2)
self.map_pub = rospy.Publisher('map', OccupancyGrid, queue_size=2)
self.tf_sub = tf.TransformListener()
def init_gridmapping(self, laser_min_angle, laser_max_angle, laser_resolution, laser_max_dist):
self.gridmapping = GridMapping(self.map_center_x, self.map_center_y, self.map_size_x, self.map_size_y, self.map_resolution, laser_min_angle, laser_max_angle, laser_resolution, laser_max_dist, self.sensor_model_p_occ, self.sensor_model_p_free, self.sensor_model_p_prior)
self.is_gridmapping_initialized = True
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Quaternion_to_Euler_angles_conversion
    def quaternion_to_yaw(self, qx, qy, qz, qw):
siny_cosp = 2 * (qw * qz + qx * qy)
cosy_cosp = 1 - 2 * (qy * qy + qz * qz)
return np.arctan2(siny_cosp, cosy_cosp)
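        # e.g. the identity quaternion (qx, qy, qz, qw) = (0, 0, 0, 1) gives
        # siny_cosp = 0 and cosy_cosp = 1, hence yaw = arctan2(0, 1) = 0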
def publish_occupancygrid(self, gridmap, stamp):
# Convert gridmap to ROS supported data type : int8[]
# http://docs.ros.org/en/melodic/api/nav_msgs/html/msg/OccupancyGrid.html
# The map data, in row-major order, starting with (0,0). Occupancy probabilities are in the range [0,100]. Unknown is -1.
gridmap_p = l2p(gridmap)
#unknown_mask = (gridmap_p == self.sensor_model_p_prior) # for setting unknown cells to -1
gridmap_int8 = (gridmap_p*100).astype(dtype=np.int8)
#gridmap_int8[unknown_mask] = -1 # for setting unknown cells to -1
# Publish map
self.map_msg.data = gridmap_int8
self.map_msg.header.stamp = stamp
self.map_pub.publish(self.map_msg)
rospy.loginfo_once("Published map!")
def laserscan_callback(self, data):
if not self.is_gridmapping_initialized:
self.init_gridmapping(data.angle_min, data.angle_max, data.angle_increment, data.range_max)
self.tf_sub.waitForTransform(self.map_frame, self.robot_frame, data.header.stamp, rospy.Duration(1.0))
try:
# get the robot position associated with the current laserscan
(x, y, _),(qx, qy, qz, qw) = self.tf_sub.lookupTransform(self.map_frame, self.robot_frame, data.header.stamp)
            theta = self.quaternion_to_yaw(qx, qy, qz, qw)
# check the movement if update is needed
if ( (x-self.prev_robot_x)**2 + (y-self.prev_robot_y)**2 >= self.update_movement**2 ):
gridmap = self.gridmapping.update(x, y, theta, data.ranges).flatten() # update map
self.prev_robot_x = x
self.prev_robot_y = y
# publish map (with the specified frequency)
if (self.map_last_publish.to_sec() + 1.0/self.map_publish_freq < rospy.Time.now().to_sec() ):
self.map_last_publish = rospy.Time.now()
self.publish_occupancygrid(gridmap, data.header.stamp)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as e:
rospy.logerr(e)
gm = GridMappingROS()
while not rospy.is_shutdown():
rospy.spin()
| [
"rospy.logerr",
"numpy.sqrt",
"rospy.loginfo_once",
"rospy.init_node",
"numpy.log",
"numpy.arctan2",
"tf.TransformListener",
"numpy.sin",
"nav_msgs.msg.OccupancyGrid",
"numpy.exp",
"rospy.spin",
"numpy.isinf",
"rospy.Subscriber",
"numpy.ones",
"rospy.get_param",
"rospy.Time.now",
"ro... | [((294, 313), 'numpy.log', 'np.log', (['(p / (1 - p))'], {}), '(p / (1 - p))\n', (300, 313), True, 'import numpy as np\n'), ((9072, 9091), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (9089, 9091), False, 'import rospy\n'), ((9097, 9109), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (9107, 9109), False, 'import rospy\n'), ((3002, 3022), 'numpy.absolute', 'np.absolute', (['(j1 - j0)'], {}), '(j1 - j0)\n', (3013, 3022), True, 'import numpy as np\n'), ((4317, 4366), 'rospy.init_node', 'rospy.init_node', (['"""RosGridMapping"""'], {'anonymous': '(True)'}), "('RosGridMapping', anonymous=True)\n", (4332, 4366), False, 'import rospy\n'), ((4447, 4459), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (4457, 4459), False, 'import rospy\n'), ((4573, 4617), 'rospy.get_param', 'rospy.get_param', (['"""~sensor_model_p_occ"""', '(0.75)'], {}), "('~sensor_model_p_occ', 0.75)\n", (4588, 4617), False, 'import rospy\n'), ((4654, 4699), 'rospy.get_param', 'rospy.get_param', (['"""~sensor_model_p_free"""', '(0.45)'], {}), "('~sensor_model_p_free', 0.45)\n", (4669, 4699), False, 'import rospy\n'), ((4736, 4781), 'rospy.get_param', 'rospy.get_param', (['"""~sensor_model_p_prior"""', '(0.5)'], {}), "('~sensor_model_p_prior', 0.5)\n", (4751, 4781), False, 'import rospy\n'), ((4818, 4862), 'rospy.get_param', 'rospy.get_param', (['"""~robot_frame"""', '"""base_link"""'], {}), "('~robot_frame', 'base_link')\n", (4833, 4862), False, 'import rospy\n'), ((4899, 4935), 'rospy.get_param', 'rospy.get_param', (['"""~map_frame"""', '"""map"""'], {}), "('~map_frame', 'map')\n", (4914, 4935), False, 'import rospy\n'), ((4972, 5010), 'rospy.get_param', 'rospy.get_param', (['"""~map_center_x"""', '(-1.0)'], {}), "('~map_center_x', -1.0)\n", (4987, 5010), False, 'import rospy\n'), ((5047, 5085), 'rospy.get_param', 'rospy.get_param', (['"""~map_center_y"""', '(-1.0)'], {}), "('~map_center_y', -1.0)\n", (5062, 5085), False, 'import rospy\n'), ((5122, 5158), 'rospy.get_param', 'rospy.get_param', (['"""~map_size_x"""', '(32.0)'], {}), "('~map_size_x', 32.0)\n", (5137, 5158), False, 'import rospy\n'), ((5195, 5231), 'rospy.get_param', 'rospy.get_param', (['"""~map_size_y"""', '(12.0)'], {}), "('~map_size_y', 12.0)\n", (5210, 5231), False, 'import rospy\n'), ((5268, 5307), 'rospy.get_param', 'rospy.get_param', (['"""~map_resolution"""', '(0.1)'], {}), "('~map_resolution', 0.1)\n", (5283, 5307), False, 'import rospy\n'), ((5344, 5385), 'rospy.get_param', 'rospy.get_param', (['"""~map_publish_freq"""', '(1.0)'], {}), "('~map_publish_freq', 1.0)\n", (5359, 5385), False, 'import rospy\n'), ((5422, 5462), 'rospy.get_param', 'rospy.get_param', (['"""~update_movement"""', '(0.1)'], {}), "('~update_movement', 0.1)\n", (5437, 5462), False, 'import rospy\n'), ((5537, 5552), 'nav_msgs.msg.OccupancyGrid', 'OccupancyGrid', ([], {}), '()\n', (5550, 5552), False, 'from nav_msgs.msg import OccupancyGrid\n'), ((5975, 6049), 'rospy.Subscriber', 'rospy.Subscriber', (['"""scan"""', 'LaserScan', 'self.laserscan_callback'], {'queue_size': '(2)'}), "('scan', LaserScan, self.laserscan_callback, queue_size=2)\n", (5991, 6049), False, 'import rospy\n'), ((6073, 6124), 'rospy.Publisher', 'rospy.Publisher', (['"""map"""', 'OccupancyGrid'], {'queue_size': '(2)'}), "('map', OccupancyGrid, queue_size=2)\n", (6088, 6124), False, 'import rospy\n'), ((6147, 6169), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (6167, 6169), False, 'import tf\n'), ((6876, 6908), 'numpy.arctan2', 'np.arctan2', (['siny_cosp', 'cosy_cosp'], {}), 
'(siny_cosp, cosy_cosp)\n', (6886, 6908), True, 'import numpy as np\n'), ((7666, 7702), 'rospy.loginfo_once', 'rospy.loginfo_once', (['"""Published map!"""'], {}), "('Published map!')\n", (7684, 7702), False, 'import rospy\n'), ((1411, 1440), 'numpy.ones', 'np.ones', (['(map_rows, map_cols)'], {}), '((map_rows, map_cols))\n', (1418, 1440), True, 'import numpy as np\n'), ((2248, 2259), 'numpy.isinf', 'np.isinf', (['d'], {}), '(d)\n', (2256, 2259), True, 'import numpy as np\n'), ((3039, 3059), 'numpy.absolute', 'np.absolute', (['(i1 - i0)'], {}), '(i1 - i0)\n', (3050, 3059), True, 'import numpy as np\n'), ((7987, 8006), 'rospy.Duration', 'rospy.Duration', (['(1.0)'], {}), '(1.0)\n', (8001, 8006), False, 'import rospy\n'), ((222, 231), 'numpy.exp', 'np.exp', (['l'], {}), '(l)\n', (228, 231), True, 'import numpy as np\n'), ((2264, 2274), 'numpy.sign', 'np.sign', (['d'], {}), '(d)\n', (2271, 2274), True, 'import numpy as np\n'), ((2331, 2342), 'numpy.isinf', 'np.isinf', (['d'], {}), '(d)\n', (2339, 2342), True, 'import numpy as np\n'), ((2346, 2357), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (2354, 2357), True, 'import numpy as np\n'), ((2399, 2412), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2405, 2412), True, 'import numpy as np\n'), ((2433, 2446), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2439, 2446), True, 'import numpy as np\n'), ((2641, 2652), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (2649, 2652), True, 'import numpy as np\n'), ((9022, 9037), 'rospy.logerr', 'rospy.logerr', (['e'], {}), '(e)\n', (9034, 9037), False, 'import rospy\n'), ((3337, 3377), 'numpy.sqrt', 'np.sqrt', (['((jp - j0) ** 2 + (ip - i0) ** 2)'], {}), '((jp - j0) ** 2 + (ip - i0) ** 2)\n', (3344, 3377), True, 'import numpy as np\n'), ((8822, 8838), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (8836, 8838), False, 'import rospy\n'), ((8749, 8765), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (8763, 8765), False, 'import rospy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 3 08:33:16 2021
@author: athulsun
"""
from mat4py import loadmat
from scipy.signal import filtfilt
import numpy as np
from scipy.interpolate import interp1d,PchipInterpolator
import matplotlib.pyplot as plt
import os
import sys
from dtqpy.src.classes.DTQPy_CLASS_OPTS import *
from dtqpy.src.classes.DTQPy_CLASS_SETUP import *
from dtqpy.src.DTQPy_solve import DTQPy_solve
def f_dtqp_fowt(LinearModels,disturbance):
# load linear models
Chan = LinearModels['Chan']
#breakpoint()
# obtain the size of the arrays
nl = len(Chan)
nx,nx = np.shape(Chan[0]['A'])
nx,nu = np.shape(Chan[0]['B'])
ny = len(LinearModels['OutName'])
OutputName = LinearModels['OutName']
# initialize
Aw = np.zeros((nl,nx,nx))
Bw = np.zeros((nl,nx,nu))
Cw = np.zeros((nl,ny,nx))
Dw = np.zeros((nl,ny,nu))
xw = np.zeros((nx,nl))
uw = np.zeros((nu,nl))
yw = np.zeros((nl,ny))
ws = np.zeros((nl))
# collect
for i in range(nl):
Aw[i,:,:] = np.array(Chan[i]['A'])
Bw[i,:,:] = np.array(Chan[i]['B'])
Cw[i,:,:] = np.array(Chan[i]['C'])
Dw[i,:,:] = np.array(Chan[i]['D'])
xw[:,i] = np.squeeze(np.array(Chan[i]['xop']))
uw[:,i] = np.squeeze(np.array(Chan[i]['uop']))
yw[i,:] = np.squeeze(np.array(Chan[i]['yop']))
ws[i] = Chan[i]['WindSpeed']
# construct LPV models
# A matrix
A_op_pp = PchipInterpolator(ws, Aw, axis = 0)
A_op = lambda w: A_op_pp(w)
# Bmatrix
B_op_pp = PchipInterpolator(ws, Bw, axis = 0)
B_op = lambda w: B_op_pp(w)
# Cmatrix
C_op_pp = PchipInterpolator(ws,Cw,axis = 0)
C_op = lambda w: C_op_pp(w)
# Dmatrix
D_op_pp = PchipInterpolator(ws,Dw,axis = 0)
D_op = lambda w: D_op_pp(w)
# control operating points
Uo_pp = PchipInterpolator(ws,uw,axis = 1)
Uo_fun = lambda w: Uo_pp(w)
# state operating points
Xo_pp = PchipInterpolator(ws, xw, axis = 1)
Xo_fun = lambda w: Xo_pp(w)
# outputs
Yo_pp = PchipInterpolator(ws, yw, axis = 0)
Yo_fun = lambda w: Yo_pp(w)
# first time derivative of state operating points
DXo_pp = Xo_pp.derivative
DXo_pp = DXo_pp(nu=1)
DXo_fun = lambda w: DXo_pp(w)
Wind_o = disturbance['Chan']
Wind_speed = np.array(Wind_o['RtVAvgxh'])
tt = np.array(Wind_o['tt'])
filterflag = 1
if filterflag:
t_f = 1
dt = tt[2,0]-tt[1,0]
        nb = int(np.floor(t_f/dt))
        b = np.ones((nb,))/nb  # nb-tap moving-average (FIR) filter
        Wind_speed = filtfilt(b, 1, Wind_speed, axis=0)
opts = options()
opts.dt.nt = 1000
opts.solver.tolerence = 1e-16
opts.solver.maxiters = 1000000
opts.solver.function = 'pyoptsparse'
time = np.linspace(tt[0],tt[-1],opts.dt.nt)
W_pp = PchipInterpolator(np.squeeze(tt),np.squeeze(Wind_speed))
dW_pp = W_pp.derivative
dW_pp = dW_pp(nu = 1)
DW_fun = lambda t: dW_pp(t)
W_fun = lambda t: W_pp(t)
DXoDt_fun = lambda t: (-DXo_fun(W_fun(t)).T*DW_fun(t)).T
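    # chain rule: d/dt Xo(W(t)) = dXo/dw * dW/dt; the minus sign gives the
    # negative of this total derivative, used as the offset term s.d below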
def BuildLambda(Ax):
return lambda t: Ax(t)
def TVmat2cell(f,time):
"""
        Convert an nt*m*n (or nt*m) array of time samples into an m*n array
        of interpolating functions of time.
"""
# evaluate function
At = f(time)
s = np.shape(At)
if len(s) ==4:
At = np.squeeze(At)
elif len(s) == 3:
At = np.squeeze(At)
At= At.T
# get size
try:
null,m,n = np.shape(At)
        except ValueError:  # 2-D input
null,m = np.shape(At)
n = 1
# initialize storage
A = np.empty((m,n),dtype = 'O')
Aval = np.empty((8,8))
#breakpoint()
for i in range(m):
for j in range(n):
try:
Ax = PchipInterpolator(np.squeeze(time),At[:,i,j],axis = 0)
except:
Ax = PchipInterpolator(np.squeeze(time),At[:,i],axis = 0)
                # workaround: lambdas defined directly in a loop would all
                # capture the same (late-bound) loop variable, so use the helper
A[i,j] = BuildLambda(Ax)
return A
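    # The late-binding pitfall that BuildLambda works around (illustrative):
    #     fs = [lambda t: k * t for k in range(3)]   # every fs[i] computes 2*t
    #     gs = [(lambda k: lambda t: k * t)(k) for k in range(3)]  # 0t, 1t, 2t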
    ## Discrete to continuous: build interpolating functions of wind speed
def BuildFunction(w_ops,X):
Xpp = PchipInterpolator(w_ops,X)
return lambda w: Xpp(w)
# Generator speed function
GS_fun = BuildFunction(ws,xw[4,:])
# -1*GS function
GSn_fun = BuildFunction(ws,-xw[4,:])
# Generator torque
GT_fun = BuildFunction(ws,uw[1,:])
# -Generator torque
GTn_fun = BuildFunction(ws,-uw[1,:])
# Blade pitch
BP_fun = BuildFunction(ws,uw[2,:])
# Generator power
GP_fun = BuildFunction(ws,-uw[1,:]*xw[4,:])
# State operating point values
r = Xo_fun(ws)
    # helper to pick the value of a vector-valued function at a given index
indexat = lambda expr,index: expr[index,:]
# get shape
nws,nx,nu = np.shape(Bw)
# initialize
ub = np.ones((nx,1))*np.inf
lb = -np.ones((nx,1))*np.inf
# set ub values for PtfmPitch and Genspeed
ub[0] = np.deg2rad(6)
ub[4] = 0.7913+0.0001
# initialize
UBx = np.empty((nx,1),dtype = 'O')
LBx = np.empty((nx,1),dtype = 'O')
# need this function to define anaonymous functions in a loop in python
def BuildLambdaUB(ub,indexat,Xo_fun,W_fun,i):
return lambda t: ub - indexat(Xo_fun(W_fun(t)),i)
# build ub and lb functions
for i in range(nx):
UBx[i,0] = BuildLambdaUB(ub[i],indexat,Xo_fun,W_fun,i)
LBx[i,0] = BuildLambdaUB(lb[i],indexat,Xo_fun,W_fun,i)
# control bounds
UBc = np.array([[lambda t: W_fun(t)-W_fun(t)],
[lambda t: max(uw[1,:])-GT_fun(W_fun(t))],
[lambda t: max(uw[2,:])-BP_fun(W_fun(t))]])
LBc = np.array([[lambda t: W_fun(t)-W_fun(t)],
[lambda t: min(uw[1,:])-GT_fun(W_fun(t))],
[lambda t: min(uw[2,:])-BP_fun(W_fun(t))]])
# initial state
X0_n = np.array( [[0.0493],
[0.1957],
[0.0000],
[0.0001],
[0.7913],
[0],
[0],
[0]])
UBs = X0_n - Xo_fun(W_fun(0))[None].T
LBs = X0_n - Xo_fun(W_fun(0))[None].T
# UB,LB
UB = [Simple_Bounds() for n in range(3)]
LB = [Simple_Bounds() for n in range(3)]
# states
UB[0].right = 2
UB[0].matrix = UBx
LB[0].right = 2
LB[0].matrix = LBx
# control bounds
UB[1].right = 1
UB[1].matrix = UBc
LB[1].right = 1
LB[1].matrix = LBc
# initial state
UB[2].right = 4
UB[2].matrix = UBs
LB[2].right = 4
LB[2].matrix = LBs
# lagrange terms
R1 = 1e-0; R2 = 1e+8
lx = 0
L = [LQ_objective() for n in range(5)]
# uRu
L[lx].left = 1
L[lx].right = 1
L[lx].matrix = np.diag([0,R1,R2])
lx = lx+1
# uPX
L[lx].left = 1
L[lx].right = 2
Lmat = np.zeros((nu,nx)); Lmat[1,4] = -1
L[lx].matrix = Lmat
lx = lx+1
L[lx].left = 0;
L[lx].right = 1
L2mat = np.zeros((1,nu),dtype = 'O')
L2mat[0,1] = lambda t: GSn_fun(W_fun(t))
L[lx].matrix = L2mat
lx = lx+1
L[lx].left = 0
L[lx].right = 2
L3mat = np.zeros((1,nx),dtype = 'O')
L3mat[0,4] = lambda t: GTn_fun(W_fun(t))
L[lx].matrix = L3mat
lx = lx+1
L[lx].left = 0
L[lx].right = 0
L4mat = np.empty((1,1),dtype = 'O')
L4mat[0,0] = lambda t: GP_fun(W_fun(t))
L[lx].matrix = L4mat
#
scale = Scaling(right = 1, matrix = np.array([1,1e-16,1e-4]))
# setup
s = setup()
s.A = TVmat2cell(lambda t: A_op(W_fun(t)),time)
s.B = TVmat2cell(lambda t: B_op(W_fun(t)),time)
s.d = TVmat2cell(DXoDt_fun,time)
s.Lagrange = L
s.UB = UB
s.LB = LB
s.Scaling = scale
s.t0 = 0
s.tf = 600
#breakpoint()
[T,Ul,Xl,P,F,internal,opts] = DTQPy_solve(s,opts)
# calculate offset
Xo_off = np.squeeze(Xo_fun(W_fun(T))).T
Uo_off = np.squeeze(Uo_fun(W_fun(T))).T
# Add offset to estimated states
X = Xl + Xo_off
U = Ul + Uo_off
# plot
fig, ((ax1,ax2,ax3)) = plt.subplots(3,1,)
# wind
ax1.plot(T,U[:,0])
ax1.set_title('Wind Speed [m/s]')
ax1.set_xlim([0,600])
    # torque
ax2.plot(T,U[:,1]/1e+07)
ax2.set_ylim([1.8,2])
ax2.set_title('Gen Torque [MWm]')
ax2.set_xlim([0,600])
# blade pitch
ax3.plot(T,U[:,2])
#ax3.set_ylim([0.2, 0.3])
ax3.set_title('Bld Pitch [rad/s]')
ax3.set_xlim([0,600])
fig.subplots_adjust(hspace = 0.65)
fig2, ((ax1,ax2)) = plt.subplots(2,1)
# PtfmPitch
ax1.plot(T,np.rad2deg(X[:,0]))
ax1.set_xlim([0,600])
ax1.set_title('Ptfm Pitch [deg]')
    # GenSpeed
ax2.plot(T,X[:,4])
ax2.set_xlim([0,600])
ax2.set_title('Gen Speed [rad/s]')
fig2.subplots_adjust(hspace = 0.65)
plt.show()
if __name__ == '__main__':
ex_name = os.path.dirname(os.path.realpath(__file__))
Wind_file = ex_name + os.sep + '072720_183300.mat'
Wind_o = loadmat(Wind_file)
Linfile = ex_name + os.sep +'SS2py.mat'
LinearModels = loadmat(Linfile)
f_dtqp_fowt(LinearModels,Wind_o)
| [
"numpy.ones",
"mat4py.loadmat",
"scipy.signal.filtfilt",
"scipy.interpolate.PchipInterpolator",
"numpy.floor",
"numpy.diag",
"numpy.squeeze",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.deg2rad",
"numpy.empty",
"matplotlib.pyplot.subplots",
"os.path.realpath",
"numpy.shape",
... | [((635, 657), 'numpy.shape', 'np.shape', (["Chan[0]['A']"], {}), "(Chan[0]['A'])\n", (643, 657), True, 'import numpy as np\n'), ((670, 692), 'numpy.shape', 'np.shape', (["Chan[0]['B']"], {}), "(Chan[0]['B'])\n", (678, 692), True, 'import numpy as np\n'), ((800, 822), 'numpy.zeros', 'np.zeros', (['(nl, nx, nx)'], {}), '((nl, nx, nx))\n', (808, 822), True, 'import numpy as np\n'), ((830, 852), 'numpy.zeros', 'np.zeros', (['(nl, nx, nu)'], {}), '((nl, nx, nu))\n', (838, 852), True, 'import numpy as np\n'), ((860, 882), 'numpy.zeros', 'np.zeros', (['(nl, ny, nx)'], {}), '((nl, ny, nx))\n', (868, 882), True, 'import numpy as np\n'), ((890, 912), 'numpy.zeros', 'np.zeros', (['(nl, ny, nu)'], {}), '((nl, ny, nu))\n', (898, 912), True, 'import numpy as np\n'), ((921, 939), 'numpy.zeros', 'np.zeros', (['(nx, nl)'], {}), '((nx, nl))\n', (929, 939), True, 'import numpy as np\n'), ((948, 966), 'numpy.zeros', 'np.zeros', (['(nu, nl)'], {}), '((nu, nl))\n', (956, 966), True, 'import numpy as np\n'), ((975, 993), 'numpy.zeros', 'np.zeros', (['(nl, ny)'], {}), '((nl, ny))\n', (983, 993), True, 'import numpy as np\n'), ((1002, 1014), 'numpy.zeros', 'np.zeros', (['nl'], {}), '(nl)\n', (1010, 1014), True, 'import numpy as np\n'), ((1500, 1533), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'Aw'], {'axis': '(0)'}), '(ws, Aw, axis=0)\n', (1517, 1533), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((1597, 1630), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'Bw'], {'axis': '(0)'}), '(ws, Bw, axis=0)\n', (1614, 1630), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((1694, 1727), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'Cw'], {'axis': '(0)'}), '(ws, Cw, axis=0)\n', (1711, 1727), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((1789, 1822), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'Dw'], {'axis': '(0)'}), '(ws, Dw, axis=0)\n', (1806, 1822), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((1899, 1932), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'uw'], {'axis': '(1)'}), '(ws, uw, axis=1)\n', (1916, 1932), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((2007, 2040), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'xw'], {'axis': '(1)'}), '(ws, xw, axis=1)\n', (2024, 2040), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((2102, 2135), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['ws', 'yw'], {'axis': '(0)'}), '(ws, yw, axis=0)\n', (2119, 2135), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((2367, 2395), 'numpy.array', 'np.array', (["Wind_o['RtVAvgxh']"], {}), "(Wind_o['RtVAvgxh'])\n", (2375, 2395), True, 'import numpy as np\n'), ((2405, 2427), 'numpy.array', 'np.array', (["Wind_o['tt']"], {}), "(Wind_o['tt'])\n", (2413, 2427), True, 'import numpy as np\n'), ((2830, 2868), 'numpy.linspace', 'np.linspace', (['tt[0]', 'tt[-1]', 'opts.dt.nt'], {}), '(tt[0], tt[-1], opts.dt.nt)\n', (2841, 2868), True, 'import numpy as np\n'), ((4976, 4988), 'numpy.shape', 'np.shape', (['Bw'], {}), '(Bw)\n', (4984, 4988), True, 'import numpy as np\n'), ((5132, 5145), 'numpy.deg2rad', 'np.deg2rad', (['(6)'], {}), '(6)\n', (5142, 5145), True, 'import numpy as np\n'), ((5200, 5228), 'numpy.empty', 'np.empty', (['(nx, 1)'], {'dtype': '"""O"""'}), "((nx, 1), dtype='O')\n", (5208, 5228), True, 'import numpy as 
np\n'), ((5239, 5267), 'numpy.empty', 'np.empty', (['(nx, 1)'], {'dtype': '"""O"""'}), "((nx, 1), dtype='O')\n", (5247, 5267), True, 'import numpy as np\n'), ((6047, 6119), 'numpy.array', 'np.array', (['[[0.0493], [0.1957], [0.0], [0.0001], [0.7913], [0], [0], [0]]'], {}), '([[0.0493], [0.1957], [0.0], [0.0001], [0.7913], [0], [0], [0]])\n', (6055, 6119), True, 'import numpy as np\n'), ((6867, 6887), 'numpy.diag', 'np.diag', (['[0, R1, R2]'], {}), '([0, R1, R2])\n', (6874, 6887), True, 'import numpy as np\n'), ((6962, 6980), 'numpy.zeros', 'np.zeros', (['(nu, nx)'], {}), '((nu, nx))\n', (6970, 6980), True, 'import numpy as np\n'), ((7088, 7116), 'numpy.zeros', 'np.zeros', (['(1, nu)'], {'dtype': '"""O"""'}), "((1, nu), dtype='O')\n", (7096, 7116), True, 'import numpy as np\n'), ((7253, 7281), 'numpy.zeros', 'np.zeros', (['(1, nx)'], {'dtype': '"""O"""'}), "((1, nx), dtype='O')\n", (7261, 7281), True, 'import numpy as np\n'), ((7419, 7446), 'numpy.empty', 'np.empty', (['(1, 1)'], {'dtype': '"""O"""'}), "((1, 1), dtype='O')\n", (7427, 7446), True, 'import numpy as np\n'), ((7910, 7930), 'dtqpy.src.DTQPy_solve.DTQPy_solve', 'DTQPy_solve', (['s', 'opts'], {}), '(s, opts)\n', (7921, 7930), False, 'from dtqpy.src.DTQPy_solve import DTQPy_solve\n'), ((8162, 8180), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (8174, 8180), True, 'import matplotlib.pyplot as plt\n'), ((8614, 8632), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (8626, 8632), True, 'import matplotlib.pyplot as plt\n'), ((8899, 8909), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8907, 8909), True, 'import matplotlib.pyplot as plt\n'), ((9074, 9092), 'mat4py.loadmat', 'loadmat', (['Wind_file'], {}), '(Wind_file)\n', (9081, 9092), False, 'from mat4py import loadmat\n'), ((9156, 9172), 'mat4py.loadmat', 'loadmat', (['Linfile'], {}), '(Linfile)\n', (9163, 9172), False, 'from mat4py import loadmat\n'), ((1077, 1099), 'numpy.array', 'np.array', (["Chan[i]['A']"], {}), "(Chan[i]['A'])\n", (1085, 1099), True, 'import numpy as np\n'), ((1120, 1142), 'numpy.array', 'np.array', (["Chan[i]['B']"], {}), "(Chan[i]['B'])\n", (1128, 1142), True, 'import numpy as np\n'), ((1163, 1185), 'numpy.array', 'np.array', (["Chan[i]['C']"], {}), "(Chan[i]['C'])\n", (1171, 1185), True, 'import numpy as np\n'), ((1206, 1228), 'numpy.array', 'np.array', (["Chan[i]['D']"], {}), "(Chan[i]['D'])\n", (1214, 1228), True, 'import numpy as np\n'), ((2616, 2650), 'scipy.signal.filtfilt', 'filtfilt', (['b', '(1)', 'Wind_speed'], {'axis': '(0)'}), '(b, 1, Wind_speed, axis=0)\n', (2624, 2650), False, 'from scipy.signal import filtfilt\n'), ((2896, 2910), 'numpy.squeeze', 'np.squeeze', (['tt'], {}), '(tt)\n', (2906, 2910), True, 'import numpy as np\n'), ((2911, 2933), 'numpy.squeeze', 'np.squeeze', (['Wind_speed'], {}), '(Wind_speed)\n', (2921, 2933), True, 'import numpy as np\n'), ((3345, 3357), 'numpy.shape', 'np.shape', (['At'], {}), '(At)\n', (3353, 3357), True, 'import numpy as np\n'), ((3731, 3758), 'numpy.empty', 'np.empty', (['(m, n)'], {'dtype': '"""O"""'}), "((m, n), dtype='O')\n", (3739, 3758), True, 'import numpy as np\n'), ((3774, 3790), 'numpy.empty', 'np.empty', (['(8, 8)'], {}), '((8, 8))\n', (3782, 3790), True, 'import numpy as np\n'), ((4309, 4336), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['w_ops', 'X'], {}), '(w_ops, X)\n', (4326, 4336), False, 'from scipy.interpolate import interp1d, PchipInterpolator\n'), ((5016, 5032), 'numpy.ones', 'np.ones', (['(nx, 
1)'], {}), '((nx, 1))\n', (5023, 5032), True, 'import numpy as np\n'), ((8664, 8683), 'numpy.rad2deg', 'np.rad2deg', (['X[:, 0]'], {}), '(X[:, 0])\n', (8674, 8683), True, 'import numpy as np\n'), ((8968, 8994), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (8984, 8994), False, 'import os\n'), ((1267, 1291), 'numpy.array', 'np.array', (["Chan[i]['xop']"], {}), "(Chan[i]['xop'])\n", (1275, 1291), True, 'import numpy as np\n'), ((1322, 1346), 'numpy.array', 'np.array', (["Chan[i]['uop']"], {}), "(Chan[i]['uop'])\n", (1330, 1346), True, 'import numpy as np\n'), ((1377, 1401), 'numpy.array', 'np.array', (["Chan[i]['yop']"], {}), "(Chan[i]['yop'])\n", (1385, 1401), True, 'import numpy as np\n'), ((2530, 2548), 'numpy.floor', 'np.floor', (['(t_f / dt)'], {}), '(t_f / dt)\n', (2538, 2548), True, 'import numpy as np\n'), ((2560, 2574), 'numpy.ones', 'np.ones', (['(nb,)'], {}), '((nb,))\n', (2567, 2574), True, 'import numpy as np\n'), ((3416, 3430), 'numpy.squeeze', 'np.squeeze', (['At'], {}), '(At)\n', (3426, 3430), True, 'import numpy as np\n'), ((3600, 3612), 'numpy.shape', 'np.shape', (['At'], {}), '(At)\n', (3608, 3612), True, 'import numpy as np\n'), ((5049, 5065), 'numpy.ones', 'np.ones', (['(nx, 1)'], {}), '((nx, 1))\n', (5056, 5065), True, 'import numpy as np\n'), ((7564, 7592), 'numpy.array', 'np.array', (['[1, 1e-16, 0.0001]'], {}), '([1, 1e-16, 0.0001])\n', (7572, 7592), True, 'import numpy as np\n'), ((3474, 3488), 'numpy.squeeze', 'np.squeeze', (['At'], {}), '(At)\n', (3484, 3488), True, 'import numpy as np\n'), ((3650, 3662), 'numpy.shape', 'np.shape', (['At'], {}), '(At)\n', (3658, 3662), True, 'import numpy as np\n'), ((3935, 3951), 'numpy.squeeze', 'np.squeeze', (['time'], {}), '(time)\n', (3945, 3951), True, 'import numpy as np\n'), ((4039, 4055), 'numpy.squeeze', 'np.squeeze', (['time'], {}), '(time)\n', (4049, 4055), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Multi Channels VAE (MCVAE)
==========================
Credit: <NAME> & <NAME>
The Multi Channel VAE (MCVAE) is an extension of the variational autoencoder
able to jointly model multiple data sources, called channels.
The `test` variable must be set to False to run a full training.
"""
import os
import sys
import time
import copy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset
from brainboard import Board
from brainite.models import MCVAE
from brainite.losses import MCVAELoss
test = True
n_samples = 500
n_channels = 3
n_feats = 4
true_lat_dims = 2
fit_lat_dims = 5
snr = 10
adam_lr = 2e-3
n_epochs = 3 if test else 5000
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
############################################################################
# Create synthetic data
# ---------------------
#
# Generate multiple sources (channels) of data through a linear generative
# model
class GeneratorUniform(nn.Module):
""" Generate multiple sources (channels) of data through a linear
generative model:
    z ~ N(0, I)
    for c_idx in range(n_channels):
        x_ch = W_ch(z)
    where 'W_ch' is an arbitrary linear mapping z -> x_ch
"""
def __init__(self, lat_dim=2, n_channels=2, n_feats=5, seed=100):
super(GeneratorUniform, self).__init__()
self.lat_dim = lat_dim
self.n_channels = n_channels
self.n_feats = n_feats
self.seed = seed
np.random.seed(self.seed)
W = []
for c_idx in range(n_channels):
w_ = np.random.uniform(-1, 1, (self.n_feats, lat_dim))
u, s, vt = np.linalg.svd(w_, full_matrices=False)
w = (u if self.n_feats >= lat_dim else vt)
W.append(torch.nn.Linear(lat_dim, self.n_feats, bias=False))
W[c_idx].weight.data = torch.FloatTensor(w)
self.W = torch.nn.ModuleList(W)
def forward(self, z):
if isinstance(z, list):
return [self.forward(_) for _ in z]
if type(z) == np.ndarray:
z = torch.FloatTensor(z)
assert z.size(1) == self.lat_dim
obs = []
for ch in range(self.n_channels):
x = self.W[ch](z)
obs.append(x.detach())
return obs
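# Usage sketch (illustrative shapes): with lat_dim=2, n_channels=3 and
# n_feats=5, forward() maps z of shape (N, 2) to a list of 3 tensors of
# shape (N, 5), one per channel.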
class SyntheticDataset(Dataset):
def __init__(self, n_samples=500, lat_dim=2, n_feats=5, n_channels=2,
generatorclass=GeneratorUniform, snr=1, train=True):
super(SyntheticDataset, self).__init__()
self.n_samples = n_samples
self.lat_dim = lat_dim
self.n_feats = n_feats
self.n_channels = n_channels
self.snr = snr
self.train = train
seed = (7 if self.train is True else 14)
np.random.seed(seed)
self.z = np.random.normal(size=(self.n_samples, self.lat_dim))
self.generator = generatorclass(
lat_dim=self.lat_dim, n_channels=self.n_channels,
n_feats=self.n_feats)
self.x = self.generator(self.z)
self.X, self.X_noisy = preprocess_and_add_noise(self.x, snr=snr)
self.X = [np.expand_dims(x.astype(np.float32), axis=1) for x in self.X]
def __len__(self):
return self.n_samples
def __getitem__(self, item):
return [x[item] for x in self.X]
@property
def shape(self):
return (len(self), len(self.X))
def preprocess_and_add_noise(x, snr, seed=0):
if not isinstance(snr, list):
snr = [snr] * len(x)
scalers = [StandardScaler().fit(c_arr) for c_arr in x]
x_std = [scalers[c_idx].transform(x[c_idx]) for c_idx in range(len(x))]
    # derive a deterministic, configuration-dependent seed (prime-weighted
    # sum) for reproducibility in training/testing
seed = (seed + 3 * int(snr[0] + 1) + 5 * len(x) + 7 * x[0].shape[0] +
11 * x[0].shape[1])
np.random.seed(seed)
x_std_noisy = []
for c_idx, arr in enumerate(x_std):
sigma_noise = np.sqrt(1. / snr[c_idx])
x_std_noisy.append(arr + sigma_noise * np.random.randn(*arr.shape))
return x_std, x_std_noisy
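# Note: after standardization each channel has unit variance, so additive
# noise with sigma_noise = sqrt(1 / snr) gives var(signal) / var(noise)
# = 1 / (1 / snr) = snr, i.e. the requested signal-to-noise ratio.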
ds_train = SyntheticDataset(
n_samples=n_samples, lat_dim=true_lat_dims, n_feats=n_feats,
n_channels=n_channels, train=True, snr=snr)
ds_val = SyntheticDataset(
n_samples=n_samples, lat_dim=true_lat_dims, n_feats=n_feats,
n_channels=n_channels, train=False, snr=snr)
datasets = {"train": ds_train, "val": ds_val}
dataloaders = {x: torch.utils.data.DataLoader(
datasets[x], batch_size=n_samples, shuffle=True, num_workers=1)
for x in ["train", "val"]}
############################################################################
# Sparse vs non sparse
# --------------------
#
# Train a sparse and a non sparse MCVAE.
def train_model(dataloaders, model, device, criterion, optimizer,
scheduler=None, n_epochs=100, checkpointdir=None,
save_after_epochs=1, board=None, board_updates=None,
load_best=False):
""" General function to train a model and display training metrics.
Parameters
----------
dataloaders: dict of torch.utils.data.DataLoader
the train & validation data loaders.
model: nn.Module
the model to be trained.
device: torch.device
the device to work on.
criterion: torch.nn._Loss
the criterion to be optimized.
optimizer: torch.optim.Optimizer
the optimizer.
scheduler: torch.optim.lr_scheduler, default None
the scheduler.
n_epochs: int, default 100
the number of epochs.
checkpointdir: str, default None
a destination folder where intermediate models/histories will be
saved.
save_after_epochs: int, default 1
determines when the model is saved and represents the number of
epochs before saving.
board: brainboard.Board, default None
a board to display live results.
board_updates: list of callable, default None
update displayed item on the board.
load_best: bool, default False
optionally load the best model regarding the loss.
"""
since = time.time()
if board_updates is not None:
board_updates = listify(board_updates)
best_model_wts = copy.deepcopy(model.state_dict())
best_loss = sys.float_info.max
dataset_sizes = {x: len(dataloaders[x]) for x in ["train", "val"]}
model = model.to(device)
for epoch in range(n_epochs):
print("Epoch {0}/{1}".format(epoch, n_epochs - 1))
print("-" * 10)
for phase in ["train", "val"]:
if phase == "train":
model.train()
else:
model.eval()
running_loss = 0.0
for batch_data in dataloaders[phase]:
batch_data = to_device(batch_data, device)
# Zero the parameter gradients
optimizer.zero_grad()
# Forward:
# track history if only in train
with torch.set_grad_enabled(phase == "train"):
outputs, layer_outputs = model(batch_data)
criterion.layer_outputs = layer_outputs
loss, extra_loss = criterion(outputs)
# Backward + optimize only if in training phase
if phase == "train":
loss.backward()
optimizer.step()
# Statistics
running_loss += loss.item() * batch_data[0].size(0)
if scheduler is not None and phase == "train":
scheduler.step()
epoch_loss = running_loss / dataset_sizes[phase]
print("{0} Loss: {1:.4f}".format(phase, epoch_loss))
if board is not None:
board.update_plot("loss_{0}".format(phase), epoch, epoch_loss)
# Display validation classification results
if board_updates is not None and phase == "val":
for update in board_updates:
update(model, board, outputs, layer_outputs)
# Deep copy the best model
if phase == "val" and epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
# Save intermediate results
if checkpointdir is not None and epoch % save_after_epochs == 0:
outfile = os.path.join(
checkpointdir, "model_{0}.pth".format(epoch))
checkpoint(
model=model, outfile=outfile, optimizer=optimizer,
scheduler=scheduler, epoch=epoch, epoch_loss=epoch_loss)
print()
time_elapsed = time.time() - since
print("Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60))
print("Best val loss: {:4f}".format(best_loss))
# Load best model weights
if load_best:
model.load_state_dict(best_model_wts)
def listify(data):
""" Ensure that the input is a list or tuple.
Parameters
----------
arr: list or array
the input data.
Returns
-------
out: list
        the listified input data.
"""
if isinstance(data, list) or isinstance(data, tuple):
return data
else:
return [data]
def to_device(data, device):
""" Transfer data to device.
Parameters
----------
data: tensor or list of tensor
        the data to be transferred.
device: torch.device
the device to work on.
Returns
-------
out: tensor or list of tensor
        the transferred data.
"""
if isinstance(data, list):
return [tensor.to(device) for tensor in data]
else:
return data.to(device)
def checkpoint(model, outfile, optimizer=None, scheduler=None,
**kwargs):
""" Save the weights of a given model.
Parameters
----------
model: nn.Module
the model to be saved.
outfile: str
the destination file name.
optimizer: torch.optim.Optimizer
the optimizer.
scheduler: torch.optim.lr_scheduler, default None
the scheduler.
kwargs: dict
others parameters to be saved.
"""
kwargs.update(model=model.state_dict())
if optimizer is not None:
kwargs.update(optimizer=optimizer.state_dict())
if scheduler is not None:
kwargs.update(scheduler=scheduler.state_dict())
torch.save(kwargs, outfile)
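    # A checkpoint written this way can be restored later with (illustrative):
    #     state = torch.load(outfile)
    #     model.load_state_dict(state["model"])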
def update_dropout_rate(model, board, outputs, layer_outputs=None):
""" Display the dropout rate.
"""
if model.log_alpha is not None:
do = np.sort(model.dropout.numpy().reshape(-1))
board.update_hist("dropout_probability", do)
models = {}
torch.manual_seed(42)
models["mcvae"] = MCVAE(
latent_dim=fit_lat_dims, n_channels=n_channels,
n_feats=[n_feats] * n_channels, vae_model="dense",
vae_kwargs={}, sparse=False)
torch.manual_seed(42)
models["smcvae"] = MCVAE(
latent_dim=fit_lat_dims, n_channels=n_channels,
n_feats=[n_feats] * n_channels, vae_model="dense",
vae_kwargs={}, sparse=True)
for model_name, model in models.items():
print("- model:", model_name)
print(model)
board = Board(env=model_name)
optimizer = torch.optim.Adam(params=model.parameters(), lr=adam_lr)
criterion = MCVAELoss(n_channels, beta=1., sparse=False)
train_model(dataloaders, model, device, criterion, optimizer,
n_epochs=n_epochs, board=board,
board_updates=update_dropout_rate)
############################################################################
# Display results
pred = {} # Prediction
z = {} # Latent Space
g = {} # Generative Parameters
x_hat = {} # Reconstructed channels
for model_name, model in models.items():
model.eval()
X = [torch.from_numpy(x).to(device) for x in datasets["val"].X]
print("--", model_name)
print("-- X", [x.size() for x in X])
with torch.no_grad():
q = model.encode(X) # encoded distribution q(z|x)
print("-- encoded distribution q(z|x)", [n for n in q])
z[model_name] = model.p_to_prediction(q)
print("-- z", [e.shape for e in z[model_name]])
if model.sparse:
z[model_name] = model.apply_threshold(z[model_name], 0.2)
z[model_name] = np.array(z[model_name]).reshape(-1) # flatten
print("-- z", z[model_name].shape)
g[model_name] = [
model.vae[c_idx].encode.w_mu.weight.detach().cpu().numpy()
for c_idx in range(n_channels)]
g[model_name] = np.array(g[model_name]).reshape(-1) #flatten
############################################################################
# With such a simple dataset, mcvae and sparse-mcvae give the same results in
# terms of latent space and generative parameters.
# However, only the sparse model is able to easily identify the
# important latent dimensions.
plt.figure()
plt.subplot(1,2,1)
plt.hist([z["smcvae"], z["mcvae"]], bins=20, color=["k", "gray"])
plt.legend(["Sparse", "Non sparse"])
plt.title("Latent dimensions distribution")
plt.ylabel("Count")
plt.xlabel("Value")
plt.subplot(1,2,2)
plt.hist([g["smcvae"], g["mcvae"]], bins=20, color=["k", "gray"])
plt.legend(["Sparse", "Non sparse"])
plt.title(r"Generative parameters $\mathbf{\theta} = \{\mathbf{\theta}_1 "
r"\ldots \mathbf{\theta}_C\}$")
plt.xlabel("Value")
do = np.sort(models["smcvae"].dropout.numpy().reshape(-1))
plt.figure()
plt.bar(range(len(do)), do)
plt.suptitle("Dropout probability of {0} fitted latent dimensions in Sparse "
"Model".format(fit_lat_dims))
plt.title("{0} true latent dimensions".format(true_lat_dims))
plt.show()
| [
"matplotlib.pyplot.hist",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"torch.set_grad_enabled",
"torch.nn.ModuleList",
"matplotlib.pyplot.xlabel",
"numpy.random.seed",
"brainboard.Board",
"brainite.models.MCVAE",
"numpy.random.norma... | [((10781, 10802), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (10798, 10802), False, 'import torch\n'), ((10821, 10958), 'brainite.models.MCVAE', 'MCVAE', ([], {'latent_dim': 'fit_lat_dims', 'n_channels': 'n_channels', 'n_feats': '([n_feats] * n_channels)', 'vae_model': '"""dense"""', 'vae_kwargs': '{}', 'sparse': '(False)'}), "(latent_dim=fit_lat_dims, n_channels=n_channels, n_feats=[n_feats] *\n n_channels, vae_model='dense', vae_kwargs={}, sparse=False)\n", (10826, 10958), False, 'from brainite.models import MCVAE\n'), ((10968, 10989), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (10985, 10989), False, 'import torch\n'), ((11009, 11145), 'brainite.models.MCVAE', 'MCVAE', ([], {'latent_dim': 'fit_lat_dims', 'n_channels': 'n_channels', 'n_feats': '([n_feats] * n_channels)', 'vae_model': '"""dense"""', 'vae_kwargs': '{}', 'sparse': '(True)'}), "(latent_dim=fit_lat_dims, n_channels=n_channels, n_feats=[n_feats] *\n n_channels, vae_model='dense', vae_kwargs={}, sparse=True)\n", (11014, 11145), False, 'from brainite.models import MCVAE\n'), ((12940, 12952), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12950, 12952), True, 'import matplotlib.pyplot as plt\n'), ((12953, 12973), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (12964, 12973), True, 'import matplotlib.pyplot as plt\n'), ((12972, 13037), 'matplotlib.pyplot.hist', 'plt.hist', (["[z['smcvae'], z['mcvae']]"], {'bins': '(20)', 'color': "['k', 'gray']"}), "([z['smcvae'], z['mcvae']], bins=20, color=['k', 'gray'])\n", (12980, 13037), True, 'import matplotlib.pyplot as plt\n'), ((13038, 13074), 'matplotlib.pyplot.legend', 'plt.legend', (["['Sparse', 'Non sparse']"], {}), "(['Sparse', 'Non sparse'])\n", (13048, 13074), True, 'import matplotlib.pyplot as plt\n'), ((13075, 13118), 'matplotlib.pyplot.title', 'plt.title', (['"""Latent dimensions distribution"""'], {}), "('Latent dimensions distribution')\n", (13084, 13118), True, 'import matplotlib.pyplot as plt\n'), ((13119, 13138), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (13129, 13138), True, 'import matplotlib.pyplot as plt\n'), ((13139, 13158), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (13149, 13158), True, 'import matplotlib.pyplot as plt\n'), ((13159, 13179), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (13170, 13179), True, 'import matplotlib.pyplot as plt\n'), ((13178, 13243), 'matplotlib.pyplot.hist', 'plt.hist', (["[g['smcvae'], g['mcvae']]"], {'bins': '(20)', 'color': "['k', 'gray']"}), "([g['smcvae'], g['mcvae']], bins=20, color=['k', 'gray'])\n", (13186, 13243), True, 'import matplotlib.pyplot as plt\n'), ((13244, 13280), 'matplotlib.pyplot.legend', 'plt.legend', (["['Sparse', 'Non sparse']"], {}), "(['Sparse', 'Non sparse'])\n", (13254, 13280), True, 'import matplotlib.pyplot as plt\n'), ((13281, 13401), 'matplotlib.pyplot.title', 'plt.title', (['"""Generative parameters $\\\\mathbf{\\\\theta} = \\\\{\\\\mathbf{\\\\theta}_1 \\\\ldots \\\\mathbf{\\\\theta}_C\\\\}$"""'], {}), "(\n 'Generative parameters $\\\\mathbf{\\\\theta} = \\\\{\\\\mathbf{\\\\theta}_1 \\\\ldots \\\\mathbf{\\\\theta}_C\\\\}$'\n )\n", (13290, 13401), True, 'import matplotlib.pyplot as plt\n'), ((13398, 13417), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (13408, 13417), True, 'import matplotlib.pyplot as plt\n'), ((13478, 13490), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13488, 13490), True, 'import matplotlib.pyplot as plt\n'), ((13703, 13713), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13711, 13713), True, 'import matplotlib.pyplot as plt\n'), ((3934, 3954), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3948, 3954), True, 'import numpy as np\n'), ((4518, 4613), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['datasets[x]'], {'batch_size': 'n_samples', 'shuffle': '(True)', 'num_workers': '(1)'}), '(datasets[x], batch_size=n_samples, shuffle=True,\n num_workers=1)\n', (4545, 4613), False, 'import torch\n'), ((6187, 6198), 'time.time', 'time.time', ([], {}), '()\n', (6196, 6198), False, 'import time\n'), ((10482, 10509), 'torch.save', 'torch.save', (['kwargs', 'outfile'], {}), '(kwargs, outfile)\n', (10492, 10509), False, 'import torch\n'), ((11263, 11284), 'brainboard.Board', 'Board', ([], {'env': 'model_name'}), '(env=model_name)\n', (11268, 11284), False, 'from brainboard import Board\n'), ((11373, 11418), 'brainite.losses.MCVAELoss', 'MCVAELoss', (['n_channels'], {'beta': '(1.0)', 'sparse': '(False)'}), '(n_channels, beta=1.0, sparse=False)\n', (11382, 11418), False, 'from brainite.losses import MCVAELoss\n'), ((839, 864), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (862, 864), False, 'import torch\n'), ((1605, 1630), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1619, 1630), True, 'import numpy as np\n'), ((2018, 2040), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['W'], {}), '(W)\n', (2037, 2040), False, 'import torch\n'), ((2872, 2892), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2886, 2892), True, 'import numpy as np\n'), ((2910, 2963), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.n_samples, self.lat_dim)'}), '(size=(self.n_samples, self.lat_dim))\n', (2926, 2963), True, 'import numpy as np\n'), ((4038, 4063), 'numpy.sqrt', 'np.sqrt', (['(1.0 / snr[c_idx])'], {}), '(1.0 / snr[c_idx])\n', (4045, 4063), True, 'import numpy as np\n'), ((8740, 8751), 'time.time', 'time.time', ([], {}), '()\n', (8749, 8751), False, 'import time\n'), ((12007, 12022), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12020, 12022), False, 'import torch\n'), ((1704, 1753), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(self.n_feats, lat_dim)'], {}), '(-1, 1, (self.n_feats, lat_dim))\n', (1721, 1753), True, 'import numpy as np\n'), ((1777, 1815), 'numpy.linalg.svd', 'np.linalg.svd', (['w_'], {'full_matrices': '(False)'}), '(w_, full_matrices=False)\n', (1790, 1815), True, 'import numpy as np\n'), ((1979, 1999), 'torch.FloatTensor', 'torch.FloatTensor', (['w'], {}), '(w)\n', (1996, 1999), False, 'import torch\n'), ((2198, 2218), 'torch.FloatTensor', 'torch.FloatTensor', (['z'], {}), '(z)\n', (2215, 2218), False, 'import torch\n'), ((12349, 12372), 'numpy.array', 'np.array', (['z[model_name]'], {}), '(z[model_name])\n', (12357, 12372), True, 'import numpy as np\n'), ((12584, 12607), 'numpy.array', 'np.array', (['g[model_name]'], {}), '(g[model_name])\n', (12592, 12607), True, 'import numpy as np\n'), ((1892, 1942), 'torch.nn.Linear', 'torch.nn.Linear', (['lat_dim', 'self.n_feats'], {'bias': '(False)'}), '(lat_dim, self.n_feats, bias=False)\n', (1907, 1942), False, 'import torch\n'), ((3625, 3641), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3639, 3641), False, 'from sklearn.preprocessing import 
StandardScaler\n'), ((11869, 11888), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (11885, 11888), False, 'import torch\n'), ((4110, 4137), 'numpy.random.randn', 'np.random.randn', (['*arr.shape'], {}), '(*arr.shape)\n', (4125, 4137), True, 'import numpy as np\n'), ((7063, 7103), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (7085, 7103), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import numpy as np
import deeptools.countReadsPerBin as countR
from deeptools import parserCommon
from deeptools.utilities import smartLabels
from deeptools._version import __version__
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
parser = \
argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
``multiBamSummary`` computes the read coverages for genomic regions for typically two or more BAM files.
The analysis can be performed for the entire genome by running the program in 'bins' mode.
If you want to count the read coverage for specific regions only, use the ``BED-file`` mode instead.
The standard output of ``multiBamSummary`` is a compressed numpy array (``.npz``).
It can be directly used to calculate and visualize pairwise correlation values between the read coverages using the tool 'plotCorrelation'.
Similarly, ``plotPCA`` can be used for principal component analysis of the read coverages using the .npz file.
Note that using a single bigWig file is only recommended if you want to produce a bedGraph file (i.e., with the ``--outRawCounts`` option; the default output file cannot be used by ANY deepTools program if only a single file was supplied!).
A detailed sub-commands help is available by typing:
multiBamSummary bins -h
multiBamSummary BED-file -h
""",
epilog='example usages:\n'
'multiBamSummary bins --bamfiles file1.bam file2.bam -o results.npz \n\n'
'multiBamSummary BED-file --BED selection.bed --bamfiles file1.bam file2.bam \n'
'-o results.npz'
' \n\n',
conflict_handler='resolve')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
subparsers = parser.add_subparsers(
title="commands",
dest='command',
description='subcommands',
help='subcommands',
metavar='')
parent_parser = parserCommon.getParentArgParse(binSize=False)
read_options_parser = parserCommon.read_options()
# bins mode options
subparsers.add_parser(
'bins',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=[bamcorrelate_args(case='bins'),
parent_parser, read_options_parser,
parserCommon.gtf_options(suppress=True)
],
help="The coverage calculation is done for consecutive bins of equal "
"size (10 kilobases by default). This mode is useful to assess the "
"genome-wide similarity of BAM files. The bin size and "
"distance between bins can be adjusted.",
add_help=False,
usage='%(prog)s '
'--bamfiles file1.bam file2.bam '
'-o results.npz \n')
# BED file arguments
subparsers.add_parser(
'BED-file',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
parents=[bamcorrelate_args(case='BED-file'),
parent_parser, read_options_parser,
parserCommon.gtf_options()
],
help="The user provides a BED file that contains all regions "
"that should be considered for the coverage analysis. A "
"common use is to compare ChIP-seq coverages between two "
"different samples for a set of peak regions.",
usage='%(prog)s --BED selection.bed --bamfiles file1.bam file2.bam -o results.npz\n',
add_help=False)
return parser
def bamcorrelate_args(case='bins'):
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--bamfiles', '-b',
metavar='FILE1 FILE2',
help='List of indexed bam files separated by spaces.',
nargs='+',
required=True)
required.add_argument('--outFileName', '-out', '-o',
help='File name to save the coverage matrix. This matrix '
                          'can be subsequently plotted using plotCorrelation '
                          'or plotPCA.',
type=parserCommon.writableFile)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument("--help", "-h", action="help",
help="show this help message and exit")
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by a space, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--smartLabels',
action='store_true',
help='Instead of manually specifying labels for the input '
'BAM files, this causes deepTools to use the file name '
'after removing the path and extension.')
optional.add_argument('--genomeChunkSize',
type=int,
default=None,
help='Manually specify the size of the genome provided to each processor. '
'The default value of None specifies that this is determined by read '
'density of the BAM file.')
if case == 'bins':
optional.add_argument('--binSize', '-bs',
metavar='INT',
help='Length in bases of the window used '
'to sample the genome. (Default: %(default)s)',
default=10000,
type=int)
optional.add_argument('--distanceBetweenBins', '-n',
metavar='INT',
help='By default, multiBamSummary considers consecutive '
'bins of the specified --binSize. However, to '
'reduce the computation time, a larger distance '
                              'between bins can be given. Larger distances '
'result in fewer bins considered. (Default: %(default)s)',
default=0,
type=int)
required.add_argument('--BED',
help=argparse.SUPPRESS,
default=None)
else:
optional.add_argument('--binSize', '-bs',
help=argparse.SUPPRESS,
default=10000,
type=int)
optional.add_argument('--distanceBetweenBins', '-n',
help=argparse.SUPPRESS,
metavar='INT',
default=0,
type=int)
required.add_argument('--BED',
help='Limits the coverage analysis to '
'the regions specified in these files.',
metavar='FILE1.bed FILE2.bed',
nargs='+',
required=True)
group = parser.add_argument_group('Output optional options')
group.add_argument('--outRawCounts',
help='Save the counts per region to a tab-delimited file.',
type=parserCommon.writableFile,
metavar='FILE')
group.add_argument('--scalingFactors',
help='Compute scaling factors (in the DESeq2 manner) '
'compatible for use with bamCoverage and write them to a '
'file. The file has tab-separated columns "sample" and '
'"scalingFactor".',
type=parserCommon.writableFile,
metavar='FILE')
return parser
def process_args(args=None):
args = parse_arguments().parse_args(args)
if args.labels and len(args.bamfiles) != len(args.labels):
print("The number of labels does not match the number of bam files.")
exit(0)
if not args.labels:
if args.smartLabels:
args.labels = smartLabels(args.bamfiles)
else:
args.labels = [os.path.basename(x) for x in args.bamfiles]
return args
def main(args=None):
"""
1. get read counts at different positions either
all of same length or from genomic regions from the BED file
2. save data for further plotting
"""
args = process_args(args)
if 'BED' in args:
bed_regions = args.BED
else:
bed_regions = None
if len(args.bamfiles) == 1 and not (args.outRawCounts or args.scalingFactors):
sys.stderr.write("You've input a single BAM file and not specified "
"--outRawCounts or --scalingFactors. The resulting output will NOT be "
"useful with any deepTools program!\n")
stepsize = args.binSize + args.distanceBetweenBins
c = countR.CountReadsPerBin(
args.bamfiles,
args.binSize,
numberOfSamples=None,
genomeChunkSize=args.genomeChunkSize,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose,
region=args.region,
bedFile=bed_regions,
blackListFileName=args.blackListFileName,
extendReads=args.extendReads,
minMappingQuality=args.minMappingQuality,
ignoreDuplicates=args.ignoreDuplicates,
center_read=args.centerReads,
samFlag_include=args.samFlagInclude,
samFlag_exclude=args.samFlagExclude,
minFragmentLength=args.minFragmentLength,
maxFragmentLength=args.maxFragmentLength,
stepSize=stepsize,
zerosToNans=False,
out_file_for_raw_data=args.outRawCounts)
num_reads_per_bin = c.run(allArgs=args)
sys.stderr.write("Number of bins "
"found: {}\n".format(num_reads_per_bin.shape[0]))
if num_reads_per_bin.shape[0] < 2:
exit("ERROR: too few non zero bins found.\n"
"If using --region please check that this "
"region is covered by reads.\n")
# numpy will append .npz to the file name if we don't do this...
if args.outFileName:
f = open(args.outFileName, "wb")
np.savez_compressed(f,
matrix=num_reads_per_bin,
labels=args.labels)
f.close()
if args.scalingFactors:
f = open(args.scalingFactors, 'w')
f.write("sample\tscalingFactor\n")
scalingFactors = countR.estimateSizeFactors(num_reads_per_bin)
for sample, scalingFactor in zip(args.labels, scalingFactors):
f.write("{}\t{:6.4f}\n".format(sample, scalingFactor))
f.close()
if args.outRawCounts:
# append to the generated file the
# labels
header = "#'chr'\t'start'\t'end'\t"
header += "'" + "'\t'".join(args.labels) + "'\n"
f = open(args.outRawCounts, 'r+')
content = f.read()
f.seek(0, 0)
f.write(header + content)
f.close()
if __name__ == "__main__":
main()
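
# Usage sketch: the saved .npz holds the count matrix under the keys written
# by np.savez_compressed above, so it can be reloaded with plain numpy
# (file name below is hypothetical; allow_pickle is needed because the
# labels array holds Python strings):
#
#     npz = np.load("results.npz", allow_pickle=True)
#     counts, labels = npz["matrix"], npz["labels"]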
| [
"deeptools.countReadsPerBin.CountReadsPerBin",
"numpy.savez_compressed",
"argparse.ArgumentParser",
"deeptools.parserCommon.read_options",
"deeptools.utilities.smartLabels",
"sys.stderr.write",
"deeptools.countReadsPerBin.estimateSizeFactors",
"deeptools.parserCommon.gtf_options",
"os.path.basename"... | [((286, 309), 'numpy.seterr', 'np.seterr', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (295, 309), True, 'import numpy as np\n'), ((367, 1699), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': '"""\n\n``multiBamSummary`` computes the read coverages for genomic regions for typically two or more BAM files.\nThe analysis can be performed for the entire genome by running the program in \'bins\' mode.\nIf you want to count the read coverage for specific regions only, use the ``BED-file`` mode instead.\nThe standard output of ``multiBamSummary`` is a compressed numpy array (``.npz``).\nIt can be directly used to calculate and visualize pairwise correlation values between the read coverages using the tool \'plotCorrelation\'.\nSimilarly, ``plotPCA`` can be used for principal component analysis of the read coverages using the .npz file.\nNote that using a single bigWig file is only recommended if you want to produce a bedGraph file (i.e., with the ``--outRawCounts`` option; the default output file cannot be used by ANY deepTools program if only a single file was supplied!).\n\nA detailed sub-commands help is available by typing:\n\n multiBamSummary bins -h\n\n multiBamSummary BED-file -h\n\n\n"""', 'epilog': '"""example usages:\nmultiBamSummary bins --bamfiles file1.bam file2.bam -o results.npz \n\nmultiBamSummary BED-file --BED selection.bed --bamfiles file1.bam file2.bam \n-o results.npz \n\n"""', 'conflict_handler': '"""resolve"""'}), '(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n """\n\n``multiBamSummary`` computes the read coverages for genomic regions for typically two or more BAM files.\nThe analysis can be performed for the entire genome by running the program in \'bins\' mode.\nIf you want to count the read coverage for specific regions only, use the ``BED-file`` mode instead.\nThe standard output of ``multiBamSummary`` is a compressed numpy array (``.npz``).\nIt can be directly used to calculate and visualize pairwise correlation values between the read coverages using the tool \'plotCorrelation\'.\nSimilarly, ``plotPCA`` can be used for principal component analysis of the read coverages using the .npz file.\nNote that using a single bigWig file is only recommended if you want to produce a bedGraph file (i.e., with the ``--outRawCounts`` option; the default output file cannot be used by ANY deepTools program if only a single file was supplied!).\n\nA detailed sub-commands help is available by typing:\n\n multiBamSummary bins -h\n\n multiBamSummary BED-file -h\n\n\n"""\n , epilog=\n """example usages:\nmultiBamSummary bins --bamfiles file1.bam file2.bam -o results.npz \n\nmultiBamSummary BED-file --BED selection.bed --bamfiles file1.bam file2.bam \n-o results.npz \n\n"""\n , conflict_handler=\'resolve\')\n', (390, 1699), False, 'import argparse\n'), ((2131, 2176), 'deeptools.parserCommon.getParentArgParse', 'parserCommon.getParentArgParse', ([], {'binSize': '(False)'}), '(binSize=False)\n', (2161, 2176), False, 'from deeptools import parserCommon\n'), ((2203, 2230), 'deeptools.parserCommon.read_options', 'parserCommon.read_options', ([], {}), '()\n', (2228, 2230), False, 'from deeptools import parserCommon\n'), ((3731, 3770), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (3754, 3770), False, 'import argparse\n'), ((9502, 10182), 'deeptools.countReadsPerBin.CountReadsPerBin', 
'countR.CountReadsPerBin', (['args.bamfiles', 'args.binSize'], {'numberOfSamples': 'None', 'genomeChunkSize': 'args.genomeChunkSize', 'numberOfProcessors': 'args.numberOfProcessors', 'verbose': 'args.verbose', 'region': 'args.region', 'bedFile': 'bed_regions', 'blackListFileName': 'args.blackListFileName', 'extendReads': 'args.extendReads', 'minMappingQuality': 'args.minMappingQuality', 'ignoreDuplicates': 'args.ignoreDuplicates', 'center_read': 'args.centerReads', 'samFlag_include': 'args.samFlagInclude', 'samFlag_exclude': 'args.samFlagExclude', 'minFragmentLength': 'args.minFragmentLength', 'maxFragmentLength': 'args.maxFragmentLength', 'stepSize': 'stepsize', 'zerosToNans': '(False)', 'out_file_for_raw_data': 'args.outRawCounts'}), '(args.bamfiles, args.binSize, numberOfSamples=None,\n genomeChunkSize=args.genomeChunkSize, numberOfProcessors=args.\n numberOfProcessors, verbose=args.verbose, region=args.region, bedFile=\n bed_regions, blackListFileName=args.blackListFileName, extendReads=args\n .extendReads, minMappingQuality=args.minMappingQuality,\n ignoreDuplicates=args.ignoreDuplicates, center_read=args.centerReads,\n samFlag_include=args.samFlagInclude, samFlag_exclude=args.\n samFlagExclude, minFragmentLength=args.minFragmentLength,\n maxFragmentLength=args.maxFragmentLength, stepSize=stepsize,\n zerosToNans=False, out_file_for_raw_data=args.outRawCounts)\n', (9525, 10182), True, 'import deeptools.countReadsPerBin as countR\n'), ((9207, 9394), 'sys.stderr.write', 'sys.stderr.write', (['"""You\'ve input a single BAM file and not specified --outRawCounts or --scalingFactors. The resulting output will NOT be useful with any deepTools program!\n"""'], {}), '(\n """You\'ve input a single BAM file and not specified --outRawCounts or --scalingFactors. The resulting output will NOT be useful with any deepTools program!\n"""\n )\n', (9223, 9394), False, 'import sys\n'), ((10800, 10868), 'numpy.savez_compressed', 'np.savez_compressed', (['f'], {'matrix': 'num_reads_per_bin', 'labels': 'args.labels'}), '(f, matrix=num_reads_per_bin, labels=args.labels)\n', (10819, 10868), True, 'import numpy as np\n'), ((11083, 11128), 'deeptools.countReadsPerBin.estimateSizeFactors', 'countR.estimateSizeFactors', (['num_reads_per_bin'], {}), '(num_reads_per_bin)\n', (11109, 11128), True, 'import deeptools.countReadsPerBin as countR\n'), ((8668, 8694), 'deeptools.utilities.smartLabels', 'smartLabels', (['args.bamfiles'], {}), '(args.bamfiles)\n', (8679, 8694), False, 'from deeptools.utilities import smartLabels\n'), ((2482, 2521), 'deeptools.parserCommon.gtf_options', 'parserCommon.gtf_options', ([], {'suppress': '(True)'}), '(suppress=True)\n', (2506, 2521), False, 'from deeptools import parserCommon\n'), ((3221, 3247), 'deeptools.parserCommon.gtf_options', 'parserCommon.gtf_options', ([], {}), '()\n', (3245, 3247), False, 'from deeptools import parserCommon\n'), ((8736, 8755), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (8752, 8755), False, 'import os\n')] |
# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import numpy as np
import pandapipes as pp
import pandas as pd
from pandapipes.plotting import simple_plot
from pandapipes.properties.fluids import get_fluid
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def pipeflow_openmodelica_comparison(net, log_results=True, friction_model='colebrook',
only_update_hydraulic_matrix=False):
pp.pipeflow(
net, stop_condition="tol", iter=100, tol_p=1e-7, tol_v=1e-7, friction_model=friction_model,
only_update_hydraulic_matrix=only_update_hydraulic_matrix)
p_om = net.junction.p_om
p_valid = pd.notnull(p_om)
p_om = p_om.loc[p_valid]
if get_fluid(net).is_gas:
if 'pipe' in net:
v_diff_from_pipe, v_diff_to_pipe, v_diff_mean_pipe, v_diff_abs_pipe, \
v_mean_pandapipes_pipe, v_om_pipe = retrieve_velocity_gas(net, 'pipe')
else:
v_diff_abs_pipe = pd.Series()
v_om_pipe = pd.Series()
v_mean_pandapipes_pipe = pd.Series()
v_diff_from_pipe = pd.Series()
v_diff_to_pipe = pd.Series()
v_diff_mean_pipe = pd.Series()
diff_results_v_pipe = pd.DataFrame(
{"diff_v_from_pipe": v_diff_from_pipe, "diff_v_to_pipe": v_diff_to_pipe,
"diff_v_mean_pipe": v_diff_mean_pipe, "diff_v_abs_pipe": v_diff_abs_pipe})
if 'valve' in net:
v_diff_from_valve, v_diff_to_valve, v_diff_mean_valve, v_diff_abs_valve, \
v_mean_pandapipes_valve, v_om_valve = retrieve_velocity_gas(net, 'valve')
else:
v_diff_abs_valve = pd.Series()
v_om_valve = pd.Series()
v_mean_pandapipes_valve = pd.Series()
v_diff_from_valve = pd.Series()
v_diff_to_valve = pd.Series()
v_diff_mean_valve = pd.Series()
diff_results_v_valve = pd.DataFrame(
{"diff_v_from_valve": v_diff_from_valve, "diff_v_to_valve": v_diff_to_valve,
"diff_v_mean_valve": v_diff_mean_valve, "diff_v_abs_valve": v_diff_abs_valve})
else:
if 'pipe' in net:
v_diff_mean_pipe, v_diff_abs_pipe, v_mean_pandapipes_pipe, v_om_pipe = \
retrieve_velocity_liquid(net, element = "pipe")
else:
v_diff_abs_pipe = pd.Series()
v_om_pipe = pd.Series()
v_mean_pandapipes_pipe = pd.Series()
v_diff_mean_pipe = pd.Series()
if 'valve' in net:
            v_diff_mean_valve, v_diff_abs_valve, v_mean_pandapipes_valve, v_om_valve = \
                retrieve_velocity_liquid(net, element="valve")
else:
v_diff_abs_valve = pd.Series()
v_om_valve = pd.Series()
v_mean_pandapipes_valve = pd.Series()
v_diff_mean_valve = pd.Series()
diff_results_v_pipe = pd.DataFrame({"diff_v_mean_pipe": v_diff_mean_pipe,
"diff_v_abs_pipe": v_diff_abs_pipe})
diff_results_v_valve = pd.DataFrame({"diff_v_mean_valve": v_diff_mean_valve,
"diff_v_abs_valve": v_diff_abs_valve})
    v_diff_abs = pd.concat([v_diff_abs_pipe, v_diff_abs_valve], ignore_index=True)
v_diff_abs.dropna(inplace=True)
p_pandapipes = net.res_junction.p_bar.loc[p_valid].values.astype(np.float64).round(4)
p_diff = np.abs(1 - p_pandapipes / p_om)
p_diff = pd.Series(p_diff, range(len(p_diff)))
v_diff_abs = pd.Series(v_diff_abs, range(len(v_diff_abs)))
'''
print("\n p_diff = \n", p_diff)
print("\n v_diff_abs = \n", v_diff_abs)
print("\n p_diff < 0.01 = \n", p_diff < 0.01)
print("\n v_diff_abs < 0.05 = \n", v_diff_abs < 0.05)
'''
if log_results:
logger.info("p_OM %s" % p_om)
logger.info("p_PP %s" % p_pandapipes)
logger.info("v_OM_pipe %s" % v_om_pipe)
logger.info("v_PP_valve %s" % v_om_valve)
logger.info("v_PP_pipe %s" % v_mean_pandapipes_pipe)
logger.info("v_PP_valve %s" % v_mean_pandapipes_valve)
logger.info("Druckdifferenz: %s" % p_diff)
logger.info("Geschwindigkeitsdifferenz Rohr: \n %s" % diff_results_v_pipe)
logger.info("Geschwindigkeitsdifferenz Ventil: \n %s" % diff_results_v_valve)
return p_diff, v_diff_abs
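
# Minimal usage sketch: assumes `net` already carries the OpenModelica
# reference results as columns `junction.p_om` and `pipe.v_om` / `valve.v_om`
# (see the lookups above and in the retrieve_velocity_* helpers). The
# tolerances mirror the commented checks in the function body:
#
#     p_diff, v_diff_abs = pipeflow_openmodelica_comparison(net)
#     assert np.all(p_diff < 0.01) and np.all(v_diff_abs < 0.05)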
def retrieve_velocity_liquid(net, element="pipe"):
if 'v_om' not in net[element]:
net[element]['v_om'] = []
v_om = net[element].v_om
v_valid = pd.notnull(v_om)
v_om = v_om.loc[v_valid]
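    # nudge exact zeros so the relative differences below never divide by zero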
v_om[v_om == 0] += 0.0001
if element == "pipe":
v_mean_pandapipes = net.res_pipe.v_mean_m_per_s.loc[v_valid].values.astype(
np.float64).round(4)
if element == "valve":
v_mean_pandapipes = net.res_valve.v_mean_m_per_s.loc[v_valid].values.astype(
np.float64).round(4)
v_mean_pandapipes[v_mean_pandapipes == 0] += 0.0001
v_diff_mean = np.abs(1 - v_mean_pandapipes / v_om)
v_diff_abs = np.abs(v_om - v_mean_pandapipes)
v_diff_mean = pd.Series(v_diff_mean, range(len(v_diff_mean)))
v_diff_abs = pd.Series(v_diff_abs, range(len(v_diff_abs)))
v_om = pd.Series(v_om,range(len(v_om)))
return v_diff_mean, v_diff_abs, v_mean_pandapipes, v_om
def retrieve_velocity_gas(net, element='pipe'):
if 'v_om' not in net[element]:
net[element]['v_om'] = []
v_om = net[element].v_om
v_valid = pd.notnull(v_om)
v_om = v_om.loc[v_valid]
res_element = net['res_' + element].loc[v_valid, :]
v_from_pandapipes = res_element.v_from_m_per_s.values.astype(np.float64).round(4)
v_to_pandapipes = res_element.v_to_m_per_s.values.astype(np.float64).round(4)
v_mean_pandapipes = res_element.v_mean_m_per_s.values.astype(np.float64).round(4)
v_om[v_om == 0] += 0.0001
v_mean_pandapipes[v_mean_pandapipes == 0] += 0.0001
v_from_pandapipes[v_from_pandapipes == 0] += 0.0001
v_to_pandapipes[v_to_pandapipes == 0] += 0.0001
v_diff_from = np.abs(1 - v_from_pandapipes / v_om)
v_diff_to = np.abs(1 - v_to_pandapipes / v_om)
v_diff_mean = np.abs(1 - v_mean_pandapipes / v_om)
v_diff_abs = np.abs(v_om - v_mean_pandapipes)
v_diff_mean = pd.Series(v_diff_mean, range(len(v_diff_mean)))
v_diff_from = pd.Series(v_diff_from, range(len(v_diff_from)))
v_diff_to = pd.Series(v_diff_to, range(len(v_diff_to)))
v_diff_abs = pd.Series(v_diff_abs, range(len(v_diff_abs)))
v_om = pd.Series(v_om, range(len(v_om)))
    return v_diff_from, v_diff_to, v_diff_mean, v_diff_abs, v_mean_pandapipes, v_om
| [
"logging.getLogger",
"numpy.abs",
"pandas.Series",
"pandas.DataFrame",
"pandapipes.pipeflow",
"pandas.notnull",
"pandapipes.properties.fluids.get_fluid"
] |
import numpy as np
import pandas as pd
from extract_pulse import get_pulse
from rm_qu_fitting import mcmc_fit, plot_mcmc_samp
from rm_synthesis import synthesis_rm, get_rm, plot_rm_synthesis
from translate_data import translate_file, plot_spec
def read_file(file_name='RM-190303-1.txt'):
##### Read File; Format File
    data = pd.read_csv('../CalData/' + file_name, sep=r'\s+', header=None)
freq = np.linspace(1000, 1500, data.loc[:, 1].max()+1)
tbin = 49.152 / 1e3
##### I, Q, U, V - 2D numpy array; axis0 is frequency channel, axis1 is time sample
I = data.iloc[:, 3].values.reshape(data.loc[:, 1].max()+1, data.loc[:, 2].max()+1)
Q = data.iloc[:, 4].values.reshape(data.loc[:, 1].max()+1, data.loc[:, 2].max()+1)
U = data.iloc[:, 5].values.reshape(data.loc[:, 1].max()+1, data.loc[:, 2].max()+1)
V = data.iloc[:, 6].values.reshape(data.loc[:, 1].max()+1, data.loc[:, 2].max()+1)
return I, Q, U, V, freq, tbin
if __name__ == '__main__':
    file_name = '210608_135610_21.rf.TSb4'
##### Read Data
# I, Q, U, V, freq, tbin = read_file()
I, Q, U, V, freq, tbin = translate_file(file_name)
##### Extract pulse
I, Q, U, _, freq, snr, center_freq = get_pulse(I, Q, U, V, freq)
##### RM synthesis
rm_list, Linear = synthesis_rm(I, Q, U, freq, rm_left=-20000, rm_right=20000)
RM, RM_error_left, RM_error_right = get_rm(rm_list, Linear, snr)
print('RM: {:.0f} +{:.0f} -{:.0f}'.format(RM, RM_error_right, RM_error_left))
plot_rm_synthesis(rm_list, Linear, save=False)
rm_max = RM
##### Squeeze the time dimension and turn the data into one dimension
Q, U = np.mean(Q, axis=1), np.mean(U, axis=1)
##### QU fitting with MCMC, Q and U - 1D numpy array; axis0 is frequency channel
result, RM, RM_error_left, RM_error_right = mcmc_fit(Q, U, freq, rm_left=-20000, rm_right=20000)
print('RM: {:.0f} +{:.0f} -{:.0f}'.format(RM, RM_error_right, RM_error_left))
plot_mcmc_samp(result, save=False)
##### plot spectrum
    plot_spec(file_name, rm_max, save=False)
| [
"rm_qu_fitting.plot_mcmc_samp",
"numpy.mean",
"translate_data.translate_file",
"translate_data.plot_spec",
"pandas.read_csv",
"extract_pulse.get_pulse",
"rm_qu_fitting.mcmc_fit",
"rm_synthesis.synthesis_rm",
"rm_synthesis.plot_rm_synthesis",
"rm_synthesis.get_rm"
] |
from styx_msgs.msg import TrafficLight
import cv2
import numpy as np
class TLClassifier(object):
def __init__(self):
#TODO load classifier
pass
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
        # Heuristic color prediction: count red pixels after HSV thresholding
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
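        # Assumption worth noting: OpenCV hue spans 0-179, so 170-179 covers
        # only the upper red band. Red also wraps around hue 0; a second
        # low-hue mask (e.g. 0-10) could be OR'ed in if detections are missed.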
        lower = np.array([170, 150, 220])
        upper = np.array([179, 255, 255])
mask = cv2.inRange(image, lower, upper)
pixels = cv2.countNonZero(mask)
        if pixels > 100:
            return TrafficLight.RED
return TrafficLight.UNKNOWN
| [
"cv2.inRange",
"numpy.array",
"cv2.countNonZero",
"cv2.cvtColor"
] |
import numpy as np
import pandas as pd
import neurokit2 as nk
# =============================================================================
# Events
# =============================================================================
def test_events_find():
signal = np.cos(np.linspace(start=0, stop=20, num=1000))
events = nk.events_find(signal)
assert list(events["Onset"]) == [0, 236, 550, 864]
    events = nk.events_find(signal, duration_min=150)
assert list(events["Onset"]) == [236, 550]
    events = nk.events_find(signal, inter_min=300)
assert list(events["Onset"]) == [0, 550, 864]
def test_events_to_mne():
signal = np.cos(np.linspace(start=0, stop=20, num=1000))
events = nk.events_find(signal)
events, event_id = nk.events_to_mne(events)
assert event_id == {'Event': 0}
def test_plot_events_in_signal():
signal = np.cos(np.linspace(start=0, stop=20, num=1000))
events = nk.events_find(signal)
data = nk.plot_events_in_signal(signal, events, show=False)
    assert len(data['Event_Onset']) == 1000
| [
"neurokit2.events_to_mne",
"neurokit2.events_find",
"neurokit2.plot_events_in_signal",
"numpy.linspace"
] |
import numpy as np
from .utils.math_utils import subsets
from .aps import aps
r_is_initialized = False
class GlobalImport:
# https://stackoverflow.com/a/53255802
# This doesn't seem to like to be imported from elsewhere, e.g.,
# from utils. Maybe with some work it might be possible too.
def __enter__(self):
return self
def __exit__(self, *args):
import inspect
self.collector = inspect.getargvalues(inspect.getouterframes(inspect.currentframe())[1].frame).locals
globals().update(self.collector)
def init_r():
global r_is_initialized
if r_is_initialized:
return
with GlobalImport() as gi:
try:
from rpy2.robjects import r
from rpy2.robjects import numpy2ri
from rpy2.robjects.packages import importr
except ImportError as e:
msg = ["To use the candidate parent algorithms pc, mb or ges you",
"need to have R installed. Pc and mb require the R-package",
"bnlearn; ges requires pcalg. Finally, you also need to",
"have the Python package rpy2 installed to interface with R."]
raise Exception(' '.join(msg)) from e
load_funcs = """
datapath_or_matrix_to_numeric_dataframe <- function(data_path_or_matrix,
discrete=TRUE,
arities=FALSE)
{
if (typeof(data_path_or_matrix) == "character") {
data <- read.table(data_path_or_matrix, header = FALSE)
}
else {
data <- data_path_or_matrix
mode(data) = "numeric"
data <- data.frame(data)
}
if (discrete) {
if (arities) {
arities <- data[1,]
data <- data[2:nrow(data),]
} else {
arities <- lapply(data, function(x) length(unique(x)))
}
data[] <- lapply(data, as.factor)
for(i in 1:length(arities)) {
levels(data[, i]) <- as.character(0:(arities[[i]] - 1))
}
}
colnames(data) <- 0:(ncol(data)-1)
return(data)
}
"""
r(load_funcs)
r_is_initialized = True
def convert_to_r_data(data):
# Input is sumu.Data
init_r()
numpy2ri.activate()
datar = r.matrix(data.all().flatten(),
nrow=data.N,
ncol=data.n,
byrow=True)
numpy2ri.deactivate()
discrete = data.discrete
arities = True if data.arities is not False else False
datar = r['datapath_or_matrix_to_numeric_dataframe'](datar,
discrete=discrete,
arities=arities)
return datar
def candidates_to_str(C):
return '|'.join([' '.join([str(node) for node in C[v]]) for v in sorted(C)])
def parse_candidates(C_str):
    psets = [tuple(int(c) for c in C.split()) for C in C_str.split("|")]
    return dict(enumerate(psets))
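
# Round-trip example for the two helpers above:
#   candidates_to_str({0: (1, 2), 1: (0, 2)}) == "1 2|0 2"
#   parse_candidates("1 2|0 2") == {0: (1, 2), 1: (0, 2)}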
def _adjust_number_candidates(K, C, method, scores=None):
assert method in ['random', 'top'], "fill method should be random or top"
if method == 'top':
assert scores is not None, "scorepath (-s) required for fill == top"
C = dict(C.items())
n = len(C)
for v in C:
add_n = max(0, K - len(C[v]))
add_from = [add_node for add_node in range(n) if add_node not in C[v] + (v,)]
if method == 'random':
if len(C[v]) < K:
C[v] = C[v] + tuple(np.random.choice(add_from, add_n, replace=False))
elif len(C[v]) > K:
C[v] = np.random.choice(C[v], K, replace=False)
if method == 'top':
if len(C[v]) < K:
C_v_top = sorted([(parent, scores.local(v, np.array([parent])))
for parent in add_from],
key=lambda item: item[1], reverse=True)[:add_n]
C_v_top = tuple([c[0] for c in C_v_top])
C[v] = C[v] + C_v_top
elif len(C[v]) > K:
C[v] = sorted([(parent, scores.local(v, np.array([parent])))
for parent in C[v]],
key=lambda item: item[1], reverse=True)[:K]
C[v] = [c[0] for c in C[v]]
for v in C:
C[v] = tuple(sorted(C[v]))
return C
def _most_freq_candidates(K, Cs):
C = {v: list() for v in range(len(Cs[0]))}
for C_i in Cs:
for v in C_i:
C[v] += C_i[v]
for v in C:
        C[v] = [i[0] for i in sorted([(u, C[v].count(u)) for u in C if C[v].count(u) > 0],
                                     key=lambda i: i[1], reverse=True)][:K]
        C[v] = tuple(sorted(C[v]))
return C
def hybrid(K, **kwargs):
algos = kwargs.get("halgos")
fill = kwargs.get("hfill")
assert not [algos, fill].count(None), "list of algorithms to use (-ha) and tie breaking method (-hf) required for algo == hybrid"
if fill == "top":
scores = kwargs["scores"]
def vote(Cs):
C = {v: set() for v in Cs[0][0]}
for v in C:
k = 0
while len(C[v]) < K:
to_add = tuple()
for i in range(len(algos)):
to_add += tuple(Cs[i][k][v])
k += 1
if len(C[v].union(set(to_add))) <= K:
C[v] = C[v].union(set(to_add))
else:
to_add = {0: to_add}
for u in set(to_add[0]).difference(C[v]):
if to_add[0].count(u) in to_add:
to_add[to_add[0].count(u)] = to_add[to_add[0].count(u)].union({u})
else:
to_add[to_add[0].count(u)] = {u}
del to_add[0]
for count in sorted(to_add.keys(), reverse=True):
if len(C[v].union(to_add[count])) <= K:
C[v] = C[v].union(to_add[count])
else:
if fill == 'random':
C[v] = C[v].union(np.random.choice(list(to_add[count]), K - len(C[v]), replace=False))
elif fill == 'top':
C_v_top = sorted([(parent, scores[v][(parent,)])
for parent in to_add[count]],
key=lambda item: item[1], reverse=True)[:K - len(C[v])]
C_v_top = set([c[0] for c in C_v_top])
C[v] = C[v].union(C_v_top)
break
return C
C = [tuple(algo[a](k, **kwargs) for k in range(1, K+1)) for a in algos]
C = vote(C)
for v in C:
C[v] = tuple(sorted(C[v]))
return C
def rnd(K, **kwargs):
n = kwargs.get("n")
assert n is not None, "nvars (-n) required for algo == rnd"
C = dict()
for v in range(n):
C[v] = tuple(sorted(np.random.choice([u for u in range(n) if u != v], K, replace=False)))
return C
def ges(K, **kwargs):
"""Greedy equivalence search :footcite:`chickering:2002`.
GES is implemented in the R package pcalg :footcite:`hauser:2012,kalisch:2012`,
for which the function :py:func:`pcalg` provides a Python wrapping.
"""
init_r()
data = kwargs["data"]
data = convert_to_r_data(data)
B = kwargs.get("B", 20)
fill = kwargs.get("fill", "top")
scores = kwargs.get("scores")
if "B" in kwargs:
Cs = list()
for i in range(kwargs["B"]):
bsample = data.rx(r["sample"](data.nrow, data.nrow, replace=True), True)
Cs.append(pcalg("ges", K, bsample))
C = _most_freq_candidates(K, Cs)
else:
C = pcalg("ges", K, data)
if fill:
C = _adjust_number_candidates(K, C, fill, scores=scores)
return C
def pcalg(method, K, data):
init_r()
base = importr("base")
importr('pcalg')
dollar = base.__dict__["$"]
n = data.ncol
C = dict({node: list() for node in range(n)})
data = r["data.matrix"](data)
if method == 'ges':
score = r["new"]("GaussL0penObsScore", data)
cpdag = r["ges"](score).rx2("essgraph")
for v in range(n):
# NOTE: undirected edges are represented as bidirected!
# See pcalg documentation at
# https://cran.r-project.org/web/packages/pcalg/pcalg.pdf
# for ges and EssGraph.
# Also running the ges example confirms this.
C[v] = [v-1 for v in sorted(dollar(cpdag, ".in.edges").rx2(v+1))]
for v in C:
C[v] = tuple(sorted(C[v]))
return C
def pc(K, **kwargs):
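    # Candidate parents from PC (bnlearn's pc.stable): a node's neighbours
    # that are not its children. By default the data is bootstrapped B times
    # and the most frequent candidates are kept; pass B=None to disable.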
init_r()
data = kwargs.get("data")
data = convert_to_r_data(data)
alpha = kwargs.get("alpha", 0.1)
max_sx = kwargs.get("max_sx", 1)
B = kwargs.get("B", 20)
fill = kwargs.get("fill", "top")
scores = kwargs.get("scores")
if B is not None:
Cs = list()
for i in range(B):
bsample = data.rx(r["sample"](data.nrow,
data.nrow,
replace=True), True)
Cs.append(bnlearn("pc", K, bsample, alpha=alpha, max_sx=max_sx))
C = _most_freq_candidates(K, Cs)
else:
C = bnlearn("pc", K, data, alpha=alpha, max_sx=max_sx)
if fill is not None:
C = _adjust_number_candidates(K, C, fill, scores=scores)
return C
def mb(K, **kwargs):
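    # Candidate parents from the Markov blanket (bnlearn's IAMB), with the
    # same optional bootstrap-and-vote scheme as pc() above.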
init_r()
data = kwargs.get("data")
data = convert_to_r_data(data)
alpha = kwargs.get("alpha", 0.1)
max_sx = kwargs.get("max_sx", 1)
B = kwargs.get("B", 20)
fill = kwargs.get("fill", "top")
scores = kwargs.get("scores")
if B is not None:
Cs = list()
for i in range(B):
bsample = data.rx(r["sample"](data.nrow,data.nrow,replace=True), True)
Cs.append(bnlearn("mb", K, bsample, alpha=alpha, max_sx=max_sx))
C = _most_freq_candidates(K, Cs)
else:
C = bnlearn("mb", K, data, alpha=alpha, max_sx=max_sx)
if fill is not None:
C = _adjust_number_candidates(K, C, fill, scores=scores)
return C
def hc(K, **kwargs):
init_r()
datapath = kwargs.get("datapath")
assert datapath is not None, "datapath (-d) required for algo == hc"
B = kwargs.get("B")
if B is None:
B = 20
fill = kwargs.get("fill")
if fill is None:
fill = "top"
scores = kwargs.get("scores")
    data = r['load_dat'](datapath)  # NOTE: 'load_dat' is not defined by init_r() above
if B != "none":
Cs = list()
for i in range(B):
bsample = data.rx(r["sample"](data.nrow, data.nrow, replace=True), True)
Cs.append(bnlearn("hc", K, bsample))
C = _most_freq_candidates(K, Cs)
else:
C = bnlearn("hc", K, data)
if fill != "none":
C = _adjust_number_candidates(K, C, fill, scores=scores)
return C
def bnlearn(method, K, data, **kwargs):
init_r()
R_bnlearn = importr('bnlearn')
n = data.ncol
C = dict({v: list() for v in range(n)})
if method == 'mb':
bn = R_bnlearn.iamb(data, alpha=kwargs["alpha"], max_sx=kwargs["max_sx"])
if method == 'pc':
bn = R_bnlearn.pc_stable(data, alpha=kwargs["alpha"], max_sx=kwargs["max_sx"])
if method == 'hc':
# Uses BIC by default
bn = R_bnlearn.hc(data)
for v in range(n):
if method == 'mb':
mb = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('mb')]
for u in mb:
if u not in C[v]:
C[v].append(u)
if method == 'pc':
nbr = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('nbr')]
children = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('children')]
for u in nbr:
if u in children:
continue
if u not in C[v]:
C[v].append(u)
if method == 'hc':
pset = [int(u) for u in bn.rx2('nodes').rx2(str(v)).rx2('parents')]
for u in pset:
if u not in C[v]:
C[v].append(u)
for v in C:
C[v] = tuple(sorted(C[v]))
return C
def opt(K, **kwargs):
scores = kwargs.get("scores")
n = kwargs.get("n")
C = np.array([[v for v in range(n) if v != u] for u in range(n)], dtype=np.int32)
pset_posteriors = aps(scores.all_candidate_restricted_scores(C),
as_dict=True, normalize=True)
C = dict()
for v in pset_posteriors:
postsums = dict()
for candidate_set in subsets(set(pset_posteriors).difference({v}), K, K):
postsums[candidate_set] = np.logaddexp.reduce([pset_posteriors[v][pset]
for pset in subsets(candidate_set, 0, K)])
C[v] = max(postsums, key=lambda candidate_set: postsums[candidate_set])
return C
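
# Note: opt() enumerates every size-K candidate set for every node, i.e. on
# the order of n * C(n-1, K) subset sums, so it is only feasible for small
# n and K.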
def top(K, **kwargs):
scores = kwargs["scores"]
assert scores is not None, "scorepath (-s) required for algo == top"
C = dict()
for v in range(scores.data.n):
top_candidates = sorted([(parent, scores.local(v, np.array([parent])))
for parent in range(scores.data.n) if parent != v],
key=lambda item: item[1], reverse=True)[:K]
top_candidates = tuple(sorted(c[0] for c in top_candidates))
C[v] = top_candidates
return C
def greedy(K, **kwargs):
s = kwargs.get("s")
scores = kwargs.get("scores")
assert not [s, scores].count(None), "s (-gs) and scorepath (-s) required for algo == greedy"
def unimportance(v, u, U):
pi_v = [scores.local(v, S) for S in subsets([m for m in U if m != u], 0, s)]
return max(pi_v)
C = dict({v: list() for v in range(scores.n)})
for v in range(scores.n):
U = [u for u in range(scores.n) if u != v]
while len(C[v]) < K:
least_unimportant = min([(u, unimportance(v, u, U)) for u in U], key=lambda item: item[1])[0]
C[v].append(least_unimportant)
U = [u for u in U if u != least_unimportant]
C[v] = tuple(sorted(C[v]))
return C
def greedy_1(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-1"
def highest_uncovered(v, U):
return max([(u, scores.local(v, np.array(S + (u,))))
for S in subsets(C[v], 0, [len(C[v]) if scores.maxid == -1 else min(len(C[v]), scores.maxid-1)][0])
for u in U], key=lambda item: item[1])[0]
C = dict({int(v): list() for v in range(scores.n)})
for v in C:
U = [u for u in C if u != v]
while len(C[v]) < K:
u_hat = highest_uncovered(v, U)
C[v].append(u_hat)
U = [u for u in U if u != u_hat]
C[v] = tuple(sorted(C[v]))
return C
def greedy_lite(K, **kwargs):
scores = kwargs.get("scores")
k = kwargs.get("k")
assert scores is not None
if k is None:
k = min(6, K)
assert k <= K
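    # Strategy: grow the candidate set one parent at a time up to K - k, then
    # fill the last k slots in a single batch from the best uncovered parents,
    # which saves score evaluations compared to the fully sequential greedy.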
def k_highest_uncovered(v, U, k):
uncovereds = {(u, scores._local(v, np.array(S + (u,))))
for S in subsets(C[v], 0, [len(C[v]) if scores.maxid == -1 else min(len(C[v]), scores.maxid-1)][0])
for u in U}
k_highest = set()
while len(k_highest) < k:
u_hat = max(uncovereds, key=lambda pair: pair[1])
k_highest.add(u_hat[0])
uncovereds.remove(u_hat)
return k_highest
C = dict({int(v): set() for v in range(scores.data.n)})
for v in C:
U = [u for u in C if u != v]
while len(C[v]) < K - k:
u_hat = k_highest_uncovered(v, U, 1)
C[v].update(u_hat)
U = [u for u in U if u not in u_hat]
C[v].update(k_highest_uncovered(v, U, k))
C[v] = tuple(sorted(C[v]))
scores.clear_cache()
return C
def greedy_1_sum(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-1"
def highest_uncovered_sum(v, U):
sums = list()
for u in U:
sums.append((u, np.logaddexp.reduce([scores[v][tuple(sorted(set(S + (u,))))]
for S in subsets(C[v], 0, len(C[v]))])))
return max(sums, key=lambda item: item[1])[0]
C = dict({v: list() for v in scores})
for v in scores:
U = [u for u in scores if u != v]
while len(C[v]) < K:
u_hat = highest_uncovered_sum(v, U)
C[v].append(u_hat)
U = [u for u in U if u != u_hat]
C[v] = tuple(sorted(C[v]))
return C
def greedy_1_double(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-1-double"
def highest_uncovered(v, U):
return max([(u, scores[v][tuple(sorted(set(S + (u,))))])
for S in subsets(C[v], 0, len(C[v]))
for u in U], key=lambda item: item[1])[0]
def highest_double_uncovered(v, U):
return max([((u, m), scores[v][tuple(sorted(set(S + (u, m))))])
for S in subsets(C[v], 0, len(C[v]))
for u in U for m in set(U).difference({u})], key=lambda item: item[1])[0]
C = dict({v: list() for v in scores})
for v in scores:
U = [u for u in scores if u != v]
while len(C[v]) < K:
if len(C[v]) == K - 1:
u_hat = highest_uncovered(v, U)
C[v].append(u_hat)
U = [u for u in U if u != u_hat]
else:
u_hat, m_hat = highest_double_uncovered(v, U)
C[v].append(u_hat)
C[v].append(m_hat)
U = [u for u in U if u not in [u_hat, m_hat]]
C[v] = tuple(sorted(C[v]))
return C
def greedy_1_double_sum(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-1"
def highest_uncovered_sum(v, U):
sums = list()
for u in U:
sums.append((u, np.logaddexp.reduce([scores[v][tuple(sorted(set(S + (u,))))]
for S in subsets(C[v], 0, len(C[v]))])))
return max(sums, key=lambda item: item[1])[0]
def highest_double_uncovered_sum(v, U):
sums = list()
for u in U:
for m in set(U).difference({u}):
sums.append(((u, m), np.logaddexp.reduce([scores[v][tuple(sorted(set(S + (u, m))))]
for S in subsets(C[v], 0, len(C[v]))])))
return max(sums, key=lambda item: item[1])[0]
C = dict({v: list() for v in scores})
for v in scores:
U = [u for u in scores if u != v]
while len(C[v]) < K:
if len(C[v]) == K - 1:
u_hat = highest_uncovered_sum(v, U)
C[v].append(u_hat)
U = [u for u in U if u != u_hat]
else:
u_hat, m_hat = highest_double_uncovered_sum(v, U)
C[v].append(u_hat)
C[v].append(m_hat)
U = [u for u in U if u not in [u_hat, m_hat]]
C[v] = tuple(sorted(C[v]))
return C
def greedy_2(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-2"
C = dict({v: set() for v in scores})
for v in scores:
psets_leq_K = sorted([(pset, scores[v][pset])
for pset in scores[v] if len(pset) <= K],
key=lambda item: item[1],
reverse=True)
psets_leq_K = [item[0] for item in psets_leq_K]
i = 0
while len(C[v]) < K:
if len(C[v].union(psets_leq_K[i])) <= K:
C[v] = C[v].union(psets_leq_K[i])
else:
pset_diff = list(set(psets_leq_K[i]).difference(C[v]))
C[v] = C[v].union(np.random.choice(pset_diff, K - len(C[v]), replace=False))
i += 1
C[v] = tuple(sorted(C[v]))
return C
def greedy_2_s(K, **kwargs):
s = kwargs.get("s")
scores = kwargs.get("scores")
assert not [s, scores].count(None), "s (-gs) and scorepath (-s) required for algo == greedy-2-s"
C = dict({v: set() for v in scores})
for v in scores:
psets_leq_s = sorted([(pset, scores[v][pset])
for pset in scores[v] if len(pset) <= s],
key=lambda item: item[1],
reverse=True)
psets_leq_s = [item[0] for item in psets_leq_s]
i = 0
while len(C[v]) < K:
if len(C[v].union(psets_leq_s[i])) <= K:
C[v] = C[v].union(psets_leq_s[i])
else:
pset_diff = list(set(psets_leq_s[i]).difference(C[v]))
C[v] = C[v].union(np.random.choice(pset_diff, K - len(C[v]), replace=False))
i += 1
C[v] = tuple(sorted(C[v]))
return C
def greedy_3(K, **kwargs):
pset_posteriors = kwargs.get("pset_posteriors")
assert pset_posteriors is not None, "pset posteriors path (-p) required for algo == greedy-3"
return greedy_2(K, scores=pset_posteriors)
def greedy_2_inverse(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-2-inverse"
n = len(scores)
C = dict({v: set(scores).difference({v}) for v in scores})
for v in scores:
psets_leq_n_minus_K = sorted([(pset, scores[v][pset])
for pset in scores[v] if len(pset) <= n - K],
key=lambda item: item[1])
psets_leq_n_minus_K = [item[0] for item in psets_leq_n_minus_K]
i = 0
while len(C[v]) > K:
if len(C[v].difference(psets_leq_n_minus_K[i])) >= K:
C[v] = C[v].difference(psets_leq_n_minus_K[i])
else:
pset_intersection = list(set(psets_leq_n_minus_K[i]).intersection(C[v]))
C[v] = C[v].difference(np.random.choice(pset_intersection, len(C[v]) - K, replace=False))
i += 1
C[v] = tuple(sorted(C[v]))
return C
def greedy_2_s_inverse(K, **kwargs):
s = kwargs.get("s")
scores = kwargs.get("scores")
assert not [s, scores].count(None), "s (-gs) and scorepath (-s) required for algo == greedy-2-s-inverse"
n = len(scores)
C = dict({v: set(scores).difference({v}) for v in scores})
for v in scores:
psets_leq_s = sorted([(pset, scores[v][pset])
for pset in scores[v] if len(pset) <= s],
key=lambda item: item[1])
psets_leq_s = [item[0] for item in psets_leq_s]
i = 0
while len(C[v]) > K:
if len(C[v].difference(psets_leq_s[i])) >= K:
C[v] = C[v].difference(psets_leq_s[i])
else:
pset_intersection = list(set(psets_leq_s[i]).intersection(C[v]))
C[v] = C[v].difference(np.random.choice(pset_intersection, len(C[v]) - K, replace=False))
i += 1
C[v] = tuple(sorted(C[v]))
return C
def greedy_backward_forward(K, **kwargs):
scores = kwargs.get("scores")
assert scores is not None, "scorepath (-s) required for algo == greedy-backward-forward"
def min_max(v):
return min([max([(u, scores.local(v, np.array(S + (u,))))
for S in subsets(C[v].difference({u}), 0, [len(C[v]) - 1 if scores.maxid == -1 else min(len(C[v]) - 1, scores.maxid-1)][0])],
key=lambda item: item[1])
for u in C[v]], key=lambda item: item[1])[0]
def highest_uncovered(v, U):
return max([(u, scores.local(v, np.array(S + (u,))))
for S in subsets(C[v], 0, [len(C[v]) if scores.maxid == -1 else min(len(C[v]), scores.maxid-1)][0])
for u in U],
key=lambda item: item[1])[0]
C = rnd(K, n=scores.n)
C = {v: set(C[v]) for v in C}
for v in C:
C_prev = dict(C)
while True:
u_hat = min_max(v)
C[v] = C[v].difference({u_hat})
u_hat = highest_uncovered(v, set(C).difference(C[v]).difference({v}))
C[v].add(u_hat)
if C == C_prev:
break
else:
C_prev = dict(C)
C[v] = tuple(sorted(C[v]))
return C
def pessy(K, **kwargs):
s = kwargs.get("s")
scores = kwargs.get("scores")
assert not [s, scores].count(None), "s (-gs) and scorepath (-s) required for algo == pessy"
def sum_scores(v, u):
sums = list()
for Y in subsets(sorted(C[v] + [u]),
min(len(C[v]) + 1, max(0, K - s)),
min(len(C[v]) + 1, max(0, K - s))):
sums.append(np.logaddexp.reduce([scores.local(v, S) for S in subsets(Y, 0, len(Y))]))
return min(sums)
C = dict({v: list() for v in range(scores.n)})
for v in range(scores.n):
U = [u for u in range(scores.n) if u != v]
while len(C[v]) < K:
max_u = max([(u, sum_scores(v, u)) for u in U], key=lambda item: item[1])[0]
C[v].append(max_u)
U = [u for u in U if u != max_u]
C[v] = tuple(sorted(C[v]))
return C
candidate_parent_algorithm = {
"opt": opt,
# "rnd": rnd,
"top": top,
"pc": pc,
"mb": mb,
"ges": ges,
# "hc": hc,
# "greedy": greedy,
# "pessy": pessy,
"greedy": greedy_1,
"greedy-lite": greedy_lite,
# "greedy-2": greedy_2,
# "greedy-3": greedy_3,
# "greedy-1-double": greedy_1_double,
# "greedy-1-sum": greedy_1_sum,
# "greedy-1-double-sum": greedy_1_double_sum,
# "greedy-2-inverse": greedy_2_inverse,
# "greedy-2-s": greedy_2_s,
# "greedy-2-s-inverse": greedy_2_s_inverse,
"back-forth": greedy_backward_forward,
# "hybrid": hybrid,
}
def eval_candidates(C, pset_posteriors):
v_cover = dict()
for v in C:
v_cover[v] = np.exp(np.logaddexp.reduce([pset_posteriors[v][pset] for pset in subsets(C[v], 0, len(C[v]))]))
return v_cover
def eval_candidates_gmean(pset_covers):
return np.exp(np.log(list(pset_covers.values())).mean())
def eval_candidates_amean(pset_covers):
return np.mean(list(pset_covers.values()))
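# Hedged usage sketch (editorial addition; toy scores, not original code):
# `scores` maps each node to {parent-set tuple: log score}. greedy_2 needs
# nothing beyond that mapping, so a three-node toy instance shows the interface.
if __name__ == "__main__":
    _toy_scores = {
        0: {(): -3.0, (1,): -1.0, (2,): -2.0, (1, 2): -0.5},
        1: {(): -3.0, (0,): -1.5, (2,): -1.0, (0, 2): -0.7},
        2: {(): -3.0, (0,): -1.2, (1,): -2.5, (0, 1): -0.9},
    }
    print(greedy_2(2, scores=_toy_scores))  # -> {0: (1, 2), 1: (0, 2), 2: (0, 1)}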
| [
"numpy.random.choice",
"inspect.currentframe",
"rpy2.robjects.packages.importr",
"numpy.array",
"rpy2.robjects.numpy2ri.deactivate",
"rpy2.robjects.numpy2ri.activate",
"rpy2.robjects.r"
] | [((2174, 2187), 'rpy2.robjects.r', 'r', (['load_funcs'], {}), '(load_funcs)\n', (2175, 2187), False, 'from rpy2.robjects import r\n'), ((2291, 2310), 'rpy2.robjects.numpy2ri.activate', 'numpy2ri.activate', ([], {}), '()\n', (2308, 2310), False, 'from rpy2.robjects import numpy2ri\n'), ((2459, 2480), 'rpy2.robjects.numpy2ri.deactivate', 'numpy2ri.deactivate', ([], {}), '()\n', (2478, 2480), False, 'from rpy2.robjects import numpy2ri\n'), ((8053, 8068), 'rpy2.robjects.packages.importr', 'importr', (['"""base"""'], {}), "('base')\n", (8060, 8068), False, 'from rpy2.robjects.packages import importr\n'), ((8073, 8089), 'rpy2.robjects.packages.importr', 'importr', (['"""pcalg"""'], {}), "('pcalg')\n", (8080, 8089), False, 'from rpy2.robjects.packages import importr\n'), ((11140, 11158), 'rpy2.robjects.packages.importr', 'importr', (['"""bnlearn"""'], {}), "('bnlearn')\n", (11147, 11158), False, 'from rpy2.robjects.packages import importr\n'), ((3693, 3733), 'numpy.random.choice', 'np.random.choice', (['C[v]', 'K'], {'replace': '(False)'}), '(C[v], K, replace=False)\n', (3709, 3733), True, 'import numpy as np\n'), ((15345, 15363), 'numpy.array', 'np.array', (['(S + (u,))'], {}), '(S + (u,))\n', (15353, 15363), True, 'import numpy as np\n'), ((3588, 3636), 'numpy.random.choice', 'np.random.choice', (['add_from', 'add_n'], {'replace': '(False)'}), '(add_from, add_n, replace=False)\n', (3604, 3636), True, 'import numpy as np\n'), ((474, 496), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (494, 496), False, 'import inspect\n'), ((13320, 13338), 'numpy.array', 'np.array', (['[parent]'], {}), '([parent])\n', (13328, 13338), True, 'import numpy as np\n'), ((14572, 14590), 'numpy.array', 'np.array', (['(S + (u,))'], {}), '(S + (u,))\n', (14580, 14590), True, 'import numpy as np\n'), ((24164, 24182), 'numpy.array', 'np.array', (['(S + (u,))'], {}), '(S + (u,))\n', (24172, 24182), True, 'import numpy as np\n'), ((3851, 3869), 'numpy.array', 'np.array', (['[parent]'], {}), '([parent])\n', (3859, 3869), True, 'import numpy as np\n'), ((23803, 23821), 'numpy.array', 'np.array', (['(S + (u,))'], {}), '(S + (u,))\n', (23811, 23821), True, 'import numpy as np\n'), ((4195, 4213), 'numpy.array', 'np.array', (['[parent]'], {}), '([parent])\n', (4203, 4213), True, 'import numpy as np\n')] |
# Calculation of Z equivalent for the circuit
# This function will return an array with the frequency and respective Z
# in the same order of the experimental data
# ------------------------------------------------------
# Copyright (C) 2020 <NAME>
# Licensed under the MIT license, see LICENSE.
# ------------------------------------------------------
import numpy as np
#Function for series circuit calculation
def s(*argv):
z=0
for arg in argv:
if (arg!=None):
z = z + arg
return z
#Function for parallel circuit calculation
def p(*argv):
z=0
for arg in argv:
if (arg!=None):
z = z + (1/arg)
z = (1/z)
return z
def impedance(freq,circ,comps,param):
    #Replacing symbols with values in the circuit
for f in range(len(param)):
circ = circ.replace(comps[f],str(param[f]),1)
    #Calculate equivalent impedance
Zeq = eval(circ)
#Store impedance parameters
sol = [freq,Zeq.real,Zeq.imag,abs(Zeq),np.angle(Zeq, deg=True)]
return sol
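# Hedged usage sketch (editorial addition; component names and values are
# illustrative, not from the original script): a 10-ohm resistor in series
# with two parallel resistors. Reactive parts work the same way if param
# holds complex impedances, e.g. 1/(1j*2*np.pi*freq*C) for a capacitor.
if __name__ == "__main__":
    circ = "s(R1,p(R2,R3))"
    comps = ["R1", "R2", "R3"]
    param = [10, 100, 50]
    print(impedance(50, circ, comps, param))  # [freq, Re(Z), Im(Z), |Z|, phase in degrees]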
| [
"numpy.angle"
] | [((1020, 1043), 'numpy.angle', 'np.angle', (['Zeq'], {'deg': '(True)'}), '(Zeq, deg=True)\n', (1028, 1043), True, 'import numpy as np\n')] |
from src.utils.bbox import BBox
import numpy as np
"""
Pose configuration
"""
class PoseConfig:
# The joint order defined by the system
NAMES = ["head", "leftShoulder", "rightShoulder", "leftElbow", "rightElbow", "leftWrist", "rightWrist", "leftHip",
"rightHip", "leftKnee", "rightKnee", "leftAnkle", "rightAnkle"]
HEAD, L_SHOULDER, R_SHOULDER, L_ELBOW, R_ELBOW, L_WRIST, R_WRIST = 0, 1, 2, 3, 4, 5, 6
L_HIP, R_HIP, L_KNEE, R_KNEE, L_ANKLE, R_ANKLE = 7, 8, 9, 10, 11, 12
# The available bones
BONES = [(1, 3), (3, 5), (2, 4), (4, 6), (7, 9), (9, 11), (8, 10), (10, 12), (7, 8), (1, 2), (1, 7), (2, 8)]
"""Return the total number of joints """
@staticmethod
def get_total_joints():
return len(PoseConfig.NAMES)
"""Return the total number of bones """
@staticmethod
def get_total_bones():
return len(PoseConfig.BONES)
"""
Wrap a 3D pose (numpy array of size <PoseConfig.get_total_joints(),3> )
"""
class Pose3D:
FROM_HUMAN_36_PERMUTATION = [6, 7, 10, 8, 11, 9, 12, 3, 0, 4, 1, 5, 2]
def __init__(self, npArray):
if len(npArray.shape) != 2 or npArray.shape[0] != PoseConfig.get_total_joints() or npArray.shape[1] != 3:
raise Exception("Pose 3D only accepts numpy array with shape : <total joints, 3 DIM>")
self.joints = npArray
"""Build a 3D pose from a numpy human36M ordered content"""
@staticmethod
def build_from_human36(npArray):
return Pose3D(npArray[Pose3D.FROM_HUMAN_36_PERMUTATION, :])
"""Return the 3D joints as numpy array"""
def get_joints(self):
return self.joints.copy()
def __str__(self):
return self.joints.__str__()
"""
Wrap a 2D pose (numpy array of size <PoseConfig.get_total_joints(),2> )
"""
class Pose2D:
    # The joints aren't in the same order in the different datasets
FROM_MPII_PERMUTATION = [9, 13, 12, 14, 11, 15, 10, 3, 2, 4, 1, 5, 0]
FROM_COCO_PERMUTATION = [0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
FROM_COCO2_PERMUTATION = [0, 4, 1, 5, 2, 6, 3, 10, 7, 11, 8, 12, 9]
TO_HUMAN_36_PERMUTATION = [8, 10, 12, 7, 9, 11, 0, 1, 3, 5, 2, 4, 6]
# FROM_POSE2D_PERMUTATION = [0, 5, 2, 6, 3, 7, 4, 11, 8, 12, 9, 13, 10]
def __init__(self, npArray):
if len(npArray.shape) != 2 or npArray.shape[0] != PoseConfig.get_total_joints() or npArray.shape[1] != 2:
raise Exception("Pose 2D only accepts numpy array with shape : <total joints, 2 DIM>")
self.joints = npArray
self.is_active_mask = []
for joint_id in range(PoseConfig.get_total_joints()):
self.is_active_mask.append(not np.array_equal(self.joints[joint_id, [0, 1]], [-1, -1]))
self.is_active_mask = np.array(self.is_active_mask)
"""Build a 2D pose from a numpy mpii ordered content"""
@staticmethod
def build_from_mpii(npArray):
return Pose2D(npArray[Pose2D.FROM_MPII_PERMUTATION, :])
"""Build a 2D pose from a numpy coco ordered content"""
@staticmethod
def build_from_coco(npArray):
joints = npArray[Pose2D.FROM_COCO_PERMUTATION, :]
return Pose2D(joints)
"""Build a 2D pose from a json with the format : {jointName : {'x','y'}, ...}
with jointName in PoseConfig.NAMES"""
@staticmethod
def build_from_JSON(json):
joints = np.zeros([PoseConfig.get_total_joints(), 2]) - 1.0
for jointId, name in enumerate(PoseConfig.NAMES):
joints[jointId, 0] = json[name]['x']
joints[jointId, 1] = json[name]['y']
return Pose2D(joints)
"""Return the 2D joints as numpy array"""
def get_joints(self):
return self.joints.copy()
"""Scale the x,y position by xScaler, yScaler"""
def scale(self, xScaler=1.0, yScaler=1.0):
joints = self.joints.copy()
joints[self.is_active_mask, 0] = joints[self.is_active_mask, 0] * xScaler
joints[self.is_active_mask, 1] = joints[self.is_active_mask, 1] * yScaler
return Pose2D(joints)
def to_pose_3d_features2(self):
joints = self.joints.copy()
# TODO : 2 next lines useless
# normalize features
center = self.get_gravity_center()
joints[self.is_active_mask, :] = joints[self.is_active_mask, :] - center
joints[self.is_active_mask, :] = joints[self.is_active_mask, :] - joints[self.is_active_mask, :].min(0)
joints[self.is_active_mask, :] = joints[self.is_active_mask, :] / joints[self.is_active_mask, :].max(0)
return joints.reshape(-1)
"""Convert the 2D pose to the numpy features required by the 2D=>3D model"""
def to_pose_3d_features(self):
joints = self.joints.copy()
# normalize features
center_hip = (joints[7, :] + joints[8, :]) / 2.0
joints[:, 0] = joints[:, 0] - center_hip[0]
joints[:, 1] = joints[:, 1] - center_hip[1]
joints = joints / (np.absolute(joints).max() + 0.0000000000001)
joints[:, 1] = joints[:, 1]
# convert to human36 joints order
joints = joints[Pose2D.TO_HUMAN_36_PERMUTATION, :]
# build a batch of 1 record
features = np.concatenate([joints[:, 0], joints[:, 1]])
features = np.expand_dims(features, axis=0)
return features
"""Return the total number of labeled joints (x and y position are != -1)"""
def total_labeled_joints(self):
return self.is_active_mask.sum()
"""Return the mask of labeled joints (x and y position are != -1)"""
def get_active_joints(self):
return self.is_active_mask.copy()
"""Return true if the given joint_id is labeled"""
def is_active_joint(self, joint_id):
return self.is_active_mask[joint_id]
def distance_to(self, that):
mask_1 = that.get_active_joints()
mask_2 = self.get_active_joints()
mask = mask_1 & mask_2
j1 = self.get_joints()[mask, :]
j2 = that.get_joints()[mask, :]
return np.sqrt(((j1 - j2) ** 2).sum(1)).mean()
def get_gravity_center(self):
return self.joints[self.is_active_mask, :].mean(0)
"""Transform the pose in a bounding box or return the 100%, 100% box if impossible"""
def to_bbox(self):
if self.is_active_mask.sum() < 3:
return BBox(0, 1, 0, 1)
min_x, max_x = self.joints[self.is_active_mask, 0].min(), self.joints[self.is_active_mask, 0].max()
min_y, max_y = self.joints[self.is_active_mask, 1].min(), self.joints[self.is_active_mask, 1].max()
return BBox(min_x, max_x, min_y, max_y)
"""Return the pose in absolute coordinate if recorded from the given bbox"""
def to_absolute_coordinate_from(self, bbox):
joints = self.joints.copy()
joints[self.is_active_mask, 0] = joints[self.is_active_mask, 0] * (
bbox.get_max_x() - bbox.get_min_x()) + bbox.get_min_x()
joints[self.is_active_mask, 1] = joints[self.is_active_mask, 1] * (
bbox.get_max_y() - bbox.get_min_y()) + bbox.get_min_y()
return Pose2D(joints)
"""Return the pose in the coordinate of the given bbox"""
def to_relative_coordinate_into(self, bbox):
joints = self.joints.copy()
scale_x = bbox.get_max_x() - bbox.get_min_x()
scale_y = bbox.get_max_y() - bbox.get_min_y()
joints[self.is_active_mask, 0] = (joints[self.is_active_mask, 0] - bbox.get_min_x()) / scale_x
joints[self.is_active_mask, 1] = (joints[self.is_active_mask, 1] - bbox.get_min_y()) / scale_y
return Pose2D(joints)
"""Clamp the results in the selected range :min_value, max_value"""
def clamp(self, min_value, max_value):
new_joints = self.joints.copy()
new_joints[self.is_active_mask, :] = np.clip(new_joints[self.is_active_mask, :], min_value, max_value)
return Pose2D(new_joints)
def __str__(self):
return self.joints.__str__()
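# Hedged usage sketch (editorial addition; random joints are for illustration only):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    joints = rng.random((PoseConfig.get_total_joints(), 2))  # normalised x,y in [0, 1)
    pose = Pose2D(joints)
    box = pose.to_bbox()  # tight box around the active joints
    rel = pose.to_relative_coordinate_into(box).clamp(0.0, 1.0)
    print(rel.total_labeled_joints(), "joints labelled")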
| [
"numpy.clip",
"numpy.absolute",
"src.utils.bbox.BBox",
"numpy.array",
"numpy.array_equal",
"numpy.expand_dims",
"numpy.concatenate"
] | [((2756, 2785), 'numpy.array', 'np.array', (['self.is_active_mask'], {}), '(self.is_active_mask)\n', (2764, 2785), True, 'import numpy as np\n'), ((5183, 5227), 'numpy.concatenate', 'np.concatenate', (['[joints[:, 0], joints[:, 1]]'], {}), '([joints[:, 0], joints[:, 1]])\n', (5197, 5227), True, 'import numpy as np\n'), ((5247, 5279), 'numpy.expand_dims', 'np.expand_dims', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (5261, 5279), True, 'import numpy as np\n'), ((6566, 6598), 'src.utils.bbox.BBox', 'BBox', (['min_x', 'max_x', 'min_y', 'max_y'], {}), '(min_x, max_x, min_y, max_y)\n', (6570, 6598), False, 'from src.utils.bbox import BBox\n'), ((7797, 7862), 'numpy.clip', 'np.clip', (['new_joints[self.is_active_mask, :]', 'min_value', 'max_value'], {}), '(new_joints[self.is_active_mask, :], min_value, max_value)\n', (7804, 7862), True, 'import numpy as np\n'), ((6316, 6332), 'src.utils.bbox.BBox', 'BBox', (['(0)', '(1)', '(0)', '(1)'], {}), '(0, 1, 0, 1)\n', (6320, 6332), False, 'from src.utils.bbox import BBox\n'), ((2668, 2723), 'numpy.array_equal', 'np.array_equal', (['self.joints[joint_id, [0, 1]]', '[-1, -1]'], {}), '(self.joints[joint_id, [0, 1]], [-1, -1])\n', (2682, 2723), True, 'import numpy as np\n'), ((4944, 4963), 'numpy.absolute', 'np.absolute', (['joints'], {}), '(joints)\n', (4955, 4963), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import Support_functions_Morse as EF
def Morse_V(r):
    V = D_e*(1 - np.exp(-a*(r - r_e)))**2  # np.exp rather than the truncated constant 2.718
return V
m,h = 1,1
Xmin = 0.5
Xmax = 6
D_e = 10
a = 0.8
r_e = 1
R = np.linspace(Xmin, Xmax, 10**3)
for p in range(len(R)):
if Morse_V(R[p]) < D_e:
Xmin = R[p] - 1
Pos = p
break
R=np.linspace(Xmin, Xmax, 10**4)
EF.Constant_feeder(h, m, D_e, Morse_V)
Eigen_E=EF.Eigen_Range_finder(R, 0, D_e*.9, 10, 0.1)
fig=plt.figure()
ax=plt.axes(xlabel="r", ylabel="Psi", ylim=(0, Eigen_E[-1]*1.1))
R=np.linspace(Xmin, Xmax, 10**5)
EF.Plot_Eq(R, Eigen_E, ax)
EF.Analatic_mult(R, np.arange(len(Eigen_E)), D_e, a, r_e, ax)
ax.plot(R, Morse_V(R))
plt.legend()
plt.show()
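# Hedged cross-check (editorial addition): the Morse potential has the standard
# closed-form levels E_n = w0*(n + 1/2) - (w0*(n + 1/2))**2/(4*D_e) with
# w0 = a*sqrt(2*D_e/m) (hbar = 1), giving a quick sanity check on the numerics.
w0 = a*np.sqrt(2*D_e/m)
E_analytic = [w0*(n + 0.5) - (w0*(n + 0.5))**2/(4*D_e) for n in range(len(Eigen_E))]
print("numeric eigenvalues :", Eigen_E)
print("analytic eigenvalues:", E_analytic)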
| [
"Support_functions_Morse.Constant_feeder",
"Support_functions_Morse.Plot_Eq",
"Support_functions_Morse.Eigen_Range_finder",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((207, 239), 'numpy.linspace', 'np.linspace', (['Xmin', 'Xmax', '(10 ** 3)'], {}), '(Xmin, Xmax, 10 ** 3)\n', (218, 239), True, 'import numpy as np\n'), ((325, 357), 'numpy.linspace', 'np.linspace', (['Xmin', 'Xmax', '(10 ** 4)'], {}), '(Xmin, Xmax, 10 ** 4)\n', (336, 357), True, 'import numpy as np\n'), ((356, 394), 'Support_functions_Morse.Constant_feeder', 'EF.Constant_feeder', (['h', 'm', 'D_e', 'Morse_V'], {}), '(h, m, D_e, Morse_V)\n', (374, 394), True, 'import Support_functions_Morse as EF\n'), ((404, 451), 'Support_functions_Morse.Eigen_Range_finder', 'EF.Eigen_Range_finder', (['R', '(0)', '(D_e * 0.9)', '(10)', '(0.1)'], {}), '(R, 0, D_e * 0.9, 10, 0.1)\n', (425, 451), True, 'import Support_functions_Morse as EF\n'), ((453, 465), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (463, 465), True, 'import matplotlib.pyplot as plt\n'), ((469, 532), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlabel': '"""r"""', 'ylabel': '"""Psi"""', 'ylim': '(0, Eigen_E[-1] * 1.1)'}), "(xlabel='r', ylabel='Psi', ylim=(0, Eigen_E[-1] * 1.1))\n", (477, 532), True, 'import matplotlib.pyplot as plt\n'), ((533, 565), 'numpy.linspace', 'np.linspace', (['Xmin', 'Xmax', '(10 ** 5)'], {}), '(Xmin, Xmax, 10 ** 5)\n', (544, 565), True, 'import numpy as np\n'), ((564, 590), 'Support_functions_Morse.Plot_Eq', 'EF.Plot_Eq', (['R', 'Eigen_E', 'ax'], {}), '(R, Eigen_E, ax)\n', (574, 590), True, 'import Support_functions_Morse as EF\n'), ((676, 688), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (686, 688), True, 'import matplotlib.pyplot as plt\n'), ((689, 699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (697, 699), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 02:11:48 2019
@author: abhijay
"""
import argparse
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import seaborn as sns
sns.set()
class recurrent_classifier:
def __init__(self, train_path):
self.seq_x, self.seq_y, self.labels, self.noOfLabels = self.get_data(train_path)
self.test_seq_x, self.test_seq_y, self.test_labels, self.test_noOfLabels = self.get_data(test_path)
# For generating features from the data
def make_feature_y(self, t, x, y, is_yhat=False):
if t-(self.historyLen)<0:
y_history = y[0:t]
else:
y_history = y[t-(self.historyLen):t]
y_history = np.pad(y_history, pad_width=(0,self.historyLen-len(y_history)), mode='constant', constant_values=(0))
y_history_ = np.zeros((len(y_history),len(self.labels)))
for i, ind in enumerate(y_history):
if ind>0:
y_history_[i,ind-1] = 1
f_n = np.append( x[t], y_history_)
if is_yhat:
return f_n
else:
return ( f_n, y[t])
# For making the instances
def process_data(self, seq_x, seq_y):
L = [] # Initialize set of classification examples
for i, (x, y) in enumerate(zip( seq_x, seq_y)):
L_i = []
for t in range(x.shape[0]):
L_i_ = self.make_feature_y( t, x, y)
L_i.append( L_i_)
L.append(L_i)
return L
# For converting the instances to a feature set X
def make_XY( self, L):
X = []
Y = []
for seq in L:
for (x,y) in seq:
X.append(x)
Y.append(y)
X = np.array(X)
Y = np.array(Y)
return X, Y
# For training on a linear classifier
def learn_classifier( self, L):
# Convert the instances to a feature set
X, Y = self.make_XY( L)
linearClassifier = svm.SVC(kernel='linear', gamma='scale')
print ("\nTraining on.....", X.shape)
fit = linearClassifier.fit(X, Y)
return linearClassifier
# For calculating the recurrent error
def calc_recurrent_error(self, L, linearClassifier):
error = []
mistakes = []
for seq in L:
X=[]
Y=[]
for (x,y) in seq:
X.append(x)
Y.append(y)
# Predict on each example in the sequence
Y_hat = linearClassifier.predict(np.array(X))
error.append(sum(Y_hat-Y!=0)/len(Y))
mistakes.append(sum(Y_hat-Y!=0))
return np.mean( error), sum(mistakes)
def calc_iid_error(self, L, linearClassifier):
X, Y = self.make_XY(L)
Y_hat = linearClassifier.predict(X)
error = sum(Y_hat-Y!=0)/len(Y)
# print ("IID Error: ", error)
return error
def exact_imitation( self):
# For training data
L = self.process_data( self.train_seq_x, self.train_seq_y)
# h = Classifier Learner
linearClassifier = self.learn_classifier(L)
return linearClassifier, L
def learning_via_dagger( self, L_ei, beta_j):
d_max = 5
val_error = []
val_mistakes = []
test_error = []
test_mistakes = []
print ("\n===== beta_j ===== ",beta_j)
print ("===== Training on L_ei =====")
# h = Classifier Learner
H_hat = self.learn_classifier(L_ei)
# Best H_hat
best_h_hat = H_hat
val_error_lowest = 100
L_best = L_ei.copy()
print ("\n===== Applying Dagger =====")
L = L_ei.copy()
for dagger_iteration in range(d_max):
# For training data
for i, (x, y) in enumerate(zip( self.train_seq_x, self.train_seq_y)):
y_hat = []
# For each example in the sequence
for t in range(x.shape[0]):
L_i = self.make_feature_y( t, x, y_hat, True)
# The policy we are following
if np.random.random_sample() >= beta_j:
y_hat.append(H_hat.predict(np.array([L_i]))[0])
else:
y_hat.append(y[t])
# Aggregate data
if y_hat[t] != y[t]:
L.append([( L_i, y[t])])
# Train a classifier after data aggregation
H_hat = self.learn_classifier(L)
val_error_, val_mistakes_ = self.calc_recurrent_error( self.L_val, H_hat)
val_error.append(val_error_)
val_mistakes.append(val_mistakes_)
print ("Recurrent Val Error: ", val_error_)
if val_error_ < val_error_lowest:
val_error_lowest = val_error_
best_h_hat = H_hat
L_best = L.copy()
test_error_, test_mistakes_ = self.calc_recurrent_error( self.L_test, H_hat)
test_error.append(test_error_)
test_mistakes.append(test_mistakes_)
print ("Recurrent Test Error: ", test_error_)
# decay
# beta_j *= 0.85
return best_h_hat, L_best, val_error, val_mistakes, test_error, test_mistakes
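    # Editorial note (hedged paraphrase of the loop above): at each time step the
    # expert label y[t] is rolled in with probability beta_j, and the learned
    # policy's prediction otherwise, i.e. roughly
    #   use_expert = np.random.random_sample() < beta_j
    #   action = y[t] if use_expert else H_hat.predict([features])[0]
    # Larger beta_j keeps the visited state distribution closer to the expert's.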
# For getting the data
def get_data(self, path):
with open(path, "r") as f:
lines = f.readlines()
seq_x = []
seq_y = []
seq_x_ = []
seq_y_ = []
labels = set()
for line in lines:
line = line.strip().split("\t")
if line[0] == '':
if seq_x_:
labels.update(seq_y_)
seq_x.append(np.array(seq_x_))
seq_y.append(np.array(seq_y_))
seq_x_ = []
seq_y_ = []
continue
else:
seq_x_.append( np.array([int(i) for i in line[1][2:]]))
seq_y_.append( line[2])
labels = list(labels)
labels.sort()
noOfLabels = len(labels)
return seq_x, seq_y, labels, noOfLabels
def plot( self, ei_value, y_values, yLabel, name, saveAs):
plt.figure()
ax = sns.lineplot(x=np.arange( 1, 6), y=np.repeat( ei_value, 5), label="Exact Imitation", dashes=True)
for i in np.arange( 0, 5):
ax = sns.lineplot(x=np.arange( 1, 6), y=y_values[i], label="Dagger, beta = "+str( round((i+5)*0.1,1)), markers=True)
ax.set_title(name)
ax.set_xlabel("Iterations")
ax.set_ylabel(yLabel)
box = ax.get_position()
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_position([box.x0, box.y0, box.width * 0.65, box.height])
# ax.set_ylim(min(min(y_values))-np.mean(y_values)/10,max(max(y_values))+np.mean(y_values)/10)
ax.figure.savefig("plots/"+saveAs+".png")
if __name__ == "__main__":
parser=argparse.ArgumentParser()
parser.add_argument('--dataset', help='Specifies the dataset to use')
args=parser.parse_args()
dataset = args.dataset
# dataset = 'nettalk_stress'
# dataset = 'ocr_fold0_sm'
train_path = "datasets/"+dataset+"_train.txt"
test_path = "datasets/"+dataset+"_test.txt"
dataset = dataset.split('_')[0]
print ("Dataset: "+dataset)
# Get data
recurrentClassifier = recurrent_classifier( train_path)
recurrentClassifier.dataset = dataset
# Preprocessing
l = []
for seq in recurrentClassifier.seq_x:
l.append(len(seq))
seq_y=[]
for y in recurrentClassifier.seq_y:
seq_y.append([recurrentClassifier.labels.index(y_)+1 for y_ in y])
recurrentClassifier.seq_y = seq_y
seq_y=[]
for y in recurrentClassifier.test_seq_y:
seq_y.append([recurrentClassifier.labels.index(y_)+1 for y_ in y])
recurrentClassifier.test_seq_y = seq_y
# Decide y_history length in the feature generation
recurrentClassifier.historyLen = 2
# Separate a validation set from the training dataset
if dataset == 'nettalk':
trainLen = int(0.9*len(recurrentClassifier.seq_x))
recurrentClassifier.train_seq_x = recurrentClassifier.seq_x[0:trainLen]
recurrentClassifier.train_seq_y = recurrentClassifier.seq_y[0:trainLen]
recurrentClassifier.val_seq_x = recurrentClassifier.seq_x[trainLen+1::]
recurrentClassifier.val_seq_y = recurrentClassifier.seq_y[trainLen+1::]
elif dataset == 'ocr':
trainLen = int(0.1*len(recurrentClassifier.seq_x))
recurrentClassifier.train_seq_x = recurrentClassifier.seq_x[trainLen+1::]
recurrentClassifier.train_seq_y = recurrentClassifier.seq_y[trainLen+1::]
recurrentClassifier.val_seq_x = recurrentClassifier.seq_x[0:trainLen]
recurrentClassifier.val_seq_y = recurrentClassifier.seq_y[0:trainLen]
print ("===== Perform exact_imiattion =====")
learned_classifier, L = recurrentClassifier.exact_imitation()
# For validation data
recurrentClassifier.L_val = recurrentClassifier.process_data( recurrentClassifier.val_seq_x, recurrentClassifier.val_seq_y)
val_error_ei, val_mistakes_ei = recurrentClassifier.calc_recurrent_error( recurrentClassifier.L_val, learned_classifier)
print ("\nRecurrent Error on Val data:\n", val_error_ei)
print ("\nIID Val Error on Val data:\n", recurrentClassifier.calc_iid_error( recurrentClassifier.L_val, learned_classifier))
# For test data
recurrentClassifier.L_test = recurrentClassifier.process_data( recurrentClassifier.test_seq_x, recurrentClassifier.test_seq_y)
test_error_ei, test_mistakes_ei = recurrentClassifier.calc_recurrent_error( recurrentClassifier.L_test, learned_classifier)
print ("\nRecurrent Test Error:\n", test_error_ei)
print ("\nIID Test Error:\n", recurrentClassifier.calc_iid_error( recurrentClassifier.L_test, learned_classifier))
print ("\n===== learning_via_dagger =====")
val_error = []
val_mistakes =[]
test_error = []
test_mistakes = []
for beta in np.linspace(0.5, 0.9, 5):
classifier, L_i, val_error_, val_mistakes_, test_error_, test_mistakes_ = recurrentClassifier.learning_via_dagger( L.copy(), beta)
val_error.append(val_error_)
val_mistakes.append(val_mistakes_)
test_error.append(test_error_)
test_mistakes.append(test_mistakes_)
recurrentClassifier.plot(val_error_ei, val_error, "Error rate", "Recurrent Error on Val_data ("+dataset+")", dataset+"/val_data_recurrent_error")
recurrentClassifier.plot(test_error_ei, test_error, "Error rate", "Recurrent Error on Test_data ("+dataset+")", dataset+"/test_data_recurrent_error")
recurrentClassifier.plot(val_mistakes_ei, val_mistakes, "Mistakes", "Mistakes on Val_data ("+dataset+")", dataset+"/val_data_mistakes")
    recurrentClassifier.plot(test_mistakes_ei, test_mistakes, "Mistakes", "Mistakes on Test_data ("+dataset+")", dataset+"/test_data_mistakes")
| [
"numpy.mean",
"seaborn.set",
"numpy.repeat",
"numpy.random.random_sample",
"argparse.ArgumentParser",
"numpy.append",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.arange",
"sklearn.svm.SVC"
] | [((223, 232), 'seaborn.set', 'sns.set', ([], {}), '()\n', (230, 232), True, 'import seaborn as sns\n'), ((7466, 7491), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7489, 7491), False, 'import argparse\n'), ((10625, 10649), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.9)', '(5)'], {}), '(0.5, 0.9, 5)\n', (10636, 10649), True, 'import numpy as np\n'), ((1087, 1114), 'numpy.append', 'np.append', (['x[t]', 'y_history_'], {}), '(x[t], y_history_)\n', (1096, 1114), True, 'import numpy as np\n'), ((1923, 1934), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1931, 1934), True, 'import numpy as np\n'), ((1947, 1958), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1955, 1958), True, 'import numpy as np\n'), ((2188, 2227), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'gamma': '"""scale"""'}), "(kernel='linear', gamma='scale')\n", (2195, 2227), False, 'from sklearn import svm\n'), ((6710, 6722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6720, 6722), True, 'import matplotlib.pyplot as plt\n'), ((6851, 6866), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (6860, 6866), True, 'import numpy as np\n'), ((2914, 2928), 'numpy.mean', 'np.mean', (['error'], {}), '(error)\n', (2921, 2928), True, 'import numpy as np\n'), ((2774, 2785), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2782, 2785), True, 'import numpy as np\n'), ((6751, 6766), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (6760, 6766), True, 'import numpy as np\n'), ((6771, 6793), 'numpy.repeat', 'np.repeat', (['ei_value', '(5)'], {}), '(ei_value, 5)\n', (6780, 6793), True, 'import numpy as np\n'), ((6901, 6916), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (6910, 6916), True, 'import numpy as np\n'), ((4512, 4537), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (4535, 4537), True, 'import numpy as np\n'), ((6211, 6227), 'numpy.array', 'np.array', (['seq_x_'], {}), '(seq_x_)\n', (6219, 6227), True, 'import numpy as np\n'), ((6262, 6278), 'numpy.array', 'np.array', (['seq_y_'], {}), '(seq_y_)\n', (6270, 6278), True, 'import numpy as np\n'), ((4600, 4615), 'numpy.array', 'np.array', (['[L_i]'], {}), '([L_i])\n', (4608, 4615), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson
from math import floor
import matplotlib.pyplot as plt
import os
from helper_functions import read_in_NNDSS, read_in_Reff_file
from params import case_insertion_threshold
class Person:
"""
Individuals in the forecast
"""
def __init__(self,parent, infection_time,detection_time, recovery_time,category:str):
"""
Category is one of 'I','A','S' for Imported, Asymptomatic and Symptomatic
"""
self.parent = parent
self.infection_time = infection_time
self.detection_time = detection_time
self.recovery_time = recovery_time
self.category = category
class Forecast:
"""
    Forecast object that contains methods to simulate a forecast forward, given Reff and current state.
"""
def __init__(self,current, state,start_date,
forecast_date, cases_file_date,
VoC_flag='', scenario=''
):
"""Create forecast object with parameters in preperation for running simulation.
Args:
current (list of ints): A list of the infected people at the start of the simulation.
state (str): The state to simulate.
start_date (str): The %Y-%m-%d string of the start date of the forecast.
            forecast_date (str, optional): Date to forecast from. Usually the same as cases_file_date.
cases_file_date (str): Date of cases file to use. Format "2021-01-01".
VoC_flag (str, optional): Which VoC to increase Reff to. Can be empty str.
scenario (str, optional): Filename suffix for scenario run. Can be empty str.
"""
import numpy as np
from params import local_detection, a_local_detection, qi_d, alpha_i, k
self.initial_state = current.copy() # Observed cases on start day
        # Create an object list of Persons based on observed cases on the start day.
people = ['I']*current[0] + ['A']*current[1] + ['S']*current[2]
self.initial_people = {i: Person(0,0,0,0,cat) for i,cat in enumerate(people)}
self.state = state
#start date sets day 0 in script to start_date
self.start_date = pd.to_datetime(start_date,format='%Y-%m-%d')
self.alpha_i = alpha_i[state]
self.qi = qi_d[state] # Probability of *unobserved* imported infectious individuals
self.symptomatic_detection_prob = local_detection[state]
self.asymptomatic_detection_prob = a_local_detection[state]
self.k = k # Hard coded
self.qua_ai = 2 if state=='NSW' else 1 # Pre-march-quarantine version of alpha_i.
self.gam = 1/2
self.ps = 0.7 # Probability of being symptomatic
        # Increase Reff due to this VoC
self.VoC_flag = VoC_flag
# Add an optional scenario flag to load in specific Reff scenarios and save results. This does not change the run behaviour of the simulations.
self.scenario = scenario
# forecast_date and cases_file_date are usually the same.
        self.forecast_date = (pd.to_datetime(forecast_date,format='%Y-%m-%d') - self.start_date).days # Days from start_date to the forecast date
self.cases_file_date = cases_file_date
# Load in Reff data before running all sims
self.Reff_all = read_in_Reff_file(self.cases_file_date, self.VoC_flag, scenario=self.scenario)
##### Assumption dates.
# Date from which quarantine was started
self.quarantine_change_date = pd.to_datetime('2020-04-15',format='%Y-%m-%d').dayofyear - self.start_date.dayofyear
        # Day from which to reduce escapes of imported overseas-acquired cases due to quarantine worker vaccination
self.hotel_quarantine_vaccine_start = (pd.to_datetime("2021-05-01",format='%Y-%m-%d') - self.start_date).days
# Day from which to start treating imported cases as delta cases
self.VoC_on_imported_effect_start = (pd.to_datetime("2021-05-01",format='%Y-%m-%d') - self.start_date).days
# This is a parameter which decreases the detection probability before the date where VIC started testing properly. Could be removed in future.
if state == "VIC":
self.test_campaign_date = (pd.to_datetime('2020-06-01',format='%Y-%m-%d') - self.start_date).days
self.test_campaign_factor = 1.5
else:
self.test_campaign_date = None
assert len(people) == sum(current), "Number of people entered does not equal sum of counts in current status"
def initialise_sim(self,curr_time=0):
"""
Given some number of cases in self.initial_state (copied),
simulate undetected cases in each category and their
infectious times. Updates self.current for each person.
"""
from math import ceil
if curr_time ==0:
self.alpha_s = 1/(self.ps + self.gam*(1-self.ps))
self.alpha_a = self.gam * self.alpha_s
self.current = self.initial_state.copy()
self.people = self.initial_people.copy()
#N samples for each of infection and detection times
#Grab now and iterate through samples to save simulation
self.generate_times(size=10000)
self.get_inf_time = self.iter_inf_time()
self.get_detect_time = self.iter_detect_time()
#counters for terminating early
self.inf_backcast_counter = 0
self.inf_nowcast_counter = 0
self.inf_forecast_counter = 0
#assign infection time to those discovered
# obs time is day =0
for person in self.people.keys():
self.people[person].infection_time = -1*next(self.get_detect_time)
else:
#reinitialising, so actual people need times
#assume all symptomatic
prob_symp_given_detect = self.symptomatic_detection_prob*self.ps/(
self.symptomatic_detection_prob*self.ps + self.asymptomatic_detection_prob*(1-self.ps)
)
num_symp = binom.rvs(n=int(self.current[2]), p=prob_symp_given_detect)
for person in range(int(self.current[2])):
self.infected_queue.append(len(self.people))
#inf_time = next(self.get_inf_time) #remove?
detection_time = next(self.get_detect_time)
if person <= num_symp:
new_person = Person(-1,
curr_time-1*detection_time ,
curr_time, 0, 'S')
else:
new_person = Person(-1,
curr_time-1*detection_time ,
curr_time, 0, 'A')
self.people[len(self.people)] = new_person
#self.cases[max(0,ceil(new_person.infection_time)), 2] +=1
#num undetected is nbinom (num failures given num detected)
if self.current[2]==0:
num_undetected_s = nbinom.rvs(1,self.symptomatic_detection_prob)
else:
num_undetected_s = nbinom.rvs(self.current[2],self.symptomatic_detection_prob)
total_s = num_undetected_s + self.current[2]
#infer some non detected asymp at initialisation
if total_s==0:
num_undetected_a = nbinom.rvs(1, self.ps)
else:
num_undetected_a = nbinom.rvs(total_s, self.ps)
#simulate cases that will be detected within the next week
if curr_time==0:
#Add each undetected case into people
for n in range(num_undetected_a):
self.people[len(self.people)] = Person(0, curr_time-1*next(self.get_inf_time) , 0, 0, 'A')
self.current[1] +=1
for n in range(num_undetected_s):
self.people[len(self.people)] = Person(0, curr_time-1*next(self.get_inf_time) , 0, 0, 'S')
self.current[2] +=1
else:
#reinitialised, so add these cases back onto cases
#Add each undetected case into people
for n in range(num_undetected_a):
new_person = Person(-1, curr_time-1*next(self.get_inf_time) , 0, 0, 'A')
self.infected_queue.append(len(self.people))
self.people[len(self.people)] = new_person
self.cases[max(0,ceil(new_person.infection_time)),1] +=1
for n in range(num_undetected_s):
new_person = Person(-1, curr_time-1*next(self.get_inf_time) , 0, 0, 'S')
self.infected_queue.append(len(self.people))
self.people[len(self.people)] = new_person
self.cases[max(0,ceil(new_person.infection_time)),2] +=1
def read_in_Reff(self):
"""
Read in Reff CSV that was produced by the generate_R_L_forecasts.py script.
"""
import pandas as pd
df_forecast = self.Reff_all
# Get R_I values and store in object.
self.R_I = df_forecast.loc[(df_forecast.type=='R_I')&(df_forecast.state==self.state),self.num_of_sim%2000].values[0]
df_forecast = df_forecast.loc[df_forecast.type=='R_L'] # Get only R_L forecasts
df_forecast = df_forecast.set_index(['state','date'])
dfReff_dict = df_forecast.loc[self.state,[0,1]].to_dict(orient='index')
Reff_lookupstate = {}
for key, stats in dfReff_dict.items():
#instead of mean and std, take all columns as samples of Reff
newkey = (key - self.start_date).days #convert key to days since start date for easier indexing
Reff_lookupstate[newkey] = df_forecast.loc[(self.state,key),self.num_of_sim%2000]
self.Reff = Reff_lookupstate
def generate_new_cases(self,parent_key, Reff,k,travel=False):
"""
        Generate offspring for each parent, check if they travel. The parent_key parameter lets us find the parent in the dict self.people containing the objects from the branching process.
"""
from math import ceil
from numpy.random import random
# Check parent category
if self.people[parent_key].category=='S': # Symptomatic
num_offspring = nbinom.rvs(n=k,p= 1- self.alpha_s*Reff/(self.alpha_s*Reff + k))
elif self.people[parent_key].category=='A': # Asymptomatic
num_offspring = nbinom.rvs(n=k, p = 1- self.alpha_a*Reff/(self.alpha_a*Reff + k))
else: # Imported
Reff = self.R_I
# Apply vaccine reduction for hotel quarantine workers
if self.people[parent_key].infection_time >= self.hotel_quarantine_vaccine_start:
p_vh = 0.9+beta.rvs(2,4)*9/100 # p_{v,h} is the proportion of hotel quarantine workers vaccinated
v_eh = 0.83+beta.rvs(2,2)*14/100 # v_{e,h} is the overall vaccine effectiveness
Reff *= (1-p_vh*v_eh)
# Apply increase escape rate due to Delta variant.
if self.people[parent_key].infection_time >= self.VoC_on_imported_effect_start:
Reff = Reff*1.39*1.3
if self.people[parent_key].infection_time < self.quarantine_change_date:
                # increased infectiousness (qua_ai) prior to the quarantine changes
num_offspring = nbinom.rvs(n=k, p = 1- self.qua_ai*Reff/(self.qua_ai*Reff + k))
else:
num_offspring = nbinom.rvs(n=k, p = 1- self.alpha_i*Reff/(self.alpha_i*Reff + k))
if num_offspring >0:
num_sympcases = self.new_symp_cases(num_offspring)
if self.people[parent_key].category=='A':
child_times = []
for new_case in range(num_offspring):
#define each offspring
inf_time = self.people[parent_key].infection_time + next(self.get_inf_time)
if inf_time > self.forecast_date:
self.inf_forecast_counter +=1
#normal case within state
if self.people[parent_key].category=='A':
child_times.append(ceil(inf_time))
if ceil(inf_time) > self.cases.shape[0]:
#new infection exceeds the simulation time, not recorded
self.cases_after = self.cases_after + 1
else:
#within forecast time
detection_rv = random()
detect_time = inf_time + next(self.get_detect_time)
recovery_time = 0 #for now not tracking recoveries
if new_case <= num_sympcases-1: #minus 1 as new_case ranges from 0 to num_offspring-1
                        #first num_sympcases are symptomatic, rest are asymptomatic
category = 'S'
self.cases[max(0,ceil(inf_time)-1),2] += 1
if self.test_campaign_date is not None:
#see if case is during a testing campaign
if inf_time <self.test_campaign_date:
detect_prob = self.symptomatic_detection_prob
else:
detect_prob = min(0.95,self.symptomatic_detection_prob*self.test_campaign_factor)
else:
detect_prob = self.symptomatic_detection_prob
if detection_rv < detect_prob:
#case detected
#only care about detected cases
if detect_time < self.cases.shape[0]:
if detect_time <self.forecast_date:
if detect_time >self.forecast_date -14:
self.inf_nowcast_counter +=1
elif detect_time >self.forecast_date - 60:
self.inf_backcast_counter +=1
self.observed_cases[max(0,ceil(detect_time)-1),2] += 1
else:
category = 'A'
self.cases[max(0,ceil(inf_time)-1),1] += 1
#detect_time = 0
if self.test_campaign_date is not None:
#see if case is during a testing campaign
if inf_time <self.test_campaign_date:
detect_prob = self.asymptomatic_detection_prob
else:
detect_prob = min(0.95,self.asymptomatic_detection_prob*self.test_campaign_factor)
else:
detect_prob=self.asymptomatic_detection_prob
if detection_rv < detect_prob:
#case detected
#detect_time = inf_time + next(self.get_detect_time)
if detect_time < self.cases.shape[0]:
#counters increment before data date
if detect_time <self.forecast_date:
if detect_time >self.forecast_date - 14:
self.inf_nowcast_counter +=1
elif detect_time> self.forecast_date - 60:
self.inf_backcast_counter +=1
self.observed_cases[max(0,ceil(detect_time)-1),1] += 1
#add new infected to queue
self.infected_queue.append(len(self.people))
#add person to tracked people
self.people[len(self.people)] = Person(parent_key, inf_time, detect_time,recovery_time, category)
def simulate(self, end_time,sim,seed):
"""
Simulate forward until end_time
"""
from collections import deque
from math import ceil
import gc
np.random.seed(seed)
self.num_of_sim = sim
self.read_in_Reff()
#generate storage for cases
self.cases = np.zeros(shape=(end_time, 3),dtype=float)
self.observed_cases = np.zeros_like(self.cases)
self.observed_cases[0,:] = self.initial_state.copy()
        #Initialise undetected cases and add them to current
self.initialise_sim()
#number of cases after end time
self.cases_after = 0 #gets incremented in generate new cases
#Record day 0 cases
self.cases[0,:] = self.current.copy()
# Generate imported cases
new_imports = []
unobs_imports =[]
for day in range(end_time):
# Values for a and b are initialised in import_cases_model() which is called by read_in_cases() during setup.
a = self.a_dict[day]
b = self.b_dict[day]
# Dij = number of observed imported infectious individuals
Dij = nbinom.rvs(a, 1-1/(b+1))
# Uij = number of *unobserved* imported infectious individuals
unobserved_a = 1 if Dij == 0 else Dij
Uij = nbinom.rvs(unobserved_a, p=self.qi)
unobs_imports.append(Uij)
new_imports.append(Dij + Uij)
for day, imports in enumerate(new_imports):
self.cases[day,0] = imports
for n in range(imports):
#Generate people
if n - unobs_imports[day]>=0:
#number of observed people
new_person = Person(0,day,day +next(self.get_detect_time),0,'I')
self.people[len(self.people)] = new_person
if new_person.detection_time <= end_time:
self.observed_cases[max(0,ceil(new_person.detection_time)-1),0] +=1
else:
#unobserved people
new_person = Person(0,day,0,0,'I')
self.people[len(self.people)] = new_person
if day <= end_time:
self.cases[max(0,day-1), 0] +=1
#####
#Create queue for infected people
self.infected_queue = deque()
#Assign people to infected queue
for key, person in self.people.items():
#add to the queue
self.infected_queue.append(key)
#Record their times
if person.infection_time> end_time:
#initial undetected cases have slim chance to be infected
#after end_time
if person.category!='I':
#imports shouldn't count for extinction counts
self.cases_after +=1
print("cases after at initialisation")
                #Cases already recorded at initialise_sim() by adding to
# self.current
#Record initial inferred obs including importations.
self.inferred_initial_obs = self.observed_cases[0,:].copy()
#print(self.inferred_initial_obs, self.current)
# General simulation through time by proceeding through queue
# of infecteds
n_resim = 0
self.bad_sim = False
reinitialising_window = 3
self.daycount= 0
while len(self.infected_queue)>0:
day_end = self.people[self.infected_queue[0]].detection_time
if day_end < self.forecast_date:
if self.inf_backcast_counter > self.max_backcast_cases:
print("Sim "+str(self.num_of_sim
)+" in "+self.state+" has > "+str(self.max_backcast_cases)+" cases in backcast. Ending")
self.num_too_many+=1
self.bad_sim = True
break
elif self.inf_nowcast_counter > self.max_nowcast_cases:
print("Sim "+str(self.num_of_sim
)+" in "+self.state+" has > "+str(
self.max_nowcast_cases
)+" cases in nowcast. Ending")
self.num_too_many+=1
self.bad_sim = True
break
else:
#check max cases for after forecast date
if self.inf_forecast_counter>self.max_cases:
#hold value forever
if day_end < self.cases.shape[0]-1:
self.cases[ceil(day_end):,2] = self.cases[ceil(day_end)-2,2]
self.observed_cases[ceil(day_end):,2] = self.observed_cases[ceil(day_end)-2,2]
else:
self.cases_after +=1
print("Sim "+str(self.num_of_sim
)+" in "+self.state+" has >"+str(self.max_cases)+" cases in forecast period.")
self.num_too_many+=1
break
## stop if parent infection time greater than end time
if self.people[self.infected_queue[0]].infection_time >end_time:
self.infected_queue.popleft()
print("queue had someone exceed end_time!!")
else:
                #take appropriate Reff based on parent's infection time
curr_time = self.people[self.infected_queue[0]].infection_time
if type(self.Reff)==int:
Reff = 1
print("using flat Reff")
elif type(self.Reff)==dict:
while True:
#sometimes initial cases infection time is pre
#Reff data, so take the earliest one
try:
Reff = self.Reff[ceil(curr_time)-1]
except KeyError:
if curr_time>0:
print("Unable to find Reff for this parent at time: %.2f" % curr_time)
raise KeyError
curr_time +=1
continue
break
#generate new cases with times
parent_key = self.infected_queue.popleft()
#recorded within generate new cases
self.generate_new_cases(parent_key,Reff=Reff,k = self.k)
#self.people.clear()
if self.bad_sim ==False:
#Check simulation for discrepancies
for day in range(7,end_time):
#each day runs through self.infected_queue
missed_outbreak = self.data_check(day) #True or False
if missed_outbreak:
self.daycount +=1
if self.daycount >= reinitialising_window:
n_resim +=1
#print("Local outbreak in "+self.state+" not simulated on day %i" % day)
#cases to add
#treat current like empty list
self.current[2] = max(0,self.actual[day] - sum(self.observed_cases[day,1:]))
self.current[2] += max(0,self.actual[day-1] - sum(self.observed_cases[day-1,1:]))
self.current[2] += max(0,self.actual[day-2] - sum(self.observed_cases[day-2,1:]))
#how many cases are symp to asymp
prob_symp_given_detect = self.symptomatic_detection_prob*self.ps/(
self.symptomatic_detection_prob*self.ps + self.asymptomatic_detection_prob*(1-self.ps)
)
num_symp = binom.rvs(n=int(self.current[2]),
p=prob_symp_given_detect)
#distribute observed cases over 3 days
#Triangularly
self.observed_cases[max(0,day),2] += num_symp//2
self.cases[max(0,day),2] += num_symp//2
self.observed_cases[max(0,day-1),2] += num_symp//3
self.cases[max(0,day-1),2] += num_symp//3
self.observed_cases[max(0,day-2),2] += num_symp//6
self.cases[max(0,day-2),2] +=num_symp//6
#add asymptomatic
num_asymp = self.current[2] - num_symp
self.observed_cases[max(0,day),2] += num_asymp//2
self.cases[max(0,day),2] += num_asymp//2
self.observed_cases[max(0,day-1),2] += num_asymp//3
self.cases[max(0,day-1),2] += num_asymp//3
self.observed_cases[max(0,day-2),2] += num_asymp//6
self.cases[max(0,day-2),2] +=num_asymp//6
self.initialise_sim(curr_time=day)
#print("Reinitialising with %i new cases " % self.current[2] )
#reset days to zero
self.daycount = 0
if n_resim> 10:
print("This sim reinitilaised %i times" % n_resim)
self.bad_sim = True
n_resim = 0
break
#Each check of day needs to simulate the cases before moving
# to next check, otherwise will be doubling up on undetecteds
while len(self.infected_queue)>0:
day_end = self.people[self.infected_queue[0]].detection_time
#check for exceeding max_cases
if day_end <self.forecast_date:
if self.inf_backcast_counter > self.max_backcast_cases:
print("Sim "+str(self.num_of_sim
)+" in "+self.state+" has > "+str(self.max_backcast_cases)+" cases in backcast. Ending")
self.num_too_many+=1
self.bad_sim = True
break
elif self.inf_nowcast_counter > self.max_nowcast_cases:
print("Sim "+str(self.num_of_sim
)+" in "+self.state+" has > "+str(
self.max_nowcast_cases
)+" cases in nowcast. Ending")
self.num_too_many+=1
self.bad_sim = True
break
else:
if self.inf_forecast_counter> self.max_cases:
day_inf = self.people[self.infected_queue[0]].infection_time
self.cases[ceil(day_inf):,2] = self.cases[ceil(day_inf)-2,2]
self.observed_cases[ceil(day_inf):,2] = self.observed_cases[ceil(day_inf)-2,2]
print("Sim "+str(self.num_of_sim
)+" in "+self.state+" has >"+str(self.max_cases)+" cases in forecast period.")
self.num_too_many+=1
break
## stop if parent infection time greater than end time
if self.people[self.infected_queue[0]].infection_time >end_time:
personkey =self.infected_queue.popleft()
print("queue had someone exceed end_time!!")
else:
                    #take appropriate Reff based on parent's infection time
curr_time = self.people[self.infected_queue[0]].infection_time
if type(self.Reff)==int:
Reff = 2
elif type(self.Reff)==dict:
while True:
#sometimes initial cases infection time is pre
#Reff data, so take the earliest one
try:
Reff = self.Reff[ceil(curr_time)-1]
except KeyError:
if curr_time>0:
print("Unable to find Reff for this parent at time: %.2f" % curr_time)
raise KeyError
curr_time +=1
continue
break
#generate new cases with times
parent_key = self.infected_queue.popleft()
self.generate_new_cases(parent_key,Reff=Reff,k=self.k)
#missed_outbreak = max(1,missed_outbreak*0.9)
else:
#pass in here if while queue loop completes
continue
#only reach here if while loop breaks, so break the data check
break
self.people.clear()
gc.collect()
if self.bad_sim:
#return NaN arrays for all bad_sims
self.cumulative_cases = np.empty_like(self.cases)
self.cumulative_cases[:] = np.nan
return (self.cumulative_cases,self.cumulative_cases, {
'qs':self.symptomatic_detection_prob,
'metric':np.nan,
'qa':self.asymptomatic_detection_prob,
'qi':self.qi,
'alpha_a':self.alpha_a,
'alpha_s':self.alpha_s,
#'accept':self.accept,
'ps':self.ps,
'bad_sim':self.bad_sim,
'cases_after':self.cases_after,
'num_of_sim':self.num_of_sim,
}
)
else:
#good sim
## Perform metric for ABC
# self.get_metric(end_time)
return (
self.cases.copy(),
self.observed_cases.copy(), {
'qs':self.symptomatic_detection_prob,
'metric':np.nan,
'qa':self.asymptomatic_detection_prob,
'qi':self.qi,
'alpha_a':self.alpha_a,
'alpha_s':self.alpha_s,
#'accept':self.metric>=0.8,
'ps':self.ps,
'bad_sim':self.bad_sim,
'cases_after':self.cases_after,
'num_of_sim':self.num_of_sim,
}
)
def to_df(self,results):
"""
        Put results from the simulation into a pandas dataframe and save in parquet format. This is called externally by the run_state.py script.
"""
import pandas as pd
df_results = pd.DataFrame()
n_sims = results['symp_inci'].shape[1]
days = results['symp_inci'].shape[0]
sim_vars=['bad_sim','metrics','qs','qa','qi',
'accept','cases_after','alpha_a','alpha_s','ps']
for key, item in results.items():
if key not in sim_vars:
df_results = df_results.append(
pd.DataFrame(
item.T,index=pd.MultiIndex.from_product([
[key], range(n_sims)],
names=['Category', 'sim']
)
)
)
df_results.columns = pd.date_range(start = self.start_date,
periods=days #num of days
)
df_results.columns = [col.strftime('%Y-%m-%d') for
col in df_results.columns]
#Record simulation variables
for var in sim_vars:
df_results[var] = [results[var][sim] for cat,sim in df_results.index]
print('VoC_flag is', self.VoC_flag)
print("Saving results for state "+self.state)
df_results.to_parquet(
"./results/"+self.state+self.start_date.strftime(
format='%Y-%m-%d')+"sim_R_L"+str(n_sims)+"days_"+str(days)+self.VoC_flag+self.scenario+".parquet",
)
return df_results
def data_check(self,day):
"""
A metric to calculate how far the simulation is from the actual data
"""
try:
actual_3_day_total = 0
for i in range(3):
actual_3_day_total += self.actual[max(0,day-i)]
threshold = case_insertion_threshold*max(1,sum(
self.observed_cases[
max(0,day-2):day+1,2] + self.observed_cases[
max(0,day-2):day+1,1]
)
)
if actual_3_day_total > threshold:
return min(3,actual_3_day_total/threshold)
else:
#long absence, then a case, reintroduce
week_in_sim = sum(self.observed_cases[
max(0,day-7):day+1,2] + self.observed_cases[
max(0,day-7):day+1,1])
if week_in_sim == 0:
if actual_3_day_total >0:
return actual_3_day_total
#no outbreak missed
return False
except KeyError:
#print("No cases on day %i" % day)
return False
    # Deprecated as no longer using ABC
# def get_metric(self,end_time,omega=0.2):
# """
# Calculate the value of the metric of the current sim compared to NNDSS data.
# """
# self.actual_array = np.array([self.actual[day]
# #if day not in missed_dates else 0
# for day in range(end_time) ])
# #calculate case differences
# #moving windows
# sim_cases =self.observed_cases[
# :len(self.actual_array),2] + \
# self.observed_cases[:
# len(self.actual_array),1] #include asymp cases.
# #convolution with 1s should do cum sum
# window = 7
# sim_cases = np.convolve(sim_cases,
# [1]*window,mode='valid')
# actual_cum = np.convolve(self.actual_array,
# [1]*window,mode='valid')
# cases_diff = abs(sim_cases - actual_cum)
# #if sum(cases_diff) <= omega * sum(self.actual_array):
# #cumulative diff passes, calculate metric
# #sum over days number of times within omega of actual
# self.metric = sum(
# np.square(cases_diff)#,np.maximum(omega* actual_cum,7)
# )
# self.metric = self.metric/(end_time-window) #max is end_time
def read_in_cases(self):
"""
Read in NNDSS case data to measure incidence against simulation. Nothing is returned as results are saved in object.
"""
import pandas as pd
from datetime import timedelta
import glob
df = read_in_NNDSS(self.cases_file_date) # Call helper_function
self.import_cases_model(df)
df = df.loc[df.STATE==self.state]
if self.state=='VIC':
#data quality issue
df.loc[df.date_inferred<='2019-01-01','date_inferred'] = df.loc[
df.date_inferred<='2019-01-01','date_inferred'
] + pd.offsets.DateOffset(year=2020)
df.loc[df.date_inferred=='2002-07-03','date_inferred'] = pd.to_datetime('2020-07-03')
df.loc[df.date_inferred=='2002-07-17','date_inferred'] = pd.to_datetime('2020-07-17')
df = df.groupby(['date_inferred'])[['imported','local']].sum()
df.reset_index(inplace=True)
#make date integer from start of year
timedelta_from_start = df.date_inferred - self.start_date
df['date'] = timedelta_from_start.apply(lambda x: x.days)
#df['date'] = df.date_inferred.apply(lambda x: x.dayofyear) -self.start_date.dayofyear
df = df.sort_values(by='date')
df = df.set_index('date')
#fill missing dates with 0 up to end_time
df = df.reindex(range(self.end_time), fill_value=0)
## calculate window of cases to measure against
if df.index.values[-1] >60:
            #if the final day of data is later than day 60, exclude cases before the last 60 days
forecast_days = self.end_time-self.forecast_date
self.cases_to_subtract = sum(df.local.values[:-1*(60+forecast_days)])
self.cases_to_subtract_now = sum(df.local.values[:-1*(14+forecast_days)])
else:
self.cases_to_subtract = 0
self.cases_to_subtract_now = 0
#self.imported_total = sum(df.imported.values)
self.max_cases = max(500000,sum(df.local.values) + sum(df.imported.values))
self.max_backcast_cases = max(100,4*(sum(df.local.values) - self.cases_to_subtract))
self.max_nowcast_cases = max(10, 1.5*(sum(df.local.values) - self.cases_to_subtract_now))
print("Local cases in last 14 days is %i" % (sum(df.local.values) - self.cases_to_subtract_now) )
print('Max limits: ', self.max_cases, self.max_backcast_cases, self.max_nowcast_cases)
self.actual = df.local.to_dict()
def import_cases_model(self, df):
"""
This function takes the NNDSS/linelist data and creates a set of parameters to generate imported (overseas acquired) cases over time.
        Resulting parameter dicts are saved in self.a_dict and self.b_dict rather than being returned.
"""
from datetime import timedelta
def get_date_index(date):
#subtract 4 from date to infer period of entry when infected
date = date-timedelta(days=4)
n_days_into_sim = (date - self.start_date).days
return n_days_into_sim
prior_alpha = 0.5 # Changed from 1 to lower prior (26/03/2021)
prior_beta = 1/5
df['date_index'] = df.date_inferred.apply(get_date_index)
df_state = df[df['STATE'] == self.state]
counts_by_date = df_state.groupby('date_index').imported.sum()
# Replace our value for $a$ with an exponential moving average
moving_average_a = {}
smoothing_factor = 0.1
current_ema = counts_by_date.get(-11, default = 0) # exponential moving average start
# Loop through each day up to forecast - 4 (as recent imports are not discovered yet)
for j in range(-10, self.forecast_date-4):
count_on_day = counts_by_date.get(j, default = 0)
current_ema = smoothing_factor*count_on_day + (1-smoothing_factor)*current_ema
moving_average_a[j] = prior_alpha+current_ema
# Set the imports moving forward to match last window
for j in range(self.forecast_date-4, self.end_time):
moving_average_a[j] = prior_alpha+current_ema
self.a_dict = moving_average_a
# Set all betas to prior plus effective period size of 1
self.b_dict = {i:prior_beta+1 for i in range(self.end_time)}
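        # A reading of the two dicts above (inferred from the code, not from an
        # external spec): daily imports appear to be modelled with a Gamma(a_j, b_j)
        # prior on the import rate, where
        #   a_j = prior_alpha + EMA_j,  EMA_j = s*count_j + (1 - s)*EMA_{j-1},  s = 0.1
        #   b_j = prior_beta + 1  (one effective period of observation).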
def generate_times(self, i=3.64, j=3.07, m=5.505, n=0.948, size=10000):
"""
        Helper function. Generate a large pool of gamma draws up front to save on simulation time later.
        """
        self.inf_times = np.random.gamma(i/j, j, size=size)  # shape and scale
        self.detect_times = np.random.gamma(m/n, n, size=size)
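        # np.random.gamma takes (shape, scale): shape = i/j with scale = j gives
        # mean i and variance i*j, so i and m are mean delays and j, n the scales.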
def iter_inf_time(self):
"""
        Helper function. Yield the next inf_time, cycling indefinitely.
"""
from itertools import cycle
for time in cycle(self.inf_times):
yield time
def iter_detect_time(self):
"""
        Helper function. Yield the next detect_time, cycling indefinitely.
"""
from itertools import cycle
for time in cycle(self.detect_times):
yield time
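    # Typical usage of the two generators above (illustrative sketch only):
    #   inf_gen = self.iter_inf_time()
    #   next_infection_delay = next(inf_gen)
    # cycle() recycles the pre-drawn gamma samples instead of re-sampling each time.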
def new_symp_cases(self,num_new_cases:int):
"""
Given number of new cases generated, assign them to symptomatic (S) with probability ps
"""
#repeated Bernoulli trials is a Binomial (assuming independence of development of symptoms)
symp_cases = binom.rvs(n=num_new_cases, p=self.ps)
return symp_cases | [
"scipy.stats.beta.rvs",
"datetime.timedelta",
"pandas.date_range",
"pandas.to_datetime",
"collections.deque",
"numpy.random.random",
"helper_functions.read_in_NNDSS",
"numpy.random.gamma",
"numpy.random.seed",
"pandas.DataFrame",
"itertools.cycle",
"gc.collect",
"pandas.offsets.DateOffset",
... | [((2254, 2299), 'pandas.to_datetime', 'pd.to_datetime', (['start_date'], {'format': '"""%Y-%m-%d"""'}), "(start_date, format='%Y-%m-%d')\n", (2268, 2299), True, 'import pandas as pd\n'), ((3356, 3434), 'helper_functions.read_in_Reff_file', 'read_in_Reff_file', (['self.cases_file_date', 'self.VoC_flag'], {'scenario': 'self.scenario'}), '(self.cases_file_date, self.VoC_flag, scenario=self.scenario)\n', (3373, 3434), False, 'from helper_functions import read_in_NNDSS, read_in_Reff_file\n'), ((15976, 15996), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (15990, 15996), True, 'import numpy as np\n'), ((16113, 16155), 'numpy.zeros', 'np.zeros', ([], {'shape': '(end_time, 3)', 'dtype': 'float'}), '(shape=(end_time, 3), dtype=float)\n', (16121, 16155), True, 'import numpy as np\n'), ((16185, 16210), 'numpy.zeros_like', 'np.zeros_like', (['self.cases'], {}), '(self.cases)\n', (16198, 16210), True, 'import numpy as np\n'), ((18155, 18162), 'collections.deque', 'deque', ([], {}), '()\n', (18160, 18162), False, 'from collections import deque\n'), ((28804, 28816), 'gc.collect', 'gc.collect', ([], {}), '()\n', (28814, 28816), False, 'import gc\n'), ((30501, 30515), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (30513, 30515), True, 'import pandas as pd\n'), ((31159, 31209), 'pandas.date_range', 'pd.date_range', ([], {'start': 'self.start_date', 'periods': 'days'}), '(start=self.start_date, periods=days)\n', (31172, 31209), True, 'import pandas as pd\n'), ((34608, 34643), 'helper_functions.read_in_NNDSS', 'read_in_NNDSS', (['self.cases_file_date'], {}), '(self.cases_file_date)\n', (34621, 34643), False, 'from helper_functions import read_in_NNDSS, read_in_Reff_file\n'), ((38881, 38917), 'numpy.random.gamma', 'np.random.gamma', (['(i / j)', 'j'], {'size': 'size'}), '(i / j, j, size=size)\n', (38896, 38917), True, 'import numpy as np\n'), ((38962, 38998), 'numpy.random.gamma', 'np.random.gamma', (['(m / n)', 'n'], {'size': 'size'}), '(m / n, n, size=size)\n', (38977, 38998), True, 'import numpy as np\n'), ((39156, 39177), 'itertools.cycle', 'cycle', (['self.inf_times'], {}), '(self.inf_times)\n', (39161, 39177), False, 'from itertools import cycle\n'), ((39365, 39389), 'itertools.cycle', 'cycle', (['self.detect_times'], {}), '(self.detect_times)\n', (39370, 39389), False, 'from itertools import cycle\n'), ((39705, 39742), 'scipy.stats.binom.rvs', 'binom.rvs', ([], {'n': 'num_new_cases', 'p': 'self.ps'}), '(n=num_new_cases, p=self.ps)\n', (39714, 39742), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((6998, 7044), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', (['(1)', 'self.symptomatic_detection_prob'], {}), '(1, self.symptomatic_detection_prob)\n', (7008, 7044), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((7089, 7149), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', (['self.current[2]', 'self.symptomatic_detection_prob'], {}), '(self.current[2], self.symptomatic_detection_prob)\n', (7099, 7149), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((7316, 7338), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', (['(1)', 'self.ps'], {}), '(1, self.ps)\n', (7326, 7338), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((7384, 7412), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', (['total_s', 'self.ps'], {}), '(total_s, self.ps)\n', (7394, 7412), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((10199, 10269), 
'scipy.stats.nbinom.rvs', 'nbinom.rvs', ([], {'n': 'k', 'p': '(1 - self.alpha_s * Reff / (self.alpha_s * Reff + k))'}), '(n=k, p=1 - self.alpha_s * Reff / (self.alpha_s * Reff + k))\n', (10209, 10269), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((16947, 16977), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', (['a', '(1 - 1 / (b + 1))'], {}), '(a, 1 - 1 / (b + 1))\n', (16957, 16977), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((17116, 17151), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', (['unobserved_a'], {'p': 'self.qi'}), '(unobserved_a, p=self.qi)\n', (17126, 17151), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((28926, 28951), 'numpy.empty_like', 'np.empty_like', (['self.cases'], {}), '(self.cases)\n', (28939, 28951), True, 'import numpy as np\n'), ((35072, 35100), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020-07-03"""'], {}), "('2020-07-03')\n", (35086, 35100), True, 'import pandas as pd\n'), ((35170, 35198), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020-07-17"""'], {}), "('2020-07-17')\n", (35184, 35198), True, 'import pandas as pd\n'), ((3123, 3171), 'pandas.to_datetime', 'pd.to_datetime', (['forecast_date'], {'format': '"""%Y-%m-%d"""'}), "(forecast_date, format='%Y-%m-%d')\n", (3137, 3171), True, 'import pandas as pd\n'), ((3557, 3604), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020-04-15"""'], {'format': '"""%Y-%m-%d"""'}), "('2020-04-15', format='%Y-%m-%d')\n", (3571, 3604), True, 'import pandas as pd\n'), ((3794, 3841), 'pandas.to_datetime', 'pd.to_datetime', (['"""2021-05-01"""'], {'format': '"""%Y-%m-%d"""'}), "('2021-05-01', format='%Y-%m-%d')\n", (3808, 3841), True, 'import pandas as pd\n'), ((3986, 4033), 'pandas.to_datetime', 'pd.to_datetime', (['"""2021-05-01"""'], {'format': '"""%Y-%m-%d"""'}), "('2021-05-01', format='%Y-%m-%d')\n", (4000, 4033), True, 'import pandas as pd\n'), ((10358, 10428), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', ([], {'n': 'k', 'p': '(1 - self.alpha_a * Reff / (self.alpha_a * Reff + k))'}), '(n=k, p=1 - self.alpha_a * Reff / (self.alpha_a * Reff + k))\n', (10368, 10428), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((34970, 35002), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2020)'}), '(year=2020)\n', (34991, 35002), True, 'import pandas as pd\n'), ((37316, 37333), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (37325, 37333), False, 'from datetime import timedelta\n'), ((4277, 4324), 'pandas.to_datetime', 'pd.to_datetime', (['"""2020-06-01"""'], {'format': '"""%Y-%m-%d"""'}), "('2020-06-01', format='%Y-%m-%d')\n", (4291, 4324), True, 'import pandas as pd\n'), ((11279, 11347), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', ([], {'n': 'k', 'p': '(1 - self.qua_ai * Reff / (self.qua_ai * Reff + k))'}), '(n=k, p=1 - self.qua_ai * Reff / (self.qua_ai * Reff + k))\n', (11289, 11347), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((11393, 11463), 'scipy.stats.nbinom.rvs', 'nbinom.rvs', ([], {'n': 'k', 'p': '(1 - self.alpha_i * Reff / (self.alpha_i * Reff + k))'}), '(n=k, p=1 - self.alpha_i * Reff / (self.alpha_i * Reff + k))\n', (11403, 11463), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((12097, 12111), 'math.ceil', 'ceil', (['inf_time'], {}), '(inf_time)\n', (12101, 12111), False, 'from math import ceil\n'), ((12371, 12379), 'numpy.random.random', 'random', 
([], {}), '()\n', (12377, 12379), False, 'from numpy.random import random\n'), ((12062, 12076), 'math.ceil', 'ceil', (['inf_time'], {}), '(inf_time)\n', (12066, 12076), False, 'from math import ceil\n'), ((8350, 8381), 'math.ceil', 'ceil', (['new_person.infection_time'], {}), '(new_person.infection_time)\n', (8354, 8381), False, 'from math import ceil\n'), ((8678, 8709), 'math.ceil', 'ceil', (['new_person.infection_time'], {}), '(new_person.infection_time)\n', (8682, 8709), False, 'from math import ceil\n'), ((10665, 10679), 'scipy.stats.beta.rvs', 'beta.rvs', (['(2)', '(4)'], {}), '(2, 4)\n', (10673, 10679), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((10782, 10796), 'scipy.stats.beta.rvs', 'beta.rvs', (['(2)', '(2)'], {}), '(2, 2)\n', (10790, 10796), False, 'from scipy.stats import nbinom, erlang, beta, binom, gamma, poisson, beta\n'), ((20366, 20379), 'math.ceil', 'ceil', (['day_end'], {}), '(day_end)\n', (20370, 20379), False, 'from math import ceil\n'), ((20397, 20410), 'math.ceil', 'ceil', (['day_end'], {}), '(day_end)\n', (20401, 20410), False, 'from math import ceil\n'), ((20461, 20474), 'math.ceil', 'ceil', (['day_end'], {}), '(day_end)\n', (20465, 20474), False, 'from math import ceil\n'), ((20501, 20514), 'math.ceil', 'ceil', (['day_end'], {}), '(day_end)\n', (20505, 20514), False, 'from math import ceil\n'), ((12796, 12810), 'math.ceil', 'ceil', (['inf_time'], {}), '(inf_time)\n', (12800, 12810), False, 'from math import ceil\n'), ((14133, 14147), 'math.ceil', 'ceil', (['inf_time'], {}), '(inf_time)\n', (14137, 14147), False, 'from math import ceil\n'), ((17750, 17781), 'math.ceil', 'ceil', (['new_person.detection_time'], {}), '(new_person.detection_time)\n', (17754, 17781), False, 'from math import ceil\n'), ((21636, 21651), 'math.ceil', 'ceil', (['curr_time'], {}), '(curr_time)\n', (21640, 21651), False, 'from math import ceil\n'), ((26546, 26559), 'math.ceil', 'ceil', (['day_inf'], {}), '(day_inf)\n', (26550, 26559), False, 'from math import ceil\n'), ((26577, 26590), 'math.ceil', 'ceil', (['day_inf'], {}), '(day_inf)\n', (26581, 26590), False, 'from math import ceil\n'), ((26645, 26658), 'math.ceil', 'ceil', (['day_inf'], {}), '(day_inf)\n', (26649, 26658), False, 'from math import ceil\n'), ((26685, 26698), 'math.ceil', 'ceil', (['day_inf'], {}), '(day_inf)\n', (26689, 26698), False, 'from math import ceil\n'), ((13997, 14014), 'math.ceil', 'ceil', (['detect_time'], {}), '(detect_time)\n', (14001, 14014), False, 'from math import ceil\n'), ((15462, 15479), 'math.ceil', 'ceil', (['detect_time'], {}), '(detect_time)\n', (15466, 15479), False, 'from math import ceil\n'), ((27862, 27877), 'math.ceil', 'ceil', (['curr_time'], {}), '(curr_time)\n', (27866, 27877), False, 'from math import ceil\n')] |
# encoding: UTF-8
"""
An ATR-RSI combined trading strategy, suitable for 1-minute and 5-minute bars on stock index futures.
Notes:
1. The author makes no guarantee of trading profit; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should first follow the tutorial on www.vnpy.org.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
import datetime
import tushare as ts
import redis
import json
import talib
import numpy as np
from retry import retry
from vnpy.trader.app.ctaStrategy.ctaBase import ENGINETYPE_TRADING
from restclient import GET
import os
import time
from vnpy.trader.vtObject import VtBarData
from vnpy.trader.vtConstant import EMPTY_STRING
from vnpy.trader.app.ctaStrategy.ctaTemplate import (CtaTemplate,
BarManager,
ArrayManager)
########################################################################
class cnBanZhuanStrategyDR(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'CN'
author = u'用Python的交易员'
    # Parameter list: stores the names of the strategy parameters
paramList = ['name',
'className',
'author',
'vtSymbol',
'atrLength',
'fastMaLength',
'slowMaLength',
'rsiLength',
'rsiEntry',
'trailingPercent']
    # Variable list: stores the names of the strategy variables
varList = ['inited',
'trading',
'pos',
'atrValue',
'atrMa',
'rsiValue',
'rsiBuy',
'rsiSell']
def __init__(self, ctaEngine, setting):
fileName = 'param.json'
        try:
            f = open(fileName)  # the Python 2 file() builtin does not exist in Python 3; use open()
        except IOError:
            print('Failed to read the param configuration file, please check')
            raise
        # parse the json file -----------------------------------------------------
        mysetting = json.load(f)
        f.close()
        redisHost = str(mysetting['redisHost'])
redisPort = str(mysetting['redisPort'])
self.r = redis.Redis(host=redisHost, port=int(redisPort), db=8)
self.contract_size = 0
self.slowMaLength = 0
self.fastMaLength = 0
self.margin_percent = 0
self.stop_profit = 0
"""Constructor"""
super(cnBanZhuanStrategyDR, self).__init__(ctaEngine, setting)
        # Note: mutable attributes of the strategy class (usually list, dict, etc.)
        # must be re-created when the strategy is initialised; otherwise data would
        # be shared between strategy instances, risking latent logic errors.
        # Declaring them in the class body is optional (they could all live in
        # __init__); listing them mainly makes the strategy easier to read.
self.last_entry_price = 0
self.pos = 0
self.inited = False
self.trading = False
self.init_data_loaded = False
        # Strategy variables
        self.bar = None  # current bar object
        self.barMinute = EMPTY_STRING  # current minute of the bar
        self.bufferSize = 100  # size of the data buffer
        self.bufferCount = 0  # count of data currently buffered
        self.highArray = np.zeros(self.bufferSize)  # array of bar highs
        self.lowArray = np.zeros(self.bufferSize)  # array of bar lows
        self.closeArray = np.zeros(self.bufferSize)  # array of bar closes
        self.atrCount = 0  # count of ATR values buffered so far
        self.atrArray = np.zeros(self.bufferSize)  # array of ATR values
        self.atrValue = 0  # latest ATR value
        self.atrMa = 0  # moving average of the ATR
        self.no_data = 0
        self.rsiValue = 0  # latest RSI value
        self.rsiBuy = 0  # RSI threshold for opening longs
        self.rsiSell = 0  # RSI threshold for opening shorts
        self.intraTradeHigh = 0  # highest price while holding, for trailing stops
        self.intraTradeLow = 0  # lowest price while holding, for trailing stops
        # self.orderList = []  # list of order IDs
        self.initCapital = 10000.0
        self.initDays = 2
        self.barHour = -1
        self.stop = 0  # whether the previous trade was a take-profit
        self.posType = 0  # intended position mode: 1, 2 or 3
        self.getPos()
        self.upperLimit = None
        self.lowerLimit = None
        # self.longYd = 0  # these four are yesterday/today positions
        # self.longTd = 0
        # self.shortYd = 0
        # self.shortTd = 0
        # self.r.set('sc_wtiusPos', 0)  # initialise positions
        # self.r.set('brent_scusPos', 0)
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' % self.name)
# 初始化RSI入场阈值
# self.rsiBuy = 50 + self.rsiEntry
# self.rsiSell = 50 - self.rsiEntry
# 载入历史数据,并采用回放计算的方式初始化策略数值
# position = self.ctaEngine.query_position()
# print('qry postion: {0}'.format(position))
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.init_data_loaded = True
self.putEvent()
def onStart(self):
"""启动策略(必须由用户继承实现)"""
# self.ctaEngine.test()
self.writeCtaLog(u'%s策略启动' % self.name)
self.putEvent()
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' % self.name)
self.putEvent()
def getPos(self):
"""获取cn仓位"""
cnPosObj = self.banzhuan_query_position()
self.r.set('cnpositionlongYd', cnPosObj.longYd)
self.r.set('cnpositionlongTd', cnPosObj.longTd)
self.r.set('cnpositionshortYd', cnPosObj.shortYd)
self.r.set('cnpositionshortTd', cnPosObj.shortTd)
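        # Yd/Td are the yesterday/today position fields used in Chinese futures
        # (CTP-style) position accounting; they are cached in redis db 8 so other
        # processes can read them. (Interpretation inferred from the field names.)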
# ----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 过滤异常值
pass
# ----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
pass
# ----------------------------------------------------------------------
def onXminBar(self, bar):
"""交易后的账户余额或保证金"""
pass
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
# print(order)
# self.writeCtaLog(u'委托变化推送: %s' % order)
pass
def onTrade(self, trade):
# print('%s %s, price:%s, volume:%.4f, capital:%.2f' %(trade.dt,trade.direction, trade.price, trade.volume,trade.price * trade.volume))
        # self.writeCtaLog(u'trade fill: %s' % trade)
        # emit a status update event
self.putEvent()
| [
"json.load",
"numpy.zeros"
] | [((1761, 1773), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1770, 1773), False, 'import json\n'), ((2745, 2770), 'numpy.zeros', 'np.zeros', (['self.bufferSize'], {}), '(self.bufferSize)\n', (2753, 2770), True, 'import numpy as np\n'), ((2807, 2832), 'numpy.zeros', 'np.zeros', (['self.bufferSize'], {}), '(self.bufferSize)\n', (2815, 2832), True, 'import numpy as np\n'), ((2871, 2896), 'numpy.zeros', 'np.zeros', (['self.bufferSize'], {}), '(self.bufferSize)\n', (2879, 2896), True, 'import numpy as np\n'), ((2978, 3003), 'numpy.zeros', 'np.zeros', (['self.bufferSize'], {}), '(self.bufferSize)\n', (2986, 3003), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from models.embedding import BertLSTMCNNForTripletNet
from transformers import BertTokenizer
import torch, json
import numpy as np
print('Loading precomputed embeddings...')
res = np.loadtxt('output/embedding.txt')
print('Embeddings loaded.')
BERT_MODEL_PATH = "output/ckpts"
BERT_VOCAB_PATH = "output/ckpts/vocab.txt"
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH)
model = BertLSTMCNNForTripletNet.from_pretrained(BERT_MODEL_PATH)
model.eval()  # inference only: disable dropout
max_seq_len = 500
texts = []
f1 = open('datasets/all/train.txt', 'r', encoding='utf-8')
for line in f1:
x = json.loads(line)
if x['A'] not in texts:
texts.append(x['A'])
if x['B'] not in texts:
texts.append(x['B'])
if x['C'] not in texts:
texts.append(x['C'])
f1.close()
print('Finished loading %d unique texts.' % len(texts))
sentenceB = ''
idx = 0
while sentenceB != 'q':
print('Please input a case.')
    sentenceB = input()
    if sentenceB == 'q':
        break
    if len(sentenceB) > max_seq_len:
        sentenceB = sentenceB[-max_seq_len:]
text_dict = tokenizer.encode_plus(sentenceB, add_special_tokens=True, return_attention_mask=True)
input_ids = torch.tensor(text_dict['input_ids']).unsqueeze(0)
token_type_ids = torch.tensor(text_dict['token_type_ids']).unsqueeze(0)
attention_mask = torch.tensor(text_dict['attention_mask']).unsqueeze(0)
resB = model(input_ids, attention_mask_a=attention_mask, token_type_ids_a=token_type_ids)
embbedingB = resB.detach().numpy()
    dis = []
    for embedding in res:
        dis.append(np.linalg.norm(embedding - embbedingB))  # L2 distance to the query
f2 = open("output/top_{}.txt".format(idx), 'w', encoding='utf-8')
index = sorted(range(len(dis)), key=lambda x: dis[x])
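    # sorting the indices by distance; equivalent to np.argsort(dis)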
top = 0
for i in index:
if top == 50:
break
#print(texts[i], dis[i], '\n')
f2.write(texts[i])
f2.write('\t')
f2.write(str(dis[i]))
f2.write('\n')
top += 1
f2.close()
idx += 1
| [
"json.loads",
"transformers.BertTokenizer.from_pretrained",
"models.embedding.BertLSTMCNNForTripletNet.from_pretrained",
"torch.tensor",
"numpy.linalg.norm",
"numpy.loadtxt"
] | [((211, 245), 'numpy.loadtxt', 'np.loadtxt', (['"""output/embedding.txt"""'], {}), "('output/embedding.txt')\n", (221, 245), True, 'import numpy as np\n'), ((345, 390), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""output/ckpts"""'], {}), "('output/ckpts')\n", (374, 390), False, 'from transformers import BertTokenizer\n'), ((399, 456), 'models.embedding.BertLSTMCNNForTripletNet.from_pretrained', 'BertLSTMCNNForTripletNet.from_pretrained', (['BERT_MODEL_PATH'], {}), '(BERT_MODEL_PATH)\n', (439, 456), False, 'from models.embedding import BertLSTMCNNForTripletNet\n'), ((572, 588), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (582, 588), False, 'import torch, json\n'), ((1116, 1152), 'torch.tensor', 'torch.tensor', (["text_dict['input_ids']"], {}), "(text_dict['input_ids'])\n", (1128, 1152), False, 'import torch, json\n'), ((1187, 1228), 'torch.tensor', 'torch.tensor', (["text_dict['token_type_ids']"], {}), "(text_dict['token_type_ids'])\n", (1199, 1228), False, 'import torch, json\n'), ((1263, 1304), 'torch.tensor', 'torch.tensor', (["text_dict['attention_mask']"], {}), "(text_dict['attention_mask'])\n", (1275, 1304), False, 'import torch, json\n'), ((1574, 1612), 'numpy.linalg.norm', 'np.linalg.norm', (['(embedding - embbedingB)'], {}), '(embedding - embbedingB)\n', (1588, 1612), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.utils.data import DataLoader
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def forward(model, dataset, batch_size):
"""Compute representations for input in chunks."""
outputs = []
labels = []
model.eval()
loader = DataLoader(dataset,
                        batch_size=batch_size,
                        shuffle=False,  # don't shuffle: labels are consumed in dataset order by the cluster update
num_workers=1)
with torch.no_grad(): # prevents computation graph from being made
for batch_idx, (inputs, labels_) in enumerate(loader):
inputs = inputs.to(device)
output = model(inputs)
outs = output.data
outputs.append(outs.cpu().numpy())
labels.append(labels_.cpu().numpy())
return np.vstack(outputs), np.hstack(labels)
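# Illustrative usage (hypothetical model/dataset objects, not part of this module):
#   reps, labels = forward(model, dataset, batch_size=256)
#   # reps: (N, d) stacked representations; labels: (N,) in dataset order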
| [
"numpy.hstack",
"torch.cuda.is_available",
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | [((387, 459), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(1)'}), '(dataset, batch_size=batch_size, shuffle=False, num_workers=1)\n', (397, 459), False, 'from torch.utils.data import DataLoader\n'), ((132, 157), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (155, 157), False, 'import torch\n'), ((614, 629), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (627, 629), False, 'import torch\n'), ((952, 970), 'numpy.vstack', 'np.vstack', (['outputs'], {}), '(outputs)\n', (961, 970), True, 'import numpy as np\n'), ((972, 989), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (981, 989), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import flopy
import gsflow
import os
import numpy as np
# from flopy.discretization import StructuredGrid
def start_tag(f, tag, indent_level, indent_char=" "):
s = indent_level * indent_char + tag
indent_level += 1
f.write(s + "\n")
return indent_level
def end_tag(f, tag, indent_level, indent_char=" "):
indent_level -= 1
s = indent_level * indent_char + tag
f.write(s + "\n")
return indent_level
class Mfvtk(object):
"""
Generate vtk files for modflow input and output
"""
def __init__(
self,
mf=None,
vtkname="mf_",
out_folder=None,
mf_pkg=[],
all=True,
shared_vertex=True,
ibound_filter=True,
):
if out_folder:
vtk_nm = os.path.join(out_folder, vtkname)
else:
vtk_nm = os.path.join(os.getcwd(), vtkname)
self.vtkname = vtk_nm
self.mf = mf
self.all = all
if self.all:
self.mf_pkg = mf.get_package_list()
else:
self.mf_pkg = mf_pkg
self.par3D = ["UPW"]
self.par2D = ["UZF"]
self.par1D = ["SFR", "HFB"]
self.parPoints = ["WEL", "HOB"]
self.vtk_objects = {}
self.shared_vertex = shared_vertex
self.ibound_filter = ibound_filter
def generate_2d_vtk(self):
"""
        Build Vtk objects for the 2D (nrow x ncol) arrays of the selected MODFLOW packages.
:return:
"""
mf2d = self.__get_dummy_2d_model()
for pkg in self.mf_pkg:
if pkg in self.par2D:
pkg_ = self.mf.get_package(pkg)
for dataset in pkg_.data_list:
if hasattr(dataset, "array"):
arr = dataset.array
if not (hasattr(arr, "shape")):
continue
shp = (mf2d.nrow, mf2d.ncol)
if arr.shape == shp:
if not (pkg in self.vtk_objects.keys()):
file_name = self.vtkname + "_" + pkg + ".vtu"
obj = Vtk(file_name, mf2d)
self.vtk_objects[pkg] = obj
arr3d = np.zeros((1, mf2d.nrow, mf2d.ncol))
arr3d[0, :, :] = arr
nm = dataset.name
self.vtk_objects[pkg].add_array(nm, arr3d)
def generate_1d_vtk(self):
"""
        Build Vtk objects for list-based package data of the selected MODFLOW packages
        (currently assumes the package exposes reach_data, like SFR).
:return:
"""
for pkg in self.mf_pkg:
if pkg in self.par1D:
pkg_ = self.mf.get_package(pkg)
ibound3d = np.zeros_like(self.mf.bas6.ibound.array)
rows = pkg_.reach_data["i"]
cols = pkg_.reach_data["j"]
lays = pkg_.reach_data["k"]
ibound3d[lays, rows, cols] = 1
mf3d = self.__get_dummy_3d_model(ibound=ibound3d)
columns = pkg_.reach_data.dtype.names
for field in columns:
arr3d = np.zeros((mf3d.nlay, mf3d.nrow, mf3d.ncol))
arr3d[lays, rows, cols] = pkg_.reach_data[field]
if not (pkg in self.vtk_objects.keys()):
file_name = self.vtkname + "_" + pkg + ".vtu"
obj = Vtk(file_name, mf3d)
self.vtk_objects[pkg] = obj
nm = field
self.vtk_objects[pkg].add_array(nm, arr3d)
def __get_dummy_2d_model(self, ibound=None):
"""
        Generate a dummy single-layer flopy model object to be used for
        plotting 2D data.
:return:
"""
dis = self.mf.dis
mf2 = flopy.modflow.Modflow("xx")
nrow = dis.nrow
ncol = dis.ncol
delc = dis.delc
delr = dis.delr
top = dis.top.array
botm = top - 0.01
dis2 = flopy.modflow.ModflowDis(
mf2,
nlay=1,
nrow=nrow,
ncol=ncol,
nper=1,
delr=delr,
delc=delc,
top=top,
botm=botm,
)
if not (ibound is None):
ib = ibound
else:
ib = self.mf.bas6.ibound[0, :, :]
bas = flopy.modflow.ModflowBas(mf2, ibound=ib, strt=1)
return mf2
def __get_dummy_3d_model(self, ibound=None):
"""
        Generate a dummy full-grid flopy model object to be used for
        plotting 3D data.
:return:
"""
dis = self.mf.dis
mf3 = flopy.modflow.Modflow("xx")
nrow = dis.nrow
ncol = dis.ncol
nlay = dis.nlay
delc = dis.delc
delr = dis.delr
top = dis.top.array
botm = dis.botm.array
dis3 = flopy.modflow.ModflowDis(
mf3,
nlay=nlay,
nrow=nrow,
ncol=ncol,
nper=1,
delr=delr,
delc=delc,
top=top,
botm=botm,
)
if not (ibound is None):
ib = ibound
else:
ib = self.mf.bas6.ibound.array
bas = flopy.modflow.ModflowBas(mf3, ibound=ib, strt=1)
return mf3
def generate_3d_vtk(self):
"""
        Build Vtk objects for the full 3D (nlay x nrow x ncol) arrays of the selected MODFLOW packages.
:return:
"""
for pkg in self.mf_pkg:
if pkg in self.par3D:
pkg_ = self.mf.get_package(pkg)
for dataset in pkg_.data_list:
if hasattr(dataset, "array"):
arr = dataset.array
if not (hasattr(arr, "shape")):
continue
if arr.shape == self.mf.modelgrid.shape:
if not (pkg in self.vtk_objects.keys()):
file_name = self.vtkname + "_" + pkg + ".vtu"
obj = Vtk(file_name, self.mf)
self.vtk_objects[pkg] = obj
nm = dataset.name[0]
self.vtk_objects[pkg].add_array(nm, arr)
@staticmethod
def mf_to_vtk(
vtkname="mf_",
mfname=None,
mf_pkg=[],
all=True,
out_folder=None,
shared_vertex=True,
ibound_filter=True,
):
"""
        :param vtkname: base name for the vtk output files
        :param mfname: modflow name file
        :param mf_pkg: a list of packages to visualize
        :param all: convert all of the model's inputs and outputs
:return:
"""
        # load the model
load_only = None
if len(mf_pkg) > 0:
load_only = ["DIS", "BAS6"]
for pkg in mf_pkg:
if pkg in ["cbc", "hds", "sfr.out"]:
continue
if pkg in load_only:
continue
load_only.append(pkg)
if not (os.path.isfile(mfname)):
raise ValueError("{} does not exist...!".format(str(mfname)))
mf = flopy.modflow.Modflow.load(mfname, load_only=load_only)
# Write vtk for each package
if out_folder:
vtk_nm = os.path.join(out_folder, vtkname)
else:
vtk_nm = os.path.join(os.getcwd(), vtkname)
mfvtk = Mfvtk(
mf=mf,
vtkname=vtk_nm,
mf_pkg=[],
all=all,
shared_vertex=shared_vertex,
ibound_filter=ibound_filter,
)
mfvtk.write_vtk()
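    # Illustrative call (hypothetical file names):
    #   Mfvtk.mf_to_vtk(vtkname="model_", mfname="model.nam", out_folder="vtk_out")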
def write_vtk(self):
self.generate_3d_vtk()
self.generate_2d_vtk()
self.generate_1d_vtk()
# mfvtk.generate_0d_vtk()
self.__write()
def __write(self):
for obj in self.vtk_objects.keys():
shared_vertex = self.shared_vertex
ibound_filter = self.ibound_filter
self.vtk_objects[obj].write(
shared_vertex=shared_vertex, ibound_filter=ibound_filter
)
class Gsflowvtk:
def __init__(
self,
gs=None,
vtkname="gs_",
mf_pkg=[],
mfall=True,
out_folder=None,
shared_vertex=True,
ibound_filter=True,
):
if out_folder:
vtk_nm = os.path.join(out_folder, vtkname)
else:
vtk_nm = os.path.join(os.getcwd(), vtkname)
self.vtkname = vtk_nm
self.gs = gs
self.mfall = mfall
self.mf_pkg = mf_pkg
self.out_folder = out_folder
self.vtk_objects = {}
self.shared_vertex = shared_vertex
self.ibound_filter = ibound_filter
@staticmethod
def gsflow_to_vtk(
vtkname="gs_",
control_file=None,
mf_pkg=[],
mfall=True,
out_folder=None,
shared_vertex=True,
ibound_filter=True,
):
if len(mf_pkg) == 0:
load_only = None
else:
load_only = mf_pkg
gs = gsflow.GsflowModel.load_from_file(
control_file, mf_load_only=load_only
)
gsfv = Gsflowvtk(
gs=gs,
vtkname=vtkname,
mf_pkg=mf_pkg,
mfall=mfall,
out_folder=out_folder,
shared_vertex=shared_vertex,
ibound_filter=ibound_filter,
)
gsfv.write_vtk()
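    # Illustrative call (hypothetical file name):
    #   Gsflowvtk.gsflow_to_vtk(control_file="gsflow.control", out_folder="vtk_out")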
def write_vtk(self):
mf = self.gs.mf
prms = self.gs.prms
if mf:
mfv = Mfvtk(
mf=mf,
vtkname=self.vtkname,
mf_pkg=self.mf_pkg,
all=self.mfall,
out_folder=self.out_folder,
shared_vertex=self.shared_vertex,
ibound_filter=self.ibound_filter,
)
mfv.write_vtk()
if prms:
self.write_prms_vtk()
def write_prms_vtk(self):
# dummy 2d modflow
gs = self.gs
dis = gs.mf.dis
mf2 = flopy.modflow.Modflow("xx")
nrow = dis.nrow
ncol = dis.ncol
delc = dis.delc
delr = dis.delr
top = dis.top.array
botm = top - 0.01
dis2 = flopy.modflow.ModflowDis(
mf2,
nlay=1,
nrow=nrow,
ncol=ncol,
nper=1,
delr=delr,
delc=delc,
top=top,
botm=botm,
)
hru_type = gs.prms.parameters.get_record("hru_type")
        # hru_type doubles as the ibound array so inactive HRUs are filtered out
bas = flopy.modflow.ModflowBas(
mf2, ibound=hru_type.values.reshape(nrow, ncol), strt=1
)
nm = self.vtkname + "_prms.vtu"
vtkfile = Vtk(nm, mf2)
# add 2d data
all_prms = dict()
        # use gs directly; a local named `gsflow` would shadow the imported module
        nhru = gs.prms.parameters.get_record("nhru").values[0]
        for param in gs.prms.parameters.record_names:
            par = gs.prms.parameters.get_record(param)
if par.section == "Dimensions":
continue
if "nhru" in par.dimensions_names:
if par.ndim > 1:
other_dim = list(np.copy(par.dims))
other_dim.remove(nhru)
parvalues = par.values.reshape((other_dim[0], nhru))
for ipar in range(other_dim[0]):
ivar = parvalues[ipar, :]
ivar = ivar.reshape(nrow, ncol) # np.flipud()
var = np.zeros((1, nrow, ncol))
                    var[0, :, :] = ivar
                    nm = par.name + "_" + str(ipar + 1)
                    all_prms[nm] = var
                    vtkfile.add_array(nm, var)  # per-index name so slices don't overwrite each other
else:
parvalues = par.values.reshape(nrow, ncol)
var = np.zeros((1, nrow, ncol))
var[0, :, :] = parvalues
all_prms[param] = var
vtkfile.add_array(param, var)
vtkfile.write(
shared_vertex=self.shared_vertex, ibound_filter=self.ibound_filter
)
class Vtk(object):
"""
Support for writing a model to a vtk file
"""
def __init__(self, output_filename, model, verbose=None):
if verbose is None:
verbose = model.verbose
self.verbose = verbose
self.output_filename = output_filename
self.model = model
self.modelgrid = model.modelgrid
self.shape = (
self.modelgrid.nlay,
self.modelgrid.nrow,
self.modelgrid.ncol,
)
self.arrays = {}
return
def add_array(self, name, a):
assert a.shape == self.shape
self.arrays[name] = a
return
def write(self, shared_vertex=False, ibound_filter=False, htop=None):
"""
Parameters
----------
shared_vertex : bool
Make a smoothed representation of model layers by calculating
an interpolated z value for the cell corner vertices.
ibound_filter : bool
Use the ibound array in the basic package of the model that
was passed in to exclude cells in the vtk file that have an
ibound value of zero.
        htop : ndarray
            This array must be of shape (nlay, nrow, ncol). If htop is passed
then these htop values will be used to set the z elevation for the
cell tops. This makes it possible to show cells based on the
saturated thickness. htop should be calculated by the user as the
minimum of the cell top and the head and the maximum of the cell
bottom and the head.
"""
output_filename = self.output_filename
assert output_filename.lower().endswith(".vtu")
if os.path.exists(output_filename):
if self.verbose:
print("removing existing vtk file: " + output_filename)
os.remove(output_filename)
indent_level = 0
if self.verbose:
print("writing vtk file")
f = open(self.output_filename, "w")
ibound = None
if ibound_filter:
ibound = self.modelgrid.idomain
dis = self.model.dis
z = np.vstack(
[dis.top.array.reshape(1, dis.nrow, dis.ncol), dis.botm.array]
)
        top = z[:-1]
        if shared_vertex:
            # NOTE: a smoothed shared-vertex representation is not implemented
            # yet; this branch currently builds the same connectivity as below.
            verts, iverts = get_3d_vertex_connectivity(
                self.model.modelgrid, top, ibound=ibound
            )
        else:
            if htop is not None:
                top = htop
            verts, iverts = get_3d_vertex_connectivity(
                self.model.modelgrid, top, ibound=ibound
            )
ncells = len(iverts)
npoints = verts.shape[0]
if self.verbose:
s = "Number of point is {}\n " "Number of cells is {}\n".format(
npoints, ncells
)
print(s)
# xml
s = '<?xml version="1.0"?>'
f.write(s + "\n")
indent_level = start_tag(
f, '<VTKFile type="UnstructuredGrid">', indent_level
)
# unstructured grid
indent_level = start_tag(f, "<UnstructuredGrid>", indent_level)
# piece
s = '<Piece NumberOfPoints="{}" ' 'NumberOfCells="{}">'.format(
npoints, ncells
)
indent_level = start_tag(f, s, indent_level)
# points
s = "<Points>"
indent_level = start_tag(f, s, indent_level)
s = '<DataArray type="Float64" NumberOfComponents="3">'
indent_level = start_tag(f, s, indent_level)
# assert (isinstance(self.modelgrid, StructuredGrid))
z = np.vstack(
[
self.modelgrid.top.reshape(
1, self.modelgrid.nrow, self.modelgrid.ncol
),
self.modelgrid.botm,
]
)
for row in verts:
s = indent_level * " " + "{} {} {} \n".format(*row)
f.write(s)
s = "</DataArray>"
indent_level = end_tag(f, s, indent_level)
s = "</Points>"
indent_level = end_tag(f, s, indent_level)
# cells
s = "<Cells>"
indent_level = start_tag(f, s, indent_level)
s = '<DataArray type="Int32" Name="connectivity">'
indent_level = start_tag(f, s, indent_level)
for row in iverts:
s = indent_level * " " + " ".join([str(i) for i in row]) + "\n"
f.write(s)
s = "</DataArray>"
indent_level = end_tag(f, s, indent_level)
s = '<DataArray type="Int32" Name="offsets">'
indent_level = start_tag(f, s, indent_level)
icount = 0
for row in iverts:
icount += len(row)
s = indent_level * " " + "{} \n".format(icount)
f.write(s)
s = "</DataArray>"
indent_level = end_tag(f, s, indent_level)
s = '<DataArray type="UInt8" Name="types">'
indent_level = start_tag(f, s, indent_level)
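        # every cell is written as VTK cell type 11 (VTK_VOXEL, an axis-aligned
        # hexahedron defined by the 8 points appended above)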
for row in iverts:
s = indent_level * " " + "{} \n".format(11)
f.write(s)
s = "</DataArray>"
indent_level = end_tag(f, s, indent_level)
s = "</Cells>"
indent_level = end_tag(f, s, indent_level)
# add cell data
s = '<CellData Scalars="scalars">'
indent_level = start_tag(f, s, indent_level)
self._write_data_array(f, indent_level, "top", z[0:-1], ibound)
for name, a in self.arrays.items():
self._write_data_array(f, indent_level, name, a, ibound)
s = "</CellData>"
indent_level = end_tag(f, s, indent_level)
# end piece
indent_level = end_tag(f, "</Piece>", indent_level)
# end unstructured grid
indent_level = end_tag(f, "</UnstructuredGrid>", indent_level)
# end xml
indent_level = end_tag(f, "</VTKFile>", indent_level)
# end file
f.close()
return
def _write_data_array(self, f, indent_level, name, a, ibound):
"""
Write a numpy array to the vtk file
"""
# header tag
s = '<DataArray type="Float64" Name="{}" format="ascii">'.format(name)
indent_level = start_tag(f, s, indent_level)
# data
nlay = a.shape[0]
# combine ibound with laycbd when model supports laycbd
if (
ibound is not None
and hasattr(self.model, "dis")
and hasattr(self.model.dis, "laycbd")
):
cbd = np.where(self.model.dis.laycbd.array > 0)
ibound = np.insert(
ibound, cbd[0] + 1, ibound[cbd[0], :, :], axis=0
)
for k in range(nlay):
s = indent_level * " "
f.write(s)
if ibound is None:
ak = a[k].flatten()
else:
idx = ibound[k] != 0
ak = a[k][idx].flatten()
for v in ak:
s = " {}".format(v)
f.write(s)
f.write("\n")
# ending tag
s = "</DataArray>"
indent_level = end_tag(f, s, indent_level)
return
# temporary patch for vtk after flopy deprecation of sr
def get_3d_vertex_connectivity(modelgrid, top, ibound=None):
if ibound is None:
ncells = modelgrid.nnodes
ibound = np.ones(modelgrid.shape, dtype=int)
else:
ncells = (ibound != 0).sum()
npoints = ncells * 8
verts = np.empty((npoints, 3), dtype=float)
iverts = []
ipoint = 0
for k in range(modelgrid.nlay):
for i in range(modelgrid.nrow):
for j in range(modelgrid.ncol):
if ibound[k, i, j] == 0:
continue
ivert = []
pts = modelgrid._cell_vert_list(i, j)
                # _cell_vert_list returns a closed ring: the 5th vertex repeats the 1st
                pt0, pt1, pt2, pt3, _ = pts
z = modelgrid.botm[k, i, j]
verts[ipoint, 0:2] = np.array(pt1)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt2)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt0)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt3)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
z = top[k, i, j]
verts[ipoint, 0:2] = np.array(pt1)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt2)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt0)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt3)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
iverts.append(ivert)
return verts, iverts
| [
"flopy.modflow.Modflow.load",
"flopy.modflow.ModflowDis",
"numpy.array",
"os.remove",
"gsflow.prms.parameters.get_record",
"os.path.exists",
"flopy.modflow.Modflow",
"numpy.where",
"numpy.empty",
"numpy.ones",
"os.path.isfile",
"gsflow.GsflowModel.load_from_file",
"numpy.insert",
"numpy.co... | [((19616, 19651), 'numpy.empty', 'np.empty', (['(npoints, 3)'], {'dtype': 'float'}), '((npoints, 3), dtype=float)\n', (19624, 19651), True, 'import numpy as np\n'), ((3805, 3832), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', (['"""xx"""'], {}), "('xx')\n", (3826, 3832), False, 'import flopy\n'), ((3998, 4112), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['mf2'], {'nlay': '(1)', 'nrow': 'nrow', 'ncol': 'ncol', 'nper': '(1)', 'delr': 'delr', 'delc': 'delc', 'top': 'top', 'botm': 'botm'}), '(mf2, nlay=1, nrow=nrow, ncol=ncol, nper=1, delr=\n delr, delc=delc, top=top, botm=botm)\n', (4022, 4112), False, 'import flopy\n'), ((4358, 4406), 'flopy.modflow.ModflowBas', 'flopy.modflow.ModflowBas', (['mf2'], {'ibound': 'ib', 'strt': '(1)'}), '(mf2, ibound=ib, strt=1)\n', (4382, 4406), False, 'import flopy\n'), ((4640, 4667), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', (['"""xx"""'], {}), "('xx')\n", (4661, 4667), False, 'import flopy\n'), ((4861, 4978), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['mf3'], {'nlay': 'nlay', 'nrow': 'nrow', 'ncol': 'ncol', 'nper': '(1)', 'delr': 'delr', 'delc': 'delc', 'top': 'top', 'botm': 'botm'}), '(mf3, nlay=nlay, nrow=nrow, ncol=ncol, nper=1, delr\n =delr, delc=delc, top=top, botm=botm)\n', (4885, 4978), False, 'import flopy\n'), ((5221, 5269), 'flopy.modflow.ModflowBas', 'flopy.modflow.ModflowBas', (['mf3'], {'ibound': 'ib', 'strt': '(1)'}), '(mf3, ibound=ib, strt=1)\n', (5245, 5269), False, 'import flopy\n'), ((7135, 7190), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['mfname'], {'load_only': 'load_only'}), '(mfname, load_only=load_only)\n', (7161, 7190), False, 'import flopy\n'), ((9043, 9114), 'gsflow.GsflowModel.load_from_file', 'gsflow.GsflowModel.load_from_file', (['control_file'], {'mf_load_only': 'load_only'}), '(control_file, mf_load_only=load_only)\n', (9076, 9114), False, 'import gsflow\n'), ((10019, 10046), 'flopy.modflow.Modflow', 'flopy.modflow.Modflow', (['"""xx"""'], {}), "('xx')\n", (10040, 10046), False, 'import flopy\n'), ((10213, 10327), 'flopy.modflow.ModflowDis', 'flopy.modflow.ModflowDis', (['mf2'], {'nlay': '(1)', 'nrow': 'nrow', 'ncol': 'ncol', 'nper': '(1)', 'delr': 'delr', 'delc': 'delc', 'top': 'top', 'botm': 'botm'}), '(mf2, nlay=1, nrow=nrow, ncol=ncol, nper=1, delr=\n delr, delc=delc, top=top, botm=botm)\n', (10237, 10327), False, 'import flopy\n'), ((13846, 13877), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (13860, 13877), False, 'import os\n'), ((19496, 19531), 'numpy.ones', 'np.ones', (['modelgrid.shape'], {'dtype': 'int'}), '(modelgrid.shape, dtype=int)\n', (19503, 19531), True, 'import numpy as np\n'), ((818, 851), 'os.path.join', 'os.path.join', (['out_folder', 'vtkname'], {}), '(out_folder, vtkname)\n', (830, 851), False, 'import os\n'), ((7022, 7044), 'os.path.isfile', 'os.path.isfile', (['mfname'], {}), '(mfname)\n', (7036, 7044), False, 'import os\n'), ((7273, 7306), 'os.path.join', 'os.path.join', (['out_folder', 'vtkname'], {}), '(out_folder, vtkname)\n', (7285, 7306), False, 'import os\n'), ((8340, 8373), 'os.path.join', 'os.path.join', (['out_folder', 'vtkname'], {}), '(out_folder, vtkname)\n', (8352, 8373), False, 'import os\n'), ((10946, 10986), 'gsflow.prms.parameters.get_record', 'gsflow.prms.parameters.get_record', (['param'], {}), '(param)\n', (10979, 10986), False, 'import gsflow\n'), ((13992, 14018), 'os.remove', 'os.remove', (['output_filename'], {}), '(output_filename)\n', (14001, 14018), False, 
'import os\n'), ((18668, 18709), 'numpy.where', 'np.where', (['(self.model.dis.laycbd.array > 0)'], {}), '(self.model.dis.laycbd.array > 0)\n', (18676, 18709), True, 'import numpy as np\n'), ((18731, 18790), 'numpy.insert', 'np.insert', (['ibound', '(cbd[0] + 1)', 'ibound[cbd[0], :, :]'], {'axis': '(0)'}), '(ibound, cbd[0] + 1, ibound[cbd[0], :, :], axis=0)\n', (18740, 18790), True, 'import numpy as np\n'), ((900, 911), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (909, 911), False, 'import os\n'), ((2743, 2783), 'numpy.zeros_like', 'np.zeros_like', (['self.mf.bas6.ibound.array'], {}), '(self.mf.bas6.ibound.array)\n', (2756, 2783), True, 'import numpy as np\n'), ((7355, 7366), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7364, 7366), False, 'import os\n'), ((8422, 8433), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8431, 8433), False, 'import os\n'), ((10818, 10859), 'gsflow.prms.parameters.get_record', 'gsflow.prms.parameters.get_record', (['"""nhru"""'], {}), "('nhru')\n", (10851, 10859), False, 'import gsflow\n'), ((20084, 20097), 'numpy.array', 'np.array', (['pt1'], {}), '(pt1)\n', (20092, 20097), True, 'import numpy as np\n'), ((20238, 20251), 'numpy.array', 'np.array', (['pt2'], {}), '(pt2)\n', (20246, 20251), True, 'import numpy as np\n'), ((20392, 20405), 'numpy.array', 'np.array', (['pt0'], {}), '(pt0)\n', (20400, 20405), True, 'import numpy as np\n'), ((20546, 20559), 'numpy.array', 'np.array', (['pt3'], {}), '(pt3)\n', (20554, 20559), True, 'import numpy as np\n'), ((20734, 20747), 'numpy.array', 'np.array', (['pt1'], {}), '(pt1)\n', (20742, 20747), True, 'import numpy as np\n'), ((20888, 20901), 'numpy.array', 'np.array', (['pt2'], {}), '(pt2)\n', (20896, 20901), True, 'import numpy as np\n'), ((21042, 21055), 'numpy.array', 'np.array', (['pt0'], {}), '(pt0)\n', (21050, 21055), True, 'import numpy as np\n'), ((21196, 21209), 'numpy.array', 'np.array', (['pt3'], {}), '(pt3)\n', (21204, 21209), True, 'import numpy as np\n'), ((3150, 3193), 'numpy.zeros', 'np.zeros', (['(mf3d.nlay, mf3d.nrow, mf3d.ncol)'], {}), '((mf3d.nlay, mf3d.nrow, mf3d.ncol))\n', (3158, 3193), True, 'import numpy as np\n'), ((11851, 11876), 'numpy.zeros', 'np.zeros', (['(1, nrow, ncol)'], {}), '((1, nrow, ncol))\n', (11859, 11876), True, 'import numpy as np\n'), ((11173, 11190), 'numpy.copy', 'np.copy', (['par.dims'], {}), '(par.dims)\n', (11180, 11190), True, 'import numpy as np\n'), ((11512, 11537), 'numpy.zeros', 'np.zeros', (['(1, nrow, ncol)'], {}), '((1, nrow, ncol))\n', (11520, 11537), True, 'import numpy as np\n'), ((2277, 2312), 'numpy.zeros', 'np.zeros', (['(1, mf2d.nrow, mf2d.ncol)'], {}), '((1, mf2d.nrow, mf2d.ncol))\n', (2285, 2312), True, 'import numpy as np\n')] |
"""Módulo para metrificação de ativos e retornos."""
import numpy as np
import pandas as pd
from scipy import stats
def sharpe_ratio(returns, risk_free=0, time_scale=252):
"""
Essa função, a partir da definição do parâmetro de retorno, fornece o sharpe ratio do ativo, com base na média histórica e desvio padrão dos retornos.
O risk free considerado é nulo.
Args:
returns (pd.series): série com o retorno do ativo.
risk_free (float): risk free utilizado para cálculo do sharpe ratio.
time_scale (int): fator de escala do sharpe ratio, que é o número de amostras em um ano. Caso fosse uma série temporal diária: 252; série temporal mensal: 12
Returns:
float: índice de sharpe do ativo.
"""
expected_returns = np.mean(returns)
risk = np.std(returns)
sharpe = (expected_returns * time_scale - risk_free) / (risk * np.sqrt(time_scale))
return sharpe
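# In symbols, with k = time_scale: Sharpe = (k*mean(r) - rf) / (std(r)*sqrt(k)),
# i.e. annualised mean excess return over annualised volatility.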
def beta(returns, benchmark):
"""
Essa função, a partir do fornecimento dos retornos do ativo e do benchmark, calcula o beta do ativo.
Args:
returns (pd.Series): série com o retorno do ativo.
benchmark (pd.Series): série com o retorno do benchmark.
Returns:
float: Beta do ativo
"""
assert returns.shape[0] == benchmark.shape[0], "Séries temporais com dimensões diferentes"
cov = np.cov(returns, benchmark)[0][1]
benchmark_vol = np.var(benchmark)
return cov / benchmark_vol
def capm(returns, market_returns, risk_free):
"""
Essa função, com o fornecimento dos retornos de um portfólio ou ativo, dos retornos do mercado e da retorno sem risco,
calcula o retorno esperado pela abordagem CAPM. Essa abordagem considera o mercado (benchmark) e as relações com os ativos
como parâmetro para estimar o retorno esperado.
Args:
returns (pd.Series ou np.array): vetor de retornos
market_returns (pd.Series ou np.array): vetor de retornos do mercado ou benchmark
risk_free (float): retorno livre de risco
Returns:
float: retorno esperado pela abordagem CAPM
"""
beta = beta(returns, market_returns)
expected_market_returns = market_returns.mean()
expected_returns = risk_free + beta * (expected_market_returns - risk_free)
return expected_returns
def alpha(start_price, end_price, dividends):
"""
Essa função, com o fornecimento do preço final, dos dividendos por ação e do preço inicial, a calcula o alfa de um ativo.
Args:
start_price (float): preço inicial.
end_price (float): preço final.
dividends (float): dividendos por ação.
Returns:
float: alpha do ativo
"""
return (end_price + dividends - start_price) / start_price
def drawdown(returns):
"""
Calcula o drawdown percentual para uma série de retornos.
Args:
returns (pd.Series): série de retornos para a qual será calculado o drawdown.
Returns:
pd.Series: uma série com os valores percentuais do Drawdown.
"""
cum_returns = (1 + returns).cumprod()
peeks = cum_returns.cummax()
drawdowns = pd.Series((cum_returns/peeks - 1)*100,
name='Drawdown')
return drawdowns
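# Worked definition: with V_t = prod_{s<=t}(1 + r_s), drawdown_t = (V_t / max_{s<=t} V_s - 1) * 100.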
def rolling_beta(returns, benchmark, window=60):
"""
Calcula o beta móvel para um ativo e um benchmark de referência, na forma de séries de retornos.
Args:
returns (array): série de retornos para o qual o beta será calculado.
benchmark (array): série de retornos para usar de referência no cálculo do beta.
window (int): janela móvel para calcular o beta ao longo do tempo.
Returns:
pd.Series: uma série com os valores do Beta para os últimos `window` dias.
A série não possui os `window` primeiros dias.
"""
rolling_beta = pd.Series([beta(returns[i-window:i], benchmark[i-window:i])
for i in range(window, len(returns))], index=returns[window:].index)
return rolling_beta
def rolling_sharpe(returns, window, risk_free=0):
"""
Calcula o sharpe móvel para um ativo e um benchmark de referência, na forma de séries de retornos.
Args:
returns (pd.Series): série de retornos para o qual o Sharpe Ratio será calculado.
window (int): janela móvel para calcular o Sharpe ao longo do tempo.
risk_free (float): valor da taxa livre de risco para cálculo do Sharpe.
Returns:
pd.Series: uma série com os valores do Sharpe para os últimos `window` dias.
A série não possui os `window` primeiros dias.
"""
rolling_sharpe = pd.Series([sharpe_ratio(returns[i - window:i], risk_free)
for i in range(window, len(returns))], returns[window:].index)
return rolling_sharpe
def ewma_volatility(returns, window):
"""
Essa função calcula a volatilidade por EWMA ao longo de um período.
Args:
returns (pd.Series): série de retornos para o qual o EWMA será calculado.
window (int): janela móvel para cálculo da EWMA;
Returns:
pd.Series: uma série com os valores de EWMA dos últimos `window` dias
"""
ewma_volatility = returns.ewm(span=window).std()
return ewma_volatility
def garman_klass_volatility(high_prices, low_prices, close_prices, open_prices, window, time_scale=1):
"""
Estima a volatilidade a partir dos seguintes preços: alta, baixa, abertura e fechamento
Args:
high_prices (pd.DataFrame): série de preços de alta de uma ação
low_prices (pd.DataFrame): série de preços de baixa de uma ação
close_prices (pd.DataFrame): série de preços de fechamento de uma ação
open_prices (pd.DataFrame): série de preços de abertura de uma ação
window (int): janela das estimativa de volatilidade
time_scale (int): fator de escala da volatilidade, por padrão é 1 (diária)
Returns:
pd.Series: série das estimativas de volatildade
"""
high_low_ratio = (1 / 2) * \
(np.log(np.divide(high_prices, low_prices))) ** 2
close_open_ratio = -(2 * np.log(2) - 1) * (
np.log(np.divide(close_prices, open_prices)) ** 2
)
log_ratio = high_low_ratio + close_open_ratio.values
garman_klass_vol = pd.Series(log_ratio, name='Garman Klass', copy=True)
Period_const = time_scale / window
garman_klass_vol.iloc[:window] = np.nan
for date in range(window, len(high_prices)):
garman_klass_vol.iloc[date] = np.sqrt(
Period_const * np.sum(log_ratio.iloc[date - window: date])
)
return garman_klass_vol
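# Garman-Klass estimator implemented above, per window of n days with scale k:
#   sigma^2 = (k/n) * sum[ 0.5*ln(H/L)^2 - (2*ln2 - 1)*ln(C/O)^2 ]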
def parkinson_volatility(high_prices, low_prices, window, time_scale=1):
    """
    Estimates volatility from the high and low prices.
    Args:
        high_prices (pd.DataFrame): series of a stock's high prices
        low_prices (pd.DataFrame): series of a stock's low prices
        window (int): window of the volatility estimates
        time_scale (int): scale factor of the volatility; defaults to 1 (daily)
    Returns:
        pd.Series: series of the volatility estimates
    """
log_ratio = np.log(np.divide(high_prices, low_prices)) ** 2
parkinson_vol = pd.Series(log_ratio, name='Parkinson', copy=True)
Period_const = time_scale / (4 * window * np.log(2))
parkinson_vol.iloc[:window] = np.nan
for date in range(window, len(high_prices)):
parkinson_vol.iloc[date] = np.sqrt(
Period_const * np.sum(log_ratio.iloc[date - window: date])
)
return parkinson_vol
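# Parkinson estimator implemented above:
#   sigma^2 = (k / (4*n*ln2)) * sum[ ln(H/L)^2 ]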
def rolling_std(returns, window):
"""
Essa função calcula volatilidade a partir do cálculo da desvio padrão móvel.
Args:
returns (pd.Series): série de retornos para o qual o desvio padrão será calculado.
window (int): janela móvel para cálculo do desvio padrão móvel;
Returns:
pd.Series: uma série indexado à data com os valores de desvio padrão móvel dos últimos window dias
"""
rolling_std = returns.rolling(window).std()
return rolling_std
def returns(close_prices, return_type='simple'):
"""
Essa função permite o cálculo rápido do retorno de uma ação ao longo do tempo.
Args:
close_prices (pd.Series): série de preços de fechamento que será utilizada de base para o cálculo do retorno;
return_type (string): tipo de retorno (simples - 'simple' ou logarítmico - 'log') a ser calculado;
Returns:
pd.Series: série com os valores do retorno ao longo do tempo
"""
if return_type == "simple":
returns = close_prices.pct_change()
elif return_type == "log":
returns = np.log(close_prices/close_prices.shift(1))
else:
raise ValueError("Tipo de retorno inválido")
return returns
def cumulative_returns(returns, return_type):
"""
Essa função permite o cálculo do retorno cumulativo ao longo do tempo.
Args:
returns (pd.Series): série de retornos da ação ao longo do tempo;
return_type (string): tipo de retorno (simples - 'simp' ou logarítmico - 'log') presente na série.
Returns:
pd.Series: série com os valores de retorno cumulativo ao longo do tempo
"""
if return_type == "log":
cumulative_returns = returns.cumsum()
elif return_type == "simp":
cumulative_returns = (returns + 1).cumprod() - 1
else:
raise ValueError("Tipo de retorno inválido")
return cumulative_returns
def cagr(returns, time_scale=252):
"""
Calcula o CAGR que é a taxa composta de crescimento anual.
Args:
returns (pd.Series): série de retornos para a qual será calculado o drawdown.
time_scale (int): fator de escala do cagr, que é o número de amostras em um ano. Caso fosse uma série temporal diária: 252; série temporal mensal: 12
Returns:
float: cagr do ativo.
"""
cumulative_return = (1 + returns).cumprod()[-1]
return (cumulative_return ** (1/(returns.shape[-1] / time_scale)) - 1)
def mar_ratio(returns, time_window, time_scale=252):
"""
Calcula e plota o drawdown percentual para uma série de retornos.
Args:
returns (pd.Series): série de retornos para a qual será calculado o mar ratio.
time_window (float): janela de tempo que o mar ratio será calculado em relação a escala de tempo. time_window = 3 e time_scale = 252 denota uma janela de 3 anos (Calmar Ratio).
time_scale (int): fator de escala do mar ratio, que é o número de amostras em um ano. Caso fosse uma série temporal diária: 252; série temporal mensal: 12
Returns:
float: valor do mar ratio do ativo
"""
returns_window = returns[-time_window * time_scale:]
drawdowns = drawdown(returns_window)
max_drawdown = abs(drawdowns).max()
mar_ratio = returns_window.mean() * time_scale / max_drawdown
return mar_ratio
def value_at_risk(returns, confidence_level=0.95, window=1, method='variance-covariance'):
    """
    Computes the Value at Risk (VaR) of a returns series.
    Args:
        returns (pd.Series): series of returns.
        confidence_level (float): confidence level of the estimate (e.g. 0.95).
        window (int): horizon, in samples, to which the VaR is scaled.
        method (string): 'variance-covariance' (parametric, normal) or 'historical'.
    Returns:
        float: the VaR estimate (a negative value denotes a loss).
    """
    if method == 'variance-covariance':
        mean = np.mean(returns)
        std = np.std(returns)
        var = stats.norm.ppf(1 - confidence_level, mean, std)
    elif method == 'historical':
        returns = returns.sort_values(ascending=True)
        var = returns.quantile(1 - confidence_level)
    else:
        raise ValueError("Invalid VaR method")
    if window != 1:
        var = var * np.sqrt(window)
    return var
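# Note: scaling one-period VaR by sqrt(window) (the square-root-of-time rule)
# assumes i.i.d. returns; it is a common approximation, not exact.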
| [
"pandas.Series",
"numpy.mean",
"numpy.sqrt",
"numpy.divide",
"numpy.log",
"scipy.stats.norm.ppf",
"numpy.sum",
"numpy.std",
"numpy.cov",
"numpy.var"
] | [((774, 790), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (781, 790), True, 'import numpy as np\n'), ((802, 817), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (808, 817), True, 'import numpy as np\n'), ((1418, 1435), 'numpy.var', 'np.var', (['benchmark'], {}), '(benchmark)\n', (1424, 1435), True, 'import numpy as np\n'), ((3140, 3199), 'pandas.Series', 'pd.Series', (['((cum_returns / peeks - 1) * 100)'], {'name': '"""Drawdown"""'}), "((cum_returns / peeks - 1) * 100, name='Drawdown')\n", (3149, 3199), True, 'import pandas as pd\n'), ((6280, 6332), 'pandas.Series', 'pd.Series', (['log_ratio'], {'name': '"""Garman Klass"""', 'copy': '(True)'}), "(log_ratio, name='Garman Klass', copy=True)\n", (6289, 6332), True, 'import pandas as pd\n'), ((7238, 7287), 'pandas.Series', 'pd.Series', (['log_ratio'], {'name': '"""Parkinson"""', 'copy': '(True)'}), "(log_ratio, name='Parkinson', copy=True)\n", (7247, 7287), True, 'import pandas as pd\n'), ((11062, 11078), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (11069, 11078), True, 'import numpy as np\n'), ((11102, 11117), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (11108, 11117), True, 'import numpy as np\n'), ((11141, 11188), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['(1 - confidance_level)', 'mean', 'std'], {}), '(1 - confidance_level, mean, std)\n', (11155, 11188), False, 'from scipy import stats\n'), ((886, 905), 'numpy.sqrt', 'np.sqrt', (['time_scale'], {}), '(time_scale)\n', (893, 905), True, 'import numpy as np\n'), ((1364, 1390), 'numpy.cov', 'np.cov', (['returns', 'benchmark'], {}), '(returns, benchmark)\n', (1370, 1390), True, 'import numpy as np\n'), ((7176, 7210), 'numpy.divide', 'np.divide', (['high_prices', 'low_prices'], {}), '(high_prices, low_prices)\n', (7185, 7210), True, 'import numpy as np\n'), ((7335, 7344), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (7341, 7344), True, 'import numpy as np\n'), ((11394, 11409), 'numpy.sqrt', 'np.sqrt', (['window'], {}), '(window)\n', (11401, 11409), True, 'import numpy as np\n'), ((6043, 6077), 'numpy.divide', 'np.divide', (['high_prices', 'low_prices'], {}), '(high_prices, low_prices)\n', (6052, 6077), True, 'import numpy as np\n'), ((6149, 6185), 'numpy.divide', 'np.divide', (['close_prices', 'open_prices'], {}), '(close_prices, open_prices)\n', (6158, 6185), True, 'import numpy as np\n'), ((6542, 6584), 'numpy.sum', 'np.sum', (['log_ratio.iloc[date - window:date]'], {}), '(log_ratio.iloc[date - window:date])\n', (6548, 6584), True, 'import numpy as np\n'), ((7509, 7551), 'numpy.sum', 'np.sum', (['log_ratio.iloc[date - window:date]'], {}), '(log_ratio.iloc[date - window:date])\n', (7515, 7551), True, 'import numpy as np\n'), ((6115, 6124), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (6121, 6124), True, 'import numpy as np\n')] |
import cv2, imutils, socket
import numpy as np
import time, os
import base64
import threading, wave, pyaudio, pickle, struct
# For details visit pyshine.com
BUFF_SIZE = 65536
BREAK = False
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, BUFF_SIZE)
host_name = socket.gethostname()
host_ip = 'localhost' # socket.gethostbyname(host_name)
print(host_ip)
port = 9699
message = b'Hello'
client_socket.sendto(message, (host_ip, port))
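# The 'Hello' datagram above acts as a handshake: it tells the UDP server this
# client's address, so the server knows where to stream video frames back to.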
cv2.namedWindow('RECEIVING VIDEO')
cv2.moveWindow('RECEIVING VIDEO', 10, 360)
fps, st, frames_to_count, cnt = (0, 0, 20, 0)
def audio_stream():
p = pyaudio.PyAudio()
CHUNK = 1024
stream = p.open(format=p.get_format_from_width(2),
channels=2,
rate=44100,
output=True,
frames_per_buffer=CHUNK)
# create socket
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_address = (host_ip, port - 1)
print('server listening at', socket_address)
client_socket.connect(socket_address)
print("CLIENT CONNECTED TO", socket_address)
data = b""
payload_size = struct.calcsize("Q")
while True:
try:
while len(data) < payload_size:
packet = client_socket.recv(4 * 1024) # 4K
if not packet: break
data += packet
packet_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("Q", packet_msg_size)[0]
while len(data) < msg_size:
data += client_socket.recv(4 * 1024)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
stream.write(frame)
except:
break
client_socket.close()
print('Audio closed', BREAK)
os._exit(1)
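# For reference, a matching sender would length-prefix each pickled audio
# chunk the same way (a hedged sketch, not the actual pyshine server code):
#   payload = pickle.dumps(audio_chunk)
#   conn.sendall(struct.pack("Q", len(payload)) + payload)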
t1 = threading.Thread(target=audio_stream, args=())
t1.start()
first = False
# receiving video
while True:
    # receive and process each incoming frame
packet, _ = client_socket.recvfrom(BUFF_SIZE)
data = base64.b64decode(packet, b' /')
npdata = np.frombuffer(data, dtype=np.uint8)
frame = cv2.imdecode(npdata, 1)
frame = cv2.putText(frame, 'FPS: ' + str(fps), (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
cv2.imshow("RECEIVING VIDEO", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
client_socket.close()
os._exit(1)
break
if cnt == frames_to_count:
try:
fps = round(frames_to_count / (time.time() - st), 1)
st = time.time()
cnt = 0
except:
pass
cnt += 1
client_socket.close()
cv2.destroyAllWindows() | [
"cv2.moveWindow",
"struct.calcsize",
"numpy.frombuffer",
"socket.socket",
"base64.b64decode",
"cv2.imshow",
"cv2.waitKey",
"struct.unpack",
"cv2.destroyAllWindows",
"os._exit",
"cv2.imdecode",
"time.time",
"pickle.loads",
"threading.Thread",
"pyaudio.PyAudio",
"socket.gethostname",
"... | [((207, 255), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (220, 255), False, 'import cv2, imutils, socket\n'), ((341, 361), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (359, 361), False, 'import cv2, imutils, socket\n'), ((514, 548), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RECEIVING VIDEO"""'], {}), "('RECEIVING VIDEO')\n", (529, 548), False, 'import cv2, imutils, socket\n'), ((549, 591), 'cv2.moveWindow', 'cv2.moveWindow', (['"""RECEIVING VIDEO"""', '(10)', '(360)'], {}), "('RECEIVING VIDEO', 10, 360)\n", (563, 591), False, 'import cv2, imutils, socket\n'), ((1940, 1986), 'threading.Thread', 'threading.Thread', ([], {'target': 'audio_stream', 'args': '()'}), '(target=audio_stream, args=())\n', (1956, 1986), False, 'import threading, wave, pyaudio, pickle, struct\n'), ((2764, 2787), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2785, 2787), False, 'import cv2, imutils, socket\n'), ((668, 685), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (683, 685), False, 'import threading, wave, pyaudio, pickle, struct\n'), ((940, 989), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (953, 989), False, 'import cv2, imutils, socket\n'), ((1205, 1225), 'struct.calcsize', 'struct.calcsize', (['"""Q"""'], {}), "('Q')\n", (1220, 1225), False, 'import threading, wave, pyaudio, pickle, struct\n'), ((1921, 1932), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (1929, 1932), False, 'import time, os\n'), ((2149, 2180), 'base64.b64decode', 'base64.b64decode', (['packet', "b' /'"], {}), "(packet, b' /')\n", (2165, 2180), False, 'import base64\n'), ((2194, 2229), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (2207, 2229), True, 'import numpy as np\n'), ((2243, 2266), 'cv2.imdecode', 'cv2.imdecode', (['npdata', '(1)'], {}), '(npdata, 1)\n', (2255, 2266), False, 'import cv2, imutils, socket\n'), ((2379, 2415), 'cv2.imshow', 'cv2.imshow', (['"""RECEIVING VIDEO"""', 'frame'], {}), "('RECEIVING VIDEO', frame)\n", (2389, 2415), False, 'import cv2, imutils, socket\n'), ((2426, 2440), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2437, 2440), False, 'import cv2, imutils, socket\n'), ((2511, 2522), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (2519, 2522), False, 'import time, os\n'), ((1767, 1791), 'pickle.loads', 'pickle.loads', (['frame_data'], {}), '(frame_data)\n', (1779, 1791), False, 'import threading, wave, pyaudio, pickle, struct\n'), ((2663, 2674), 'time.time', 'time.time', ([], {}), '()\n', (2672, 2674), False, 'import time, os\n'), ((1539, 1574), 'struct.unpack', 'struct.unpack', (['"""Q"""', 'packet_msg_size'], {}), "('Q', packet_msg_size)\n", (1552, 1574), False, 'import threading, wave, pyaudio, pickle, struct\n'), ((2624, 2635), 'time.time', 'time.time', ([], {}), '()\n', (2633, 2635), False, 'import time, os\n')] |
import scipy.stats as spst
import scipy.special as spsp
import numpy as np
from . import opt_abc as opt
from . import opt_smile_abc as smile
class Cev(opt.OptAnalyticABC, smile.OptSmileABC, smile.MassZeroABC):
"""
Constant Elasticity of Variance (CEV) model.
Underlying price is assumed to follow CEV process:
dS_t = (r - q) S_t dt + sigma S_t^beta dW_t, where dW_t is a standard Brownian motion.
Examples:
>>> import numpy as np
>>> import pyfeng as pf
>>> m = pf.Cev(sigma=0.2, beta=0.5, intr=0.05, divr=0.1)
>>> m.price(np.arange(80, 121, 10), 100, 1.2)
array([16.11757214, 10.00786871, 5.64880408, 2.89028476, 1.34128656])
"""
sigma = None
beta = 0.5
is_bsm_sigma = False
def __init__(self, sigma, beta=0.5, intr=0.0, divr=0.0, is_fwd=False):
"""
Args:
sigma: model volatility
beta: elasticity parameter. 0.5 by default
intr: interest rate (domestic interest rate)
divr: dividend/convenience yield (foreign interest rate)
is_fwd: if True, treat `spot` as forward price. False by default.
"""
super().__init__(sigma, intr=intr, divr=divr, is_fwd=is_fwd)
self.beta = beta
def params_kw(self):
params = super().params_kw()
extra = {"beta": self.beta}
return {**params, **extra} # Py 3.9, params | extra
def mass_zero(self, spot, texp, log=False):
fwd = self.forward(spot, texp)
betac = 1.0 - self.beta
a = 0.5 / betac
sigma_std = np.maximum(
self.sigma / np.power(fwd, betac) * np.sqrt(texp), np.finfo(float).eps
)
x = 0.5 / np.square(betac * sigma_std)
if log:
log_mass = (a - 1) * np.log(x) - x - np.log(spsp.gamma(a))
log_mass += np.log(
1
+ (a - 1)
/ x
* (1 + (a - 2) / x * (1 + (a - 3) / x * (1 + (a - 4) / x)))
)
with np.errstate(divide="ignore"):
log_mass = np.where(x > 100, log_mass, np.log(spst.gamma.sf(x=x, a=a)))
return log_mass
else:
return spst.gamma.sf(x=x, a=a)
def mass_zero_t0(self, spot, texp):
"""
Limit value of -T log(M_T) as T -> 0, where M_T is the mass at zero.
Args:
spot: spot (or forward) price
Returns:
- lim_{T->0} T log(M_T)
"""
fwd = self.forward(spot, texp)
betac = 1.0 - self.beta
alpha = self.sigma / np.power(fwd, betac)
t0 = 0.5 / (betac * alpha) ** 2
return t0
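    # Illustrative link between the two methods above (not from the original
    # source): for small texp, mass_zero(spot, texp) is approximately
    # exp(-mass_zero_t0(spot, texp) / texp), since -texp * log(M_texp) -> t0.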
@staticmethod
def price_formula(
strike, spot, texp, sigma=None, cp=1, beta=0.5, intr=0.0, divr=0.0, is_fwd=False
):
"""
Args:
strike:
spot:
texp:
cp:
sigma:
beta:
intr:
divr:
is_fwd:
Returns:
"""
disc_fac = np.exp(-texp * intr)
fwd = spot * (1.0 if is_fwd else np.exp(-texp * divr) / disc_fac)
betac = 1.0 - beta
betac_inv = 1.0 / betac
alpha = sigma / np.power(fwd, betac)
sigma_std = np.maximum(alpha * np.sqrt(texp), np.finfo(float).eps)
kk = strike / fwd
x = 1.0 / np.square(betac * sigma_std)
y = np.power(kk, 2 * betac) * x
# Need to clean up the case beta > 0
if beta > 1.0:
raise ValueError("Cannot handle beta value higher than 1.0")
ncx2_sf = spst.ncx2.sf
ncx2_cdf = spst.ncx2.cdf
        # Computing both call and put is a bit of a computation waste, but we do it for vectorization.
price = np.where(
cp > 0,
fwd * ncx2_sf(y, 2 + betac_inv, x) - strike * ncx2_cdf(x, betac_inv, y),
strike * ncx2_sf(x, betac_inv, y) - fwd * ncx2_cdf(y, 2 + betac_inv, x),
)
return disc_fac * price
def delta(self, strike, spot, texp, cp=1):
fwd, df, divf = self._fwd_factor(spot, texp)
betac_inv = 1 / (1 - self.beta)
k_star = 1.0 / np.square(self.sigma / betac_inv) / texp
x = k_star * np.power(fwd, 2 / betac_inv)
y = k_star * np.power(strike, 2 / betac_inv)
if self.beta < 1.0:
delta = (
0.5 * (cp - 1)
+ spst.ncx2.sf(y, 2 + betac_inv, x)
+ 2
* x
/ betac_inv
* (
spst.ncx2.pdf(y, 4 + betac_inv, x)
- strike / fwd * spst.ncx2.pdf(x, betac_inv, y)
)
)
else:
delta = (
0.5 * (cp - 1)
+ spst.ncx2.sf(x, -betac_inv, y)
- 2
* x
/ betac_inv
* (
spst.ncx2.pdf(x, -betac_inv, y)
- strike / fwd * spst.ncx2.pdf(y, 4 - betac_inv, x)
)
)
delta *= df if self.is_fwd else divf
return delta
def cdf(self, strike, spot, texp, cp=1):
fwd = self.forward(spot, texp)
betac = 1.0 - self.beta
betac_inv = 1.0 / betac
alpha = self.sigma / np.power(fwd, betac)
sigma_std = np.maximum(alpha * np.sqrt(texp), np.finfo(float).eps)
kk = strike / fwd
x = 1.0 / np.square(betac * sigma_std)
y = np.power(kk, 2 * betac) * x
cdf = np.where(
cp > 0, spst.ncx2.cdf(x, betac_inv, y), spst.ncx2.sf(x, betac_inv, y)
)
return cdf
def gamma(self, strike, spot, texp, cp=1):
fwd, df, divf = self._fwd_factor(spot, texp)
betac_inv = 1 / (1 - self.beta)
k_star = 1.0 / np.square(self.sigma / betac_inv) / texp
x = k_star * np.power(fwd, 2 / betac_inv)
y = k_star * np.power(strike, 2 / betac_inv)
if self.beta < 1.0:
gamma = (
(2 + betac_inv - x) * spst.ncx2.pdf(y, 4 + betac_inv, x)
+ x * spst.ncx2.pdf(y, 6 + betac_inv, x)
+ strike
/ fwd
* (
x * spst.ncx2.pdf(x, betac_inv, y)
- y * spst.ncx2.pdf(x, 2 + betac_inv, y)
)
)
else:
gamma = (
x * spst.ncx2.pdf(x, -betac_inv, y)
- y * spst.ncx2.pdf(x, 2 - betac_inv, y)
) + strike / fwd * (
(2 - betac_inv - x) * spst.ncx2.pdf(y, 4 - betac_inv, x)
+ x * spst.ncx2.pdf(y, 6 - betac_inv, x)
)
gamma *= 2 * (divf / betac_inv) ** 2 / df * x / fwd
if self.is_fwd:
gamma *= (df / divf) ** 2
return gamma
def vega(self, strike, spot, texp, cp=1):
fwd, df, divf = self._fwd_factor(spot, texp)
spot = fwd * df / divf
betac_inv = 1 / (1 - self.beta)
k_star = 1.0 / np.square(self.sigma / betac_inv) / texp
x = k_star * np.power(fwd, 2 / betac_inv)
y = k_star * np.power(strike, 2 / betac_inv)
if self.beta < 1.0:
vega = -fwd * spst.ncx2.pdf(y, 4 + betac_inv, x) + strike * spst.ncx2.pdf(
x, betac_inv, y
)
else:
vega = fwd * spst.ncx2.pdf(x, -betac_inv, y) - strike * spst.ncx2.pdf(
y, 4 - betac_inv, x
)
sigma = self.sigma * spot ** (self.beta - 1)
vega *= df * 2 * x / sigma
return vega
def theta(self, strike, spot, texp, cp=1):
### Need to implement this
return self.theta_numeric(strike, spot, texp, cp=cp)
| [
"scipy.stats.gamma.sf",
"numpy.sqrt",
"numpy.power",
"numpy.log",
"scipy.stats.ncx2.pdf",
"scipy.stats.ncx2.cdf",
"numpy.square",
"numpy.exp",
"numpy.errstate",
"scipy.special.gamma",
"scipy.stats.ncx2.sf",
"numpy.finfo"
] | [((3047, 3067), 'numpy.exp', 'np.exp', (['(-texp * intr)'], {}), '(-texp * intr)\n', (3053, 3067), True, 'import numpy as np\n'), ((1711, 1739), 'numpy.square', 'np.square', (['(betac * sigma_std)'], {}), '(betac * sigma_std)\n', (1720, 1739), True, 'import numpy as np\n'), ((1852, 1939), 'numpy.log', 'np.log', (['(1 + (a - 1) / x * (1 + (a - 2) / x * (1 + (a - 3) / x * (1 + (a - 4) / x))))'], {}), '(1 + (a - 1) / x * (1 + (a - 2) / x * (1 + (a - 3) / x * (1 + (a - 4) /\n x))))\n', (1858, 1939), True, 'import numpy as np\n'), ((2210, 2233), 'scipy.stats.gamma.sf', 'spst.gamma.sf', ([], {'x': 'x', 'a': 'a'}), '(x=x, a=a)\n', (2223, 2233), True, 'import scipy.stats as spst\n'), ((2587, 2607), 'numpy.power', 'np.power', (['fwd', 'betac'], {}), '(fwd, betac)\n', (2595, 2607), True, 'import numpy as np\n'), ((3226, 3246), 'numpy.power', 'np.power', (['fwd', 'betac'], {}), '(fwd, betac)\n', (3234, 3246), True, 'import numpy as np\n'), ((3366, 3394), 'numpy.square', 'np.square', (['(betac * sigma_std)'], {}), '(betac * sigma_std)\n', (3375, 3394), True, 'import numpy as np\n'), ((3407, 3430), 'numpy.power', 'np.power', (['kk', '(2 * betac)'], {}), '(kk, 2 * betac)\n', (3415, 3430), True, 'import numpy as np\n'), ((4222, 4250), 'numpy.power', 'np.power', (['fwd', '(2 / betac_inv)'], {}), '(fwd, 2 / betac_inv)\n', (4230, 4250), True, 'import numpy as np\n'), ((4272, 4303), 'numpy.power', 'np.power', (['strike', '(2 / betac_inv)'], {}), '(strike, 2 / betac_inv)\n', (4280, 4303), True, 'import numpy as np\n'), ((5287, 5307), 'numpy.power', 'np.power', (['fwd', 'betac'], {}), '(fwd, betac)\n', (5295, 5307), True, 'import numpy as np\n'), ((5427, 5455), 'numpy.square', 'np.square', (['(betac * sigma_std)'], {}), '(betac * sigma_std)\n', (5436, 5455), True, 'import numpy as np\n'), ((5468, 5491), 'numpy.power', 'np.power', (['kk', '(2 * betac)'], {}), '(kk, 2 * betac)\n', (5476, 5491), True, 'import numpy as np\n'), ((5541, 5571), 'scipy.stats.ncx2.cdf', 'spst.ncx2.cdf', (['x', 'betac_inv', 'y'], {}), '(x, betac_inv, y)\n', (5554, 5571), True, 'import scipy.stats as spst\n'), ((5573, 5602), 'scipy.stats.ncx2.sf', 'spst.ncx2.sf', (['x', 'betac_inv', 'y'], {}), '(x, betac_inv, y)\n', (5585, 5602), True, 'import scipy.stats as spst\n'), ((5859, 5887), 'numpy.power', 'np.power', (['fwd', '(2 / betac_inv)'], {}), '(fwd, 2 / betac_inv)\n', (5867, 5887), True, 'import numpy as np\n'), ((5909, 5940), 'numpy.power', 'np.power', (['strike', '(2 / betac_inv)'], {}), '(strike, 2 / betac_inv)\n', (5917, 5940), True, 'import numpy as np\n'), ((7063, 7091), 'numpy.power', 'np.power', (['fwd', '(2 / betac_inv)'], {}), '(fwd, 2 / betac_inv)\n', (7071, 7091), True, 'import numpy as np\n'), ((7113, 7144), 'numpy.power', 'np.power', (['strike', '(2 / betac_inv)'], {}), '(strike, 2 / betac_inv)\n', (7121, 7144), True, 'import numpy as np\n'), ((1648, 1661), 'numpy.sqrt', 'np.sqrt', (['texp'], {}), '(texp)\n', (1655, 1661), True, 'import numpy as np\n'), ((1663, 1678), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1671, 1678), True, 'import numpy as np\n'), ((2031, 2059), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (2042, 2059), True, 'import numpy as np\n'), ((3286, 3299), 'numpy.sqrt', 'np.sqrt', (['texp'], {}), '(texp)\n', (3293, 3299), True, 'import numpy as np\n'), ((3301, 3316), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3309, 3316), True, 'import numpy as np\n'), ((4160, 4193), 'numpy.square', 'np.square', (['(self.sigma / betac_inv)'], {}), 
'(self.sigma / betac_inv)\n', (4169, 4193), True, 'import numpy as np\n'), ((5347, 5360), 'numpy.sqrt', 'np.sqrt', (['texp'], {}), '(texp)\n', (5354, 5360), True, 'import numpy as np\n'), ((5362, 5377), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5370, 5377), True, 'import numpy as np\n'), ((5797, 5830), 'numpy.square', 'np.square', (['(self.sigma / betac_inv)'], {}), '(self.sigma / betac_inv)\n', (5806, 5830), True, 'import numpy as np\n'), ((7001, 7034), 'numpy.square', 'np.square', (['(self.sigma / betac_inv)'], {}), '(self.sigma / betac_inv)\n', (7010, 7034), True, 'import numpy as np\n'), ((1625, 1645), 'numpy.power', 'np.power', (['fwd', 'betac'], {}), '(fwd, betac)\n', (1633, 1645), True, 'import numpy as np\n'), ((1813, 1826), 'scipy.special.gamma', 'spsp.gamma', (['a'], {}), '(a)\n', (1823, 1826), True, 'import scipy.special as spsp\n'), ((3109, 3129), 'numpy.exp', 'np.exp', (['(-texp * divr)'], {}), '(-texp * divr)\n', (3115, 3129), True, 'import numpy as np\n'), ((4404, 4437), 'scipy.stats.ncx2.sf', 'spst.ncx2.sf', (['y', '(2 + betac_inv)', 'x'], {}), '(y, 2 + betac_inv, x)\n', (4416, 4437), True, 'import scipy.stats as spst\n'), ((4766, 4796), 'scipy.stats.ncx2.sf', 'spst.ncx2.sf', (['x', '(-betac_inv)', 'y'], {}), '(x, -betac_inv, y)\n', (4778, 4796), True, 'import scipy.stats as spst\n'), ((7200, 7234), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(4 + betac_inv)', 'x'], {}), '(y, 4 + betac_inv, x)\n', (7213, 7234), True, 'import scipy.stats as spst\n'), ((7246, 7276), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', 'betac_inv', 'y'], {}), '(x, betac_inv, y)\n', (7259, 7276), True, 'import scipy.stats as spst\n'), ((7346, 7377), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', '(-betac_inv)', 'y'], {}), '(x, -betac_inv, y)\n', (7359, 7377), True, 'import scipy.stats as spst\n'), ((7389, 7423), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(4 - betac_inv)', 'x'], {}), '(y, 4 - betac_inv, x)\n', (7402, 7423), True, 'import scipy.stats as spst\n'), ((1790, 1799), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1796, 1799), True, 'import numpy as np\n'), ((2123, 2146), 'scipy.stats.gamma.sf', 'spst.gamma.sf', ([], {'x': 'x', 'a': 'a'}), '(x=x, a=a)\n', (2136, 2146), True, 'import scipy.stats as spst\n'), ((4546, 4580), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(4 + betac_inv)', 'x'], {}), '(y, 4 + betac_inv, x)\n', (4559, 4580), True, 'import scipy.stats as spst\n'), ((4905, 4936), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', '(-betac_inv)', 'y'], {}), '(x, -betac_inv, y)\n', (4918, 4936), True, 'import scipy.stats as spst\n'), ((6030, 6064), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(4 + betac_inv)', 'x'], {}), '(y, 4 + betac_inv, x)\n', (6043, 6064), True, 'import scipy.stats as spst\n'), ((6087, 6121), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(6 + betac_inv)', 'x'], {}), '(y, 6 + betac_inv, x)\n', (6100, 6121), True, 'import scipy.stats as spst\n'), ((6393, 6424), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', '(-betac_inv)', 'y'], {}), '(x, -betac_inv, y)\n', (6406, 6424), True, 'import scipy.stats as spst\n'), ((6447, 6481), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', '(2 - betac_inv)', 'y'], {}), '(x, 2 - betac_inv, y)\n', (6460, 6481), True, 'import scipy.stats as spst\n'), ((4618, 4648), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', 'betac_inv', 'y'], {}), '(x, betac_inv, y)\n', (4631, 4648), True, 'import scipy.stats as spst\n'), ((4974, 5008), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(4 - betac_inv)', 'x'], {}), 
'(y, 4 - betac_inv, x)\n', (4987, 5008), True, 'import scipy.stats as spst\n'), ((6213, 6243), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', 'betac_inv', 'y'], {}), '(x, betac_inv, y)\n', (6226, 6243), True, 'import scipy.stats as spst\n'), ((6270, 6304), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['x', '(2 + betac_inv)', 'y'], {}), '(x, 2 + betac_inv, y)\n', (6283, 6304), True, 'import scipy.stats as spst\n'), ((6553, 6587), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(4 - betac_inv)', 'x'], {}), '(y, 4 - betac_inv, x)\n', (6566, 6587), True, 'import scipy.stats as spst\n'), ((6610, 6644), 'scipy.stats.ncx2.pdf', 'spst.ncx2.pdf', (['y', '(6 - betac_inv)', 'x'], {}), '(y, 6 - betac_inv, x)\n', (6623, 6644), True, 'import scipy.stats as spst\n')] |
import numpy as np
# Change False to True for each block of code to see what it does
# Arithmetic operations between 2 NumPy arrays
if False:
a = np.array([1, 2, 3, 4])
b = np.array([1, 2, 1, 2])
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a ** b)
# Arithmetic operations between a NumPy array and a single number
if True:
a = np.array([1, 2, 3, 4])
b = 2
print(a + b)
print(a - b)
print(a * b)
print(a / b)
print(a ** b)
# Logical operations with NumPy arrays
if False:
a = np.array([True, True, False, False])
b = np.array([True, False, True, False])
print(a & b)
print(a | b)
print(~a)
print(a & True)
print(a & False)
print(a | True)
print(a | False)
# Comparison operations between 2 NumPy Arrays
if True:
a = np.array([1, 2, 3, 4, 5])
b = np.array([5, 4, 3, 2, 1])
print(a > b)
print(a >= b)
print(a < b)
print(a <= b)
print(a == b)
print(a != b)
# Comparison operations between a NumPy array and a single number
if False:
a = np.array([1, 2, 3, 4])
b = 2
print(a > b)
print(a >= b)
print(a < b)
print(a <= b)
print(a == b)
print(a != b)
# First 20 countries with school completion data
countries = np.array([
'Algeria', 'Argentina', 'Armenia', 'Aruba', 'Austria', 'Azerbaijan',
'Bahamas', 'Barbados', 'Belarus', 'Belgium', 'Belize', 'Bolivia',
'Botswana', 'Brunei', 'Bulgaria', 'Burkina Faso', 'Burundi',
'Cambodia', 'Cameroon', 'Cape Verde'
])
# Female school completion rate in 2007 for those 20 countries
female_completion = np.array([
97.35583, 104.62379, 103.02998, 95.14321, 103.69019,
98.49185, 100.88828, 95.43974, 92.11484, 91.54804,
95.98029, 98.22902, 96.12179, 119.28105, 97.84627,
29.07386, 38.41644, 90.70509, 51.7478, 95.45072
])
# Male school completion rate in 2007 for those 20 countries
male_completion = np.array([
95.47622, 100.66476, 99.7926, 91.48936, 103.22096,
97.80458, 103.81398, 88.11736, 93.55611, 87.76347,
102.45714, 98.73953, 92.22388, 115.3892, 98.70502,
37.00692, 45.39401, 91.22084, 62.42028, 90.66958
])
def overall_completion_rate(female_completion, male_completion):
"""
Fill in this function to return a NumPy array containing the overall
    school completion rate for each country. The arguments are NumPy
    arrays giving the female and male completion rates of each country,
    in the same order.
"""
return (female_completion + male_completion) / 2
print(overall_completion_rate(female_completion, male_completion))
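# Quick sanity check with tiny illustrative arrays (not from the dataset):
# the elementwise mean of [100, 50] and [80, 50] should be [90, 50].
print(overall_completion_rate(np.array([100., 50.]), np.array([80., 50.])))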
| [
"numpy.array"
] | [((1289, 1545), 'numpy.array', 'np.array', (["['Algeria', 'Argentina', 'Armenia', 'Aruba', 'Austria', 'Azerbaijan',\n 'Bahamas', 'Barbados', 'Belarus', 'Belgium', 'Belize', 'Bolivia',\n 'Botswana', 'Brunei', 'Bulgaria', 'Burkina Faso', 'Burundi', 'Cambodia',\n 'Cameroon', 'Cape Verde']"], {}), "(['Algeria', 'Argentina', 'Armenia', 'Aruba', 'Austria',\n 'Azerbaijan', 'Bahamas', 'Barbados', 'Belarus', 'Belgium', 'Belize',\n 'Bolivia', 'Botswana', 'Brunei', 'Bulgaria', 'Burkina Faso', 'Burundi',\n 'Cambodia', 'Cameroon', 'Cape Verde'])\n", (1297, 1545), True, 'import numpy as np\n'), ((1636, 1860), 'numpy.array', 'np.array', (['[97.35583, 104.62379, 103.02998, 95.14321, 103.69019, 98.49185, 100.88828, \n 95.43974, 92.11484, 91.54804, 95.98029, 98.22902, 96.12179, 119.28105, \n 97.84627, 29.07386, 38.41644, 90.70509, 51.7478, 95.45072]'], {}), '([97.35583, 104.62379, 103.02998, 95.14321, 103.69019, 98.49185, \n 100.88828, 95.43974, 92.11484, 91.54804, 95.98029, 98.22902, 96.12179, \n 119.28105, 97.84627, 29.07386, 38.41644, 90.70509, 51.7478, 95.45072])\n', (1644, 1860), True, 'import numpy as np\n'), ((1949, 2171), 'numpy.array', 'np.array', (['[95.47622, 100.66476, 99.7926, 91.48936, 103.22096, 97.80458, 103.81398, \n 88.11736, 93.55611, 87.76347, 102.45714, 98.73953, 92.22388, 115.3892, \n 98.70502, 37.00692, 45.39401, 91.22084, 62.42028, 90.66958]'], {}), '([95.47622, 100.66476, 99.7926, 91.48936, 103.22096, 97.80458, \n 103.81398, 88.11736, 93.55611, 87.76347, 102.45714, 98.73953, 92.22388,\n 115.3892, 98.70502, 37.00692, 45.39401, 91.22084, 62.42028, 90.66958])\n', (1957, 2171), True, 'import numpy as np\n'), ((152, 174), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (160, 174), True, 'import numpy as np\n'), ((183, 205), 'numpy.array', 'np.array', (['[1, 2, 1, 2]'], {}), '([1, 2, 1, 2])\n', (191, 205), True, 'import numpy as np\n'), ((377, 399), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (385, 399), True, 'import numpy as np\n'), ((555, 591), 'numpy.array', 'np.array', (['[True, True, False, False]'], {}), '([True, True, False, False])\n', (563, 591), True, 'import numpy as np\n'), ((600, 636), 'numpy.array', 'np.array', (['[True, False, True, False]'], {}), '([True, False, True, False])\n', (608, 636), True, 'import numpy as np\n'), ((835, 860), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (843, 860), True, 'import numpy as np\n'), ((869, 894), 'numpy.array', 'np.array', (['[5, 4, 3, 2, 1]'], {}), '([5, 4, 3, 2, 1])\n', (877, 894), True, 'import numpy as np\n'), ((1087, 1109), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1095, 1109), True, 'import numpy as np\n')] |
# This script calculates RPKM, when paired end reads are mapped to a contig
# catalogue with BWA MEM. It will not be accurate with single end reads or
# any other mapper than BWA MEM.
# Theory:
# We want a simple way to estimate abundance of redundant contig catalogues.
# Earlier we used to run analysis on deduplicated gene catalogues, but since
# both depth and kmer composition are only stable for longer contigs, we have
# moved to contig catalogues. We have not found a way of deduplicating contigs.
# For this we have until now used two methods:
# 1) Only counting the primary hits. In this case the read will never be
#    assigned to any contig that differs by just 1 basepair. Even for
#    identical contigs, reads are assigned randomly, which causes noise.
# 2) Using MetaBAT's jgi_summarize_bam_contig_depths, a script which is not
#    documented and whose workings we cannot figure out. When testing with small
# toy data, it produces absurd results.
# This script is an attempt to take an approach as simple as possible while
# still being sound technically. We simply count the number of reads in a
# contig normalized by contig length and total number of reads.
# We look at all hits, including secondary hits. We do not discount partial
# alignments. Also, if a read maps to multiple contigs, we don't count each hit
# as less than if it mapped to both. The reason for all these decisions is that
# if the aligner believes it's a hit, we believe the contig is present.
# We do not take varying insert sizes into account. It is unlikely that
# any contig with enough reads to provide a reliable estimate of depth would,
# by chance, only recruit read pairs with short or long insert size. So this
# will average out over all contigs.
# We DO filter the input file for duplicate lines, as BWA MEM erroneously
# produces quite a few of them.
# We count each read independently, because BWA MEM often assigns mating reads
# to different contigs. If a read has their mate unmapped, we count it twice
# to compensate (one single read corresponds to two paired reads).
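# Worked example of the resulting numbers (illustrative values only): a contig
# of 2,500 bp that recruits 50 half-reads in a library of 2,000,000 half-reads
# gets RPKM = 50 / (2.5 kilobases * 2.0 million reads) = 10.0.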
__doc__ = """Estimate RPKM (depths) from BAM files of reads mapped to contigs.
Usage:
>>> bampaths = ['/path/to/bam1.bam', '/path/to/bam2.bam', '/path/to/bam3.bam']
>>> rpkms = read_bamfiles(bampaths)
"""
import pysam as _pysam
import sys as _sys
import os as _os
import multiprocessing as _multiprocessing
import numpy as _np
DEFAULT_SUBPROCESSES = min(8, _os.cpu_count())
def mergecolumns(pathlist):
"""Merges multiple npz files with columns to a matrix.
All paths must be npz arrays with the array saved as name 'arr_0',
and with the same length.
Input: pathlist: List of paths to find .npz files to merge
Output: Matrix with one column per npz file
"""
if len(pathlist) == 0:
return _np.array([], dtype=_np.float32)
for path in pathlist:
if not _os.path.exists(path):
raise FileNotFoundError(path)
first = _np.load(pathlist[0])['arr_0']
length = len(first)
ncolumns = len(pathlist)
result = _np.zeros((length, ncolumns), dtype=_np.float32)
result[:,0] = first
for columnno, path in enumerate(pathlist[1:]):
column = _np.load(path)['arr_0']
if len(column) != length:
raise ValueError("Length of data at {} is unlike other cols".format(path))
result[:,columnno + 1] = column
return result
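# Illustrative round trip for mergecolumns (hypothetical temp-file paths):
#   _np.savez_compressed('/tmp/c0.npz', _np.arange(3, dtype=_np.float32))
#   _np.savez_compressed('/tmp/c1.npz', _np.ones(3, dtype=_np.float32))
#   mergecolumns(['/tmp/c0.npz', '/tmp/c1.npz'])  # -> a (3, 2) float32 matrix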
def _get_alternate_references(alignedsegment):
"""Given a pysam aligned segment, returns a list with the names of all
references the read maps to, both primary and secondary hits.
"""
references = list()
# Some reads don't have secondary hits
if not alignedsegment.has_tag('XA'):
return references
# XA is a string contigname1,<other info>;contigname2,<other info>; ...
secondary_alignment_string = alignedsegment.get_tag('XA')
secondary_alignments = secondary_alignment_string.split(';')[:-1]
for secondary_alignment in secondary_alignments:
references.append(secondary_alignment.partition(',')[0])
return references
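# Example of the XA format this parses (hedged; pattern as described above):
#   'contigA,+1234,60M,0;contigB,-987,60M,1;'  ->  ['contigA', 'contigB']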
def _filter_segments(segmentiterator, minscore):
"""Returns an iterator of AlignedSegment filtered for reads with low
alignment score, and for any segments identical to the previous segment.
    This is necessary as BWA MEM produces doppelgangers.
"""
    # First get the first segment, so that in the loop we can compare to the previous one
for alignedsegment in segmentiterator:
if minscore > 0 and alignedsegment.get_tag('AS') < minscore:
continue
yield alignedsegment
break
lastname = alignedsegment.query_name
lastwasforward = alignedsegment.flag & 64 == 64
for alignedsegment in segmentiterator:
if minscore > 0 and alignedsegment.get_tag('AS') < minscore:
continue
        # Depressingly, BWA sometimes outputs the same read multiple times.
# We identify them by having same name and directionality as previous.
thisname = alignedsegment.query_name
thisisforward = alignedsegment.flag & 64 == 64
if thisisforward is not lastwasforward or thisname != lastname:
yield alignedsegment
lastname = thisname
lastwasforward = thisisforward
def _get_contig_rpkms(inpath, outpath=None, minscore=50, minlength=2000):
"""Returns RPKM (reads per kilobase per million mapped reads)
for all contigs present in BAM header.
Inputs:
inpath: Path to BAM file
outpath: Path to dump depths array to or None
minscore [50]: Minimum alignment score (AS field) to consider
minlength [2000]: Discard any references shorter than N bases
Outputs:
path: Same as input path
rpkms:
If outpath is not None: None
Else: A float32-array with RPKM for each contig in BAM header
length: Length of rpkms array
"""
bamfile = _pysam.AlignmentFile(inpath, "rb")
# We can only get secondary alignment reference names, not indices. So we must
# make an idof dict to look up the indices.
idof = {contig: i for i, contig in enumerate(bamfile.references)}
contiglengths = bamfile.lengths
halfreads = _np.zeros(len(contiglengths), dtype=_np.int32)
nhalfreads = 0
for segment in _filter_segments(bamfile, minscore):
nhalfreads += 1
# Read w. unmapped mates count twice as they represent a whole read
value = 2 if segment.mate_is_unmapped else 1
halfreads[segment.reference_id] += value
for reference in _get_alternate_references(segment):
id = idof[reference]
halfreads[id] += value
bamfile.close()
del idof
rpkms = _np.zeros(len(contiglengths), dtype=_np.float32)
millionmappedreads = nhalfreads / 1e6
for i, (contiglength, nhalfreads) in enumerate(zip(contiglengths, halfreads)):
kilobases = contiglength / 1000
rpkms[i] = nhalfreads / (kilobases * millionmappedreads)
# Now filter for small contigs
lengthmask = _np.array(contiglengths, dtype=_np.int32) >= minlength
rpkms = rpkms[lengthmask]
# If dump to disk, array returned is None instead of rpkm array
if outpath is not None:
arrayresult = None
_np.savez_compressed(outpath, rpkms)
else:
arrayresult = rpkms
return inpath, arrayresult, len(rpkms)
def read_bamfiles(paths, dumpdirectory=None, minscore=50, minlength=100,
subprocesses=DEFAULT_SUBPROCESSES, logfile=None):
"Placeholder docstring - replaced after this func definition"
# Define callback function depending on whether a logfile exists or not
if logfile is not None:
def _callback(result):
path, rpkms, length = result
print('\tProcessed ', path, file=logfile)
logfile.flush()
        def _error_callback(exception):
            # The error callback receives the raised exception from the worker.
            print('\tERROR WHEN PROCESSING:', exception, file=logfile)
            raise _multiprocessing.ProcessError('ERROR WHEN PROCESSING: ' + str(exception))
else:
def _callback(result):
pass
def _error_callback(result):
pass
# Bam files must exist
for path in paths:
if not _os.path.isfile(path):
raise FileNotFoundError(path)
if dumpdirectory is not None:
# Dumpdirectory cannot exist, but its parent must exist
dumpdirectory = _os.path.abspath(dumpdirectory)
if _os.path.exists(dumpdirectory):
raise FileExistsError(dumpdirectory)
parentdir = _os.path.dirname(_os.path.abspath(dumpdirectory))
if not _os.path.isdir(parentdir):
raise FileNotFoundError("Parent dir of " + dumpdirectory)
# Create directory to dump in
_os.mkdir(dumpdirectory)
# Get references and lengths from first BAM file.
# We need these to print them in the output.
# Might as well do it before spawning all those processes.
firstfile = _pysam.AlignmentFile(paths[0], "rb")
ncontigs = sum(1 for length in firstfile.lengths if length >= minlength)
# Probe to check that the "AS" aux field is present (BWA makes this)
if minscore > 0:
segments = [j for i, j in zip(range(25), firstfile)]
if not all(segment.has_tag("AS") for segment in segments):
raise ValueError("If minscore > 0, 'AS' field must be present in BAM file.")
firstfile.close()
del firstfile
if ncontigs == 0:
raise ValueError('No headers in first bam file after filtering')
# Spawn independent processes to calculate RPKM for each of the BAM files
processresults = list()
# Queue all the processes
with _multiprocessing.Pool(processes=subprocesses) as pool:
for pathnumber, path in enumerate(paths):
if dumpdirectory is None:
outpath = None
else:
outpath = dumpdirectory + '/' + str(pathnumber) + '.npz'
arguments = (path, outpath, minscore, minlength)
processresults.append(pool.apply_async(_get_contig_rpkms, arguments,
callback=_callback, error_callback=_error_callback))
# For some reason, this is needed.
pool.close()
pool.join()
# Verify we didn't get errors or wrong lengths
for processresult in processresults:
if processresult.successful():
path, rpkm, length = processresult.get()
if length != ncontigs:
raise ValueError('Expected {} headers in {}, got {}.'.format(
ncontigs, path, length))
else:
processresult.get()
# If we did not dump to disk, load directly from process results to
# one big matrix...
if dumpdirectory is None:
columnof = {p:i for i, p in enumerate(paths)}
rpkms = _np.zeros((ncontigs, len(paths)), dtype=_np.float32)
for processresult in processresults:
path, rpkm, length = processresult.get()
rpkms[:, columnof[path]] = rpkm
# If we did, instead merge them from the disk
else:
dumppaths = [_os.path.join(dumpdirectory, str(i) + '.npz') for i in range(len(paths))]
rpkms = mergecolumns(dumppaths)
return rpkms
read_bamfiles.__doc__ = """Spawns processes to parse BAM files and get contig rpkms.
Input:
path: List or tuple of paths to BAM files
dumpdirectory: [None] Dir to create and dump per-sample depths NPZ files to
minscore [50]: Minimum alignment score (AS field) to consider
minlength [100]: Ignore any references shorter than N bases
subprocesses [{}]: Number of subprocesses to spawn
logfile: [None] File to print progress to
Output: A (n_contigs x n_samples) Numpy array with RPKM
""".format(DEFAULT_SUBPROCESSES)
| [
"os.path.exists",
"pysam.AlignmentFile",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"os.path.isdir",
"os.mkdir",
"os.cpu_count",
"multiprocessing.Pool",
"os.path.abspath",
"numpy.savez_compressed",
"numpy.load",
"multiprocessing.ProcessError"
] | [((2436, 2451), 'os.cpu_count', '_os.cpu_count', ([], {}), '()\n', (2449, 2451), True, 'import os as _os\n'), ((3057, 3105), 'numpy.zeros', '_np.zeros', (['(length, ncolumns)'], {'dtype': '_np.float32'}), '((length, ncolumns), dtype=_np.float32)\n', (3066, 3105), True, 'import numpy as _np\n'), ((5939, 5973), 'pysam.AlignmentFile', '_pysam.AlignmentFile', (['inpath', '"""rb"""'], {}), "(inpath, 'rb')\n", (5959, 5973), True, 'import pysam as _pysam\n'), ((8983, 9019), 'pysam.AlignmentFile', '_pysam.AlignmentFile', (['paths[0]', '"""rb"""'], {}), "(paths[0], 'rb')\n", (9003, 9019), True, 'import pysam as _pysam\n'), ((2806, 2838), 'numpy.array', '_np.array', (['[]'], {'dtype': '_np.float32'}), '([], dtype=_np.float32)\n', (2815, 2838), True, 'import numpy as _np\n'), ((2959, 2980), 'numpy.load', '_np.load', (['pathlist[0]'], {}), '(pathlist[0])\n', (2967, 2980), True, 'import numpy as _np\n'), ((7065, 7106), 'numpy.array', '_np.array', (['contiglengths'], {'dtype': '_np.int32'}), '(contiglengths, dtype=_np.int32)\n', (7074, 7106), True, 'import numpy as _np\n'), ((7282, 7318), 'numpy.savez_compressed', '_np.savez_compressed', (['outpath', 'rpkms'], {}), '(outpath, rpkms)\n', (7302, 7318), True, 'import numpy as _np\n'), ((8421, 8452), 'os.path.abspath', '_os.path.abspath', (['dumpdirectory'], {}), '(dumpdirectory)\n', (8437, 8452), True, 'import os as _os\n'), ((8464, 8494), 'os.path.exists', '_os.path.exists', (['dumpdirectory'], {}), '(dumpdirectory)\n', (8479, 8494), True, 'import os as _os\n'), ((8775, 8799), 'os.mkdir', '_os.mkdir', (['dumpdirectory'], {}), '(dumpdirectory)\n', (8784, 8799), True, 'import os as _os\n'), ((9693, 9738), 'multiprocessing.Pool', '_multiprocessing.Pool', ([], {'processes': 'subprocesses'}), '(processes=subprocesses)\n', (9714, 9738), True, 'import multiprocessing as _multiprocessing\n'), ((2881, 2902), 'os.path.exists', '_os.path.exists', (['path'], {}), '(path)\n', (2896, 2902), True, 'import os as _os\n'), ((3199, 3213), 'numpy.load', '_np.load', (['path'], {}), '(path)\n', (3207, 3213), True, 'import numpy as _np\n'), ((7991, 8053), 'multiprocessing.ProcessError', '_multiprocessing.ProcessError', (["('ERROR WHEN PROCESSING ' + path)"], {}), "('ERROR WHEN PROCESSING ' + path)\n", (8020, 8053), True, 'import multiprocessing as _multiprocessing\n'), ((8233, 8254), 'os.path.isfile', '_os.path.isfile', (['path'], {}), '(path)\n', (8248, 8254), True, 'import os as _os\n'), ((8583, 8614), 'os.path.abspath', '_os.path.abspath', (['dumpdirectory'], {}), '(dumpdirectory)\n', (8599, 8614), True, 'import os as _os\n'), ((8631, 8656), 'os.path.isdir', '_os.path.isdir', (['parentdir'], {}), '(parentdir)\n', (8645, 8656), True, 'import os as _os\n')] |
import gym
import numpy as np
import importlib
from mujoco_py import MjViewer
class HACEnv(gym.Env):
def __init__(self, task_id, eval_mode=False, **kwargs):
module = importlib.import_module(".." + task_id + ".design_agent_and_env", __name__)
self._env = env = module.design_env()
goal_space = np.array(env.goal_space_train)
self.metadata = {'render.subgoals' : True, 'render.modes': ['human']}
self.observation_space = gym.spaces.Dict({
'desired_goal': gym.spaces.Box(goal_space[:,0], goal_space[:,1]),
'achieved_goal': gym.spaces.Box(goal_space[:,0], goal_space[:,1]),
'achieved_subgoal': gym.spaces.Box(low=env.subgoal_bounds[:,0], high=env.subgoal_bounds[:,1]),
'observation': gym.spaces.Box(low=env.initial_state_space[:,0], high=env.initial_state_space[:,1]),
})
self.observation_keys = ['desired_goal', 'observation']
self.subgoal_space = gym.spaces.Box(low=env.subgoal_bounds[:,0], high=env.subgoal_bounds[:,1])
self.action_space = gym.spaces.Box(low=-np.array(env.action_bounds), high=np.array(env.action_bounds))
self._obs = None
self._end_goal = None
self._viewer = None
self._eval_mode = eval_mode
from collections import deque
self.last_obs = deque(maxlen=50)
self.last_actions = deque(maxlen=50)
def step(self, action):
next_obs = self._env.execute_action(action)
reward = self.compute_reward(
self._env.project_state_to_end_goal(self._env.sim, next_obs),
self._end_goal, {})
self._obs = next_obs
self.last_actions.append(action)
self.last_obs.appendleft(next_obs)
return self._get_obs(), reward, False, {}
def compute_reward(self, achieved_goal, desired_goal, info):
return -np.sqrt(np.sum(np.square(achieved_goal - desired_goal)) + 1e-8)
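    # Worked number for the reward above (illustrative): with a 3-dimensional
    # goal, compute_reward(np.zeros(3), np.ones(3), {}) is about
    # -sqrt(3 + 1e-8) ~ -1.732.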
def _get_obs(self):
return {
'observation': self._obs,
'achieved_goal': self._env.project_state_to_end_goal(self._env.sim, self._obs),
'achieved_subgoal': self._env.project_state_to_subgoal(self._env.sim, self._obs),
'desired_goal': self._end_goal,
}
def reset(self):
self._end_goal = self._env.get_next_goal(test=self._eval_mode)
self._env.display_end_goal(self._end_goal)
self._obs = self._env.reset_sim(self._end_goal, test=self._eval_mode)
return self._get_obs()
def key_callback(self, window, key, scancode, action, mods):
import glfw
from itertools import islice
if action == glfw.RELEASE and key == glfw.KEY_A:
print("\n\n QPOS:\n", self._env.sim.data.qpos)
print("\n\n QVEL:\n", self._env.sim.data.qvel)
print("\n\n ControlRange: \n", self._env.model.actuator_ctrlrange, "\n\n")
last_n_obs = tuple(islice(self.last_obs, None, 49))
last_n_actions = tuple(islice(self.last_actions, None, 49))
for i, (obs,action) in enumerate(zip(last_n_obs, last_n_actions)):
print(i, "\n", obs, action)
def render(self, *args, **kwargs):
mode = 'human'
if args:
mode = args[0]
elif 'mode' in kwargs:
mode = kwargs['mode']
assert mode == 'human'
subgoals = kwargs.pop('subgoals', None)
if subgoals:
subgoals = [np.squeeze(subgoal) for _,subgoal in subgoals.items() if subgoal is not None]
self._env.display_subgoals(subgoals)
if self._viewer is None:
import glfw
self._viewer = MjViewer(self._env.sim)
# glfw.set_key_callback(self._viewer.window, self.key_callback)
ant_pos = self._get_obs()['achieved_goal']
self._viewer.add_marker(pos=[ant_pos[0], ant_pos[1], 1], label=("ant"+str(ant_pos)))
self._viewer.render()
def close(self):
self._viewer = None | [
"itertools.islice",
"collections.deque",
"importlib.import_module",
"gym.spaces.Box",
"numpy.squeeze",
"numpy.array",
"numpy.square",
"mujoco_py.MjViewer"
] | [((179, 254), 'importlib.import_module', 'importlib.import_module', (["('..' + task_id + '.design_agent_and_env')", '__name__'], {}), "('..' + task_id + '.design_agent_and_env', __name__)\n", (202, 254), False, 'import importlib\n'), ((322, 352), 'numpy.array', 'np.array', (['env.goal_space_train'], {}), '(env.goal_space_train)\n', (330, 352), True, 'import numpy as np\n'), ((962, 1037), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'env.subgoal_bounds[:, 0]', 'high': 'env.subgoal_bounds[:, 1]'}), '(low=env.subgoal_bounds[:, 0], high=env.subgoal_bounds[:, 1])\n', (976, 1037), False, 'import gym\n'), ((1330, 1346), 'collections.deque', 'deque', ([], {'maxlen': '(50)'}), '(maxlen=50)\n', (1335, 1346), False, 'from collections import deque\n'), ((1375, 1391), 'collections.deque', 'deque', ([], {'maxlen': '(50)'}), '(maxlen=50)\n', (1380, 1391), False, 'from collections import deque\n'), ((3671, 3694), 'mujoco_py.MjViewer', 'MjViewer', (['self._env.sim'], {}), '(self._env.sim)\n', (3679, 3694), False, 'from mujoco_py import MjViewer\n'), ((510, 560), 'gym.spaces.Box', 'gym.spaces.Box', (['goal_space[:, 0]', 'goal_space[:, 1]'], {}), '(goal_space[:, 0], goal_space[:, 1])\n', (524, 560), False, 'import gym\n'), ((589, 639), 'gym.spaces.Box', 'gym.spaces.Box', (['goal_space[:, 0]', 'goal_space[:, 1]'], {}), '(goal_space[:, 0], goal_space[:, 1])\n', (603, 639), False, 'import gym\n'), ((671, 746), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'env.subgoal_bounds[:, 0]', 'high': 'env.subgoal_bounds[:, 1]'}), '(low=env.subgoal_bounds[:, 0], high=env.subgoal_bounds[:, 1])\n', (685, 746), False, 'import gym\n'), ((773, 863), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': 'env.initial_state_space[:, 0]', 'high': 'env.initial_state_space[:, 1]'}), '(low=env.initial_state_space[:, 0], high=env.\n initial_state_space[:, 1])\n', (787, 863), False, 'import gym\n'), ((1118, 1145), 'numpy.array', 'np.array', (['env.action_bounds'], {}), '(env.action_bounds)\n', (1126, 1145), True, 'import numpy as np\n'), ((2936, 2967), 'itertools.islice', 'islice', (['self.last_obs', 'None', '(49)'], {}), '(self.last_obs, None, 49)\n', (2942, 2967), False, 'from itertools import islice\n'), ((3004, 3039), 'itertools.islice', 'islice', (['self.last_actions', 'None', '(49)'], {}), '(self.last_actions, None, 49)\n', (3010, 3039), False, 'from itertools import islice\n'), ((3460, 3479), 'numpy.squeeze', 'np.squeeze', (['subgoal'], {}), '(subgoal)\n', (3470, 3479), True, 'import numpy as np\n'), ((1084, 1111), 'numpy.array', 'np.array', (['env.action_bounds'], {}), '(env.action_bounds)\n', (1092, 1111), True, 'import numpy as np\n'), ((1877, 1916), 'numpy.square', 'np.square', (['(achieved_goal - desired_goal)'], {}), '(achieved_goal - desired_goal)\n', (1886, 1916), True, 'import numpy as np\n')] |
import numpy as np
import datetime
import pandas as pd
from operator import itemgetter
from copy import deepcopy
class BotTester(object):
"""The BotTester object is used to evaluate how close a collection of bot's picks match human picks.
This can be used in the following manner:
tester = BotTester(drafts)
tester.evaluate_bots([bot], ["SGD"])
tester.write_rating_dict()
"""
def __init__(self, drafts):
"""Create a new BotTester instance.
Fields:
self.drafts - a collection of multiple draft objects (list of list of list of cardnames)
self.correct - DataFrame of all bots' correct choices compared to human picks
        self.fuzzy_correct - DataFrame of all bots' correct choices (if human pick in top 3 bot picks)
        self.rank_error - DataFrame of the rank each bot assigned to the human's pick
        self.card_acc - DataFrame of per-card accuracy metrics for all bots
:param drafts: Attach a set of drafts to the BotTester
"""
before = datetime.datetime.now()
self.drafts = drafts
self.n_packs = len(drafts)*45
self.correct = pd.DataFrame(columns = ['draft_num', 'pick_num', 'human_pick'], index = range(self.n_packs))
self.fuzzy_correct = pd.DataFrame(columns = ['draft_num', 'pick_num', 'human_pick'], index = range(self.n_packs))
self.rank_error = pd.DataFrame(columns = ['draft_num', 'pick_num', 'human_pick'], index = range(self.n_packs))
self.card_acc = pd.DataFrame(columns = ['human_pick'])
print("Initialization time taken: " + str(datetime.datetime.now() - before))
def evaluate_bots(self, bots, bot_names):
"""Evaluates accuracy and fuzzy accuracy of a list of bots.
"Correct" is whether or not the bot's top choice matched the human's top choice.
"Fuzzy correct" is whether or not the human's top choice was in the bot's top 3 choices.
These values are stored the DataFrames acc and fuzz_acc for all bots.
:param bots: List of bots that all inherit from "bot.py"
:param bot_names: List of bot names (strings) of the same size as the list of bots.
"""
# Checks if we need to initialize dataframes
initialize = np.isnan(self.correct.iloc[0,2])
# Builds up static values as lists to later add to dataframes
draft_num_list = [None]*self.n_packs
pick_num_list = [None]*self.n_packs
human_pick_list = [None]*self.n_packs
# Fills in dataframes of correct choices
temp_names = []
before = datetime.datetime.now()
static_cols = ['draft_num', 'pick_num', 'human_pick']
for bot_counter in range(len(bots)): # AKh: better to rename to iBot
bot = bots[bot_counter]
all_correct = [None]*self.n_packs
all_fuzzy = all_correct.copy()
all_rank_error = all_correct.copy()
pack_counter = 0
for draft_num in range(len(self.drafts)):
draft = self.drafts[draft_num]
collection = []
for pick_num in range(len(draft)):
pack = draft[pick_num]
# Stores draft and pick number in dataframes if uninitialized
if initialize:
draft_num_list[pack_counter] = draft_num + 1
pick_num_list[pack_counter] = pick_num + 1
human_pick_list[pack_counter] = pack[0]
# Gets bot ranking on the current pack
pack_rank = bot.rank_pack([pack, collection])
collection.append(bot.get_top_pick(pack_rank))
# Gets top-one and top-three accuracy for the current pack
exact_correct = self.is_bot_correct(pack, pack_rank)
fuzzy_correct = self.is_bot_correct(pack, pack_rank, fuzzy = True)
rank_error = self.get_rank_error(pack, pack_rank)
# Stores accuracy in dataframes
all_correct[pack_counter] = exact_correct[1]
all_fuzzy[pack_counter] = fuzzy_correct[1]
all_rank_error[pack_counter] = rank_error[1]
pack_counter += 1
# Only initializes dataframe values once
if initialize:
self.correct['draft_num'] = draft_num_list
self.correct['pick_num'] = pick_num_list
self.correct['human_pick'] = human_pick_list
self.fuzzy_correct['draft_num'] = draft_num_list
self.fuzzy_correct['pick_num'] = pick_num_list
self.fuzzy_correct['human_pick'] = human_pick_list
self.rank_error['draft_num'] = draft_num_list
self.rank_error['pick_num'] = pick_num_list
self.rank_error['human_pick'] = human_pick_list
initialize = False
# Stores accuracy info in a single column of existing dataframes
bot_name = bot_names[bot_counter]
self.correct[bot_name] = all_correct
self.fuzzy_correct[bot_name] = all_fuzzy
self.rank_error[bot_name] = all_rank_error
current = datetime.datetime.now()
print(bot_name + " time taken: " + str(current - before))
before = current
# Fills in dataframes of per-card accuracies
unique_cards = np.sort(self.correct['human_pick'].unique())
self.card_acc['human_pick'] = unique_cards # All card names; human_pick is just where they came from
for bot_name in bot_names:
accuracies = []
for human_pick in unique_cards:
all_picks = self.correct.loc[self.correct['human_pick'] == human_pick]
accuracies.append(all_picks[bot_name].sum() / all_picks.shape[0])
self.card_acc[bot_name] = accuracies
def write_evaluations(self, exact_filename = "output_files/exact_correct.tsv", fuzzy_filename = "output_files/fuzzy_correct.tsv",
rank_error_filename = "output_files/rank_error.tsv", acc_filename = "output_files/card_accuracies.tsv"):
"""Writes correctness and accuracy DataFrames to filenames.
"""
self.correct.to_csv(exact_filename, sep = "\t", index = False)
print("Wrote correct to: " + str(exact_filename))
self.fuzzy_correct.to_csv(fuzzy_filename, sep = "\t", index = False)
print("Wrote fuzzy_correct to: " + str(fuzzy_filename))
self.rank_error.to_csv(rank_error_filename, sep = "\t", index = False)
print("Wrote rank_error to: " + str(rank_error_filename))
self.card_acc.to_csv(acc_filename, sep = "\t", index = False)
print("Wrote card_acc to: " + str(acc_filename))
def report_evaluations(self):
        '''Reports minimal info on bot evaluation results in the notebook;
        good for quick troubleshooting.'''
        # numeric_only skips the string 'human_pick' column
        print(self.correct.mean(numeric_only=True))
def is_bot_correct(self, pack, pack_rank, fuzzy = False):
""" Checks whether or not a bot's pick matches a human's pick.
Returns a tuple of (cardname, bot_correct) for whether or not the
bot's top choice matched the human's choice. If fuzzy = True, then
instead the bot is correct if the human's choice is in bot's top 3
"""
bot_correct = 0
human_pick = pack[0]
pack_rank = sorted(pack_rank, key = pack_rank.get, reverse = True)
if not fuzzy:
bot_pick = pack_rank[0]
if human_pick == bot_pick:
bot_correct = 1
elif fuzzy:
for i in range(min(len(pack_rank), 3)):
bot_pick = pack_rank[i]
if human_pick == bot_pick:
bot_correct = 1
return (human_pick, bot_correct)
def get_rank_error(self, pack, pack_rank):
""" Checks the rank error between a bot pick and a human pick.
Returns a tuple of (cardname, rank_error) for the rank of the human's choice.
"""
rank_error = 0
human_pick = pack[0]
pack_rank = sorted(pack_rank, key = pack_rank.get, reverse = True)
for card in pack_rank:
if card == human_pick:
break
rank_error += 1
return (pack[0], rank_error)
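    # Worked example of the three metrics above (hypothetical pack; the human
    # pick is pack[0] by convention):
    #   pack = ['A', 'B', 'C'];  pack_rank = {'A': 0.2, 'B': 0.9, 'C': 0.5}
    #   is_bot_correct(pack, pack_rank)             -> ('A', 0)  # top pick is 'B'
    #   is_bot_correct(pack, pack_rank, fuzzy=True) -> ('A', 1)  # 'A' in top 3
    #   get_rank_error(pack, pack_rank)             -> ('A', 2)  # 'A' ranks third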
| [
"pandas.DataFrame",
"datetime.datetime.now",
"numpy.mean",
"numpy.isnan"
] | [((986, 1009), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1007, 1009), False, 'import datetime\n'), ((1458, 1494), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['human_pick']"}), "(columns=['human_pick'])\n", (1470, 1494), True, 'import pandas as pd\n'), ((2216, 2249), 'numpy.isnan', 'np.isnan', (['self.correct.iloc[0, 2]'], {}), '(self.correct.iloc[0, 2])\n', (2224, 2249), True, 'import numpy as np\n'), ((2546, 2569), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2567, 2569), False, 'import datetime\n'), ((5221, 5244), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5242, 5244), False, 'import datetime\n'), ((6971, 6992), 'numpy.mean', 'np.mean', (['self.correct'], {}), '(self.correct)\n', (6978, 6992), True, 'import numpy as np\n'), ((1547, 1570), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1568, 1570), False, 'import datetime\n')] |
"""
Do a likelihood fit. The class MultiNestSampler is used for fitting,
applying the Bayesian nested-sampling algorithm MultiNest
"""
from __future__ import absolute_import, unicode_literals
import datetime
import json
import os
import shutil
import tempfile
from warnings import warn
import corner
import matplotlib.pyplot as plt
import numpy as np
from scipy import special as spsp
import dddm
import typing as ty
from immutabledict import immutabledict
export, __all__ = dddm.exporter()
@export
class MultiNestSampler(dddm.StatModel):
def __init__(self,
wimp_mass: ty.Union[float, int],
cross_section: ty.Union[float, int],
spectrum_class: ty.Union[dddm.DetectorSpectrum,
dddm.GenSpectrum],
prior: dict,
tmp_folder: str,
results_dir: str = None,
fit_parameters=('log_mass', 'log_cross_section', 'v_0', 'v_esc', 'density', 'k'),
detector_name=None,
verbose=False,
notes='default',
nlive=1024,
tol=0.1,
):
super().__init__(wimp_mass=wimp_mass,
cross_section=cross_section,
spectrum_class=spectrum_class,
prior=prior,
tmp_folder=tmp_folder,
fit_parameters=fit_parameters,
detector_name=detector_name,
verbose=verbose,
notes=notes,
)
self.results_dir = results_dir
self.config.update(
{'tol': tol, # Tolerance for sampling
'nlive': nlive, # number of live points
})
self.log_dict = {
'did_run': False,
'saved_in': None,
'tmp_dir': tmp_folder,
}
self.result = False
def check_did_run(self):
if not self.log_dict['did_run']:
self.log.info('did not run yet, lets fire it up!')
self.run()
else:
self.log.info('did run')
    def check_did_save(self):
        if self.log_dict['saved_in'] is None:
            self.log.info(
                "did not save yet, we don't want to lose our results so better do it now"
            )
            self.save_results()
def log_probability_nested(self, parameter_vals, parameter_names):
"""
        :param parameter_vals: the values of the parameters that are being varied
        :param parameter_names: the names of the parameter_vals
:return:
"""
self.log.debug('there we go! Find that log probability')
evaluated_rate = self.eval_spectrum(parameter_vals, parameter_names)
ll = dddm.statistics.log_likelihood(self.benchmark_values, evaluated_rate)
if np.isnan(ll):
raise ValueError(f"Returned NaN from likelihood. ll = {ll}")
self.log.debug('found it! returning the log likelihood')
return ll
def log_prior_transform_nested(self, x, x_name):
self.log.debug(
'doing some transformations for nestle/multinest to read the priors'
)
this_prior = self.config['prior'][x_name]
prior_type = this_prior['prior_type']
if prior_type == 'flat':
a, b = this_prior['param']
# Prior transform of a flat prior is a simple line.
return x * (b - a) + a
if prior_type == 'gauss':
# Get the range from the config file
a, b = this_prior['range']
m, s = this_prior['param']
            # Here the prior transform is being constructed and shifted. This may
            # not seem trivial; see the worked sketch after this method, or request
            # a notebook from the developer(s) where this is explained.
aprime = spsp.ndtr((a - m) / s)
bprime = spsp.ndtr((b - m) / s)
xprime = x * (bprime - aprime) + aprime
return m + s * spsp.ndtri(xprime)
raise ValueError(f"unknown prior type '{prior_type}'")
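    # Worked sketch of the truncated-Gaussian transform above (illustrative):
    # with unit-cube sample x in [0, 1], mean m, width s and range (a, b),
    #   x' = x * (Phi((b - m) / s) - Phi((a - m) / s)) + Phi((a - m) / s)
    #   theta = m + s * Phi^{-1}(x')
    # so x = 0 maps to a and x = 1 maps to b, i.e. a Gaussian truncated to (a, b).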
def _log_probability_nested(self, theta):
"""warp log_prior_transform_nested"""
ndim = len(theta)
return self.log_probability_nested(
theta, self.known_parameters[:ndim])
def _log_prior_transform_nested(self, theta):
result = [
self.log_prior_transform_nested(val, self.known_parameters[i])
for i, val in enumerate(theta)]
return np.array(result)
def _print_before_run(self):
self.log.warning(
f"""
--------------------------------------------------
{dddm.utils.now()}\n\tFinal print of all of the set options:
self.log = {self.log}
self.result = {self.result}
self.benchmark_values = {np.array(self.benchmark_values)}
self.config = {self.config}
--------------------------------------------------
"""
)
def run(self):
self._fix_parameters()
self._print_before_run()
try:
from pymultinest.solve import run, Analyzer, solve
except ModuleNotFoundError as e:
raise ModuleNotFoundError(
'package pymultinest not found. See README') from e
n_dims = len(self.config["fit_parameters"])
tol = self.config['tol'] # the stopping criterion
save_at = self.get_save_dir()
self.log.warning(f'start_fit for {n_dims} parameters')
start = datetime.datetime.now()
# Multinest saves output to a folder. First write to the tmp folder,
# move it to the results folder later
_tmp_folder = self.get_save_dir()
save_at_temp = os.path.join(_tmp_folder, 'multinest')
solve_multinest(
LogLikelihood=self._log_probability_nested, # SafeLoglikelihood,
Prior=self._log_prior_transform_nested, # SafePrior,
n_live_points=self.config['nlive'],
n_dims=n_dims,
outputfiles_basename=save_at_temp,
verbose=True,
evidence_tolerance=tol,
# null_log_evidence=dddm.statistics.LL_LOW_BOUND,
max_iter=self.config.get('max_iter', 0),
)
self.result_file = save_at_temp
# Open a save-folder after successful running multinest. Move the
# multinest results there.
dddm.utils.check_folder_for_file(save_at)
end = datetime.datetime.now()
dt = (end - start).total_seconds()
self.log.info(f'fit_done in {dt} s ({dt / 3600} h)')
self.log_dict['did_run'] = True
# release the config
self.config = dddm.utils._immutable_to_dict(self.config)
self.config['fit_time'] = dt
self.log.info('Finished with running Multinest!')
def get_summary(self):
self.log.info(
"getting the summary (or at least trying) let's first see if I did run"
)
self.check_did_run()
# keep a dictionary of all the results
resdict = {}
# Do the import of multinest inside the class such that the package can be
# loaded without multinest
try:
from pymultinest.solve import run, Analyzer, solve
except ModuleNotFoundError:
raise ModuleNotFoundError(
'package pymultinest not found. See README for installation')
self.log.info('start analyzer of results')
analyzer = Analyzer(len(self.config['fit_parameters']),
outputfiles_basename=self.result_file)
# Taken from multinest.solve
self.result = analyzer.get_stats()
samples = analyzer.get_equal_weighted_posterior()[:, :-1]
self.log.info('parameter values:')
for name, col in zip(self.config['fit_parameters'],
samples.transpose()):
self.log.info(
'%15s : %.3f +- %.3f' %
(name, col.mean(), col.std()))
resdict[name + '_fit_res'] = (
'{0:5.2f} +/- {1:5.2f}'.format(col.mean(), col.std()))
if 'log_' in name:
resdict[name[4:] + '_fit_res'] = '%.3g +/- %.2g' % (
10. ** col.mean(), 10. ** (col.mean()) * np.log(10.) * col.std())
self.log.info(f'\t {name[4:]},'
f' {resdict[name[4:] + "_fit_res"]}')
resdict['best_fit'] = np.mean(samples.transpose(), axis=1)
print(resdict['best_fit'])
resdict['cov_matrix'] = np.cov(samples.transpose())
print(resdict['cov_matrix'])
resdict['n_samples'] = len(samples.transpose()[0])
# Pass the samples to the self.result to be saved.
self.result['samples'] = samples
self.log.info('Alright we got all the info we need')
return resdict
def get_save_dir(self, force_index=False, _hash=None) -> str:
saved_in = self.log_dict['saved_in']
saved_ok = isinstance(saved_in, str) and os.path.exists(saved_in)
if saved_ok and not force_index:
return saved_in
target_save = dddm.context.open_save_dir(
f'nes_{self.__class__.__name__[:3]}',
base_dir=self.results_dir,
force_index=force_index,
_hash=_hash)
self.log_dict['saved_in'] = target_save
self.log.info(f'get_save_dir\tsave_dir = {target_save}')
return target_save
def save_results(self, force_index=False):
self.log.info('Saving results after checking we did run')
# save fit parameters to config
self.check_did_run()
save_dir = self.get_save_dir(force_index=force_index)
fit_summary = self.get_summary()
self.log.info(f'storing in {save_dir}')
# save the config, chain and flattened chain
pid_id = 'pid' + str(os.getpid()) + '_'
with open(os.path.join(save_dir, f'{pid_id}config.json'), 'w') as file:
json.dump(convert_dic_to_savable(self.config), file, indent=4)
with open(os.path.join(save_dir, f'{pid_id}res_dict.json'), 'w') as file:
json.dump(convert_dic_to_savable(fit_summary), file, indent=4)
np.save(
os.path.join(save_dir, f'{pid_id}config.npy'),
convert_dic_to_savable(self.config))
np.save(os.path.join(save_dir, f'{pid_id}res_dict.npy'),
convert_dic_to_savable(fit_summary))
for col in self.result.keys():
if col == 'samples' or not isinstance(self.result[col], dict):
if col == 'samples':
# in contrast to nestle, multinest returns the weighted
# samples.
store_at = os.path.join(save_dir,
f'{pid_id}weighted_samples.npy')
else:
store_at = os.path.join(
save_dir,
pid_id + col + '.npy')
np.save(store_at, self.result[col])
else:
np.save(os.path.join(save_dir, pid_id + col + '.npy'),
convert_dic_to_savable(self.result[col]))
if 'logging' in self.config:
store_at = os.path.join(save_dir,
self.config['logging'].split('/')[-1])
shutil.copy(self.config['logging'], store_at)
self.log.info('save_results::\tdone_saving')
def show_corner(self):
self.check_did_save()
save_dir = self.log_dict['saved_in']
combined_results = load_multinest_samples_from_file(save_dir)
multinest_corner(combined_results, save_dir)
self.log.info('Enjoy the plot. Maybe you want to save it too?')
def convert_dic_to_savable(config):
result = config.copy()
if isinstance(config, immutabledict):
result = dict(config.items())
for key, value in result.items():
if dddm.utils.is_savable_type(value):
continue
if isinstance(value, (dict, immutabledict)):
result[key] = convert_dic_to_savable(result[key])
elif isinstance(value, np.ndarray):
result[key] = value.tolist()
elif isinstance(value, np.integer):
result[key] = int(value)
elif isinstance(value, np.floating):
result[key] = float(value)
else:
result[key] = str(result[key])
return result
def load_multinest_samples_from_file(load_dir):
keys = os.listdir(load_dir)
keys = [key for key in keys if os.path.isfile(os.path.join(load_dir, key))]
result = {}
for key in keys:
if '.npy' in key:
naked_key = key.split('.npy')[0]
naked_key = do_strip_from_pid(naked_key)
tmp_res = np.load(os.path.join(load_dir, key), allow_pickle=True)
if naked_key in ['config', 'res_dict']:
result[naked_key] = tmp_res.item()
else:
result[naked_key] = tmp_res
return result
def do_strip_from_pid(string):
"""
remove PID identifier from a string
"""
if 'pid' not in string:
return string
new_key = string.split("_")
new_key = "_".join(new_key[1:])
return new_key
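# Example: do_strip_from_pid('pid1234_res_dict') -> 'res_dict'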
def _get_info(result, _result_key):
info = r"$M_\chi}$=%.2f" % 10. ** np.float64(result['config']['log_mass'])
for prior_key in result['config']['prior'].keys():
if (prior_key in result['config']['prior'] and
'mean' in result['config']['prior'][prior_key]):
mean = result['config']['prior'][prior_key]['mean']
info += f"\n{prior_key} = {mean}"
nposterior, ndim = np.shape(result[_result_key])
info += "\nnposterior = %s" % nposterior
for str_inf in ['detector', 'notes', 'start', 'fit_time', 'poisson',
'n_energy_bins']:
if str_inf in result['config']:
info += f"\n{str_inf} = %s" % result['config'][str_inf]
if str_inf == 'start':
info = info[:-7]
if str_inf == 'fit_time':
info += 's (%.1f h)' % (float(result['config'][str_inf]) / 3600.)
return info, ndim
def multinest_corner(
result,
save=False,
_result_key='weighted_samples',
_weights=False):
info, ndim = _get_info(result, _result_key)
labels = dddm.statistics.get_param_list()[:ndim]
truths = []
for prior_name in dddm.statistics.get_prior_list()[:ndim]:
if prior_name == "rho_0":
prior_name = 'density'
if prior_name in result['config']:
truths.append(result['config'][prior_name])
else:
truths.append(result['config']['prior'][prior_name]['mean'])
weight_kwargs = dict(weights=result['weights']) if _weights else {}
fig = corner.corner(
result[_result_key],
**weight_kwargs,
labels=labels,
range=[0.99999, 0.99999, 0.99999, 0.99999, 0.99999][:ndim],
truths=truths,
show_titles=True)
fig.axes[1].set_title('Fit title', loc='left')
fig.axes[1].text(0, 1, info, verticalalignment='top')
if save:
plt.savefig(f"{save}corner.png", dpi=200)
def solve_multinest(LogLikelihood, Prior, n_dims, **kwargs):
"""
See PyMultinest Solve() for documentation
"""
from pymultinest.solve import run, Analyzer
kwargs['n_dims'] = n_dims
files_temporary = False
if 'outputfiles_basename' not in kwargs:
files_temporary = True
tempdir = tempfile.mkdtemp('pymultinest')
kwargs['outputfiles_basename'] = tempdir + '/'
outputfiles_basename = kwargs['outputfiles_basename']
def SafePrior(cube, ndim, nparams):
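# MultiNest passes a ctypes cube of unit-interval samples; copy it into
# a numpy array, apply the prior transform, and write the transformed
# values back into the cube in place.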
a = np.array([cube[i] for i in range(n_dims)])
b = Prior(a)
for i in range(n_dims):
cube[i] = b[i]
def SafeLoglikelihood(cube, ndim, nparams, lnew):
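# Guard the sampler against NaN/inf likelihoods by substituting a very
# low but finite value, so the run can continue.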
a = np.array([cube[i] for i in range(n_dims)])
likelihood = float(LogLikelihood(a))
if not np.isfinite(likelihood):
warn(f'WARNING: loglikelihood not finite: {likelihood}\n'
f'for parameters {a}, returned very low value instead')
return -dddm.statistics.LL_LOW_BOUND
return likelihood
kwargs['LogLikelihood'] = SafeLoglikelihood
kwargs['Prior'] = SafePrior
run(**kwargs)
analyzer = Analyzer(
n_dims, outputfiles_basename=outputfiles_basename)
try:
stats = analyzer.get_stats()
except ValueError as e:
# This can happen during testing if we limit the number of iterations
warn(f'Cannot load output file: {e}')
stats = {'nested sampling global log-evidence': -1,
'nested sampling global log-evidence error': -1
}
samples = analyzer.get_equal_weighted_posterior()[:, :-1]
return dict(logZ=stats['nested sampling global log-evidence'],
logZerr=stats['nested sampling global log-evidence error'],
samples=samples,
)
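# Minimal usage sketch (hypothetical unit-Gaussian likelihood and flat
# prior, not part of this module):
#   res = solve_multinest(
#       LogLikelihood=lambda t: -0.5 * float(np.sum(t ** 2)),
#       Prior=lambda x: x, n_dims=2,
#       n_live_points=100, verbose=False)
#   print(res['logZ'], res['logZerr'], res['samples'].shape)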
| [
"dddm.statistics.get_prior_list",
"numpy.log",
"dddm.exporter",
"numpy.array",
"numpy.isfinite",
"corner.corner",
"pymultinest.solve.Analyzer",
"numpy.save",
"os.path.exists",
"os.listdir",
"dddm.utils._immutable_to_dict",
"dddm.statistics.log_likelihood",
"numpy.float64",
"dddm.statistics... | [((473, 488), 'dddm.exporter', 'dddm.exporter', ([], {}), '()\n', (486, 488), False, 'import dddm\n'), ((12693, 12713), 'os.listdir', 'os.listdir', (['load_dir'], {}), '(load_dir)\n', (12703, 12713), False, 'import os\n'), ((13868, 13897), 'numpy.shape', 'np.shape', (['result[_result_key]'], {}), '(result[_result_key])\n', (13876, 13897), True, 'import numpy as np\n'), ((15015, 15183), 'corner.corner', 'corner.corner', (['result[_result_key]'], {'labels': 'labels', 'range': '[0.99999, 0.99999, 0.99999, 0.99999, 0.99999][:ndim]', 'truths': 'truths', 'show_titles': '(True)'}), '(result[_result_key], **weight_kwargs, labels=labels, range=[\n 0.99999, 0.99999, 0.99999, 0.99999, 0.99999][:ndim], truths=truths,\n show_titles=True)\n', (15028, 15183), False, 'import corner\n'), ((16540, 16553), 'pymultinest.solve.run', 'run', ([], {}), '(**kwargs)\n', (16543, 16553), False, 'from pymultinest.solve import run, Analyzer, solve\n'), ((16570, 16629), 'pymultinest.solve.Analyzer', 'Analyzer', (['n_dims'], {'outputfiles_basename': 'outputfiles_basename'}), '(n_dims, outputfiles_basename=outputfiles_basename)\n', (16578, 16629), False, 'from pymultinest.solve import run, Analyzer, solve\n'), ((2930, 2999), 'dddm.statistics.log_likelihood', 'dddm.statistics.log_likelihood', (['self.benchmark_values', 'evaluated_rate'], {}), '(self.benchmark_values, evaluated_rate)\n', (2960, 2999), False, 'import dddm\n'), ((3011, 3023), 'numpy.isnan', 'np.isnan', (['ll'], {}), '(ll)\n', (3019, 3023), True, 'import numpy as np\n'), ((4661, 4677), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (4669, 4677), True, 'import numpy as np\n'), ((5703, 5726), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5724, 5726), False, 'import datetime\n'), ((5916, 5954), 'os.path.join', 'os.path.join', (['_tmp_folder', '"""multinest"""'], {}), "(_tmp_folder, 'multinest')\n", (5928, 5954), False, 'import os\n'), ((6593, 6634), 'dddm.utils.check_folder_for_file', 'dddm.utils.check_folder_for_file', (['save_at'], {}), '(save_at)\n', (6625, 6634), False, 'import dddm\n'), ((6649, 6672), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6670, 6672), False, 'import datetime\n'), ((6868, 6910), 'dddm.utils._immutable_to_dict', 'dddm.utils._immutable_to_dict', (['self.config'], {}), '(self.config)\n', (6897, 6910), False, 'import dddm\n'), ((9331, 9465), 'dddm.context.open_save_dir', 'dddm.context.open_save_dir', (['f"""nes_{self.__class__.__name__[:3]}"""'], {'base_dir': 'self.results_dir', 'force_index': 'force_index', '_hash': '_hash'}), "(f'nes_{self.__class__.__name__[:3]}', base_dir=\n self.results_dir, force_index=force_index, _hash=_hash)\n", (9357, 9465), False, 'import dddm\n'), ((12136, 12169), 'dddm.utils.is_savable_type', 'dddm.utils.is_savable_type', (['value'], {}), '(value)\n', (12162, 12169), False, 'import dddm\n'), ((14558, 14590), 'dddm.statistics.get_param_list', 'dddm.statistics.get_param_list', ([], {}), '()\n', (14588, 14590), False, 'import dddm\n'), ((14636, 14668), 'dddm.statistics.get_prior_list', 'dddm.statistics.get_prior_list', ([], {}), '()\n', (14666, 14668), False, 'import dddm\n'), ((15354, 15395), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{save}corner.png"""'], {'dpi': '(200)'}), "(f'{save}corner.png', dpi=200)\n", (15365, 15395), True, 'import matplotlib.pyplot as plt\n'), ((15721, 15752), 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""pymultinest"""'], {}), "('pymultinest')\n", (15737, 15752), False, 'import tempfile\n'), 
((4017, 4039), 'scipy.special.ndtr', 'spsp.ndtr', (['((a - m) / s)'], {}), '((a - m) / s)\n', (4026, 4039), True, 'from scipy import special as spsp\n'), ((4061, 4083), 'scipy.special.ndtr', 'spsp.ndtr', (['((b - m) / s)'], {}), '((b - m) / s)\n', (4070, 4083), True, 'from scipy import special as spsp\n'), ((9215, 9239), 'os.path.exists', 'os.path.exists', (['saved_in'], {}), '(saved_in)\n', (9229, 9239), False, 'import os\n'), ((10426, 10471), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{pid_id}config.npy"""'], {}), "(save_dir, f'{pid_id}config.npy')\n", (10438, 10471), False, 'import os\n'), ((10538, 10585), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{pid_id}res_dict.npy"""'], {}), "(save_dir, f'{pid_id}res_dict.npy')\n", (10550, 10585), False, 'import os\n'), ((11542, 11587), 'shutil.copy', 'shutil.copy', (["self.config['logging']", 'store_at'], {}), "(self.config['logging'], store_at)\n", (11553, 11587), False, 'import shutil\n'), ((13519, 13559), 'numpy.float64', 'np.float64', (["result['config']['log_mass']"], {}), "(result['config']['log_mass'])\n", (13529, 13559), True, 'import numpy as np\n'), ((16212, 16235), 'numpy.isfinite', 'np.isfinite', (['likelihood'], {}), '(likelihood)\n', (16223, 16235), True, 'import numpy as np\n'), ((16249, 16371), 'warnings.warn', 'warn', (['f"""WARNING: loglikelihood not finite: {likelihood}\nfor parameters {a}, returned very low value instead"""'], {}), '(\n f"""WARNING: loglikelihood not finite: {likelihood}\nfor parameters {a}, returned very low value instead"""\n )\n', (16253, 16371), False, 'from warnings import warn\n'), ((16799, 16836), 'warnings.warn', 'warn', (['f"""Cannot load output file: {e}"""'], {}), "(f'Cannot load output file: {e}')\n", (16803, 16836), False, 'from warnings import warn\n'), ((10103, 10149), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{pid_id}config.json"""'], {}), "(save_dir, f'{pid_id}config.json')\n", (10115, 10149), False, 'import os\n'), ((10258, 10306), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{pid_id}res_dict.json"""'], {}), "(save_dir, f'{pid_id}res_dict.json')\n", (10270, 10306), False, 'import os\n'), ((11181, 11216), 'numpy.save', 'np.save', (['store_at', 'self.result[col]'], {}), '(store_at, self.result[col])\n', (11188, 11216), True, 'import numpy as np\n'), ((12764, 12791), 'os.path.join', 'os.path.join', (['load_dir', 'key'], {}), '(load_dir, key)\n', (12776, 12791), False, 'import os\n'), ((12985, 13012), 'os.path.join', 'os.path.join', (['load_dir', 'key'], {}), '(load_dir, key)\n', (12997, 13012), False, 'import os\n'), ((4163, 4181), 'scipy.special.ndtri', 'spsp.ndtri', (['xprime'], {}), '(xprime)\n', (4173, 4181), True, 'from scipy import special as spsp\n'), ((4844, 4860), 'dddm.utils.now', 'dddm.utils.now', ([], {}), '()\n', (4858, 4860), False, 'import dddm\n'), ((5015, 5046), 'numpy.array', 'np.array', (['self.benchmark_values'], {}), '(self.benchmark_values)\n', (5023, 5046), True, 'import numpy as np\n'), ((10066, 10077), 'os.getpid', 'os.getpid', ([], {}), '()\n', (10075, 10077), False, 'import os\n'), ((10917, 10972), 'os.path.join', 'os.path.join', (['save_dir', 'f"""{pid_id}weighted_samples.npy"""'], {}), "(save_dir, f'{pid_id}weighted_samples.npy')\n", (10929, 10972), False, 'import os\n'), ((11070, 11115), 'os.path.join', 'os.path.join', (['save_dir', "(pid_id + col + '.npy')"], {}), "(save_dir, pid_id + col + '.npy')\n", (11082, 11115), False, 'import os\n'), ((11259, 11304), 'os.path.join', 'os.path.join', (['save_dir', "(pid_id + col + '.npy')"], {}), 
"(save_dir, pid_id + col + '.npy')\n", (11271, 11304), False, 'import os\n'), ((8470, 8482), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (8476, 8482), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import poisson_problem
import neural_networks
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.ticker as ticker
rc('font', **{'size':12, 'family':'serif', 'serif':['Computer Modern Roman']})
rc('text', usetex=True)
def draw_magnitude_int_loss(x_range, y_range, session, x_qual, y_qual):
x = np.linspace(x_range[0], x_range[1], x_qual)
y = np.linspace(y_range[0], y_range[1], y_qual)
mesh = np.array(np.meshgrid(x,y)).T.reshape(-1, 2)
f = problem.rhs(mesh)
loss_int_magnitude = np.sqrt(session.run(loss_int, feed_dict={int_var: mesh, sol_int:f}))
return np.reshape(loss_int_magnitude, (x_qual, y_qual))
def draw_magnitude_of_err_2d(x_range, y_range, exact_sol, x_qual, y_qual, neural_net_sol):
x = np.linspace(x_range[0], x_range[1], x_qual)
y = np.linspace(y_range[0], y_range[1], y_qual)
mesh = np.array(np.meshgrid(x,y)).T.reshape(-1, 2)
u_sol = exact_sol(mesh)
neural_net_sol_mesh = neural_net_sol(mesh.astype(np.float64)).eval()
err_vec = np.zeros(x_qual*y_qual)
for i in range(x_qual*y_qual):
err_vec[i] = np.sqrt((u_sol[i]-neural_net_sol_mesh[i])**2)
err_vec = np.reshape(err_vec, (x_qual, y_qual))
return err_vec
def fmt(x, pos):
a, b = '{:.2e}'.format(x).split('e')
b = int(b)
return r'${} \times 10^{{{}}}$'.format(a, b)
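# Example: fmt(0.000123, None) -> '$1.23 \times 10^{-4}$'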
NUM_STEPS = 1
NUM_INPUTS = 2
BATCHSIZE = 101*101
HIDDEN_UNITS = [16]
restore_name = 'test_model/1_layer_sq_loss_4000m_iter_20000'
problem = poisson_problem.poisson_2d()
neural_network = neural_networks.neural_network(NUM_INPUTS, 1, HIDDEN_UNITS)
int_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
bou_var = tf.placeholder(tf.float64, [None, NUM_INPUTS])
value_int = neural_network.value(int_var)
value_bou = neural_network.value(bou_var)
grad = neural_network.first_derivatives(int_var)
grad_grad= neural_network.second_derivatives(int_var)
sol_int = tf.placeholder(tf.float64, [None, 1])
sol_bou = tf.placeholder(tf.float64, [None, 1])
loss_int = tf.square(grad_grad[0]+grad_grad[1]+sol_int)
loss_bou = tf.square(value_bou-sol_bou)
loss = tf.reduce_mean(loss_int + loss_bou)
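# Physics-informed loss: the interior term penalizes the Poisson
# residual (u_x1x1 + u_x2x2 + f)^2, the boundary term the mismatch
# (u - g)^2 against the prescribed boundary values.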
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
saver.restore(sess, restore_name)
print("Model restored.")
err = draw_magnitude_of_err_2d(problem.range, problem.range, problem.velocity, 101, 101, neural_network.value)
plt.imshow(np.rot90(err), cmap='hot', interpolation='nearest', extent=[0.0,1.0,0.0,1.0], aspect='auto')
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
plt.colorbar(format=ticker.FuncFormatter(fmt))
plt.show()
err = draw_magnitude_int_loss(problem.range, problem.range, sess, 101, 101)
plt.imshow(np.rot90(err), cmap='hot', interpolation='nearest', extent=[0.0,1.0,0.0,1.0], aspect='auto')
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
plt.colorbar(format=ticker.FuncFormatter(fmt))
plt.show()
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"neural_networks.neural_network",
"matplotlib.rc",
"numpy.rot90",
"tensorflow.reduce_mean",
"numpy.reshape",
"matplotlib.ticker.FuncFormatter",
"tensorflow.placeholder",
"tensorflow.Session",
"matplotlib.pyplot.xlabel",
"numpy.linspace",
"tensorflow.... | [((184, 270), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'size': 12, 'family': 'serif', 'serif': [\n 'Computer Modern Roman']})\n", (186, 270), False, 'from matplotlib import rc\n'), ((263, 286), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (265, 286), False, 'from matplotlib import rc\n'), ((1490, 1518), 'poisson_problem.poisson_2d', 'poisson_problem.poisson_2d', ([], {}), '()\n', (1516, 1518), False, 'import poisson_problem\n'), ((1537, 1596), 'neural_networks.neural_network', 'neural_networks.neural_network', (['NUM_INPUTS', '(1)', 'HIDDEN_UNITS'], {}), '(NUM_INPUTS, 1, HIDDEN_UNITS)\n', (1567, 1596), False, 'import neural_networks\n'), ((1608, 1654), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', '[None, NUM_INPUTS]'], {}), '(tf.float64, [None, NUM_INPUTS])\n', (1622, 1654), True, 'import tensorflow as tf\n'), ((1666, 1712), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', '[None, NUM_INPUTS]'], {}), '(tf.float64, [None, NUM_INPUTS])\n', (1680, 1712), True, 'import tensorflow as tf\n'), ((1914, 1951), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', '[None, 1]'], {}), '(tf.float64, [None, 1])\n', (1928, 1951), True, 'import tensorflow as tf\n'), ((1962, 1999), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64', '[None, 1]'], {}), '(tf.float64, [None, 1])\n', (1976, 1999), True, 'import tensorflow as tf\n'), ((2012, 2060), 'tensorflow.square', 'tf.square', (['(grad_grad[0] + grad_grad[1] + sol_int)'], {}), '(grad_grad[0] + grad_grad[1] + sol_int)\n', (2021, 2060), True, 'import tensorflow as tf\n'), ((2068, 2098), 'tensorflow.square', 'tf.square', (['(value_bou - sol_bou)'], {}), '(value_bou - sol_bou)\n', (2077, 2098), True, 'import tensorflow as tf\n'), ((2104, 2139), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(loss_int + loss_bou)'], {}), '(loss_int + loss_bou)\n', (2118, 2139), True, 'import tensorflow as tf\n'), ((2149, 2182), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2180, 2182), True, 'import tensorflow as tf\n'), ((2191, 2207), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (2205, 2207), True, 'import tensorflow as tf\n'), ((366, 409), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', 'x_qual'], {}), '(x_range[0], x_range[1], x_qual)\n', (377, 409), True, 'import numpy as np\n'), ((415, 458), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', 'y_qual'], {}), '(y_range[0], y_range[1], y_qual)\n', (426, 458), True, 'import numpy as np\n'), ((637, 685), 'numpy.reshape', 'np.reshape', (['loss_int_magnitude', '(x_qual, y_qual)'], {}), '(loss_int_magnitude, (x_qual, y_qual))\n', (647, 685), True, 'import numpy as np\n'), ((784, 827), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', 'x_qual'], {}), '(x_range[0], x_range[1], x_qual)\n', (795, 827), True, 'import numpy as np\n'), ((833, 876), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', 'y_qual'], {}), '(y_range[0], y_range[1], y_qual)\n', (844, 876), True, 'import numpy as np\n'), ((1040, 1065), 'numpy.zeros', 'np.zeros', (['(x_qual * y_qual)'], {}), '(x_qual * y_qual)\n', (1048, 1065), True, 'import numpy as np\n'), ((1170, 1207), 'numpy.reshape', 'np.reshape', (['err_vec', '(x_qual, y_qual)'], {}), '(err_vec, (x_qual, y_qual))\n', (1180, 1207), True, 'import numpy as np\n'), ((2213, 2225), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2223, 2225), True, 'import tensorflow as 
tf\n'), ((2531, 2550), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$"""'], {}), "('$x_1$')\n", (2541, 2550), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_2$"""'], {}), "('$x_2$')\n", (2563, 2572), True, 'import matplotlib.pyplot as plt\n'), ((2623, 2633), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2631, 2633), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_1$"""'], {}), "('$x_1$')\n", (2828, 2837), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2859), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_2$"""'], {}), "('$x_2$')\n", (2850, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2910, 2920), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2918, 2920), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1161), 'numpy.sqrt', 'np.sqrt', (['((u_sol[i] - neural_net_sol_mesh[i]) ** 2)'], {}), '((u_sol[i] - neural_net_sol_mesh[i]) ** 2)\n', (1119, 1161), True, 'import numpy as np\n'), ((2437, 2450), 'numpy.rot90', 'np.rot90', (['err'], {}), '(err)\n', (2445, 2450), True, 'import numpy as np\n'), ((2724, 2737), 'numpy.rot90', 'np.rot90', (['err'], {}), '(err)\n', (2732, 2737), True, 'import numpy as np\n'), ((2595, 2620), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['fmt'], {}), '(fmt)\n', (2615, 2620), True, 'import matplotlib.ticker as ticker\n'), ((2882, 2907), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['fmt'], {}), '(fmt)\n', (2902, 2907), True, 'import matplotlib.ticker as ticker\n'), ((477, 494), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (488, 494), True, 'import numpy as np\n'), ((896, 913), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (907, 913), True, 'import numpy as np\n')] |
import torch
from torch.utils.data import Dataset
from .collaters import *
import json
import pickle
import os
import glob
import sys
from musa.ops import *
from .utils import *
import timeit
import struct
import numpy as np
import multiprocessing as mp
from sklearn.cluster import KMeans
import copy
def read_aco_file(spk_name, file_id, aco_dir):
spk_aco_dir = os.path.join(aco_dir, spk_name)
cc = read_bin_aco_file(os.path.join(spk_aco_dir, '{}.cc'.format(file_id)))
fv = read_bin_aco_file(os.path.join(spk_aco_dir, '{}.fv'.format(file_id)))
lf0 = read_bin_aco_file(os.path.join(spk_aco_dir, '{}.lf0'.format(file_id)))
fv = fv.reshape((-1, 1))
cc = cc.reshape((-1, 40))
# make lf0 interpolation and obtain u/v flag
i_lf0, uv = interpolation(lf0,
unvoiced_symbol=-10000000000.0)
i_lf0 = i_lf0.reshape(-1, 1)
uv = uv.reshape(-1, 1)
#print('cc shape: ', cc.shape)
# merge into aco structure
aco_data = np.concatenate((cc, fv, i_lf0, uv), axis=1)
return aco_data
def parse_lab_aco_correspondences(durs, aco_data):
""" Find the matching of acoustic frames to
duration boundaries.
An acoustic frame is within a phoneme if
>= 50% of the sliding window is within
the phoneme boundaries.
"""
# sampling rate
sr = 16000.
curr_dur_idx = 0
# set up curr boundary to be 0
# convert dur into samples, knowing
# sampling rate is 16kHz and dur is
# in seconds
#print('Parsing aco w/ durs: ', durs)
#print('Parsing aco w/ acos shape: ', len(aco_data))
cboundary = int(durs[curr_dur_idx] * sr)
# keep track of curr ph dur to compute reldur
curr_ph_dur = int(durs[curr_dur_idx] * sr)
# keep track of acumulated boundaries for reldur
acum_dur = 0
# set up current centroid of window
# in samples
wind_t = 0
wind_size = 320
wind_stride = 80
half_w = wind_size * .5
aco_seq_data = [[]]
# retrieve the tuples of relative durs (relative, absolute)
reldurs = [[]]
for aco_i in range(aco_data.shape[0]):
if wind_t >= cboundary and curr_dur_idx < (len(durs) - 1):
# window belongs to next phoneme, step on
aco_seq_data.append([])
reldurs.append([])
curr_dur_idx += 1
#print('wind_t > cboundary'
# ' ({}, {})'
# ''.format(wind_t,
# cboundary))
cboundary += int(durs[curr_dur_idx] * sr)
acum_dur += curr_ph_dur
curr_ph_dur = int(durs[curr_dur_idx] * sr)
#print('Moving cboundary to {}'.format(cboundary))
#print('durs len is: ', len(durs))
#print('last cboundary will be: ', int(sum(durs) * sr))
aco_seq_data[curr_dur_idx].append(aco_data[aco_i])
# compute reldur within current ph dur
reldur = (wind_t - acum_dur) / curr_ph_dur
reldurs[curr_dur_idx].append([reldur, curr_ph_dur / sr])
#print('Curr wind_t: {}, cboundary: {}, curr_dur_idx: '
# '{}, reldur: {}, curr_ph_dur: {},'
# 'curr_ph_dur / sr: {}'.format(wind_t,
# cboundary,
# curr_dur_idx,
# reldur,
# curr_ph_dur,
# curr_ph_dur / sr))
wind_t += wind_stride
return aco_seq_data, reldurs
def read_speaker_labs(spk_name, ids_list, lab_dir, lab_parser,
filter_by_dur=False, aco_dir=None):
parsed_lines = [] # maintain seq structure
parsed_tstamps = [] # maintain seq structure
if aco_dir is not None:
parsed_aco = [] # aco data if parsed
parsed_reldur = [] # reldur data
parse_timings = []
flat_tstamps = []
flat_lines = []
beg_t = timeit.default_timer()
#if filter_by_dur:
#log_file = open('/tmp/dur_filter.log', 'w')
for id_i, split_id in enumerate(ids_list, start=1):
spk_lab_dir = os.path.join(lab_dir, spk_name)
lab_f = os.path.join(spk_lab_dir, '{}.lab'.format(split_id))
with open(lab_f) as lf:
lab_lines = [l.rstrip() for l in lf.readlines()]
tstamps, parsed_lab = lab_parser(lab_lines)
if filter_by_dur:
filtered_lab = []
filtered_tstamps = []
# compute durs from timestamps to keep VALID phonemes
converted_durs = tstamps_to_dur(tstamps, True)
assert len(converted_durs) == len(parsed_lab), \
len(converted_durs)
for (plab, dur, tss) in zip(parsed_lab, converted_durs,
tstamps):
#print('dur=', dur)
if dur > 0:
#print('ACCEPTED with dur: ', dur)
filtered_lab.append(plab)
filtered_tstamps.append(tss)
#else:
#print('Filtered dur: ', dur)
#log_file.write('Filtred dur {} at file '
# '{}.lab\n'.format(dur,
# os.path.join(lab_dir,
# spk_name,
# split_id)))
flat_lines += filtered_lab
flat_tstamps += filtered_tstamps
parsed_tstamps.append(filtered_tstamps)
parsed_lines.append(filtered_lab)
a_durs = len(filtered_tstamps) / len(converted_durs)
#print('Ratio accepted durs: {}%'.format(a_durs * 100))
else:
parsed_tstamps.append(tstamps)
parsed_lines.append(parsed_lab)
flat_lines += parsed_lab
flat_tstamps += tstamps
if aco_dir is not None:
#print('split_id: ', split_id)
#print('parsed_tstamps: ', parsed_tstamps)
#print('parsed_tstamps[-1]: ', parsed_tstamps[-1])
# parse aco
parsed_durs = tstamps_to_dur(parsed_tstamps[-1], True)
aco_seq = read_aco_file(spk_name, split_id, aco_dir)
#print('Total read aco frames: ', aco_seq.shape)
aco_seq_data, \
seq_reldur = parse_lab_aco_correspondences(parsed_durs,
aco_seq)
parsed_aco.append(aco_seq_data)
parsed_reldur.append(seq_reldur)
#parse_timings.append(timeit.default_timer() - beg_t)
#print('Parsed spk-{} lab file {:5d}/{:5d}, mean time: {:.4f}'
# 's'.format(spk_name, id_i, len(ids_list),
# np.mean(parse_timings)),
# end='\n')
#beg_t = timeit.default_timer()
#log_file.close()
if aco_dir is None:
return (spk_name, parsed_tstamps, parsed_lines, flat_lines)
else:
return (spk_name, parsed_tstamps, parsed_lines, flat_lines,
parsed_aco, parsed_reldur)
def read_speaker_aco(spk_name, ids_list, aco_dir):
aco_data = None
parse_timings = []
beg_t = timeit.default_timer()
for id_i, split_id in enumerate(ids_list, start=1):
aco_data_ = read_aco_file(spk_name, split_id, aco_dir)
# merge into aco structure
if aco_data is None:
aco_data = aco_data_
else:
aco_data = np.concatenate((aco_data, aco_data_), axis=0)
parse_timings.append(timeit.default_timer() - beg_t)
print('Parsed spk-{} aco file {:5d}/{:5d}, mean time: {:.4f}'
's'.format(spk_name, id_i, len(ids_list),
np.mean(parse_timings)),
end='\n')
beg_t = timeit.default_timer()
return (spk_name, aco_data)
class TCSTAR(Dataset):
def __init__(self, spk_cfg_file, split, lab_dir,
lab_codebooks_path, force_gen=False,
ogmios_lab=True, parse_workers=4,
max_seq_len=None, batch_size=None,
max_spk_samples=None,
mulout=False,
q_classes=None,
trim_to_min=False,
forced_trim=None,
exclude_train_spks=[],
exclude_eval_spks=[]):
"""
# Arguments:
spk_cfg_file: config file to read a dict
split: 'train' 'valid' or 'test' split
lab_dir: root lab dir with spks within
lab_codebooks_path: codebooks file path dict
force_gen: flag to enforce re-generation of codebooks
and stats.
ogmios_lab: parse labs in ogmios format.
max_seq_len: if specified, batches are stateful-like
with max_seq_len time-steps per sample,
and batch_size is also required.
mulout: determines that speaker's data has to be
arranged in batches
trim_to_min: trim all speakers to same num_samples if
maxlen is applied (specially for MO).
forced_trim: max num of samples per speaker forced (this
has priority over trim_to_min counts)
"""
self.trim_to_min = trim_to_min
self.forced_trim = forced_trim
if max_seq_len is not None:
if batch_size is None:
raise ValueError('Please specify a batch size in '
' TCSTAR to arrange the stateful '
' sequences.')
elif trim_to_min:
print('WARNING: trim to min flag activated, but has no '
' effect because no max_seq_len specified.')
self.max_seq_len = max_seq_len
if q_classes is not None:
assert isinstance(q_classes, int), type(q_classes)
self.q_classes = q_classes
self.mulout = mulout
self.batch_size = batch_size
self.exclude_train_spks = exclude_train_spks
with open(spk_cfg_file, 'rb') as cfg_f:
# load speakers config paths
self.speakers = pickle.load(cfg_f)
self.all_speakers = copy.deepcopy(self.speakers)
if split == 'train':
# filter speakers in exclude list
for spk in self.all_speakers.keys():
if spk in exclude_train_spks:
print('Excluding speaker {} from train '
'split'.format(spk))
del self.speakers[spk]
if split == 'valid':
# filter speakers in exclude list
for spk in self.all_speakers.keys():
if spk in exclude_eval_spks:
print('Excluding speaker {} from valid '
'split'.format(spk))
del self.speakers[spk]
# store spk2idx
fspk = list(self.speakers.keys())[0]
if 'idx' not in self.speakers[fspk]:
print('Indexing speakers with their ids...')
# index speakers with integer ids
self.spk2idx = dict((sname, i) for i, sname in
enumerate(self.speakers.keys()))
for spk,idx in self.spk2idx.items():
self.speakers[spk]['idx'] = idx
print('Created ids: ', json.dumps(self.spk2idx, indent=2))
else:
# load existing indexation
self.spk2idx = {}
for spk in self.speakers.keys():
self.spk2idx[spk] = self.speakers[spk]['idx']
print('Loaded ids: ', json.dumps(self.spk2idx, indent=2))
self.idx2spk = dict((v, k) for k, v in self.spk2idx.items())
self.split = split
self.lab_dir = lab_dir
self.ogmios_lab = ogmios_lab
self.force_gen = force_gen
self.parse_workers = parse_workers
self.lab_codebooks_path = lab_codebooks_path
self.max_spk_samples = max_spk_samples
# call load_lab
self.load_lab()
# save stats in case anything changed
with open(spk_cfg_file, 'wb') as cfg_f:
# update original speakers, excluded ones in
# train will be unmodified
for spk, spkval in self.speakers.items():
self.all_speakers[spk] = spkval
# load speakers config paths
pickle.dump(self.all_speakers, cfg_f)
def load_lab(self):
raise NotImplementedError
def parse_labs(self, lab_parser, compute_dur_stats=False,
compute_dur_classes=False, aco_dir=None):
# if aco_dir is specified, aco_data will be parsed
# This is used by TCSTAR_aco
total_parsed_labs = []
total_flat_labs = []
total_parsed_durs = []
total_parsed_spks = []
total_parsed_aco = []
total_parsed_reldur = []
# prepare a multi-processing pool to parse labels faster
parse_pool = mp.Pool(self.parse_workers)
num_labs_total = sum(len(spk[self.split]) for sname, spk in
self.speakers.items())
if self.max_spk_samples is not None:
num_labs_total = self.max_spk_samples * len(self.speakers)
print('TCSTAR_dur-{} > Parsing {} labs from {} speakers. '
'Num workers: {}...'.format(self.split,
num_labs_total,
len(self.speakers),
self.parse_workers))
for sname, spk in self.speakers.items():
async_f = read_speaker_labs
if self.max_spk_samples is not None:
spk_samples = spk[self.split][:self.max_spk_samples]
else:
spk_samples = spk[self.split]
async_args = (sname, spk_samples, self.lab_dir,
lab_parser, True,
aco_dir)
spk['result'] = parse_pool.apply_async(async_f, async_args)
parse_pool.close()
parse_pool.join()
for sname, spk in self.speakers.items():
result = spk['result'].get()
parsed_timestamps = result[1]
parsed_durs = tstamps_to_dur(parsed_timestamps)
if compute_dur_stats:
#if self.norm_dur:
if self.split == 'train' and ('dur_stats' not in spk or \
self.force_gen):
flat_durs = [fd for dseq in parsed_durs for fd in dseq]
# if they do not exist (or force_gen) and it's train split
dur_min = np.min(flat_durs)
assert dur_min > 0, dur_min
dur_max = np.max(flat_durs)
assert dur_max > 0, dur_max
assert dur_max > dur_min, dur_max
spk['dur_stats'] = {'min':dur_min,
'max':dur_max}
elif self.split != 'train' and 'dur_stats' not in spk:
raise ValueError('Dur stats not available in spk config, '
'and norm_dur option was specified. Load '
'train split to solve this issue, or '
'pre-compute the stats.')
if compute_dur_classes:
#if self.q_classes is not None:
if self.split == 'train' and ('dur_clusters' not in spk or \
self.force_gen) and \
self.q_classes is not None:
flat_durs = [fd for dseq in parsed_durs for fd in dseq]
flat_durs = np.array(flat_durs)
flat_durs = flat_durs.reshape((-1, 1))
# make quantization for every user training data samples
dur_kmeans = KMeans(n_clusters=self.q_classes,
random_state=0).fit(flat_durs)
self.dur_kmeans = dur_kmeans
# Normalization of dur is not necessary anymore with clusters
spk['dur_clusters'] = dur_kmeans
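# A duration can later be quantized into one of the q_classes bins via
# spk['dur_clusters'].predict(np.array([[dur]])), which returns the
# cluster index.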
parsed_labs = result[2]
total_flat_labs += result[3]
total_parsed_durs += parsed_durs
total_parsed_labs += parsed_labs
#print('len(parsed_labs) = ', len(parsed_labs))
total_parsed_spks += [sname] * len(parsed_labs)
if aco_dir is not None:
total_parsed_aco += result[-2]
total_parsed_reldur += result[-1]
if self.split == 'train' and ('aco_stats' not in spk or \
self.force_gen):
flat_acos = [fa for aseq in result[-2] for adur in aseq \
for fa in adur]
#print('len(flat_acos)=', len(flat_acos))
#print('len(flat_acos[0])=', len(flat_acos[0]))
aco_min = np.min(flat_acos, axis=0)
aco_max = np.max(flat_acos, axis=0)
assert aco_min.shape[0] == len(flat_acos[0]), aco_min.shape
spk['aco_stats'] = {'min':aco_min,
'max':aco_max}
# dur stats are necessary for absolute duration normalization
if self.split == 'train' and ('dur_stats' not in spk or \
self.force_gen):
#print('len parsed_durs: ', len(result[-1]))
#flat_durs = [fd for dseq in parsed_durs for fd in dseq]
flat_durs = []
for dfile in result[-1]:
for dseq in dfile:
for dpho in dseq:
fd = dpho[1]
flat_durs.append(fd)
#print('len flat_durs: ', len(flat_durs))
#print('flat_durs: ', flat_durs)
# if they do not exist (or force_gen) and it's train split
dur_min = np.min(flat_durs)
#assert dur_min > 0, dur_min
dur_max = np.max(flat_durs)
#assert dur_max > 0, dur_max
assert dur_max > dur_min, dur_max
spk['dur_stats'] = {'min':dur_min,
'max':dur_max}
del spk['result']
if aco_dir is None:
return parsed_labs, total_flat_labs, total_parsed_durs, \
total_parsed_labs, total_parsed_spks
else:
return parsed_labs, total_flat_labs, total_parsed_durs, \
total_parsed_labs, total_parsed_spks, total_parsed_aco, \
total_parsed_reldur
| [
"sklearn.cluster.KMeans",
"numpy.mean",
"pickle.dump",
"timeit.default_timer",
"json.dumps",
"pickle.load",
"os.path.join",
"numpy.min",
"numpy.max",
"numpy.array",
"multiprocessing.Pool",
"numpy.concatenate",
"copy.deepcopy"
] | [((368, 399), 'os.path.join', 'os.path.join', (['aco_dir', 'spk_name'], {}), '(aco_dir, spk_name)\n', (380, 399), False, 'import os\n'), ((985, 1028), 'numpy.concatenate', 'np.concatenate', (['(cc, fv, i_lf0, uv)'], {'axis': '(1)'}), '((cc, fv, i_lf0, uv), axis=1)\n', (999, 1028), True, 'import numpy as np\n'), ((3951, 3973), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3971, 3973), False, 'import timeit\n'), ((7228, 7250), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7248, 7250), False, 'import timeit\n'), ((4128, 4159), 'os.path.join', 'os.path.join', (['lab_dir', 'spk_name'], {}), '(lab_dir, spk_name)\n', (4140, 4159), False, 'import os\n'), ((7826, 7848), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7846, 7848), False, 'import timeit\n'), ((13088, 13115), 'multiprocessing.Pool', 'mp.Pool', (['self.parse_workers'], {}), '(self.parse_workers)\n', (13095, 13115), True, 'import multiprocessing as mp\n'), ((7504, 7549), 'numpy.concatenate', 'np.concatenate', (['(aco_data, aco_data_)'], {'axis': '(0)'}), '((aco_data, aco_data_), axis=0)\n', (7518, 7549), True, 'import numpy as np\n'), ((10218, 10236), 'pickle.load', 'pickle.load', (['cfg_f'], {}), '(cfg_f)\n', (10229, 10236), False, 'import pickle\n'), ((10269, 10297), 'copy.deepcopy', 'copy.deepcopy', (['self.speakers'], {}), '(self.speakers)\n', (10282, 10297), False, 'import copy\n'), ((12499, 12536), 'pickle.dump', 'pickle.dump', (['self.all_speakers', 'cfg_f'], {}), '(self.all_speakers, cfg_f)\n', (12510, 12536), False, 'import pickle\n'), ((7579, 7601), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7599, 7601), False, 'import timeit\n'), ((7762, 7784), 'numpy.mean', 'np.mean', (['parse_timings'], {}), '(parse_timings)\n', (7769, 7784), True, 'import numpy as np\n'), ((11468, 11502), 'json.dumps', 'json.dumps', (['self.spk2idx'], {'indent': '(2)'}), '(self.spk2idx, indent=2)\n', (11478, 11502), False, 'import json\n'), ((11728, 11762), 'json.dumps', 'json.dumps', (['self.spk2idx'], {'indent': '(2)'}), '(self.spk2idx, indent=2)\n', (11738, 11762), False, 'import json\n'), ((14776, 14793), 'numpy.min', 'np.min', (['flat_durs'], {}), '(flat_durs)\n', (14782, 14793), True, 'import numpy as np\n'), ((14872, 14889), 'numpy.max', 'np.max', (['flat_durs'], {}), '(flat_durs)\n', (14878, 14889), True, 'import numpy as np\n'), ((15851, 15870), 'numpy.array', 'np.array', (['flat_durs'], {}), '(flat_durs)\n', (15859, 15870), True, 'import numpy as np\n'), ((17173, 17198), 'numpy.min', 'np.min', (['flat_acos'], {'axis': '(0)'}), '(flat_acos, axis=0)\n', (17179, 17198), True, 'import numpy as np\n'), ((17229, 17254), 'numpy.max', 'np.max', (['flat_acos'], {'axis': '(0)'}), '(flat_acos, axis=0)\n', (17235, 17254), True, 'import numpy as np\n'), ((18293, 18310), 'numpy.min', 'np.min', (['flat_durs'], {}), '(flat_durs)\n', (18299, 18310), True, 'import numpy as np\n'), ((18390, 18407), 'numpy.max', 'np.max', (['flat_durs'], {}), '(flat_durs)\n', (18396, 18407), True, 'import numpy as np\n'), ((16040, 16089), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self.q_classes', 'random_state': '(0)'}), '(n_clusters=self.q_classes, random_state=0)\n', (16046, 16089), False, 'from sklearn.cluster import KMeans\n')] |
# Fully-connected layer using LeakyReLU as the activation function
import os
import math
import copy
import pickle
import numpy as np
class FclrLayer(object):
def __init__(self, optimizer, K, N, epsilon=0.2, param_file='work/fclr.pkl'):
'''
Parameters:
K: number of neurons in this layer
N: input feature dimension
'''
self.epsilon = epsilon
self.name = 'ann.layer.FclrLayer'
self.X = None
self.Y = None
self.Y_ = None
self.W = None # connection weights
self.b = None # bias values
self.W_opt = None
self.b_opt = None
self.K = K
self.N = N
self.input_shape = (N,)
self.trainable = True # whether this layer takes part in training
self.activation = self.leaky_relu
if os.path.exists(param_file):
self.can_restore_layer = True
else:
self.can_restore_layer = False
self.param_file = param_file
def layer_name(self):
return 'Fully-connected Leaky_ReLU'
def parameters(self):
return np.prod(self.W.shape) + np.prod(self.b.shape)
def output_shape(self):
return (self.K, )
def save_layer(self):
params = [self.W, self.b]
with open(self.param_file, 'wb') as fd:
pickle.dump(params, fd)
def restore_layer(self):
with open(self.param_file, 'rb') as fd:
params = pickle.load(fd)
self.W = np.array(params[0])
self.b = np.array(params[1])
def initialize(self, optimizer):
'''
Initialize the network parameters.
'''
if self.can_restore_layer:
self.restore_layer()
else:
# Initialize the weights
limit = 1 / math.sqrt(self.N)
self.W = np.random.uniform(-limit, limit, (self.K, self.N))
self.b = np.zeros((self.K, 1))
# Weight optimizers
self.W_opt = copy.copy(optimizer)
self.b_opt = copy.copy(optimizer)
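# LeakyReLU(x) = x for x >= 0 and epsilon * x otherwise; its derivative
# (1 or epsilon) is reconstructed from the cached Y_ in backward_pass.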
def leaky_relu(self, X):
return np.where(X >= 0, X, self.epsilon * X)
def forward_pass(self, X, Y, training=True):
'''
Forward pass.
Parameters:
X: input signal, M*N, where M is the mini-batch size
Y: ground-truth labels as one-hot vectors, M*K
'''
Z = X.dot(np.transpose(self.W)) + np.transpose(self.b)
Y_ = self.activation(Z)
self.X = X
self.Y = Y
self.Y_ = Y_
return Z, Y_
def backward_pass(self, accum_grad):
org_W = self.W
M, _ = self.X.shape
# compute the derivative of leaky_relu (1 where Y_ > 0, epsilon otherwise)
self.Y_[self.Y_>0] = 1
self.Y_[self.Y_<=0] = self.epsilon
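# Per-sample chain rule, accumulated over the mini-batch:
#   dJ/dW = sum_i (g_i * a_i) x_i^T
#   dJ/db = sum_i (g_i * a_i)
#   dJ/dX_i = (g_i * a_i) W
# where g_i is the upstream gradient and a_i the LeakyReLU derivative.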
pJ_pW_raw = None
pJ_pb_raw = None
pJ_pX = None
for i in range(M):
gi = accum_grad[i, :]
ai = self.Y_[i, :]
gai = gi * ai
gvw = gai.dot(org_W)
if self.trainable:
gai = gai.reshape((self.K, 1))
xi = self.X[i, :].reshape((1, self.N))
gai_xi = gai.dot(xi)
if pJ_pW_raw is None:
pJ_pW_raw = np.array([gai_xi])
else:
pJ_pW_raw = np.append(pJ_pW_raw, [gai_xi], axis=0)
if pJ_pb_raw is None:
pJ_pb_raw = np.array([gai])
else:
pJ_pb_raw = np.append(pJ_pb_raw, [gai], axis=0)
if pJ_pX is None:
pJ_pX = np.array([gvw])
else:
pJ_pX = np.append(pJ_pX, [gvw], axis=0)
if self.trainable:
pJ_pW = np.sum(pJ_pW_raw, axis=0)
pJ_pb = np.sum(pJ_pb_raw, axis=0)
self.W = self.W_opt.update(self.W, pJ_pW)
self.b = self.b_opt.update(self.b, pJ_pb)
return pJ_pX | [
"os.path.exists",
"numpy.prod",
"pickle.dump",
"numpy.where",
"pickle.load",
"math.sqrt",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.uniform",
"copy.copy",
"numpy.transpose"
] | [((698, 724), 'os.path.exists', 'os.path.exists', (['param_file'], {}), '(param_file)\n', (712, 724), False, 'import os\n'), ((1340, 1359), 'numpy.array', 'np.array', (['params[0]'], {}), '(params[0])\n', (1348, 1359), True, 'import numpy as np\n'), ((1377, 1396), 'numpy.array', 'np.array', (['params[1]'], {}), '(params[1])\n', (1385, 1396), True, 'import numpy as np\n'), ((1922, 1959), 'numpy.where', 'np.where', (['(X >= 0)', 'X', '(self.epsilon * X)'], {}), '(X >= 0, X, self.epsilon * X)\n', (1930, 1959), True, 'import numpy as np\n'), ((962, 983), 'numpy.prod', 'np.prod', (['self.W.shape'], {}), '(self.W.shape)\n', (969, 983), True, 'import numpy as np\n'), ((986, 1007), 'numpy.prod', 'np.prod', (['self.b.shape'], {}), '(self.b.shape)\n', (993, 1007), True, 'import numpy as np\n'), ((1184, 1207), 'pickle.dump', 'pickle.dump', (['params', 'fd'], {}), '(params, fd)\n', (1195, 1207), False, 'import pickle\n'), ((1307, 1322), 'pickle.load', 'pickle.load', (['fd'], {}), '(fd)\n', (1318, 1322), False, 'import pickle\n'), ((1658, 1708), 'numpy.random.uniform', 'np.random.uniform', (['(-limit)', 'limit', '(self.K, self.N)'], {}), '(-limit, limit, (self.K, self.N))\n', (1675, 1708), True, 'import numpy as np\n'), ((1730, 1751), 'numpy.zeros', 'np.zeros', (['(self.K, 1)'], {}), '((self.K, 1))\n', (1738, 1751), True, 'import numpy as np\n'), ((1810, 1830), 'copy.copy', 'copy.copy', (['optimizer'], {}), '(optimizer)\n', (1819, 1830), False, 'import copy\n'), ((1856, 1876), 'copy.copy', 'copy.copy', (['optimizer'], {}), '(optimizer)\n', (1865, 1876), False, 'import copy\n'), ((2167, 2187), 'numpy.transpose', 'np.transpose', (['self.b'], {}), '(self.b)\n', (2179, 2187), True, 'import numpy as np\n'), ((3434, 3459), 'numpy.sum', 'np.sum', (['pJ_pW_raw'], {'axis': '(0)'}), '(pJ_pW_raw, axis=0)\n', (3440, 3459), True, 'import numpy as np\n'), ((3480, 3505), 'numpy.sum', 'np.sum', (['pJ_pb_raw'], {'axis': '(0)'}), '(pJ_pb_raw, axis=0)\n', (3486, 3505), True, 'import numpy as np\n'), ((1618, 1635), 'math.sqrt', 'math.sqrt', (['self.N'], {}), '(self.N)\n', (1627, 1635), False, 'import math\n'), ((2143, 2163), 'numpy.transpose', 'np.transpose', (['self.W'], {}), '(self.W)\n', (2155, 2163), True, 'import numpy as np\n'), ((3297, 3312), 'numpy.array', 'np.array', (['[gvw]'], {}), '([gvw])\n', (3305, 3312), True, 'import numpy as np\n'), ((3355, 3386), 'numpy.append', 'np.append', (['pJ_pX', '[gvw]'], {'axis': '(0)'}), '(pJ_pX, [gvw], axis=0)\n', (3364, 3386), True, 'import numpy as np\n'), ((2955, 2973), 'numpy.array', 'np.array', (['[gai_xi]'], {}), '([gai_xi])\n', (2963, 2973), True, 'import numpy as np\n'), ((3028, 3066), 'numpy.append', 'np.append', (['pJ_pW_raw', '[gai_xi]'], {'axis': '(0)'}), '(pJ_pW_raw, [gai_xi], axis=0)\n', (3037, 3066), True, 'import numpy as np\n'), ((3137, 3152), 'numpy.array', 'np.array', (['[gai]'], {}), '([gai])\n', (3145, 3152), True, 'import numpy as np\n'), ((3207, 3242), 'numpy.append', 'np.append', (['pJ_pb_raw', '[gai]'], {'axis': '(0)'}), '(pJ_pb_raw, [gai], axis=0)\n', (3216, 3242), True, 'import numpy as np\n')] |
# %%
from __future__ import print_function, division
import copy
import time
import datetime
import random
import os
from tqdm import tqdm
import pandas as pd
import numpy as np
import argparse
import warnings
import logging
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
import utils.torchutil as torchutil
from utils.log import lg
from item_dataset import ItemDataset
from model import build_model
warnings.filterwarnings("ignore")
# python train.py --bs 512 --epochs 10
# %%
# %%
# Data augmentation and normalization for training
# Just normalization for validation
IMG_SIZE = 256
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(IMG_SIZE),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
val_transform = transforms.Compose([
transforms.Resize(680),
transforms.CenterCrop(IMG_SIZE),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# %%
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
lg.info("device: %s", device)
embedding_dim = 300
# %%
def train_model(items, model, SAVE_DIR, num_epochs=10, st_epoch=None,
stop_window=None, bs=512, test=False, flod_k=0):
train_items, val_items = train_test_split(items, test_size=0.33, random_state=42)
lg.info('train_items shape: %s, val_items shape: %s', train_items.shape, val_items.shape)
train_d = ItemDataset(train_items, train_transforms)
train_d_val = ItemDataset(train_items, val_transform)
val_d = ItemDataset(val_items, val_transform)
train_d, val_d = torchutil.SafeDataset(train_d), torchutil.SafeDataset(val_d)
train_d_val = torchutil.SafeDataset(train_d_val)
train_loader = DataLoader(train_d, batch_size=bs, shuffle=True, num_workers=4)
train_val_loader = DataLoader(train_d_val, batch_size=bs, shuffle=False, num_workers=4)
val_loader = DataLoader(val_d, batch_size=bs, shuffle=False, num_workers=4)
model.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
# optimizer = optim.SGD(filter(lambda p:
# p.requires_grad, model.parameters()),
# lr=0.001, momentum=0.9)
optimizer = optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()))
# Decay LR by a factor of 0.1 every 10 epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
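# Track the best validation accuracy; training stops early once
# `stop_window` epochs pass without improvement (epochs_no_improve).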
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
best_epoch = 0
epochs_no_improve = 0
since = time.time()
for epoch in range(num_epochs):
lg.info('Epoch %d/%d', epoch, num_epochs - 1)
if st_epoch is not None and epoch < st_epoch:
lg.info("st_epoch: %d, skip %d", st_epoch, epoch)
continue
lg.info('-' * 20)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
dataloader = train_loader
else:
model.eval() # Set model to evaluate mode
dataloader = val_loader
running_loss = 0.0
acc_sum = 0.0
top5_acc_sum = 0.0
top3_acc_sum = 0.0
# Iterate over data.
# lg_tqdm = myutil.TqdmToLogger(lg.getLogger(), level=lg.INFO)
with tqdm(total=len(dataloader)) as t:
for _, labels, imgs in dataloader: # labels: [[1],[3]]
inputs = imgs.to(device)
labels = labels.to(device)
lg.debug('labels shape: %s, %s', labels.size(), labels)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
lg.debug('outputs shape: %s, %s', outputs.size(), outputs)
# values, indices = torch.max()
# _, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
bs = inputs.size(0)
running_loss += loss.item() * bs
# running_corrects += torch.sum(preds == labels.data)
top_acc = torchutil.accuracy(outputs, labels, topk=(1, 3, 5))
acc_sum += top_acc[0]
top3_acc_sum += top_acc[1]
top5_acc_sum += top_acc[2]
t.set_description_str(
"{} loss {:.4f}".format(phase, loss.item()))
t.update()
if test:
break
if phase == 'train':
scheduler.step()
data_size = len(train_items)
else:
data_size = len(val_items)
epoch_loss = running_loss / data_size
epoch_acc = acc_sum.item() / data_size
epoch_top3_acc = top3_acc_sum.item() / data_size
epoch_top5_acc = top5_acc_sum.item() / data_size
lg.info('fold[{}] epoch[{}] {} Loss: {:.4f},Acc: {:.2f}%|Top3 Acc:{:.2f}%| Top5 Acc:{:.2f}%'.format(flod_k,
epoch, phase, epoch_loss, epoch_acc*100,
epoch_top3_acc*100, epoch_top5_acc*100))
# deep copy the model
if phase == 'val':
if epoch_acc > best_acc:
epochs_no_improve = 0
best_epoch = epoch
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
torch.save(best_model_wts,
'{}/model_fold_{}_epoch_{}'.
format(SAVE_DIR, flod_k, epoch))
else:
epochs_no_improve += 1
if stop_window is not None and epochs_no_improve >= stop_window:
lg.info("early stopping, epochs_no_improve: %d", epochs_no_improve)
break
if test:
break
time_elapsed = time.time() - since
time_elapsed = datetime.timedelta(seconds=time_elapsed)
lg.info('fold[%d] Training complete in %s', flod_k, time_elapsed)
lg.info("best_epoch: %d, best_acc: %f ", best_epoch, best_acc)
# load best model weights
model.load_state_dict(best_model_wts)
em_train = get_embedding(model, train_val_loader, test)
em_val = get_embedding(model, val_loader, test)
em = np.vstack((em_train, em_val))
np.save('{}/em.npy'.format(SAVE_DIR), em)
return model
def get_embedding(model, dataloader, test=False):
my_embedding = None
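# Forward-hook trick: each forward pass through the model copies the fc
# layer's output into the preallocated my_embedding tensor below.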
def hook(m, i, o):
my_embedding.copy_(o.data)
h = model.base.fc.register_forward_hook(hook)
model.to(device)
model.eval()
item_ems = None
# lg_tqdm = myutil.TqdmToLogger(lg.getLogger(), level=lg.INFO)
with tqdm(total=len(dataloader)) as t:
for item_id, labels, imgs in dataloader:
inputs = imgs.to(device)
my_embedding = torch.zeros(inputs.size()[0], embedding_dim)
_ = model(inputs)
em = my_embedding.numpy()
item_ids = np.expand_dims(item_id.numpy(), 1)
labels = np.expand_dims(labels.numpy(), 1)
lg.debug('item_ids shape:{}, em shape: {}'.format(
item_ids.shape, em.shape))
rows = np.hstack((labels, item_ids, em))
lg.debug('rows shape: %s', rows.shape)
if item_ems is None:
item_ems = rows
else:
item_ems = np.vstack((item_ems, rows))
t.set_description('embedding with label and itemId shape: {}, batch min: {:.3f}'.format(
item_ems.shape, np.min(em)))
t.update()
if test:
break
h.remove()
return item_ems
# %%
def run(item_fp, bs=512, save_dir=None, fold=None, model_path=None,
num_epochs=10, st_epoch=0, stop_window=3, test=False, gene_em=False):
items = pd.read_csv(item_fp)
num_class = items['label'].nunique()
lg.info('item shape: %s, num_class:%s', items.shape, num_class)
if save_dir is None:
save_dir = '{}_{}'.format('result',
datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
if not os.path.exists(save_dir):
os.mkdir(save_dir)
seed = 42
random.seed(seed)
np.random.seed(seed)
model_ft = build_model(num_class, embedding_dim)
if model_path is not None:
model_ft.load_state_dict(torch.load('{}'.format(model_path)))
train_model(items, model_ft, save_dir,
num_epochs=num_epochs, st_epoch=st_epoch,
stop_window=stop_window, bs=bs, test=test)
# reset for next fold
model_path = None
st_epoch = 0
if test:
lg.info('just test, return')
# break
# %%
def build_em(items, model_ft, flod_k, train_idx, val_index, save_dir, bs):
val_items = items.iloc[val_index, :]
val_d = ItemDataset(val_items, val_transform)
val_d = torchutil.SafeDataset(val_d)
val_loader = DataLoader(val_d, batch_size=bs,
shuffle=False, num_workers=4)
em = get_embedding(model_ft, val_loader)
em_df = pd.DataFrame(em)
em_df.to_csv('{}/embedding_{}.csv'.format(save_dir, flod_k),
index=False, encoding='utf-8', header=False)
np.savez('{}/index_fold_{}'.format(save_dir, flod_k),
train_idx=train_idx, val_idx=val_index)
# %%
# --bs 800 --refit *** --fold k --em
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--debug', action='store_true',
help='debug level')
parser.add_argument('--test', action='store_true',
help='test')
parser.add_argument('--em', action='store_true',
help='is only build embedding')
parser.add_argument('--items', type=str, default='items_with_label.csv',
help='items meta csv file')
parser.add_argument('--save_dir', type=str, default='result',
help='save dir')
parser.add_argument('--input_size', type=int, default=256,
help='size of input image')
parser.add_argument('--em_size', type=int, default=300,
help='size of image embedding')
parser.add_argument('--bs', type=int, default=64,
help='batch size') # 800 for 16GB gpu
parser.add_argument('--epochs', type=int, default=10,
help='num_epochs')
parser.add_argument('--es', type=int, default=1,
help='early stopping step')
parser.add_argument('--model', type=str, default='152',
help='resnet model type')
parser.add_argument('--refit', type=str, default=None,
help='base model')
parser.add_argument('--fold', type=int, default=None,
help='fold')
parser.add_argument('--epoch', type=int, default=None,
help='start epoch')
parser.add_argument('--gpu', type=str, default='0',
help='use which gpu')
args = parser.parse_args()
if args.debug:
lg.setLevel(logging.DEBUG)
run(args.items, bs=args.bs, fold=args.fold, save_dir=args.save_dir, num_epochs=args.epochs,
test=args.test, model_path=args.refit, gene_em=args.em, stop_window=args.es)
| [
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"numpy.hstack",
"item_dataset.ItemDataset",
"utils.log.lg.info",
"torch.cuda.is_available",
"datetime.timedelta",
"os.path.exists",
"argparse.ArgumentParser",
"utils.torchutil.SafeDataset",
"numpy.vstack",
"numpy.random.seed",
"os.mkdir",
"pa... | [((589, 622), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (612, 622), False, 'import warnings\n'), ((1292, 1321), 'utils.log.lg.info', 'lg.info', (['"""device: %s"""', 'device'], {}), "('device: %s', device)\n", (1299, 1321), False, 'from utils.log import lg\n'), ((1515, 1571), 'sklearn.model_selection.train_test_split', 'train_test_split', (['items'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(items, test_size=0.33, random_state=42)\n', (1531, 1571), False, 'from sklearn.model_selection import train_test_split\n'), ((1576, 1669), 'utils.log.lg.info', 'lg.info', (['"""train_items shape: %s, val_items shape: %s"""', 'train_items.shape', 'val_items.shape'], {}), "('train_items shape: %s, val_items shape: %s', train_items.shape,\n val_items.shape)\n", (1583, 1669), False, 'from utils.log import lg\n'), ((1680, 1722), 'item_dataset.ItemDataset', 'ItemDataset', (['train_items', 'train_transforms'], {}), '(train_items, train_transforms)\n', (1691, 1722), False, 'from item_dataset import ItemDataset\n'), ((1741, 1780), 'item_dataset.ItemDataset', 'ItemDataset', (['train_items', 'val_transform'], {}), '(train_items, val_transform)\n', (1752, 1780), False, 'from item_dataset import ItemDataset\n'), ((1793, 1830), 'item_dataset.ItemDataset', 'ItemDataset', (['val_items', 'val_transform'], {}), '(val_items, val_transform)\n', (1804, 1830), False, 'from item_dataset import ItemDataset\n'), ((1932, 1966), 'utils.torchutil.SafeDataset', 'torchutil.SafeDataset', (['train_d_val'], {}), '(train_d_val)\n', (1953, 1966), True, 'import utils.torchutil as torchutil\n'), ((1986, 2049), 'torch.utils.data.DataLoader', 'DataLoader', (['train_d'], {'batch_size': 'bs', 'shuffle': '(True)', 'num_workers': '(4)'}), '(train_d, batch_size=bs, shuffle=True, num_workers=4)\n', (1996, 2049), False, 'from torch.utils.data import DataLoader\n'), ((2073, 2141), 'torch.utils.data.DataLoader', 'DataLoader', (['train_d_val'], {'batch_size': 'bs', 'shuffle': '(False)', 'num_workers': '(4)'}), '(train_d_val, batch_size=bs, shuffle=False, num_workers=4)\n', (2083, 2141), False, 'from torch.utils.data import DataLoader\n'), ((2159, 2221), 'torch.utils.data.DataLoader', 'DataLoader', (['val_d'], {'batch_size': 'bs', 'shuffle': '(False)', 'num_workers': '(4)'}), '(val_d, batch_size=bs, shuffle=False, num_workers=4)\n', (2169, 2221), False, 'from torch.utils.data import DataLoader\n'), ((2260, 2281), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2279, 2281), True, 'import torch.nn as nn\n'), ((2633, 2694), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(10)', 'gamma': '(0.1)'}), '(optimizer, step_size=10, gamma=0.1)\n', (2658, 2694), True, 'import torch.optim as optim\n'), ((2828, 2839), 'time.time', 'time.time', ([], {}), '()\n', (2837, 2839), False, 'import time\n'), ((6772, 6812), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'time_elapsed'}), '(seconds=time_elapsed)\n', (6790, 6812), False, 'import datetime\n'), ((6817, 6882), 'utils.log.lg.info', 'lg.info', (['"""fold[%d] Training complete in %s"""', 'flod_k', 'time_elapsed'], {}), "('fold[%d] Training complete in %s', flod_k, time_elapsed)\n", (6824, 6882), False, 'from utils.log import lg\n'), ((6887, 6949), 'utils.log.lg.info', 'lg.info', (['"""best_epoch: %d, best_acc: %f """', 'best_epoch', 'best_acc'], {}), "('best_epoch: %d, best_acc: %f ', best_epoch, best_acc)\n", (6894, 6949), False, 'from utils.log import lg\n'), 
((7146, 7175), 'numpy.vstack', 'np.vstack', (['(em_train, em_val)'], {}), '((em_train, em_val))\n', (7155, 7175), True, 'import numpy as np\n'), ((8693, 8713), 'pandas.read_csv', 'pd.read_csv', (['item_fp'], {}), '(item_fp)\n', (8704, 8713), True, 'import pandas as pd\n'), ((8759, 8822), 'utils.log.lg.info', 'lg.info', (['"""item shape: %s, num_class:%s"""', 'items.shape', 'num_class'], {}), "('item shape: %s, num_class:%s', items.shape, num_class)\n", (8766, 8822), False, 'from utils.log import lg\n'), ((9064, 9081), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (9075, 9081), False, 'import random\n'), ((9086, 9106), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9100, 9106), True, 'import numpy as np\n'), ((9122, 9159), 'model.build_model', 'build_model', (['num_class', 'embedding_dim'], {}), '(num_class, embedding_dim)\n', (9133, 9159), False, 'from model import build_model\n'), ((9687, 9724), 'item_dataset.ItemDataset', 'ItemDataset', (['val_items', 'val_transform'], {}), '(val_items, val_transform)\n', (9698, 9724), False, 'from item_dataset import ItemDataset\n'), ((9737, 9765), 'utils.torchutil.SafeDataset', 'torchutil.SafeDataset', (['val_d'], {}), '(val_d)\n', (9758, 9765), True, 'import utils.torchutil as torchutil\n'), ((9783, 9845), 'torch.utils.data.DataLoader', 'DataLoader', (['val_d'], {'batch_size': 'bs', 'shuffle': '(False)', 'num_workers': '(4)'}), '(val_d, batch_size=bs, shuffle=False, num_workers=4)\n', (9793, 9845), False, 'from torch.utils.data import DataLoader\n'), ((9932, 9948), 'pandas.DataFrame', 'pd.DataFrame', (['em'], {}), '(em)\n', (9944, 9948), True, 'import pandas as pd\n'), ((10271, 10332), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (10294, 10332), False, 'import argparse\n'), ((831, 869), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['IMG_SIZE'], {}), '(IMG_SIZE)\n', (859, 869), False, 'from torchvision import transforms\n'), ((875, 908), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (906, 908), False, 'from torchvision import transforms\n'), ((914, 935), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (933, 935), False, 'from torchvision import transforms\n'), ((941, 1007), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (961, 1007), False, 'from torchvision import transforms\n'), ((1052, 1074), 'torchvision.transforms.Resize', 'transforms.Resize', (['(680)'], {}), '(680)\n', (1069, 1074), False, 'from torchvision import transforms\n'), ((1080, 1111), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['IMG_SIZE'], {}), '(IMG_SIZE)\n', (1101, 1111), False, 'from torchvision import transforms\n'), ((1117, 1138), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1136, 1138), False, 'from torchvision import transforms\n'), ((1144, 1210), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (1164, 1210), False, 'from torchvision import transforms\n'), ((1254, 1279), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1277, 1279), False, 'import torch\n'), ((1853, 1883), 'utils.torchutil.SafeDataset', 
'torchutil.SafeDataset', (['train_d'], {}), '(train_d)\n', (1874, 1883), True, 'import utils.torchutil as torchutil\n'), ((1885, 1913), 'utils.torchutil.SafeDataset', 'torchutil.SafeDataset', (['val_d'], {}), '(val_d)\n', (1906, 1913), True, 'import utils.torchutil as torchutil\n'), ((2884, 2929), 'utils.log.lg.info', 'lg.info', (['"""Epoch %d/%d"""', 'epoch', '(num_epochs - 1)'], {}), "('Epoch %d/%d', epoch, num_epochs - 1)\n", (2891, 2929), False, 'from utils.log import lg\n'), ((3075, 3092), 'utils.log.lg.info', 'lg.info', (["('-' * 20)"], {}), "('-' * 20)\n", (3082, 3092), False, 'from utils.log import lg\n'), ((6733, 6744), 'time.time', 'time.time', ([], {}), '()\n', (6742, 6744), False, 'import time\n'), ((8988, 9012), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (9002, 9012), False, 'import os\n'), ((9022, 9040), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (9030, 9040), False, 'import os\n'), ((9509, 9537), 'utils.log.lg.info', 'lg.info', (['"""just test, return"""'], {}), "('just test, return')\n", (9516, 9537), False, 'from utils.log import lg\n'), ((11995, 12021), 'utils.log.lg.setLevel', 'lg.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (12006, 12021), False, 'from utils.log import lg\n'), ((2996, 3045), 'utils.log.lg.info', 'lg.info', (['"""st_epoch: %d, skip %d"""', 'st_epoch', 'epoch'], {}), "('st_epoch: %d, skip %d', st_epoch, epoch)\n", (3003, 3045), False, 'from utils.log import lg\n'), ((6592, 6659), 'utils.log.lg.info', 'lg.info', (['"""early stopping, epochs_no_improve: %d"""', 'epochs_no_improve'], {}), "('early stopping, epochs_no_improve: %d', epochs_no_improve)\n", (6599, 6659), False, 'from utils.log import lg\n'), ((8058, 8091), 'numpy.hstack', 'np.hstack', (['(labels, item_ids, em)'], {}), '((labels, item_ids, em))\n', (8067, 8091), True, 'import numpy as np\n'), ((8104, 8142), 'utils.log.lg.debug', 'lg.debug', (['"""rows shape: %s"""', 'rows.shape'], {}), "('rows shape: %s', rows.shape)\n", (8112, 8142), False, 'from utils.log import lg\n'), ((8253, 8280), 'numpy.vstack', 'np.vstack', (['(item_ems, rows)'], {}), '((item_ems, rows))\n', (8262, 8280), True, 'import numpy as np\n'), ((4948, 4999), 'utils.torchutil.accuracy', 'torchutil.accuracy', (['outputs', 'labels'], {'topk': '(1, 3, 5)'}), '(outputs, labels, topk=(1, 3, 5))\n', (4966, 4999), True, 'import utils.torchutil as torchutil\n'), ((8414, 8424), 'numpy.min', 'np.min', (['em'], {}), '(em)\n', (8420, 8424), True, 'import numpy as np\n'), ((8926, 8949), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8947, 8949), False, 'import datetime\n'), ((4164, 4204), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (4186, 4204), False, 'import torch\n')] |
from discord.ext import commands
import discord
import numpy as np
import os
import traceback
from parse import parse
client = discord.Client()
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
# @bot.event
# async def on_command_error(ctx, error):
# orig_error = getattr(error, "original", error)
# error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
# await ctx.send(error_msg)
# @bot.command()
# async def ping(ctx):
# await ctx.send('pong')
# @bot.command()
# async def neko(ctx):
# await ctx.send('nyan')
def dice(dice_size):
num = np.random.randint(1, int(dice_size + 1))
return num
def simple_dice(dice_size, dice_num):
dice_val = np.array([], dtype=np.int64)
for i in range(dice_num):
dice_val = np.append(dice_val, dice(dice_size))
#msg = 'dice: ' + str(np.sum(dice_val)) + ' = ' + str(dice_val)
m = dice_val
return m
def CCB(m, a):
if m <= (a/5):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Extreme!!!'
elif (a/5) < m <= (a/2):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Hard!!'
elif (a/2) < m <= a:
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Success!'
elif m > a:
if a >= 50:
if a < m <= 99:
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' > ' + str(a) + ' Failure.'
elif m == 100:
                msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' > ' + str(a) + ' Fumble...'
elif a < 50:
if a < m <= 95:
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' > ' + str(a) + ' Failure.'
elif 96 <= m <= 100:
                msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' > ' + str(a) + ' Fumble...'
return msg
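# Worked example of the thresholds above (numbers assumed for illustration):
# with skill a = 60, a roll is Extreme at <= 12 (a/5), Hard at <= 30 (a/2),
# a regular Success at <= 60, a Failure above that, and a Fumble only on 100;
# when a < 50, any roll of 96-100 counts as a Fumble instead.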
def bp(m, a, M):
if m <= (a/5):
msg = 'dice: ' + str(M) + ' = ' + str(m) + ' <= ' + str(a) + ' Extreme!!!'
elif (a/5) < m <= (a/2):
msg = 'dice: ' + str(M) + ' = ' + str(m) + ' <= ' + str(a) + ' Hard!!'
elif (a/2) < m <= a:
msg = 'dice: ' + str(M) + ' = ' + str(m) + ' <= ' + str(a) + ' Success!'
elif m > a:
if a >= 50:
if a < m <= 99:
msg = 'dice: ' + str(M) + ' = ' + str(m) + ' > ' + str(a) + ' Failure.'
elif m == 100:
                msg = 'dice: ' + str(M) + ' = ' + str(m) + ' > ' + str(a) + ' Fumble...'
elif a < 50:
if a < m <= 95:
msg = 'dice: ' + str(M) + ' = ' + str(m) + ' > ' + str(a) + ' Failure.'
elif 96 <= m <= 100:
                msg = 'dice: ' + str(M) + ' = ' + str(m) + ' > ' + str(a) + ' Fumble...'
return msg
# Handler that runs whenever a message is received
@client.event
async def on_message(message):
    # Ignore messages sent by bots
if message.author.bot:
return
    # Reply with 'にゃーん' ('meow') when someone says '/neko'
if message.content == '/neko':
await message.channel.send('にゃーん')
if message.content.startswith('/dice'):
info = parse('/dice {}d{}', message.content)
info2 = parse('/dice {}d{}+{}', message.content)
info3 = parse('/dice {}d{}-{}', message.content)
info4 = parse('/dice {}d{}*{}', message.content)
info5 = parse('/dice {}d{}/{}', message.content)
if info:
if info[1].isdecimal() and info[0].isdecimal():
dice_num = int(info[0])
dice_size = int(info[1])
m = simple_dice(dice_size, dice_num)
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m)
await message.channel.send(msg)
if info2:
if info2[1].isdecimal() and info2[0].isdecimal():
dice_num = int(info2[0])
dice_size = int(info2[1])
m = simple_dice(dice_size, dice_num)
c = int(info2[2])
msg = 'dice: ' + str(np.sum(m))+str(m) + '+' + str(c)+ ' = ' + str(np.sum(m)+c)
await message.channel.send(msg)
if info3:
if info3[1].isdecimal() and info3[0].isdecimal():
dice_num = int(info3[0])
dice_size = int(info3[1])
m = simple_dice(dice_size, dice_num)
c = int(info3[2])
msg = 'dice: ' + str(np.sum(m))+str(m) + '-' + str(c)+ ' = ' + str(np.sum(m)-c)
await message.channel.send(msg)
if info4:
if info4[1].isdecimal() and info4[0].isdecimal():
dice_num = int(info4[0])
dice_size = int(info4[1])
m = simple_dice(dice_size, dice_num)
c = int(info4[2])
msg = 'dice: ' + str(np.sum(m))+str(m) + '*' + str(c)+ ' = ' + str(np.sum(m)*c)
await message.channel.send(msg)
if info5:
if info5[1].isdecimal() and info5[0].isdecimal():
dice_num = int(info5[0])
dice_size = int(info5[1])
m = simple_dice(dice_size, dice_num)
c = int(info5[2])
msg = 'dice: ' + str(np.sum(m))+str(m) + '/' + str(c)+ ' = ' + str(-(-np.sum(m)//c))
await message.channel.send(msg)
if message.content == 'CCB' or message.content == 'ccb':
m = simple_dice(100, 1)
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m)
await message.channel.send(msg)
if message.content.startswith('CCB<='):
info = parse('CCB<={}', message.content)
info2 = parse('CCB<={}+{}', message.content)
info3 = parse('CCB<={}-{}', message.content)
info_ = parse('CCB<={} {}', message.content)
info_2 = parse('CCB<={}+{} {}', message.content)
info_3 = parse('CCB<={}-{} {}', message.content)
if info:
if info[0].isdecimal():
m = simple_dice(100, 1)
msg = CCB(m, int(info[0]))
await message.channel.send(msg)
if info2:
if info2[0].isdecimal() and info2[1].isdecimal():
m = simple_dice(100, 1)
msg = CCB(m, int(info2[0])+int(info2[1]))
await message.channel.send(msg)
if info3:
if info3[0].isdecimal() and info3[1].isdecimal():
m = simple_dice(100, 1)
msg = CCB(m, int(info3[0])-int(info3[1]))
await message.channel.send(msg)
if info_:
if info_[0].isdecimal() and info_[1].isalpha():
m = simple_dice(100, 1)
msg = CCB(m, int(info_[0]))
await message.channel.send(msg)
if info_2:
if info_2[0].isdecimal() and info_2[1].isdecimal() and info_2[2].isalpha():
m = simple_dice(100, 1)
msg = CCB(m, int(info_2[0])+int(info_2[1]))
await message.channel.send(msg)
if info_3:
if info_3[0].isdecimal() and info_3[1].isdecimal() and info_3[2].isalpha():
m = simple_dice(100, 1)
msg = CCB(m, int(info_3[0])-int(info_3[1]))
await message.channel.send(msg)
if message.content.startswith('CCB>'):
info = parse('CCB>{}', message.content)
if info:
if info[0].isdecimal():
m = simple_dice(100, 1)
if int(m) > int(info[0]):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' > ' + str(info[0]) + ' Success!'
elif int(m) <= int(info[0]):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(info[0]) + ' Failure.'
                #msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(info[0]) + ' Success!'
await message.channel.send(msg)
if message.content.startswith('/p'):
info = parse('/p{}CCB', message.content)
info2 = parse('/p{}CCB<={}', message.content)
if info:
if info[0].isdecimal():
j = int(info[0])
m = dice(10)
if m == 10:
m = 0
#await message.channel.send(str(j))
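                # Penalty-dice mechanic (as implemented here): keep one ones
                # die m (a roll of 10 counts as 0), roll j+1 candidate tens
                # dice, combine each as tens*10 + ones (00 counts as 100),
                # and keep the worst, i.e. highest, percentile via max(M) below.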
M = []
for i in range(j+1):
M.append(dice(10))
#await message.channel.send(str(M[i]))
if M[i] == 10:
M[i] = 0
M[i] = M[i] * 10 + m
if M[i] == 0:
M[i] = 100
#await message.channel.send(str(M[i]))
msg = 'dice: ' + str(M) + ' = ' + str(max(M))
await message.channel.send(msg)
if info2:
if info2[0].isdecimal() and info2[1].isdecimal():
j = int(info2[0])
m = dice(10)
if m == 10:
m = 0
#await message.channel.send(str(j))
M = []
for i in range(j+1):
M.append(dice(10))
#await message.channel.send(str(M[i]))
if M[i] == 10:
M[i] = 0
M[i] = M[i] * 10 + m
if M[i] == 0:
M[i] = 100
#await message.channel.send(str(M[i]))
Mm = max(M)
msg = bp(Mm, int(info2[1]), M)
await message.channel.send(msg)
if message.content.startswith('/b'):
info = parse('/b{}CCB', message.content)
info2 = parse('/b{}CCB<={}', message.content)
if info:
if info[0].isdecimal():
j = int(info[0])
m = dice(10)
if m == 10:
m = 0
#await message.channel.send(str(j))
M = []
for i in range(j+1):
M.append(dice(10))
#await message.channel.send(str(M[i]))
if M[i] == 10:
M[i] = 0
M[i] = M[i] * 10 + m
if M[i] == 0:
M[i] = 100
#await message.channel.send(str(M[i]))
msg = 'dice: ' + str(M) + ' = ' + str(min(M))
await message.channel.send(msg)
if info2:
if info2[0].isdecimal() and info2[1].isdecimal():
j = int(info2[0])
m = dice(10)
if m == 10:
m = 0
#await message.channel.send(str(j))
M = []
for i in range(j+1):
M.append(dice(10))
#await message.channel.send(str(M[i]))
if M[i] == 10:
M[i] = 0
M[i] = M[i] * 10 + m
if M[i] == 0:
M[i] = 100
#await message.channel.send(str(M[i]))
Mm = min(M)
msg = bp(Mm, int(info2[1]), M)
await message.channel.send(msg)
if message.content == '/mad_rt':
roll = []
roll.append('a')
        roll.append('dice: [1] -> Amnesia')
        roll.append('dice: [2] -> Somatic symptom disorder')
        roll.append('dice: [3] -> Violent impulse')
        roll.append('dice: [4] -> Paranoia')
        roll.append('dice: [5] -> Significant people')
        roll.append('dice: [6] -> Fainting')
        roll.append('dice: [7] -> Flee in panic')
        roll.append('dice: [8] -> Physical hysteria or emotional outburst')
        roll.append('dice: [9] -> Gain a phobia (roll 1d100 or the KP picks one)')
        roll.append('dice: [0] -> Gain a mania (roll 1d100 or the KP picks one)')
m = roll[dice(10)]
a = dice(10)
        m += '\ndice: ['+ str(a) +'] -> Temporary insanity (' + str(a) + ' rounds) or (' + str(a) + ' hours)'
await message.channel.send(m)
if message.content == '/mad_s':
roll = []
roll.append('a')
        roll.append('dice: [1] -> Amnesia')
        roll.append('dice: [2] -> Theft')
        roll.append('dice: [3] -> Assault')
        roll.append('dice: [4] -> Violence')
        roll.append('dice: [5] -> Ideology/beliefs')
        roll.append('dice: [6] -> Significant people')
        roll.append('dice: [7] -> Institutionalized')
        roll.append('dice: [8] -> Panic')
        roll.append('dice: [9] -> Gain a phobia (roll 1d100 or the KP picks one)')
        roll.append('dice: [0] -> Gain a mania (roll 1d100 or the KP picks one)')
m = roll[dice(10)]
a = dice(10)
        m += '\ndice: ['+ str(a) +'] -> Temporary insanity (regains consciousness after ' + str(a) + ' hours)'
await message.channel.send(m)
client.run(token)
#bot.run(token)
| [
"parse.parse",
"discord.ext.commands.Bot",
"numpy.array",
"numpy.sum",
"discord.Client"
] | [((128, 144), 'discord.Client', 'discord.Client', ([], {}), '()\n', (142, 144), False, 'import discord\n'), ((151, 183), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""/"""'}), "(command_prefix='/')\n", (163, 183), False, 'from discord.ext import commands\n'), ((751, 779), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (759, 779), True, 'import numpy as np\n'), ((3075, 3112), 'parse.parse', 'parse', (['"""/dice {}d{}"""', 'message.content'], {}), "('/dice {}d{}', message.content)\n", (3080, 3112), False, 'from parse import parse\n'), ((3129, 3169), 'parse.parse', 'parse', (['"""/dice {}d{}+{}"""', 'message.content'], {}), "('/dice {}d{}+{}', message.content)\n", (3134, 3169), False, 'from parse import parse\n'), ((3186, 3226), 'parse.parse', 'parse', (['"""/dice {}d{}-{}"""', 'message.content'], {}), "('/dice {}d{}-{}', message.content)\n", (3191, 3226), False, 'from parse import parse\n'), ((3243, 3283), 'parse.parse', 'parse', (['"""/dice {}d{}*{}"""', 'message.content'], {}), "('/dice {}d{}*{}', message.content)\n", (3248, 3283), False, 'from parse import parse\n'), ((3300, 3340), 'parse.parse', 'parse', (['"""/dice {}d{}/{}"""', 'message.content'], {}), "('/dice {}d{}/{}', message.content)\n", (3305, 3340), False, 'from parse import parse\n'), ((5532, 5565), 'parse.parse', 'parse', (['"""CCB<={}"""', 'message.content'], {}), "('CCB<={}', message.content)\n", (5537, 5565), False, 'from parse import parse\n'), ((5582, 5618), 'parse.parse', 'parse', (['"""CCB<={}+{}"""', 'message.content'], {}), "('CCB<={}+{}', message.content)\n", (5587, 5618), False, 'from parse import parse\n'), ((5635, 5671), 'parse.parse', 'parse', (['"""CCB<={}-{}"""', 'message.content'], {}), "('CCB<={}-{}', message.content)\n", (5640, 5671), False, 'from parse import parse\n'), ((5688, 5724), 'parse.parse', 'parse', (['"""CCB<={} {}"""', 'message.content'], {}), "('CCB<={} {}', message.content)\n", (5693, 5724), False, 'from parse import parse\n'), ((5742, 5781), 'parse.parse', 'parse', (['"""CCB<={}+{} {}"""', 'message.content'], {}), "('CCB<={}+{} {}', message.content)\n", (5747, 5781), False, 'from parse import parse\n'), ((5799, 5838), 'parse.parse', 'parse', (['"""CCB<={}-{} {}"""', 'message.content'], {}), "('CCB<={}-{} {}', message.content)\n", (5804, 5838), False, 'from parse import parse\n'), ((7280, 7312), 'parse.parse', 'parse', (['"""CCB>{}"""', 'message.content'], {}), "('CCB>{}', message.content)\n", (7285, 7312), False, 'from parse import parse\n'), ((7923, 7956), 'parse.parse', 'parse', (['"""/p{}CCB"""', 'message.content'], {}), "('/p{}CCB', message.content)\n", (7928, 7956), False, 'from parse import parse\n'), ((7973, 8010), 'parse.parse', 'parse', (['"""/p{}CCB<={}"""', 'message.content'], {}), "('/p{}CCB<={}', message.content)\n", (7978, 8010), False, 'from parse import parse\n'), ((9584, 9617), 'parse.parse', 'parse', (['"""/b{}CCB"""', 'message.content'], {}), "('/b{}CCB', message.content)\n", (9589, 9617), False, 'from parse import parse\n'), ((9634, 9671), 'parse.parse', 'parse', (['"""/b{}CCB<={}"""', 'message.content'], {}), "('/b{}CCB<={}', message.content)\n", (9639, 9671), False, 'from parse import parse\n'), ((5379, 5388), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (5385, 5388), True, 'import numpy as np\n'), ((3998, 4007), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (4004, 4007), True, 'import numpy as np\n'), ((4392, 4401), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (4398, 4401), True, 'import numpy as np\n'), 
((4786, 4795), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (4792, 4795), True, 'import numpy as np\n'), ((3589, 3598), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (3595, 3598), True, 'import numpy as np\n'), ((1028, 1037), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1034, 1037), True, 'import numpy as np\n'), ((5183, 5192), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (5189, 5192), True, 'import numpy as np\n'), ((1148, 1157), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1154, 1157), True, 'import numpy as np\n'), ((1260, 1269), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1266, 1269), True, 'import numpy as np\n'), ((3952, 3961), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (3958, 3961), True, 'import numpy as np\n'), ((4346, 4355), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (4352, 4355), True, 'import numpy as np\n'), ((4740, 4749), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (4746, 4749), True, 'import numpy as np\n'), ((5134, 5143), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (5140, 5143), True, 'import numpy as np\n'), ((7489, 7498), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (7495, 7498), True, 'import numpy as np\n'), ((7640, 7649), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (7646, 7649), True, 'import numpy as np\n'), ((1421, 1430), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1427, 1430), True, 'import numpy as np\n'), ((1544, 1553), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1550, 1553), True, 'import numpy as np\n'), ((1690, 1699), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1696, 1699), True, 'import numpy as np\n'), ((1819, 1828), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1825, 1828), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, roc_curve, auc
def multiple_histograms_plot(data, x, hue, density=False, bins=10,
alpha=0.5, colors=None, hue_order=None,
probability_hist=False, xticks=None,
title=None, xlabel=None, ylabel=None,
figsize=(15, 8), xticklabels=None):
hue_order = hue_order if hue_order is not None else sorted(data[hue].unique())
colors = colors if colors is not None else sns.color_palette(n_colors=len(hue_order))
colors_dict = dict(zip(hue_order, colors))
plt.figure(figsize=figsize)
for current_hue in hue_order:
current_hue_mask = data[hue] == current_hue
data.loc[current_hue_mask, x].hist(bins=bins, density=density,
alpha=alpha, label=str(current_hue),
color=colors_dict[current_hue])
xlabel = x if xlabel is None else xlabel
ylabel = (ylabel if ylabel is not None
else 'Density' if density
else 'Frequency')
_title_postfix = ' (normalized)' if density else ''
    title = title if title is not None else f'{xlabel} by {hue}{_title_postfix}'
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
ax = plt.gca()
if probability_hist:
plt.xlim(-0.0001, 1.0001)
ax.set_xticks(np.arange(0, 1.1, 0.1))
ax.set_xticks(np.arange(0.05, 1, 0.1), minor=True)
elif xticks is not None:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
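# Usage sketch for the function above (data and column names are hypothetical):
#   df = pd.DataFrame({'income': np.random.lognormal(3, 1, 1000),
#                      'churned': np.random.randint(0, 2, 1000)})
#   multiple_histograms_plot(df, x='income', hue='churned', density=True, bins=30)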
def bar_plot_with_categorical(df, x, hue, order=None, figsize=(16, 8),
plot_average=True, xticklabels=None,
**sns_kwargs):
if order is None:
order = (pd.pivot_table(data=df, values=hue, index=[x], aggfunc='mean')
.sort_values(by=hue, ascending=False)
.index.values)
plt.subplots(figsize=figsize)
sns.barplot(x=x, y=hue, data=df, order=order, **sns_kwargs)
if plot_average:
hue_average = df[hue].mean()
plt.axhline(y=hue_average, linewidth=2, linestyle='--',
color='gray', label='{} average'.format(hue))
if xticklabels is not None:
ax = plt.gca()
ax.set_xticklabels(xticklabels)
plt.legend()
plt.show()
def plot_confusion_matrix(y_true, y_pred,
index_labels=('False (truth)', 'True (truth)'),
columns_labels=('False (pred)', 'True (pred)')):
conf_matrix = confusion_matrix(y_true, y_pred)
conf_matrix_df = pd.DataFrame(conf_matrix, index=index_labels,
columns=columns_labels)
_, ax = plt.subplots(figsize=(8, 8))
ax.set_title('Confusion Matrix')
sns.heatmap(conf_matrix_df, annot=True, fmt="d", linewidths=10,
cmap='Blues', ax=ax)
def plot_confusion_matrix_2(y_true, y_pred,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
    # Class labels are hard-coded for a binary (0/1) problem here
    classes = ['0', '1']
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return fig
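# Usage sketch (toy labels): row normalization turns counts into per-class recall.
#   fig = plot_confusion_matrix_2([0, 1, 1, 0], [0, 1, 0, 0], normalize=True)
#   fig.savefig('confusion.png')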
def plot_roc(y_true, y_score, figsize=(8, 8)):
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
plt.figure(figsize=figsize)
plt.plot(fpr, tpr, color='darkorange',
lw=2, label=f'ROC curve (AUC = {100*roc_auc:.2f}%)')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc="lower right")
plt.show()
return roc_auc | [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve",
"numpy.arange",
"pandas.pivot_table",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.gca",
"seaborn.heatma... | [((727, 754), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (737, 754), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1385), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1378, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1390, 1408), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1400, 1408), True, 'import matplotlib.pyplot as plt\n'), ((1413, 1431), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (1423, 1431), True, 'import matplotlib.pyplot as plt\n'), ((1436, 1448), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1446, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1472), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1470, 1472), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2198), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2181, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2262), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'x', 'y': 'hue', 'data': 'df', 'order': 'order'}), '(x=x, y=hue, data=df, order=order, **sns_kwargs)\n', (2214, 2262), True, 'import seaborn as sns\n'), ((2570, 2582), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2580, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2587, 2597), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2595, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2814, 2846), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2830, 2846), False, 'from sklearn.metrics import confusion_matrix, roc_curve, auc\n'), ((2868, 2937), 'pandas.DataFrame', 'pd.DataFrame', (['conf_matrix'], {'index': 'index_labels', 'columns': 'columns_labels'}), '(conf_matrix, index=index_labels, columns=columns_labels)\n', (2880, 2937), True, 'import pandas as pd\n'), ((2984, 3012), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2996, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3054, 3143), 'seaborn.heatmap', 'sns.heatmap', (['conf_matrix_df'], {'annot': '(True)', 'fmt': '"""d"""', 'linewidths': '(10)', 'cmap': '"""Blues"""', 'ax': 'ax'}), "(conf_matrix_df, annot=True, fmt='d', linewidths=10, cmap=\n 'Blues', ax=ax)\n", (3065, 3143), True, 'import seaborn as sns\n'), ((3670, 3702), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3686, 3702), False, 'from sklearn.metrics import confusion_matrix, roc_curve, auc\n'), ((4002, 4016), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4014, 4016), True, 'import matplotlib.pyplot as plt\n'), ((5057, 5083), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (5066, 5083), False, 'from sklearn.metrics import confusion_matrix, roc_curve, auc\n'), ((5098, 5111), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (5101, 5111), False, 'from sklearn.metrics import confusion_matrix, roc_curve, auc\n'), ((5121, 5148), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5131, 5148), True, 'import matplotlib.pyplot as plt\n'), ((5153, 5251), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': '(2)', 'label': 'f"""ROC curve (AUC = {100 * roc_auc:.2f}%)"""'}), "(fpr, tpr, color='darkorange', lw=2, label=\n f'ROC curve 
(AUC = {100 * roc_auc:.2f}%)')\n", (5161, 5251), True, 'import matplotlib.pyplot as plt\n'), ((5262, 5322), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': '(2)', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n", (5270, 5322), True, 'import matplotlib.pyplot as plt\n'), ((5332, 5352), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5340, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5357, 5377), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5365, 5377), True, 'import matplotlib.pyplot as plt\n'), ((5382, 5415), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (5392, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5420, 5452), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (5430, 5452), True, 'import matplotlib.pyplot as plt\n'), ((5457, 5486), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (5467, 5486), True, 'import matplotlib.pyplot as plt\n'), ((5491, 5501), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5499, 5501), True, 'import matplotlib.pyplot as plt\n'), ((1506, 1531), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.0001)', '(1.0001)'], {}), '(-0.0001, 1.0001)\n', (1514, 1531), True, 'import matplotlib.pyplot as plt\n'), ((2507, 2516), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2514, 2516), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1576), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (1563, 1576), True, 'import numpy as np\n'), ((1600, 1623), 'numpy.arange', 'np.arange', (['(0.05)', '(1)', '(0.1)'], {}), '(0.05, 1, 0.1)\n', (1609, 1623), True, 'import numpy as np\n'), ((4163, 4185), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (4172, 4185), True, 'import numpy as np\n'), ((4205, 4227), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (4214, 4227), True, 'import numpy as np\n'), ((2006, 2068), 'pandas.pivot_table', 'pd.pivot_table', ([], {'data': 'df', 'values': 'hue', 'index': '[x]', 'aggfunc': '"""mean"""'}), "(data=df, values=hue, index=[x], aggfunc='mean')\n", (2020, 2068), True, 'import pandas as pd\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet3d.core import instance_seg_eval
def test_instance_seg_eval():
valid_class_ids = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,
36, 39)
class_labels = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')
n_points_list = [3300, 3000]
gt_labels_list = [[0, 0, 0, 0, 0, 0, 14, 14, 2, 1],
[13, 13, 2, 1, 3, 3, 0, 0, 0]]
gt_instance_masks = []
gt_semantic_masks = []
pred_instance_masks = []
pred_instance_labels = []
pred_instance_scores = []
for n_points, gt_labels in zip(n_points_list, gt_labels_list):
        gt_instance_mask = np.ones(n_points, dtype=int) * -1  # np.int was removed in NumPy 1.24
        gt_semantic_mask = np.ones(n_points, dtype=int) * -1
        pred_instance_mask = np.ones(n_points, dtype=int) * -1
labels = []
scores = []
for i, gt_label in enumerate(gt_labels):
begin = i * 300
end = begin + 300
gt_instance_mask[begin:end] = i
gt_semantic_mask[begin:end] = gt_label
pred_instance_mask[begin:end] = i
labels.append(gt_label)
scores.append(.99)
gt_instance_masks.append(torch.tensor(gt_instance_mask))
gt_semantic_masks.append(torch.tensor(gt_semantic_mask))
pred_instance_masks.append(torch.tensor(pred_instance_mask))
pred_instance_labels.append(torch.tensor(labels))
pred_instance_scores.append(torch.tensor(scores))
ret_value = instance_seg_eval(
gt_semantic_masks=gt_semantic_masks,
gt_instance_masks=gt_instance_masks,
pred_instance_masks=pred_instance_masks,
pred_instance_labels=pred_instance_labels,
pred_instance_scores=pred_instance_scores,
valid_class_ids=valid_class_ids,
class_labels=class_labels)
for label in [
'cabinet', 'bed', 'chair', 'sofa', 'showercurtrain', 'toilet'
]:
metrics = ret_value['classes'][label]
assert metrics['ap'] == 1.0
assert metrics['ap50%'] == 1.0
assert metrics['ap25%'] == 1.0
pred_instance_masks[1][2240:2700] = -1
pred_instance_masks[0][2700:3000] = 8
pred_instance_labels[0][9] = 2
ret_value = instance_seg_eval(
gt_semantic_masks=gt_semantic_masks,
gt_instance_masks=gt_instance_masks,
pred_instance_masks=pred_instance_masks,
pred_instance_labels=pred_instance_labels,
pred_instance_scores=pred_instance_scores,
valid_class_ids=valid_class_ids,
class_labels=class_labels)
assert abs(ret_value['classes']['cabinet']['ap50%'] - 0.72916) < 0.01
assert abs(ret_value['classes']['cabinet']['ap25%'] - 0.88888) < 0.01
assert abs(ret_value['classes']['bed']['ap50%'] - 0.5) < 0.01
assert abs(ret_value['classes']['bed']['ap25%'] - 0.5) < 0.01
assert abs(ret_value['classes']['chair']['ap50%'] - 0.375) < 0.01
assert abs(ret_value['classes']['chair']['ap25%'] - 1.0) < 0.01
| [
"numpy.ones",
"torch.tensor",
"mmdet3d.core.instance_seg_eval"
] | [((1773, 2070), 'mmdet3d.core.instance_seg_eval', 'instance_seg_eval', ([], {'gt_semantic_masks': 'gt_semantic_masks', 'gt_instance_masks': 'gt_instance_masks', 'pred_instance_masks': 'pred_instance_masks', 'pred_instance_labels': 'pred_instance_labels', 'pred_instance_scores': 'pred_instance_scores', 'valid_class_ids': 'valid_class_ids', 'class_labels': 'class_labels'}), '(gt_semantic_masks=gt_semantic_masks, gt_instance_masks=\n gt_instance_masks, pred_instance_masks=pred_instance_masks,\n pred_instance_labels=pred_instance_labels, pred_instance_scores=\n pred_instance_scores, valid_class_ids=valid_class_ids, class_labels=\n class_labels)\n', (1790, 2070), False, 'from mmdet3d.core import instance_seg_eval\n'), ((2506, 2803), 'mmdet3d.core.instance_seg_eval', 'instance_seg_eval', ([], {'gt_semantic_masks': 'gt_semantic_masks', 'gt_instance_masks': 'gt_instance_masks', 'pred_instance_masks': 'pred_instance_masks', 'pred_instance_labels': 'pred_instance_labels', 'pred_instance_scores': 'pred_instance_scores', 'valid_class_ids': 'valid_class_ids', 'class_labels': 'class_labels'}), '(gt_semantic_masks=gt_semantic_masks, gt_instance_masks=\n gt_instance_masks, pred_instance_masks=pred_instance_masks,\n pred_instance_labels=pred_instance_labels, pred_instance_scores=\n pred_instance_scores, valid_class_ids=valid_class_ids, class_labels=\n class_labels)\n', (2523, 2803), False, 'from mmdet3d.core import instance_seg_eval\n'), ((919, 950), 'numpy.ones', 'np.ones', (['n_points'], {'dtype': 'np.int'}), '(n_points, dtype=np.int)\n', (926, 950), True, 'import numpy as np\n'), ((983, 1014), 'numpy.ones', 'np.ones', (['n_points'], {'dtype': 'np.int'}), '(n_points, dtype=np.int)\n', (990, 1014), True, 'import numpy as np\n'), ((1049, 1080), 'numpy.ones', 'np.ones', (['n_points'], {'dtype': 'np.int'}), '(n_points, dtype=np.int)\n', (1056, 1080), True, 'import numpy as np\n'), ((1474, 1504), 'torch.tensor', 'torch.tensor', (['gt_instance_mask'], {}), '(gt_instance_mask)\n', (1486, 1504), False, 'import torch\n'), ((1539, 1569), 'torch.tensor', 'torch.tensor', (['gt_semantic_mask'], {}), '(gt_semantic_mask)\n', (1551, 1569), False, 'import torch\n'), ((1606, 1638), 'torch.tensor', 'torch.tensor', (['pred_instance_mask'], {}), '(pred_instance_mask)\n', (1618, 1638), False, 'import torch\n'), ((1676, 1696), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (1688, 1696), False, 'import torch\n'), ((1734, 1754), 'torch.tensor', 'torch.tensor', (['scores'], {}), '(scores)\n', (1746, 1754), False, 'import torch\n')] |
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from numpy.fft import rfft, rfftfreq
if __name__ == '__main__':
plt.style.use("ggplot")
Path("plots").mkdir(exist_ok=True)
A = 2 * np.pi
x = np.linspace(0, 10, 10 * 50)
sinex = np.sin(A * x)
sine2x = np.sin(A * 2 * x)
sine3x = np.sin(A * 3 * x)
sine_sum = sinex + sine2x + sine3x
sine_sum /= sine_sum.max()
plots = {
"y = sin(2πx)": sinex,
"y = sin(4πx)": sine2x,
"y = sin(6πx)": sine3x,
"y = peak_norm[sin(2πx) + sin(4πx) + sin(6πx)]": sine_sum
}
fig, ax = plt.subplots(4, 1, figsize=(10, 5), sharex=True)
for ix, (name, plot) in enumerate(plots.items()):
ax[ix].plot(x, plot)
ax[ix].set_title(name, fontdict={'size': 10})
ax[ix].set_ylabel("Amplitude", fontdict={'size': 8})
plt.xlabel("Time", fontdict={'size': 8})
fig.subplots_adjust(hspace=0.9)
fig.savefig("plots/sine.png", dpi=800)
y = rfft(sine_sum)
x = rfftfreq(len(sine_sum), 1 / 50)
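    # With 10 * 50 = 500 samples at a nominal 50 Hz rate, the bin spacing is
    # roughly 50 / 500 = 0.1 Hz, so the three sines should show up as peaks
    # near 1, 2 and 3 Hz; x[:50] below spans 0-4.9 Hz, well under Nyquist (25 Hz).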
fig = plt.figure(figsize=(10, 5))
plt.plot(x[:50], np.abs(y)[:50])
plt.xlabel("Frequency", fontdict={'size': 8})
plt.ylabel("Magnitude", fontdict={'size': 8})
plt.savefig("plots/fft.png", dpi=800)
| [
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"pathlib.Path",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.style.use",
"numpy.fft.rfft",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.sin",
"matplotlib.pyplot.subplots"
] | [((147, 170), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (160, 170), True, 'import matplotlib.pyplot as plt\n'), ((237, 264), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(10 * 50)'], {}), '(0, 10, 10 * 50)\n', (248, 264), True, 'import numpy as np\n'), ((277, 290), 'numpy.sin', 'np.sin', (['(A * x)'], {}), '(A * x)\n', (283, 290), True, 'import numpy as np\n'), ((304, 321), 'numpy.sin', 'np.sin', (['(A * 2 * x)'], {}), '(A * 2 * x)\n', (310, 321), True, 'import numpy as np\n'), ((335, 352), 'numpy.sin', 'np.sin', (['(A * 3 * x)'], {}), '(A * 3 * x)\n', (341, 352), True, 'import numpy as np\n'), ((619, 667), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'figsize': '(10, 5)', 'sharex': '(True)'}), '(4, 1, figsize=(10, 5), sharex=True)\n', (631, 667), True, 'import matplotlib.pyplot as plt\n'), ((872, 912), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {'fontdict': "{'size': 8}"}), "('Time', fontdict={'size': 8})\n", (882, 912), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1015), 'numpy.fft.rfft', 'rfft', (['sine_sum'], {}), '(sine_sum)\n', (1005, 1015), False, 'from numpy.fft import rfft, rfftfreq\n'), ((1067, 1094), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (1077, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1136, 1181), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency"""'], {'fontdict': "{'size': 8}"}), "('Frequency', fontdict={'size': 8})\n", (1146, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1186, 1231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude"""'], {'fontdict': "{'size': 8}"}), "('Magnitude', fontdict={'size': 8})\n", (1196, 1231), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1273), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/fft.png"""'], {'dpi': '(800)'}), "('plots/fft.png', dpi=800)\n", (1247, 1273), True, 'import matplotlib.pyplot as plt\n'), ((175, 188), 'pathlib.Path', 'Path', (['"""plots"""'], {}), "('plots')\n", (179, 188), False, 'from pathlib import Path\n'), ((1116, 1125), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (1122, 1125), True, 'import numpy as np\n')] |
# occiput
# <NAME>
# Aalto University, School of Science, Helsinki
# Oct 2013, Helsinki
# Harvard University, Martinos Center for Biomedical Imaging
# Boston, MA, USA
import os
import warnings
import dicom
import numpy
try:
import dcmstack
dcmstack_available = True
except:
dcmstack_available = False
from glob import glob
from ...Core.Conversion import nipy_to_occiput, nifti_to_occiput
from ...Visualization.LookupTable import load_freesurfer_lut_file
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import nipy
def import_nifti(filename):
nip = nipy.load_image(filename)
return nipy_to_occiput(nip)
def import_mask(filename, lookup_table_filename=None):
# Load file
nip = nipy.load_image(filename)
occ = nipy_to_occiput(nip)
occ.set_mask_flag(1)
# Load the lookup table. If not specified, try to load from file with the same name as
# the mask image file.
    if lookup_table_filename is None:
        f = []
        f.append(os.path.splitext(filename)[0] + '.lut')
        f.append(os.path.splitext(os.path.splitext(filename)[0])[0] + '.lut')  # This includes .nii.gz files
        lut = None
        for lookup_table_filename in f:
            try:
                lut = load_freesurfer_lut_file(lookup_table_filename)
                break  # keep the first lookup table that loads successfully
            except Exception:
                lut = None
    else:
        lut = load_freesurfer_lut_file(lookup_table_filename)
if lut is not None:
occ.set_lookup_table(lut)
return occ
def import_dicom(search_path, extension='IMA'):
    if not dcmstack_available:
        raise ImportError("Please install dcmstack from https://github.com/moloney/dcmstack/tags")
else:
search_string = search_path + '/*.' + extension
src_paths = glob(search_string)
stacks = dcmstack.parse_and_stack(src_paths)
images = []
for key in stacks.keys():
stack = stacks[key]
image = nifti_to_occiput(stack.to_nifti())
images.append(image)
return images
def import_dicom_series(path, files_start_with=None, files_end_with=None,
exclude_files_end_with=('.dat', '.txt', '.py', '.pyc', '.nii', '.gz')):
"""Rudimentary file to load dicom serie from a directory. """
N = 0
paths = []
slices = []
files = os.listdir(path)
for file_name in files:
file_valid = True
if files_start_with is not None:
if not file_name.startswith(files_start_with):
file_valid = False
if files_end_with is not None:
if not file_name.endswith(files_end_with):
file_valid = False
for s in exclude_files_end_with:
if file_name.endswith(s):
file_valid = False
if file_valid:
full_path = path + os.sep + file_name
# read moco information from files
paths.append(full_path)
f = dicom.read_file(full_path)
slice = f.pixel_array
slices.append(slice)
N += 1
instance_number = f.get(0x00200013).value
creation_time = f.get(0x00080013).value
# print "Instance number: ",instance_number
# print "Creation time: ",creation_time
array = numpy.zeros((slices[0].shape[0], slices[0].shape[1], N), dtype=numpy.float32)
for i in range(N):
slice = numpy.float32(slices[i]) # FIXME: handle other data types
array[:, :, i] = slice
# return occiput_from_array(array)
return array
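# Usage sketch (path and filter are hypothetical):
#   volume = import_dicom_series('/data/study1', files_end_with='.IMA')
#   print(volume.shape)   # (rows, columns, number_of_slices)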
| [
"os.listdir",
"dcmstack.parse_and_stack",
"dicom.read_file",
"warnings.catch_warnings",
"nipy.load_image",
"os.path.splitext",
"numpy.zeros",
"warnings.simplefilter",
"numpy.float32",
"glob.glob"
] | [((480, 505), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (503, 505), False, 'import warnings\n'), ((511, 542), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (532, 542), False, 'import warnings\n'), ((599, 624), 'nipy.load_image', 'nipy.load_image', (['filename'], {}), '(filename)\n', (614, 624), False, 'import nipy\n'), ((741, 766), 'nipy.load_image', 'nipy.load_image', (['filename'], {}), '(filename)\n', (756, 766), False, 'import nipy\n'), ((2299, 2315), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2309, 2315), False, 'import os\n'), ((3268, 3345), 'numpy.zeros', 'numpy.zeros', (['(slices[0].shape[0], slices[0].shape[1], N)'], {'dtype': 'numpy.float32'}), '((slices[0].shape[0], slices[0].shape[1], N), dtype=numpy.float32)\n', (3279, 3345), False, 'import numpy\n'), ((1739, 1758), 'glob.glob', 'glob', (['search_string'], {}), '(search_string)\n', (1743, 1758), False, 'from glob import glob\n'), ((1776, 1811), 'dcmstack.parse_and_stack', 'dcmstack.parse_and_stack', (['src_paths'], {}), '(src_paths)\n', (1800, 1811), False, 'import dcmstack\n'), ((3385, 3409), 'numpy.float32', 'numpy.float32', (['slices[i]'], {}), '(slices[i])\n', (3398, 3409), False, 'import numpy\n'), ((2921, 2947), 'dicom.read_file', 'dicom.read_file', (['full_path'], {}), '(full_path)\n', (2936, 2947), False, 'import dicom\n'), ((1014, 1040), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1030, 1040), False, 'import os\n'), ((1088, 1114), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1104, 1114), False, 'import os\n')] |
import glob
import json
import os
import queue
import random
import threading
import time
from ctypes import *
import cv2
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
import PIL.Image as pil
import torch
from torchvision import transforms
import camera_parameters as params
import darknet.darknet as darknet
import monodepth2.networks as networks
from camera_parameters import *
from deep_sort import build_tracker
from deep_sort.parser import get_config
from kalman_filter import KalmanFilter
from monodepth2.layers import disp_to_depth
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
print("CUDA NOT AVALIABLE")
def gstreamer_pipeline(
capture_width=1280,
capture_height=720,
display_width=1280,
display_height=720,
framerate=10,
flip_method=0,
):
return (
"nvarguscamerasrc ! "
"video/x-raw(memory:NVMM), "
"width=(int)%d, height=(int)%d, "
"format=(string)NV12, framerate=(fraction)%d/1 ! "
"nvvidconv flip-method=%d ! "
"video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
"videoconvert ! "
"video/x-raw, format=(string)BGR ! appsink"
% (
capture_width,
capture_height,
framerate,
flip_method,
display_width,
display_height,
)
)
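# Usage sketch: the string built above is the usual nvarguscamerasrc pipeline
# for a Jetson CSI camera, consumed as
#   cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=2), cv2.CAP_GSTREAMER)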
class CameraCapture(cv2.VideoCapture):
"""Bufferless & Distorted VideoCapture"""
def __init__(self, original_options: tuple, intrinsic_matrix, dist_coeffs):
super().__init__(*original_options)
# self._queue = queue.SimpleQueue()
self._queue = queue.Queue()
read_camera_thread = threading.Thread(target=self._reader)
read_camera_thread.daemon = True
read_camera_thread.start()
self._intrinsic_matrix = intrinsic_matrix.cpu().numpy()
self._dist_coeffs = np.array(dist_coeffs)
frame = self._queue.get()
        self._new_intrinsic_matrix, self._new_xywh = cv2.getOptimalNewCameraMatrix(
            self._intrinsic_matrix, self._dist_coeffs, frame.shape[1::-1], 0)  # imageSize expects (width, height)
self.intrinsic_matrix = torch.tensor(
self._new_intrinsic_matrix).to(device)
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
self._success, frame = super().read()
if not self._success:
break
while True:
try:
self._queue.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
break
self._queue.put(frame)
def _distort_img(self, img):
distorted_img = cv2.undistort(img, self._intrinsic_matrix, self._dist_coeffs,
None, self._new_intrinsic_matrix)
        x, y, w, h = self._new_xywh
        distorted_img = distorted_img[y:y+h, x:x+w]  # ROI is (x, y, w, h): rows indexed by y, columns by x
return distorted_img
def read(self):
return self._success, self._distort_img(self._queue.get())
class FileCapture():
def __init__(self, file_path: str, ext="jpg") -> None:
images_list = glob.glob(f'{file_path}/*.{ext}')
images_list.sort(key=lambda x: int(x[len(file_path)+1:-len(ext)-1]))
self.images = iter(images_list)
self.intrinsic_matrix = torch.FloatTensor(
[[785.26446533, 0., 627.50964355],
[0., 785.27935791, 340.54248047],
[0., 0., 1.]]).to(device)
def release(self):
pass
def read(self):
success_flag = False
try:
fname = next(self.images)
frame = cv2.imread(fname)
success_flag = True
        except StopIteration:
            frame = None
return success_flag, frame
class Trajectory():
def __init__(self, max_age=50, max_error=0.1):
self.max_age = max_age
self.max_error = max_error
self.objects = dict()
self.index = 0
def __delete_out_dated(self):
to_be_deleted = []
for obj_id, coords_dict in self.objects.items():
last_index = max([key for key in coords_dict.keys()])
if self.index-last_index > self.max_age:
to_be_deleted.append(obj_id)
for index in to_be_deleted:
self.objects.pop(index)
def update(self, coords, ids):
self.__delete_out_dated()
for i, id in enumerate(ids):
if id not in self.objects.keys():
self.objects[id] = {self.index: coords[i]}
else:
if self.index-1 in self.objects[id]:
self.objects[id][self.index] = coords[i]
else:
last_index = max([key for key in self.objects[id].keys()])
for index in range(last_index+1, self.index+1):
last_coord = self.objects[id][last_index]
current_coord = coords[i]
self.objects[id][index] = [last_coord[coord]+(current_coord[coord]-last_coord[coord])*(
index-last_index)/(self.index-last_index) for coord in range(len(coords[i]))]
self.index += 1
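    # Note: update() linearly interpolates coordinates for frames in which a
    # tracked id was missed, so stored trajectories stay gap-free as long as
    # the id reappears within max_age frames.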
def get_nearest(self, distance) -> dict:
distance_dict = dict()
min_delta = float("inf")
for obj_id, coords_dict in self.objects.items():
last_index = max([key for key in coords_dict.keys()])
current_coord = coords_dict[last_index]
current_distance = (sum(x**2 for x in current_coord))**.5
distance_dict[obj_id] = current_distance
        best_match = None
        for obj_id, obj_distance in distance_dict.items():
            if abs(obj_distance-distance) < min_delta:
                min_delta = abs(obj_distance-distance)
                best_match = obj_id
        # Guard against an empty trajectory store: only return a match if one
        # was found within the allowed error.
        if best_match is not None and min_delta < self.max_error:
            return self.objects[best_match]
        return None
def init_monodepth_model(model_name):
"""Function to predict for a single image or folder of images
"""
model_path = os.path.join("./monodepth2/models", model_name)
print("-> Loading model from ", model_path)
encoder_path = os.path.join(model_path, "encoder.pth")
depth_decoder_path = os.path.join(model_path, "depth.pth")
# LOADING PRETRAINED MODEL
print(" Loading pretrained encoder")
encoder = networks.ResnetEncoder(18, False)
loaded_dict_enc = torch.load(encoder_path, map_location=device)
# extract the height and width of image that this model was trained with
feed_height = loaded_dict_enc['height']
feed_width = loaded_dict_enc['width']
filtered_dict_enc = {
k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}
encoder.load_state_dict(filtered_dict_enc)
encoder.to(device)
encoder.eval()
print(" Loading pretrained decoder")
depth_decoder = networks.DepthDecoder(
num_ch_enc=encoder.num_ch_enc, scales=range(4))
loaded_dict = torch.load(depth_decoder_path, map_location=device)
depth_decoder.load_state_dict(loaded_dict)
depth_decoder.to(device)
depth_decoder.eval()
return feed_height, feed_width, encoder, depth_decoder
def get_relative_depth(frame, feed_height, feed_width, encoder, depth_decoder):
input_image = pil.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
# PREDICTING ON EACH IMAGE IN TURN
with torch.no_grad():
# Load image and preprocess
original_width, original_height = input_image.size
input_image = input_image.resize(
(feed_width, feed_height), pil.LANCZOS)
input_image = transforms.ToTensor()(input_image).unsqueeze(0)
# PREDICTION
input_image = input_image.to(device)
features = encoder(input_image)
outputs = depth_decoder(features)
disp = outputs[("disp", 0)]
        disp_resized = torch.nn.functional.interpolate(
            disp, (original_height, original_width), mode="bilinear", align_corners=False)  # upsample back to the source image size
_, depth = disp_to_depth(disp_resized, 0.1, 100)
return depth
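# Note: disp_to_depth converts the network's sigmoid disparity into depth
# clamped to the [0.1, 100] range passed above; the values remain relative
# until get_scale() below recovers a metric scale factor.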
def pixelcoord_to_worldcoord(depth_matrix, intrinsic_matrix, inv_extrinsics_matrix, pixel_indexs):
cx = intrinsic_matrix[0, 2]
cy = intrinsic_matrix[1, 2]
fx = intrinsic_matrix[0, 0]
fy = intrinsic_matrix[1, 1]
v = pixel_indexs[0, :]
u = pixel_indexs[1, :]
depth_vector = depth_matrix.view(-1)
v = (v-cy)*depth_vector/fy
u = (u-cx)*depth_vector/fx
ones = torch.ones(depth_vector.size()).to(device)
P_cam = torch.stack((u, v, depth_vector, ones), dim=0)
    # [x: crosswise, y: -lengthwise, z: vertical, 1]
P_w = torch.mm(inv_extrinsics_matrix, P_cam)
# np.savetxt('P_cam.txt', P_cam.cpu().numpy()[:, :10])
# np.savetxt('P_w.txt', P_w.cpu().numpy()[:, :10])
return P_w
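# Back-projection used above (pinhole model): for a pixel (u, v) with depth Z,
#   X_cam = (u - cx) * Z / fx,   Y_cam = (v - cy) * Z / fy,
# and the homogeneous camera point [X_cam, Y_cam, Z, 1] is mapped into world
# coordinates by the inverse extrinsic matrix.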
def get_mask(x_left, y_top, x_right, y_bottom, to_size, portion=1):
"""Edges all included"""
if portion > 0 and portion < 1:
mask = torch.bernoulli(
torch.ones(y_bottom-y_top+1, x_right-x_left+1)*portion)
else:
mask = torch.ones(y_bottom-y_top+1, x_right-x_left+1)
padding = (
x_left, # padding in left
to_size[1]-x_right-1, # padding in right
y_top, # padding in top
to_size[0]-y_bottom-1 # padding in bottom
)
mask = torch.nn.functional.pad(
mask, padding, mode="constant", value=0).type(torch.bool).to(device)
return mask
def find_diff(last_frame, current_frame, threshold=10):
diff = cv2.absdiff(last_frame, current_frame)
diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
static_points = torch.from_numpy((diff < threshold)).to(device)
return static_points
def get_scale(relative_disp: torch.tensor, intrinsic_matrix: torch.tensor, inv_extrinsics_matrix: torch.tensor,
camera_height: float, pixel_indexs, current_frame, last_frame, last_true_disp, portion=1):
mask = get_mask(relative_disp.size()[1]*3//8,
relative_disp.size()[0]*27//40,
relative_disp.size()[1]*5//8,
relative_disp.size()[0]*37//40,
relative_disp.size(), portion=portion)
road_points = relative_disp*mask
P_w = pixelcoord_to_worldcoord(road_points, intrinsic_matrix,
inv_extrinsics_matrix, pixel_indexs)
    rel_heights = torch.masked_select(P_w[2, :], mask.view(-1))  # select the z (height) coordinates
std = torch.std(rel_heights)
mean = torch.mean(rel_heights)
threshold_mask = torch.lt(torch.abs(rel_heights-mean), std)
rel_heights = torch.masked_select(rel_heights, threshold_mask.view(-1))
scale_camera_height_based = camera_height / \
(rel_heights.sum()/rel_heights.shape[0])
if last_frame is not None and last_true_disp is not None:
static_points = find_diff(last_frame, current_frame)
scale_static_points_based = torch.sum(last_true_disp*static_points.reshape_as(last_true_disp)) /\
torch.sum(relative_disp*static_points.reshape_as(relative_disp))
scale = .1*scale_camera_height_based+0.9*scale_static_points_based
# print(torch.sum(static_points) / last_true_disp.numel())
else:
scale = scale_camera_height_based
return scale
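# Scale recovery above blends two estimates with fixed weights (0.1 / 0.9):
# (1) the known camera height divided by the mean back-projected height of a
# sampled road patch, and (2) the ratio of the previous frame's metric
# disparity to the current relative disparity over pixels that stayed static
# between frames.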
def init_darknet_network(config_file: str, data_file: str, weights_file: str,):
network, class_names, class_colors = darknet.load_network(
config_file, data_file, weights_file, batch_size=1)
return network, class_names, class_colors
def detection(darknet_network, class_names, class_colors, frame, confidence_thresh=0.25):
original_height = frame.shape[0]
original_width = frame.shape[1]
network_width = darknet.network_width(darknet_network)
network_height = darknet.network_height(darknet_network)
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (network_width, network_height),
interpolation=cv2.INTER_LINEAR)
img_for_detect = darknet.make_image(network_width, network_height, 3)
darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())
detections = darknet.detect_image(
darknet_network, class_names, img_for_detect, thresh=confidence_thresh)
darknet.free_image(img_for_detect)
detections_resized = []
for label, confidence, bbox in detections:
x, y, w, h = bbox
bbox = (x*original_width/network_width,
y*original_height/network_height,
w*original_width/network_width,
h*original_height/network_height,)
detections_resized.append((label, confidence, bbox))
return detections_resized
def get_coordinates(P_w, outputs, frame):
measurement_list = []
id_list = []
for output in outputs:
x1, y1, x2, y2, id = output
# mask = get_mask(x1+(x2-x1)//10, y1+(y2-y1)//10,
# x2-(x2-x1)//10, y2-(y2-y1)//10, frame.shape)
mask = get_mask(x1, y1, x2, y2, frame.shape)
x_coords = torch.masked_select(P_w[0, :], mask.view(-1))
y_coords = torch.masked_select(P_w[1, :], mask.view(-1))
z_coords = torch.masked_select(P_w[2, :], mask.view(-1))
coords = torch.stack((x_coords, y_coords, z_coords), dim=0)
distance_w = torch.norm(coords, p=2, dim=0)
min_distance = torch.min(distance_w)
index = (distance_w == min_distance).nonzero().flatten()[0]
x_distance = float(coords[0, index])
y_distance = float(coords[1, index])
z_distance = float(coords[2, index])
measurement_list.append([x_distance, y_distance, z_distance])
id_list.append(id)
return measurement_list, id_list
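# get_coordinates() reduces each tracked box to a single world point: the
# in-box pixel whose back-projected world coordinate is nearest the camera.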
# @profile
def main():
# # read form camera
# intrinsic_matrix = params.intrinsic_matrix
# camera = CameraCapture((0,), intrinsic_matrix, dist_coeffs)
# camera = CameraCapture(
# (gstreamer_pipeline(), cv2.CAP_GSTREAMER), intrinsic_matrix, dist_coeffs)
# read form file
camera = FileCapture("./img")
intrinsic_matrix = camera.intrinsic_matrix
# cv2.namedWindow("Test camera")
# cv2.namedWindow("Result")
# cv2.namedWindow("MultiTracker")
# choices = ["mono_640x192",
# "stereo_640x192",
# "mono+stereo_640x192",
# "mono_no_pt_640x192",
# "stereo_no_pt_640x192",
# "mono+stereo_no_pt_640x192",
# "mono_1024x320",
# "stereo_1024x320",
# "mono+stereo_1024x320"]
# initiate monodepth
feed_height, feed_width, encoder, depth_decoder = init_monodepth_model(
"mono_640x192")
# initiate yolo
darknet_network, class_names, class_colors = init_darknet_network(config_file="./darknet/yolo-obj.cfg",
data_file="./darknet/data/obj.data",
weights_file="./darknet/yolo-obj.weights")
# initiate deep track
cfg = get_config()
cfg.merge_from_file("deep_sort/configs/deep_sort.yaml")
deepsort = build_tracker(cfg, use_cuda=torch.cuda.is_available())
# initiate Kalman filter
measurement_matrix = np.array(
[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]], np.float32)
transition_matrix = np.array(
[[1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]], np.float32)
process_noise_cov = np.eye(6, dtype=np.float32) * 1e-3
measurement_noise_cov = np.eye(3, dtype=np.float32) * 1e-1
kalman_filter = KalmanFilter(6, 3, measurement_matrix,
transition_matrix, process_noise_cov, measurement_noise_cov)
# initiate trajectory recorder
trajectory_recorder = Trajectory()
success, frame = camera.read()
pixel_indexs = torch.tensor([[v, u]
for v in range(frame.shape[0])
for u in range(frame.shape[1])]).t().to(device)
last_frame = None
last_true_disp = None
last_y = dict()
time_serial_index = 0
while success:
last_run_time = time.time()
# key = cv2.waitKey(1)
# # if key == 27 or not success or\
# # cv2.getWindowProperty("Test camera", cv2.WND_PROP_AUTOSIZE) < 1 or\
# # cv2.getWindowProperty("Result", cv2.WND_PROP_AUTOSIZE) < 1 or\
# # cv2.getWindowProperty("MultiTracker", cv2.WND_PROP_AUTOSIZE) < 1:
# if key == 27 or not success:
# break
# if key == ord(']'):
# continue
# do depth estimation
rel_disp = get_relative_depth(
frame, feed_height, feed_width, encoder, depth_decoder)
scale = get_scale(rel_disp, intrinsic_matrix,
inv_extrinsics_matrix, camera_height, pixel_indexs,
frame, last_frame, last_true_disp)
true_disp = rel_disp*scale
P_w = pixelcoord_to_worldcoord(true_disp, intrinsic_matrix,
inv_extrinsics_matrix, pixel_indexs)
last_frame = frame
last_true_disp = true_disp
# do detection
detections = detection(
darknet_network, class_names, class_colors, frame)
detections = np.array(detections, dtype=object)
if detections.size > 0:
bbox_xywh = np.array([np.array(xywh) for xywh in detections[:, 2]])
            cls_conf = detections[:, 1].astype(float)  # np.float was removed in NumPy >= 1.24
else:
bbox_xywh = np.array([[], [], [], []]).T
cls_conf = np.array([[], [], [], []]).T
# do tracking
outputs = deepsort.update(bbox_xywh, cls_conf, frame)
# get coordinates
coords, ids = get_coordinates(P_w, outputs, frame)
# do kalman filting
filtered = kalman_filter.update(coords, ids)
# record trajectory
trajectory_recorder.update([filtered[index] for index in ids], ids)
# # get depth image
# disp_resized_np = -P_w[1, :].cpu().numpy().reshape(
# frame.shape[0], frame.shape[1])
# # Saving colormapped depth image
# vmax = np.percentile(disp_resized_np, 95)
# # normalizer = mpl.colors.Normalize(
# # vmin=disp_resized_np.min(), vmax=vmax)
# normalizer = mpl.colors.Normalize(vmin=0, vmax=20)
# # print(f"min: {disp_resized_np.min()}\tmax: {vmax}")
# mapper = cm.ScalarMappable(norm=normalizer, cmap='magma')
# colormapped_im = (mapper.to_rgba(disp_resized_np)[
# :, :, :3] * 255).astype(np.uint8)
# im = pil.fromarray(colormapped_im)
# # im.save(f'./temp/{temp_index}_disp.jpg')
# plot result
font = cv2.FONT_HERSHEY_DUPLEX
font_thickness = 1
for output in outputs:
x1, y1, x2, y2, id = output
random.seed(id)
color = (random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255))
cv2.rectangle(frame, (x1, y1), (x2, y2),
color, 2)
x_distance, y_distance, z_distance = filtered[id]
if id in last_y:
                speed = (last_y[id]+y_distance)/0.1244  # last_y holds -y_prev, so this is (y_now - y_prev)/0.1244s
else:
speed = 0
last_y[id] = -y_distance
text_line1 = f"y:{-y_distance:0.2f}m"
text_line2 = f"speed:{speed:0.2f}m/s"
font_scale = 0.8
cv2.putText(frame, text_line1, (x2, y1), font,
font_scale, color, font_thickness, cv2.LINE_AA)
size_line1 = cv2.getTextSize(
text_line1, font, font_scale, font_thickness)[0]
cv2.putText(frame, text_line2, (x2, y1+size_line1[1]), font,
font_scale, color, font_thickness, cv2.LINE_AA)
font_scale = 2
fps_text = f"FPS:{1/(time.time()-last_run_time):0.1f}"
size_fps_text = cv2.getTextSize(
fps_text, font, font_scale, font_thickness)[0]
        cv2.putText(frame, fps_text, (frame.shape[1]-size_fps_text[0], size_fps_text[1]), font,
                    font_scale, (0, 0, 255), font_thickness, cv2.LINE_AA)  # shape[1] is the frame width
# cv2.imshow("MultiTracker", frame)
# cv2.imwrite(f'./temp/{temp_index}.jpg', frame)
print(f"index: {time_serial_index}")
# if time_serial_index == 50:
# break
success, frame = camera.read()
time_serial_index += 1
camera.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| [
"cv2.rectangle",
"torch.from_numpy",
"torch.min",
"numpy.array",
"torch.cuda.is_available",
"cv2.destroyAllWindows",
"torch.nn.functional.interpolate",
"torch.nn.functional.pad",
"darknet.darknet.detect_image",
"monodepth2.layers.disp_to_depth",
"torch.mean",
"darknet.darknet.free_image",
"d... | [((570, 595), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (593, 595), False, 'import torch\n'), ((610, 630), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (622, 630), False, 'import torch\n'), ((650, 669), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (662, 669), False, 'import torch\n'), ((6134, 6181), 'os.path.join', 'os.path.join', (['"""./monodepth2/models"""', 'model_name'], {}), "('./monodepth2/models', model_name)\n", (6146, 6181), False, 'import os\n'), ((6249, 6288), 'os.path.join', 'os.path.join', (['model_path', '"""encoder.pth"""'], {}), "(model_path, 'encoder.pth')\n", (6261, 6288), False, 'import os\n'), ((6314, 6351), 'os.path.join', 'os.path.join', (['model_path', '"""depth.pth"""'], {}), "(model_path, 'depth.pth')\n", (6326, 6351), False, 'import os\n'), ((6441, 6474), 'monodepth2.networks.ResnetEncoder', 'networks.ResnetEncoder', (['(18)', '(False)'], {}), '(18, False)\n', (6463, 6474), True, 'import monodepth2.networks as networks\n'), ((6497, 6542), 'torch.load', 'torch.load', (['encoder_path'], {'map_location': 'device'}), '(encoder_path, map_location=device)\n', (6507, 6542), False, 'import torch\n'), ((7063, 7114), 'torch.load', 'torch.load', (['depth_decoder_path'], {'map_location': 'device'}), '(depth_decoder_path, map_location=device)\n', (7073, 7114), False, 'import torch\n'), ((8637, 8683), 'torch.stack', 'torch.stack', (['(u, v, depth_vector, ones)'], {'dim': '(0)'}), '((u, v, depth_vector, ones), dim=0)\n', (8648, 8683), False, 'import torch\n'), ((8747, 8785), 'torch.mm', 'torch.mm', (['inv_extrinsics_matrix', 'P_cam'], {}), '(inv_extrinsics_matrix, P_cam)\n', (8755, 8785), False, 'import torch\n'), ((9615, 9653), 'cv2.absdiff', 'cv2.absdiff', (['last_frame', 'current_frame'], {}), '(last_frame, current_frame)\n', (9626, 9653), False, 'import cv2\n'), ((9665, 9703), 'cv2.cvtColor', 'cv2.cvtColor', (['diff', 'cv2.COLOR_BGR2GRAY'], {}), '(diff, cv2.COLOR_BGR2GRAY)\n', (9677, 9703), False, 'import cv2\n'), ((10540, 10562), 'torch.std', 'torch.std', (['rel_heights'], {}), '(rel_heights)\n', (10549, 10562), False, 'import torch\n'), ((10574, 10597), 'torch.mean', 'torch.mean', (['rel_heights'], {}), '(rel_heights)\n', (10584, 10597), False, 'import torch\n'), ((11479, 11551), 'darknet.darknet.load_network', 'darknet.load_network', (['config_file', 'data_file', 'weights_file'], {'batch_size': '(1)'}), '(config_file, data_file, weights_file, batch_size=1)\n', (11499, 11551), True, 'import darknet.darknet as darknet\n'), ((11792, 11830), 'darknet.darknet.network_width', 'darknet.network_width', (['darknet_network'], {}), '(darknet_network)\n', (11813, 11830), True, 'import darknet.darknet as darknet\n'), ((11852, 11891), 'darknet.darknet.network_height', 'darknet.network_height', (['darknet_network'], {}), '(darknet_network)\n', (11874, 11891), True, 'import darknet.darknet as darknet\n'), ((11908, 11946), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (11920, 11946), False, 'import cv2\n'), ((11967, 12058), 'cv2.resize', 'cv2.resize', (['frame_rgb', '(network_width, network_height)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(frame_rgb, (network_width, network_height), interpolation=cv2.\n INTER_LINEAR)\n', (11977, 12058), False, 'import cv2\n'), ((12107, 12159), 'darknet.darknet.make_image', 'darknet.make_image', (['network_width', 'network_height', '(3)'], {}), '(network_width, network_height, 3)\n', (12125, 12159), True, 'import 
darknet.darknet as darknet\n'), ((12253, 12350), 'darknet.darknet.detect_image', 'darknet.detect_image', (['darknet_network', 'class_names', 'img_for_detect'], {'thresh': 'confidence_thresh'}), '(darknet_network, class_names, img_for_detect, thresh=\n confidence_thresh)\n', (12273, 12350), True, 'import darknet.darknet as darknet\n'), ((12359, 12393), 'darknet.darknet.free_image', 'darknet.free_image', (['img_for_detect'], {}), '(img_for_detect)\n', (12377, 12393), True, 'import darknet.darknet as darknet\n'), ((15162, 15174), 'deep_sort.parser.get_config', 'get_config', ([], {}), '()\n', (15172, 15174), False, 'from deep_sort.parser import get_config\n'), ((15360, 15447), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]]', 'np.float32'], {}), '([[1, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]], np.\n float32)\n', (15368, 15447), True, 'import numpy as np\n'), ((15476, 15622), 'numpy.array', 'np.array', (['[[1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1], [0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]', 'np.float32'], {}), '([[1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1], [0, 0,\n 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]], np.float32)\n', (15484, 15622), True, 'import numpy as np\n'), ((15779, 15882), 'kalman_filter.KalmanFilter', 'KalmanFilter', (['(6)', '(3)', 'measurement_matrix', 'transition_matrix', 'process_noise_cov', 'measurement_noise_cov'], {}), '(6, 3, measurement_matrix, transition_matrix, process_noise_cov,\n measurement_noise_cov)\n', (15791, 15882), False, 'from kalman_filter import KalmanFilter\n'), ((20749, 20772), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (20770, 20772), False, 'import cv2\n'), ((1702, 1715), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1713, 1715), False, 'import queue\n'), ((1745, 1782), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._reader'}), '(target=self._reader)\n', (1761, 1782), False, 'import threading\n'), ((1952, 1973), 'numpy.array', 'np.array', (['dist_coeffs'], {}), '(dist_coeffs)\n', (1960, 1973), True, 'import numpy as np\n'), ((2061, 2157), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['self._intrinsic_matrix', 'self._dist_coeffs', 'frame.shape[:2]', '(0)'], {}), '(self._intrinsic_matrix, self._dist_coeffs,\n frame.shape[:2], 0)\n', (2090, 2157), False, 'import cv2\n'), ((2778, 2878), 'cv2.undistort', 'cv2.undistort', (['img', 'self._intrinsic_matrix', 'self._dist_coeffs', 'None', 'self._new_intrinsic_matrix'], {}), '(img, self._intrinsic_matrix, self._dist_coeffs, None, self.\n _new_intrinsic_matrix)\n', (2791, 2878), False, 'import cv2\n'), ((3221, 3254), 'glob.glob', 'glob.glob', (['f"""{file_path}/*.{ext}"""'], {}), "(f'{file_path}/*.{ext}')\n", (3230, 3254), False, 'import glob\n'), ((7391, 7429), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (7403, 7429), False, 'import cv2\n'), ((7479, 7494), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7492, 7494), False, 'import torch\n'), ((7964, 8078), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['disp', '(original_height, original_width)'], {'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(disp, (original_height, original_width),\n mode='bilinear', align_corners=False)\n", (7995, 8078), False, 'import torch\n'), ((8120, 8157), 'monodepth2.layers.disp_to_depth', 'disp_to_depth', (['disp_resized', '(0.1)', '(100)'], {}), 
'(disp_resized, 0.1, 100)\n', (8133, 8157), False, 'from monodepth2.layers import disp_to_depth\n'), ((9178, 9232), 'torch.ones', 'torch.ones', (['(y_bottom - y_top + 1)', '(x_right - x_left + 1)'], {}), '(y_bottom - y_top + 1, x_right - x_left + 1)\n', (9188, 9232), False, 'import torch\n'), ((10628, 10657), 'torch.abs', 'torch.abs', (['(rel_heights - mean)'], {}), '(rel_heights - mean)\n', (10637, 10657), False, 'import torch\n'), ((13328, 13378), 'torch.stack', 'torch.stack', (['(x_coords, y_coords, z_coords)'], {'dim': '(0)'}), '((x_coords, y_coords, z_coords), dim=0)\n', (13339, 13378), False, 'import torch\n'), ((13400, 13430), 'torch.norm', 'torch.norm', (['coords'], {'p': '(2)', 'dim': '(0)'}), '(coords, p=2, dim=0)\n', (13410, 13430), False, 'import torch\n'), ((13454, 13475), 'torch.min', 'torch.min', (['distance_w'], {}), '(distance_w)\n', (13463, 13475), False, 'import torch\n'), ((15661, 15688), 'numpy.eye', 'np.eye', (['(6)'], {'dtype': 'np.float32'}), '(6, dtype=np.float32)\n', (15667, 15688), True, 'import numpy as np\n'), ((15724, 15751), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (15730, 15751), True, 'import numpy as np\n'), ((16349, 16360), 'time.time', 'time.time', ([], {}), '()\n', (16358, 16360), False, 'import time\n'), ((17519, 17553), 'numpy.array', 'np.array', (['detections'], {'dtype': 'object'}), '(detections, dtype=object)\n', (17527, 17553), True, 'import numpy as np\n'), ((20282, 20434), 'cv2.putText', 'cv2.putText', (['frame', 'fps_text', '(frame.shape[0] - size_fps_text[0], size_fps_text[1])', 'font', 'font_scale', '(0, 0, 255)', 'font_thickness', 'cv2.LINE_AA'], {}), '(frame, fps_text, (frame.shape[0] - size_fps_text[0],\n size_fps_text[1]), font, font_scale, (0, 0, 255), font_thickness, cv2.\n LINE_AA)\n', (20293, 20434), False, 'import cv2\n'), ((3719, 3736), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (3729, 3736), False, 'import cv2\n'), ((9724, 9758), 'torch.from_numpy', 'torch.from_numpy', (['(diff < threshold)'], {}), '(diff < threshold)\n', (9740, 9758), False, 'import torch\n'), ((15278, 15303), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15301, 15303), False, 'import torch\n'), ((19106, 19121), 'random.seed', 'random.seed', (['id'], {}), '(id)\n', (19117, 19121), False, 'import random\n'), ((19269, 19319), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x1, y1)', '(x2, y2)', 'color', '(2)'], {}), '(frame, (x1, y1), (x2, y2), color, 2)\n', (19282, 19319), False, 'import cv2\n'), ((19716, 19814), 'cv2.putText', 'cv2.putText', (['frame', 'text_line1', '(x2, y1)', 'font', 'font_scale', 'color', 'font_thickness', 'cv2.LINE_AA'], {}), '(frame, text_line1, (x2, y1), font, font_scale, color,\n font_thickness, cv2.LINE_AA)\n', (19727, 19814), False, 'import cv2\n'), ((19954, 20068), 'cv2.putText', 'cv2.putText', (['frame', 'text_line2', '(x2, y1 + size_line1[1])', 'font', 'font_scale', 'color', 'font_thickness', 'cv2.LINE_AA'], {}), '(frame, text_line2, (x2, y1 + size_line1[1]), font, font_scale,\n color, font_thickness, cv2.LINE_AA)\n', (19965, 20068), False, 'import cv2\n'), ((20198, 20257), 'cv2.getTextSize', 'cv2.getTextSize', (['fps_text', 'font', 'font_scale', 'font_thickness'], {}), '(fps_text, font, font_scale, font_thickness)\n', (20213, 20257), False, 'import cv2\n'), ((2200, 2240), 'torch.tensor', 'torch.tensor', (['self._new_intrinsic_matrix'], {}), '(self._new_intrinsic_matrix)\n', (2212, 2240), False, 'import torch\n'), ((3405, 3516), 'torch.FloatTensor', 
'torch.FloatTensor', (['[[785.26446533, 0.0, 627.50964355], [0.0, 785.27935791, 340.54248047], [0.0,\n 0.0, 1.0]]'], {}), '([[785.26446533, 0.0, 627.50964355], [0.0, 785.27935791, \n 340.54248047], [0.0, 0.0, 1.0]])\n', (3422, 3516), False, 'import torch\n'), ((9097, 9151), 'torch.ones', 'torch.ones', (['(y_bottom - y_top + 1)', '(x_right - x_left + 1)'], {}), '(y_bottom - y_top + 1, x_right - x_left + 1)\n', (9107, 9151), False, 'import torch\n'), ((17761, 17787), 'numpy.array', 'np.array', (['[[], [], [], []]'], {}), '([[], [], [], []])\n', (17769, 17787), True, 'import numpy as np\n'), ((17813, 17839), 'numpy.array', 'np.array', (['[[], [], [], []]'], {}), '([[], [], [], []])\n', (17821, 17839), True, 'import numpy as np\n'), ((19143, 19165), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (19157, 19165), False, 'import random\n'), ((19188, 19210), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (19202, 19210), False, 'import random\n'), ((19233, 19255), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (19247, 19255), False, 'import random\n'), ((19860, 19921), 'cv2.getTextSize', 'cv2.getTextSize', (['text_line1', 'font', 'font_scale', 'font_thickness'], {}), '(text_line1, font, font_scale, font_thickness)\n', (19875, 19921), False, 'import cv2\n'), ((7707, 7728), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7726, 7728), False, 'from torchvision import transforms\n'), ((9427, 9491), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['mask', 'padding'], {'mode': '"""constant"""', 'value': '(0)'}), "(mask, padding, mode='constant', value=0)\n", (9450, 9491), False, 'import torch\n'), ((17620, 17634), 'numpy.array', 'np.array', (['xywh'], {}), '(xywh)\n', (17628, 17634), True, 'import numpy as np\n'), ((20140, 20151), 'time.time', 'time.time', ([], {}), '()\n', (20149, 20151), False, 'import time\n')] |
import argparse
from collections import OrderedDict
import copy
import flair
import logging
import numpy as np
import os, sys, time
import pandas as pd
import tempfile
from tqdm import tqdm
import torch
import torch.nn as nn
import transformers
from dataloader import DataLoader
from constants import spacy_pos_dict
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
class MLP(nn.Module):
def __init__(self, args, dl):
super().__init__()
networks = OrderedDict()
network_dim = [dl.emb_dim]
for i in range(args.nlayers):
network_dim.append(args.dim)
network_dim.append(len(dl.pos_tags))
        for i in range(len(network_dim)-1):
            fc = nn.Linear(network_dim[i], network_dim[i+1])
            networks["fc_{}".format(i+1)] = fc
            if i < len(network_dim) - 2:  # no ReLU on the output layer; it would clamp the logits
                networks["relu_{}".format(i+1)] = nn.ReLU()
self.network = nn.Sequential(networks)
self.to(device)
def forward(self, x_tensor):
return self.network(x_tensor)
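# Editorial addition; the dimensions below are illustrative, not taken from
# this project. A probe maps per-token embeddings of shape (n_tokens, emb_dim)
# to POS-tag logits of shape (n_tokens, n_tags).
def _probe_shape_demo():
    net = nn.Sequential(nn.Linear(768, 100), nn.ReLU(), nn.Linear(100, 17))
    return net(torch.randn(10, 768)).shape  # torch.Size([10, 17])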
def main(args):
# Logging
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler = logging.FileHandler(args.log)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.info(args)
logger.info("Seed: {}".format(args.seed))
torch.manual_seed(args.seed)
# Training
tr_dataloader = DataLoader("train", args)
dev_dataloader = DataLoader("dev", args)
probe = MLP(args, tr_dataloader)
optim = torch.optim.Adam(
probe.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
# Load checkpoint, or initialize training
if os.path.exists(args.program_checkpoint):
checkpoint = torch.load(args.program_checkpoint)
best_dev_loss = checkpoint["best_dev_loss"]
best_model_state_dict = checkpoint["best_model_state_dict"]
epoch = checkpoint["epoch"]
steps = checkpoint["steps"]
probe.load_state_dict(checkpoint["probe"])
optim.load_state_dict(checkpoint["optim"])
logger.info("Resume training from checkpoint at epoch {}".format(epoch))
else:
best_dev_loss = np.inf
best_model_state_dict = None
epoch = 0
steps = 0
# Start training
doc_iter = 0
epoch_loss_buffer = []
stopping_counter = 0
while args.max_grad_step < 0 or steps < args.max_grad_step:
# One document per step. Minibatch of size 1.
x_tensor, y_tensor = tr_dataloader.next()
if x_tensor is None: # This epoch finishes
epoch += 1
tr_dataloader.reset()
epoch_tr_loss = np.mean(epoch_loss_buffer)
epoch_loss_buffer = []
valid_loss, val_acc = run_eval(probe, dev_dataloader)
if epoch % 20 == 0: # Around 30s per epoch.
torch.save({
"probe": probe.state_dict(),
"probe_best": best_model_state_dict,
"best_dev_loss": best_dev_loss,
"optim": optim.state_dict(),
"epoch": epoch,
"steps": steps
}, args.program_checkpoint)
if valid_loss < best_dev_loss:
stopping_counter = 0
best_dev_loss = valid_loss
best_model_state_dict = copy.deepcopy(probe.state_dict())
else:
stopping_counter += 1
# Anneal learning rate
for param_group in optim.param_groups:
param_group["lr"] *= args.lr_anneal
logger.info("Epoch {}: tr|dev {:.4f} | {:.4f}, Val acc {:.2f}".format(epoch, epoch_tr_loss, valid_loss, val_acc))
if stopping_counter == 4:
logger.info("Loss not improving for 4 consecutive epochs. Training done.")
break
else: # This epoch is not done; keep training
            y_pred = nn.LogSoftmax(dim=-1)(probe(x_tensor))
            # NLLLoss pairs with log-probabilities; CrossEntropyLoss would apply
            # log-softmax a second time.
            loss = nn.NLLLoss()(y_pred, y_tensor)
            optim.zero_grad()  # clear gradients left over from the previous step
            loss.backward()
            optim.step()
epoch_loss_buffer.append(loss.item())
steps += 1
            if steps == args.max_grad_step:
                epoch_loss = np.mean(epoch_loss_buffer)
                valid_loss, val_acc = run_eval(probe, dev_dataloader)
                logger.info("Steps reached {}. Early stop. Train loss {:.4f} Valid loss {:.4f} Acc {:.2f}".format(steps, epoch_loss, valid_loss, val_acc))
test_dataloader = DataLoader("test", args)
if best_model_state_dict is not None: # This is None before the first epoch is done (early stopping)
probe.load_state_dict(best_model_state_dict)
test_loss, test_acc = run_eval(probe, test_dataloader)
logger.info("Total steps: {}".format(steps))
logger.info("Test: {:.4f} Acc {:.2f}".format(test_loss, test_acc))
exp_df = pd.DataFrame({
"lm": [args.lm], "lang": [args.lang], "task": [args.task],
"layer": [args.nlayers], "dim": [args.dim],
"batch_size": [args.batch_size],
"init_lr*1e6": [args.lr*1e6],
"weight_decay": [args.weight_decay],
"lr_anneal": [args.lr_anneal],
"max_grad_step": args.max_grad_step,
"train_steps": steps,
"seed": [args.seed],
"devloss": [best_dev_loss],
"testloss": [test_loss],
"acc": [test_acc]
})
# Bookkeeping
if not os.path.exists(args.bookkeep_df):
bk_df = None
else:
bk_df = pd.read_csv(args.bookkeep_df)
bk_df = pd.concat([bk_df, exp_df], sort=False)
bk_df.to_csv(args.bookkeep_df, index=False)
def run_eval(probe, dataloader):
probe.eval()
eval_losses = []
n_total, n_correct = 0, 0
while dataloader.has_next():
x_tensor, y_tensor = dataloader.next()
if x_tensor is None:
break
        y_pred = nn.LogSoftmax(dim=-1)(probe(x_tensor))
        loss = nn.NLLLoss()(y_pred, y_tensor)  # log-probabilities pair with NLLLoss
eval_losses.append(loss.item())
n_total += len(y_tensor)
predictions, predint = y_pred.max(dim=-1)
n_correct += (predint == y_tensor).sum().item()
dataloader.reset()
probe.train()
acc = n_correct / n_total * 100
return np.mean(eval_losses), acc
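# Editorial addition sketching the accuracy computation in run_eval(): take
# the argmax over the class dimension and report the percentage of matches.
def _accuracy_demo(y_pred, y_true):
    return (y_pred.argmax(dim=-1) == y_true).float().mean().item() * 100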
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--lm", type=str, choices=["bertmulti", "fasttext", "glove"], default="bertmulti")
parser.add_argument("--lang", type=str, choices=["en", "es", "fr"], default="en")
parser.add_argument("--task", type=str, choices=["probe", "ctarget", "crep"], default="probe")
parser.add_argument("--seed", type=int, default=73)
parser.add_argument("--nlayers", type=int, default=1,
help="Num layers for probe")
parser.add_argument("--dim", type=int, default=100,
help="Dimension for probe")
parser.add_argument("--batch_size", type=int, default=32,
help="Batch size for training. For valid / test the batch size is always 1")
parser.add_argument("--lr", type=float, default=3e-4,
help="Learning rate for Adam optimizer")
parser.add_argument("--lr_anneal", type=float, default=1.0,
help="Annealing for learning rate per epoch")
parser.add_argument("--weight_decay", type=float, default=0,
help="Weight decay for Adam optimizer")
parser.add_argument("--max_grad_step", type=int, default=-1)
parser.add_argument("--log", type=str, default="test_log.out")
parser.add_argument("--bookkeep_df", type=str, default="bookkeep_df.csv")
parser.add_argument("--program_checkpoint", type=str, default="",
help="Only needed for V cluster.")
args = parser.parse_args()
main(args)
| [
"logging.getLogger",
"torch.nn.ReLU",
"logging.StreamHandler",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.nn.Sequential",
"torch.cuda.is_available",
"dataloader.DataLoader",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"logging.FileHandler",
"pandas.DataFrame",
"c... | [((354, 379), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (377, 379), False, 'import torch\n'), ((330, 350), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (342, 350), False, 'import torch\n'), ((385, 404), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (397, 404), False, 'import torch\n'), ((1081, 1153), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""'], {}), "('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n", (1098, 1153), False, 'import logging\n'), ((1168, 1197), 'logging.FileHandler', 'logging.FileHandler', (['args.log'], {}), '(args.log)\n', (1187, 1197), False, 'import logging\n'), ((1283, 1310), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1300, 1310), False, 'import logging\n'), ((1386, 1409), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1407, 1409), False, 'import logging\n'), ((1572, 1600), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1589, 1600), False, 'import torch\n'), ((1647, 1672), 'dataloader.DataLoader', 'DataLoader', (['"""train"""', 'args'], {}), "('train', args)\n", (1657, 1672), False, 'from dataloader import DataLoader\n'), ((1694, 1717), 'dataloader.DataLoader', 'DataLoader', (['"""dev"""', 'args'], {}), "('dev', args)\n", (1704, 1717), False, 'from dataloader import DataLoader\n'), ((1924, 1963), 'os.path.exists', 'os.path.exists', (['args.program_checkpoint'], {}), '(args.program_checkpoint)\n', (1938, 1963), False, 'import os, sys, time\n'), ((4772, 4796), 'dataloader.DataLoader', 'DataLoader', (['"""test"""', 'args'], {}), "('test', args)\n", (4782, 4796), False, 'from dataloader import DataLoader\n'), ((5149, 5579), 'pandas.DataFrame', 'pd.DataFrame', (["{'lm': [args.lm], 'lang': [args.lang], 'task': [args.task], 'layer': [args.\n nlayers], 'dim': [args.dim], 'batch_size': [args.batch_size],\n 'init_lr*1e6': [args.lr * 1000000.0], 'weight_decay': [args.\n weight_decay], 'lr_anneal': [args.lr_anneal], 'max_grad_step': args.\n max_grad_step, 'train_steps': steps, 'seed': [args.seed], 'devloss': [\n best_dev_loss], 'testloss': [test_loss], 'acc': [test_acc]}"], {}), "({'lm': [args.lm], 'lang': [args.lang], 'task': [args.task],\n 'layer': [args.nlayers], 'dim': [args.dim], 'batch_size': [args.\n batch_size], 'init_lr*1e6': [args.lr * 1000000.0], 'weight_decay': [\n args.weight_decay], 'lr_anneal': [args.lr_anneal], 'max_grad_step':\n args.max_grad_step, 'train_steps': steps, 'seed': [args.seed],\n 'devloss': [best_dev_loss], 'testloss': [test_loss], 'acc': [test_acc]})\n", (5161, 5579), True, 'import pandas as pd\n'), ((5859, 5897), 'pandas.concat', 'pd.concat', (['[bk_df, exp_df]'], {'sort': '(False)'}), '([bk_df, exp_df], sort=False)\n', (5868, 5897), True, 'import pandas as pd\n'), ((6624, 6649), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6647, 6649), False, 'import argparse\n'), ((509, 522), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (520, 522), False, 'from collections import OrderedDict\n'), ((913, 936), 'torch.nn.Sequential', 'nn.Sequential', (['networks'], {}), '(networks)\n', (926, 936), True, 'import torch.nn as nn\n'), ((1986, 2021), 'torch.load', 'torch.load', (['args.program_checkpoint'], {}), '(args.program_checkpoint)\n', (1996, 2021), False, 'import torch\n'), ((5735, 5767), 'os.path.exists', 'os.path.exists', (['args.bookkeep_df'], {}), '(args.bookkeep_df)\n', (5749, 
5767), False, 'import os, sys, time\n'), ((5817, 5846), 'pandas.read_csv', 'pd.read_csv', (['args.bookkeep_df'], {}), '(args.bookkeep_df)\n', (5828, 5846), True, 'import pandas as pd\n'), ((6556, 6576), 'numpy.mean', 'np.mean', (['eval_losses'], {}), '(eval_losses)\n', (6563, 6576), True, 'import numpy as np\n'), ((743, 788), 'torch.nn.Linear', 'nn.Linear', (['network_dim[i]', 'network_dim[i + 1]'], {}), '(network_dim[i], network_dim[i + 1])\n', (752, 788), True, 'import torch.nn as nn\n'), ((880, 889), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (887, 889), True, 'import torch.nn as nn\n'), ((2913, 2939), 'numpy.mean', 'np.mean', (['epoch_loss_buffer'], {}), '(epoch_loss_buffer)\n', (2920, 2939), True, 'import numpy as np\n'), ((6193, 6214), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (6206, 6214), True, 'import torch.nn as nn\n'), ((6247, 6268), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (6266, 6268), True, 'import torch.nn as nn\n'), ((4209, 4230), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (4222, 4230), True, 'import torch.nn as nn\n'), ((4267, 4288), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4286, 4288), True, 'import torch.nn as nn\n'), ((4523, 4549), 'numpy.mean', 'np.mean', (['epoch_loss_buffer'], {}), '(epoch_loss_buffer)\n', (4530, 4549), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.utils.rnn as rnn_utils
from torch.nn.functional import softmax
import logging
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork(nn.Module):
def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims, attention_dims, with_global_state,
cell_size, cell_num):
super().__init__()
        self.input_dim = input_dim
self.self_state_dim = self_state_dim
self.global_state_dim = mlp1_dims[-1]
self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
self.with_global_state = with_global_state
if with_global_state:
self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
else:
self.attention = mlp(mlp1_dims[-1], attention_dims)
self.cell_size = cell_size
self.cell_num = cell_num
mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
self.attention_weights = None
def set_device(self,device):
self.device=device
def forward(self, state):
"""
First transform the world coordinates to self-centric coordinates and then do forward computation
:param state: tensor of shape (batch_size, # of humans, length of a rotated state)
:return:
"""
        state, mask = state[:, :, :self.input_dim], state[:, :, self.input_dim]
size = state.shape
#print(state)
        # per-batch robot self-state (equivalent to state[0, 0, :self.self_state_dim] for a single sample)
        self_state = state[:, 0, :self.self_state_dim]
        # convert the human-robot social stress to softmax weights
        # (currently only consumed by the commented-out pooling variant below)
        hr_social_stress = state[:, :, -1].view(size[0], size[1], 1).squeeze(dim=2)
        hr_social_exp = torch.exp(hr_social_stress).float()
        hr_social_weight = (hr_social_exp / torch.sum(hr_social_exp, dim=1, keepdim=True)).unsqueeze(2)
mlp1_output = self.mlp1(state.view((-1, size[2])))
#print(mlp1_output.shape)
mlp2_output = self.mlp2(mlp1_output)
if self.with_global_state:
# compute attention scores
#original_state = mlp1_output.view(size[0], size[1], -1)
#original method#
mask_=mask.unsqueeze(2).to(self.device)
mask_ = mask_.expand((size[0], size[1], mlp1_output.shape[1])).contiguous()
            mask_weight = mask_ / torch.sum(mask_, dim=1, keepdim=True)
            mask_weight[mask_weight != mask_weight] = 0  # rows with no visible humans divide by zero; reset the NaNs
            global_state = torch.mul(mask_weight, mlp1_output.view(size[0], size[1], -1))
            global_state = torch.sum(global_state, dim=1, keepdim=True)
# global_state = torch.mean(mlp1_output.view(size[0], size[1], -1), 1, keepdim=True)
#social_stress method#
#global_state= torch.sum(torch.mul(hr_social_weight, original_state),dim=1,keepdim=True)
global_state = global_state.expand((size[0], size[1], self.global_state_dim)).\
contiguous().view(-1, self.global_state_dim)
attention_input = torch.cat([mlp1_output, global_state], dim=1)
else:
attention_input = mlp1_output
scores = self.attention(attention_input).view(size[0], size[1], 1).squeeze(dim=2)
# masked softmax
# weights = softmax(scores, dim=1).unsqueeze(2)
        scores = scores * mask.float()
        scores_exp = torch.exp(scores) * (scores != 0).float()  # exp(0)=1, so zeroed (masked) scores are dropped here
        weights = (scores_exp / torch.sum(scores_exp, dim=1, keepdim=True)).unsqueeze(2)
        weights[weights != weights] = 0  # fully-masked rows yield NaN weights; reset them
self.attention_weights = weights[0, :, 0].data.cpu().numpy()
# output feature is a linear combination of input features
features = mlp2_output.view(size[0], size[1], -1)
# for converting to onnx
# expanded_weights = torch.cat([torch.zeros(weights.size()).copy_(weights) for _ in range(50)], dim=2)
weighted_feature = torch.sum(torch.mul(weights, features), dim=1)
# concatenate agent's state with global weighted humans' state
joint_state = torch.cat([self_state, weighted_feature], dim=1)
value = self.mlp3(joint_state)
        if torch.sum(value != value, dim=0).squeeze() > 0:  # 'value != value' flags NaN entries
print(state)
print(mask)
return value
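# Editorial addition sketching the masked-softmax trick used in forward()
# above: exponentiate, zero the masked positions, then renormalise. The clamp
# is an editorial guard against an all-masked row.
def _masked_softmax_demo(scores, mask):
    exp = torch.exp(scores) * mask.float()
    return exp / torch.sum(exp, dim=1, keepdim=True).clamp(min=1e-12)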
class ssDRL(MultiHumanRL):
def __init__(self):
super().__init__()
self.name = 'SARL'
def configure(self, config):
self.set_common_parameters(config)
mlp1_dims = [int(x) for x in config.get('sarl', 'mlp1_dims').split(', ')]
mlp2_dims = [int(x) for x in config.get('sarl', 'mlp2_dims').split(', ')]
mlp3_dims = [int(x) for x in config.get('sarl', 'mlp3_dims').split(', ')]
attention_dims = [int(x) for x in config.get('sarl', 'attention_dims').split(', ')]
self.with_om = config.getboolean('sarl', 'with_om')
with_global_state = config.getboolean('sarl', 'with_global_state')
self.model = ValueNetwork(self.input_dim(), self.self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims,
attention_dims, with_global_state, self.cell_size, self.cell_num)
self.multiagent_training = config.getboolean('sarl', 'multiagent_training')
if self.with_om:
self.name = 'OM-SARL'
logging.info('Policy: {} {} global state'.format(self.name, 'w/' if with_global_state else 'w/o'))
def get_attention_weights(self):
return self.model.attention_weights
def set_device(self, device):
self.device = device
self.model.to(device)
self.model.set_device(device)
def predict(self, state):
def dist(human):
# sort human order by decreasing distance to the robot
return (-human.hr_social_stress, np.linalg.norm(np.array(human.position) - np.array(state.self_state.position)))
#return (np.linalg.norm(np.array(human.position) - np.array(state.self_state.position)))
state.human_states = sorted(state.human_states, key=dist, reverse=True)
return super().predict(state)
| [
"torch.mul",
"crowd_nav.policy.cadrl.mlp",
"torch.exp",
"numpy.array",
"torch.sum",
"torch.cat"
] | [((606, 647), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['input_dim', 'mlp1_dims'], {'last_relu': '(True)'}), '(input_dim, mlp1_dims, last_relu=True)\n', (609, 647), False, 'from crowd_nav.policy.cadrl import mlp\n'), ((668, 697), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['mlp1_dims[-1]', 'mlp2_dims'], {}), '(mlp1_dims[-1], mlp2_dims)\n', (671, 697), False, 'from crowd_nav.policy.cadrl import mlp\n'), ((1074, 1104), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['mlp3_input_dim', 'mlp3_dims'], {}), '(mlp3_input_dim, mlp3_dims)\n', (1077, 1104), False, 'from crowd_nav.policy.cadrl import mlp\n'), ((4151, 4199), 'torch.cat', 'torch.cat', (['[self_state, weighted_feature]'], {'dim': '(1)'}), '([self_state, weighted_feature], dim=1)\n', (4160, 4199), False, 'import torch\n'), ((808, 846), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['(mlp1_dims[-1] * 2)', 'attention_dims'], {}), '(mlp1_dims[-1] * 2, attention_dims)\n', (811, 846), False, 'from crowd_nav.policy.cadrl import mlp\n'), ((890, 924), 'crowd_nav.policy.cadrl.mlp', 'mlp', (['mlp1_dims[-1]', 'attention_dims'], {}), '(mlp1_dims[-1], attention_dims)\n', (893, 924), False, 'from crowd_nav.policy.cadrl import mlp\n'), ((2680, 2724), 'torch.sum', 'torch.sum', (['global_state'], {'dim': '(1)', 'keepdim': '(True)'}), '(global_state, dim=1, keepdim=True)\n', (2689, 2724), False, 'import torch\n'), ((3143, 3188), 'torch.cat', 'torch.cat', (['[mlp1_output, global_state]'], {'dim': '(1)'}), '([mlp1_output, global_state], dim=1)\n', (3152, 3188), False, 'import torch\n'), ((3475, 3492), 'torch.exp', 'torch.exp', (['scores'], {}), '(scores)\n', (3484, 3492), False, 'import torch\n'), ((4020, 4048), 'torch.mul', 'torch.mul', (['weights', 'features'], {}), '(weights, features)\n', (4029, 4048), False, 'import torch\n'), ((1855, 1882), 'torch.exp', 'torch.exp', (['hr_social_stress'], {}), '(hr_social_stress)\n', (1864, 1882), False, 'import torch\n'), ((2476, 2513), 'torch.sum', 'torch.sum', (['mask_'], {'dim': '(1)', 'keepdim': '(True)'}), '(mask_, dim=1, keepdim=True)\n', (2485, 2513), False, 'import torch\n'), ((1931, 1976), 'torch.sum', 'torch.sum', (['hr_social_exp'], {'dim': '(1)', 'keepdim': '(True)'}), '(hr_social_exp, dim=1, keepdim=True)\n', (1940, 1976), False, 'import torch\n'), ((3550, 3592), 'torch.sum', 'torch.sum', (['scores_exp'], {'dim': '(1)', 'keepdim': '(True)'}), '(scores_exp, dim=1, keepdim=True)\n', (3559, 3592), False, 'import torch\n'), ((4251, 4283), 'torch.sum', 'torch.sum', (['(value != value)'], {'dim': '(0)'}), '(value != value, dim=0)\n', (4260, 4283), False, 'import torch\n'), ((5877, 5901), 'numpy.array', 'np.array', (['human.position'], {}), '(human.position)\n', (5885, 5901), True, 'import numpy as np\n'), ((5904, 5939), 'numpy.array', 'np.array', (['state.self_state.position'], {}), '(state.self_state.position)\n', (5912, 5939), True, 'import numpy as np\n')] |
import gym
import numpy as np
import cv2
from collections import deque
class Environment(object):
def __init__(self, env_name, resized_width, resized_height,
agent_history_length, replay_size, alpha, action_repeat=4):
self._env = gym.make(env_name)
self._width = resized_width
self._height = resized_height
self._history_length = agent_history_length
self._replay_size = replay_size
self._state_buffer = deque(maxlen=replay_size)
self._default_priority = 0
self._alpha = alpha
self._action_repeat = action_repeat
@property
def action_size(self):
return self._env.action_space.n
def new_game(self):
frame = self._process_frame(self._env.reset())
self._frames = [frame] * self._history_length
def step(self, action):
reward = 0
for _ in range(self._action_repeat):
frame, reward_action, terminal, info = self._env.step(action)
reward += np.clip(reward_action, -1, 1)
if terminal:
break
frame = self._process_frame(frame)
prev_frames = self._frames
frames = prev_frames[1:] + [frame]
self._frames = frames
if self._replay_size > 0:
self._state_buffer.append({
'frames': frames,
'prev_frames': prev_frames,
'action': action,
'reward': reward,
'terminal': terminal,
'priority': self._default_priority})
return list(frames), reward, terminal, info
def render(self):
self._env.render()
def _process_frame(self, frame):
return cv2.resize(cv2.cvtColor(
frame, cv2.COLOR_RGB2GRAY) / 255., (self._width, self._height))
def _get_sample_probability(self):
priority = np.zeros(len(self._state_buffer))
i = 0
for state in self._state_buffer:
priority[i] = state['priority']
if self._default_priority < priority[i]:
self._default_priority = priority[i]
i += 1
probability = np.power(priority + 1e-7, self._alpha)
return probability / np.sum(probability)
def sample(self, batch_size):
        if self._replay_size <= 0:
            raise Exception('Cannot sample: the replay buffer is disabled (replay_size <= 0)!')
buffer_size = len(self._state_buffer)
if buffer_size < batch_size:
return [], [], [], [], [], []
else:
prev_frames_batch = []
current_frames_batch = []
action_batch = []
reward_batch = []
terminal_batch = []
if self._alpha == 0:
state_batch = np.random.choice(
self._state_buffer, batch_size)
else:
state_batch = np.random.choice(
self._state_buffer, batch_size,
p=self._get_sample_probability())
for state in state_batch:
prev_frames_batch.append(state['prev_frames'])
current_frames_batch.append(state['frames'])
action_batch.append(state['action'])
reward_batch.append(state['reward'])
terminal_batch.append(state['terminal'])
return prev_frames_batch, action_batch, reward_batch,\
current_frames_batch, terminal_batch, state_batch
def get_frames(self):
return list(self._frames)
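# Editorial usage sketch; the env name and hyper-parameters below are
# illustrative, not taken from this project.
def _make_env_demo():
    return Environment('Breakout-v0', 84, 84, agent_history_length=4,
                       replay_size=10000, alpha=0.6)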
| [
"numpy.clip",
"collections.deque",
"numpy.power",
"numpy.random.choice",
"numpy.sum",
"cv2.cvtColor",
"gym.make"
] | [((261, 279), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (269, 279), False, 'import gym\n'), ((475, 500), 'collections.deque', 'deque', ([], {'maxlen': 'replay_size'}), '(maxlen=replay_size)\n', (480, 500), False, 'from collections import deque\n'), ((2151, 2190), 'numpy.power', 'np.power', (['(priority + 1e-07)', 'self._alpha'], {}), '(priority + 1e-07, self._alpha)\n', (2159, 2190), True, 'import numpy as np\n'), ((1013, 1042), 'numpy.clip', 'np.clip', (['reward_action', '(-1)', '(1)'], {}), '(reward_action, -1, 1)\n', (1020, 1042), True, 'import numpy as np\n'), ((2219, 2238), 'numpy.sum', 'np.sum', (['probability'], {}), '(probability)\n', (2225, 2238), True, 'import numpy as np\n'), ((1721, 1760), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (1733, 1760), False, 'import cv2\n'), ((2725, 2773), 'numpy.random.choice', 'np.random.choice', (['self._state_buffer', 'batch_size'], {}), '(self._state_buffer, batch_size)\n', (2741, 2773), True, 'import numpy as np\n')] |
import torch, os
import numpy as np
from omniglotNShot import OmniglotNShot
import argparse
from meta import Meta
from meta_ADML import Meta_ADML
def main(args):
torch.manual_seed(222)
torch.cuda.manual_seed_all(222)
np.random.seed(222)
#print(args)
config = [
('conv2d', [64, 1, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 3, 3, 2, 0]),
('relu', [True]),
('bn', [64]),
('conv2d', [64, 64, 2, 2, 1, 0]),
('relu', [True]),
('bn', [64]),
('flatten', []),
('linear', [args.n_way, 64])
]
device = torch.device('cuda')
if args.ADML:
maml = Meta_ADML(args,config).to(device)
else:
maml = Meta(args, config).to(device)
tmp = filter(lambda x: x.requires_grad, maml.parameters())
num = sum(map(lambda x: np.prod(x.shape), tmp))
#print(maml)
#print('Total trainable tensors:', num)
db_train = OmniglotNShot('omniglot',
batchsz=args.task_num,
n_way=args.n_way,
k_shot=args.k_spt,
k_query=args.k_qry,
imgsz=args.imgsz)
if not os.path.isdir(args.save_path+'/omniglot/'):
os.makedirs(args.save_path+'/omniglot/', )
natural_validation_accuracy = 0.0
robust_validation_accuracy = 0.0
natural_validation_accuracy_advTrained = 0.0
robust_validation_accuracy_advTrained = 0.0
for step in range(args.epoch):
x_spt, y_spt, x_qry, y_qry = db_train.next()
x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
# set traning=True to update running_mean, running_variance, bn_weights, bn_bias
accs = maml(x_spt, y_spt, x_qry, y_qry)
if (step+1) % 50 == 0:
print('step:', step, '\ttraining acc:', accs)
if (step+1) % 1000 == 0:
accs = []
robust_accs = []
accs_advTrained = []
robust_accs_advTrained = []
for _ in range(1000//args.task_num):
# test
x_spt, y_spt, x_qry, y_qry = db_train.next('test')
x_spt, y_spt, x_qry, y_qry = torch.from_numpy(x_spt).to(device), torch.from_numpy(y_spt).to(device), \
torch.from_numpy(x_qry).to(device), torch.from_numpy(y_qry).to(device)
# split to single task each time
for x_spt_one, y_spt_one, x_qry_one, y_qry_one in zip(x_spt, y_spt, x_qry, y_qry):
test_acc, robust_test_acc, test_acc_advTrained, robust_test_acc_advTrained = maml.finetunning(x_spt_one, y_spt_one, x_qry_one, y_qry_one)
accs.append( test_acc )
robust_accs.append( robust_test_acc )
accs_advTrained.append( test_acc_advTrained )
robust_accs_advTrained.append( robust_test_acc_advTrained )
# [b, update_step+1]
accs = np.array(accs).mean(axis=0).astype(np.float16)
robust_accs = np.array(robust_accs).mean(axis=0).astype(np.float16)
accs_advTrained = np.array(accs_advTrained).mean(axis=0).astype(np.float16)
robust_accs_advTrained = np.array(robust_accs_advTrained).mean(axis=0).astype(np.float16)
natural_validation_accuracy = 100.0*accs[-1]
robust_validation_accuracy = 100.0*robust_accs[-1]
natural_validation_accuracy_advTrained = 100.0*accs_advTrained[-1]
robust_validation_accuracy_advTrained = 100.0*robust_accs_advTrained[-1]
print('Test Natural acc:', accs)
print('Test Robust acc:', robust_accs)
print('Test Natural acc adversarially fine tuned:', accs_advTrained)
print('Test Robust acc adversarially fine tuned:', robust_accs_advTrained)
print('\nSaving..')
            state = {
                'net_params': maml.net.state_dict()  # parameters() returns a generator, which cannot be pickled
            }
            torch.save(state, args.save_path+'/omniglot/'+str(args.k_spt)+'_shot_epoch='+str(step)+'.t7')
f = open(args.save_path +'/omniglot/'+str(args.k_spt)+'_shot_test_acc.txt', 'w+')
f.write('natural acc: '+str(natural_validation_accuracy)+', robust accuracy: '+str(robust_validation_accuracy)+', natural acc when adversarially fine tuned: '+str(natural_validation_accuracy_advTrained)+', robust accuracy when adversarially fine tuned: '+str(robust_validation_accuracy_advTrained))
f.close()
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--epoch', type=int, help='epoch number', default=4000)
argparser.add_argument('--n_way', type=int, help='n way', default=5)
argparser.add_argument('--k_spt', type=int, help='k shot for support set', default=1)
argparser.add_argument('--k_qry', type=int, help='k shot for query set', default=15)
argparser.add_argument('--imgsz', type=int, help='imgsz', default=28)
argparser.add_argument('--imgc', type=int, help='imgc', default=1)
argparser.add_argument('--task_num', type=int, help='meta batch size, namely task num', default=32)
argparser.add_argument('--meta_lr', type=float, help='meta-level outer learning rate', default=1e-3)
argparser.add_argument('--update_lr', type=float, help='task-level inner update learning rate', default=0.4)
argparser.add_argument('--update_step', type=int, help='task-level inner update steps', default=5)
argparser.add_argument('--update_step_test', type=int, help='update steps for finetunning', default=10)
argparser.add_argument('--attack_query', action='store_true', help='attack query')
argparser.add_argument('--attack_support', action='store_true', help='attack support')
argparser.add_argument('--attack_epsilon', type=float, help='maximum attack norm', default=16.0/255.0)
argparser.add_argument('--no_attack_validation', action='store_true', help='no attack during validation')
argparser.add_argument('--attack_step_size', type=float, help='step size for attacker', default=16.0/255.0)
argparser.add_argument('--attack_steps', type=int, help='number of attack steps', default=1)
argparser.add_argument('--eval_attack_steps', type=int, help='number of validation attack steps', default=20)
argparser.add_argument('--eval_attack_step_size', type=float, help='number of validation attack steps', default=4.0/255)
argparser.add_argument('--no_random_start', action='store_true', help='number of attack steps')
argparser.add_argument('--targeted', action='store_true', help='targeted attacks')
argparser.add_argument('--save_path', default='checkpoint/', help='path to save models and stats')
argparser.add_argument('--ADML', action='store_true', help='use adversarial meta-learning')
args = argparser.parse_args()
main(args)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"numpy.prod",
"argparse.ArgumentParser",
"os.makedirs",
"torch.from_numpy",
"omniglotNShot.OmniglotNShot",
"numpy.array",
"meta_ADML.Meta_ADML",
"os.path.isdir",
"numpy.random.seed",
"meta.Meta",
"torch.device"
] | [((178, 200), 'torch.manual_seed', 'torch.manual_seed', (['(222)'], {}), '(222)\n', (195, 200), False, 'import torch, os\n'), ((205, 236), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(222)'], {}), '(222)\n', (231, 236), False, 'import torch, os\n'), ((241, 260), 'numpy.random.seed', 'np.random.seed', (['(222)'], {}), '(222)\n', (255, 260), True, 'import numpy as np\n'), ((736, 756), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (748, 756), False, 'import torch, os\n'), ((1072, 1200), 'omniglotNShot.OmniglotNShot', 'OmniglotNShot', (['"""omniglot"""'], {'batchsz': 'args.task_num', 'n_way': 'args.n_way', 'k_shot': 'args.k_spt', 'k_query': 'args.k_qry', 'imgsz': 'args.imgsz'}), "('omniglot', batchsz=args.task_num, n_way=args.n_way, k_shot=\n args.k_spt, k_query=args.k_qry, imgsz=args.imgsz)\n", (1085, 1200), False, 'from omniglotNShot import OmniglotNShot\n'), ((4810, 4835), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4833, 4835), False, 'import argparse\n'), ((1323, 1367), 'os.path.isdir', 'os.path.isdir', (["(args.save_path + '/omniglot/')"], {}), "(args.save_path + '/omniglot/')\n", (1336, 1367), False, 'import torch, os\n'), ((1375, 1417), 'os.makedirs', 'os.makedirs', (["(args.save_path + '/omniglot/')"], {}), "(args.save_path + '/omniglot/')\n", (1386, 1417), False, 'import torch, os\n'), ((790, 813), 'meta_ADML.Meta_ADML', 'Meta_ADML', (['args', 'config'], {}), '(args, config)\n', (799, 813), False, 'from meta_ADML import Meta_ADML\n'), ((849, 867), 'meta.Meta', 'Meta', (['args', 'config'], {}), '(args, config)\n', (853, 867), False, 'from meta import Meta\n'), ((971, 987), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (978, 987), True, 'import numpy as np\n'), ((1718, 1741), 'torch.from_numpy', 'torch.from_numpy', (['x_spt'], {}), '(x_spt)\n', (1734, 1741), False, 'import torch, os\n'), ((1754, 1777), 'torch.from_numpy', 'torch.from_numpy', (['y_spt'], {}), '(y_spt)\n', (1770, 1777), False, 'import torch, os\n'), ((1829, 1852), 'torch.from_numpy', 'torch.from_numpy', (['x_qry'], {}), '(x_qry)\n', (1845, 1852), False, 'import torch, os\n'), ((1865, 1888), 'torch.from_numpy', 'torch.from_numpy', (['y_qry'], {}), '(y_qry)\n', (1881, 1888), False, 'import torch, os\n'), ((2470, 2493), 'torch.from_numpy', 'torch.from_numpy', (['x_spt'], {}), '(x_spt)\n', (2486, 2493), False, 'import torch, os\n'), ((2506, 2529), 'torch.from_numpy', 'torch.from_numpy', (['y_spt'], {}), '(y_spt)\n', (2522, 2529), False, 'import torch, os\n'), ((2589, 2612), 'torch.from_numpy', 'torch.from_numpy', (['x_qry'], {}), '(x_qry)\n', (2605, 2612), False, 'import torch, os\n'), ((2625, 2648), 'torch.from_numpy', 'torch.from_numpy', (['y_qry'], {}), '(y_qry)\n', (2641, 2648), False, 'import torch, os\n'), ((3268, 3282), 'numpy.array', 'np.array', (['accs'], {}), '(accs)\n', (3276, 3282), True, 'import numpy as np\n'), ((3341, 3362), 'numpy.array', 'np.array', (['robust_accs'], {}), '(robust_accs)\n', (3349, 3362), True, 'import numpy as np\n'), ((3425, 3450), 'numpy.array', 'np.array', (['accs_advTrained'], {}), '(accs_advTrained)\n', (3433, 3450), True, 'import numpy as np\n'), ((3520, 3552), 'numpy.array', 'np.array', (['robust_accs_advTrained'], {}), '(robust_accs_advTrained)\n', (3528, 3552), True, 'import numpy as np\n')] |
import netCDF4
import numpy as np
from delft3dfmpy.datamodels.cstructures import meshgeom, meshgeomdim
import logging
logger = logging.getLogger(__name__)
ugrid_dim_dict = {
"nnetwork_branches": ('1d', 'nbranches'),
"nnetwork_nodes": ('1d', 'nnodes'),
"nnetwork_geometry": ('1d', 'ngeometry'),
"nnetwork_edges": ('1d', 'nbranches'),
"nmesh1d_edges": ('1d', 'numedge'),
"nmesh1d_nodes": ('1d', 'numnode'),
"max_nmesh2d_face_nodes": ('2d', 'maxnumfacenodes'),
"nmesh2d_edges": ('2d', 'numedge'),
"nmesh2d_faces": ('2d', 'numface'),
"nmesh2d_nodes": ('2d', 'numnode')
}
ugrid_var_dict = {
'network_node_ids': ('1d', 'network_node_ids'),
'network_node_long_names': ('1d', 'network_node_long_names'),
'network_node_x': ('1d', 'nnodex'),
'network_node_y': ('1d', 'nnodey'),
'network_branch_ids': ('1d', 'network_branch_ids'),
'network_branch_long_names': ('1d', 'network_branch_long_names'),
'network_branch_lengths': ('1d', 'nbranchlengths'),
'network_branch_order': ('1d', 'nbranchorder'),
'network_edge_nodes': ('1d', 'nedge_nodes'),
'network_geom_x': ('1d', 'ngeopointx'),
'network_geom_y': ('1d', 'ngeopointy'),
'network_part_node_count': ('1d', 'nbranchgeometrynodes'),
'mesh1d_node_ids': ('1d', 'mesh1d_node_ids'),
'mesh1d_node_long_names': ('1d', 'mesh1d_node_long_names'),
'mesh1d_edge_nodes': ('1d', 'edge_nodes'),
'mesh1d_nodes_branch_id': ('1d', 'branchidx'),
'mesh1d_nodes_branch_offset': ('1d', 'branchoffsets'),
'mesh2d_node_x': ('2d', 'nodex'),
'mesh2d_node_y': ('2d', 'nodey'),
'mesh2d_node_z': ('2d', 'nodez'),
'mesh2d_edge_nodes': ('2d', 'edge_nodes'),
'mesh2d_face_x': ('2d', 'facex'),
'mesh2d_face_y': ('2d', 'facey'),
'mesh2d_face_z': ('2d', 'facez'),
'mesh2d_face_nodes': ('2d', 'face_nodes')
}
class UgridReader:
def __init__(self, network):
self.network = network
def read_ugrid(self, path):
"""
Read Ugrid from netcdf and return dflowfm cstructure with grid
"""
ncfile = netCDF4.Dataset(path)
# Read mesh1d
mesh1d = meshgeom(meshgeomdim())
read_dimensions(mesh1d.meshgeomdim, '1d', ncfile)
read_values(mesh1d, '1d', ncfile)
schematised, branches = mesh1d.process_1d_network()
self.network.mesh1d.add_from_other(mesh1d)
# Add branches
for idx, geometry in branches.items():
self.network.branches.at[idx, 'geometry'] = geometry
for idx, geometry in schematised.items():
self.network.schematised.at[idx, 'geometry'] = geometry
# Read mesh2d
mesh2d = meshgeom(meshgeomdim())
read_dimensions(mesh2d.meshgeomdim, '2d', ncfile)
read_values(mesh2d, '2d', ncfile)
self.network.mesh2d.add_from_other(mesh2d)
# Read links1d2d
links_1dnode, links_2dface = read_links(ncfile)
self.network.links1d2d.nodes1d.extend(links_1dnode)
self.network.links1d2d.faces2d.extend(links_2dface)
ncfile.close()
def read_dimensions(meshgeomdim, readdim, ncfile):
"""
Function to read dimensions from netcdf file
"""
assert readdim in ['1d', '2d']
meshgeomdim.dim = int(readdim[0])
# Read dimensions
for ncname, (dim, cname) in ugrid_dim_dict.items():
if readdim != dim:
continue
# Check if variable is in nc file
        if ncname not in ncfile.dimensions.keys():
            logger.error(f'Failed to read dimension "{ncname}" from ncfile for {readdim} mesh.')
            continue
        value = ncfile.dimensions[ncname].size
logger.info(f'Read dimension "{ncname}" from ncfile for {readdim} mesh.')
setattr(meshgeomdim, cname, value)
def read_values(meshgeom, readdim, ncfile):
"""
Function to read values from netcdf file
"""
assert readdim in ['1d', '2d']
# Read variables
for ncname, (dim, cname) in ugrid_var_dict.items():
if readdim != dim:
continue
# Check if variable is in nc file
        if ncname not in ncfile.variables.keys():
            logger.error(f'Failed to read variable "{ncname}" from ncfile for {readdim} mesh.')
            continue
        # Read values
        values = ncfile.variables[ncname][:]
logger.info(f'Read variable "{ncname}" with shape {values.shape} from ncfile for {readdim} mesh.')
# Read description variables (strings)
if cname in meshgeom.description1d.keys():
meshgeom.description1d[cname] = list(map(str.strip, netCDF4.chartostring(values)))
# Read numerical values
else:
if values.mask.any():
values[values.mask] = ncfile.variables[ncname]._FillValue
meshgeom.set_values(cname, np.ravel(values))
def read_links(ncfile):
"""
Function to read 1d 2d links
"""
var = 'link1d2d'
if var not in ncfile.variables.keys():
return [[], []]
else:
return ncfile.variables[var][:, :].T.tolist() | [
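# Editorial usage sketch (argument names are illustrative): populate an
# existing dflowfm network object from a UGRID netCDF file.
def _read_ugrid_demo(network, path):
    UgridReader(network).read_ugrid(path)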
"logging.getLogger",
"delft3dfmpy.datamodels.cstructures.meshgeom.description1d.keys",
"netCDF4.Dataset",
"delft3dfmpy.datamodels.cstructures.meshgeomdim",
"numpy.ravel",
"netCDF4.chartostring"
] | [((129, 156), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'import logging\n'), ((2088, 2109), 'netCDF4.Dataset', 'netCDF4.Dataset', (['path'], {}), '(path)\n', (2103, 2109), False, 'import netCDF4\n'), ((2163, 2176), 'delft3dfmpy.datamodels.cstructures.meshgeomdim', 'meshgeomdim', ([], {}), '()\n', (2174, 2176), False, 'from delft3dfmpy.datamodels.cstructures import meshgeom, meshgeomdim\n'), ((2692, 2705), 'delft3dfmpy.datamodels.cstructures.meshgeomdim', 'meshgeomdim', ([], {}), '()\n', (2703, 2705), False, 'from delft3dfmpy.datamodels.cstructures import meshgeom, meshgeomdim\n'), ((4499, 4528), 'delft3dfmpy.datamodels.cstructures.meshgeom.description1d.keys', 'meshgeom.description1d.keys', ([], {}), '()\n', (4526, 4528), False, 'from delft3dfmpy.datamodels.cstructures import meshgeom, meshgeomdim\n'), ((4818, 4834), 'numpy.ravel', 'np.ravel', (['values'], {}), '(values)\n', (4826, 4834), True, 'import numpy as np\n'), ((4594, 4622), 'netCDF4.chartostring', 'netCDF4.chartostring', (['values'], {}), '(values)\n', (4614, 4622), False, 'import netCDF4\n')] |
import numpy as np
import pandas as pd
def get_key_int_maps(keys):
key_to_int = {name: i for i, name in enumerate(keys)}
int_to_key = {i: name for i, name in enumerate(keys)}
return key_to_int, int_to_key
def onehot_encode_class(class_to_idx, classname):
n_classes = len(class_to_idx.keys())
onehot = np.zeros(n_classes)
idx = class_to_idx[classname]
onehot[idx] = 1
return onehot
def encode_column(df, column):
df = df.copy()
unique_names = df[column].unique()
key_to_int, int_to_key = get_key_int_maps(unique_names)
encoded = [key_to_int[c] for c in df[column].values]
df[column+'_code'] = encoded
return df | [
"numpy.zeros"
] | [((324, 343), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (332, 343), True, 'import numpy as np\n')] |
"""Unittest for Intervention class."""
import unittest
import numpy as np
from pybats.dglm import dlm
from pybats_detection.random_dlm import RandomDLM
from pybats_detection.intervention import Intervention
from pybats_detection.loader import load_market_share
class TestIntervention(unittest.TestCase):
"""Tests Intervention."""
def test__variance_intervention(self):
"""Test variance intervention, shifting in observation variance."""
# Generating level data model
np.random.seed(66)
rdlm = RandomDLM(n=50, V=1, W=0.1)
df_simulated = rdlm.level(
start_level=100,
dict_shift={"t": [30, 31, 40, 41],
"level_mean_shift": [10, -10, 6, -6],
"level_var_shift": [1, 1, 1, 1]})
# Define model (prior mean and variance matrix)
a = np.array(100)
R = np.eye(1)
np.fill_diagonal(R, val=1000)
mod = dlm(a0=a, R0=R, ntrend=1, deltrend=0.90)
# List with the interventions
list_interventions = [
{"time_index": 31,
"which": ["variance"],
"parameters": [{"v_shift": "ignore"}]},
{"time_index": 41,
"which": ["variance"],
"parameters": [{"v_shift": 10}]}
]
# Perform the filter and smooth with manual interventions
dlm_intervention = Intervention(mod=mod)
out = dlm_intervention.fit(
y=df_simulated["y"], interventions=list_interventions)
self.assertEqual(list(out.keys()), ["filter", "smooth", "model"])
self.assertTrue(np.all(out["smooth"]["posterior"]["variance"] > 0))
dlm_intervention = Intervention(mod=mod, smooth=False)
out = dlm_intervention.fit(
y=df_simulated["y"], interventions=list_interventions)
self.assertEqual(list(out.keys()),
["predictive", "posterior", "model"])
def test__noise_intervention(self):
"""Test noise intervention, shifting the prior moments."""
# Generating level data model
np.random.seed(66)
rdlm = RandomDLM(n=50, V=1, W=0.1)
df_simulated = rdlm.level(
start_level=100,
dict_shift={"t": [30],
"level_mean_shift": [10],
"level_var_shift": [1]})
# Define model (prior mean and variance matrix)
a = np.array([100, 0])
R = np.eye(2)
np.fill_diagonal(R, val=1000)
mod = dlm(a0=a, R0=R, ntrend=2, deltrend=0.90)
# List with the interventions
list_interventions = [
{"time_index": 31, "which": ["noise"],
"parameters": [
{"h_shift": np.array([10, 0]),
"H_shift": np.array([[100, 0], [0, 300]])}]
}]
# Perform the filter and smooth with manual interventions
dlm_intervention = Intervention(mod=mod)
out = dlm_intervention.fit(
y=df_simulated["y"], interventions=list_interventions)
self.assertEqual(list(out.keys()), ["filter", "smooth", "model"])
self.assertTrue(np.all(out["smooth"]["posterior"]["variance"] > 0))
def test__subjective_intervention(self):
"""Test subjective intervention, replacing the prior moments."""
# Generating level data model
np.random.seed(66)
rdlm = RandomDLM(n=50, V=1, W=0.1)
df_simulated = rdlm.level(
start_level=100,
dict_shift={"t": [30],
"level_mean_shift": [10],
"level_var_shift": [1]})
# Define model (prior mean and variance matrix)
a = np.array([100, 0])
R = np.eye(2)
np.fill_diagonal(R, val=1000)
mod = dlm(a0=a, R0=R, ntrend=2, deltrend=0.90)
# List with the interventions
list_interventions = [{
"time_index": 31, "which": ["subjective"],
"parameters": [
{"a_star": np.array([110, 0]),
"R_star": np.array([[100, 0], [0, 300]])}]
}
]
dlm_intervention = Intervention(mod=mod)
out = dlm_intervention.fit(
y=df_simulated["y"], interventions=list_interventions)
self.assertEqual(list(out.keys()), ["filter", "smooth", "model"])
self.assertTrue(np.all(out["smooth"]["posterior"]["variance"] > 0))
def test__subjective_intervention_with_regression(self):
"""Test subjective intervention with regressor."""
# Generating level data model
np.random.seed(66)
X = np.random.normal(0, .1, 100).reshape(-1, 1)
rdlm = RandomDLM(n=100, V=.1, W=[0.006, .001])
df_simulated = rdlm.level_with_covariates(
start_level=100, start_covariates=[-2], X=X,
dict_shift={"t": [30], "mean_shift": [10], "var_shift": [1]})
# Define model
a0 = np.array([100, 0, -1])
R0 = np.eye(3)
R0[0, 0] = 100
R0[2, 2] = 10
mod = dlm(a0=a0, R0=R0, n0=1, s0=.1, ntrend=2, nregn=1,
delregn=.98, deltrend=0.95)
# List with the interventions
list_interventions = [{
"time_index": 31, "which": ["noise"],
"parameters": [
{"h_shift": np.array([110, 0, 0]),
"H_shift": np.eye(3)*0.0}]}]
dlm_intervention = Intervention(mod=mod)
out = dlm_intervention.fit(
y=df_simulated["y"], X=df_simulated[["x1"]],
interventions=list_interventions)
self.assertEqual(list(out.keys()), ["filter", "smooth", "model"])
self.assertTrue(np.all(out["smooth"]["posterior"]["variance"] > 0))
def test__subjective_intervention_with_regression_market_share(self):
"""
Test subjective intervention with regressor in market share example.
"""
market_share = load_market_share()
y = market_share['share']
X = market_share[['price', 'prom', 'cprom']]
X = X - X.mean()
# Define model
a0 = np.array([42, 0, 0, 0])
R0 = np.eye(4) * 4.0
R0[0, 0] = 25
mod = dlm(a0=a0, R0=R0, ntrend=1, nregn=3, delregn=.90,
deltrend=1, delVar=.99)
# List with the interventions
list_interventions = [{
"time_index": 34, "which": ["variance"],
"parameters": [
{"h_shift": np.array([0, 0, 0, 0]),
"H_shift": np.eye(4)*0.0}]}]
dlm_intervention = Intervention(mod=mod)
out = dlm_intervention.fit(y=y, X=X,
interventions=list_interventions)
# Measures
predictive_df = out.get('filter').get('predictive')
mse = ((predictive_df.y - predictive_df.f)**2).mean()
mad = np.abs(predictive_df.y - predictive_df.f).mean()
mse_comparative = np.abs(mse / .056 - 1)
mad_comparative = np.abs(mad / .185 - 1)
# Coefs
mod_ = out.get('model')
coefs_df = mod_.get_coef()
signal_lst = list((coefs_df.Mean < 0).values)
self.assertEqual(list(out.keys()), ["filter", "smooth", "model"])
self.assertEqual(signal_lst, [False, True, False, True])
self.assertTrue(mse_comparative < .10)
self.assertTrue(mad_comparative < .10)
| [
"numpy.random.normal",
"numpy.abs",
"numpy.eye",
"pybats_detection.loader.load_market_share",
"pybats_detection.intervention.Intervention",
"numpy.fill_diagonal",
"pybats.dglm.dlm",
"numpy.array",
"numpy.random.seed",
"numpy.all",
"pybats_detection.random_dlm.RandomDLM"
] | [((502, 520), 'numpy.random.seed', 'np.random.seed', (['(66)'], {}), '(66)\n', (516, 520), True, 'import numpy as np\n'), ((536, 563), 'pybats_detection.random_dlm.RandomDLM', 'RandomDLM', ([], {'n': '(50)', 'V': '(1)', 'W': '(0.1)'}), '(n=50, V=1, W=0.1)\n', (545, 563), False, 'from pybats_detection.random_dlm import RandomDLM\n'), ((864, 877), 'numpy.array', 'np.array', (['(100)'], {}), '(100)\n', (872, 877), True, 'import numpy as np\n'), ((890, 899), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (896, 899), True, 'import numpy as np\n'), ((908, 937), 'numpy.fill_diagonal', 'np.fill_diagonal', (['R'], {'val': '(1000)'}), '(R, val=1000)\n', (924, 937), True, 'import numpy as np\n'), ((952, 991), 'pybats.dglm.dlm', 'dlm', ([], {'a0': 'a', 'R0': 'R', 'ntrend': '(1)', 'deltrend': '(0.9)'}), '(a0=a, R0=R, ntrend=1, deltrend=0.9)\n', (955, 991), False, 'from pybats.dglm import dlm\n'), ((1400, 1421), 'pybats_detection.intervention.Intervention', 'Intervention', ([], {'mod': 'mod'}), '(mod=mod)\n', (1412, 1421), False, 'from pybats_detection.intervention import Intervention\n'), ((1703, 1738), 'pybats_detection.intervention.Intervention', 'Intervention', ([], {'mod': 'mod', 'smooth': '(False)'}), '(mod=mod, smooth=False)\n', (1715, 1738), False, 'from pybats_detection.intervention import Intervention\n'), ((2102, 2120), 'numpy.random.seed', 'np.random.seed', (['(66)'], {}), '(66)\n', (2116, 2120), True, 'import numpy as np\n'), ((2136, 2163), 'pybats_detection.random_dlm.RandomDLM', 'RandomDLM', ([], {'n': '(50)', 'V': '(1)', 'W': '(0.1)'}), '(n=50, V=1, W=0.1)\n', (2145, 2163), False, 'from pybats_detection.random_dlm import RandomDLM\n'), ((2431, 2449), 'numpy.array', 'np.array', (['[100, 0]'], {}), '([100, 0])\n', (2439, 2449), True, 'import numpy as np\n'), ((2462, 2471), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2468, 2471), True, 'import numpy as np\n'), ((2480, 2509), 'numpy.fill_diagonal', 'np.fill_diagonal', (['R'], {'val': '(1000)'}), '(R, val=1000)\n', (2496, 2509), True, 'import numpy as np\n'), ((2524, 2563), 'pybats.dglm.dlm', 'dlm', ([], {'a0': 'a', 'R0': 'R', 'ntrend': '(2)', 'deltrend': '(0.9)'}), '(a0=a, R0=R, ntrend=2, deltrend=0.9)\n', (2527, 2563), False, 'from pybats.dglm import dlm\n'), ((2933, 2954), 'pybats_detection.intervention.Intervention', 'Intervention', ([], {'mod': 'mod'}), '(mod=mod)\n', (2945, 2954), False, 'from pybats_detection.intervention import Intervention\n'), ((3373, 3391), 'numpy.random.seed', 'np.random.seed', (['(66)'], {}), '(66)\n', (3387, 3391), True, 'import numpy as np\n'), ((3407, 3434), 'pybats_detection.random_dlm.RandomDLM', 'RandomDLM', ([], {'n': '(50)', 'V': '(1)', 'W': '(0.1)'}), '(n=50, V=1, W=0.1)\n', (3416, 3434), False, 'from pybats_detection.random_dlm import RandomDLM\n'), ((3702, 3720), 'numpy.array', 'np.array', (['[100, 0]'], {}), '([100, 0])\n', (3710, 3720), True, 'import numpy as np\n'), ((3733, 3742), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3739, 3742), True, 'import numpy as np\n'), ((3751, 3780), 'numpy.fill_diagonal', 'np.fill_diagonal', (['R'], {'val': '(1000)'}), '(R, val=1000)\n', (3767, 3780), True, 'import numpy as np\n'), ((3795, 3834), 'pybats.dglm.dlm', 'dlm', ([], {'a0': 'a', 'R0': 'R', 'ntrend': '(2)', 'deltrend': '(0.9)'}), '(a0=a, R0=R, ntrend=2, deltrend=0.9)\n', (3798, 3834), False, 'from pybats.dglm import dlm\n'), ((4145, 4166), 'pybats_detection.intervention.Intervention', 'Intervention', ([], {'mod': 'mod'}), '(mod=mod)\n', (4157, 4166), False, 'from pybats_detection.intervention 
import Intervention\n'), ((4587, 4605), 'numpy.random.seed', 'np.random.seed', (['(66)'], {}), '(66)\n', (4601, 4605), True, 'import numpy as np\n'), ((4677, 4718), 'pybats_detection.random_dlm.RandomDLM', 'RandomDLM', ([], {'n': '(100)', 'V': '(0.1)', 'W': '[0.006, 0.001]'}), '(n=100, V=0.1, W=[0.006, 0.001])\n', (4686, 4718), False, 'from pybats_detection.random_dlm import RandomDLM\n'), ((4936, 4958), 'numpy.array', 'np.array', (['[100, 0, -1]'], {}), '([100, 0, -1])\n', (4944, 4958), True, 'import numpy as np\n'), ((4972, 4981), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4978, 4981), True, 'import numpy as np\n'), ((5042, 5121), 'pybats.dglm.dlm', 'dlm', ([], {'a0': 'a0', 'R0': 'R0', 'n0': '(1)', 's0': '(0.1)', 'ntrend': '(2)', 'nregn': '(1)', 'delregn': '(0.98)', 'deltrend': '(0.95)'}), '(a0=a0, R0=R0, n0=1, s0=0.1, ntrend=2, nregn=1, delregn=0.98, deltrend=0.95)\n', (5045, 5121), False, 'from pybats.dglm import dlm\n'), ((5412, 5433), 'pybats_detection.intervention.Intervention', 'Intervention', ([], {'mod': 'mod'}), '(mod=mod)\n', (5424, 5433), False, 'from pybats_detection.intervention import Intervention\n'), ((5922, 5941), 'pybats_detection.loader.load_market_share', 'load_market_share', ([], {}), '()\n', (5939, 5941), False, 'from pybats_detection.loader import load_market_share\n'), ((6091, 6114), 'numpy.array', 'np.array', (['[42, 0, 0, 0]'], {}), '([42, 0, 0, 0])\n', (6099, 6114), True, 'import numpy as np\n'), ((6181, 6255), 'pybats.dglm.dlm', 'dlm', ([], {'a0': 'a0', 'R0': 'R0', 'ntrend': '(1)', 'nregn': '(3)', 'delregn': '(0.9)', 'deltrend': '(1)', 'delVar': '(0.99)'}), '(a0=a0, R0=R0, ntrend=1, nregn=3, delregn=0.9, deltrend=1, delVar=0.99)\n', (6184, 6255), False, 'from pybats.dglm import dlm\n'), ((6551, 6572), 'pybats_detection.intervention.Intervention', 'Intervention', ([], {'mod': 'mod'}), '(mod=mod)\n', (6563, 6572), False, 'from pybats_detection.intervention import Intervention\n'), ((6919, 6942), 'numpy.abs', 'np.abs', (['(mse / 0.056 - 1)'], {}), '(mse / 0.056 - 1)\n', (6925, 6942), True, 'import numpy as np\n'), ((6968, 6991), 'numpy.abs', 'np.abs', (['(mad / 0.185 - 1)'], {}), '(mad / 0.185 - 1)\n', (6974, 6991), True, 'import numpy as np\n'), ((1623, 1673), 'numpy.all', 'np.all', (["(out['smooth']['posterior']['variance'] > 0)"], {}), "(out['smooth']['posterior']['variance'] > 0)\n", (1629, 1673), True, 'import numpy as np\n'), ((3156, 3206), 'numpy.all', 'np.all', (["(out['smooth']['posterior']['variance'] > 0)"], {}), "(out['smooth']['posterior']['variance'] > 0)\n", (3162, 3206), True, 'import numpy as np\n'), ((4368, 4418), 'numpy.all', 'np.all', (["(out['smooth']['posterior']['variance'] > 0)"], {}), "(out['smooth']['posterior']['variance'] > 0)\n", (4374, 4418), True, 'import numpy as np\n'), ((5671, 5721), 'numpy.all', 'np.all', (["(out['smooth']['posterior']['variance'] > 0)"], {}), "(out['smooth']['posterior']['variance'] > 0)\n", (5677, 5721), True, 'import numpy as np\n'), ((6128, 6137), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6134, 6137), True, 'import numpy as np\n'), ((4618, 4647), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '(100)'], {}), '(0, 0.1, 100)\n', (4634, 4647), True, 'import numpy as np\n'), ((6843, 6884), 'numpy.abs', 'np.abs', (['(predictive_df.y - predictive_df.f)'], {}), '(predictive_df.y - predictive_df.f)\n', (6849, 6884), True, 'import numpy as np\n'), ((2743, 2760), 'numpy.array', 'np.array', (['[10, 0]'], {}), '([10, 0])\n', (2751, 2760), True, 'import numpy as np\n'), ((2790, 2820), 
'numpy.array', 'np.array', (['[[100, 0], [0, 300]]'], {}), '([[100, 0], [0, 300]])\n', (2798, 2820), True, 'import numpy as np\n'), ((4017, 4035), 'numpy.array', 'np.array', (['[110, 0]'], {}), '([110, 0])\n', (4025, 4035), True, 'import numpy as np\n'), ((4064, 4094), 'numpy.array', 'np.array', (['[[100, 0], [0, 300]]'], {}), '([[100, 0], [0, 300]])\n', (4072, 4094), True, 'import numpy as np\n'), ((5315, 5336), 'numpy.array', 'np.array', (['[110, 0, 0]'], {}), '([110, 0, 0])\n', (5323, 5336), True, 'import numpy as np\n'), ((6453, 6475), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (6461, 6475), True, 'import numpy as np\n'), ((5366, 5375), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5372, 5375), True, 'import numpy as np\n'), ((6505, 6514), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6511, 6514), True, 'import numpy as np\n')] |
import matplotlib.dates
from .station import MonitoringStation
import numpy as np
def polyfit(dates, levels,p):
# Using shifted x values, find coefficient of best-fit
# polynomial f(x) of degree p
#dates into a list of floats
numbered_dates = matplotlib.dates.date2num(dates)
#date shift is the first date
shift = numbered_dates[0]
#shifted dates to use for polynomial
time = numbered_dates - shift
#coeffs of bestfit polynomial
p_coeff = np.polyfit(time, levels, p)
# Convert coefficient into a polynomial that can be evaluated
poly = np.poly1d(p_coeff)
return poly, shift
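# Hedged usage sketch (added for illustration; `dates` is assumed to be a list
# of datetime objects and `levels` a matching sequence of water levels):
#
#     poly, shift = polyfit(dates, levels, p=3)
#     t = matplotlib.dates.date2num(dates) - shift
#     fitted = poly(t)  # evaluate the best-fit polynomial at the shifted times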
| [
"numpy.poly1d",
"numpy.polyfit"
] | [((484, 511), 'numpy.polyfit', 'np.polyfit', (['time', 'levels', 'p'], {}), '(time, levels, p)\n', (494, 511), True, 'import numpy as np\n'), ((590, 608), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (599, 608), True, 'import numpy as np\n')] |
import random
import numpy as np
from ._common import _all_indices, _tally_at_pointer, _inc_pointer
def _order_tiebreak(winners, n=1):
"""
Given an iterable of possibly tied `winners`, select the highest numbered.
(Since they are to be eliminated.)
"""
return sorted(winners)[-n:]
def _random_tiebreak(winners, n=1):
"""
Given an iterable of possibly tied `winners`, select one at random.
"""
if len(winners) == 1:
return winners
else:
return random.sample(winners, n)
def _no_tiebreak(winners, n=1):
"""
Given an iterable of `winners`, return None if there is a tie.
"""
if len(winners) <= n:
return winners
else:
return [None]
_tiebreak_map = {'order': _order_tiebreak,
'random': _random_tiebreak,
None: _no_tiebreak}
def _get_tiebreak(tiebreaker):
try:
return _tiebreak_map[tiebreaker]
except KeyError:
raise ValueError('Tiebreaker not understood')
def irv(election, tiebreaker=None):
"""
Find the winner of an election using instant-runoff voting.
If any candidate gets a majority of first-preference votes, they win.
Otherwise, the candidate(s) with the least number of votes is eliminated,
votes for eliminated candidates are transferred according to the voters'
preference rankings, and a series of runoff elections are held between the
remainders until a candidate gets a majority.[1]_
Also known as "the alternative vote", "ranked-choice voting", Hare's
method, or Ware's method.
The votes in each instant-runoff round are calculated from the same set of
ranked ballots. If voters are honest and consistent between rounds, then
this is also equivalent to the exhaustive ballot method, which uses actual
separate runoff elections.[2]_
Parameters
----------
election : array_like
A collection of ranked ballots. See `borda` for election format.
Currently, this must include full rankings for each voter.
tiebreaker : {'random', None}, optional
        If there is a tie and `tiebreaker` is ``'random'``, one of the tied
        candidates is eliminated at random.
If 'order', the lowest-ID tied candidate is preferred in each tie.
By default, ``None`` is returned if there are any ties.
Returns
-------
winner : int
The ID number of the winner, or ``None`` for an unbroken tie.
References
----------
.. [1] https://en.wikipedia.org/wiki/Instant-runoff_voting
.. [2] https://en.wikipedia.org/wiki/Exhaustive_ballot
Examples
--------
Label some candidates:
>>> A, B, C = 0, 1, 2
Specify the ballots for the 5 voters:
>>> election = [[A, C, B],
[A, C, B],
[B, C, A],
[B, C, A],
[C, A, B],
]
In the first round, no candidate gets a majority, so Candidate C (2) is
eliminated. Voter 4's support is transferred to Candidate A (0), causing
Candidate A to win, with 3 out of 5 votes:
>>> irv(election)
0
"""
election = np.asarray(election)
n_voters = election.shape[0]
n_cands = election.shape[1]
eliminated = set()
tiebreak = _get_tiebreak(tiebreaker)
pointer = np.zeros(n_voters, dtype=np.uint8)
tallies = np.empty(n_cands, dtype=np.uint)
for round_ in range(n_cands):
_tally_at_pointer(tallies, election, pointer)
tallies_list = tallies.tolist() # tolist makes things 2-4x faster
        # Did anyone get a majority?
highest = max(tallies_list)
if highest > n_voters / 2:
return tallies_list.index(highest)
# If not, eliminate lowest
lowest = min(x for x in tallies_list if x != 0) # faster?
low_scorers = _all_indices(tallies_list, lowest)
loser = tiebreak(low_scorers)[0]
# Handle no tiebreaker case
if loser is None:
return None
        # Add the candidate with the lowest score this round to the eliminated set
eliminated.add(loser)
# Make sure candidates who never got votes are also eliminated
# TODO: In the future when round tallies are also output, this should
# be its own round
eliminated.update(_all_indices(tallies_list, 0))
# Increment pointers until they point at non-eliminated candidates
_inc_pointer(election, pointer, eliminated)
raise RuntimeError('Bug in IRV calculation')
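# Hedged usage sketch (not part of the original module): reusing the
# docstring's 5-voter election and forcing a deterministic tiebreaker.
#
#     election = [[0, 2, 1], [0, 2, 1], [1, 2, 0], [1, 2, 0], [2, 0, 1]]
#     irv(election)                      # -> 0, as derived in the docstring
#     irv(election, tiebreaker='order')  # eliminates the highest-ID candidate on ties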
| [
"random.sample",
"numpy.zeros",
"numpy.asarray",
"numpy.empty"
] | [((3207, 3227), 'numpy.asarray', 'np.asarray', (['election'], {}), '(election)\n', (3217, 3227), True, 'import numpy as np\n'), ((3371, 3405), 'numpy.zeros', 'np.zeros', (['n_voters'], {'dtype': 'np.uint8'}), '(n_voters, dtype=np.uint8)\n', (3379, 3405), True, 'import numpy as np\n'), ((3420, 3452), 'numpy.empty', 'np.empty', (['n_cands'], {'dtype': 'np.uint'}), '(n_cands, dtype=np.uint)\n', (3428, 3452), True, 'import numpy as np\n'), ((504, 529), 'random.sample', 'random.sample', (['winners', 'n'], {}), '(winners, n)\n', (517, 529), False, 'import random\n')] |
from mmdet.datasets.builder import PIPELINES
import os
import cv2
import random
import numpy as np
def visual_table_resized_bbox(results):
bboxes = results['img_info']['bbox']
img = results['img']
for bbox in bboxes:
img = cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), thickness=1)
return img
def visual_table_xywh_bbox(results):
img = results['img']
bboxes = results['img_info']['bbox']
for bbox in bboxes:
draw_bbox = np.empty_like(bbox)
draw_bbox[0] = bbox[0] - bbox[2] / 2
draw_bbox[1] = bbox[1] - bbox[3] / 2
draw_bbox[2] = bbox[0] + bbox[2] / 2
draw_bbox[3] = bbox[1] + bbox[3] / 2
img = cv2.rectangle(img, (int(draw_bbox[0]), int(draw_bbox[1])), (int(draw_bbox[2]), int(draw_bbox[3])), (0, 255, 0), thickness=1)
return img
@PIPELINES.register_module()
class TableResize:
"""Image resizing and padding for Table Recognition OCR, Table Structure Recognition.
    Args:
        img_scale (tuple(int) | None): Target size after resizing. If one
            entry is -1, it is derived from the other entry so that the
            image aspect ratio is preserved.
        min_size (int | None): Target size of the shorter image side,
            applied before the resize scale is computed.
        ratio_range (list(float) | None): If given (and `img_scale` is
            None), the resize ratio is drawn uniformly from this range.
        interpolation: Interpolation method passed to cv2.resize;
            defaults to cv2.INTER_LINEAR.
        keep_ratio (bool): Keep image aspect ratio if True during
            resizing. Otherwise resize exactly to `img_scale`.
        long_size (int | None): Target size of the longer image side,
            applied before the resize scale is computed.
    """
def __init__(self,
img_scale=None,
min_size=None,
ratio_range=None,
interpolation=None,
keep_ratio=True,
long_size=None):
self.img_scale = img_scale
self.min_size = min_size
self.ratio_range = ratio_range
        self.interpolation = interpolation if interpolation is not None else cv2.INTER_LINEAR
self.long_size = long_size
self.keep_ratio = keep_ratio
def _get_resize_scale(self, w, h):
if self.keep_ratio:
if self.img_scale is None and isinstance(self.ratio_range, list):
choice_ratio = random.uniform(self.ratio_range[0], self.ratio_range[1])
return (int(w * choice_ratio), int(h * choice_ratio))
elif isinstance(self.img_scale, tuple) and -1 in self.img_scale:
if self.img_scale[0] == -1:
resize_w = w / h * self.img_scale[1]
return (int(resize_w), self.img_scale[1])
else:
resize_h = h / w * self.img_scale[0]
return (self.img_scale[0], int(resize_h))
else:
return (int(w), int(h))
else:
if isinstance(self.img_scale, tuple):
return self.img_scale
else:
raise NotImplementedError
def _resize_bboxes(self, results):
img_shape = results['img_shape']
if 'img_info' in results.keys():
# train and validate phase
if results['img_info'].get('bbox', None) is not None:
bboxes = results['img_info']['bbox']
scale_factor = results['scale_factor']
# bboxes[..., 0::2], bboxes[..., 1::2] = \
# bboxes[..., 0::2] * scale_factor[1], bboxes[..., 1::2] * scale_factor[0]
bboxes[..., 0::2] = np.clip(bboxes[..., 0::2] * scale_factor[1], 0, img_shape[1]-1)
bboxes[..., 1::2] = np.clip(bboxes[..., 1::2] * scale_factor[0], 0, img_shape[0]-1)
results['img_info']['bbox'] = bboxes
else:
raise ValueError('results should have bbox keys.')
else:
# testing phase
pass
def _resize_img(self, results):
img = results['img']
h, w, _ = img.shape
if self.min_size is not None:
if w > h:
w = self.min_size / h * w
h = self.min_size
else:
h = self.min_size / w * h
w = self.min_size
if self.long_size is not None:
if w < h:
w = self.long_size / h * w
h = self.long_size
else:
h = self.long_size / w * h
w = self.long_size
img_scale = self._get_resize_scale(w, h)
resize_img = cv2.resize(img, img_scale, interpolation=self.interpolation)
scale_factor = (resize_img.shape[0] / img.shape[0], resize_img.shape[1] / img.shape[1])
results['img'] = resize_img
results['img_shape'] = resize_img.shape
results['pad_shape'] = resize_img.shape
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def __call__(self, results):
self._resize_img(results)
self._resize_bboxes(results)
return results
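# Illustrative sketch only (not part of the original pipeline): driving the
# transform directly on a hand-built `results` dict; the bbox values are
# hypothetical but follow the (x1, y1, x2, y2) layout read above.
#
#     resize = TableResize(img_scale=(480, -1), keep_ratio=True)
#     results = {'img': np.zeros((100, 200, 3), dtype=np.uint8),
#                'img_info': {'bbox': np.array([[10., 10., 50., 40.]])}}
#     results = resize(results)  # rescales the image and clips bboxes to img_shape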
@PIPELINES.register_module()
class TablePad:
"""Pad the image & mask.
Two padding modes:
(1) pad to fixed size.
    (2) pad to the minimum size that is divisible by some number.
"""
def __init__(self,
size=None,
size_divisor=None,
pad_val=None,
keep_ratio=False,
return_mask=False,
mask_ratio=2,
train_state=True,
):
self.size = size[::-1]
self.size_divisor = size_divisor
self.pad_val = pad_val
self.keep_ratio = keep_ratio
self.return_mask = return_mask
self.mask_ratio = mask_ratio
self.training = train_state
# only one of size or size_divisor is valid.
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _pad(self, img, size, pad_val):
if not isinstance(size, tuple):
raise NotImplementedError
if len(size) < len(img.shape):
shape = size + (img.shape[-1], )
else:
shape = size
pad = np.empty(shape, dtype=img.dtype)
pad[...] = pad_val
h, w = img.shape[:2]
size_w, size_h = size[:2]
if h > size_h or w > size_w:
if self.keep_ratio:
if h / size_h > w / size_w:
size = (int(w / h * size_h), size_h)
else:
size = (size_w, int(h / w * size_w))
img = cv2.resize(img, size[::-1], cv2.INTER_LINEAR)
pad[:img.shape[0], :img.shape[1], ...] = img
if self.return_mask:
mask = np.empty(size, dtype=img.dtype)
mask[...] = 0
mask[:img.shape[0], :img.shape[1]] = 1
# mask_ratio is mean stride of backbone in (height, width)
if isinstance(self.mask_ratio, int):
mask = mask[::self.mask_ratio, ::self.mask_ratio]
elif isinstance(self.mask_ratio, tuple):
mask = mask[::self.mask_ratio[0], ::self.mask_ratio[1]]
else:
raise NotImplementedError
mask = np.expand_dims(mask, axis=0)
else:
mask = None
return pad, mask
def _divisor(self, img, size_divisor, pad_val):
pass
def _pad_img(self, results):
if self.size is not None:
padded_img, mask = self._pad(results['img'], self.size, self.pad_val)
elif self.size_divisor is not None:
raise NotImplementedError
results['img'] = padded_img
results['mask'] = mask
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def __call__(self, results):
self._pad_img(results)
#visual_img = visual_table_resized_bbox(results)
#cv2.imwrite('/data_0/cache/{}_visual.jpg'.format(os.path.basename(results['filename']).split('.')[0]), visual_img)
# if self.training:
# scaleBbox(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(size={}, size_divisor={}, pad_val={})'.format(
self.size, self.size_divisor, self.pad_val)
return repr_str
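# Illustrative sketch only: padding the resized image (e.g. the `results` dict
# from the sketch above) to a fixed canvas while also producing a downsampled
# mask; the sizes chosen here are hypothetical.
#
#     pad = TablePad(size=(480, 480), pad_val=0, return_mask=True, mask_ratio=8)
#     results = pad(results)  # results['img'] becomes 480x480, results['mask'] is set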
def xyxy2xywh(bboxes):
"""
    Convert coord (x1,y1,x2,y2) to (x,y,w,h),
    where (x1,y1) is the top-left corner and (x2,y2) the bottom-right corner;
    (x,y) is the bbox center and (w,h) its width and height.
:param bboxes: (x1, y1, x2, y2)
:return:
"""
new_bboxes = np.empty_like(bboxes)
new_bboxes[..., 0] = (bboxes[..., 0] + bboxes[..., 2]) / 2 # x center
new_bboxes[..., 1] = (bboxes[..., 1] + bboxes[..., 3]) / 2 # y center
new_bboxes[..., 2] = bboxes[..., 2] - bboxes[..., 0] # width
new_bboxes[..., 3] = bboxes[..., 3] - bboxes[..., 1] # height
return new_bboxes
def normalize_bbox(bboxes, img_shape):
bboxes[..., 0], bboxes[..., 2] = bboxes[..., 0] / img_shape[1], bboxes[..., 2] / img_shape[1]
bboxes[..., 1], bboxes[..., 3] = bboxes[..., 1] / img_shape[0], bboxes[..., 3] / img_shape[0]
return bboxes
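# Worked example (added for clarity): a 20x10 box with top-left (10, 10) on a
# 100x200 (height x width) image, converted to center/size form and normalized.
#
#     box = np.array([[10., 10., 30., 20.]])      # (x1, y1, x2, y2)
#     xyxy2xywh(box)                              # -> [[20., 15., 20., 10.]]
#     normalize_bbox(xyxy2xywh(box), (100, 200))  # x, w / 200 and y, h / 100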
@PIPELINES.register_module()
class TableBboxEncode:
"""Encode table bbox for training.
    1. convert coords (x1, y1, x2, y2) to (x, y, w, h)
    2. normalize to (0, 1)
    3. adjust the location of keys 'bbox' and 'bbox_mask' in the 'results' dict
"""
def __init__(self):
pass
def __call__(self, results):
bboxes = results['img_info']['bbox']
bboxes = xyxy2xywh(bboxes)
img_shape = results['img'].shape
bboxes = normalize_bbox(bboxes, img_shape)
flag = self.check_bbox_valid(bboxes)
if not flag:
print('Box invalid in {}'.format(results['filename']))
results['img_info']['bbox'] = bboxes
self.adjust_key(results)
# self.visual_normalized_bbox(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
return repr_str
def check_bbox_valid(self, bboxes):
low = (bboxes >= 0.) * 1
high = (bboxes <= 1.) * 1
matrix = low + high
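        # 4 coordinates, each passing both the >= 0 and <= 1 check, must sum to 8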
for idx, m in enumerate(matrix):
if m.sum() != 8:
return False
return True
def visual_normalized_bbox(self, results):
"""
        Visualize the normalized bboxes in results (debugging helper).
:param results:
:return:
"""
save_path = '/data_0/cache/{}_normalized.jpg'.\
format(os.path.basename(results['filename']).split('.')[0])
img = results['img']
img_shape = img.shape
# x,y,w,h
bboxes = results['img_info']['bbox']
bboxes[..., 0::2] = bboxes[..., 0::2] * img_shape[1]
bboxes[..., 1::2] = bboxes[..., 1::2] * img_shape[0]
# x,y,x,y
new_bboxes = np.empty_like(bboxes)
new_bboxes[..., 0] = bboxes[..., 0] - bboxes[..., 2] / 2
new_bboxes[..., 1] = bboxes[..., 1] - bboxes[..., 3] / 2
new_bboxes[..., 2] = bboxes[..., 0] + bboxes[..., 2] / 2
new_bboxes[..., 3] = bboxes[..., 1] + bboxes[..., 3] / 2
# draw
for new_bbox in new_bboxes:
img = cv2.rectangle(img, (int(new_bbox[0]), int(new_bbox[1])),
(int(new_bbox[2]), int(new_bbox[3])), (0, 255, 0), thickness=1)
cv2.imwrite(save_path, img)
def adjust_key(self, results):
"""
Adjust key 'bbox' and 'bbox_mask' location in dictionary 'results'.
:param results:
:return:
"""
bboxes = results['img_info'].pop('bbox')
bboxes_masks = results['img_info'].pop('bbox_masks')
results['bbox'] = bboxes
results['bbox_masks'] = bboxes_masks
return results
| [
"numpy.clip",
"cv2.imwrite",
"random.uniform",
"numpy.empty_like",
"numpy.empty",
"numpy.expand_dims",
"os.path.basename",
"cv2.resize",
"mmdet.datasets.builder.PIPELINES.register_module"
] | [((869, 896), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (894, 896), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((5244, 5271), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (5269, 5271), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((9429, 9456), 'mmdet.datasets.builder.PIPELINES.register_module', 'PIPELINES.register_module', ([], {}), '()\n', (9454, 9456), False, 'from mmdet.datasets.builder import PIPELINES\n'), ((8848, 8869), 'numpy.empty_like', 'np.empty_like', (['bboxes'], {}), '(bboxes)\n', (8861, 8869), True, 'import numpy as np\n'), ((513, 532), 'numpy.empty_like', 'np.empty_like', (['bbox'], {}), '(bbox)\n', (526, 532), True, 'import numpy as np\n'), ((4728, 4788), 'cv2.resize', 'cv2.resize', (['img', 'img_scale'], {'interpolation': 'self.interpolation'}), '(img, img_scale, interpolation=self.interpolation)\n', (4738, 4788), False, 'import cv2\n'), ((6390, 6422), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'img.dtype'}), '(shape, dtype=img.dtype)\n', (6398, 6422), True, 'import numpy as np\n'), ((11110, 11131), 'numpy.empty_like', 'np.empty_like', (['bboxes'], {}), '(bboxes)\n', (11123, 11131), True, 'import numpy as np\n'), ((11625, 11652), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'img'], {}), '(save_path, img)\n', (11636, 11652), False, 'import cv2\n'), ((6781, 6826), 'cv2.resize', 'cv2.resize', (['img', 'size[::-1]', 'cv2.INTER_LINEAR'], {}), '(img, size[::-1], cv2.INTER_LINEAR)\n', (6791, 6826), False, 'import cv2\n'), ((6928, 6959), 'numpy.empty', 'np.empty', (['size'], {'dtype': 'img.dtype'}), '(size, dtype=img.dtype)\n', (6936, 6959), True, 'import numpy as np\n'), ((7429, 7457), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (7443, 7457), True, 'import numpy as np\n'), ((2482, 2538), 'random.uniform', 'random.uniform', (['self.ratio_range[0]', 'self.ratio_range[1]'], {}), '(self.ratio_range[0], self.ratio_range[1])\n', (2496, 2538), False, 'import random\n'), ((3735, 3800), 'numpy.clip', 'np.clip', (['(bboxes[..., 0::2] * scale_factor[1])', '(0)', '(img_shape[1] - 1)'], {}), '(bboxes[..., 0::2] * scale_factor[1], 0, img_shape[1] - 1)\n', (3742, 3800), True, 'import numpy as np\n'), ((3835, 3900), 'numpy.clip', 'np.clip', (['(bboxes[..., 1::2] * scale_factor[0])', '(0)', '(img_shape[0] - 1)'], {}), '(bboxes[..., 1::2] * scale_factor[0], 0, img_shape[0] - 1)\n', (3842, 3900), True, 'import numpy as np\n'), ((10774, 10811), 'os.path.basename', 'os.path.basename', (["results['filename']"], {}), "(results['filename'])\n", (10790, 10811), False, 'import os\n')] |
# Copyright (c) 2012, <NAME> [see LICENSE.txt]
# Python 2 to 3 workarounds
import sys
if sys.version_info[0] == 2:
_strobj = str
_xrange = xrange
elif sys.version_info[0] == 3:
_strobj = str
_xrange = range
import os
import pylab
import numpy as np
from collections import Counter
from pyvttbl.misc.support import _flatten
def box_plot(df, val, factors=None, where=None,
fname=None, output_dir='', quality='medium'):
"""
Makes a box plot
args:
df:
a pyvttbl.DataFrame object
val:
the label of the dependent variable
kwds:
factors:
a list of factors to include in boxplot
where:
a string, list of strings, or list of tuples
applied to the DataFrame before plotting
fname:
output file name
quality:
{'low' | 'medium' | 'high'} specifies image file dpi
"""
if factors == None:
factors = []
if where == None:
where = []
# check to see if there is any data in the table
if df == {}:
raise Exception('Table must have data to print data')
# check to see if data columns have equal lengths
if not df._are_col_lengths_equal():
raise Exception('columns have unequal lengths')
# check the supplied arguments
if val not in list(df.keys()):
raise KeyError(val)
if not hasattr(factors, '__iter__'):
raise TypeError( "'%s' object is not iterable"
% type(factors).__name__)
for k in factors:
if k not in list(df.keys()):
raise KeyError(k)
# check for duplicate names
dup = Counter([val]+factors)
del dup[None]
if not all([count==1 for count in list(dup.values())]):
raise Exception('duplicate labels specified as plot parameters')
# check fname
if not isinstance(fname, _strobj) and fname != None:
raise TypeError('fname must be None or string')
if isinstance(fname, _strobj):
if not (fname.lower().endswith('.png') or \
fname.lower().endswith('.svg')):
raise Exception('fname must end with .png or .svg')
test = {}
if factors == []:
d = df.select_col(val, where=where)
fig = pylab.figure()
pylab.boxplot(np.array(d))
xticks = pylab.xticks()[0]
xlabels = [val]
pylab.xticks(xticks, xlabels)
test['d'] = d
test['val'] = val
else:
D = df.pivot(val, rows=factors,
where=where,
aggregate='tolist')
fig = pylab.figure(figsize=(6*len(factors),6))
fig.subplots_adjust(left=.05, right=.97, bottom=0.24)
pylab.boxplot([np.array(_flatten(d)) for d in D])
xticks = pylab.xticks()[0]
xlabels = ['\n'.join('%s = %s'%fc for fc in c) for c in D.rnames]
pylab.xticks(xticks, xlabels,
rotation=35,
verticalalignment='top')
test['d'] = [np.array(_flatten(d)) for d in D]
test['xlabels'] = xlabels
maintitle = '%s'%val
if factors != []:
maintitle += ' by '
maintitle += ' * '.join(factors)
fig.text(0.5, 0.95, maintitle,
horizontalalignment='center',
verticalalignment='top')
test['maintitle'] = maintitle
if fname == None:
fname = 'box(%s'%val
if factors != []:
fname += '~' + '_X_'.join([str(f) for f in factors])
fname += ').png'
fname = os.path.join(output_dir, fname)
test['fname'] = fname
# save figure
if quality == 'low' or fname.endswith('.svg'):
pylab.savefig(fname)
elif quality == 'medium':
pylab.savefig(fname, dpi=200)
elif quality == 'high':
pylab.savefig(fname, dpi=300)
else:
pylab.savefig(fname)
pylab.close()
if df.TESTMODE:
return test
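# Hedged usage sketch (column names are hypothetical; `df` is assumed to be a
# pyvttbl.DataFrame that already holds data):
#
#     box_plot(df, val='rt', factors=['condition', 'group'],
#              where=['rt > 0'], fname='rt_box.png', quality='high')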
| [
"pylab.xticks",
"pylab.savefig",
"os.path.join",
"pylab.close",
"collections.Counter",
"pylab.figure",
"numpy.array",
"pyvttbl.misc.support._flatten"
] | [((1735, 1759), 'collections.Counter', 'Counter', (['([val] + factors)'], {}), '([val] + factors)\n', (1742, 1759), False, 'from collections import Counter\n'), ((3640, 3671), 'os.path.join', 'os.path.join', (['output_dir', 'fname'], {}), '(output_dir, fname)\n', (3652, 3671), False, 'import os\n'), ((4011, 4024), 'pylab.close', 'pylab.close', ([], {}), '()\n', (4022, 4024), False, 'import pylab\n'), ((2350, 2364), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (2362, 2364), False, 'import pylab\n'), ((2467, 2496), 'pylab.xticks', 'pylab.xticks', (['xticks', 'xlabels'], {}), '(xticks, xlabels)\n', (2479, 2496), False, 'import pylab\n'), ((2969, 3036), 'pylab.xticks', 'pylab.xticks', (['xticks', 'xlabels'], {'rotation': '(35)', 'verticalalignment': '"""top"""'}), "(xticks, xlabels, rotation=35, verticalalignment='top')\n", (2981, 3036), False, 'import pylab\n'), ((3785, 3805), 'pylab.savefig', 'pylab.savefig', (['fname'], {}), '(fname)\n', (3798, 3805), False, 'import pylab\n'), ((2387, 2398), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (2395, 2398), True, 'import numpy as np\n'), ((2417, 2431), 'pylab.xticks', 'pylab.xticks', ([], {}), '()\n', (2429, 2431), False, 'import pylab\n'), ((2869, 2883), 'pylab.xticks', 'pylab.xticks', ([], {}), '()\n', (2881, 2883), False, 'import pylab\n'), ((3853, 3882), 'pylab.savefig', 'pylab.savefig', (['fname'], {'dpi': '(200)'}), '(fname, dpi=200)\n', (3866, 3882), False, 'import pylab\n'), ((3110, 3121), 'pyvttbl.misc.support._flatten', '_flatten', (['d'], {}), '(d)\n', (3118, 3121), False, 'from pyvttbl.misc.support import _flatten\n'), ((3928, 3957), 'pylab.savefig', 'pylab.savefig', (['fname'], {'dpi': '(300)'}), '(fname, dpi=300)\n', (3941, 3957), False, 'import pylab\n'), ((3985, 4005), 'pylab.savefig', 'pylab.savefig', (['fname'], {}), '(fname)\n', (3998, 4005), False, 'import pylab\n'), ((2826, 2837), 'pyvttbl.misc.support._flatten', '_flatten', (['d'], {}), '(d)\n', (2834, 2837), False, 'from pyvttbl.misc.support import _flatten\n')] |
import numpy as np
def compute_lambda(J, S):
if len(J) == len(S):
lambda_m = np.array([J[i]/S[j] for i in range(len(J)) for j in range(len(S))])
return (1./len(S))*lambda_m.reshape((len(J),len(S)))
else:
raise ValueError('J and S arrays must have the same length.')
def split_lambda(L, mtype = 'diagonal'):
L_star = np.zeros(L.shape)
diag = np.diag_indices_from(L)
L_star[diag] = L[diag].copy()
if mtype == 'diagonal':
return L_star
elif mtype == 'tridiagonal':
lower_diag = (diag[0][:-1],diag[1][1:])
upper_diag = (diag[0][1:],diag[1][:-1])
L_star[lower_diag] = L[lower_diag].copy()
L_star[upper_diag] = L[upper_diag].copy()
return L_star
else:
raise NotImplementedError('%s' % mtype)
def compute_S_step(J, S, mtype='diagonal'):
'''
Computes the next iteration source-function using Accelerated Lambda Iteration.
The gray version of the equation (RT: S=J) is:
    $S^{n+1} = (1 - \Lambda^*)^{-1} (\Lambda - \Lambda^*) S^{n}$
'''
L = compute_lambda(J, S)
L_star = split_lambda(L, mtype=mtype)
L_star_I_inv = np.linalg.inv(np.identity(len(L_star)) - L_star)
return np.matmul(np.matmul(L_star_I_inv, L-L_star), S)
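# Minimal usage sketch (illustrative values only): one ALI update on a
# three-point gray problem; with J == S the update leaves S unchanged,
# since then Lambda @ S == S and the correction terms cancel.
#
#     J = np.array([1.0, 2.0, 3.0])
#     S = np.array([1.0, 2.0, 3.0])
#     S_next = compute_S_step(J, S, mtype='tridiagonal')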
| [
"numpy.diag_indices_from",
"numpy.zeros",
"numpy.matmul"
] | [((358, 375), 'numpy.zeros', 'np.zeros', (['L.shape'], {}), '(L.shape)\n', (366, 375), True, 'import numpy as np\n'), ((387, 410), 'numpy.diag_indices_from', 'np.diag_indices_from', (['L'], {}), '(L)\n', (407, 410), True, 'import numpy as np\n'), ((1243, 1278), 'numpy.matmul', 'np.matmul', (['L_star_I_inv', '(L - L_star)'], {}), '(L_star_I_inv, L - L_star)\n', (1252, 1278), True, 'import numpy as np\n')] |
import meshio
import numpy
from . import avsucd, flac3d, pickle, tecplot, tough
from ._mesh import Mesh, from_meshio
__all__ = [
"read",
"write",
"write_points_cells",
"read_time_series",
"write_time_series",
]
_extension_to_filetype = {
".dat": "tecplot",
".f3grid": "flac3d",
".pickle": "pickle",
}
def _filetype_from_filename(filename):
"""Determine file type from its extension."""
import os
ext = os.path.splitext(filename)[1].lower()
return _extension_to_filetype[ext] if ext in _extension_to_filetype.keys() else ""
def read(filename, file_format=None, **kwargs):
"""Read unstructured mesh from file.
Parameters
----------
filename : str
Input file name.
file_format : str or None, optional, default None
Input file format.
Returns
-------
toughio.Mesh
Imported mesh.
"""
# Check file format
if not isinstance(filename, str):
raise TypeError()
fmt = file_format if file_format else _filetype_from_filename(filename)
# Call custom readers
format_to_reader = {
"tough": (tough, (), {}),
"avsucd": (avsucd, (), {}),
"flac3d": (flac3d, (), {}),
"pickle": (pickle, (), {}),
"tecplot": (tecplot, (), {}),
}
if fmt in format_to_reader.keys():
interface, args, default_kwargs = format_to_reader[fmt]
_kwargs = default_kwargs.copy()
_kwargs.update(kwargs)
return interface.read(filename, *args, **_kwargs)
else:
mesh = meshio.read(filename, file_format)
return from_meshio(mesh)
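# Hedged usage sketch (file names are hypothetical): the reader is picked from
# the extension unless `file_format` is given explicitly.
#
#     mesh = read("MESH", file_format="tough")  # no extension, so force the format
#     mesh = read("grid.f3grid")                # FLAC3D, inferred from the extension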
def write(filename, mesh, file_format=None, **kwargs):
"""Write unstructured mesh to file.
Parameters
----------
filename : str
Output file name.
mesh : toughio.Mesh
Mesh to export.
file_format : str or None, optional, default None
Output file format.
Other Parameters
----------------
nodal_distance : str ('line' or 'orthogonal'), optional, default 'line'
Only if ``file_format = "tough"``. Method to calculate connection
nodal distances:
- 'line': distance between node and common face along connecting
line (distance is not normal),
- 'orthogonal' : distance between node and its orthogonal
projection onto common face (shortest distance).
material_name : dict or None, default None
Only if ``file_format = "tough"``. Rename cell material.
material_end : str, array_like or None, default None
Only if ``file_format = "tough"``. Move cells to bottom of block
        'ELEME' if their material is in `material_end`.
incon_eos : str or None, optional, default None
Equation-of-state identifier to determine the actual number of
primary variables to initialize. If `None`, TOUGH input `INCON`
file will not be written.
"""
# Check file format
if not isinstance(filename, str):
raise TypeError()
fmt = file_format if file_format else _filetype_from_filename(filename)
# Call custom writer
format_to_writer = {
"tough": (
tough,
(),
{
"nodal_distance": "line",
"material_name": None,
"material_end": None,
"incon_eos": None,
},
),
"avsucd": (avsucd, (), {}),
"flac3d": (flac3d, (), {}),
"pickle": (pickle, (), {}),
"tecplot": (tecplot, (), {}),
}
if fmt in format_to_writer.keys():
interface, args, default_kwargs = format_to_writer[fmt]
_kwargs = default_kwargs.copy()
_kwargs.update(kwargs)
interface.write(filename, mesh, *args, **_kwargs)
else:
mesh = mesh.to_meshio()
meshio.write(filename, mesh, file_format=file_format, **kwargs)
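# Hedged usage sketch (mesh and material names are hypothetical): TOUGH-specific
# keyword arguments are only consumed when writing in the "tough" format.
#
#     write("MESH", mesh, file_format="tough",
#           nodal_distance="orthogonal", material_end="BOUND")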
def write_points_cells(
filename,
points,
cells,
point_data=None,
cell_data=None,
field_data=None,
file_format=None,
**kwargs
):
"""Write unstructured mesh to file given points and cells data.
Parameters
----------
filename : str
Output file name.
points : ndarray
Grid points array.
cells : dict
Grid cell data.
point_data : dict or None, optional, default None
Data associated to grid points.
cell_data : dict or None, optional, default None
Data associated to grid cells.
field_data : dict or None, optional, default None
Data names.
file_format : str or None, optional, default None
Output file format.
Other Parameters
----------------
kwargs : dict
Refer to function ``write`` for additional information.
"""
mesh = Mesh(
points=points,
cells=cells,
point_data=point_data,
cell_data=cell_data,
field_data=field_data,
)
write(filename, mesh, file_format=file_format, **kwargs)
def read_time_series(filename):
"""Read time series from XDMF file.
Parameters
----------
filename : str
Input file name.
Returns
-------
list of namedtuple (type, data)
Grid cell data.
list of dict
Data associated to grid points for each time step.
list of dict
Data associated to grid cells for each time step.
array_like
Time step values.
"""
from ._common import get_meshio_version, get_new_meshio_cells
if not isinstance(filename, str):
raise ValueError()
point_data, cell_data, time_steps = [], [], []
if get_meshio_version() < (3,):
reader = meshio.XdmfTimeSeriesReader(filename)
points, cells = reader.read_points_cells()
for k in range(reader.num_steps):
t, pdata, cdata = reader.read_data(k)
_, cdata = get_new_meshio_cells(cells, cdata)
point_data.append(pdata)
cell_data.append(cdata)
time_steps.append(t)
cells = get_new_meshio_cells(cells)
else:
with meshio.xdmf.TimeSeriesReader(filename) as reader:
points, cells = reader.read_points_cells()
for k in range(reader.num_steps):
t, pdata, cdata = reader.read_data(k)
point_data.append(pdata)
cell_data.append(cdata)
time_steps.append(t)
return points, cells, point_data, cell_data, time_steps
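# Hedged usage sketch (file name hypothetical): unpacking a series written by
# ``write_time_series`` below.
#
#     points, cells, pdata, cdata, ts = read_time_series("OUTPUT.xdmf")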
def write_time_series(
filename, points, cells, point_data=None, cell_data=None, time_steps=None,
):
"""Write time series given points and cells data.
Parameters
----------
filename : str
Output file name.
points : ndarray
Grid points array.
cells : list of namedtuple (type, data)
Grid cell data.
point_data : list of dict or None, optional, default None
Data associated to grid points for each time step.
cell_data : list of dict or None, optional, default None
Data associated to grid cells for each time step.
time_steps : array_like, optional, default None
Time step values.
"""
from ._common import get_meshio_version, get_old_meshio_cells
if not isinstance(filename, str):
raise TypeError()
if point_data is not None and not isinstance(point_data, (list, tuple)):
raise TypeError()
if cell_data is not None and not isinstance(cell_data, (list, tuple)):
raise TypeError()
if time_steps is not None and not isinstance(
time_steps, (list, tuple, numpy.ndarray)
):
raise TypeError()
if not (point_data or cell_data):
raise ValueError("Provide at least point_data or cell_data.")
else:
nt = len(point_data) if point_data else len(cell_data)
if point_data and len(point_data) != nt:
raise ValueError("Inconsistent number of point data.")
if cell_data and len(cell_data) != nt:
raise ValueError("Inconsistent number of cell data.")
if time_steps is not None and len(time_steps) != nt:
raise ValueError("Inconsistent number of time steps.")
point_data = point_data if point_data else [{}] * nt
cell_data = cell_data if cell_data else [{}] * nt
time_steps = time_steps if time_steps is not None else list(range(nt))
# Sort data with time steps
idx = numpy.argsort(time_steps)
point_data = [point_data[i] for i in idx]
cell_data = [cell_data[i] for i in idx]
time_steps = [time_steps[i] for i in idx]
# Write XDMF
def write_data(writer, points, cells, point_data, cell_data, time_steps):
writer.write_points_cells(points, cells)
for tstep, pdata, cdata in zip(time_steps, point_data, cell_data):
writer.write_data(tstep, point_data=pdata, cell_data=cdata)
if get_meshio_version() < (3,):
writer = meshio.XdmfTimeSeriesWriter(filename)
tmp = [get_old_meshio_cells(cells, cdata) for cdata in cell_data]
cells = tmp[0][0]
cell_data = [cell[1] for cell in tmp]
write_data(writer, points, cells, point_data, cell_data, time_steps)
else:
with meshio.xdmf.TimeSeriesWriter(filename) as writer:
write_data(writer, points, cells, point_data, cell_data, time_steps)
| [
"meshio.XdmfTimeSeriesReader",
"meshio.xdmf.TimeSeriesWriter",
"meshio.XdmfTimeSeriesWriter",
"os.path.splitext",
"numpy.argsort",
"meshio.xdmf.TimeSeriesReader",
"meshio.write",
"meshio.read"
] | [((8336, 8361), 'numpy.argsort', 'numpy.argsort', (['time_steps'], {}), '(time_steps)\n', (8349, 8361), False, 'import numpy\n'), ((1558, 1592), 'meshio.read', 'meshio.read', (['filename', 'file_format'], {}), '(filename, file_format)\n', (1569, 1592), False, 'import meshio\n'), ((3816, 3879), 'meshio.write', 'meshio.write', (['filename', 'mesh'], {'file_format': 'file_format'}), '(filename, mesh, file_format=file_format, **kwargs)\n', (3828, 3879), False, 'import meshio\n'), ((5643, 5680), 'meshio.XdmfTimeSeriesReader', 'meshio.XdmfTimeSeriesReader', (['filename'], {}), '(filename)\n', (5670, 5680), False, 'import meshio\n'), ((8844, 8881), 'meshio.XdmfTimeSeriesWriter', 'meshio.XdmfTimeSeriesWriter', (['filename'], {}), '(filename)\n', (8871, 8881), False, 'import meshio\n'), ((6058, 6096), 'meshio.xdmf.TimeSeriesReader', 'meshio.xdmf.TimeSeriesReader', (['filename'], {}), '(filename)\n', (6086, 6096), False, 'import meshio\n'), ((9128, 9166), 'meshio.xdmf.TimeSeriesWriter', 'meshio.xdmf.TimeSeriesWriter', (['filename'], {}), '(filename)\n', (9156, 9166), False, 'import meshio\n'), ((453, 479), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (469, 479), False, 'import os\n')] |
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField
from image_geometry import PinholeCameraModel
import numpy
import message_filters
from kinect_numpy_tools.numpy_msg import numpy_msg
import code # HACK!!
class ImageGrabber():
def __init__(self, topics):
# Flags
self.need_rgb = True
self.need_depth = True
# RGB Subscribers
sub_rgb = message_filters.Subscriber(topics['rgb'], numpy_msg(Image))
sub_rgb_info = message_filters.Subscriber(topics['rgb_info'], CameraInfo)
ts_rgb = message_filters.TimeSynchronizer([sub_rgb, sub_rgb_info], 100)
ts_rgb.registerCallback(self.rgb_callback)
# Depth Subscribers
sub_depth = message_filters.Subscriber(topics['depth'], numpy_msg(Image))
sub_depth_info = message_filters.Subscriber(topics['depth_info'], CameraInfo)
ts_depth = message_filters.TimeSynchronizer([sub_depth, sub_depth_info], 100)
ts_depth.registerCallback(self.depth_callback)
self.pub_cloud = rospy.Publisher('/camera/xyz_rgb/points', numpy_msg(PointCloud2))
def rgb_callback(self, image_rgb, rgb_info):
self.image_rgb = image_rgb
self.rgb_info = rgb_info
self.need_rgb = False
def depth_callback(self, image_depth, depth_info):
image_depth.data[numpy.isnan(image_depth.data)] = 0.0 # filter out NANs
self.image_depth = image_depth
self.depth_info = depth_info
self.need_depth = False
def convert_to_xyz(self):
self.array_xyz = numpy.zeros(self.array_rays.shape)
for i in range(self.array_rays.shape[2]):
self.array_xyz[:, :, i] = self.image_depth.data * self.array_rays[:, :, i]
self.array_xyz /= 1000 # convert from millimeters -> meters
def get_rays(self):
model = PinholeCameraModel()
model.fromCameraInfo(self.depth_info)
self.array_rays = numpy.zeros((self.image_depth.height, self.image_depth.width, 3))
for u in range(self.image_depth.height):
for v in range(self.image_depth.width):
ray = model.projectPixelTo3dRay((u, v))
ray_z = [el / ray[2] for el in ray] # normalize the ray so its Z-component equals 1.0
self.array_rays[u, v, :] = ray_z
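# Note added for clarity: because each stored ray is scaled so its Z-component
# equals 1.0, i.e. ray_z = (X/Z, Y/Z, 1), multiplying by a raw depth value d
# yields the metric point (d*X/Z, d*Y/Z, d); convert_to_xyz() exploits this by
# scaling every channel of array_rays with the depth image directly.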
if __name__ == '__main__':
rospy.init_node('rgbd_to_xyz_converter')
topics = {'rgb' : '/camera/rgb/image_color',
'rgb_info' : '/camera/rgb/camera_info',
'depth' : '/camera/depth_registered/image_raw',
'depth_info' : '/camera/depth_registered/camera_info'}
image_grabber = ImageGrabber(topics)
while image_grabber.need_depth: # wait for first depth image
rospy.sleep(0.01)
rospy.loginfo('Got depth image; processing...')
image_grabber.get_rays() # make map of 3D rays for each pixel for converting depth values -> XYZ points
rospy.loginfo('Made array of all pixel->ray correspondences!')
while not rospy.is_shutdown(): # Do RGBD->XYZ conversions forever
if not image_grabber.need_rgb and not image_grabber.need_depth: # if have both RGB and Depth image
image_grabber.convert_to_xyz()
# Initialize the PointCloud2 msg
cloud = PointCloud2()
cloud.header.stamp = image_grabber.image_rgb.header.stamp
cloud.header.frame_id = image_grabber.image_rgb.header.frame_id
cloud.height = image_grabber.image_rgb.height
cloud.width = image_grabber.image_rgb.width
cloud.point_step = 32
cloud.row_step = 20480
# Jam the numpy XYZ/RGB data into the cloud
points_arr = image_grabber.array_xyz.astype('float32')
image_rgb = image_grabber.image_rgb.data
cloud.data = numpy.rec.fromarrays((points_arr, image_rgb), names=('xyz', 'rgb'))
# Generate 'fields' attribute of PointCloud2 according to Kinect conventions
cloud.fields.append( PointField(name='x', offset=0, datatype=7, count=1) )
cloud.fields.append( PointField(name='y', offset=4, datatype=7, count=1) )
cloud.fields.append( PointField(name='z', offset=8, datatype=7, count=1) )
cloud.fields.append( PointField(name='rgb', offset=16, datatype=7, count=1) )
# Publish!
image_grabber.pub_cloud.publish(data=cloud.data, fields=cloud.fields, header=cloud.header, height=cloud.height, width=cloud.width, point_step=cloud.point_step, row_step=cloud.row_step)
image_grabber.need_rgb = True # reset flags
image_grabber.need_depth = True
| [
"image_geometry.PinholeCameraModel",
"sensor_msgs.msg.PointField",
"rospy.is_shutdown",
"message_filters.TimeSynchronizer",
"rospy.init_node",
"numpy.zeros",
"numpy.isnan",
"kinect_numpy_tools.numpy_msg.numpy_msg",
"sensor_msgs.msg.PointCloud2",
"message_filters.Subscriber",
"rospy.sleep",
"nu... | [((2404, 2444), 'rospy.init_node', 'rospy.init_node', (['"""rgbd_to_xyz_converter"""'], {}), "('rgbd_to_xyz_converter')\n", (2419, 2444), False, 'import rospy\n'), ((2860, 2907), 'rospy.loginfo', 'rospy.loginfo', (['"""Got depth image; processing..."""'], {}), "('Got depth image; processing...')\n", (2873, 2907), False, 'import rospy\n'), ((3021, 3083), 'rospy.loginfo', 'rospy.loginfo', (['"""Made array of all pixel->ray correspondences!"""'], {}), "('Made array of all pixel->ray correspondences!')\n", (3034, 3083), False, 'import rospy\n'), ((531, 589), 'message_filters.Subscriber', 'message_filters.Subscriber', (["topics['rgb_info']", 'CameraInfo'], {}), "(topics['rgb_info'], CameraInfo)\n", (557, 589), False, 'import message_filters\n'), ((607, 669), 'message_filters.TimeSynchronizer', 'message_filters.TimeSynchronizer', (['[sub_rgb, sub_rgb_info]', '(100)'], {}), '([sub_rgb, sub_rgb_info], 100)\n', (639, 669), False, 'import message_filters\n'), ((857, 917), 'message_filters.Subscriber', 'message_filters.Subscriber', (["topics['depth_info']", 'CameraInfo'], {}), "(topics['depth_info'], CameraInfo)\n", (883, 917), False, 'import message_filters\n'), ((937, 1003), 'message_filters.TimeSynchronizer', 'message_filters.TimeSynchronizer', (['[sub_depth, sub_depth_info]', '(100)'], {}), '([sub_depth, sub_depth_info], 100)\n', (969, 1003), False, 'import message_filters\n'), ((1601, 1635), 'numpy.zeros', 'numpy.zeros', (['self.array_rays.shape'], {}), '(self.array_rays.shape)\n', (1612, 1635), False, 'import numpy\n'), ((1884, 1904), 'image_geometry.PinholeCameraModel', 'PinholeCameraModel', ([], {}), '()\n', (1902, 1904), False, 'from image_geometry import PinholeCameraModel\n'), ((1977, 2042), 'numpy.zeros', 'numpy.zeros', (['(self.image_depth.height, self.image_depth.width, 3)'], {}), '((self.image_depth.height, self.image_depth.width, 3))\n', (1988, 2042), False, 'import numpy\n'), ((2837, 2854), 'rospy.sleep', 'rospy.sleep', (['(0.01)'], {}), '(0.01)\n', (2848, 2854), False, 'import rospy\n'), ((3107, 3126), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (3124, 3126), False, 'import rospy\n'), ((490, 506), 'kinect_numpy_tools.numpy_msg.numpy_msg', 'numpy_msg', (['Image'], {}), '(Image)\n', (499, 506), False, 'from kinect_numpy_tools.numpy_msg import numpy_msg\n'), ((814, 830), 'kinect_numpy_tools.numpy_msg.numpy_msg', 'numpy_msg', (['Image'], {}), '(Image)\n', (823, 830), False, 'from kinect_numpy_tools.numpy_msg import numpy_msg\n'), ((1127, 1149), 'kinect_numpy_tools.numpy_msg.numpy_msg', 'numpy_msg', (['PointCloud2'], {}), '(PointCloud2)\n', (1136, 1149), False, 'from kinect_numpy_tools.numpy_msg import numpy_msg\n'), ((1381, 1410), 'numpy.isnan', 'numpy.isnan', (['image_depth.data'], {}), '(image_depth.data)\n', (1392, 1410), False, 'import numpy\n'), ((3383, 3396), 'sensor_msgs.msg.PointCloud2', 'PointCloud2', ([], {}), '()\n', (3394, 3396), False, 'from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField\n'), ((3928, 3995), 'numpy.rec.fromarrays', 'numpy.rec.fromarrays', (['(points_arr, image_rgb)'], {'names': "('xyz', 'rgb')"}), "((points_arr, image_rgb), names=('xyz', 'rgb'))\n", (3948, 3995), False, 'import numpy\n'), ((4131, 4182), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""x"""', 'offset': '(0)', 'datatype': '(7)', 'count': '(1)'}), "(name='x', offset=0, datatype=7, count=1)\n", (4141, 4182), False, 'from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField\n'), ((4221, 4272), 'sensor_msgs.msg.PointField', 
'PointField', ([], {'name': '"""y"""', 'offset': '(4)', 'datatype': '(7)', 'count': '(1)'}), "(name='y', offset=4, datatype=7, count=1)\n", (4231, 4272), False, 'from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField\n'), ((4311, 4362), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""z"""', 'offset': '(8)', 'datatype': '(7)', 'count': '(1)'}), "(name='z', offset=8, datatype=7, count=1)\n", (4321, 4362), False, 'from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField\n'), ((4401, 4455), 'sensor_msgs.msg.PointField', 'PointField', ([], {'name': '"""rgb"""', 'offset': '(16)', 'datatype': '(7)', 'count': '(1)'}), "(name='rgb', offset=16, datatype=7, count=1)\n", (4411, 4455), False, 'from sensor_msgs.msg import CameraInfo, Image, PointCloud2, PointField\n')] |
import matplotlib.pyplot as plt
import numpy as np
from brian2 import *
def visualise_connectivity(S):
Ns = len(S.source)
Nt = len(S.target)
figure(figsize=(10, 4))
subplot(121)
plot(zeros(Ns), arange(Ns), 'ok', ms=1)
plot(ones(Nt), arange(Nt), 'ok', ms=1)
for i, j in zip(S.i, S.j):
plot([0, 1], [i, j], '-k',linewidth=0.1)
xticks([0, 1], ['Source', 'Target'])
ylabel('Neuron index')
xlim(-0.1, 1.1)
ylim(-1, max(Ns, Nt))
subplot(122)
plot(S.i, S.j, 'ok', ms=1)
xlim(-1, Ns)
ylim(-1, Nt)
xlabel('Source neuron index')
ylabel('Target neuron index')
def bin_array(array, BIN, time_array):
N0 = int(BIN/(time_array[1]-time_array[0]))
N1 = int((time_array[-1]-time_array[0])/BIN)
return array[:N0*N1].reshape((N1,N0)).mean(axis=1)
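# Note on bin_array (added remark, kept commented out so the script is
# unchanged): with the values used below (DT = 0.1 ms, BIN = 5 ms,
# TotTime = 1e3 ms) it averages the 10000-sample rate trace into
# int(999.9 / 5) = 199 bins of 50 samples each; samples that do not fill a
# whole bin are dropped by the [:N0*N1] slice.
#   time_array = np.arange(int(1e3 / 0.1)) * 0.1
#   assert bin_array(np.ones(10000), 5, time_array).shape == (199,)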
Nsim='1'
start_scope()
# parameters
DT=0.1
defaultclock.dt = DT*ms
N1 = 2000
N2 = 8000
TotTime = 1e3
# TotTime=10
duration = TotTime*ms
seed(50)
eqs='''
dv/dt = (-GsynE*(v-Ee)-GsynI*(v-Ei)-gl*(v-El)+ gl*Dt*exp((v-Vt)/Dt)-w + Is)/Cm : volt (unless refractory)
dw/dt = (a*(v-El)-w)/tau_w:ampere
dGsynI/dt = -GsynI/Tsyn : siemens
dGsynE/dt = -GsynE/Tsyn : siemens
Is:ampere
Cm:farad
gl:siemens
El:volt
a:siemens
tau_w:second
Dt:volt
Vt:volt
Ee:volt
Ei:volt
Tsyn:second
'''#% neuron_params
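# Note on the equations above: this is a conductance-based adaptive
# exponential integrate-and-fire (AdEx) model, with exponential spike
# initiation around Vt, an adaptation current w (time constant tau_w), and
# excitatory/inhibitory synaptic conductances GsynE/GsynI decaying with Tsyn.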
# Population 1 - FS - inhibitory
b1 = 0.0*pA
G1 = NeuronGroup(N1, eqs, threshold='v > 0.0*mV', reset='v = -65*mV', refractory='5*ms', method='heun')
#init:
G1.v = -65.*mV
G1.w = 0.0*pA
G1.GsynI=0.0*nS
G1.GsynE=0.0*nS
#parameters
G1.Cm = 200.*pF
G1.gl = 10.*nS
G1.Is = 0.0
G1.a = 0.0*nS
G1.El = -65.0*mV
G1.Vt = -50.*mV
G1.Dt = 0.5*mV
G1.tau_w = 1.0*ms
G1.Ee=0.*mV
G1.Ei=-80.*mV
G1.Tsyn=5.*ms
# Population 2 - RS - excitatory
b2 = 10.*pA
G2 = NeuronGroup(N2, eqs, threshold='v > 0.0*mV', reset='v = -64.5*mV; w += b2', refractory='5*ms', method='heun')
G2.Cm = 200.*pF
G2.El = -64.5*mV
G2.gl = 10.*nS
G2.Is = 0.0*nA
G2.a = 0.*nS
G2.Dt = 2.*mV
G2.tau_w = 500.*ms
G2.Vt = -50.*mV
G2.v = -64.5*mV
G2.w = 0.0*pA
G2.GsynI=0.0*nS
G2.GsynE=0.0*nS
G2.Ee=0.*mV
G2.Ei=-80.*mV
G2.Tsyn=5.*ms
# external drive--------------------------------------------------------------------------
rate =140.0*2+400*1e-3
nb_P = 10
P_ed_1 = PoissonGroup(N1*nb_P, (rate/nb_P)*Hz)
P_ed_2 = PoissonGroup(N2*nb_P, (rate/nb_P)*Hz)
# connections-----------------------------------------------------------------------------
print("connection")
Qi=2.5*nS
Qe=1.*nS
prbC= 0.05
prbC2=0.05
S_12 = Synapses(G1, G2, on_pre='GsynI_post+=Qi')
S_12.connect(p=prbC2)
S_11 = Synapses(G1, G1, on_pre='GsynI_post+=Qi')
S_11.connect(condition='i != j',p=prbC2)
S_21 = Synapses(G2, G1, on_pre='GsynE_post+=Qe')
S_21.connect(p=prbC)
S_22 = Synapses(G2, G2, on_pre='GsynE_post+=Qe')
S_22.connect(condition='i != j', p=prbC)
S_ed_in = Synapses(P_ed_1, G1, on_pre='GsynE_post+=Qe')
S_ed_in.connect(condition='j == i % N1')
S_ed_ex = Synapses(P_ed_2, G2, on_pre='GsynE_post+=Qe')
S_ed_ex.connect(condition='j == i % N2')
M1G1 = SpikeMonitor(G1)
M2G1 = StateMonitor(G1, 'v', record=range(N1),dt=1*ms)
M3G1 = StateMonitor(G1, 'w', record=range(N1),dt=1*ms)
FRG1 = PopulationRateMonitor(G1)
#
M1G2 = SpikeMonitor(G2)
M2G2 = StateMonitor(G2, 'v', record=range(N2),dt=1*ms)
M3G2 = StateMonitor(G2, 'w', record=range(N2),dt=1*ms)
FRG2 = PopulationRateMonitor(G2)
print('--##Start simulation##--')
run(duration)
print('--##End simulation##--')
RasG1 = np.array([M1G1.t/ms, [i+N2 for i in M1G1.i]])
RasG2 = np.array([M1G2.t/ms, M1G2.i])
LVG1=[]
LwG1=[]
LVG2=[]
LwG2=[]
for a in range(N1):
LVG1.append(array(M2G1[a].v/mV))
LwG1.append(array(M3G1[a].w/pA))
for a in range(N2):
LVG2.append(array(M2G2[a].v/mV))
LwG2.append(array(M3G2[a].w/pA))
BIN=5
time_array = np.arange(int(TotTime/DT))*DT
LfrG2=np.array(FRG2.rate/Hz)
TimBinned,popRateG2=bin_array(time_array, BIN, time_array),bin_array(LfrG2, BIN, time_array)
LfrG1=np.array(FRG1.rate/Hz)
TimBinned,popRateG1=bin_array(time_array, BIN, time_array),bin_array(LfrG1, BIN, time_array)
Lt1G1=array(M2G1.t/ms)
Lt2G1=array(M3G1.t/ms)
Lt1G2=array(M2G2.t/ms)
Lt2G2=array(M3G2.t/ms)
mean_LVG1 = np.mean(LVG1,axis=0)
max_LVG1 = np.max(LVG1,axis=0)
min_LVG1 = np.min(LVG1,axis=0)
mean_LwG1 = np.mean(LwG1,axis=0)
max_LwG1 = np.max(LwG1,axis=0)
min_LwG1 = np.min(LwG1,axis=0)
mean_LVG2 = np.mean(LVG2,axis=0)
max_LVG2 = np.max(LVG2,axis=0)
min_LVG2 = np.min(LVG2,axis=0)
mean_LwG2 = np.mean(LwG2,axis=0)
max_LwG2 = np.max(LwG2,axis=0)
min_LwG2 = np.min(LwG2,axis=0)
fig=plt.figure(figsize=(12,4))
ax1=fig.add_subplot(221)
ax2=fig.add_subplot(222)
for a in range(1):
ax1.plot(Lt1G1, LVG1[a],'r:',linewidth=0.5)
ax1.plot(Lt1G2, LVG2[a],'g:',linewidth=0.5)
for a in range(5):
ax2.plot(Lt2G1, LwG1[a],'r:',linewidth=0.5)
ax2.plot(Lt2G2, LwG2[a],'g:',linewidth=0.5)
ax1.plot(Lt1G1, mean_LVG1,'r',linewidth=2.0)
ax2.plot(Lt2G1, mean_LwG1,'r',linewidth=2.0)
ax1.plot(Lt1G2, mean_LVG2,'g',linewidth=2.0)
ax2.plot(Lt2G2, mean_LwG2,'g',linewidth=2.0)
# ax1.plot(Lt1G1, max_LVG1,'r--',linewidth=0.5)
ax2.plot(Lt2G1, max_LwG1,'r--',linewidth=1.0)
# ax1.plot(Lt1G2, max_LVG2,'g--',linewidth=0.5)
ax2.plot(Lt2G2, max_LwG2,'g--',linewidth=1.0)
ax1.plot(Lt1G1, min_LVG1,'r--',linewidth=0.5)
ax2.plot(Lt2G1, min_LwG1,'r--',linewidth=1.0)
ax1.plot(Lt1G2, min_LVG2,'g--',linewidth=0.5)
ax2.plot(Lt2G2, min_LwG2,'g--',linewidth=1.0)
ax1.set_ylim([-100, 0])
ax1.set_xlabel('Time (ms)')
ax1.set_ylabel('V in (mV)')
ax2.set_xlabel('Time (ms)')
ax2.set_ylabel('W in (pA)')
ax3=fig.add_subplot(223)
ax3.plot(RasG1[0], RasG1[1], '.r',markersize=0.1)
ax3.plot(RasG2[0], RasG2[1], '.g',markersize=0.1)
ax3.set_xlabel('Time (ms)')
ax3.set_ylabel('Neuron index')
ax4=fig.add_subplot(224)
ax4.plot(TimBinned,popRateG1, 'r')
ax4.plot(TimBinned,popRateG2, 'g')
ax4.set_xlabel('Time (ms)')
ax4.set_ylabel('FR')
print(popRateG1.shape)
print(np.std(popRateG1[100:])/np.mean(popRateG1[100:]))
print(np.std(popRateG2[100:])/np.mean(popRateG2[100:]))
plt.show()
| [
"numpy.mean",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.show"
] | [((3417, 3468), 'numpy.array', 'np.array', (['[M1G1.t / ms, [(i + N2) for i in M1G1.i]]'], {}), '([M1G1.t / ms, [(i + N2) for i in M1G1.i]])\n', (3425, 3468), True, 'import numpy as np\n'), ((3471, 3502), 'numpy.array', 'np.array', (['[M1G2.t / ms, M1G2.i]'], {}), '([M1G2.t / ms, M1G2.i])\n', (3479, 3502), True, 'import numpy as np\n'), ((3781, 3805), 'numpy.array', 'np.array', (['(FRG2.rate / Hz)'], {}), '(FRG2.rate / Hz)\n', (3789, 3805), True, 'import numpy as np\n'), ((3904, 3928), 'numpy.array', 'np.array', (['(FRG1.rate / Hz)'], {}), '(FRG1.rate / Hz)\n', (3912, 3928), True, 'import numpy as np\n'), ((4126, 4147), 'numpy.mean', 'np.mean', (['LVG1'], {'axis': '(0)'}), '(LVG1, axis=0)\n', (4133, 4147), True, 'import numpy as np\n'), ((4158, 4178), 'numpy.max', 'np.max', (['LVG1'], {'axis': '(0)'}), '(LVG1, axis=0)\n', (4164, 4178), True, 'import numpy as np\n'), ((4189, 4209), 'numpy.min', 'np.min', (['LVG1'], {'axis': '(0)'}), '(LVG1, axis=0)\n', (4195, 4209), True, 'import numpy as np\n'), ((4221, 4242), 'numpy.mean', 'np.mean', (['LwG1'], {'axis': '(0)'}), '(LwG1, axis=0)\n', (4228, 4242), True, 'import numpy as np\n'), ((4253, 4273), 'numpy.max', 'np.max', (['LwG1'], {'axis': '(0)'}), '(LwG1, axis=0)\n', (4259, 4273), True, 'import numpy as np\n'), ((4284, 4304), 'numpy.min', 'np.min', (['LwG1'], {'axis': '(0)'}), '(LwG1, axis=0)\n', (4290, 4304), True, 'import numpy as np\n'), ((4316, 4337), 'numpy.mean', 'np.mean', (['LVG2'], {'axis': '(0)'}), '(LVG2, axis=0)\n', (4323, 4337), True, 'import numpy as np\n'), ((4348, 4368), 'numpy.max', 'np.max', (['LVG2'], {'axis': '(0)'}), '(LVG2, axis=0)\n', (4354, 4368), True, 'import numpy as np\n'), ((4379, 4399), 'numpy.min', 'np.min', (['LVG2'], {'axis': '(0)'}), '(LVG2, axis=0)\n', (4385, 4399), True, 'import numpy as np\n'), ((4411, 4432), 'numpy.mean', 'np.mean', (['LwG2'], {'axis': '(0)'}), '(LwG2, axis=0)\n', (4418, 4432), True, 'import numpy as np\n'), ((4443, 4463), 'numpy.max', 'np.max', (['LwG2'], {'axis': '(0)'}), '(LwG2, axis=0)\n', (4449, 4463), True, 'import numpy as np\n'), ((4474, 4494), 'numpy.min', 'np.min', (['LwG2'], {'axis': '(0)'}), '(LwG2, axis=0)\n', (4480, 4494), True, 'import numpy as np\n'), ((4499, 4526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (4509, 4526), True, 'import matplotlib.pyplot as plt\n'), ((5971, 5981), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5979, 5981), True, 'import matplotlib.pyplot as plt\n'), ((5886, 5910), 'numpy.mean', 'np.mean', (['popRateG1[100:]'], {}), '(popRateG1[100:])\n', (5893, 5910), True, 'import numpy as np\n'), ((5945, 5969), 'numpy.mean', 'np.mean', (['popRateG2[100:]'], {}), '(popRateG2[100:])\n', (5952, 5969), True, 'import numpy as np\n')] |
# coding: utf-8
from PIL import ImageGrab
import numpy as np
import cv2
fps = 20
start = 3 # delay before recording starts (seconds)
end = 15 # automatic end time (seconds)
curScreen = ImageGrab.grab() # grab the screen object
height, width = curScreen.size
video = cv2.VideoWriter('video02.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (height, width))
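# Note: PIL's Image.size is (width, height), so the unpacking above binds
# `height` to the screen width and vice versa; the swapped names cancel out
# here because cv2.VideoWriter also expects a (width, height) frame size.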
imageNum = 0
while True:
imageNum += 1
    captureImage = ImageGrab.grab() # capture the screen
frame = cv2.cvtColor(np.array(captureImage), cv2.COLOR_RGB2BGR)
    # show a window with no image content
cv2.imshow('capturing', np.zeros((1, 255), np.uint8))
    # control where the window appears, making it easy to quit via a keypress
cv2.moveWindow('capturing', height - 100, width - 100)
if imageNum > fps * start:
video.write(frame)
    # exit conditions
if cv2.waitKey(50) == ord('q') or imageNum > fps * end:
break
video.release()
cv2.destroyAllWindows() | [
"cv2.moveWindow",
"PIL.ImageGrab.grab",
"numpy.array",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoWriter_fourcc",
"cv2.waitKey"
] | [((132, 148), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (146, 148), False, 'from PIL import ImageGrab\n'), ((760, 783), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (781, 783), False, 'import cv2\n'), ((230, 261), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (252, 261), False, 'import cv2\n'), ((348, 364), 'PIL.ImageGrab.grab', 'ImageGrab.grab', ([], {}), '()\n', (362, 364), False, 'from PIL import ImageGrab\n'), ((546, 600), 'cv2.moveWindow', 'cv2.moveWindow', (['"""capturing"""', '(height - 100)', '(width - 100)'], {}), "('capturing', height - 100, width - 100)\n", (560, 600), False, 'import cv2\n'), ((398, 420), 'numpy.array', 'np.array', (['captureImage'], {}), '(captureImage)\n', (406, 420), True, 'import numpy as np\n'), ((485, 513), 'numpy.zeros', 'np.zeros', (['(1, 255)', 'np.uint8'], {}), '((1, 255), np.uint8)\n', (493, 513), True, 'import numpy as np\n'), ((677, 692), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (688, 692), False, 'import cv2\n')] |
import numpy as np
import sys
from os import path, getcwd, makedirs
import pickle
from utils import get_filename
from plotter_params import plot_setup
import scipy.stats as stats
import matplotlib.pyplot as plt
import pylab as pl
import matplotlib.ticker as ticker
plot_setup()
epsilon = 0.1
length = 160
filename = path.join(getcwd(), "data", "full_trace_estimations",
f"epsilon_{epsilon}_length_{length}".replace('.', '_'))
with open(path.join(filename, 'true_process_fidelity.pickle'), 'rb') as f:
true_pf = pickle.load(f)
with open(path.join(filename, 'true_zero_fidelity.pickle'), 'rb') as f:
true_zf = pickle.load(f)
with open(path.join(filename, 'process_fidelity_estimates.pickle'), 'rb') as f:
proc_fs = pickle.load(f)
with open(path.join(filename, 'zero_fidelity_estimates.pickle'), 'rb') as f:
zero_fs = pickle.load(f)
nbin = 80
pf_diffs = [i - true_pf for i in proc_fs[::]]
pf_range = np.max(pf_diffs) - np.min(pf_diffs)
pf_width = pf_range/nbin
p_fidels = sorted(pf_diffs)
fmean = np.mean(p_fidels)
fx, fy, _ = plt.hist(p_fidels, bins=nbin, alpha=0.6, density=True, label='Fidelity')
f_fit = stats.norm.pdf(p_fidels, fmean, np.std(p_fidels))
f_fit = f_fit*(np.max(fx)/np.max(f_fit))
plt.plot(p_fidels, f_fit, marker=None, lw=0.75)
plt.xlabel("Approximation error")
plt.ylabel("Frequency")
plt.xlim(-0.5, 0.5)
zf_diffs = [i - true_zf for i in zero_fs[::]]
zf_range = np.max(zf_diffs) - np.min(zf_diffs)
zf_nbins = np.int(zf_range/pf_width)
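# zf_nbins is derived from pf_width so that the figure-of-merit histogram uses
# (approximately) the same bin width as the fidelity histogram above, keeping
# the two distributions visually comparable.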
z_fidels = sorted(zf_diffs)
z_mean = np.mean(z_fidels)
zfx, zfy, _ = plt.hist(z_fidels, bins=zf_nbins, alpha=0.6, density=True, label='Figure of Merit')
z_fit = stats.norm.pdf(z_fidels, z_mean, np.std(z_fidels))
z_fit = z_fit*(np.max(zfx)/np.max(z_fit))
plt.plot(z_fidels, z_fit, marker=None, lw=0.75)
plt.xlabel("Approximation error")
plt.ylabel("Frequency")
plt.tight_layout(w_pad=-1)
plt.legend()
plt.savefig(path.join(filename, 'F_v_FOM_dists_160_evals_normed_eq_width.pdf'))
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pickle.load",
"numpy.std",
"os.path.join",
"os.getcwd",
"numpy.max",
"plotter_params.plot_setup",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"matplotlib.pyplot.xli... | [((266, 278), 'plotter_params.plot_setup', 'plot_setup', ([], {}), '()\n', (276, 278), False, 'from plotter_params import plot_setup\n'), ((1044, 1061), 'numpy.mean', 'np.mean', (['p_fidels'], {}), '(p_fidels)\n', (1051, 1061), True, 'import numpy as np\n'), ((1074, 1146), 'matplotlib.pyplot.hist', 'plt.hist', (['p_fidels'], {'bins': 'nbin', 'alpha': '(0.6)', 'density': '(True)', 'label': '"""Fidelity"""'}), "(p_fidels, bins=nbin, alpha=0.6, density=True, label='Fidelity')\n", (1082, 1146), True, 'import matplotlib.pyplot as plt\n'), ((1246, 1293), 'matplotlib.pyplot.plot', 'plt.plot', (['p_fidels', 'f_fit'], {'marker': 'None', 'lw': '(0.75)'}), '(p_fidels, f_fit, marker=None, lw=0.75)\n', (1254, 1293), True, 'import matplotlib.pyplot as plt\n'), ((1294, 1327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Approximation error"""'], {}), "('Approximation error')\n", (1304, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1338, 1351), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1371), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.5)', '(0.5)'], {}), '(-0.5, 0.5)\n', (1360, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1505), 'numpy.int', 'np.int', (['(zf_range / pf_width)'], {}), '(zf_range / pf_width)\n', (1484, 1505), True, 'import numpy as np\n'), ((1542, 1559), 'numpy.mean', 'np.mean', (['z_fidels'], {}), '(z_fidels)\n', (1549, 1559), True, 'import numpy as np\n'), ((1574, 1662), 'matplotlib.pyplot.hist', 'plt.hist', (['z_fidels'], {'bins': 'zf_nbins', 'alpha': '(0.6)', 'density': '(True)', 'label': '"""Figure of Merit"""'}), "(z_fidels, bins=zf_nbins, alpha=0.6, density=True, label=\n 'Figure of Merit')\n", (1582, 1662), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1806), 'matplotlib.pyplot.plot', 'plt.plot', (['z_fidels', 'z_fit'], {'marker': 'None', 'lw': '(0.75)'}), '(z_fidels, z_fit, marker=None, lw=0.75)\n', (1767, 1806), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1841), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Approximation error"""'], {}), "('Approximation error')\n", (1818, 1841), True, 'import matplotlib.pyplot as plt\n'), ((1842, 1865), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1852, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1893), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'w_pad': '(-1)'}), '(w_pad=-1)\n', (1883, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1894, 1906), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1904, 1906), True, 'import matplotlib.pyplot as plt\n'), ((1987, 1997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1995, 1997), True, 'import matplotlib.pyplot as plt\n'), ((329, 337), 'os.getcwd', 'getcwd', ([], {}), '()\n', (335, 337), False, 'from os import path, getcwd, makedirs\n'), ((541, 555), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (552, 555), False, 'import pickle\n'), ((643, 657), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (654, 657), False, 'import pickle\n'), ((753, 767), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (764, 767), False, 'import pickle\n'), ((860, 874), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (871, 874), False, 'import pickle\n'), ((945, 961), 'numpy.max', 'np.max', (['pf_diffs'], {}), '(pf_diffs)\n', (951, 961), True, 'import numpy as np\n'), ((964, 980), 'numpy.min', 'np.min', (['pf_diffs'], {}), 
'(pf_diffs)\n', (970, 980), True, 'import numpy as np\n'), ((1187, 1203), 'numpy.std', 'np.std', (['p_fidels'], {}), '(p_fidels)\n', (1193, 1203), True, 'import numpy as np\n'), ((1431, 1447), 'numpy.max', 'np.max', (['zf_diffs'], {}), '(zf_diffs)\n', (1437, 1447), True, 'import numpy as np\n'), ((1450, 1466), 'numpy.min', 'np.min', (['zf_diffs'], {}), '(zf_diffs)\n', (1456, 1466), True, 'import numpy as np\n'), ((1699, 1715), 'numpy.std', 'np.std', (['z_fidels'], {}), '(z_fidels)\n', (1705, 1715), True, 'import numpy as np\n'), ((1919, 1985), 'os.path.join', 'path.join', (['filename', '"""F_v_FOM_dists_160_evals_normed_eq_width.pdf"""'], {}), "(filename, 'F_v_FOM_dists_160_evals_normed_eq_width.pdf')\n", (1928, 1985), False, 'from os import path, getcwd, makedirs\n'), ((462, 513), 'os.path.join', 'path.join', (['filename', '"""true_process_fidelity.pickle"""'], {}), "(filename, 'true_process_fidelity.pickle')\n", (471, 513), False, 'from os import path, getcwd, makedirs\n'), ((567, 615), 'os.path.join', 'path.join', (['filename', '"""true_zero_fidelity.pickle"""'], {}), "(filename, 'true_zero_fidelity.pickle')\n", (576, 615), False, 'from os import path, getcwd, makedirs\n'), ((669, 725), 'os.path.join', 'path.join', (['filename', '"""process_fidelity_estimates.pickle"""'], {}), "(filename, 'process_fidelity_estimates.pickle')\n", (678, 725), False, 'from os import path, getcwd, makedirs\n'), ((779, 832), 'os.path.join', 'path.join', (['filename', '"""zero_fidelity_estimates.pickle"""'], {}), "(filename, 'zero_fidelity_estimates.pickle')\n", (788, 832), False, 'from os import path, getcwd, makedirs\n'), ((1220, 1230), 'numpy.max', 'np.max', (['fx'], {}), '(fx)\n', (1226, 1230), True, 'import numpy as np\n'), ((1231, 1244), 'numpy.max', 'np.max', (['f_fit'], {}), '(f_fit)\n', (1237, 1244), True, 'import numpy as np\n'), ((1732, 1743), 'numpy.max', 'np.max', (['zfx'], {}), '(zfx)\n', (1738, 1743), True, 'import numpy as np\n'), ((1744, 1757), 'numpy.max', 'np.max', (['z_fit'], {}), '(z_fit)\n', (1750, 1757), True, 'import numpy as np\n')] |
'''
HardDPMixModel.py
Bayesian nonparametric mixture model with an unbounded number of components K
'''
import numpy as np
from bnpy.allocmodel import DPMixModel
from bnpy.suffstats import SuffStatBag
from bnpy.util import NumericHardUtil
from bnpy.util import gammaln, digamma, EPS
class HardDPMixModel(DPMixModel):
def requireMergeTerms(self):
return False
######################################################### Local Params
#########################################################
def calc_local_params(self, Data, LP, **kwargs):
''' Calculate local parameters for each data item and each component.
This is part of the E-step.
Args
-------
Data : bnpy data object with Data.nObs observations
LP : local param dict with fields
E_log_soft_ev : Data.nObs x K array
E_log_soft_ev[n,k] = log p(data obs n | comp k)
Returns
-------
LP : local param dict with fields
resp : Data.nObs x K array whose rows sum to one
resp[n,k] = posterior responsibility that comp. k has for data n
'''
lpr = LP['E_log_soft_ev']
lpr += self.Elogw
LP['resp'] = NumericHardUtil.toHardAssignmentMatrix(lpr)
assert np.allclose(LP['resp'].sum(axis=1), 1)
return LP
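  # Illustration (hypothetical numbers, not from bnpy itself): assuming
  # toHardAssignmentMatrix one-hot encodes the row-wise argmax of the log
  # posterior, lpr = [[0.1, 2.0], [3.0, -1.0]] -> resp = [[0, 1], [1, 0]],
  # in contrast to the soft responsibilities of the parent DPMixModel.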
######################################################### Suff Stats
#########################################################
def get_global_suff_stats(self, Data, LP,
doPrecompEntropy=False,
doPrecompMergeEntropy=False, mPairIDs=None):
''' Calculate the sufficient statistics for global parameter updates
Only adds stats relevant for this allocModel.
Other stats are added by the obsModel.
Args
-------
Data : bnpy data object
LP : local param dict with fields
resp : Data.nObs x K array,
where resp[n,k] = posterior resp of comp k
doPrecompEntropy : boolean flag
indicates whether to precompute ELBO terms in advance
used for memoized learning algorithms (moVB)
doPrecompMergeEntropy : boolean flag
indicates whether to precompute ELBO terms in advance
for all possible merges of pairs of components
used for optional merge moves
Returns
-------
SS : SuffStats for K components, with field
N : vector of length-K,
effective number of observations assigned to each comp
'''
Nvec = np.sum(LP['resp'], axis=0)
SS = SuffStatBag(K=Nvec.size, D=Data.dim)
SS.setField('N', Nvec, dims=('K'))
return SS
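  # Because resp is one-hot under hard assignment, the sufficient statistic
  # N reduces to an integer count of the items assigned to each component.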
######################################################### Evidence
#########################################################
def calc_evidence(self, Data, SS, LP=None ):
    ''' Compute the evidence lower bound (ELBO) for the current model.
    '''
evV = self.E_logpV() - self.E_logqV()
evZq = 0
if SS.hasAmpFactor():
evZ = self.E_logpZ(SS) - SS.ampF * evZq
else:
evZ = self.E_logpZ(SS) - evZq
return evZ + evV
| [
"numpy.sum",
"bnpy.util.NumericHardUtil.toHardAssignmentMatrix",
"bnpy.suffstats.SuffStatBag"
] | [((1238, 1281), 'bnpy.util.NumericHardUtil.toHardAssignmentMatrix', 'NumericHardUtil.toHardAssignmentMatrix', (['lpr'], {}), '(lpr)\n', (1276, 1281), False, 'from bnpy.util import NumericHardUtil\n'), ((2686, 2712), 'numpy.sum', 'np.sum', (["LP['resp']"], {'axis': '(0)'}), "(LP['resp'], axis=0)\n", (2692, 2712), True, 'import numpy as np\n'), ((2722, 2758), 'bnpy.suffstats.SuffStatBag', 'SuffStatBag', ([], {'K': 'Nvec.size', 'D': 'Data.dim'}), '(K=Nvec.size, D=Data.dim)\n', (2733, 2758), False, 'from bnpy.suffstats import SuffStatBag\n')] |
import pandas as pd
import numpy as np
from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes
import time
import sys, os
import pickle
#ensure compatibility for Python 2 and 3
called_version = sys.version_info
if called_version[0] == 2:
from Tkinter import Tk
from tkFileDialog import askopenfilename
elif called_version[0] == 3:
from tkinter import Tk,filedialog
def main(data_string,filename="processed.all.data"):
    '''Create or load a model, normalize the input data, then make a prediction'''
    #check whether a model already exists, otherwise create it
    model_savename = filename + 'ModelBayes.p'
    if not os.path.isfile('/'.join((os.path.dirname(os.path.realpath(__file__)),model_savename))):
        faireModele(filename)
    #open the pickled model; it contains model, data, scaler and encoder
    modeller = pickle.load(open(model_savename,'rb')) #b for binary, r for read
    #put the input data into array form and cast the types
    inputData = data_string.split(' ')
    inputData = [[float(e) for e in inputData]] #note: an extra dimension is added here
    #center and scale, required for the kernel model
    scaledData = normaliseTest(inputData, modeller['scaler'], modeller['encoder'])
    #make the prediction
    predictionSet = modeller['model'].predict(scaledData)
    print("%-30s%-4.2f%-1s" % ('The patient appears to be in category: ', predictionSet[0], '.'))
def faireModele(filename="processed.all.data"):
    '''Create a model from a file and save it to a pickle file'''
rawData = np.array(pd.read_csv(filename,header=None))
data = processFeaturesTarget(rawData)
training_scaled, scaler, encoder = normaliseTrain(data['X'])
model = naive_bayes.BernoulliNB().fit(training_scaled,data['y'])
    #save the model
model_savename = filename + 'ModelBayes.p'
pickle.dump({'model': model,'data': data,'scaler': scaler, 'encoder': encoder}, open(model_savename,'wb'))
def normalise(train, test, scalerType='Standard'):
    '''Return the normalized data. Note: only works for float data.'''
trainScaled, scaler, encoder = normaliseTrain(train)
testScaled = normaliseTest(test, scaler, encoder)
return (trainScaled, testScaled)
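# Note: the scaler and encoder are fit on the training split only and then
# reused on the test split, which avoids leaking test-set statistics into the
# preprocessing.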
def normaliseTrain(train):
    ''' Return the normalized data and the scaler. Note: only works for float data'''
    # encode the categorical values so they can be fed into an SVM model
    #auto: determine the number of categories; array: the features to process
    #sparse = False to return an array rather than a sparse matrix
    #not applied to sex and exercise-induced angina since they only have 2 categories
encoder = preprocessing.OneHotEncoder('auto',[2,6,10,12], sparse=False)
features = encoder.fit_transform(train)
    #put into supervised form
realValueIndex = 13
realValuedData = features[:, realValueIndex:]
    #normalize with the standard method (mean and standard deviation):
scaler = preprocessing.StandardScaler()
scaledData = scaler.fit_transform(realValuedData)
    # return the correctly encoded features
return np.hstack((features[:,:realValueIndex],scaledData)),scaler,encoder
def normaliseTest(test, scaler, encoder):
    '''Take the scaler info as arguments and apply it to the test dataset'''
    #encode the categorical values
features = encoder.transform(test)
    #put into supervised form
realValueIndex = 13
realValuedData = features[:, realValueIndex:]
scaledData = scaler.transform(realValuedData)
return np.hstack((features[:, :realValueIndex], scaledData))
def processFeaturesTarget(inputData):
"""Separe les features en entrée et le label attendu
On enleve la distinction des maladies aussi, on ne garde que accident ou pas accident"""
    #copy the inputData
inputDataWork = inputData.copy()
    # replace the '?' marking missing values with nan, then convert to float
inputDataWork[inputDataWork == '?'] = np.nan
inputDataWork.astype(np.float)
    #replace missing values with the most frequent ones
features = preprocessing.Imputer(strategy='most_frequent').fit_transform(inputDataWork[:, :-1])
    # Drop the distinction between diseases: labels >= 1 become 1
    #the dataset is too small to hope for satisfying results while keeping the disease distinctions
inputDataWork[:, -1][inputDataWork[:, -1] >= 1] = 1
return ({'X': features, 'y': np.array(inputDataWork[:, -1], dtype='f')})
def analyse(verbose=0,filename = "processed.all.data", shuffling=True ):
    ''' Run the analysis of the results'''
t0 = time.time()
rawdata = np.array(pd.read_csv(filename,header=None))
data = processFeaturesTarget(rawdata)
score = crossValidation(data, verbose, shuffling)
if verbose >= 0:
print("%-15s%-15s%-15s%-15s" % ('Moy Accuracy', 'Moy Recall', 'Moy Precision', 'Moy ROC_AUC'))
print("%-15.3f%-15.3f%-15.3f%-15.3f" % (np.mean(score[0]), np.mean(score[1]), np.mean(score[2]), np.mean(score[3])))
print('Runtime is ', time.time() - t0, '(s)')
return score
def crossValidation(data, verbose=0, shuffling=True):
"10-fold validation"
kf = model_selection.KFold(n_splits=10, shuffle=shuffling)
accuracies = np.array([])
recalls = np.array([])
precisions = np.array([])
rocs = np.array([])
numFold = 1
for train_index, test_index in kf.split(data['X']):
X_train, X_test = data['X'][train_index], data['X'][test_index]
y_train, y_test = data['y'][train_index], data['y'][test_index]
X_train, X_test = normalise(X_train, X_test)
        #no real grid search needed since there is no hyperparameter to tune
clf = model_selection.GridSearchCV(
naive_bayes.BernoulliNB(), {'alpha': [1]}
).fit(X_train, y_train)
        #print the estimator if verbosity is high
if verbose>3:print(clf.best_estimator_)
        #apply the estimator to the whole test set
preds = clf.predict(X_test)
predsProb = clf.predict_proba(X_test)
        #print the label and the probability assigned to the 'sick' class for every tested row
if verbose>2:
comp=[[y_test[i],predsProb[i,1]] for i in range(len(y_test))]
for row in comp: print(row)
        #compute the metrics and append them to the list of measured metrics
roc = metrics.roc_auc_score(y_test, predsProb[:, 1])
acc = metrics.accuracy_score(y_test, preds)
recall = metrics.recall_score(y_test, preds)
precision = metrics.precision_score(y_test, preds)
rocs = np.append(rocs, roc)
accuracies = np.append(accuracies, acc)
recalls = np.append(recalls, recall)
precisions = np.append(precisions, precision)
        #print the metrics for this fold if verbose
if verbose>1:
print('%-15s%-15s%-15s%-15s%-15s'%('Fold','Accuracy','Recall','Precision','ROC_AUC'))
print('%-15.3f%-15.3f%-15.3f%-15.3f%-15.3f'%(numFold,acc,recall,precision,roc))
numFold += 1
    #return the per-fold metrics; analyse() takes their means
    return (np.array((accuracies, recalls, precisions, rocs)))
#if __name__ == "__main__":
# inputs = ' '.join(sys.argv[1:])
# main(inputs)
#main("62.0 0.0 4.0 140.0 268.0 0.0 2.0 160.0 0.0 3.6 3.0 2.0 3.0")
#main("62.0 0.0 4.0 140.0 268.0 0.0 2.0 160.0 0.0 3.6 3.0 2.0 3.0","processed.cleveland.data")
#main("44.0 1.0 2.0 120.0 263.0 0.0 0.0 173.0 0.0 0.0 1.0 0.0 7.0")
#main("44.0 1.0 2.0 120.0 263.0 0.0 0.0 173.0 0.0 0.0 1.0 0.0 7.0","processed.cleveland.data")
analyse(1)
#analyse(1,"processed.cleveland.data",False) | [
"numpy.mean",
"pandas.read_csv",
"numpy.hstack",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.Imputer",
"sklearn.metrics.roc_auc_score",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"sklearn.metrics.recall_score",
"sklearn.metrics.precision_score",
"numpy.append",
"o... | [((2803, 2868), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', (['"""auto"""', '[2, 6, 10, 12]'], {'sparse': '(False)'}), "('auto', [2, 6, 10, 12], sparse=False)\n", (2830, 2868), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((3102, 3132), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (3130, 3132), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((3704, 3757), 'numpy.hstack', 'np.hstack', (['(features[:, :realValueIndex], scaledData)'], {}), '((features[:, :realValueIndex], scaledData))\n', (3713, 3757), True, 'import numpy as np\n'), ((4796, 4807), 'time.time', 'time.time', ([], {}), '()\n', (4805, 4807), False, 'import time\n'), ((5370, 5423), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {'n_splits': '(10)', 'shuffle': 'shuffling'}), '(n_splits=10, shuffle=shuffling)\n', (5391, 5423), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((5442, 5454), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5450, 5454), True, 'import numpy as np\n'), ((5469, 5481), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5477, 5481), True, 'import numpy as np\n'), ((5499, 5511), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5507, 5511), True, 'import numpy as np\n'), ((5523, 5535), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5531, 5535), True, 'import numpy as np\n'), ((7326, 7374), 'numpy.array', 'np.array', (['(accuracies, recalls, precisions, roc)'], {}), '((accuracies, recalls, precisions, roc))\n', (7334, 7374), True, 'import numpy as np\n'), ((1667, 1701), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': 'None'}), '(filename, header=None)\n', (1678, 1701), True, 'import pandas as pd\n'), ((3251, 3304), 'numpy.hstack', 'np.hstack', (['(features[:, :realValueIndex], scaledData)'], {}), '((features[:, :realValueIndex], scaledData))\n', (3260, 3304), True, 'import numpy as np\n'), ((4623, 4664), 'numpy.array', 'np.array', (['inputDataWork[:, -1]'], {'dtype': '"""f"""'}), "(inputDataWork[:, -1], dtype='f')\n", (4631, 4664), True, 'import numpy as np\n'), ((4832, 4866), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': 'None'}), '(filename, header=None)\n', (4843, 4866), True, 'import pandas as pd\n'), ((6586, 6632), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_test', 'predsProb[:, 1]'], {}), '(y_test, predsProb[:, 1])\n', (6607, 6632), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((6647, 6684), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (6669, 6684), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((6702, 6737), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (6722, 6737), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((6758, 6796), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_test', 'preds'], {}), '(y_test, preds)\n', (6781, 6796), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((6812, 6832), 'numpy.append', 'np.append', (['rocs', 'roc'], {}), '(rocs, roc)\n', (6821, 
6832), True, 'import numpy as np\n'), ((6854, 6880), 'numpy.append', 'np.append', (['accuracies', 'acc'], {}), '(accuracies, acc)\n', (6863, 6880), True, 'import numpy as np\n'), ((6899, 6925), 'numpy.append', 'np.append', (['recalls', 'recall'], {}), '(recalls, recall)\n', (6908, 6925), True, 'import numpy as np\n'), ((6947, 6979), 'numpy.append', 'np.append', (['precisions', 'precision'], {}), '(precisions, precision)\n', (6956, 6979), True, 'import numpy as np\n'), ((1822, 1847), 'sklearn.naive_bayes.BernoulliNB', 'naive_bayes.BernoulliNB', ([], {}), '()\n', (1845, 1847), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((4269, 4316), 'sklearn.preprocessing.Imputer', 'preprocessing.Imputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (4290, 4316), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((5238, 5249), 'time.time', 'time.time', ([], {}), '()\n', (5247, 5249), False, 'import time\n'), ((5136, 5153), 'numpy.mean', 'np.mean', (['score[0]'], {}), '(score[0])\n', (5143, 5153), True, 'import numpy as np\n'), ((5155, 5172), 'numpy.mean', 'np.mean', (['score[1]'], {}), '(score[1])\n', (5162, 5172), True, 'import numpy as np\n'), ((5174, 5191), 'numpy.mean', 'np.mean', (['score[2]'], {}), '(score[2])\n', (5181, 5191), True, 'import numpy as np\n'), ((5193, 5210), 'numpy.mean', 'np.mean', (['score[3]'], {}), '(score[3])\n', (5200, 5210), True, 'import numpy as np\n'), ((5923, 5948), 'sklearn.naive_bayes.BernoulliNB', 'naive_bayes.BernoulliNB', ([], {}), '()\n', (5946, 5948), False, 'from sklearn import preprocessing, svm, ensemble, neighbors, metrics, model_selection, naive_bayes\n'), ((743, 769), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (759, 769), False, 'import sys, os\n')] |
# -*- coding: utf-8 -*-
"""
@Author: YYM
@Institute: CASIA
"""
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.jit as jit
import math
from scipy import signal
from network.ObjectClasses import Neuron, Spike
from network.ReservoirDefinitions import create_random_reservoir
thresh = 0.5
dampening_factor = 0.3
lens = 0.5
decay = 0.2
if_bias = True
def to_device(input):
return input.cuda()
################SNU model###################
class ActFun(torch.autograd.Function):
@staticmethod
def forward(ctx, input, a=thresh):
ctx.save_for_backward(input)
return input.gt(a).float()
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = grad_output.clone()
temp = torch.max((1 - torch.abs(input/thresh)), to_device(torch.tensor(0)).float()) * dampening_factor
return grad_input * temp.float(), None
# @staticmethod
# def backward(ctx, grad_h):
# z = ctx.saved_tensors
# s = torch.sigmoid(z[0])
# d_input = (1 - s) * s * grad_h
# return d_input
act_fun = ActFun.apply
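# Reading of ActFun above: the forward pass is a hard threshold, while the
# backward pass replaces its almost-everywhere-zero derivative with a
# triangular surrogate max(0, 1 - |v/thresh|) scaled by dampening_factor,
# letting gradients flow through the spike nonlinearity.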
def mem_update(ops, x, v_mem, spike, lateral = None):
v_mem = v_mem * decay * (1. - spike) + ops(x)
if lateral:
v_mem += lateral(spike)
spike = act_fun(v_mem)
return v_mem, spike
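# mem_update implements one leaky integrate-and-fire step: the membrane
# potential decays by `decay`, is reset after a spike via the (1 - spike)
# factor, integrates the synaptic input ops(x) (plus optional lateral
# connections), and emits a spike through the surrogate-gradient threshold.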
############################################
###############LSM_SNU model################
class SNN(nn.Module):
def __init__(self, batch_size, input_size, num_classes, encoding_num=4, possion_num=50, gpu='0'):
super(SNN, self).__init__()
self.batch_size = batch_size
self.input_size = input_size
self.num_classes = num_classes*encoding_num #every class coded by 4 neurons
self.possion_num = possion_num
self.fc1 = nn.Linear(self.input_size, self.num_classes, bias = if_bias)
self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
def forward(self, input, task, time_window):
self.fc1 = self.fc1.float()
batch_size = input.shape[0]
#monitor
self.monitor_input = torch.zeros(batch_size, self.input_size, self.possion_num).cuda()
self.monitor_fc1 = torch.zeros(batch_size, self.num_classes, self.possion_num).cuda()
h1_mem = h1_spike = h1_sumspike = torch.zeros(batch_size, self.num_classes).cuda()
for step in range(time_window):
x = input
sum_out = None
for t in range(time_window):
if task == "LSM":
x_t = x[:,t].float().cuda()
elif task == 'STDP':
x_t = x[:,t].cuda()
x_t = x_t.view(batch_size, -1)
h1_mem, h1_spike = mem_update(self.fc1, x_t, h1_mem, h1_spike)
#h1_sumspike += h1_spike
with torch.no_grad():
self.monitor_fc1[:,:,t] = h1_spike.detach()
sum_out = h1_spike if sum_out is None else sum_out + h1_spike
#outputs = h2_sumspike / time_window
return sum_out
def stdp_step(self,reward, lr):
r_stdp(self.monitor_input[0],self.monitor_fc1[0],self.fc1.weight, reward, lr=lr)
class LSMNetwork:
def __init__(self, dims, frac_inhibitory, w_matrix, fanout,
simulation_steps, num_in_ch, tau=20, t_ref=10,
propagation_time=10, ignore_frac=0.0,each_step_reset=True):
#simulation_steps : total number of simulation steps to simulate time T in steps of dt = T/dt
self.reset = each_step_reset
self.ignore_frac = ignore_frac
self.propagation_time = propagation_time
self.tau = tau
self.t_ref = t_ref
self.dims = dims
self.n_nodes = dims[0]*dims[1]*dims[2]
self.num_in_ch = num_in_ch
if num_in_ch<=self.n_nodes:
mapped_nodes = torch.from_numpy(np.random.choice(self.n_nodes, size=num_in_ch, replace=False))
else:
mapped_nodes = torch.from_numpy(np.random.choice(self.n_nodes, size=num_in_ch, replace=True))
self.mapped_nodes = mapped_nodes
self.frac_inibitory = frac_inhibitory
self.w_matrix = w_matrix
self.fanout = fanout
adj_mat, all_connections, all_weights = create_random_reservoir(dims, frac_inhibitory, w_matrix, fanout)
#self.adj_mat = adj_mat
self.all_connections = all_connections
self.all_weights = all_weights
self.neuronList = [Neuron(i, all_connections[i], all_weights[i], fanout, tau, t_ref, propagation_time) for i in range(len(all_connections))]
self.simulation_steps = simulation_steps
self.current_time_step = 0
self.action_buffer = []
#TODO: Replace list in a more efficient way
for i in range(simulation_steps):
self.action_buffer.append([])
return
def add_input(self, input_spike_train):
#input_spike_train : num_channels x simulation_steps matrix of all channels of the input spike train
# for i in range(len(self.neuronList)):
# self.neuronList[i] = Neuron(i, self.all_connections[i], self.all_weights[i], self.fanout, self.tau, self.t_ref, self.propagation_time)
for t_step in range(input_spike_train.shape[1]):
self.action_buffer[t_step] = []
for ch in range(self.num_in_ch):
if input_spike_train[ch,t_step] > 0:
#TODO: Replace list in a more efficient way
self.action_buffer[t_step].append((input_spike_train[ch,t_step], self.mapped_nodes[ch]))
return
def simulate(self):
rate_coding = torch.zeros([self.n_nodes,self.simulation_steps])
frac = self.ignore_frac
for t_step in range(self.simulation_steps):
#print(t_step)
if len(self.action_buffer[t_step])>0:
for action in self.action_buffer[t_step]:
spike_val = action[0]
target_node = action[1]
spike_produced = self.neuronList[int(target_node)].receive_spike(t_step, spike_val)
if spike_produced != None:
if t_step > frac*self.simulation_steps:
rate_coding[target_node][t_step] += 1
receiver_nodes = spike_produced.receiver_nodes
spike_values = spike_produced.spike_values
receive_times = spike_produced.receive_times
for node in range(len(receiver_nodes)):
if(receive_times[node]<self.simulation_steps):
#TODO: Replace list in a more efficient way
self.action_buffer[int(receive_times[node])].append((int(spike_values[node]), receiver_nodes[node]))
#if self.reset:
#reset
for i in range(len(self.neuronList)):
self.neuronList[i].reset_spike()
for step in range(self.simulation_steps):
self.action_buffer[t_step] = []
return rate_coding
############################################
#################e-prop#####################
class EPropBase(torch.autograd.Function):
@staticmethod
def forward(ctx,
input,
spike,
v_hidden,
a,
weight_hh,
weight_ih,
e_trace_hh,
ev_w_hh_x,
ea_w_hh_x,
e_trace_ih,
ev_w_ih_x,
ea_w_ih_x,
gamma_pd,
thresh,
alpha,
beta,
rho):
# TODO:Not on the same device
# Remove the main diagonal element
temp_w_hh = weight_hh - to_device(torch.eye(weight_hh.size(0))) * weight_hh
v_hidden_t = alpha * v_hidden + torch.mm(spike, temp_w_hh) + torch.mm(input, weight_ih) - v_hidden.gt(a).float() * a
spike_t = act_fun(v_hidden_t,a)
a = rho * a + spike_t
A = thresh + beta * a
psi = 1/thresh * gamma_pd * torch.max(to_device(torch.tensor(0)).float(), (1-torch.abs(1/thresh*(v_hidden_t-A))))
spike_for_hh = spike.resize(input.size(0),weight_ih.size(1),1).repeat(1,1,weight_ih.size(1))
#TODO: Compare the current spike or the previous spike
ev_w_hh_x = alpha * ev_w_hh_x + spike_for_hh
e_trace_hh = psi[:,None,:] * (ev_w_hh_x - beta * ea_w_hh_x)
ea_w_hh_x = psi[:,None,:] * ev_w_hh_x + (rho - psi[:,None,:] * beta) * ea_w_hh_x
spike_for_ih = input.resize(input.size(0),weight_ih.size(0),1).repeat(1,1,weight_ih.size(1))
ev_w_ih_x = alpha * ev_w_ih_x + spike_for_ih
e_trace_ih = psi[:,None,:] * (ev_w_ih_x - beta * ea_w_ih_x)
ea_w_ih_x = psi[:,None,:] * ev_w_ih_x + (rho - psi[:,None,:] * beta) * ea_w_ih_x
ctx.save_for_backward(e_trace_hh, e_trace_ih)
return spike_t, v_hidden_t, A, e_trace_hh,ev_w_hh_x,ea_w_hh_x,e_trace_ih,ev_w_ih_x,ea_w_ih_x
@staticmethod
def backward(ctx, grad_spike_t, grad_hidden_t, grad_A, grad_e_trace_hh,grad_ev_w_hh_x,grad_ea_w_hh_x,grad_e_trace_ih,grad_ev_w_ih_x,grad_ea_w_ih_x):
#TODO: Rewrite backward
e_trace_hh, e_trace_ih, = ctx.saved_variables
grad_weight_ih = e_trace_ih * grad_spike_t.reshape(grad_spike_t.size(0),1,grad_spike_t.size(1)).repeat(1,e_trace_ih.shape[1],1)
grad_weight_hh = e_trace_hh * grad_spike_t.reshape(grad_spike_t.size(0),1,grad_spike_t.size(1)).repeat(1,e_trace_hh.shape[1],1)
#input, spike, v_hidden, a, weight_hh, weight_ih, e_trace_hh, ev_w_hh_x, ea_w_hh_x, e_trace_ih, ev_w_ih_x, ea_w_ih_x, gamma_pd, thresh, alpha, beta, rho
return None, None, None, None, grad_weight_hh, grad_weight_ih, None,None,None,None,None,None, None, None, None, None, None
eprop = EPropBase.apply
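# Sketch of the e-prop idea implemented above: per-synapse eligibility
# vectors and traces (ev_*, ea_*, e_trace_*) are updated forward in time, and
# backward() turns the incoming learning signal into weight gradients by
# multiplying it with the stored traces instead of backpropagating through time.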
###################Recurrent################
class RSNU(nn.Module):
def __init__(self, input_size, hidden_size, bias=False):
super(RSNU, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
# Create for weight matrices, one for each gate + recurrent connections
self.weight_ih = nn.Parameter(torch.Tensor(input_size, hidden_size))
self.weight_hh = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
if bias:
self.bias_ih = nn.Parameter(torch.Tensor(hidden_size))
self.bias_hh = nn.Parameter(torch.Tensor(hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.initialize_parameters(self.weight_ih, self.bias_ih)
self.initialize_parameters(self.weight_hh, self.bias_hh)
def initialize_parameters(self, weight, bias):
nn.init.kaiming_uniform_(weight, a=math.sqrt(5))
if bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(bias, -bound, bound)
class EPropRSNU(RSNU):
def __init__(self, input_size, hidden_size, bias=True, thresh=1, beta=0.1, dt=1,tau=10, tau_e=70,gpu='0'):
super(EPropRSNU, self).__init__(input_size, hidden_size, bias)
self.thresh = thresh
self.beta = beta
self.dt = dt
self.tau = torch.tensor(tau).float()
self.tau_e = torch.tensor(tau_e).float()
self.alpha = torch.exp(-self.dt / self.tau)
self.rho = torch.exp(-self.dt / self.tau_e)
self.a = None
self.batch_size = None
self.v_hidden = None
self.eligibility_vectors = []
self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
def forward(self, input, initial_v_h):
'''
        input: (batch, input_size, seq_len)
        initial_hidden: (batch, hidden_size)
        initial_state: (batch, hidden_size)
        cut input into time slices along dim 2
'''
input = input.to(self.device)
inputs = input.unbind(2)
input_size = input.size(1)
self.hidden_size = initial_v_h.size(1)
self.batch_size = input.size(0)
batch_size = input.size(0)
if self.v_hidden == None:
self.v_hidden = initial_v_h.to(self.device)
spike = torch.zeros_like(self.v_hidden)
if self.a == None:
self.a = torch.zeros_like(self.v_hidden)
if len(self.eligibility_vectors) == 0:
ev_w_hh_x = torch.zeros(batch_size, self.hidden_size, self.hidden_size, requires_grad=False).to(self.device)
ea_w_hh_x = torch.zeros_like(ev_w_hh_x)
e_trace_hh = torch.zeros_like(ev_w_hh_x)
ev_w_ih_x = torch.zeros(batch_size, self.input_size, self.hidden_size, requires_grad=False).to(self.device)
ea_w_ih_x = torch.zeros_like(ev_w_ih_x)
e_trace_ih = torch.zeros_like(ev_w_ih_x)
self.eligibility_vectors = [e_trace_hh, ev_w_hh_x, ea_w_hh_x, e_trace_ih, ev_w_ih_x, ea_w_ih_x]
outputs = []
gamma_pd=0.3
for i in range(len(inputs)):
spike, self.v_hidden, self.a, self.eligibility_vectors[0], self.eligibility_vectors[1], self.eligibility_vectors[2], self.eligibility_vectors[3], self.eligibility_vectors[4], self.eligibility_vectors[5] = eprop(inputs[i], spike, self.v_hidden, self.a, self.weight_hh, self.weight_ih, self.eligibility_vectors[0], self.eligibility_vectors[1], self.eligibility_vectors[2], self.eligibility_vectors[3], self.eligibility_vectors[4], self.eligibility_vectors[5], gamma_pd, self.thresh, self.alpha, self.beta, self.rho)
outputs += [spike]
return torch.stack(outputs)
def reset(self):
self.a = None
self.v_hidden = None
self.eligibility_vectors = []
##############cerebellar model##############
def gen_mask(row, col, percent=0.5, num_ones=None):
if num_ones is None:
        # By default, mask a fraction `percent` (0.5) of the rows in each column.
num_ones = int(row * percent)
mask = np.zeros([row,col])
for i in range(col):
temp_mask = np.hstack([
np.zeros(num_ones),
np.ones(row - num_ones)])
np.random.shuffle(temp_mask)
mask[:,i] = temp_mask
return mask.reshape(row, col)
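# Quick illustration (hypothetical values, kept commented out so the module's
# behaviour is unchanged):
#   m = gen_mask(8, 3, num_ones=4)
#   assert m.shape == (8, 3) and (m.sum(axis=0) == 4).all()
# i.e. every column keeps exactly 4 ones and has 4 randomly placed zeros.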
class SparseLinearFunction(torch.autograd.Function):
"""
    autograd function which masks its weights by 'mask'.
"""
# Note that both forward and backward are @staticmethods
@staticmethod
# bias, mask is an optional argument
def forward(ctx, input, weight, bias=None, mask=None):
if mask is not None:
# change weight to 0 where mask == 0
weight = weight * mask
output = input.mm(weight.t())
if bias is not None:
output += bias.unsqueeze(0).expand_as(output)
ctx.save_for_backward(input, weight, bias, mask)
return output
# This function has only a single output, so it gets only one gradient
@staticmethod
def backward(ctx, grad_output):
input, weight, bias, mask = ctx.saved_tensors
grad_input = grad_weight = grad_bias = grad_mask = None
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
grad_input = grad_output.mm(weight)
if ctx.needs_input_grad[1]:
grad_weight = grad_output.t().mm(input)
if mask is not None:
# change grad_weight to 0 where mask == 0
grad_weight = grad_weight * mask
# if bias is not None and ctx.needs_input_grad[2]:
if ctx.needs_input_grad[2]:
grad_bias = grad_output.sum(0).squeeze(0)
return grad_input, grad_weight, grad_bias, grad_mask
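# Masking both the forward weights and grad_weight keeps pruned connections
# at exactly zero: they contribute nothing to the output and receive no
# updates, so the sparsity pattern is preserved throughout training.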
class SparseLinear(nn.Module):
def __init__(self, input_features, output_features, bias=True, mask=None):
"""
        Arguments
        ------------------
        mask [numpy.array]:
            the shape is (n_input_feature, n_output_feature).
            the elements are 0 or 1, marking a connection as absent or
            present.
        bias [bool]:
            flag for bias.
"""
super(SparseLinear, self).__init__()
self.input_features = input_features
self.output_features = output_features
# nn.Parameter is a special kind of Tensor, that will get
# automatically registered as Module's parameter once it's assigned
# as an attribute.
self.weight = nn.Parameter(torch.Tensor(
self.output_features, self.input_features))
if bias:
self.bias = nn.Parameter(
torch.Tensor(self.output_features))
else:
# You should always register all possible parameters, but the
# optional ones can be None if you want.
self.register_parameter('bias', None)
# Initialize the above parameters (weight & bias).
self.init_params()
if mask is not None:
mask = torch.tensor(mask, dtype=torch.float).t()
self.mask = nn.Parameter(mask, requires_grad=False)
# print('\n[!] CustomizedLinear: \n', self.weight.data.t())
else:
self.register_parameter('mask', None)
def init_params(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
# See the autograd section for explanation of what happens here.
return SparseLinearFunction.apply(
input, self.weight, self.bias, self.mask)
def extra_repr(self):
# (Optional)Set the extra information about this module. You can test
# it by printing an object of this class.
return 'input_features={}, output_features={}, bias={}, mask={}'.format(
self.input_features, self.output_features,
self.bias is not None, self.mask is not None)
class cere_SNN(nn.Module):
def __init__(self,batch_size,num_in_MF,num_out_MF,num_out_GC,num_out_PC,num_out_DCN,possion_num=50):
super(cere_SNN, self).__init__()
self.batch_size = batch_size
self.num_in_MF = num_in_MF*16
self.num_out_MF = num_out_MF*4
self.num_out_GC = num_out_GC*4
self.num_out_PC = num_out_PC*4
self.num_out_DCN = num_out_DCN*8
self.possion_num = possion_num
MF_GC_mask = gen_mask(self.num_out_MF, self.num_out_GC,num_ones=4)
self.fc1 = nn.Linear(self.num_in_MF, self.num_out_MF, bias = if_bias)
self.fc2 = SparseLinear(self.num_out_MF, self.num_out_GC, bias = if_bias,mask=MF_GC_mask)
self.fc3 = nn.Linear(self.num_out_GC, self.num_out_PC, bias = if_bias)
self.fc4 = nn.Linear(self.num_out_PC, self.num_out_DCN, bias = if_bias)
print("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def forward(self,m,u,sigma,f, time_window):
self.fc1 = self.fc1.float()
# self.fc3.weight.data = torch.clamp(self.fc3.weight, -100, 0)
h1_mem = h1_spike = h1_sumspike = torch.zeros(self.batch_size, self.num_out_MF).cuda()
h2_mem = h2_spike = h2_sumspike = torch.zeros(self.batch_size, self.num_out_GC).cuda()
h3_mem = h3_spike = h3_sumspike = torch.zeros(self.batch_size, self.num_out_PC).cuda()
h4_mem = h4_spike = h4_sumspike = torch.zeros(self.batch_size, self.num_out_DCN).cuda()
input = torch.cat((m,u,sigma,f),1)
x = input
sum_out = None
for t in range(time_window):
x_t = x[:,:,t]
x_t = x_t.view(self.batch_size, -1)
h1_mem, h1_spike = mem_update(self.fc1, x_t, h1_mem, h1_spike)
#h1_sumspike += h1_spike
h2_mem, h2_spike = mem_update(self.fc2, h1_spike, h2_mem, h2_spike)
#h1_sumspike += h1_spike
h3_mem, h3_spike = mem_update(self.fc3, h2_spike, h3_mem, h3_spike)
#h1_sumspike += h1_spike
#DCN:
baseline_MF = torch.mean(h1_spike)
m_DCN_in = -h3_spike + torch.abs(baseline_MF)
h4_mem, h4_spike = mem_update(self.fc4, m_DCN_in, h4_mem, h4_spike)
#h1_sumspike += h1_spike
sum_out = h4_spike if sum_out is None else sum_out + h4_spike
return sum_out
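    # Reading of the DCN stage above: Purkinje-cell output is inhibitory, so
    # the DCN receives an excitatory mossy-fiber baseline (the mean MF spike
    # rate) minus the PC spikes, mirroring the sign convention of the
    # cerebellar microcircuit.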
class cere_model(nn.Module):
def __init__(self,batch_size,num_in_MF,
num_out_MF,num_out_GC,num_out_PC,num_out_DCN,possion_num=50,gpu='0'):
super(cere_model,self).__init__()
self.batch_size = batch_size
self.num_out_MF = num_out_MF
self.num_out_GC = num_out_GC
self.num_out_PC = num_out_PC
self.num_out_DCN = num_out_DCN
self.possion_num = possion_num
self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
#MF: every class coded by 4 neurons
self.m_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
self.u_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
self.sigma_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
self.f_MF = nn.Linear(num_in_MF*4, num_out_MF, bias = if_bias)
#GC:
num_in_GC=num_out_MF
self.m_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
self.u_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
self.sigma_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
self.f_GC = nn.Linear(num_in_GC, num_out_GC, bias = if_bias)
#PC:
num_in_PC=4*num_out_GC
#PC E
self.m_PCE = nn.Linear(num_in_PC, num_out_PC, bias = if_bias)
#PC I
self.m_PCI = nn.Linear(num_in_PC, num_out_PC, bias = if_bias)
#DCN: every class coded by 4 neurons
num_in_DCN=num_out_PC
self.m_DCN = nn.Linear(num_in_DCN, num_out_DCN*8, bias = if_bias)
print("number of parameters: %e", sum(p.numel() for p in self.parameters()))
# #STDP prams
# #monitor
# self.monitor_MF_m = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
# self.monitor_MF_u = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
# self.monitor_MF_sigma = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
# self.monitor_MF_f = torch.zeros(self.batch_size, self.num_out_MF, self.possion_num).to(self.device)
# self.monitor_GC_m = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
# self.monitor_GC_u = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
# self.monitor_GC_sigma = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
# self.monitor_GC_f = torch.zeros(self.batch_size, self.num_out_GC, self.possion_num).to(self.device)
# self.monitor_PCE_m = torch.zeros(self.batch_size, self.num_out_PC, self.possion_num).to(self.device)
# self.monitor_PCI_m = torch.zeros(self.batch_size, self.num_out_PC, self.possion_num).to(self.device)
# self.monitor_DCN_m = torch.zeros(self.batch_size, self.num_out_DCN*8, self.possion_num).to(self.device)
def forward(self,m,u,sigma,f, time_window):
batch_size = m.shape[0]
self.monitor_DCN_m = torch.zeros(batch_size, self.num_out_DCN*8, self.possion_num).cuda()
m_MF_mem = m_MF_spike = u_MF_mem = u_MF_spike = sigma_MF_mem = sigma_MF_spike = f_MF_mem = f_MF_spike = torch.zeros(batch_size, self.num_out_MF).cuda()
m_GC_mem = m_GC_spike = u_GC_mem = u_GC_spike = sigma_GC_mem = sigma_GC_spike = f_GC_mem = f_GC_spike = torch.zeros(batch_size, self.num_out_GC).cuda()
m_PCE_mem = m_PCE_spike = u_PCE_mem = u_PCE_spike = m_PCI_mem = m_PCI_spike = u_PCI_mem = u_PCI_spike = torch.zeros(batch_size, self.num_out_PC).cuda()
m_DCN_mem = m_DCN_spike = torch.zeros(batch_size, self.num_out_DCN*8).cuda()
m = m.float()
u = u.float()
sigma = sigma.float()
f = f.float()
for step in range(time_window):
sum_out_m = sum_out_u = None
#x = x.float()
#x = x.view(self.batch_size, -1)
#x_t = torch.from_numpy(x[:,t]).float().to(self.device)
for t in range(time_window):
m_t = m[:,:,t]
m_t = m_t.view(batch_size, -1)
u_t = u[:,:,t]
u_t = u_t.view(batch_size, -1)
sigma_t = sigma[:,:,t]
sigma_t = sigma_t.view(batch_size, -1)
f_t = f[:,:,t]
f_t = f_t.view(batch_size, -1)
#MF:
m_MF_mem, m_MF_spike = mem_update(self.m_MF, m_t, m_MF_mem, m_MF_spike)
u_MF_mem, u_MF_spike = mem_update(self.u_MF, u_t, u_MF_mem, u_MF_spike)
sigma_MF_mem, sigma_MF_spike = mem_update(self.sigma_MF, sigma_t, sigma_MF_mem, sigma_MF_spike)
f_MF_mem, f_MF_spike = mem_update(self.f_MF, f_t, f_MF_mem, f_MF_spike)
#GC:
m_GC_mem, m_GC_spike = mem_update(self.m_GC, m_MF_spike, m_GC_mem, m_GC_spike)
u_GC_mem, u_GC_spike = mem_update(self.u_GC, u_MF_spike, u_GC_mem, u_GC_spike)
sigma_GC_mem, sigma_GC_spike = mem_update(self.sigma_GC, sigma_MF_spike, sigma_GC_mem, sigma_GC_spike)
f_GC_mem, f_GC_spike = mem_update(self.f_GC, f_MF_spike, f_GC_mem, f_GC_spike)
#PC:
#combine tensor
PF_in = torch.cat((m_GC_spike,u_GC_spike,sigma_GC_spike,f_GC_spike),0)
PF_in = PF_in.view(batch_size, -1)
MF_in = torch.cat((m_MF_spike,u_MF_spike,sigma_MF_spike,f_MF_spike),0)
MF_in = MF_in.view(batch_size, -1)
m_PCE_mem, m_PCE_spike = mem_update(self.m_PCE, PF_in, m_PCE_mem, m_PCE_spike)
m_PCI_mem, m_PCI_spike = mem_update(self.m_PCI, PF_in, m_PCI_mem, m_PCI_spike)
#DCN:
baseline_MF = torch.mean(MF_in)
m_DCN_in = m_PCE_spike + m_PCI_spike + baseline_MF
m_DCN_mem, m_DCN_spike = mem_update(self.m_DCN, m_DCN_in, m_DCN_mem, m_DCN_spike)
# with torch.no_grad():
# self.monitor_MF_m[:,:,t] = m_MF_spike.detach()
# self.monitor_MF_u[:,:,t] = u_MF_spike.detach()
# self.monitor_MF_sigma[:,:,t] = sigma_MF_spike.detach()
# self.monitor_MF_f[:,:,t] = f_MF_spike.detach()
# self.monitor_GC_m[:,:,t] = m_GC_spike.detach()
# self.monitor_GC_u[:,:,t] = u_GC_spike.detach()
# self.monitor_GC_sigma[:,:,t] = sigma_GC_spike.detach()
# self.monitor_GC_f[:,:,t] = f_GC_spike.detach()
# self.monitor_PCE_m[:,:,t] = m_PCE_spike.detach()
# self.monitor_PCI_m[:,:,t] = m_PCI_spike.detach()
self.monitor_DCN_m[:,:,t] = m_DCN_spike
sum_out_m = m_DCN_spike if sum_out_m is None else sum_out_m + m_DCN_spike
return sum_out_m#, self.monitor_DCN_m
# def stdp_step(self, reward, lr):
# r_stdp(self.monitor_PCE_m[0],self.monitor_DCN_m[0],self.m_DCN.weight[:,0:self.num_out_PC], reward,lr=lr)
# r_stdp(self.monitor_PCI_m[0],self.monitor_DCN_m[0],self.m_DCN.weight[:,self.num_out_PC:2 * self.num_out_PC], reward,lr=lr)
# r_stdp(self.monitor_GC_m[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,0:self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_u[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,self.num_out_GC:2*self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_sigma[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,2*self.num_out_GC:3*self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_f[0],self.monitor_PCE_m[0],self.m_PCE.weight[:,3*self.num_out_GC:4*self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_m[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,0:self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_u[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,self.num_out_GC:2*self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_sigma[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,2*self.num_out_GC:3*self.num_out_GC], reward,lr=lr)
# r_stdp(self.monitor_GC_f[0],self.monitor_PCI_m[0],self.m_PCI.weight[:,3*self.num_out_GC:4*self.num_out_GC], reward,lr=lr)
############################################
##############prefrontal model##############
class prefrontal_model_FF(nn.Module):
def __init__(self, batch_size, num_hidden,N_step,possion_num=50,gpu='0'):
        super(prefrontal_model_FF, self).__init__()
self.batch_size = batch_size
self.num_hidden = num_hidden
self.inputnum = 16*4
self.output = 4
self.possion_num = possion_num
self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
#layer
self.fc1 = nn.Linear(self.inputnum, self.num_hidden, bias = if_bias)
self.fc2 = nn.Linear(self.num_hidden, 2*self.num_hidden, bias = if_bias)
self.fc3 = nn.Linear(2*self.num_hidden, self.num_hidden, bias = if_bias)
self.fc4 = nn.Linear(self.num_hidden, self.num_hidden, bias = if_bias)
self.fc5 = nn.Linear(self.num_hidden, self.output, bias = if_bias)
#monitor
self.monitor_h1 = torch.zeros(self.batch_size, self.num_hidden, self.possion_num).to(self.device)
def forward(self,input, time_window):
h1_mem = h1_spike = h1_sumspike = torch.zeros(self.batch_size, self.num_hidden).to(self.device)
h2_mem = h2_spike = h2_sumspike = torch.zeros(self.batch_size, 2*self.num_hidden).to(self.device)
h3_mem = h3_spike = h3_sumspike = torch.zeros(self.batch_size, self.num_hidden).to(self.device)
h4_mem = h4_spike = h4_sumspike = torch.zeros(self.batch_size, self.num_hidden).to(self.device)
h5_mem = h5_spike = h5_sumspike = torch.zeros(self.batch_size, self.output).to(self.device)
for step in range(time_window):
            sum_out = None
#x = x.view(self.batch_size, -1)
for t in range(time_window):
#get signal
#x_t = torch.from_numpy(x[:,t]).float().cuda()
input_t = input[:,t].float()
input_t = input_t.view(self.batch_size, -1)
#layer forward
h1_mem, h1_spike = mem_update(self.fc1, input_t, h1_mem, h1_spike)
#h1_sumspike += h1_spike
h2_mem, h2_spike = mem_update(self.fc2, h1_spike, h2_mem, h2_spike)
#h2_sumspike += h2_spike
h3_mem, h3_spike = mem_update(self.fc3, h2_spike, h3_mem, h3_spike)
#h3_sumspike += h3_spike
h4_mem, h4_spike = mem_update(self.fc4, h3_spike, h4_mem, h4_spike)
#h4_sumspike += h4_spike
h5_mem, h5_spike = mem_update(self.fc5, h4_spike, h5_mem, h5_spike)
#h5_sumspike += h5_spike
sum_out = h5_spike if sum_out is None else sum_out + h5_spike
#outputs = h2_sumspike / time_window
return sum_out
class prefrontal_model(nn.Module):
def __init__(self, batch_size, num_hidden1, num_hidden2, num_hidden3,N_step,possion_num=50,gpu='0'):
super(prefrontal_model, self).__init__()
self.batch_size = batch_size
self.num_hidden1 = num_hidden1
self.num_hidden2 = num_hidden2
self.num_hidden3 = num_hidden3
self.inputnum = 16*4
self.output = 4
self.possion_num = possion_num
self.device = torch.device("cuda:"+gpu if torch.cuda.is_available() else "cpu")
#layer
self.fc1 = nn.Linear(self.inputnum, self.num_hidden1, bias = if_bias)
self.fc2 = nn.Linear(self.num_hidden1, self.num_hidden2, bias = if_bias)
self.fc3 = nn.Linear(self.num_hidden2, self.num_hidden2, bias = if_bias)
self.fc4 = nn.Linear(self.num_hidden2, self.num_hidden3, bias = if_bias)
self.fc5 = nn.Linear(self.num_hidden3, self.output, bias = if_bias)
def forward(self,input, time_window):
'''
input1: (f(x,t), f(y,t))
input2: (u(x,t+n), sigma(x,t+n), u(y,t+n), sigma(y,t+n))
input3: (delta(z), m(z,t-1), m_dot(z,t-1),speed_limiter)
input4: m_dot(z,t-1)
'''
batch_size = input.shape[0]
h1_mem = h1_spike = h1_sumspike = torch.zeros(batch_size, self.num_hidden1).to(self.device)
h2_mem = h2_spike = h2_sumspike = torch.zeros(batch_size, self.num_hidden2).to(self.device)
h3_mem = h3_spike = h3_sumspike = torch.zeros(batch_size, self.num_hidden2).to(self.device)
h4_mem = h4_spike = h4_sumspike = torch.zeros(batch_size, self.num_hidden3).to(self.device)
h5_mem = h5_spike = h5_sumspike = torch.zeros(batch_size, self.output).to(self.device)
if type(input) is np.ndarray:
input = torch.from_numpy(input).float().to(self.device)
else:
input = input.float().to(self.device)
sum_out = None
#x = x.view(self.batch_size, -1)
for t in range(time_window):
x_t = input[:,:,t]
x_t = x_t.reshape(batch_size, -1)
#layer forward
h1_mem, h1_spike = mem_update(self.fc1, x_t, h1_mem, h1_spike)
#h1_sumspike += h1_spike
h2_mem, h2_spike = mem_update(self.fc2, h1_spike, h2_mem, h2_spike)
h3_mem, h3_spike = mem_update(self.fc3, h2_spike, h3_mem, h3_spike)
h4_mem, h4_spike = mem_update(self.fc4, h3_spike, h4_mem, h4_spike)
h5_mem, h5_spike = mem_update(self.fc5, h4_spike, h5_mem, h5_spike)
sum_out = h5_spike if sum_out is None else sum_out + h5_spike
#outputs = h2_sumspike / time_window
return sum_out
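# --- minimal smoke test (added illustration; not part of the original model) ---
# Shapes follow the constructor above: inputnum = 16*4 features over possion_num
# time steps. All sizes and the N_step value below are arbitrary placeholders,
# and mem_update / if_bias are assumed to be defined earlier in this file.
if __name__ == '__main__':
    _pf = prefrontal_model(batch_size=2, num_hidden1=64, num_hidden2=128,
                          num_hidden3=64, N_step=1)
    _x = np.random.rand(2, 16 * 4, 50)   # (batch, features, time)
    _out = _pf(_x, time_window=50)       # summed output spikes, shape (2, 4)
    print(_out.shape)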
############################################
# TODO: May be problems here
def r_stdp(pre_spike, post_spike, weight, reward, lr=0.03):
'''
pre_spike: (pre_neuron_num, timescale)
post_spike: (post_neuron_num, timescale)
layer.weight: (post_neuron_num, pre_neuron_num)
'''
    A_positive = 1
A_negative = -1
tao_positive = 20
tao_negative = 20
tao_z = 25
if pre_spike.size()[1] != post_spike.size()[1] or pre_spike.size()[0] != weight.data.size()[1] or post_spike.size()[0] != weight.data.size()[0]:
        print('matrix dimension error')
return False
pre_size = pre_spike.size()[0]
post_size = post_spike.size()[0]
timescale = pre_spike.size()[1]
P_positive = torch.zeros_like(pre_spike[:,0]).view(1,-1)
P_negative = torch.zeros_like(post_spike[:,0]).view(1,-1)
z = 0
dt = 1
for t in range(0,timescale):
temp_pre = pre_spike[:,t].view(1,-1)
temp_post = post_spike[:,t].view(1,-1)
        # exponentially decaying pre/post eligibility traces, bumped by each spike
        P_positive = P_positive * math.exp(-dt/tao_positive) + A_positive * temp_pre
        P_negative = P_negative * math.exp(-dt/tao_negative) + A_negative * temp_post
temp_post_transpose = torch.t(temp_post)
P_negative_transpose = torch.t(P_negative)
kesai = torch.mm(temp_post_transpose, P_positive) + torch.mm(P_negative_transpose, temp_pre)
z = z * math.exp(-dt/tao_z) + kesai
weight.data = weight.data + lr * reward * z
weight.data = torch.clamp(weight.data, -100,100)
return True
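# --- minimal usage sketch (added illustration; not part of the original code) ---
# The docstring above fixes the expected shapes, so a smoke test can be built
# from random spike trains; all sizes are arbitrary, and the weight comes from a
# bias-free nn.Linear so that weight.shape == (post_neuron_num, pre_neuron_num).
if __name__ == '__main__':
    _pre = (torch.rand(16, 50) > 0.8).float()    # (pre_neuron_num, timescale)
    _post = (torch.rand(4, 50) > 0.8).float()    # (post_neuron_num, timescale)
    _layer = nn.Linear(16, 4, bias=False)
    assert r_stdp(_pre, _post, _layer.weight, reward=1.0, lr=0.03)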
| [
"math.sqrt",
"torch.exp",
"torch.from_numpy",
"torch.cuda.is_available",
"math.exp",
"torch.mean",
"torch.zeros_like",
"torch.nn.init.uniform_",
"torch.abs",
"numpy.ones",
"numpy.random.choice",
"network.ObjectClasses.Neuron",
"torch.Tensor",
"torch.nn.init._calculate_fan_in_and_fan_out",
... | [((14180, 14200), 'numpy.zeros', 'np.zeros', (['[row, col]'], {}), '([row, col])\n', (14188, 14200), True, 'import numpy as np\n'), ((1840, 1898), 'torch.nn.Linear', 'nn.Linear', (['self.input_size', 'self.num_classes'], {'bias': 'if_bias'}), '(self.input_size, self.num_classes, bias=if_bias)\n', (1849, 1898), True, 'import torch.nn as nn\n'), ((4354, 4418), 'network.ReservoirDefinitions.create_random_reservoir', 'create_random_reservoir', (['dims', 'frac_inhibitory', 'w_matrix', 'fanout'], {}), '(dims, frac_inhibitory, w_matrix, fanout)\n', (4377, 4418), False, 'from network.ReservoirDefinitions import create_random_reservoir\n'), ((5746, 5796), 'torch.zeros', 'torch.zeros', (['[self.n_nodes, self.simulation_steps]'], {}), '([self.n_nodes, self.simulation_steps])\n', (5757, 5796), False, 'import torch\n'), ((11547, 11577), 'torch.exp', 'torch.exp', (['(-self.dt / self.tau)'], {}), '(-self.dt / self.tau)\n', (11556, 11577), False, 'import torch\n'), ((11597, 11629), 'torch.exp', 'torch.exp', (['(-self.dt / self.tau_e)'], {}), '(-self.dt / self.tau_e)\n', (11606, 11629), False, 'import torch\n'), ((12438, 12469), 'torch.zeros_like', 'torch.zeros_like', (['self.v_hidden'], {}), '(self.v_hidden)\n', (12454, 12469), False, 'import torch\n'), ((13813, 13833), 'torch.stack', 'torch.stack', (['outputs'], {}), '(outputs)\n', (13824, 13833), False, 'import torch\n'), ((14324, 14352), 'numpy.random.shuffle', 'np.random.shuffle', (['temp_mask'], {}), '(temp_mask)\n', (14341, 14352), True, 'import numpy as np\n'), ((18855, 18911), 'torch.nn.Linear', 'nn.Linear', (['self.num_in_MF', 'self.num_out_MF'], {'bias': 'if_bias'}), '(self.num_in_MF, self.num_out_MF, bias=if_bias)\n', (18864, 18911), True, 'import torch.nn as nn\n'), ((19031, 19088), 'torch.nn.Linear', 'nn.Linear', (['self.num_out_GC', 'self.num_out_PC'], {'bias': 'if_bias'}), '(self.num_out_GC, self.num_out_PC, bias=if_bias)\n', (19040, 19088), True, 'import torch.nn as nn\n'), ((19110, 19168), 'torch.nn.Linear', 'nn.Linear', (['self.num_out_PC', 'self.num_out_DCN'], {'bias': 'if_bias'}), '(self.num_out_PC, self.num_out_DCN, bias=if_bias)\n', (19119, 19168), True, 'import torch.nn as nn\n'), ((19819, 19849), 'torch.cat', 'torch.cat', (['(m, u, sigma, f)', '(1)'], {}), '((m, u, sigma, f), 1)\n', (19828, 19849), False, 'import torch\n'), ((21283, 21333), 'torch.nn.Linear', 'nn.Linear', (['(num_in_MF * 4)', 'num_out_MF'], {'bias': 'if_bias'}), '(num_in_MF * 4, num_out_MF, bias=if_bias)\n', (21292, 21333), True, 'import torch.nn as nn\n'), ((21354, 21404), 'torch.nn.Linear', 'nn.Linear', (['(num_in_MF * 4)', 'num_out_MF'], {'bias': 'if_bias'}), '(num_in_MF * 4, num_out_MF, bias=if_bias)\n', (21363, 21404), True, 'import torch.nn as nn\n'), ((21429, 21479), 'torch.nn.Linear', 'nn.Linear', (['(num_in_MF * 4)', 'num_out_MF'], {'bias': 'if_bias'}), '(num_in_MF * 4, num_out_MF, bias=if_bias)\n', (21438, 21479), True, 'import torch.nn as nn\n'), ((21500, 21550), 'torch.nn.Linear', 'nn.Linear', (['(num_in_MF * 4)', 'num_out_MF'], {'bias': 'if_bias'}), '(num_in_MF * 4, num_out_MF, bias=if_bias)\n', (21509, 21550), True, 'import torch.nn as nn\n'), ((21614, 21660), 'torch.nn.Linear', 'nn.Linear', (['num_in_GC', 'num_out_GC'], {'bias': 'if_bias'}), '(num_in_GC, num_out_GC, bias=if_bias)\n', (21623, 21660), True, 'import torch.nn as nn\n'), ((21683, 21729), 'torch.nn.Linear', 'nn.Linear', (['num_in_GC', 'num_out_GC'], {'bias': 'if_bias'}), '(num_in_GC, num_out_GC, bias=if_bias)\n', (21692, 21729), True, 'import torch.nn as nn\n'), ((21756, 21802), 
'torch.nn.Linear', 'nn.Linear', (['num_in_GC', 'num_out_GC'], {'bias': 'if_bias'}), '(num_in_GC, num_out_GC, bias=if_bias)\n', (21765, 21802), True, 'import torch.nn as nn\n'), ((21825, 21871), 'torch.nn.Linear', 'nn.Linear', (['num_in_GC', 'num_out_GC'], {'bias': 'if_bias'}), '(num_in_GC, num_out_GC, bias=if_bias)\n', (21834, 21871), True, 'import torch.nn as nn\n'), ((21954, 22000), 'torch.nn.Linear', 'nn.Linear', (['num_in_PC', 'num_out_PC'], {'bias': 'if_bias'}), '(num_in_PC, num_out_PC, bias=if_bias)\n', (21963, 22000), True, 'import torch.nn as nn\n'), ((22039, 22085), 'torch.nn.Linear', 'nn.Linear', (['num_in_PC', 'num_out_PC'], {'bias': 'if_bias'}), '(num_in_PC, num_out_PC, bias=if_bias)\n', (22048, 22085), True, 'import torch.nn as nn\n'), ((22185, 22237), 'torch.nn.Linear', 'nn.Linear', (['num_in_DCN', '(num_out_DCN * 8)'], {'bias': 'if_bias'}), '(num_in_DCN, num_out_DCN * 8, bias=if_bias)\n', (22194, 22237), True, 'import torch.nn as nn\n'), ((29501, 29556), 'torch.nn.Linear', 'nn.Linear', (['self.inputnum', 'self.num_hidden'], {'bias': 'if_bias'}), '(self.inputnum, self.num_hidden, bias=if_bias)\n', (29510, 29556), True, 'import torch.nn as nn\n'), ((29578, 29639), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden', '(2 * self.num_hidden)'], {'bias': 'if_bias'}), '(self.num_hidden, 2 * self.num_hidden, bias=if_bias)\n', (29587, 29639), True, 'import torch.nn as nn\n'), ((29659, 29720), 'torch.nn.Linear', 'nn.Linear', (['(2 * self.num_hidden)', 'self.num_hidden'], {'bias': 'if_bias'}), '(2 * self.num_hidden, self.num_hidden, bias=if_bias)\n', (29668, 29720), True, 'import torch.nn as nn\n'), ((29740, 29797), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden', 'self.num_hidden'], {'bias': 'if_bias'}), '(self.num_hidden, self.num_hidden, bias=if_bias)\n', (29749, 29797), True, 'import torch.nn as nn\n'), ((29819, 29872), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden', 'self.output'], {'bias': 'if_bias'}), '(self.num_hidden, self.output, bias=if_bias)\n', (29828, 29872), True, 'import torch.nn as nn\n'), ((32369, 32425), 'torch.nn.Linear', 'nn.Linear', (['self.inputnum', 'self.num_hidden1'], {'bias': 'if_bias'}), '(self.inputnum, self.num_hidden1, bias=if_bias)\n', (32378, 32425), True, 'import torch.nn as nn\n'), ((32447, 32506), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden1', 'self.num_hidden2'], {'bias': 'if_bias'}), '(self.num_hidden1, self.num_hidden2, bias=if_bias)\n', (32456, 32506), True, 'import torch.nn as nn\n'), ((32528, 32587), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden2', 'self.num_hidden2'], {'bias': 'if_bias'}), '(self.num_hidden2, self.num_hidden2, bias=if_bias)\n', (32537, 32587), True, 'import torch.nn as nn\n'), ((32609, 32668), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden2', 'self.num_hidden3'], {'bias': 'if_bias'}), '(self.num_hidden2, self.num_hidden3, bias=if_bias)\n', (32618, 32668), True, 'import torch.nn as nn\n'), ((32690, 32744), 'torch.nn.Linear', 'nn.Linear', (['self.num_hidden3', 'self.output'], {'bias': 'if_bias'}), '(self.num_hidden3, self.output, bias=if_bias)\n', (32699, 32744), True, 'import torch.nn as nn\n'), ((35730, 35748), 'torch.t', 'torch.t', (['temp_post'], {}), '(temp_post)\n', (35737, 35748), False, 'import torch\n'), ((35780, 35799), 'torch.t', 'torch.t', (['P_negative'], {}), '(P_negative)\n', (35787, 35799), False, 'import torch\n'), ((36020, 36055), 'torch.clamp', 'torch.clamp', (['weight.data', '(-100)', '(100)'], {}), '(weight.data, -100, 100)\n', (36031, 36055), False, 'import torch\n'), ((4564, 4651), 
'network.ObjectClasses.Neuron', 'Neuron', (['i', 'all_connections[i]', 'all_weights[i]', 'fanout', 'tau', 't_ref', 'propagation_time'], {}), '(i, all_connections[i], all_weights[i], fanout, tau, t_ref,\n propagation_time)\n', (4570, 4651), False, 'from network.ObjectClasses import Neuron, Spike\n'), ((10374, 10411), 'torch.Tensor', 'torch.Tensor', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (10386, 10411), False, 'import torch\n'), ((10451, 10489), 'torch.Tensor', 'torch.Tensor', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (10463, 10489), False, 'import torch\n'), ((11013, 11058), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['weight'], {}), '(weight)\n', (11050, 11058), True, 'import torch.nn as nn\n'), ((11113, 11150), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['bias', '(-bound)', 'bound'], {}), '(bias, -bound, bound)\n', (11129, 11150), True, 'import torch.nn as nn\n'), ((12519, 12550), 'torch.zeros_like', 'torch.zeros_like', (['self.v_hidden'], {}), '(self.v_hidden)\n', (12535, 12550), False, 'import torch\n'), ((12744, 12771), 'torch.zeros_like', 'torch.zeros_like', (['ev_w_hh_x'], {}), '(ev_w_hh_x)\n', (12760, 12771), False, 'import torch\n'), ((12797, 12824), 'torch.zeros_like', 'torch.zeros_like', (['ev_w_hh_x'], {}), '(ev_w_hh_x)\n', (12813, 12824), False, 'import torch\n'), ((12970, 12997), 'torch.zeros_like', 'torch.zeros_like', (['ev_w_ih_x'], {}), '(ev_w_ih_x)\n', (12986, 12997), False, 'import torch\n'), ((13023, 13050), 'torch.zeros_like', 'torch.zeros_like', (['ev_w_ih_x'], {}), '(ev_w_ih_x)\n', (13039, 13050), False, 'import torch\n'), ((16814, 16869), 'torch.Tensor', 'torch.Tensor', (['self.output_features', 'self.input_features'], {}), '(self.output_features, self.input_features)\n', (16826, 16869), False, 'import torch\n'), ((17382, 17421), 'torch.nn.Parameter', 'nn.Parameter', (['mask'], {'requires_grad': '(False)'}), '(mask, requires_grad=False)\n', (17394, 17421), True, 'import torch.nn as nn\n'), ((20391, 20411), 'torch.mean', 'torch.mean', (['h1_spike'], {}), '(h1_spike)\n', (20401, 20411), False, 'import torch\n'), ((35261, 35294), 'torch.zeros_like', 'torch.zeros_like', (['pre_spike[:, 0]'], {}), '(pre_spike[:, 0])\n', (35277, 35294), False, 'import torch\n'), ((35322, 35356), 'torch.zeros_like', 'torch.zeros_like', (['post_spike[:, 0]'], {}), '(post_spike[:, 0])\n', (35338, 35356), False, 'import torch\n'), ((35817, 35858), 'torch.mm', 'torch.mm', (['temp_post_transpose', 'P_positive'], {}), '(temp_post_transpose, P_positive)\n', (35825, 35858), False, 'import torch\n'), ((35861, 35901), 'torch.mm', 'torch.mm', (['P_negative_transpose', 'temp_pre'], {}), '(P_negative_transpose, temp_pre)\n', (35869, 35901), False, 'import torch\n'), ((1951, 1976), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1974, 1976), False, 'import torch\n'), ((2157, 2215), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.input_size', 'self.possion_num'], {}), '(batch_size, self.input_size, self.possion_num)\n', (2168, 2215), False, 'import torch\n'), ((2250, 2309), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_classes', 'self.possion_num'], {}), '(batch_size, self.num_classes, self.possion_num)\n', (2261, 2309), False, 'import torch\n'), ((2359, 2400), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_classes'], {}), '(batch_size, self.num_classes)\n', (2370, 2400), False, 'import torch\n'), ((3974, 4035), 'numpy.random.choice', 
'np.random.choice', (['self.n_nodes'], {'size': 'num_in_ch', 'replace': '(False)'}), '(self.n_nodes, size=num_in_ch, replace=False)\n', (3990, 4035), True, 'import numpy as np\n'), ((4095, 4155), 'numpy.random.choice', 'np.random.choice', (['self.n_nodes'], {'size': 'num_in_ch', 'replace': '(True)'}), '(self.n_nodes, size=num_in_ch, replace=True)\n', (4111, 4155), True, 'import numpy as np\n'), ((8029, 8055), 'torch.mm', 'torch.mm', (['input', 'weight_ih'], {}), '(input, weight_ih)\n', (8037, 8055), False, 'import torch\n'), ((10548, 10573), 'torch.Tensor', 'torch.Tensor', (['hidden_size'], {}), '(hidden_size)\n', (10560, 10573), False, 'import torch\n'), ((10615, 10640), 'torch.Tensor', 'torch.Tensor', (['hidden_size'], {}), '(hidden_size)\n', (10627, 10640), False, 'import torch\n'), ((10946, 10958), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (10955, 10958), False, 'import math\n'), ((11083, 11100), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (11092, 11100), False, 'import math\n'), ((11451, 11468), 'torch.tensor', 'torch.tensor', (['tau'], {}), '(tau)\n', (11463, 11468), False, 'import torch\n'), ((11498, 11517), 'torch.tensor', 'torch.tensor', (['tau_e'], {}), '(tau_e)\n', (11510, 11517), False, 'import torch\n'), ((11800, 11825), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11823, 11825), False, 'import torch\n'), ((14262, 14280), 'numpy.zeros', 'np.zeros', (['num_ones'], {}), '(num_ones)\n', (14270, 14280), True, 'import numpy as np\n'), ((14290, 14313), 'numpy.ones', 'np.ones', (['(row - num_ones)'], {}), '(row - num_ones)\n', (14297, 14313), True, 'import numpy as np\n'), ((16953, 16987), 'torch.Tensor', 'torch.Tensor', (['self.output_features'], {}), '(self.output_features)\n', (16965, 16987), False, 'import torch\n'), ((19455, 19500), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_out_MF'], {}), '(self.batch_size, self.num_out_MF)\n', (19466, 19500), False, 'import torch\n'), ((19550, 19595), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_out_GC'], {}), '(self.batch_size, self.num_out_GC)\n', (19561, 19595), False, 'import torch\n'), ((19645, 19690), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_out_PC'], {}), '(self.batch_size, self.num_out_PC)\n', (19656, 19690), False, 'import torch\n'), ((19740, 19786), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_out_DCN'], {}), '(self.batch_size, self.num_out_DCN)\n', (19751, 19786), False, 'import torch\n'), ((20447, 20469), 'torch.abs', 'torch.abs', (['baseline_MF'], {}), '(baseline_MF)\n', (20456, 20469), False, 'import torch\n'), ((21181, 21206), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21204, 21206), False, 'import torch\n'), ((23714, 23777), 'torch.zeros', 'torch.zeros', (['batch_size', '(self.num_out_DCN * 8)', 'self.possion_num'], {}), '(batch_size, self.num_out_DCN * 8, self.possion_num)\n', (23725, 23777), False, 'import torch\n'), ((23895, 23935), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_out_MF'], {}), '(batch_size, self.num_out_MF)\n', (23906, 23935), False, 'import torch\n'), ((24055, 24095), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_out_GC'], {}), '(batch_size, self.num_out_GC)\n', (24066, 24095), False, 'import torch\n'), ((24216, 24256), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_out_PC'], {}), '(batch_size, self.num_out_PC)\n', (24227, 24256), False, 'import torch\n'), ((24298, 24343), 'torch.zeros', 'torch.zeros', (['batch_size', 
'(self.num_out_DCN * 8)'], {}), '(batch_size, self.num_out_DCN * 8)\n', (24309, 24343), False, 'import torch\n'), ((25980, 26046), 'torch.cat', 'torch.cat', (['(m_GC_spike, u_GC_spike, sigma_GC_spike, f_GC_spike)', '(0)'], {}), '((m_GC_spike, u_GC_spike, sigma_GC_spike, f_GC_spike), 0)\n', (25989, 26046), False, 'import torch\n'), ((26118, 26184), 'torch.cat', 'torch.cat', (['(m_MF_spike, u_MF_spike, sigma_MF_spike, f_MF_spike)', '(0)'], {}), '((m_MF_spike, u_MF_spike, sigma_MF_spike, f_MF_spike), 0)\n', (26127, 26184), False, 'import torch\n'), ((26509, 26526), 'torch.mean', 'torch.mean', (['MF_in'], {}), '(MF_in)\n', (26519, 26526), False, 'import torch\n'), ((29429, 29454), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (29452, 29454), False, 'import torch\n'), ((29918, 29981), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_hidden', 'self.possion_num'], {}), '(self.batch_size, self.num_hidden, self.possion_num)\n', (29929, 29981), False, 'import torch\n'), ((30087, 30132), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_hidden'], {}), '(self.batch_size, self.num_hidden)\n', (30098, 30132), False, 'import torch\n'), ((30191, 30240), 'torch.zeros', 'torch.zeros', (['self.batch_size', '(2 * self.num_hidden)'], {}), '(self.batch_size, 2 * self.num_hidden)\n', (30202, 30240), False, 'import torch\n'), ((30297, 30342), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_hidden'], {}), '(self.batch_size, self.num_hidden)\n', (30308, 30342), False, 'import torch\n'), ((30401, 30446), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.num_hidden'], {}), '(self.batch_size, self.num_hidden)\n', (30412, 30446), False, 'import torch\n'), ((30505, 30546), 'torch.zeros', 'torch.zeros', (['self.batch_size', 'self.output'], {}), '(self.batch_size, self.output)\n', (30516, 30546), False, 'import torch\n'), ((32297, 32322), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32320, 32322), False, 'import torch\n'), ((33087, 33128), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_hidden1'], {}), '(batch_size, self.num_hidden1)\n', (33098, 33128), False, 'import torch\n'), ((33187, 33228), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_hidden2'], {}), '(batch_size, self.num_hidden2)\n', (33198, 33228), False, 'import torch\n'), ((33287, 33328), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_hidden2'], {}), '(batch_size, self.num_hidden2)\n', (33298, 33328), False, 'import torch\n'), ((33387, 33428), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.num_hidden3'], {}), '(batch_size, self.num_hidden3)\n', (33398, 33428), False, 'import torch\n'), ((33487, 33523), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.output'], {}), '(batch_size, self.output)\n', (33498, 33523), False, 'import torch\n'), ((35558, 35586), 'math.exp', 'math.exp', (['(-dt / tao_positive)'], {}), '(-dt / tao_positive)\n', (35566, 35586), False, 'import math\n'), ((35647, 35675), 'math.exp', 'math.exp', (['(-dt / tao_negative)'], {}), '(-dt / tao_negative)\n', (35655, 35675), False, 'import math\n'), ((35918, 35939), 'math.exp', 'math.exp', (['(-dt / tao_z)'], {}), '(-dt / tao_z)\n', (35926, 35939), False, 'import math\n'), ((832, 857), 'torch.abs', 'torch.abs', (['(input / thresh)'], {}), '(input / thresh)\n', (841, 857), False, 'import torch\n'), ((2931, 2946), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2944, 2946), False, 'import torch\n'), ((8000, 8026), 'torch.mm', 'torch.mm', (['spike', 'temp_w_hh'], {}), 
'(spike, temp_w_hh)\n', (8008, 8026), False, 'import torch\n'), ((8279, 8319), 'torch.abs', 'torch.abs', (['(1 / thresh * (v_hidden_t - A))'], {}), '(1 / thresh * (v_hidden_t - A))\n', (8288, 8319), False, 'import torch\n'), ((12623, 12708), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.hidden_size', 'self.hidden_size'], {'requires_grad': '(False)'}), '(batch_size, self.hidden_size, self.hidden_size, requires_grad=False\n )\n', (12634, 12708), False, 'import torch\n'), ((12850, 12929), 'torch.zeros', 'torch.zeros', (['batch_size', 'self.input_size', 'self.hidden_size'], {'requires_grad': '(False)'}), '(batch_size, self.input_size, self.hidden_size, requires_grad=False)\n', (12861, 12929), False, 'import torch\n'), ((17316, 17353), 'torch.tensor', 'torch.tensor', (['mask'], {'dtype': 'torch.float'}), '(mask, dtype=torch.float)\n', (17328, 17353), False, 'import torch\n'), ((868, 883), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (880, 883), False, 'import torch\n'), ((8250, 8265), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (8262, 8265), False, 'import torch\n'), ((33607, 33630), 'torch.from_numpy', 'torch.from_numpy', (['input'], {}), '(input)\n', (33623, 33630), False, 'import torch\n')] |
import numpy as np
import pyqmc
import pandas as pd
from pyqmc.reblock import reblock
from pyqmc.pbc import enforce_pbc
from pyqmc.coord import PeriodicConfigs
from pyscf.pbc import gto, scf
from pyscf.pbc.dft.multigrid import multigrid
from pyscf.scf.addons import remove_linear_dep_
def test_cubic_with_ecp(kind=1, nk=(2, 2, 2)):
L = 6.63 * 2
cell = gto.Cell(
atom="""Li {0} {0} {0}
Li {1} {1} {1}""".format(
0.0, L / 4
),
basis="bfd-vdz",
ecp={"Li": "bfd"},
spin=0,
unit="bohr",
)
cell.exp_to_discard = 0.1
cell.build(a=np.eye(3) * L)
kpts = cell.make_kpts(nk)
mf = scf.KRKS(cell, kpts)
mf.xc = "pbe"
mf = mf.density_fit()
mf = multigrid(mf)
mf = mf.run()
runtest(cell, mf, kind=kind)
def test_RKS(kind=1, nk=(2, 2, 2)):
L = 2
mol = gto.M(
atom="""He {0} {0} {0}""".format(0.0),
basis="sto-3g",
a=np.eye(3) * L,
unit="bohr",
)
kpts = mol.make_kpts(nk)
mf = scf.KRKS(mol, kpts)
mf.xc = "pbe"
# mf = mf.density_fit()
mf = mf.run()
runtest(mol, mf, kind=kind)
def test_noncubic(kind=1, nk=(2, 2, 2)):
L = 3
mol = gto.M(
atom="""H {0} {0} {0}
H {1} {1} {1}""".format(
0.0, L / 4
),
basis="sto-3g",
a=(np.ones((3, 3)) - np.eye(3)) * L / 2,
spin=0,
unit="bohr",
)
kpts = mol.make_kpts(nk)
mf = scf.KRKS(mol, kpts)
mf.xc = "pbe"
# mf = mf.density_fit()
mf = mf.run()
runtest(mol, mf, kind=kind)
def runtest(mol, mf, kind=0):
for k, occ in enumerate(mf.mo_occ):
print(k, occ)
kpt = mf.kpts[kind]
twist = np.dot(kpt, mol.lattice_vectors().T / (2 * np.pi))
print("kpt", kpt)
print("twist", twist)
wf0 = pyqmc.PySCFSlater(mol, mf)
wft = pyqmc.PySCFSlater(mol, mf, twist=twist)
#####################################
## compare values across boundary
## psi, KE, ecp,
#####################################
nconfig = 100
coords = pyqmc.initial_guess(mol, nconfig, 1)
nelec = coords.configs.shape[1]
epos, wrap = enforce_pbc(coords.lvecs, coords.configs)
coords = PeriodicConfigs(epos, coords.lvecs)
shift_ = np.random.randint(10, size=coords.configs.shape) - 5
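    # translating an electron by an integer combination of lattice vectors
    # (shift = shift_ @ a) multiplies a twist-boundary wavefunction by
    # exp(2*pi*i * shift_ . twist); `phase` below holds that per-electron
    # prediction, which the testvalue/recompute assertions verify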
phase = np.exp(2j * np.pi * np.einsum("ijk,k->ij", shift_, twist))
shift = np.dot(shift_, mol.lattice_vectors())
epos, wrap = enforce_pbc(coords.lvecs, epos + shift)
newcoords = PeriodicConfigs(epos, coords.lvecs, wrap=wrap)
assert np.linalg.norm(newcoords.configs - coords.configs) < 1e-12
ph0, val0 = wf0.recompute(coords)
pht, valt = wft.recompute(coords)
enacc = pyqmc.accumulators.EnergyAccumulator(mol, threshold=np.inf)
np.random.seed(0)
en0 = enacc(coords, wf0)
np.random.seed(0)
ent = enacc(coords, wft)
e = 0
rat0 = wf0.testvalue(e, newcoords.electron(e))
assert np.linalg.norm(rat0 - 1) < 1e-10, rat0 - 1
ratt = wft.testvalue(e, newcoords.electron(e))
rattdiff = ratt - phase[:, e]
assert np.linalg.norm(rattdiff) < 1e-9, [
np.round(rattdiff, 10),
np.amax(np.abs(rattdiff)),
]
ph0new, val0new = wf0.recompute(newcoords)
phtnew, valtnew = wft.recompute(newcoords)
np.random.seed(0)
en0new = enacc(newcoords, wf0)
np.random.seed(0)
entnew = enacc(newcoords, wft)
assert np.linalg.norm(ph0 - ph0new) < 1e-11
assert np.linalg.norm(pht * phase.prod(axis=1) - phtnew) < 1e-11, (
pht * phase.prod(axis=1) - phtnew
)
assert np.linalg.norm(val0 - val0new) < 1e-11, np.linalg.norm(val0 - val0new)
assert np.linalg.norm(valt - valtnew) < 1e-11, np.linalg.norm(valt - valtnew)
for k in en0.keys():
print(k)
diff0 = en0[k] - en0new[k]
difft = ent[k] - entnew[k]
if k == "ecp":
for l, diff in [("0", diff0), ("t", difft)]:
mad = np.mean(np.abs(diff))
if True: # mad > 1e-12:
print("ecp%s diff" % l, mad, np.linalg.norm(diff))
assert mad < 1e-3, diff
else:
assert np.linalg.norm(diff0) < 1e-10, diff0
assert np.linalg.norm(difft) < 1e-10, difft
if __name__ == "__main__":
kind = 1
nk = [2, 2, 2]
test_cubic_with_ecp(kind, nk)
test_RKS(kind, nk)
test_noncubic(kind, nk)
| [
"pyqmc.initial_guess",
"numpy.abs",
"pyqmc.PySCFSlater",
"numpy.eye",
"numpy.ones",
"pyqmc.pbc.enforce_pbc",
"pyscf.pbc.dft.multigrid.multigrid",
"numpy.random.randint",
"numpy.einsum",
"numpy.random.seed",
"pyqmc.accumulators.EnergyAccumulator",
"numpy.linalg.norm",
"pyqmc.coord.PeriodicCon... | [((772, 792), 'pyscf.pbc.scf.KRKS', 'scf.KRKS', (['cell', 'kpts'], {}), '(cell, kpts)\n', (780, 792), False, 'from pyscf.pbc import gto, scf\n'), ((846, 859), 'pyscf.pbc.dft.multigrid.multigrid', 'multigrid', (['mf'], {}), '(mf)\n', (855, 859), False, 'from pyscf.pbc.dft.multigrid import multigrid\n'), ((1151, 1170), 'pyscf.pbc.scf.KRKS', 'scf.KRKS', (['mol', 'kpts'], {}), '(mol, kpts)\n', (1159, 1170), False, 'from pyscf.pbc import gto, scf\n'), ((1643, 1662), 'pyscf.pbc.scf.KRKS', 'scf.KRKS', (['mol', 'kpts'], {}), '(mol, kpts)\n', (1651, 1662), False, 'from pyscf.pbc import gto, scf\n'), ((1998, 2024), 'pyqmc.PySCFSlater', 'pyqmc.PySCFSlater', (['mol', 'mf'], {}), '(mol, mf)\n', (2015, 2024), False, 'import pyqmc\n'), ((2035, 2074), 'pyqmc.PySCFSlater', 'pyqmc.PySCFSlater', (['mol', 'mf'], {'twist': 'twist'}), '(mol, mf, twist=twist)\n', (2052, 2074), False, 'import pyqmc\n'), ((2250, 2286), 'pyqmc.initial_guess', 'pyqmc.initial_guess', (['mol', 'nconfig', '(1)'], {}), '(mol, nconfig, 1)\n', (2269, 2286), False, 'import pyqmc\n'), ((2340, 2381), 'pyqmc.pbc.enforce_pbc', 'enforce_pbc', (['coords.lvecs', 'coords.configs'], {}), '(coords.lvecs, coords.configs)\n', (2351, 2381), False, 'from pyqmc.pbc import enforce_pbc\n'), ((2395, 2430), 'pyqmc.coord.PeriodicConfigs', 'PeriodicConfigs', (['epos', 'coords.lvecs'], {}), '(epos, coords.lvecs)\n', (2410, 2430), False, 'from pyqmc.coord import PeriodicConfigs\n'), ((2637, 2676), 'pyqmc.pbc.enforce_pbc', 'enforce_pbc', (['coords.lvecs', '(epos + shift)'], {}), '(coords.lvecs, epos + shift)\n', (2648, 2676), False, 'from pyqmc.pbc import enforce_pbc\n'), ((2693, 2739), 'pyqmc.coord.PeriodicConfigs', 'PeriodicConfigs', (['epos', 'coords.lvecs'], {'wrap': 'wrap'}), '(epos, coords.lvecs, wrap=wrap)\n', (2708, 2739), False, 'from pyqmc.coord import PeriodicConfigs\n'), ((2900, 2959), 'pyqmc.accumulators.EnergyAccumulator', 'pyqmc.accumulators.EnergyAccumulator', (['mol'], {'threshold': 'np.inf'}), '(mol, threshold=np.inf)\n', (2936, 2959), False, 'import pyqmc\n'), ((2964, 2981), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2978, 2981), True, 'import numpy as np\n'), ((3015, 3032), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3029, 3032), True, 'import numpy as np\n'), ((3481, 3498), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3495, 3498), True, 'import numpy as np\n'), ((3538, 3555), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3552, 3555), True, 'import numpy as np\n'), ((3811, 3841), 'numpy.linalg.norm', 'np.linalg.norm', (['(val0 - val0new)'], {}), '(val0 - val0new)\n', (3825, 3841), True, 'import numpy as np\n'), ((3893, 3923), 'numpy.linalg.norm', 'np.linalg.norm', (['(valt - valtnew)'], {}), '(valt - valtnew)\n', (3907, 3923), True, 'import numpy as np\n'), ((2445, 2493), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': 'coords.configs.shape'}), '(10, size=coords.configs.shape)\n', (2462, 2493), True, 'import numpy as np\n'), ((2752, 2802), 'numpy.linalg.norm', 'np.linalg.norm', (['(newcoords.configs - coords.configs)'], {}), '(newcoords.configs - coords.configs)\n', (2766, 2802), True, 'import numpy as np\n'), ((3135, 3159), 'numpy.linalg.norm', 'np.linalg.norm', (['(rat0 - 1)'], {}), '(rat0 - 1)\n', (3149, 3159), True, 'import numpy as np\n'), ((3274, 3298), 'numpy.linalg.norm', 'np.linalg.norm', (['rattdiff'], {}), '(rattdiff)\n', (3288, 3298), True, 'import numpy as np\n'), ((3317, 3339), 'numpy.round', 'np.round', 
(['rattdiff', '(10)'], {}), '(rattdiff, 10)\n', (3325, 3339), True, 'import numpy as np\n'), ((3603, 3631), 'numpy.linalg.norm', 'np.linalg.norm', (['(ph0 - ph0new)'], {}), '(ph0 - ph0new)\n', (3617, 3631), True, 'import numpy as np\n'), ((3771, 3801), 'numpy.linalg.norm', 'np.linalg.norm', (['(val0 - val0new)'], {}), '(val0 - val0new)\n', (3785, 3801), True, 'import numpy as np\n'), ((3853, 3883), 'numpy.linalg.norm', 'np.linalg.norm', (['(valt - valtnew)'], {}), '(valt - valtnew)\n', (3867, 3883), True, 'import numpy as np\n'), ((2530, 2567), 'numpy.einsum', 'np.einsum', (['"""ijk,k->ij"""', 'shift_', 'twist'], {}), "('ijk,k->ij', shift_, twist)\n", (2539, 2567), True, 'import numpy as np\n'), ((3357, 3373), 'numpy.abs', 'np.abs', (['rattdiff'], {}), '(rattdiff)\n', (3363, 3373), True, 'import numpy as np\n'), ((718, 727), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (724, 727), True, 'import numpy as np\n'), ((1071, 1080), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1077, 1080), True, 'import numpy as np\n'), ((4350, 4371), 'numpy.linalg.norm', 'np.linalg.norm', (['diff0'], {}), '(diff0)\n', (4364, 4371), True, 'import numpy as np\n'), ((4406, 4427), 'numpy.linalg.norm', 'np.linalg.norm', (['difft'], {}), '(difft)\n', (4420, 4427), True, 'import numpy as np\n'), ((4147, 4159), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (4153, 4159), True, 'import numpy as np\n'), ((1524, 1539), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1531, 1539), True, 'import numpy as np\n'), ((1542, 1551), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1548, 1551), True, 'import numpy as np\n'), ((4251, 4271), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (4265, 4271), True, 'import numpy as np\n')] |
""" Keep data during a workflow run in the persistent data store
Data that should be kept during a workflow run can be saved into the persistent
data store. This data is deleted as soon as the workflow run ends, but is available
to all tasks during the lifetime of the workflow.
The data store provides methods to store and retrieve single values or append values
to a list. This can even be done asynchronously from different tasks at the same time.
The key under which the data is being stored supports a hierarchical structure using
the dot notation.
This workflow stores different types of data in the persistent data store and modifies
them.
"""
from lightflow.models import Dag
from lightflow.tasks import PythonTask
import numpy as np
# the callback function to store data in the persistent data store. It stores a single
# integer value in 'number', a single integer value into the hierarchical key
# 'buffer' -> 'observable' and a numpy array into 'image'. Additionally it adds an integer
# value to a list in 'sample' -> 'spectra'.
def store_data(data, store, signal, context):
store.set('number', 5)
store.set('buffer.observable', 20)
store.set('image', np.ones((100, 100)))
store.push('sample.spectra', 7)
# the callback function for the task that retrieves and prints the 'number' and 'image'
# values, then modifies the 'number' value and creates a new list of 'filenames'.
def modify_data(data, store, signal, context):
number = store.get('number')
print('The number is: {}'.format(number))
img = store.get('image')
print('The dimension of the image is: {}'.format(img.shape))
store.set('number', number * 10)
store.push('filenames', 'file_a.spec')
# the callback function for the task that adds another filename to the list.
def add_filename(data, store, signal, context):
store.push('filenames', 'file_b.spec')
# the callback function for the task that adds a nested list to the list of filenames and
# then extends the list of filenames with two more entries.
def add_more_filenames(data, store, signal, context):
store.push('filenames', ['nested_a', 'nested_b'])
store.extend('filenames', ['file_c.spec', 'file_d.spec'])
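# a minimal sketch (illustration only; not wired into the DAG below): the dot
# notation described in the module docstring is assumed to work for retrieval
# too, so the nested value written by store_data can be read back directly.
def read_nested_data(data, store, signal, context):
    observable = store.get('buffer.observable')
    print('The observable is: {}'.format(observable))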
# create the main DAG
d = Dag('main_dag')
# create the tasks that call the functions above
store_task = PythonTask(name='store_task',
callback=store_data)
modify_task = PythonTask(name='modify_task',
callback=modify_data)
add_filename_task = PythonTask(name='add_filename_task',
callback=add_filename)
add_more_filename_task = PythonTask(name='add_more_filename_task',
callback=add_more_filenames)
# set up the graph of the DAG, in which the store_task and modify_task are called
# in sequence while the add_filename_task and add_more_filename_task are run in parallel.
d.define({
store_task: modify_task,
modify_task: [add_filename_task, add_more_filename_task]
})
| [
"lightflow.models.Dag",
"numpy.ones",
"lightflow.tasks.PythonTask"
] | [((2236, 2251), 'lightflow.models.Dag', 'Dag', (['"""main_dag"""'], {}), "('main_dag')\n", (2239, 2251), False, 'from lightflow.models import Dag\n'), ((2315, 2365), 'lightflow.tasks.PythonTask', 'PythonTask', ([], {'name': '"""store_task"""', 'callback': 'store_data'}), "(name='store_task', callback=store_data)\n", (2325, 2365), False, 'from lightflow.tasks import PythonTask\n'), ((2405, 2457), 'lightflow.tasks.PythonTask', 'PythonTask', ([], {'name': '"""modify_task"""', 'callback': 'modify_data'}), "(name='modify_task', callback=modify_data)\n", (2415, 2457), False, 'from lightflow.tasks import PythonTask\n'), ((2504, 2563), 'lightflow.tasks.PythonTask', 'PythonTask', ([], {'name': '"""add_filename_task"""', 'callback': 'add_filename'}), "(name='add_filename_task', callback=add_filename)\n", (2514, 2563), False, 'from lightflow.tasks import PythonTask\n'), ((2621, 2691), 'lightflow.tasks.PythonTask', 'PythonTask', ([], {'name': '"""add_more_filename_task"""', 'callback': 'add_more_filenames'}), "(name='add_more_filename_task', callback=add_more_filenames)\n", (2631, 2691), False, 'from lightflow.tasks import PythonTask\n'), ((1186, 1205), 'numpy.ones', 'np.ones', (['(100, 100)'], {}), '((100, 100))\n', (1193, 1205), True, 'import numpy as np\n')] |
import gym
import numpy as np
from garage.envs.mujoco import Walker2DEnv
from garage.envs.mujoco.hill import HillEnv
from garage.envs.mujoco.hill import terrain
from garage.misc.overrides import overrides
class Walker2DHillEnv(HillEnv):
MODEL_CLASS = Walker2DEnv
@overrides
def _mod_hfield(self, hfield):
# clear a flat patch for the robot to start off from
return terrain.clear_patch(
hfield,
gym.spaces.Box(
np.array([-2.0, -2.0]),
np.array([-0.5, -0.5]),
dtype=np.float32))
| [
"numpy.array"
] | [((483, 505), 'numpy.array', 'np.array', (['[-2.0, -2.0]'], {}), '([-2.0, -2.0])\n', (491, 505), True, 'import numpy as np\n'), ((523, 545), 'numpy.array', 'np.array', (['[-0.5, -0.5]'], {}), '([-0.5, -0.5])\n', (531, 545), True, 'import numpy as np\n')] |
import json
import argparse
import sys
import configs.config as config
import os
import numpy as np
import random
from datetime import datetime
from utils.utils import *
def main():
parser = argparse.ArgumentParser(description="Generate synth dataset images for disentanglement.")
parser.add_argument("--frames", type=int, help="frames to use from the sequence", default=2)
parser.add_argument("--gender", type=int,
help="-1: both, 0: female, 1: male", default=-1)
parser.add_argument("--backgrounds", type=int,
help="number of backgrounds", default=10)
parser.add_argument("--orientations", type=int, choices=[4, 8, 16], default=4,
help="number of orientation classes")
parser.add_argument("--shapes", type=int, default=4,
help="number of shapes")
parser.add_argument("--textures", type=int, default=8,
help="number of textures")
parser.add_argument("--reset", action="store_true", help="reset the generation config file, even if it already exists")
parser.add_argument("path", help="basic config path")
args = parser.parse_args()
configuration_dict = {}
params = config.load_file(args.path, "SYNTH_DATA")
if not os.path.isfile(os.path.join(params["output_path"], "generation_config.json")) or args.reset:
seed_number = 11
random.seed(seed_number)
np.random.seed(seed_number)
configuration_dict.update(params)
configuration_dict["created"] = datetime.now().strftime("%d-%m-%Y-%H-%M")
configuration_dict["factors"] = {"frames_per_sequence": args.frames}
# backgrounds
bg_names = os.path.join(params["bg_path"], 'train_img.txt')
nh_txt_paths = []
with open(bg_names) as f:
for line in f:
nh_txt_paths.append(os.path.join(params["bg_path"], line[:-1]))
# backgrounds = np.random.choice(nh_txt_paths[:-1], args.backgrounds, replace=False)
backgrounds = nh_txt_paths[:args.backgrounds]
configuration_dict["factors"]["backgrounds"] = backgrounds
# gender
genders = {0: 'female', 1: 'male'}
# set gender.
if args.gender == -1:
gender = [genders.get(g) for g in genders]
else:
gender = genders.get(args.gender)
configuration_dict["factors"]["gender"] = gender
# orientations
configuration_dict["factors"]["orientations"] = list(np.arange(0, 360, (360/args.orientations)))
# clothing/textures
assert args.textures % 2 == 0
textures = []
for igndr, gndr in enumerate(gender):
with open(os.path.join(params["smpl_data_folder"], 'textures', '%s_%s.txt' % (gndr, 'train'))) as f:
txt_paths = f.read().splitlines()
# if using only one source of clothing
if params["clothing_option"] == 'nongrey':
clothing_txt_paths = [k for k in txt_paths if 'nongrey' in k]
elif params["clothing_option"] == 'grey':
clothing_txt_paths = [k for k in txt_paths if 'nongrey' not in k]
textures.extend(np.random.choice(clothing_txt_paths, size=int(args.textures / 2), replace=False))
configuration_dict["factors"]["textures"] = textures
# shapes (extracted only from female model)
ndofs = 10
gndr = "female"
smpl_data = np.load(os.path.join(params["smpl_data_folder"], params["smpl_data_filename"]))
fshapes = smpl_data['%sshapes' % gndr][:, :ndofs]
nb_fshapes = len(fshapes)
fshapes = fshapes[:int(nb_fshapes*0.8)] # train split
shapes_idx = np.random.choice(np.arange(len(fshapes)), size=args.shapes, replace=False)
shapes = fshapes[shapes_idx]
configuration_dict["factors"]["shapes"] = shapes
# light
configuration_dict["sh_coeffs"] = .7 * (2 * np.random.rand(9) - 1)
configuration_dict["sh_coeffs"][0] = .5 + .9 * np.random.rand() # Ambient light (first coeff) needs a minimum is ambient. Rest is uniformly distributed, higher means brighter.
configuration_dict["sh_coeffs"][1] = -.7 * np.random.rand()
# camera distance
# configuration_dict["camera_distance"] = np.random.normal(8.0, 1)
configuration_dict["camera_distance"] = 7.2 # fixed not random
if args.reset and os.path.exists(params["output_path"]) and params["output_path"] != "" and params["output_path"] != "/":
os.system(f"rm -rf {params['output_path']}")
os.makedirs(params["output_path"], exist_ok=True)
folders = ["info", "images", "logs", "dataset"]
for folder in folders:
os.makedirs(os.path.join(params["output_path"], folder), exist_ok=True)
configuration_dict[f"{folder}_path"] = str(os.path.join(params["output_path"], folder))
with open(os.path.join(params["output_path"], "generation_config.json"), "w", encoding="utf-8") as f:
json.dump(configuration_dict, f, ensure_ascii=False, indent=4, cls=NumpyEncoder)
print("Generated a new configuration file!")
else:
print("Configuration file already exists!")
if __name__ == "__main__":
main() | [
"os.path.exists",
"numpy.random.rand",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.arange",
"json.dump",
"os.path.join",
"random.seed",
"datetime.datetime.now",
"numpy.random.seed",
"os.system",
"configs.config.load_file"
] | [((196, 290), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate synth dataset images for disentanglement."""'}), "(description=\n 'Generate synth dataset images for disentanglement.')\n", (219, 290), False, 'import argparse\n'), ((1235, 1276), 'configs.config.load_file', 'config.load_file', (['args.path', '"""SYNTH_DATA"""'], {}), "(args.path, 'SYNTH_DATA')\n", (1251, 1276), True, 'import configs.config as config\n'), ((1415, 1439), 'random.seed', 'random.seed', (['seed_number'], {}), '(seed_number)\n', (1426, 1439), False, 'import random\n'), ((1448, 1475), 'numpy.random.seed', 'np.random.seed', (['seed_number'], {}), '(seed_number)\n', (1462, 1475), True, 'import numpy as np\n'), ((1720, 1768), 'os.path.join', 'os.path.join', (["params['bg_path']", '"""train_img.txt"""'], {}), "(params['bg_path'], 'train_img.txt')\n", (1732, 1768), False, 'import os\n'), ((4618, 4667), 'os.makedirs', 'os.makedirs', (["params['output_path']"], {'exist_ok': '(True)'}), "(params['output_path'], exist_ok=True)\n", (4629, 4667), False, 'import os\n'), ((2524, 2566), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(360 / args.orientations)'], {}), '(0, 360, 360 / args.orientations)\n', (2533, 2566), True, 'import numpy as np\n'), ((3482, 3552), 'os.path.join', 'os.path.join', (["params['smpl_data_folder']", "params['smpl_data_filename']"], {}), "(params['smpl_data_folder'], params['smpl_data_filename'])\n", (3494, 3552), False, 'import os\n'), ((4230, 4246), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4244, 4246), True, 'import numpy as np\n'), ((4449, 4486), 'os.path.exists', 'os.path.exists', (["params['output_path']"], {}), "(params['output_path'])\n", (4463, 4486), False, 'import os\n'), ((4565, 4609), 'os.system', 'os.system', (['f"""rm -rf {params[\'output_path\']}"""'], {}), '(f"rm -rf {params[\'output_path\']}")\n', (4574, 4609), False, 'import os\n'), ((5074, 5159), 'json.dump', 'json.dump', (['configuration_dict', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)', 'cls': 'NumpyEncoder'}), '(configuration_dict, f, ensure_ascii=False, indent=4, cls=NumpyEncoder\n )\n', (5083, 5159), False, 'import json\n'), ((1304, 1365), 'os.path.join', 'os.path.join', (["params['output_path']", '"""generation_config.json"""'], {}), "(params['output_path'], 'generation_config.json')\n", (1316, 1365), False, 'import os\n'), ((1559, 1573), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1571, 1573), False, 'from datetime import datetime\n'), ((4049, 4065), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4063, 4065), True, 'import numpy as np\n'), ((4791, 4834), 'os.path.join', 'os.path.join', (["params['output_path']", 'folder'], {}), "(params['output_path'], folder)\n", (4803, 4834), False, 'import os\n'), ((4906, 4949), 'os.path.join', 'os.path.join', (["params['output_path']", 'folder'], {}), "(params['output_path'], folder)\n", (4918, 4949), False, 'import os\n'), ((4970, 5031), 'os.path.join', 'os.path.join', (["params['output_path']", '"""generation_config.json"""'], {}), "(params['output_path'], 'generation_config.json')\n", (4982, 5031), False, 'import os\n'), ((1892, 1934), 'os.path.join', 'os.path.join', (["params['bg_path']", 'line[:-1]'], {}), "(params['bg_path'], line[:-1])\n", (1904, 1934), False, 'import os\n'), ((2725, 2812), 'os.path.join', 'os.path.join', (["params['smpl_data_folder']", '"""textures"""', "('%s_%s.txt' % (gndr, 'train'))"], {}), "(params['smpl_data_folder'], 'textures', '%s_%s.txt' % (gndr,\n 'train'))\n", 
(2737, 2812), False, 'import os\n'), ((3971, 3988), 'numpy.random.rand', 'np.random.rand', (['(9)'], {}), '(9)\n', (3985, 3988), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
This script builds loss functions using Cython on a local machine.
To run this script
1. Change to the directory
$REPO_DIR/riskslim/loss_functions
2. Run the following commands in Bash:
python2 build_cython_loss_functions.py build_ext --inplace
python3 build_cython_loss_functions.py build_ext --inplace
"""
import numpy
import scipy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
#fast log loss
ext_modules = [Extension(name = "fast_log_loss",
sources=["fast_log_loss.pyx"],
include_dirs=[numpy.get_include(), scipy.get_include()],
libraries=["m"],
extra_compile_args = ["-ffast-math"])]
setup(
cmdclass = {'build_ext': build_ext},
include_dirs = [numpy.get_include(), scipy.get_include()],
ext_modules = ext_modules,
)
#lookup log loss
ext_modules = [Extension(name = "lookup_log_loss",
sources=["lookup_log_loss.pyx"],
include_dirs=[numpy.get_include(), scipy.get_include()],
libraries=["m"],
extra_compile_args = ["-ffast-math"])]
setup(
cmdclass = {'build_ext': build_ext},
include_dirs = [numpy.get_include(), scipy.get_include()],
ext_modules = ext_modules,
)
| [
"scipy.get_include",
"numpy.get_include"
] | [((859, 878), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (876, 878), False, 'import numpy\n'), ((880, 899), 'scipy.get_include', 'scipy.get_include', ([], {}), '()\n', (897, 899), False, 'import scipy\n'), ((1319, 1338), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1336, 1338), False, 'import numpy\n'), ((1340, 1359), 'scipy.get_include', 'scipy.get_include', ([], {}), '()\n', (1357, 1359), False, 'import scipy\n'), ((641, 660), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (658, 660), False, 'import numpy\n'), ((662, 681), 'scipy.get_include', 'scipy.get_include', ([], {}), '()\n', (679, 681), False, 'import scipy\n'), ((1101, 1120), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1118, 1120), False, 'import numpy\n'), ((1122, 1141), 'scipy.get_include', 'scipy.get_include', ([], {}), '()\n', (1139, 1141), False, 'import scipy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 17:02:36 2019
@author: Chandar_S
"""
#%%
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
import pandas, xgboost, numpy, textblob, string
from keras.preprocessing import text, sequence
from keras import layers, models, optimizers
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import re
from keras.utils import np_utils
''' HYPER PARAMETERS '''
input_file = 'T&ADataForAnalysis_NonCluster'
data=pandas.read_excel(input_file +'.xlsx', sheet_name="Sheet1") #Include your data file instead of data.xlsx
ticket_data = data.iloc[:,0:30] #Selecting the columns that contain the text.
Analysis_primary_columnName = 'Issue'
Analysis_secondary_columnName = 'Machine Classification'
Analysis_Result_columnName = 'Machine Classification'
Analysis_ticket_columnName = 'Ticket'
testing_corpus=[]
testing_description=[]
testing_ticket_numbers=[]
''' HYPER PARAMETERS '''
#stop = set(stopwords.words('english'))
#exclude = set(string.punctuation)
#lemma = WordNetLemmatizer()
# Cleaning the text sentences so that punctuation marks, stop words & digits are removed
#def clean(doc):
# stop_free = " ".join([i for i in doc.lower().split() if i not in stop])
# punc_free = ''.join(ch for ch in stop_free if ch not in exclude)
# normalized = " ".join(lemma.lemmatize(word) for word in punc_free.split())
# processed = re.sub(r"\d+","",normalized)
# y = processed.split()
# return y
#
labels, texts = [], []
for index,row in ticket_data.iterrows():
if (row[Analysis_primary_columnName] and str(row[Analysis_primary_columnName]) != 'nan' ):
line = str(row[Analysis_primary_columnName])
else:
line = str(row[Analysis_secondary_columnName])
# line = line.strip()
# cleaned = clean(line)
# cleaned = ' '.join(cleaned)
# texts.append(cleaned)
if (str(row[Analysis_Result_columnName]) != 'nan'):
texts.append(line)
labels.append(row[Analysis_Result_columnName])
else:
testing_description.append(line)
testing_corpus.append(line)
testing_ticket_numbers.append(row[Analysis_ticket_columnName])
# load the dataset
#data = open('data/corpus', encoding="utf8").read()
#labels, texts = [], []
#for i, line in enumerate(data.split("\n")):
# content = line.split()
# labels.append(content[0])
# texts.append(" ".join(content[1:]))
# create a dataframe using texts and labels
trainDF = pandas.DataFrame()
trainDF['text'] = texts
trainDF['label'] = labels
#%%
# label encode the target variable
encoder = preprocessing.LabelEncoder()
encoded_y = encoder.fit_transform(trainDF['label'])
# split the dataset into training and validation datasets
train_x, valid_x, train_y, valid_y = model_selection.train_test_split(trainDF['text'], encoded_y)
# create a count vectorizer object
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(trainDF['text'])
# transform the training and validation data using count vectorizer object
xtrain_count = count_vect.transform(train_x)
xvalid_count = count_vect.transform(valid_x)
# word level tf-idf
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', max_features=5000)
tfidf_vect.fit(trainDF['text'])
xtrain_tfidf = tfidf_vect.transform(train_x)
xvalid_tfidf = tfidf_vect.transform(valid_x)
# ngram level tf-idf
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram.fit(trainDF['text'])
xtrain_tfidf_ngram = tfidf_vect_ngram.transform(train_x)
xvalid_tfidf_ngram = tfidf_vect_ngram.transform(valid_x)
# characters level tf-idf
tfidf_vect_ngram_chars = TfidfVectorizer(analyzer='char', token_pattern=r'\w{1,}', ngram_range=(2,3), max_features=5000)
tfidf_vect_ngram_chars.fit(trainDF['text'])
xtrain_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(train_x)
xvalid_tfidf_ngram_chars = tfidf_vect_ngram_chars.transform(valid_x)
# load the pre-trained word-embedding vectors
embeddings_index = {}
for i, line in enumerate(open('glove.6B/glove.6B.300d.txt', encoding="utf8")):
values = line.split()
embeddings_index[values[0]] = numpy.asarray(values[1:], dtype='float32')
# create a tokenizer
token = text.Tokenizer()
token.fit_on_texts(trainDF['text'])
word_index = token.word_index
# convert text to sequence of tokens and pad them to ensure equal length vectors
train_seq_x = sequence.pad_sequences(token.texts_to_sequences(train_x), maxlen=70)
valid_seq_x = sequence.pad_sequences(token.texts_to_sequences(valid_x), maxlen=70)
# create token-embedding mapping
embedding_matrix = numpy.zeros((len(word_index) + 1, 300))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
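# a hedged sketch (illustration; not from the original script): the mapping built
# above is typically consumed by a frozen Keras Embedding layer initialized from
# it. The layer name is a placeholder; 70 matches the padded sequence length.
example_embedding_layer = layers.Embedding(len(word_index) + 1, 300,
                                         weights=[embedding_matrix],
                                         input_length=70, trainable=False)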
trainDF['char_count'] = trainDF['text'].apply(len)
trainDF['word_count'] = trainDF['text'].apply(lambda x: len(x.split()))
trainDF['word_density'] = trainDF['char_count'] / (trainDF['word_count']+1)
trainDF['punctuation_count'] = trainDF['text'].apply(lambda x: len("".join(_ for _ in x if _ in string.punctuation)))
trainDF['title_word_count'] = trainDF['text'].apply(lambda x: len([wrd for wrd in x.split() if wrd.istitle()]))
trainDF['upper_case_word_count'] = trainDF['text'].apply(lambda x: len([wrd for wrd in x.split() if wrd.isupper()]))
pos_family = {
'noun' : ['NN','NNS','NNP','NNPS'],
'pron' : ['PRP','PRP$','WP','WP$'],
'verb' : ['VB','VBD','VBG','VBN','VBP','VBZ'],
'adj' : ['JJ','JJR','JJS'],
'adv' : ['RB','RBR','RBS','WRB']
}
# function to count the part-of-speech tags of a given family in a sentence
def check_pos_tag(x, flag):
cnt = 0
try:
wiki = textblob.TextBlob(x)
for tup in wiki.tags:
ppo = list(tup)[1]
if ppo in pos_family[flag]:
cnt += 1
    except Exception:
pass
return cnt
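# usage sketch (illustrative; actual counts depend on TextBlob's POS tagger):
#   check_pos_tag("The server crashed twice", 'noun')  # counts NN/NNS/NNP/NNPS tags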
trainDF['noun_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'noun'))
trainDF['verb_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'verb'))
trainDF['adj_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'adj'))
trainDF['adv_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'adv'))
trainDF['pron_count'] = trainDF['text'].apply(lambda x: check_pos_tag(x, 'pron'))
# train a LDA Model
lda_model = decomposition.LatentDirichletAllocation(n_components=20, learning_method='online', max_iter=20)
X_topics = lda_model.fit_transform(xtrain_count)
topic_word = lda_model.components_
vocab = count_vect.get_feature_names()
# view the topic models
n_top_words = 10
topic_summaries = []
for i, topic_dist in enumerate(topic_word):
topic_words = numpy.array(vocab)[numpy.argsort(topic_dist)][:-(n_top_words+1):-1]
topic_summaries.append(' '.join(topic_words))
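# illustrative (commented out): print the top words of each discovered topic
#   for idx, summary in enumerate(topic_summaries):
#       print(f"Topic {idx}: {summary}")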
#%%
def train_model(classifier, feature_vector_train, label, feature_vector_valid, is_neural_net=False):
if is_neural_net:
# fit the training dataset on the classifier
classifier.fit(feature_vector_train, label, batch_size = 1000, epochs=5, validation_split=0.05)
# predict the labels on validation dataset
predictions = classifier.predict(feature_vector_valid)
predictions = predictions.argmax(axis=-1)
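        # the Keras models below emit per-class softmax probabilities, so the
        # argmax recovers label-encoded class indices comparable to valid_y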
else:
# fit the training dataset on the classifier
classifier.fit(feature_vector_train, label)
# predict the labels on validation dataset
predictions = classifier.predict(feature_vector_valid)
    return metrics.accuracy_score(predictions, valid_y) * 100
#%%
'''
from sklearn.neighbors import KNeighborsClassifier
# KNN on Word Level TF IDF Vectors
accuracy = train_model(KNeighborsClassifier(n_neighbors=25), xtrain_tfidf, train_y, xvalid_tfidf)
print ("KNN, WordLevel TF-IDF: ", accuracy)
#%%
# Naive Bayes on Count Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_count, train_y, xvalid_count)
print ("NB, Count Vectors: ", accuracy)
# Naive Bayes on Word Level TF IDF Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf, train_y, xvalid_tfidf)
print ("NB, WordLevel TF-IDF: ", accuracy)
# Naive Bayes on Ngram Level TF IDF Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print ("NB, N-Gram Vectors: ", accuracy)
# Naive Bayes on Character Level TF IDF Vectors
accuracy = train_model(naive_bayes.MultinomialNB(), xtrain_tfidf_ngram_chars, train_y, xvalid_tfidf_ngram_chars)
print ("NB, CharLevel Vectors: ", accuracy)
#%%
# Linear Classifier on Count Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_count, train_y, xvalid_count)
print ("LR, Count Vectors: ", accuracy)
# Linear Classifier on Word Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf, train_y, xvalid_tfidf)
print ("LR, WordLevel TF-IDF: ", accuracy)
# Linear Classifier on Ngram Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print ("LR, N-Gram Vectors: ", accuracy)
# Linear Classifier on Character Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram_chars, train_y, xvalid_tfidf_ngram_chars)
print ("LR, CharLevel Vectors: ", accuracy)
#%%
# SVM on Ngram Level TF IDF Vectors
accuracy = train_model(svm.SVC(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print ("SVM, N-Gram Vectors: ", accuracy)
#%%
# RF on Count Vectors
accuracy = train_model(ensemble.RandomForestClassifier(), xtrain_count, train_y, xvalid_count)
print ("RF, Count Vectors: ", accuracy)
# RF on Word Level TF IDF Vectors
accuracy = train_model(ensemble.RandomForestClassifier(), xtrain_tfidf, train_y, xvalid_tfidf)
print ("RF, WordLevel TF-IDF: ", accuracy)
#%%
# Extreme Gradient Boosting on Count Vectors
accuracy = train_model(xgboost.XGBClassifier(), xtrain_count.tocsc(), train_y, xvalid_count.tocsc())
print ("Xgb, Count Vectors: ", accuracy)
# Extreme Gradient Boosting on Word Level TF IDF Vectors
accuracy = train_model(xgboost.XGBClassifier(), xtrain_tfidf.tocsc(), train_y, xvalid_tfidf.tocsc())
print ("Xgb, WordLevel TF-IDF: ", accuracy)
# Extreme Gradient Boosting on Character Level TF IDF Vectors
accuracy = train_model(xgboost.XGBClassifier(), xtrain_tfidf_ngram_chars.tocsc(), train_y, xvalid_tfidf_ngram_chars.tocsc())
print ("Xgb, CharLevel Vectors: ", accuracy)
#%%
def create_cnn():
# Add an Input Layer
input_layer = layers.Input((70, ))
# Add the word embedding Layer
embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)
# Add the convolutional Layer
conv_layer = layers.Convolution1D(100, 3, activation="relu")(embedding_layer)
# Add the pooling Layer
pooling_layer = layers.GlobalMaxPool1D()(conv_layer)
# Add the output Layers
output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(units=max(encoded_y) + 1, activation="softmax", name="output_layer")(output_layer1)
# Compile the model
model = models.Model(inputs=input_layer, outputs=output_layer2)
model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=["accuracy"])
return model
classifier = create_cnn()
accuracy = train_model(classifier, train_seq_x, train_y, valid_seq_x, is_neural_net=True)
print ("CNN, Word Embeddings", accuracy)
#%%
def create_rnn_lstm():
# Add an Input Layer
input_layer = layers.Input((70, ))
# Add the word embedding Layer
embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)
# Add the LSTM Layer
lstm_layer = layers.LSTM(100)(embedding_layer)
# Add the output Layers
output_layer1 = layers.Dense(50, activation="relu")(lstm_layer)
output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(units=max(encoded_y) + 1, activation="softmax", name="output_layer")(output_layer1)
# Compile the model
model = models.Model(inputs=input_layer, outputs=output_layer2)
model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy')
return model
classifier = create_rnn_lstm()
accuracy = train_model(classifier, train_seq_x, train_y, valid_seq_x, is_neural_net=True)
print ("RNN-LSTM, Word Embeddings", accuracy)
#%%
def create_rnn_gru():
# Add an Input Layer
input_layer = layers.Input((70, ))
# Add the word embedding Layer
embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)
# Add the GRU Layer
lstm_layer = layers.GRU(100)(embedding_layer)
# Add the output Layers
output_layer1 = layers.Dense(50, activation="relu")(lstm_layer)
output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(units=max(encoded_y) + 1, activation="softmax", name="output_layer")(output_layer1)
# Compile the model
model = models.Model(inputs=input_layer, outputs=output_layer2)
model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy')
return model
classifier = create_rnn_gru()
accuracy = train_model(classifier, train_seq_x, train_y, valid_seq_x, is_neural_net=True)
print ("RNN-GRU, Word Embeddings", accuracy)
#%%
def create_bidirectional_rnn():
# Add an Input Layer
input_layer = layers.Input((70, ))
# Add the word embedding Layer
embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)
# Add the LSTM Layer
lstm_layer = layers.Bidirectional(layers.GRU(100))(embedding_layer)
# Add the output Layers
output_layer1 = layers.Dense(50, activation="relu")(lstm_layer)
output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(units=max(encoded_y) + 1, activation="softmax", name="output_layer")(output_layer1)
# Compile the model
model = models.Model(inputs=input_layer, outputs=output_layer2)
model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy')
return model
classifier = create_bidirectional_rnn()
accuracy = train_model(classifier, train_seq_x, train_y, valid_seq_x, is_neural_net=True)
print ("RNN-Bidirectional, Word Embeddings", accuracy)
'''
#%%
def create_rcnn():
# Add an Input Layer
input_layer = layers.Input((70, ))
# Add the word embedding Layer
embedding_layer = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(input_layer)
embedding_layer = layers.SpatialDropout1D(0.3)(embedding_layer)
# Add the recurrent layer
layers.Bidirectional(layers.GRU(50, return_sequences=True))(embedding_layer)
# Add the convolutional Layer
conv_layer = layers.Convolution1D(100, 3, activation="relu")(embedding_layer)
# Add the pooling Layer
pooling_layer = layers.GlobalMaxPool1D()(conv_layer)
# Add the output Layers
output_layer1 = layers.Dense(50, activation="relu")(pooling_layer)
output_layer1 = layers.Dropout(0.25)(output_layer1)
    output_layer2 = layers.Dense(units=max(encoded_y) + 1, activation="softmax", name="output_layer")(output_layer1)
# Compile the model
model = models.Model(inputs=input_layer, outputs=output_layer2)
model.compile(optimizer=optimizers.Adam(), loss='sparse_categorical_crossentropy')
return model
classifier = create_rcnn()
accuracy = train_model(classifier, train_seq_x, train_y, valid_seq_x, is_neural_net=True)
print ("CNN, Word Embeddings", accuracy)
#%%
import pandas as pd
import numpy as np
# convert text to sequence of tokens and pad them to ensure equal length vectors
test_seq_x = sequence.pad_sequences(token.texts_to_sequences(testing_corpus), maxlen=70)
predicted_labels = classifier.predict(test_seq_x)
#classifier = linear_model.LogisticRegression()
# Linear Classifier on Word Level TF IDF Vectors
#accuracy = train_model(classifier, xtrain_tfidf, train_y, xvalid_tfidf)
#print (f"LR, WordLevel TF-IDF: {int(accuracy)}% ")
#testing_tfidf = tfidf_vect.transform(testing_corpus)
#predicted_labels = classifier.predict(testing_tfidf )
classification_dic = {'Issue': testing_description, 'Transformed Data': testing_corpus, 'Machine Cluster': predicted_labels}  # map each issue to its predicted cluster
predicted_frame = pd.DataFrame(classification_dic, index=[testing_ticket_numbers], columns=['Issue'])  # convert the dict into a dataframe indexed by ticket number
predicted_frame["Machine Classification"] = encoder.inverse_transform(np.argmax(predicted_labels, axis = 1))
# save to file
predicted_frame.to_excel(input_file + "_AdvancedResult.xlsx")
print ("Resuls written to " + input_file + "_AdvancedResult.xlsx")
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.argsort",
"numpy.array",
"pandas.read_excel",
"keras.layers.Dense",
"textblob.TextBlob",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.asarray",
"keras.models.Model",
"keras.layers.GlobalMaxPool1D",
"pandas.DataFrame",
"keras.optimizers.... | [((649, 709), 'pandas.read_excel', 'pandas.read_excel', (["(input_file + '.xlsx')"], {'sheet_name': '"""Sheet1"""'}), "(input_file + '.xlsx', sheet_name='Sheet1')\n", (666, 709), False, 'import pandas, xgboost, numpy, textblob, string\n'), ((2648, 2666), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (2664, 2666), False, 'import pandas, xgboost, numpy, textblob, string\n'), ((2769, 2797), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (2795, 2797), False, 'from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\n'), ((2947, 3007), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (["trainDF['text']", 'encoded_y'], {}), "(trainDF['text'], encoded_y)\n", (2979, 3007), False, 'from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\n'), ((3058, 3115), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""', 'token_pattern': '"""\\\\w{1,}"""'}), "(analyzer='word', token_pattern='\\\\w{1,}')\n", (3073, 3115), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((3350, 3426), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""word"""', 'token_pattern': '"""\\\\w{1,}"""', 'max_features': '(5000)'}), "(analyzer='word', token_pattern='\\\\w{1,}', max_features=5000)\n", (3365, 3426), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((3593, 3694), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""word"""', 'token_pattern': '"""\\\\w{1,}"""', 'ngram_range': '(2, 3)', 'max_features': '(5000)'}), "(analyzer='word', token_pattern='\\\\w{1,}', ngram_range=(2, 3\n ), max_features=5000)\n", (3608, 3694), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((3896, 3997), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""char"""', 'token_pattern': '"""\\\\w{1,}"""', 'ngram_range': '(2, 3)', 'max_features': '(5000)'}), "(analyzer='char', token_pattern='\\\\w{1,}', ngram_range=(2, 3\n ), max_features=5000)\n", (3911, 3997), False, 'from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n'), ((4460, 4476), 'keras.preprocessing.text.Tokenizer', 'text.Tokenizer', ([], {}), '()\n', (4474, 4476), False, 'from keras.preprocessing import text, sequence\n'), ((6610, 6710), 'sklearn.decomposition.LatentDirichletAllocation', 'decomposition.LatentDirichletAllocation', ([], {'n_components': '(20)', 'learning_method': '"""online"""', 'max_iter': '(20)'}), "(n_components=20, learning_method=\n 'online', max_iter=20)\n", (6649, 6710), False, 'from sklearn import decomposition, ensemble\n'), ((17126, 17214), 'pandas.DataFrame', 'pd.DataFrame', (['classification_dic'], {'index': '[testing_ticket_numbers]', 'columns': "['Issue']"}), "(classification_dic, index=[testing_ticket_numbers], columns=[\n 'Issue'])\n", (17138, 17214), True, 'import pandas as pd\n'), ((4386, 4428), 'numpy.asarray', 'numpy.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (4399, 4428), False, 'import pandas, xgboost, numpy, textblob, string\n'), ((15117, 15136), 'keras.layers.Input', 'layers.Input', (['(70,)'], {}), '((70,))\n', (15129, 15136), False, 'from keras import layers, models, optimizers\n'), ((15997, 16052), 'keras.models.Model', 
'models.Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer2'}), '(inputs=input_layer, outputs=output_layer2)\n', (16009, 16052), False, 'from keras import layers, models, optimizers\n'), ((17314, 17349), 'numpy.argmax', 'np.argmax', (['predicted_labels'], {'axis': '(1)'}), '(predicted_labels, axis=1)\n', (17323, 17349), True, 'import numpy as np\n'), ((5983, 6003), 'textblob.TextBlob', 'textblob.TextBlob', (['x'], {}), '(x)\n', (6000, 6003), False, 'import pandas, xgboost, numpy, textblob, string\n'), ((7771, 7815), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['predictions', 'valid_y'], {}), '(predictions, valid_y)\n', (7793, 7815), False, 'from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm\n'), ((15319, 15347), 'keras.layers.SpatialDropout1D', 'layers.SpatialDropout1D', (['(0.3)'], {}), '(0.3)\n', (15342, 15347), False, 'from keras import layers, models, optimizers\n'), ((15537, 15584), 'keras.layers.Convolution1D', 'layers.Convolution1D', (['(100)', '(3)'], {'activation': '"""relu"""'}), "(100, 3, activation='relu')\n", (15557, 15584), False, 'from keras import layers, models, optimizers\n'), ((15651, 15675), 'keras.layers.GlobalMaxPool1D', 'layers.GlobalMaxPool1D', ([], {}), '()\n', (15673, 15675), False, 'from keras import layers, models, optimizers\n'), ((15737, 15772), 'keras.layers.Dense', 'layers.Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (15749, 15772), False, 'from keras import layers, models, optimizers\n'), ((15808, 15828), 'keras.layers.Dropout', 'layers.Dropout', (['(0.25)'], {}), '(0.25)\n', (15822, 15828), False, 'from keras import layers, models, optimizers\n'), ((6955, 6973), 'numpy.array', 'numpy.array', (['vocab'], {}), '(vocab)\n', (6966, 6973), False, 'import pandas, xgboost, numpy, textblob, string\n'), ((6974, 6999), 'numpy.argsort', 'numpy.argsort', (['topic_dist'], {}), '(topic_dist)\n', (6987, 6999), False, 'import pandas, xgboost, numpy, textblob, string\n'), ((15425, 15462), 'keras.layers.GRU', 'layers.GRU', (['(50)'], {'return_sequences': '(True)'}), '(50, return_sequences=True)\n', (15435, 15462), False, 'from keras import layers, models, optimizers\n'), ((16081, 16098), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {}), '()\n', (16096, 16098), False, 'from keras import layers, models, optimizers\n')] |
import ast
import json
import pathlib
import subprocess
import time
import numpy as np
import requests
from util import logger
DETECTOR_MAP = {
'detectors': ['gpt-2'],
'gpt-detector': 'detector-large.pt',
'gltr-detector': ('gpt2-xl', 'BERT'),
'gpt-detector-server': 'http://localhost:8080/',
'gltr-detector-server': ('http://localhost:5001/', 'http://localhost:5002/')
}
SUB_PROCESSES = []
PATH_RA = pathlib.Path.cwd() / 'reliability_assessment'
PATH_NEURAL = PATH_RA / 'neural_filter'
class NeuralVerifier:
def __init__(self):
self.default_logger = logger.get_logger('neural_verifier')
for detector in DETECTOR_MAP['detectors']:
self.__download_models(mode=detector)
# python run_discrimination.py --input_data input_data.jsonl --output_dir models/mega-0.96 --config_file lm/configs/mega.json --predict_val true
def init_gpt_model(self, model: str = DETECTOR_MAP['gpt-detector']):
self.default_logger.info("Initialize GPT-2 Neural Verifier")
gpt_2_server = subprocess.Popen(["python", str(PATH_NEURAL / 'roberta_detector' / 'server.py'),
str(PATH_NEURAL / 'roberta_detector' / 'models' / model)])
SUB_PROCESSES.append(gpt_2_server)
while True:
try:
if requests.get(f"{DETECTOR_MAP['gpt-detector-server']}").status_code is not None:
self.default_logger.info("GPT-2 Neural Verifier Initialized")
break
except requests.exceptions.ConnectionError:
continue
def init_gltr_models(self, model: str = DETECTOR_MAP['gltr-detector'][0]):
if model not in DETECTOR_MAP['gltr-detector']:
raise RuntimeError
default_port = DETECTOR_MAP['gltr-detector-server'][DETECTOR_MAP['gltr-detector'].index(model)][-5:-1]
self.default_logger.info(f"Initialize GLTR {model}")
gltr_gpt_server = subprocess.Popen(
["python", str(PATH_NEURAL / 'gltr' / 'server.py'), "--model", model, "--port",
f"{default_port}"])
SUB_PROCESSES.append(gltr_gpt_server)
while True:
try:
if requests.get(f'http://localhost:{default_port}/').status_code is not None:
self.default_logger.info(f"GLTR {model} Initialized")
break
except requests.exceptions.ConnectionError:
continue
def __download_models(self, mode: str = 'gpt-2'):
if mode == 'gpt-2':
dir_prefix = PATH_NEURAL / 'roberta_detector' / 'models'
base_model = pathlib.Path(dir_prefix / 'detector-base.pt')
if not base_model.exists():
open(str(base_model), 'wb').write(
requests.get(
'https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-base.pt').content)
self.default_logger.info(f'{mode} base model downloaded')
else:
self.default_logger.info(f'{mode} base model exists')
large_model = pathlib.Path(dir_prefix / 'detector-large.pt')
if not large_model.exists():
open(str(large_model), 'wb').write(
requests.get(
'https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-large.pt').content)
self.default_logger.info(f'{mode} large model downloaded')
else:
self.default_logger.info(f'{mode} large model exists')
    def detect(self, text, mode: str = DETECTOR_MAP['gpt-detector']) -> dict:
"""
Output Format for GPT-2: {'all_tokens', 'used_tokens', 'real_probability', 'fake_probability'}
Output Format for GLTR: {'bpe_strings', 'pred_topk', 'real_topk', 'frac_hist'}
        Note that BERT may return no valid result when the tokenized text is empty; such cases can simply be ignored.
:param text: Tweet Text (Without Non-ASCII Code)
:param mode: 'gpt-2' or 'gltr' currently supported
:return:
"""
if mode == DETECTOR_MAP['gpt-detector']:
# Payload text should not have # symbols or it will ignore following text - less tokens
url = f"{DETECTOR_MAP['gpt-detector-server']}?={text.replace('#', '')}"
payload = {}
headers = {}
response = None
for retry_limit in range(5):
try:
response = requests.request("GET", url, headers=headers, data=payload)
break
except requests.exceptions.ConnectionError:
time.sleep(1)
continue
# self.default_logger.info(f'{mode}: {response.text}')
# Return a dict representation of the returned text
return ast.literal_eval(response.text) if response is not None else {}
elif mode in DETECTOR_MAP['gltr-detector']:
gltr_type = mode
gltr_server = DETECTOR_MAP['gltr-detector-server'][DETECTOR_MAP['gltr-detector'].index(gltr_type)]
url = f"{gltr_server}api/analyze"
payload = json.dumps({
"project": f"{gltr_type}",
"text": text
})
headers = {
'Content-Type': 'application/json'
}
response = None
for retry_limit in range(5):
try:
response = requests.request("POST", url, headers=headers, data=payload)
break
except requests.exceptions.ConnectionError:
time.sleep(1)
continue
if response is not None and response.ok:
gltr_result = json.loads(response.text)['result']
# GLTR['result'].keys() = 'bpe_strings', 'pred_topk', 'real_topk'
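                # frac_hist (built below) is a 10-bin histogram of
                # p(actual token) / p(top-1 token), a compact fingerprint of how
                # predictable the text looks to the language model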
frac_distribution = [float(real_topk[1]) / float(gltr_result['pred_topk'][index][0][1])
for index, real_topk in enumerate(gltr_result['real_topk'])]
frac_histogram = np.histogram(frac_distribution, bins=10, range=(0.0, 1.0), density=False)
gltr_result['frac_hist'] = frac_histogram[0].tolist()
output_data = gltr_result
else:
self.default_logger.error(f'GLTR Exception: {payload}')
output_data = {}
return output_data
else:
raise NotImplementedError
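# usage sketch (illustrative; the detector servers must be running first):
#   nv = NeuralVerifier()
#   nv.init_gpt_model()
#   scores = nv.detect("some tweet text")  # GPT-2 detector probabilities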
| [
"numpy.histogram",
"json.loads",
"pathlib.Path",
"pathlib.Path.cwd",
"json.dumps",
"requests.request",
"requests.get",
"ast.literal_eval",
"time.sleep",
"util.logger.get_logger"
] | [((450, 468), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (466, 468), False, 'import pathlib\n'), ((614, 650), 'util.logger.get_logger', 'logger.get_logger', (['"""neural_verifier"""'], {}), "('neural_verifier')\n", (631, 650), False, 'from util import logger\n'), ((2663, 2708), 'pathlib.Path', 'pathlib.Path', (["(dir_prefix / 'detector-base.pt')"], {}), "(dir_prefix / 'detector-base.pt')\n", (2675, 2708), False, 'import pathlib\n'), ((3135, 3181), 'pathlib.Path', 'pathlib.Path', (["(dir_prefix / 'detector-large.pt')"], {}), "(dir_prefix / 'detector-large.pt')\n", (3147, 3181), False, 'import pathlib\n'), ((4885, 4916), 'ast.literal_eval', 'ast.literal_eval', (['response.text'], {}), '(response.text)\n', (4901, 4916), False, 'import ast\n'), ((5209, 5262), 'json.dumps', 'json.dumps', (["{'project': f'{gltr_type}', 'text': text}"], {}), "({'project': f'{gltr_type}', 'text': text})\n", (5219, 5262), False, 'import json\n'), ((4526, 4585), 'requests.request', 'requests.request', (['"""GET"""', 'url'], {'headers': 'headers', 'data': 'payload'}), "('GET', url, headers=headers, data=payload)\n", (4542, 4585), False, 'import requests\n'), ((6169, 6242), 'numpy.histogram', 'np.histogram', (['frac_distribution'], {'bins': '(10)', 'range': '(0.0, 1.0)', 'density': '(False)'}), '(frac_distribution, bins=10, range=(0.0, 1.0), density=False)\n', (6181, 6242), True, 'import numpy as np\n'), ((1351, 1405), 'requests.get', 'requests.get', (['f"""{DETECTOR_MAP[\'gpt-detector-server\']}"""'], {}), '(f"{DETECTOR_MAP[\'gpt-detector-server\']}")\n', (1363, 1405), False, 'import requests\n'), ((2230, 2279), 'requests.get', 'requests.get', (['f"""http://localhost:{default_port}/"""'], {}), "(f'http://localhost:{default_port}/')\n", (2242, 2279), False, 'import requests\n'), ((2820, 2922), 'requests.get', 'requests.get', (['"""https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-base.pt"""'], {}), "(\n 'https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-base.pt'\n )\n", (2832, 2922), False, 'import requests\n'), ((3295, 3398), 'requests.get', 'requests.get', (['"""https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-large.pt"""'], {}), "(\n 'https://openaipublic.azureedge.net/gpt-2/detector-models/v1/detector-large.pt'\n )\n", (3307, 3398), False, 'import requests\n'), ((4692, 4705), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4702, 4705), False, 'import time\n'), ((5522, 5582), 'requests.request', 'requests.request', (['"""POST"""', 'url'], {'headers': 'headers', 'data': 'payload'}), "('POST', url, headers=headers, data=payload)\n", (5538, 5582), False, 'import requests\n'), ((5816, 5841), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (5826, 5841), False, 'import json\n'), ((5689, 5702), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5699, 5702), False, 'import time\n')] |
# Optimizing Rastrigin's function, using a combination of EA and BO
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import GPy
import GPyOpt
from GPyOpt.methods import BayesianOptimization
import time
import pickle
import random
random.seed(a=1)
# n-dimensional Rastrigin function, global optimum of 0 at the origin
# X is a list [x_i]
def rastrigin(X):
# X is a n dimensional row vector
A = 10
dim = 20
X = X.reshape((dim, 1))
return dim*A + sum([(x**2 - A * np.cos(2 * math.pi * x)) for x in X])
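# illustrative check (commented out): the global optimum sits at the origin,
#   rastrigin(np.zeros(20))  # == 0.0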
kernel = GPy.kern.Matern52(input_dim=20, variance=1.0, lengthscale=0.2)
bds = [{'name': 'X1', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X2', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X3', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X4', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X5', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X6', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X7', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X8', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X9', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X10', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X11', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X12', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X13', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X14', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X15', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X16', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X17', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X18', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X19', 'type': 'continuous', 'domain': (-5, 5)},
{'name': 'X20', 'type': 'continuous', 'domain': (-5, 5)}]
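# the 20 identical bound dicts above could equivalently be generated:
#   bds = [{'name': f'X{i+1}', 'type': 'continuous', 'domain': (-5, 5)} for i in range(20)]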
optimizer = BayesianOptimization(f=rastrigin,
domain=bds,
model_type='GP',
kernel=kernel,
acquisition_type ='EI',
maximize=False)
t0 = time.time()
optimizer.run_optimization(max_iter=100, max_time=3600)
t1 = time.time()
print("time:")
print(t1-t0)
t_bo = t1-t0
#optimizer.plot_acquisition()
optimizer.plot_convergence()
# get the candidate solutions and their evaluations
ins = optimizer.get_evaluations()[0]
outs = optimizer.get_evaluations()[1]
outputs = outs.flatten()
# sort ascending by fitness, then reverse to get descending order
indices = np.argsort(outputs)
ins_sorted = ins[indices]
ins_sorted_inverse = ins_sorted[::-1]
outputs.sort()
reverse_array = outputs[::-1]
#f = open('BO-Rastrigin-best-20.dat', 'ab')
#pickle.dump(reverse_array , f)
#f.close()
print("The minimum value obtained by the function was:")
print(optimizer.fx_opt)
print(optimizer.x_opt)
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import math
import matplotlib.pyplot as plt
import numpy as np
import random
from deap import creator, base, tools, algorithms
import pickle
counter_var = 0
# n-dimensional Rastrigin function, global optimum of 0 at the origin
# X is a list [x_i]
def rastrigin2(X, A=10):
dim=20
    return dim*A + sum([(x**2 - A * np.cos(2 * math.pi * x)) for x in X]),  # trailing comma: DEAP expects a fitness tuple
def genFunky(icls, ndim, ins):
global counter_var
genome = list()
# choose randomly from the last 50 solutions from BO
#inverse_ins = ins[::-1]
#candidate = inverse_ins[random.randint(0, 51)]
#candidate = inverse_ins[counter_var] # change input to ins
candidate = ins[counter_var]
counter_var += 1
for i in np.arange(0, ndim):
genome.append(candidate[i])
return icls(genome)
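# genFunky seeds each EA individual with a distinct candidate from the array it
# is given (here the BO evaluations, reversed); the global counter_var walks
# through that array, so the initial population is biased toward solutions the
# Bayesian optimizer has already explored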
# dimention of the optimization
dim = 20
popSize = 50
genSize = 2000
random.seed(a=1)
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register("attr", random.random)
#toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr, n=dim)
# bias the initial population
toolbox.register("individual", genFunky, creator.Individual, dim, ins[::-1])#ins_sorted) # ins)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", rastrigin2)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutGaussian, mu=0, sigma=1, indpb=0.2) # indpb: Independent probability for each attribute to be mutated
toolbox.register("select", tools.selTournament, tournsize=3)
#population = toolbox.population(n=popSize)
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", np.mean, axis=0)
stats.register("std", np.std, axis=0)
stats.register("min", np.min, axis=0)
stats.register("max", np.max, axis=0)
toolbox.pop_size = popSize
toolbox.max_gen = genSize
toolbox.mut_prob = 0.1
logbook = tools.Logbook()
logbook.header = ["gen", "evals"] + stats.fields
hof = tools.HallOfFame(1, similar=np.array_equal) #can change the size
def run_ea(toolbox, stats=stats, verbose=True, hof=hof):
pop = toolbox.population(n=toolbox.pop_size)
pop = toolbox.select(pop, len(pop))
return algorithms.eaSimple(pop, toolbox,
                               cxpb=0,  # no crossover (was 1 - toolbox.mut_prob)
mutpb=toolbox.mut_prob,
stats=stats,
ngen=toolbox.max_gen,
verbose=verbose,
halloffame=hof)
t0 = time.time()
res,log = run_ea(toolbox, stats=stats, verbose=True, hof=hof)
t1 = time.time()
t_ea = t1-t0
# print info for best solution found:
print("-----")
print(len(hof))
best = hof.items[0]
print("-- Best Individual = ", best)
print("-- Best Fitness = ", best.fitness.values)
avg = log.select("avg")
std = log.select("std")
min_ = log.select("min")
max_ = log.select("max")
plt.figure(figsize=(6.4,4.8))
plt.plot(reverse_array, color='red', label='Bayesian Optimization')
plt.plot(list(range(105, 105+2000+1)), min_, color='blue', label='Evolutionary Algorithm')
#plt.fill_between(list(range(0, toolbox.max_gen+1)), avg-std, avg+std, color='cornflowerblue', alpha=0.2)
plt.xlabel("Generations/Iterations")
plt.ylabel("Objective Value")
plt.title("EA + BO", fontsize='small')
plt.grid(color='skyblue', linestyle=':', linewidth=0.5)
plt.legend(loc="upper right")
plt.tight_layout()
plt.ylim(bottom=0)
plt.show()
print("time:")
print(t_bo+t_ea)
#f = open('GA-Rastrigin-best-20.dat', 'ab')
#pickle.dump(min_ , f)
#f.close()
#f = open('GA-Rastrigin-avg-20.dat', 'ab')
#pickle.dump(avg , f)
#f.close() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"GPyOpt.methods.BayesianOptimization",
"numpy.argsort",
"numpy.arange",
"deap.creator.create",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"deap.tools.HallOfFame",
"matplotlib.pyplot.ylim",
"deap.tools.Logbook",
"numpy.cos",
"ma... | [((324, 340), 'random.seed', 'random.seed', ([], {'a': '(1)'}), '(a=1)\n', (335, 340), False, 'import random\n'), ((609, 671), 'GPy.kern.Matern52', 'GPy.kern.Matern52', ([], {'input_dim': '(20)', 'variance': '(1.0)', 'lengthscale': '(0.2)'}), '(input_dim=20, variance=1.0, lengthscale=0.2)\n', (626, 671), False, 'import GPy\n'), ((1844, 1965), 'GPyOpt.methods.BayesianOptimization', 'BayesianOptimization', ([], {'f': 'rastrigin', 'domain': 'bds', 'model_type': '"""GP"""', 'kernel': 'kernel', 'acquisition_type': '"""EI"""', 'maximize': '(False)'}), "(f=rastrigin, domain=bds, model_type='GP', kernel=\n kernel, acquisition_type='EI', maximize=False)\n", (1864, 1965), False, 'from GPyOpt.methods import BayesianOptimization\n'), ((2134, 2145), 'time.time', 'time.time', ([], {}), '()\n', (2143, 2145), False, 'import time\n'), ((2207, 2218), 'time.time', 'time.time', ([], {}), '()\n', (2216, 2218), False, 'import time\n'), ((2510, 2529), 'numpy.argsort', 'np.argsort', (['outputs'], {}), '(outputs)\n', (2520, 2529), True, 'import numpy as np\n'), ((3742, 3758), 'random.seed', 'random.seed', ([], {'a': '(1)'}), '(a=1)\n', (3753, 3758), False, 'import random\n'), ((3760, 3819), 'deap.creator.create', 'creator.create', (['"""FitnessMin"""', 'base.Fitness'], {'weights': '(-1.0,)'}), "('FitnessMin', base.Fitness, weights=(-1.0,))\n", (3774, 3819), False, 'from deap import creator, base, tools, algorithms\n'), ((3820, 3882), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'list'], {'fitness': 'creator.FitnessMin'}), "('Individual', list, fitness=creator.FitnessMin)\n", (3834, 3882), False, 'from deap import creator, base, tools, algorithms\n'), ((3894, 3908), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (3906, 3908), False, 'from deap import creator, base, tools, algorithms\n'), ((4580, 4632), 'deap.tools.Statistics', 'tools.Statistics', ([], {'key': '(lambda ind: ind.fitness.values)'}), '(key=lambda ind: ind.fitness.values)\n', (4596, 4632), False, 'from deap import creator, base, tools, algorithms\n'), ((4874, 4889), 'deap.tools.Logbook', 'tools.Logbook', ([], {}), '()\n', (4887, 4889), False, 'from deap import creator, base, tools, algorithms\n'), ((4946, 4989), 'deap.tools.HallOfFame', 'tools.HallOfFame', (['(1)'], {'similar': 'np.array_equal'}), '(1, similar=np.array_equal)\n', (4962, 4989), False, 'from deap import creator, base, tools, algorithms\n'), ((5570, 5581), 'time.time', 'time.time', ([], {}), '()\n', (5579, 5581), False, 'import time\n'), ((5649, 5660), 'time.time', 'time.time', ([], {}), '()\n', (5658, 5660), False, 'import time\n'), ((5950, 5980), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 4.8)'}), '(figsize=(6.4, 4.8))\n', (5960, 5980), True, 'import matplotlib.pyplot as plt\n'), ((5980, 6047), 'matplotlib.pyplot.plot', 'plt.plot', (['reverse_array'], {'color': '"""red"""', 'label': '"""Bayesian Optimization"""'}), "(reverse_array, color='red', label='Bayesian Optimization')\n", (5988, 6047), True, 'import matplotlib.pyplot as plt\n'), ((6245, 6281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generations/Iterations"""'], {}), "('Generations/Iterations')\n", (6255, 6281), True, 'import matplotlib.pyplot as plt\n'), ((6282, 6311), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Objective Value"""'], {}), "('Objective Value')\n", (6292, 6311), True, 'import matplotlib.pyplot as plt\n'), ((6312, 6350), 'matplotlib.pyplot.title', 'plt.title', (['"""EA + BO"""'], {'fontsize': '"""small"""'}), "('EA + BO', fontsize='small')\n", 
(6321, 6350), True, 'import matplotlib.pyplot as plt\n'), ((6351, 6406), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""skyblue"""', 'linestyle': '""":"""', 'linewidth': '(0.5)'}), "(color='skyblue', linestyle=':', linewidth=0.5)\n", (6359, 6406), True, 'import matplotlib.pyplot as plt\n'), ((6407, 6436), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (6417, 6436), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6455), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6453, 6455), True, 'import matplotlib.pyplot as plt\n'), ((6456, 6474), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'bottom': '(0)'}), '(bottom=0)\n', (6464, 6474), True, 'import matplotlib.pyplot as plt\n'), ((6475, 6485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6483, 6485), True, 'import matplotlib.pyplot as plt\n'), ((3590, 3608), 'numpy.arange', 'np.arange', (['(0)', 'ndim'], {}), '(0, ndim)\n', (3599, 3608), True, 'import numpy as np\n'), ((5170, 5308), 'deap.algorithms.eaSimple', 'algorithms.eaSimple', (['pop', 'toolbox'], {'cxpb': '(0)', 'mutpb': 'toolbox.mut_prob', 'stats': 'stats', 'ngen': 'toolbox.max_gen', 'verbose': 'verbose', 'halloffame': 'hof'}), '(pop, toolbox, cxpb=0, mutpb=toolbox.mut_prob, stats=\n stats, ngen=toolbox.max_gen, verbose=verbose, halloffame=hof)\n', (5189, 5308), False, 'from deap import creator, base, tools, algorithms\n'), ((561, 584), 'numpy.cos', 'np.cos', (['(2 * math.pi * x)'], {}), '(2 * math.pi * x)\n', (567, 584), True, 'import numpy as np\n'), ((3206, 3229), 'numpy.cos', 'np.cos', (['(2 * math.pi * x)'], {}), '(2 * math.pi * x)\n', (3212, 3229), True, 'import numpy as np\n')] |
from PySide2.QtWidgets import QApplication
import numpy as np
import visoptslider
if __name__ == "__main__":
app = QApplication()
# Define the target function and bound
num_dimensions = 3
def target_function(x):
# Rosenbrock function
value = 0.0
for i in range(x.shape[0] - 1):
value += 100.0 * (x[i + 1] - x[i] * x[i]) * (x[i + 1] - x[i] * x[i]) + (1.0 - x[i]) * (1.0 - x[i])
return value
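    # illustrative: the Rosenbrock minimum is at x = (1, 1, 1), where the value is 0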
upper_bound = np.array([+2.0, +2.0, +2.0])
lower_bound = np.array([-2.0, -2.0, -2.0])
maximum_value = 200.0
minimum_value = 0.0
# Optional settings
labels = ["x1", "x2", "x3"]
show_values = True
resolution = 200
# Instantiate and initialize the widget
sliders_widget = visoptslider.SlidersWidget()
sliders_widget.initialize(num_dimensions=num_dimensions,
target_function=target_function,
upper_bound=upper_bound,
lower_bound=lower_bound,
maximum_value=maximum_value,
minimum_value=minimum_value,
labels=labels,
show_values=show_values,
resolution=resolution)
# Set a callback function
sliders_widget.callback = lambda: print(sliders_widget.argument)
# Show the widget
sliders_widget.show()
app.exec_()
| [
"numpy.array",
"PySide2.QtWidgets.QApplication",
"visoptslider.SlidersWidget"
] | [((120, 134), 'PySide2.QtWidgets.QApplication', 'QApplication', ([], {}), '()\n', (132, 134), False, 'from PySide2.QtWidgets import QApplication\n'), ((472, 500), 'numpy.array', 'np.array', (['[+2.0, +2.0, +2.0]'], {}), '([+2.0, +2.0, +2.0])\n', (480, 500), True, 'import numpy as np\n'), ((519, 547), 'numpy.array', 'np.array', (['[-2.0, -2.0, -2.0]'], {}), '([-2.0, -2.0, -2.0])\n', (527, 547), True, 'import numpy as np\n'), ((765, 793), 'visoptslider.SlidersWidget', 'visoptslider.SlidersWidget', ([], {}), '()\n', (791, 793), False, 'import visoptslider\n')] |
import numpy as np
import pandas as pd
import os
import glob
import arrow
import csv
def get_stockpools(date, stocknum=(10, 20)):
first = str(int(date[:4]) - 2) + date[4:]
date0 = date[:4] + '-' + date[4:]
date0 = arrow.get(date0)
second = date0.shift(months=-1)
second = second.format('YYYYMM')
fix = 'SP_' + first + '_' + second + '_'
stock_file_list = [fix + str(stocknum[a]) + '.txt' for a in range(len(stocknum))]
return stock_file_list
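# example (illustrative): get_stockpools('201802')
#   -> ['SP_201602_201801_10.txt', 'SP_201602_201801_20.txt']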
def kill_blank(data):
if data == ' ':
data = np.nan
return data
prefix = '2to19'
# prefix = '2to22'
usedIndex = 'Index1'
# usedIndex = 'Index2'
# modelParam = 'MP1'
modelParam = 'MP2'
# modelParam = 'MP3'
# modelParam = 'MP4'
predMonthList = []
predMonthList.extend(['201502', '201503', '201504', '201505', '201506',
'201507', '201508', '201509', '201510', '201511', '201512'])
predMonthList.extend(['201601', '201602', '201603', '201604', '201605', '201606',
'201607', '201608', '201609', '201610', '201611', '201612'])
predMonthList.extend(['201701', '201702', '201703', '201704', '201705', '201706',
'201707', '201708', '201709', '201710', '201711', '201712'])
predMonthList.extend(['201801', '201802'])
stockNum = (10, 20)
path0 = 'D:/rongshidata/experiment_data_1'
closePrice = pd.read_csv(path0 + '/close.txt', sep="\t", header=0, skiprows=1,
index_col=0, parse_dates=True,
date_parser=lambda dates: pd.datetime.strptime(dates, '%Y%m%d'))
closePrice = closePrice.dropna(axis=1, how='all')
df_index = list(closePrice.index)
for predMonth in predMonthList:
stockPools = get_stockpools(predMonth, stocknum=stockNum)
s = '{0}/monthly/ReportXgb_V{1}_{2}_{3}/for{4}/'
res_dir = s.format(path0, prefix.replace('to', '_V'), usedIndex, modelParam, predMonth)
config = res_dir + 'config' + predMonth + '.csv'
configDates = []
with open(config) as f:
r = csv.reader(f)
for row in r:
configDates.append(tuple(row))
line1 = ['folder', 'stock']
lines = []
for configDate in configDates:
sd = ':'.join(configDate)
line1.append(sd + '_TR')
s1 = '{0}/monthly/combination{1}_{2}/comb{3}.txt'
combFile = s1.format(path0, prefix, modelParam, predMonth)
combData = pd.read_csv(combFile, sep='\t', header=None, names=['feature', 'quantile'])
folder1 = glob.glob(path0 + '/monthly/end/' + 'feature_v*')
for feature_v in folder1:
folder2 = glob.glob(feature_v + '/prob/' + '*m_*_*')
feature_v = os.path.basename(feature_v)
for date_range in folder2:
# folder3 = glob.glob(date_range + '/PNE*_NL_*')
folder3 = glob.glob(date_range + '/PNE_*')
date_range = os.path.basename(date_range)
sentence = (combData['feature'] == feature_v) & (combData['quantile'] == date_range)
if not sentence.any():
continue
for path in folder3:
for stockPool in stockPools:
period_returns = [path, stockPool[17:-4]]
directory = path + '/' + stockPool
sp = pd.read_csv(directory, skiprows=1, header=None, sep='\t', prefix='S')
sp = sp.dropna(axis=1, how='all')
for probDate in range(1, len(sp.index), 2):
sp.drop(probDate, inplace=True)
sp = sp.applymap(kill_blank)
dateList = list(sp.S0)
sp.S0 = sp.S0.apply(lambda y: y[:4] + '-' + y[4:])
returnList = []
for effectDate in range(len(sp.index)):
spx = sp.iloc[effectDate]
spx = spx.dropna()
if len(spx) > 1:
datex = spx[0]
lsx = list(spx[1:])
tmp = closePrice.loc[datex, lsx]
i = df_index[df_index.index(tmp.index[0]) - 1]
j = tmp.index[-1]
tmp = closePrice.loc[i:j, lsx]
returns = np.mean((tmp.iloc[-1] - tmp.iloc[0]) / tmp.iloc[0])
returnList.append(returns)
else:
returnList.append(0)
for configDate in configDates:
try:
start = dateList.index(configDate[0])
end = dateList.index(configDate[1]) + 1
returnData = np.array(returnList[start:end]) + 1
se = end - start
result = ((np.prod(returnData) ** (12 / se)) - 1) * 100
period_returns.append(str(result) + '%')
except ValueError:
period_returns.append('')
lines.append(period_returns)
print(predMonth, feature_v, date_range, 'Done')
report = res_dir + 'ReportTR' + predMonth + '.csv'
with open(report, 'w', newline='') as fw:
w = csv.writer(fw)
w.writerow(line1)
w.writerows(lines)
| [
"numpy.mean",
"numpy.prod",
"pandas.read_csv",
"csv.writer",
"pandas.datetime.strptime",
"numpy.array",
"arrow.get",
"os.path.basename",
"csv.reader",
"glob.glob"
] | [((229, 245), 'arrow.get', 'arrow.get', (['date0'], {}), '(date0)\n', (238, 245), False, 'import arrow\n'), ((2363, 2438), 'pandas.read_csv', 'pd.read_csv', (['combFile'], {'sep': '"""\t"""', 'header': 'None', 'names': "['feature', 'quantile']"}), "(combFile, sep='\\t', header=None, names=['feature', 'quantile'])\n", (2374, 2438), True, 'import pandas as pd\n'), ((2454, 2503), 'glob.glob', 'glob.glob', (["(path0 + '/monthly/end/' + 'feature_v*')"], {}), "(path0 + '/monthly/end/' + 'feature_v*')\n", (2463, 2503), False, 'import glob\n'), ((2001, 2014), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (2011, 2014), False, 'import csv\n'), ((2552, 2594), 'glob.glob', 'glob.glob', (["(feature_v + '/prob/' + '*m_*_*')"], {}), "(feature_v + '/prob/' + '*m_*_*')\n", (2561, 2594), False, 'import glob\n'), ((2615, 2642), 'os.path.basename', 'os.path.basename', (['feature_v'], {}), '(feature_v)\n', (2631, 2642), False, 'import os\n'), ((5233, 5247), 'csv.writer', 'csv.writer', (['fw'], {}), '(fw)\n', (5243, 5247), False, 'import csv\n'), ((1524, 1561), 'pandas.datetime.strptime', 'pd.datetime.strptime', (['dates', '"""%Y%m%d"""'], {}), "(dates, '%Y%m%d')\n", (1544, 1561), True, 'import pandas as pd\n'), ((2761, 2793), 'glob.glob', 'glob.glob', (["(date_range + '/PNE_*')"], {}), "(date_range + '/PNE_*')\n", (2770, 2793), False, 'import glob\n'), ((2819, 2847), 'os.path.basename', 'os.path.basename', (['date_range'], {}), '(date_range)\n', (2835, 2847), False, 'import os\n'), ((3226, 3295), 'pandas.read_csv', 'pd.read_csv', (['directory'], {'skiprows': '(1)', 'header': 'None', 'sep': '"""\t"""', 'prefix': '"""S"""'}), "(directory, skiprows=1, header=None, sep='\\t', prefix='S')\n", (3237, 3295), True, 'import pandas as pd\n'), ((4235, 4286), 'numpy.mean', 'np.mean', (['((tmp.iloc[-1] - tmp.iloc[0]) / tmp.iloc[0])'], {}), '((tmp.iloc[-1] - tmp.iloc[0]) / tmp.iloc[0])\n', (4242, 4286), True, 'import numpy as np\n'), ((4677, 4708), 'numpy.array', 'np.array', (['returnList[start:end]'], {}), '(returnList[start:end])\n', (4685, 4708), True, 'import numpy as np\n'), ((4797, 4816), 'numpy.prod', 'np.prod', (['returnData'], {}), '(returnData)\n', (4804, 4816), True, 'import numpy as np\n')] |
import json
import numpy as np
import os
import tensorflow as tf
def read_tensor_from_byte_array(byteArray,
input_height=224,
input_width=224,
input_mean=127.5,
input_std=127.5):
    image_reader = tf.image.decode_image(byteArray, channels=3)
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
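# with mean = std = 127.5 the uint8 pixel range [0, 255] is mapped to [-1, 1],
# the scaling commonly expected by retrained MobileNet-style graphs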
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def label_image(data):
t = read_tensor_from_byte_array(data)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
with tf.Session(graph=graph) as sess:
results = sess.run(output_operation.outputs[0], {
input_operation.outputs[0]: t
})
results = np.squeeze(results)
top_k = results.argsort()[-3:][::-1]
return [{labels[i]:str(results[i])} for i in top_k]
from azureml.core.model import Model
from azureml.contrib.services.aml_request import AMLRequest, rawhttp
from azureml.contrib.services.aml_response import AMLResponse
localmode = False
def init():
model_path=''
with open('model.json','r') as f:
model = json.load(f)
model_path = Model.get_model_path(model['model_name'])
global input_layer, output_layer, graph, labels
if localmode:
model_path = os.path.join(model_path,'model')
model_file = os.path.join(model_path,"retrained_graph.pb")
graph = load_graph(model_file)
label_file = os.path.join(model_path,"output_labels.txt")
labels = load_labels(label_file)
input_layer = "input"
output_layer = "final_result"
@rawhttp
def run(request):
print("This is run()")
print("Request: [{0}]".format(request))
if request.method == 'GET':
respBody = str.encode(request.full_path)
return AMLResponse(respBody, 200)
elif request.method == 'POST':
data = request.get_data(False)
print("Data Length:[{}]".format(len(data)))
labels = label_image(data)
print("Labels:[{}]".format(json.dumps(labels)))
respBody = json.dumps(labels)
return AMLResponse(respBody,200,json_str=True)
else:
return AMLResponse("bad request", 500)
if __name__ == "__main__":
localmode = True
model_name=''
with open('model.json','r') as f:
model = json.load(f)
model_name = model['model_name']
try:
Model.get_model_path(model_name)
except:
from azureml.core import Workspace
ws = Workspace.from_config()
Model(ws, model_name).download(model_name)
init()
file_name = "test-image/tulip.jpg"
with open(file_name, "rb") as binary_file:
data = binary_file.read()
print("Data Length:[{}]".format(len(data)))
result = label_image(data)
print(json.dumps(result))
| [
"azureml.core.Workspace.from_config",
"tensorflow.Graph",
"azureml.contrib.services.aml_response.AMLResponse",
"tensorflow.gfile.GFile",
"tensorflow.Session",
"tensorflow.image.resize_bilinear",
"os.path.join",
"json.dumps",
"tensorflow.GraphDef",
"numpy.squeeze",
"azureml.core.model.Model.get_m... | [((383, 427), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['byteArray'], {'channels': '(3)'}), '(byteArray, channels=3)\n', (404, 427), True, 'import tensorflow as tf\n'), ((444, 477), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (451, 477), True, 'import tensorflow as tf\n'), ((496, 527), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (510, 527), True, 'import tensorflow as tf\n'), ((540, 608), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (564, 608), True, 'import tensorflow as tf\n'), ((692, 704), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (702, 704), True, 'import tensorflow as tf\n'), ((977, 987), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (985, 987), True, 'import tensorflow as tf\n'), ((1002, 1015), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1013, 1015), True, 'import tensorflow as tf\n'), ((1615, 1634), 'numpy.squeeze', 'np.squeeze', (['results'], {}), '(results)\n', (1625, 1634), True, 'import numpy as np\n'), ((2229, 2275), 'os.path.join', 'os.path.join', (['model_path', '"""retrained_graph.pb"""'], {}), "(model_path, 'retrained_graph.pb')\n", (2241, 2275), False, 'import os\n'), ((2327, 2372), 'os.path.join', 'os.path.join', (['model_path', '"""output_labels.txt"""'], {}), "(model_path, 'output_labels.txt')\n", (2339, 2372), False, 'import os\n'), ((634, 668), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (645, 668), True, 'import tensorflow as tf\n'), ((1124, 1154), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {}), '(graph_def)\n', (1143, 1154), True, 'import tensorflow as tf\n'), ((1457, 1480), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (1467, 1480), True, 'import tensorflow as tf\n'), ((2012, 2024), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2021, 2024), False, 'import json\n'), ((2046, 2087), 'azureml.core.model.Model.get_model_path', 'Model.get_model_path', (["model['model_name']"], {}), "(model['model_name'])\n", (2066, 2087), False, 'from azureml.core.model import Model\n'), ((2179, 2212), 'os.path.join', 'os.path.join', (['model_path', '"""model"""'], {}), "(model_path, 'model')\n", (2191, 2212), False, 'import os\n'), ((2668, 2694), 'azureml.contrib.services.aml_response.AMLResponse', 'AMLResponse', (['respBody', '(200)'], {}), '(respBody, 200)\n', (2679, 2694), False, 'from azureml.contrib.services.aml_response import AMLResponse\n'), ((3174, 3186), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3183, 3186), False, 'import json\n'), ((3237, 3269), 'azureml.core.model.Model.get_model_path', 'Model.get_model_path', (['model_name'], {}), '(model_name)\n', (3257, 3269), False, 'from azureml.core.model import Model\n'), ((822, 848), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['label_file'], {}), '(label_file)\n', (836, 848), True, 'import tensorflow as tf\n'), ((2931, 2949), 'json.dumps', 'json.dumps', (['labels'], {}), '(labels)\n', (2941, 2949), False, 'import json\n'), ((2965, 3006), 'azureml.contrib.services.aml_response.AMLResponse', 'AMLResponse', (['respBody', '(200)'], {'json_str': '(True)'}), '(respBody, 200, json_str=True)\n', (2976, 3006), False, 'from azureml.contrib.services.aml_response import AMLResponse\n'), ((3030, 3061), 
'azureml.contrib.services.aml_response.AMLResponse', 'AMLResponse', (['"""bad request"""', '(500)'], {}), "('bad request', 500)\n", (3041, 3061), False, 'from azureml.contrib.services.aml_response import AMLResponse\n'), ((3332, 3355), 'azureml.core.Workspace.from_config', 'Workspace.from_config', ([], {}), '()\n', (3353, 3355), False, 'from azureml.core import Workspace\n'), ((3617, 3635), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3627, 3635), False, 'import json\n'), ((2891, 2909), 'json.dumps', 'json.dumps', (['labels'], {}), '(labels)\n', (2901, 2909), False, 'import json\n'), ((3362, 3383), 'azureml.core.model.Model', 'Model', (['ws', 'model_name'], {}), '(ws, model_name)\n', (3367, 3383), False, 'from azureml.core.model import Model\n')] |
import multiprocessing as mp
import numpy as np
#############
# Utilities #
#############
class Seeder:
def __init__(self, seed=0):
self.seed = seed
self._rs = np.random.RandomState(seed=seed)
def __call__(self, size):
seeds = self._rs.randint(2 ** 31 - 1, size=size, dtype=int)
return seeds.tolist()
class Evaluator:
def __init__(self, num_workers=None):
if num_workers is None:
num_workers = mp.cpu_count()
self.num_workers = num_workers
def evaluate(self, solution, seed):
raise NotImplementedError
def __call__(self, solutions, seeds):
results = []
with mp.Pool(self.num_workers) as pool:
for solution, seed in zip(solutions, seeds):
func = self.evaluate
args = (solution, seed)
results.append(pool.apply_async(func, args=args))
fitness = [r.get() for r in results]
return np.array(fitness)
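# usage sketch (illustrative): subclass Evaluator and implement evaluate(), e.g.
#   class SphereEvaluator(Evaluator):
#       def evaluate(self, solution, seed):
#           return -np.sum(solution ** 2)  # higher fitness = closer to the origin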
##############
# Optimizers #
##############
class Optimizer:
def __init__(self, theta):
self.theta = theta
self.t = 0
def update(self, grad):
self.t += 1
self.theta += self._step(grad)
return np.array(self.theta)
def _step(self, grad):
raise NotImplementedError
class SGD(Optimizer):
def __init__(self, theta, alpha, beta=0.9):
super().__init__(theta)
self.alpha = alpha
self.beta = beta
self.v = np.zeros_like(theta)
def _step(self, grad):
self.v = self.beta * self.v + (1 - self.beta) * grad
return -self.alpha * self.v
class Adam(Optimizer):
def __init__(self, theta, alpha, beta1=0.9, beta2=0.999):
super().__init__(theta)
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.m = np.zeros_like(theta)
self.v = np.zeros_like(theta)
def _step(self, grad):
self.m = self.beta1 * self.m + (1 - self.beta1) * grad
self.v = self.beta2 * self.v + (1 - self.beta2) * grad ** 2
m_corr = 1 - self.beta1 ** self.t
v_corr = np.sqrt(1 - self.beta2 ** self.t)
alpha = self.alpha * v_corr / m_corr
return -alpha * self.m / (np.sqrt(self.v) + 1e-8)
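# note: folding the bias corrections into alpha is algebraically identical to the
# textbook Adam update with m_hat = m / (1 - beta1^t), v_hat = v / (1 - beta2^t)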
########################
# Evolution strategies #
########################
class ES:
def __init__(self, optim, sigma):
self.optim = optim
self.mu = np.array(optim.theta)
self.sigma = sigma
self.epsilon = None
def sample(self, popsize):
assert popsize % 2 == 0
eps_split = np.random.randn(popsize // 2, len(self.mu))
self.epsilon = np.concatenate([eps_split, -eps_split], axis=0)
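        # mirrored (antithetic) sampling: each noise vector is paired with its
        # negation, which lowers the variance of the gradient estimate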
return self.mu + self.sigma * self.epsilon
def update(self, fitness):
        # centered rank transform: spread fitness evenly over [-0.5, 0.5]
        rank = np.empty_like(fitness, dtype=int)
rank[np.argsort(fitness)] = np.arange(len(fitness))
fitness = rank.astype(fitness.dtype) / (len(fitness) - 1) - 0.5
fitness = (fitness - np.mean(fitness)) / (np.std(fitness) + 1e-8)
grad = 1 / (len(fitness) * self.sigma) * (self.epsilon.T @ fitness)
self.mu = self.optim.update(-grad)
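# usage sketch (illustrative; popsize and iteration count are assumptions):
#   seeder, evaluator = Seeder(0), SphereEvaluator()   # see sketch above
#   es = ES(Adam(np.zeros(10), alpha=0.01), sigma=0.1)
#   for generation in range(100):
#       solutions = es.sample(64)
#       es.update(evaluator(solutions, seeder(64)))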
| [
"numpy.mean",
"numpy.sqrt",
"multiprocessing.cpu_count",
"numpy.argsort",
"numpy.array",
"numpy.empty_like",
"multiprocessing.Pool",
"numpy.concatenate",
"numpy.std",
"numpy.zeros_like",
"numpy.random.RandomState"
] | [((172, 204), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (193, 204), True, 'import numpy as np\n'), ((875, 892), 'numpy.array', 'np.array', (['fitness'], {}), '(fitness)\n', (883, 892), True, 'import numpy as np\n'), ((1113, 1133), 'numpy.array', 'np.array', (['self.theta'], {}), '(self.theta)\n', (1121, 1133), True, 'import numpy as np\n'), ((1344, 1364), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (1357, 1364), True, 'import numpy as np\n'), ((1674, 1694), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (1687, 1694), True, 'import numpy as np\n'), ((1708, 1728), 'numpy.zeros_like', 'np.zeros_like', (['theta'], {}), '(theta)\n', (1721, 1728), True, 'import numpy as np\n'), ((1929, 1962), 'numpy.sqrt', 'np.sqrt', (['(1 - self.beta2 ** self.t)'], {}), '(1 - self.beta2 ** self.t)\n', (1936, 1962), True, 'import numpy as np\n'), ((2218, 2239), 'numpy.array', 'np.array', (['optim.theta'], {}), '(optim.theta)\n', (2226, 2239), True, 'import numpy as np\n'), ((2424, 2471), 'numpy.concatenate', 'np.concatenate', (['[eps_split, -eps_split]'], {'axis': '(0)'}), '([eps_split, -eps_split], axis=0)\n', (2438, 2471), True, 'import numpy as np\n'), ((2560, 2597), 'numpy.empty_like', 'np.empty_like', (['fitness'], {'dtype': 'np.long'}), '(fitness, dtype=np.long)\n', (2573, 2597), True, 'import numpy as np\n'), ((430, 444), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (442, 444), True, 'import multiprocessing as mp\n'), ((616, 641), 'multiprocessing.Pool', 'mp.Pool', (['self.num_workers'], {}), '(self.num_workers)\n', (623, 641), True, 'import multiprocessing as mp\n'), ((2607, 2626), 'numpy.argsort', 'np.argsort', (['fitness'], {}), '(fitness)\n', (2617, 2626), True, 'import numpy as np\n'), ((2034, 2049), 'numpy.sqrt', 'np.sqrt', (['self.v'], {}), '(self.v)\n', (2041, 2049), True, 'import numpy as np\n'), ((2747, 2763), 'numpy.mean', 'np.mean', (['fitness'], {}), '(fitness)\n', (2754, 2763), True, 'import numpy as np\n'), ((2768, 2783), 'numpy.std', 'np.std', (['fitness'], {}), '(fitness)\n', (2774, 2783), True, 'import numpy as np\n')] |
import ta
import numpy as np
import pandas as pd
def get_indicators(stock, indicators):
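    # Adds technical-indicator columns to `stock` (a DataFrame with at least
    # open/high/low/close/voltot) for every {"name": ..., "params": [...]} entry
    # in `indicators`. "X_*" columns hold crossover signals in {-1, 0, +1},
    # "BIN_*" columns hold direction flags; columns already present are skipped.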
columns = stock.columns.to_list()
size = len(stock["close"])
for indicator in indicators:
name = indicator["name"]
for param in indicator["params"]:
if name == "SMA":
N = param
if "X_SMA"+str(N) not in columns:
if size > N:
# stock["SMA"+str(N)] = stock["close"].rolling(N).mean()
sma = ta.trend.sma_indicator(stock["close"], n = N, fillna=False).to_numpy()
stock["SMA"+str(N)] = sma
close = stock["close"].to_numpy()
jumps_up = ((sma[N:] < close[N:]) & (close[N-1:-1] < sma[N-1:-1])).astype(int)
jumps_down = ((sma[N:] > close[N:]) & (close[N-1:-1] > sma[N-1:-1])).astype(int)
stock["X_SMA"+str(N)] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["SMA"+str(N)] = [np.nan]*size
stock["X_SMA"+str(N)] = [np.nan]*size
elif name == "EMA":
N = param
if "X_EMA"+str(N) not in columns:
if size > N:
# stock["EMA"+str(N)] = stock["close"].ewm(span=N).mean()
ema = ta.trend.ema_indicator(stock["close"], n = N, fillna = False).to_numpy()
stock["EMA"+str(N)] = ema
close = stock["close"].to_numpy()
jumps_up = ((ema[N:] < close[N:]) & (close[N-1:-1] < ema[N-1:-1])).astype(int)
jumps_down = ((ema[N:] > close[N:]) & (close[N-1:-1] > ema[N-1:-1])).astype(int)
stock["X_EMA"+str(N)] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["EMA"+str(N)] = [np.nan]*size
stock["X_EMA"+str(N)] = [np.nan]*size
elif name == "SEN":
N = param
if "X_SEN"+str(N) not in columns:
if size > N:
sen = ta.trend.ichimoku_base_line(stock["high"], stock["low"], n1 = N, n2 = N,
visual=False, fillna=False).to_numpy()
stock["SEN"+str(N)] = sen
close = stock["close"].to_numpy()
jumps_up = ((sen[N:] < close[N:]) & (close[N-1:-1] < sen[N-1:-1])).astype(int)
jumps_down = ((sen[N:] > close[N:]) & (close[N-1:-1] > sen[N-1:-1])).astype(int)
stock["X_SEN"+str(N)] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["SEN"+str(N)] = [np.nan]*size
stock["X_SEN"+str(N)] = [np.nan]*size
elif name == "STD":
N = param
if "STD"+str(N) not in columns:
if size > N:
stock["STD"+str(N)] = stock["close"].rolling(N).std()
else:
stock["STD"+str(N)] = [np.nan]*size
elif name == "RSI":
N_list, crosses = param
for N in N_list:
if f"RSI_{N}" not in columns: # f"X_RSI_{N}_{crosses[-1]}"
if size > N:
rsi = ta.momentum.rsi(stock["close"], n = N).to_numpy()
stock["RSI_"+str(N)] = rsi
for cross in crosses:
jumps_up = ((rsi[N:] > cross) & (rsi[N-1:-1] <= cross)).astype(int)
jumps_down = ((rsi[N:] < cross) & (rsi[N-1:-1] >= cross)).astype(int)
stock[f"X_RSI_{N}_{cross}"] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["RSI_"+str(N)] = [np.nan]*size
for cross in crosses:
stock[f"X_RSI_{N}_{cross}"] = [np.nan]*size
elif name == "MACD":
N1, N2, N3 = param
if size > max(N1, N2) + N3:
if "MACD"+str(N1)+"_"+str(N2)+"_"+str(N3) not in columns:
ema1 = stock["close"].ewm(span=N1).mean().to_numpy()
ema2 = stock["close"].ewm(span=N2).mean().to_numpy()
macd = ema2 - ema1
ma = pd.Series(macd).rolling(N3).mean().to_numpy()
mask1 = (macd[N3:] > ma[N3:])
mask2 = (macd[N3-1:-1] < ma[N3-1:-1])
signal = np.zeros(len(stock)-N3)
signal[(mask1 & mask2)] = 1
signal[(~mask1 & ~mask2)] = -1
stock["MACD"+str(N1)+"_"+str(N2)+"_"+str(N3)] = np.append(np.zeros(N3), signal).astype(int)
else:
stock["MACD"+str(N1)+"_"+str(N2)+"_"+str(N3)] = [np.nan]*size
elif name == "STOC":
N_list, crosses = param
for N in N_list:
if "STOC"+str(N) not in columns:
if size > N:
H = stock["high"].rolling(N).max()[N-1:]
L = stock["low"].rolling(N).min()[N-1:]
stoc = (100 * (stock["close"] - L)/(H - L)).to_numpy()
stock["STOC"+str(N)] = stoc
for cross in crosses:
jumps_up = ((stoc[N:] > cross) & (stoc[N-1:-1] <= cross)).astype(int)
jumps_down = ((stoc[N:] < cross) & (stoc[N-1:-1] >= cross)).astype(int)
stock[f"X_STOC{N}_{cross}"] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["STOC"+str(N)] = [np.nan]*size
for cross in crosses:
stock[f"X_STOC{N}_{cross}"] = [np.nan]*size
elif name == "SAR":
alpha, maximum = param
tag = str(int(1e4*alpha))+"_"+str(int(1e4*maximum))
if "PSAR"+tag not in columns:
indicator = ta.trend.PSARIndicator(stock["high"], stock["low"], stock["close"],
step = alpha, max_step = maximum, fillna = False)
stock["PSAR"+tag] = indicator.psar().to_numpy()
stock["BIN_PSAR"+tag] = indicator.psar_up_indicator().to_numpy() - indicator.psar_down_indicator().to_numpy()
elif name == "ATR":
N = param
if "ATR"+str(N) not in columns:
if size > N:
stock["ATR"+str(N)] = ta.volatility.average_true_range(stock["high"], stock["low"], stock["close"],
n = N, fillna = False).to_numpy()
else:
stock["ATR"+str(N)] = [np.nan]*size
elif name == "ADX":
N = param
if "ADX"+str(N) not in columns:
if size >= 2*N:
stock["ADX"+str(N)] = ta.trend.adx(stock["high"], stock["low"], stock["close"],
n = N, fillna = False).to_numpy()
else:
stock["ADX"+str(N)] = [np.nan]*size
elif name == "CCI":
C = 0.015
N_list, crosses = param
for N in N_list:
if "CCI"+str(N) not in columns:
if size > N:
cci = ta.trend.cci(stock["high"], stock["low"], stock["close"], n = N, c = C,
fillna=False).to_numpy()
stock["CCI"+str(N)] = cci
for cross in crosses:
jumps_up = ((cci[N:] > cross) & (cci[N-1:-1] <= cross)).astype(int)
jumps_down = ((cci[N:] < cross) & (cci[N-1:-1] >= cross)).astype(int)
if round(cross) == 0:
cross = "0"
stock[f"X_CCI{N}_{cross}".replace("-", "n")] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["CCI"+str(N)] = [np.nan]*size
for cross in crosses:
if round(cross) == 0:
cross = "0"
stock[f"X_CCI{N}_{cross}".replace("-", "n")] = [np.nan]*size
elif name == "MFI":
N_list, crosses = param
for N in N_list:
if "MFI"+str(N) not in columns: # f"X_MFI{N}_{crosses[-1]}"
if size > N:
mfi = ta.volume.money_flow_index(stock["high"], stock["low"], stock["close"],
stock["voltot"], n = N, fillna=False).to_numpy()
stock["MFI"+str(N)] = mfi
for cross in crosses:
jumps_up = ((mfi[N:] > cross) & (mfi[N-1:-1] <= cross)).astype(int)
jumps_down = ((mfi[N:] < cross) & (mfi[N-1:-1] >= cross)).astype(int)
stock[f"X_MFI{N}_{cross}"] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["MFI"+str(N)] = [np.nan]*size
for cross in crosses:
stock[f"X_MFI{N}_{cross}"] = [np.nan]*size
elif name == "CMF":
N_list, crosses = param
for N in N_list:
if "CMF"+str(N) not in columns:
if size > N:
cmf = ta.volume.chaikin_money_flow(stock["high"], stock["low"], stock["close"],
stock["voltot"], n = N, fillna=False).to_numpy()
stock["CMF"+str(N)] = cmf
for cross in crosses:
jumps_up = ((cmf[N:] > cross) & (cmf[N-1:-1] <= cross)).astype(int)
jumps_down = ((cmf[N:] < cross) & (cmf[N-1:-1] >= cross)).astype(int)
cross = round(100*cross)/100
if round(100*cross) == 0:
cross = "00"
stock[f"X_CMF{N}_{cross}".replace(".", "").replace("-", "n")] = np.append([0]*N, jumps_up-jumps_down)
else:
stock["CMF"+str(N)] = [np.nan]*size
for cross in crosses:
stock[f"X_CMF{N}_{cross}".replace(".", "").replace("-", "n")] = [np.nan]*size
elif name == "ADI":
N = param
if "ADI" not in columns:
stock["ADI"] = ta.volume.acc_dist_index(stock["high"], stock["low"], stock["close"],
stock["voltot"], fillna=False).to_numpy()
elif name == "OBV":
N = param
if "OBV"+str(N) not in columns:
if size > N:
obv = ta.volume.on_balance_volume(stock["close"], stock["voltot"], fillna=False)
mean = ta.trend.sma_indicator(obv, n = N, fillna=False).to_numpy()
obv = obv.to_numpy()
jump_up = (obv[N:] > mean[N:]) & (obv[N-1:-1] <= mean[N-1:-1])
jump_down = (obv[N:] < mean[N:]) & (obv[N-1:-1] >= mean[N-1:-1])
stock["OBV"+str(N)] = np.append([0]*N, jump_up.astype(int) - jump_down.astype(int))
else:
stock["OBV"+str(N)] = [np.nan]*size
elif name == "AROON":
for N in param:
if "BIN_AROON"+str(N) not in columns:
if size > N:
aroon = ta.trend.AroonIndicator(stock["close"], n = N, fillna = False)
up, down = aroon.aroon_up().to_numpy(), aroon.aroon_down().to_numpy()
stock["UP_AROON"+str(N)] = up
stock["DOWN_AROON"+str(N)] = down
jump_up = (up[N:] > down[N:]) & (up[N-1:-1] <= down[N-1:-1])
jump_down = (up[N:] < down[N:]) & (up[N-1:-1] >= down[N-1:-1])
stock["BIN_AROON"+str(N)] = np.append([0]*N, jump_up.astype(int) - jump_down.astype(int))
else:
stock["UP_AROON"+str(N)] = [np.nan]*size
stock["DOWN_AROON"+str(N)] = [np.nan]*size
stock["BIN_AROON"+str(N)] = [np.nan]*size
elif name == "AO":
N1, N2 = param
if f"AO{N1}_{N2}" not in columns:
if size > max(N1, N2):
ao = ta.momentum.ao(stock["high"], stock["low"], s=N1, len=N2, fillna=False).to_numpy()
stock[f"AO{N1}_{N2}"] = ao
jump_up = (ao[N2:] > 0) & (ao[N2-1:-1] <= 0)
jump_down = (ao[N2:] < 0) & (ao[N2-1:-1] >= 0)
stock[f"BIN_AO{N1}_{N2}"] = np.append([0]*N2, jump_up.astype(int) - jump_down.astype(int))
else:
stock[f"AO{N1}_{N2}"] = [np.nan]*size
stock[f"BIN_AO{N1}_{N2}"] = [np.nan]*size
elif name == "UO":
N_list, crosses = param
for N in N_list:
if "UO"+str(N) not in columns: # f"X_UO{N}_{crosses[-1]}"
if size > 4*N + 1:
uo = ta.momentum.uo(stock["high"], stock["low"], stock["close"], s=N, m=2*N,
len=4*N, ws=4.0, wm=2.0, wl=1.0, fillna=False).to_numpy()
stock["UO"+str(N)] = uo
for cross in crosses:
jumps_up = ((uo[4*N+1:] > cross) & (uo[4*N:-1] <= cross)).astype(int)
jumps_down = ((uo[4*N+1:] < cross) & (uo[4*N:-1] >= cross)).astype(int)
stock[f"X_UO{N}_{cross}"] = np.append([0]*(4*N+1), jumps_up-jumps_down)
else:
stock["UO"+str(N)] = [np.nan]*size
for cross in crosses:
stock[f"X_UO{N}_{cross}"] = [np.nan]*size
elif name == "MEAN_CROSSOVER":
for mean_type in ["SMA", "EMA", "SEN"]:
for N1 in param:
mean1 = stock[mean_type+str(N1)].to_numpy()
for N2 in param:
if N2 > N1:
tag = f"X_{mean_type+str(N1)}_{N2}"
if size > N2:
if tag not in columns:
mean2 = stock[mean_type+str(N2)].to_numpy()
jumps_up = ((mean1[N2:] > mean2[N2:]) & (mean1[N2-1:-1] <= mean2[N2-1:-1])).astype(int)
jumps_down = ((mean1[N2:] < mean2[N2:]) & (mean1[N2-1:-1] >= mean2[N2-1:-1])).astype(int)
stock[tag] = np.append([0]*N2, jumps_up-jumps_down)
else:
stock[tag] = [np.nan]*size
elif name == "DEV_CROSSOVER":
for N in param:
if f"X_ATR{N}_STD{N}" not in columns:
if size > N + 2:
atr = stock["ATR"+str(N)].to_numpy()
std = stock["STD"+str(N)].to_numpy()
close = stock["close"].to_numpy()
std_jumps = ((std[N:] > atr[N:]) & (std[N-1:-1] <= atr[N-1:-1]))
slope = (close[N:] > close[N-3:-3])
signal = (std_jumps & slope).astype(int) - (std_jumps & ~slope).astype(int)
stock[f"X_ATR{N}_STD{N}"] = np.append([0]*N, signal)
else:
stock[f"X_ATR{N}_STD{N}"] = [np.nan]*size
elif name == "MARTELO":
tipo, N1, N2 = param
tag = f"MARTELO{tipo}_{str(N1)}_{str(N2)}"
if tag not in columns:
N = N1+5 if N1 > N2 else N2+5
if size >= max(N+1, 2*N2):
std = stock["STD"+str(N1)]
low, high = stock["low"].copy().to_numpy(), stock["high"].copy().to_numpy()
opene, close = stock["open"].copy().to_numpy(), stock["close"].copy().to_numpy()
body = abs(close - opene)
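                    # "u_tail" is the lower shadow and "d_tail" the upper shadow
                    # of each candle; a long lower shadow at a fresh local low
                    # reads as a bullish hammer ("martelo" is Portuguese for hammer)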
u_tail = np.minimum(close, opene) - low
d_tail = high - np.maximum(close, opene)
if tipo == 1:
adx = stock["ADX"+str(N2)]
stoc = stock["STOC"+str(N2)]
u_mask = (u_tail[N:] > std[N:]) & (u_tail[N:] > body[N:]) & (adx[N-1:-1] > 30) & (stoc[N:] < 50) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (d_tail[N:] > body[N:]) & (adx[N-1:-1] > 30) & (stoc[N:] > 50) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 2:
adx = stock["ADX"+str(N2)]
u_mask = (u_tail[N:] > std[N:]) & (u_tail[N:] > body[N:]) & (adx[N-1:-1] > 30) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (d_tail[N:] > body[N:]) & (adx[N-1:-1] > 30) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 3:
stoc = stock["STOC"+str(N2)]
u_mask = (u_tail[N:] > std[N:]) & (u_tail[N:] > body[N:]) & (stoc[N:] < 50) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (d_tail[N:] > body[N:]) & (stoc[N:] > 50) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 4:
u_mask = (u_tail[N:] > std[N:]) & (u_tail[N:] > body[N:]) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (d_tail[N:] > body[N:]) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 5:
u_mask = (u_tail[N:] > std[N:]) & (u_tail[N:] > body[N:]) & (body[N:] > d_tail[N:]) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (d_tail[N:] > body[N:]) & (body[N:] > u_tail[N:]) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 6:
u_mask = (u_tail[N:] > std[N:]) & (std[N:] > d_tail[N:]) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (std[N:] > u_tail[N:]) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 7:
u_mask = (u_tail[N:] > std[N:]) & (std[N:] > d_tail[N:])
d_mask = (d_tail[N:] > std[N:]) & (std[N:] > u_tail[N:])
elif tipo == 8:
u_mask = (u_tail[N:] > body[N:]) & (low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > body[N:]) & (high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 9:
u_mask = (u_tail[N:] > 2*body[N:]) & (u_tail[N:] > 2*d_tail[N:]) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > 2*body[N:]) & (d_tail[N:] > 2*u_tail[N:]) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 10:
u_mask = (u_tail[N:] > std[N:]) & (u_tail[N:] > 2*body[N:]) & (u_tail[N:] > 2*d_tail[N:]) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > std[N:]) & (d_tail[N:] > 2*body[N:]) & (d_tail[N:] > 2*u_tail[N:]) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
elif tipo == 11:
atr = stock["ATR"+str(N1)]
u_mask = (u_tail[N:] > 0.5*atr[N:]) & (u_tail[N:] > 2*body[N:]) & (u_tail[N:] > 2*d_tail[N:]) & (
low[N:] < pd.Series(low).rolling(5).min()[N-1:-1])
d_mask = (d_tail[N:] > 0.5*atr[N:]) & (d_tail[N:] > 2*body[N:]) & (d_tail[N:] > 2*u_tail[N:]) & (
high[N:] > pd.Series(high).rolling(5).max()[N-1:-1])
else:
raise Exception(f"MARTELO type {tipo} not found!")
u_martelo = (u_mask).astype(int)
d_martelo = (d_mask).astype(int)
num = len(stock) - N
stock[tag] = np.append([0]*N, (u_martelo-d_martelo)[:num])
else:
stock[tag] = [np.nan]*size
elif name == "ROSS":
tipo, width, tall = param
num_tag = 3 if tipo == 0 else tipo
tag = f"ROSS{tipo}_{width}_{tall}".replace(".", "")
if tag not in columns:
if size > 5*width:
close, high, low = stock["close"], stock["high"], stock["low"]
atr = ta.volatility.average_true_range(high, low, close, n = width*5,
fillna = False).to_numpy()
tol = int(width/2)
dy = tall*atr[5*width:]
BR = close.to_numpy()[5*width:]
RT_L = low.rolling(tol).min().to_numpy()[4*width:-1*width]
RH_L = high.rolling(tol).max().to_numpy()[3*width:-2*width]
P3_L = low.rolling(tol).min().to_numpy()[2*width:-3*width]
P2_L = high.rolling(tol).max().to_numpy()[1*width:-4*width]
P1_L = low.rolling(tol).min().to_numpy()[:-5*width]
# LONG: highest close of the period at P2, RH and BR
mask_L = (BR > close.rolling(5*width).max().to_numpy()[5*width -1:-1])
mask_L = mask_L & (RH_L >= close.rolling(3*width).max().to_numpy()[3*width -1:-2*width -1])
mask_L = mask_L & (P2_L >= close.rolling(1*width).max().to_numpy()[1*width -1:-4*width -1])
RT_S = high.rolling(tol).max().to_numpy()[4*width:-1*width]
RH_S = low.rolling(tol).min().to_numpy()[3*width:-2*width]
P3_S = high.rolling(tol).max().to_numpy()[2*width:-3*width]
P2_S = low.rolling(tol).min().to_numpy()[1*width:-4*width]
P1_S = high.rolling(tol).max().to_numpy()[:-5*width]
# SHORT: lowest close of the period at P2, RH and BR
mask_S = (BR < close.rolling(5*width).min().to_numpy()[5*width -1:-1])
mask_S = mask_S & (RH_S <= close.rolling(3*width).min().to_numpy()[3*width -1:-2*width -1])
mask_S = mask_S & (P2_S <= close.rolling(1*width).min().to_numpy()[1*width -1:-4*width -1])
if tipo in [0, 3]:
tag = f"ROSS3_{width}_{tall}".replace(".", "")
# LONG
# BR above RT
mask_L = mask_L & (BR > RT_L + dy)
# RH above P2 and much above P3
mask_L = mask_L & (RH_L > P3_L + 2*dy)
# SHORT
# BR below RT
mask_S = mask_S & (BR < RT_S - dy)
# RH below P2 and much below P3
mask_S = mask_S & (RH_S < P3_S - 2*dy)
signal = mask_L.astype(int) - mask_S.astype(int)
stock[tag] = np.append([0]*(5*width), signal)
if tipo in [0, 2]:
tag = f"ROSS2_{width}_{tall}".replace(".", "")
# LONG
# BR above RT
mask_L = mask_L & (BR > RT_L + dy)
# RH above P2 and much above P3
mask_L = mask_L & (RH_L > P3_L + 2*dy)
mask_L = mask_L & (RH_L > P2_L + dy)
# SHORT
# BR below RT
mask_S = mask_S & (BR < RT_S - dy)
# RH below P2 and much below P3
mask_S = mask_S & (RH_S < P3_S - 2*dy)
mask_S = mask_S & (RH_S < P2_S - dy)
signal = mask_L.astype(int) - mask_S.astype(int)
stock[tag] = np.append([0]*(5*width), signal)
if tipo in [0, 1]:
tag = f"ROSS1_{width}_{tall}".replace(".", "")
# LONG
# BR above RT
mask_L = mask_L & (BR > RT_L + dy)
# RH above P2 and much above P3
mask_L = mask_L & (RH_L > P3_L + 2*dy)
mask_L = mask_L & (RH_L > P2_L + dy)
# P3 above P1
mask_L = mask_L & (P3_L > P1_L + dy)
# P2 much above P1
mask_L = mask_L & (P2_L > P1_L + 2*dy)
# SHORT
# BR below RT
mask_S = mask_S & (BR < RT_S - dy)
# RH below P2 and much below P3
mask_S = mask_S & (RH_S < P3_S - 2*dy)
mask_S = mask_S & (RH_S < P2_S - dy)
# P3 below P1
mask_S = mask_S & (P3_S < P1_S - dy)
# P2 much below P1
mask_S = mask_S & (P2_S < P1_S - 2*dy)
signal = mask_L.astype(int) - mask_S.astype(int)
stock[tag] = np.append([0]*(5*width), signal)
else:
stock[f"ROSS1_{width}_{tall}".replace(".", "")] = [np.nan]*size
stock[f"ROSS2_{width}_{tall}".replace(".", "")] = [np.nan]*size
stock[f"ROSS3_{width}_{tall}".replace(".", "")] = [np.nan]*size
elif name == "3BP":
tipo, mult1, mult2 = param
tag = f"3BP{tipo}_{mult1}_{mult2}".replace(".", "")
if tag not in columns:
if size > 13:
N = 13
atr, change = stock["ATR10"].to_numpy(), stock["close"].to_numpy() - stock["open"].to_numpy()
mask_L = (change[N:] > mult1*atr[N:])
mask_S = (-change[N:] > mult1*atr[N:])
if tipo == 1:
# LONG CASE 1
mask_L1 = (change[N-1:-1] < 0) & (abs(change[N-1:-1]) < mult2*atr[N-1:-1])
mask_L1 = mask_L1 & (change[N-2:-2] > mult1*atr[N-2:-2])
# LONG CASE 2
mask_L2 = (change[N-1:-1] > 0) & (change[N-1:-1] < mult2*atr[N-1:-1])
mask_L2 = mask_L2 & (change[N-2:-2] < 0) & (abs(change[N-2:-2]) < mult2*atr[N-2:-2])
mask_L2 = mask_L2 & (change[N-3:-3] > mult1*atr[N-3:-3])
# SHORT CASE 1
mask_S1 = (change[N-1:-1] > 0) & (change[N-1:-1] < mult2*atr[N-1:-1])
mask_S1 = mask_S1 & (change[N-2:-2] < 0) & (abs(change[N-2:-2]) > mult1*atr[N-2:-2])
# SHORT CASE 2
mask_S2 = (change[N-1:-1] < 0) & (abs(change[N-1:-1]) < mult2*atr[N-1:-1])
mask_S2 = mask_S2 & (change[N-2:-2] > 0) & (change[N-2:-2] < mult2*atr[N-2:-2])
mask_S2 = mask_S2 & (change[N-3:-3] < 0) & (abs(change[N-3:-3]) > mult1*atr[N-3:-3])
elif tipo == 2:
ATR = atr[N-3:-3]
# LONG CASE 1
mask_L1 = (change[N-1:-1] < 0) & (abs(change[N-1:-1]) < mult2*ATR)
mask_L1 = mask_L1 & (change[N-2:-2] > mult1*ATR)
mask_L1 = mask_L1 & (change[N-2:-2] > change[N:]) # new
# LONG CASE 2
mask_L2 = (change[N-1:-1] > 0) & (change[N-1:-1] < mult2*ATR)
mask_L2 = mask_L2 & (change[N-2:-2] < 0) & (abs(change[N-2:-2]) < mult2*ATR)
mask_L2 = mask_L2 & (change[N-3:-3] > mult1*ATR)
mask_L2 = mask_L2 & (change[N-3:-3] > change[N:]) # new
# SHORT CASE 1
mask_S1 = (change[N-1:-1] > 0) & (change[N-1:-1] < mult2*ATR)
mask_S1 = mask_S1 & (change[N-2:-2] < 0) & (abs(change[N-2:-2]) > mult1*ATR)
mask_S1 = mask_S1 & (abs(change[N-2:-2]) > abs(change[N:])) # new
# SHORT CASE 2
mask_S2 = (change[N-1:-1] < 0) & (abs(change[N-1:-1]) < mult2*ATR)
mask_S2 = mask_S2 & (change[N-2:-2] > 0) & (change[N-2:-2] < mult2*ATR)
mask_S2 = mask_S2 & (change[N-3:-3] < 0) & (abs(change[N-3:-3]) > mult1*ATR)
mask_S2 = mask_S2 & (abs(change[N-3:-3]) > abs(change[N:])) # new
mask_L = mask_L & (mask_L1 | mask_L2)
mask_S = mask_S & (mask_S1 | mask_S2)
signal = mask_L.astype(int) - mask_S.astype(int)
stock[tag] = np.append([0]*N, signal)
else:
stock[tag] = [np.nan]*size
else:
raise Exception(f"Indicator {name} not found!")
return stock | [
"ta.volume.on_balance_volume",
"ta.volume.acc_dist_index",
"ta.volatility.average_true_range",
"ta.momentum.ao",
"numpy.maximum",
"ta.trend.adx",
"ta.momentum.rsi",
"ta.trend.sma_indicator",
"ta.trend.cci",
"ta.volume.chaikin_money_flow",
"ta.trend.ichimoku_base_line",
"ta.trend.AroonIndicator... | [((973, 1014), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (982, 1014), True, 'import numpy as np\n'), ((1865, 1906), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (1874, 1906), True, 'import numpy as np\n'), ((533, 590), 'ta.trend.sma_indicator', 'ta.trend.sma_indicator', (["stock['close']"], {'n': 'N', 'fillna': '(False)'}), "(stock['close'], n=N, fillna=False)\n", (555, 590), False, 'import ta\n'), ((2776, 2817), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (2785, 2817), True, 'import numpy as np\n'), ((1423, 1480), 'ta.trend.ema_indicator', 'ta.trend.ema_indicator', (["stock['close']"], {'n': 'N', 'fillna': '(False)'}), "(stock['close'], n=N, fillna=False)\n", (1445, 1480), False, 'import ta\n'), ((2234, 2335), 'ta.trend.ichimoku_base_line', 'ta.trend.ichimoku_base_line', (["stock['high']", "stock['low']"], {'n1': 'N', 'n2': 'N', 'visual': '(False)', 'fillna': '(False)'}), "(stock['high'], stock['low'], n1=N, n2=N, visual\n =False, fillna=False)\n", (2261, 2335), False, 'import ta\n'), ((3964, 4005), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (3973, 4005), True, 'import numpy as np\n'), ((6507, 6623), 'ta.trend.PSARIndicator', 'ta.trend.PSARIndicator', (["stock['high']", "stock['low']", "stock['close']"], {'step': 'alpha', 'max_step': 'maximum', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'], step=\n alpha, max_step=maximum, fillna=False)\n", (6529, 6623), False, 'import ta\n'), ((3540, 3576), 'ta.momentum.rsi', 'ta.momentum.rsi', (["stock['close']"], {'n': 'N'}), "(stock['close'], n=N)\n", (3555, 3576), False, 'import ta\n'), ((5067, 5079), 'numpy.zeros', 'np.zeros', (['N3'], {}), '(N3)\n', (5075, 5079), True, 'import numpy as np\n'), ((6020, 6061), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (6029, 6061), True, 'import numpy as np\n'), ((7074, 7175), 'ta.volatility.average_true_range', 'ta.volatility.average_true_range', (["stock['high']", "stock['low']", "stock['close']"], {'n': 'N', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'\n ], n=N, fillna=False)\n", (7106, 7175), False, 'import ta\n'), ((4653, 4668), 'pandas.Series', 'pd.Series', (['macd'], {}), '(macd)\n', (4662, 4668), True, 'import pandas as pd\n'), ((7550, 7626), 'ta.trend.adx', 'ta.trend.adx', (["stock['high']", "stock['low']", "stock['close']"], {'n': 'N', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'], n=N, fillna=False)\n", (7562, 7626), False, 'import ta\n'), ((8694, 8735), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (8703, 8735), True, 'import numpy as np\n'), ((8054, 8140), 'ta.trend.cci', 'ta.trend.cci', (["stock['high']", "stock['low']", "stock['close']"], {'n': 'N', 'c': 'C', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'], n=N, c=C, fillna=\n False)\n", (8066, 8140), False, 'import ta\n'), ((9899, 9940), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (9908, 9940), True, 'import numpy as np\n'), ((9341, 9452), 'ta.volume.money_flow_index', 'ta.volume.money_flow_index', (["stock['high']", "stock['low']", 
"stock['close']", "stock['voltot']"], {'n': 'N', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'],\n stock['voltot'], n=N, fillna=False)\n", (9367, 9452), False, 'import ta\n'), ((11163, 11204), 'numpy.append', 'np.append', (['([0] * N)', '(jumps_up - jumps_down)'], {}), '([0] * N, jumps_up - jumps_down)\n', (11172, 11204), True, 'import numpy as np\n'), ((11599, 11704), 'ta.volume.acc_dist_index', 'ta.volume.acc_dist_index', (["stock['high']", "stock['low']", "stock['close']", "stock['voltot']"], {'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'], stock\n ['voltot'], fillna=False)\n", (11623, 11704), False, 'import ta\n'), ((11952, 12026), 'ta.volume.on_balance_volume', 'ta.volume.on_balance_volume', (["stock['close']", "stock['voltot']"], {'fillna': '(False)'}), "(stock['close'], stock['voltot'], fillna=False)\n", (11979, 12026), False, 'import ta\n'), ((10397, 10510), 'ta.volume.chaikin_money_flow', 'ta.volume.chaikin_money_flow', (["stock['high']", "stock['low']", "stock['close']", "stock['voltot']"], {'n': 'N', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'],\n stock['voltot'], n=N, fillna=False)\n", (10425, 10510), False, 'import ta\n'), ((12059, 12105), 'ta.trend.sma_indicator', 'ta.trend.sma_indicator', (['obv'], {'n': 'N', 'fillna': '(False)'}), '(obv, n=N, fillna=False)\n', (12081, 12105), False, 'import ta\n'), ((12744, 12802), 'ta.trend.AroonIndicator', 'ta.trend.AroonIndicator', (["stock['close']"], {'n': 'N', 'fillna': '(False)'}), "(stock['close'], n=N, fillna=False)\n", (12767, 12802), False, 'import ta\n'), ((13764, 13835), 'ta.momentum.ao', 'ta.momentum.ao', (["stock['high']", "stock['low']"], {'s': 'N1', 'len': 'N2', 'fillna': '(False)'}), "(stock['high'], stock['low'], s=N1, len=N2, fillna=False)\n", (13778, 13835), False, 'import ta\n'), ((15128, 15179), 'numpy.append', 'np.append', (['([0] * (4 * N + 1))', '(jumps_up - jumps_down)'], {}), '([0] * (4 * N + 1), jumps_up - jumps_down)\n', (15137, 15179), True, 'import numpy as np\n'), ((14580, 14706), 'ta.momentum.uo', 'ta.momentum.uo', (["stock['high']", "stock['low']", "stock['close']"], {'s': 'N', 'm': '(2 * N)', 'len': '(4 * N)', 'ws': '(4.0)', 'wm': '(2.0)', 'wl': '(1.0)', 'fillna': '(False)'}), "(stock['high'], stock['low'], stock['close'], s=N, m=2 * N,\n len=4 * N, ws=4.0, wm=2.0, wl=1.0, fillna=False)\n", (14594, 14706), False, 'import ta\n'), ((17100, 17126), 'numpy.append', 'np.append', (['([0] * N)', 'signal'], {}), '([0] * N, signal)\n', (17109, 17126), True, 'import numpy as np\n'), ((22954, 23003), 'numpy.append', 'np.append', (['([0] * N)', '(u_martelo - d_martelo)[:num]'], {}), '([0] * N, (u_martelo - d_martelo)[:num])\n', (22963, 23003), True, 'import numpy as np\n'), ((17849, 17873), 'numpy.minimum', 'np.minimum', (['close', 'opene'], {}), '(close, opene)\n', (17859, 17873), True, 'import numpy as np\n'), ((17921, 17945), 'numpy.maximum', 'np.maximum', (['close', 'opene'], {}), '(close, opene)\n', (17931, 17945), True, 'import numpy as np\n'), ((16261, 16303), 'numpy.append', 'np.append', (['([0] * N2)', '(jumps_up - jumps_down)'], {}), '([0] * N2, jumps_up - jumps_down)\n', (16270, 16303), True, 'import numpy as np\n'), ((26234, 26270), 'numpy.append', 'np.append', (['([0] * (5 * width))', 'signal'], {}), '([0] * (5 * width), signal)\n', (26243, 26270), True, 'import numpy as np\n'), ((27190, 27226), 'numpy.append', 'np.append', (['([0] * (5 * width))', 'signal'], {}), '([0] * (5 * width), signal)\n', (27199, 27226), True, 'import numpy as 
np\n'), ((28596, 28632), 'numpy.append', 'np.append', (['([0] * (5 * width))', 'signal'], {}), '([0] * (5 * width), signal)\n', (28605, 28632), True, 'import numpy as np\n'), ((32487, 32513), 'numpy.append', 'np.append', (['([0] * N)', 'signal'], {}), '([0] * N, signal)\n', (32496, 32513), True, 'import numpy as np\n'), ((23480, 23557), 'ta.volatility.average_true_range', 'ta.volatility.average_true_range', (['high', 'low', 'close'], {'n': '(width * 5)', 'fillna': '(False)'}), '(high, low, close, n=width * 5, fillna=False)\n', (23512, 23557), False, 'import ta\n'), ((18276, 18290), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (18285, 18290), True, 'import pandas as pd\n'), ((18491, 18506), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (18500, 18506), True, 'import pandas as pd\n'), ((18789, 18803), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (18798, 18803), True, 'import pandas as pd\n'), ((18986, 19001), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (18995, 19001), True, 'import pandas as pd\n'), ((19283, 19297), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (19292, 19297), True, 'import pandas as pd\n'), ((19477, 19492), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (19486, 19492), True, 'import pandas as pd\n'), ((19696, 19710), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (19705, 19710), True, 'import pandas as pd\n'), ((19872, 19887), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (19881, 19887), True, 'import pandas as pd\n'), ((20117, 20131), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (20126, 20131), True, 'import pandas as pd\n'), ((20319, 20334), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (20328, 20334), True, 'import pandas as pd\n'), ((20537, 20551), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (20546, 20551), True, 'import pandas as pd\n'), ((20712, 20727), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (20721, 20727), True, 'import pandas as pd\n'), ((21087, 21101), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (21096, 21101), True, 'import pandas as pd\n'), ((21204, 21219), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (21213, 21219), True, 'import pandas as pd\n'), ((21430, 21444), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (21439, 21444), True, 'import pandas as pd\n'), ((21613, 21628), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (21622, 21628), True, 'import pandas as pd\n'), ((21865, 21879), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (21874, 21879), True, 'import pandas as pd\n'), ((22073, 22088), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (22082, 22088), True, 'import pandas as pd\n'), ((22385, 22399), 'pandas.Series', 'pd.Series', (['low'], {}), '(low)\n', (22394, 22399), True, 'import pandas as pd\n'), ((22597, 22612), 'pandas.Series', 'pd.Series', (['high'], {}), '(high)\n', (22606, 22612), True, 'import pandas as pd\n')] |
import tensorflow as tf
import numpy as np
import math
"""
Exercise 1.1: Diagonal Gaussian Likelihood
Write a function which takes in Tensorflow symbols for the means and
log stds of a batch of diagonal Gaussian distributions, along with a
Tensorflow placeholder for (previously-generated) samples from those
distributions, and returns a Tensorflow symbol for computing the log
likelihoods of those samples.
"""
def gaussian_likelihood(x, mu, log_std):
"""
Args:
x: Tensor with shape [batch, dim]
mu: Tensor with shape [batch, dim]
log_std: Tensor with shape [batch, dim] or [dim]
Returns:
Tensor with shape [batch]
"""
#######################
# #
# YOUR CODE HERE #
# #
#######################
#My new answer
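    # per-dimension log N(x | mu, sigma) = -0.5 * (((x - mu) / sigma)^2
    # + 2 * log(sigma) + log(2 * pi)), summed over the diagonal below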
presum = -0.5 * (((x - mu)/(tf.exp(log_std)))**2 + 2*log_std + np.log(2*np.pi))
return tf.reduce_sum(presum, axis = 1)
#My old answer
'''
# 1. Take in values x, mu, and log_std
batch_size = 32
dim = 10
# Calculate std
std = tf.math.exp(log_std)
# Set up tf placeholders
ans = tf.placeholder(tf.float32, shape=[dim,])
x2 = tf.placeholder(tf.float32, shape=[dim,])
mu2 = tf.placeholder(tf.float32, shape=[dim,])
std2 = tf.placeholder(tf.float32, shape=[dim,])
log_std2 = tf.placeholder(tf.float32, shape=[dim,])
output = tf.placeholder(tf.float32, shape=[dim,])
# Set up array for output
output_np = []
# 2. Calculate log_likelihood according to spinning up formula
for i in range(batch_size):
# intialize variables
x2 = x[i, :]
mu2 = mu[i, :]
std2 = std[:]
log_std2 = log_std[:]
# calculate sum portion of likelihood
A1 = x2 - mu2
A = tf.pow(A1, 2)
#test = (x[i, :] - mu[i, :])**2
#test = tf.keras.backend.print_tensor(test, message = 'test is')
#A1 = tf.keras.backend.print_tensor(A1, message = 'A1 is')
#A = tf.keras.backend.print_tensor(A, message = 'A is')
B1 = tf.pow(std2, 2)
B = tf.divide(A, B1)
#B = tf.keras.backend.print_tensor(B, message = 'B is')
C = B + 2*log_std2
D = tf.reduce_sum(C)
E = -0.5*(D + dim * tf.math.log(2*math.pi))
output_np.append(E)
output = output_np
return
'''
if __name__ == '__main__':
"""
Run this file to verify your solution.
"""
from spinup.exercises.problem_set_1_solutions import exercise1_1_soln
from spinup.exercises.common import print_result
sess = tf.Session()
dim = 10
x = tf.placeholder(tf.float32, shape=(None, dim))
mu = tf.placeholder(tf.float32, shape=(None, dim))
log_std = tf.placeholder(tf.float32, shape=(dim,))
your_gaussian_likelihood = gaussian_likelihood(x, mu, log_std)
true_gaussian_likelihood = exercise1_1_soln.gaussian_likelihood(x, mu, log_std)
batch_size = 32
feed_dict = {x: np.random.rand(batch_size, dim),
mu: np.random.rand(batch_size, dim),
log_std: np.random.rand(dim)}
your_result, true_result = sess.run([your_gaussian_likelihood, true_gaussian_likelihood],
feed_dict=feed_dict)
'''
z = tf.print(your_result)
z2 = tf.print(true_result)
sess.run([z, z2])
'''
correct = np.allclose(your_result, true_result)
print_result(correct) | [
"numpy.allclose",
"numpy.random.rand",
"spinup.exercises.common.print_result",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.log",
"spinup.exercises.problem_set_1_solutions.exercise1_1_soln.gaussian_likelihood",
"tensorflow.exp"
] | [((927, 956), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['presum'], {'axis': '(1)'}), '(presum, axis=1)\n', (940, 956), True, 'import tensorflow as tf\n'), ((2609, 2621), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2619, 2621), True, 'import tensorflow as tf\n'), ((2644, 2689), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, dim)'}), '(tf.float32, shape=(None, dim))\n', (2658, 2689), True, 'import tensorflow as tf\n'), ((2699, 2744), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, dim)'}), '(tf.float32, shape=(None, dim))\n', (2713, 2744), True, 'import tensorflow as tf\n'), ((2759, 2799), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(dim,)'}), '(tf.float32, shape=(dim,))\n', (2773, 2799), True, 'import tensorflow as tf\n'), ((2899, 2951), 'spinup.exercises.problem_set_1_solutions.exercise1_1_soln.gaussian_likelihood', 'exercise1_1_soln.gaussian_likelihood', (['x', 'mu', 'log_std'], {}), '(x, mu, log_std)\n', (2935, 2951), False, 'from spinup.exercises.problem_set_1_solutions import exercise1_1_soln\n'), ((3398, 3435), 'numpy.allclose', 'np.allclose', (['your_result', 'true_result'], {}), '(your_result, true_result)\n', (3409, 3435), True, 'import numpy as np\n'), ((3440, 3461), 'spinup.exercises.common.print_result', 'print_result', (['correct'], {}), '(correct)\n', (3452, 3461), False, 'from spinup.exercises.common import print_result\n'), ((2993, 3024), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'dim'], {}), '(batch_size, dim)\n', (3007, 3024), True, 'import numpy as np\n'), ((3047, 3078), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'dim'], {}), '(batch_size, dim)\n', (3061, 3078), True, 'import numpy as np\n'), ((3106, 3125), 'numpy.random.rand', 'np.random.rand', (['dim'], {}), '(dim)\n', (3120, 3125), True, 'import numpy as np\n'), ((899, 916), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (905, 916), True, 'import numpy as np\n'), ((864, 879), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (870, 879), True, 'import tensorflow as tf\n')] |
import numpy as np
import time
import os
import tensorflow as tf
import texar as tx
class Generator(object):
def __init__(self, sess, model, data_loader, config, vocab):
self.sess = sess
self.model = model
self.data_loader = data_loader
self.config = config
self.vocab = vocab
def generate(self, epoch):
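        # Decodes the whole test split once, writes paired input/target/output
        # samples to disk, and reports corpus BLEU (up to bigrams).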
self.data_loader.switch_to_test_data(self.sess)
step = 0
refs, hypos = [], []
while True:
try:
# for step in range(len(self.data_loader)):
t0 = time.time()
results = self.generate_step()
# loss = results['loss']
input_text = results['input_text']
input_texts = tx.utils.strip_special_tokens(
input_text, is_token_list=True)
target_text = results['target_text']
target_texts = tx.utils.strip_special_tokens(
target_text, is_token_list=True)
target_texts = tx.utils.str_join(target_texts)
output_ids = results['output_ids']
output_texts = tx.utils.map_ids_to_strs(
ids=output_ids, vocab=self.vocab)
                if 'hier' in self.config.exp_name:
output_texts = np.reshape(output_texts, [self.config.batch_size, -1])
output_texts = tx.utils.str_join(output_texts)
                    output_texts = [text.split(' <PAD>')[0] if ' <PAD>' in text else text for text in output_texts]
target_texts = np.reshape(target_texts, [self.config.batch_size, -1])
target_texts = tx.utils.str_join(target_texts)
for hypo, ref in zip(output_texts, target_texts):
hypos.append(hypo)
refs.append([ref])
# Writes samples
tx.utils.write_paired_text(tx.utils.str_join(input_texts), output_texts, os.path.join(self.config.log_root, epoch + self.config.sample_path), append=True, mode='v')
tx.utils.write_paired_text(target_texts, output_texts, os.path.join(self.config.log_root, epoch + self.config.sample_path + '2'), append=True, mode='v')
if step % 100 == 0:
# tf.logging.info("%s Testing loss : %2f, sec/batch : %2f" % (step, loss, (time.time() - t0)))
print(' '.join(input_texts[0]))
# print(' '.join(topic_texts[0]))
print(output_texts[0])
step += 1
except tf.errors.OutOfRangeError:
break
test_bleu = tx.evals.corpus_bleu(list_of_references=refs, hypotheses=hypos, max_order=2, return_all=True)
print('=' * 50)
print_str = 'BLEU={d[0]:.4f}, b1={d[1]:.4f}, b2={d[2]:.4f}'.format(d=test_bleu)
print(print_str)
print('=' * 50)
with open(os.path.join(self.config.log_root, epoch + '.bleu'), 'w') as f:
f.write(print_str)
def generate_step(self):
return self.model.run_generate_step(self.sess)
| [
"texar.utils.strip_special_tokens",
"numpy.reshape",
"os.path.join",
"texar.utils.map_ids_to_strs",
"texar.evals.corpus_bleu",
"texar.utils.str_join",
"time.time"
] | [((2686, 2783), 'texar.evals.corpus_bleu', 'tx.evals.corpus_bleu', ([], {'list_of_references': 'refs', 'hypotheses': 'hypos', 'max_order': '(2)', 'return_all': '(True)'}), '(list_of_references=refs, hypotheses=hypos, max_order=2,\n return_all=True)\n', (2706, 2783), True, 'import texar as tx\n'), ((575, 586), 'time.time', 'time.time', ([], {}), '()\n', (584, 586), False, 'import time\n'), ((757, 818), 'texar.utils.strip_special_tokens', 'tx.utils.strip_special_tokens', (['input_text'], {'is_token_list': '(True)'}), '(input_text, is_token_list=True)\n', (786, 818), True, 'import texar as tx\n'), ((925, 987), 'texar.utils.strip_special_tokens', 'tx.utils.strip_special_tokens', (['target_text'], {'is_token_list': '(True)'}), '(target_text, is_token_list=True)\n', (954, 987), True, 'import texar as tx\n'), ((1040, 1071), 'texar.utils.str_join', 'tx.utils.str_join', (['target_texts'], {}), '(target_texts)\n', (1057, 1071), True, 'import texar as tx\n'), ((1155, 1213), 'texar.utils.map_ids_to_strs', 'tx.utils.map_ids_to_strs', ([], {'ids': 'output_ids', 'vocab': 'self.vocab'}), '(ids=output_ids, vocab=self.vocab)\n', (1179, 1213), True, 'import texar as tx\n'), ((2959, 3010), 'os.path.join', 'os.path.join', (['self.config.log_root', "(epoch + '.bleu')"], {}), "(self.config.log_root, epoch + '.bleu')\n", (2971, 3010), False, 'import os\n'), ((1333, 1387), 'numpy.reshape', 'np.reshape', (['output_texts', '[self.config.batch_size, -1]'], {}), '(output_texts, [self.config.batch_size, -1])\n', (1343, 1387), True, 'import numpy as np\n'), ((1423, 1454), 'texar.utils.str_join', 'tx.utils.str_join', (['output_texts'], {}), '(output_texts)\n', (1440, 1454), True, 'import texar as tx\n'), ((1618, 1672), 'numpy.reshape', 'np.reshape', (['target_texts', '[self.config.batch_size, -1]'], {}), '(target_texts, [self.config.batch_size, -1])\n', (1628, 1672), True, 'import numpy as np\n'), ((1708, 1739), 'texar.utils.str_join', 'tx.utils.str_join', (['target_texts'], {}), '(target_texts)\n', (1725, 1739), True, 'import texar as tx\n'), ((1962, 1992), 'texar.utils.str_join', 'tx.utils.str_join', (['input_texts'], {}), '(input_texts)\n', (1979, 1992), True, 'import texar as tx\n'), ((2008, 2075), 'os.path.join', 'os.path.join', (['self.config.log_root', '(epoch + self.config.sample_path)'], {}), '(self.config.log_root, epoch + self.config.sample_path)\n', (2020, 2075), False, 'import os\n'), ((2171, 2244), 'os.path.join', 'os.path.join', (['self.config.log_root', "(epoch + self.config.sample_path + '2')"], {}), "(self.config.log_root, epoch + self.config.sample_path + '2')\n", (2183, 2244), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
# Set random seed for reproducibility
np.random.seed(1000)
nb_iterations = 100000
x = 1.0
samples = []
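# Model: Exp(0.1) prior and likelihood 0.5 * exp(-x) on x >= 0; g(x) is the
# unnormalized posterior and q(xp) a unit-variance Gaussian random-walk proposal.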
def prior(x):
return 0.1 * np.exp(-0.1 * x)
def likelihood(x):
if x >= 0:
return 0.5 * np.exp(-np.abs(x))
return 0
def g(x):
return likelihood(x) * prior(x)
def q(xp):
return np.random.normal(xp)
if __name__ == '__main__':
# Main loop
for i in range(nb_iterations):
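        # propose a candidate and form the Metropolis ratio; accept with
        # probability min(1, alpha) (rejected proposals are simply skipped here,
        # rather than re-recording the current state)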
xc = q(x)
alpha = g(xc) / g(x)
if np.isnan(alpha):
continue
if alpha >= 1:
samples.append(xc)
x = xc
else:
if np.random.uniform(0.0, 1.0) < alpha:
samples.append(xc)
x = xc
# Show the histogram
sns.set()
fig, ax = plt.subplots(1, 2, figsize=(22, 10))
sns.kdeplot(samples, shade=True, shade_lowest=True, kernel="gau", ax=ax[0])
sns.kdeplot(samples, shade=True, shade_lowest=True, cumulative=True, kernel="gau", ax=ax[1])
ax[0].set_xlabel('x', fontsize=22)
ax[0].set_title('Probability density function', fontsize=22)
ax[1].set_xlabel('x', fontsize=22)
ax[1].set_title('Cumulative distribution function', fontsize=22)
plt.show() | [
"numpy.random.normal",
"numpy.abs",
"seaborn.set",
"numpy.exp",
"seaborn.kdeplot",
"numpy.isnan",
"numpy.random.seed",
"numpy.random.uniform",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((117, 137), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (131, 137), True, 'import numpy as np\n'), ((418, 438), 'numpy.random.normal', 'np.random.normal', (['xp'], {}), '(xp)\n', (434, 438), True, 'import numpy as np\n'), ((865, 874), 'seaborn.set', 'sns.set', ([], {}), '()\n', (872, 874), True, 'import seaborn as sns\n'), ((892, 928), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(22, 10)'}), '(1, 2, figsize=(22, 10))\n', (904, 928), True, 'import matplotlib.pyplot as plt\n'), ((934, 1009), 'seaborn.kdeplot', 'sns.kdeplot', (['samples'], {'shade': '(True)', 'shade_lowest': '(True)', 'kernel': '"""gau"""', 'ax': 'ax[0]'}), "(samples, shade=True, shade_lowest=True, kernel='gau', ax=ax[0])\n", (945, 1009), True, 'import seaborn as sns\n'), ((1015, 1112), 'seaborn.kdeplot', 'sns.kdeplot', (['samples'], {'shade': '(True)', 'shade_lowest': '(True)', 'cumulative': '(True)', 'kernel': '"""gau"""', 'ax': 'ax[1]'}), "(samples, shade=True, shade_lowest=True, cumulative=True, kernel\n ='gau', ax=ax[1])\n", (1026, 1112), True, 'import seaborn as sns\n'), ((1335, 1345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1343, 1345), True, 'import matplotlib.pyplot as plt\n'), ((226, 242), 'numpy.exp', 'np.exp', (['(-0.1 * x)'], {}), '(-0.1 * x)\n', (232, 242), True, 'import numpy as np\n'), ((587, 602), 'numpy.isnan', 'np.isnan', (['alpha'], {}), '(alpha)\n', (595, 602), True, 'import numpy as np\n'), ((735, 762), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (752, 762), True, 'import numpy as np\n'), ((313, 322), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (319, 322), True, 'import numpy as np\n')] |
# This Program is distributed under the terms of the MIT license.
# Author: <NAME>
# Date: 2021-12-12
# Copyright: <NAME> from SAA, SJTU
# Description: Task 1 - Lagrange-Polynomial
# Environment: Python 3.9.5 64-bit
# Numpy 1.20.3
# Matplotlib 3.4.2
import numpy as np
import matplotlib.pyplot as plt
# Lagrange-Polynomial
def L(n, x, f):
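    # evaluates the degree-n Lagrange interpolant of f at a scalar x,
    # built on n + 1 equispaced nodes over [-1, 1]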
x_ = np.linspace(-1, 1, n + 1)
y_ = f(x_)
l_ = np.zeros(x_.size)
res = 0.0
for i in range(n + 1):
deno, nume = 1.0, 1.0
for j in range(n + 1):
if i != j:
deno *= (x_[i] - x_[j])
nume *= (x - x_[j])
l_[i] = nume / deno
res += l_[i] * y_[i]
return res
def LLL(n, x, f):
    res = np.zeros(x.size)
    for i in range(x.size):
res[i] = L(n, x[i], f)
return res
def redefinedL(n, x, f):
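    # same construction on Chebyshev nodes x_i = cos((2i + 1) * pi / (2(n + 1))),
    # which suppresses the Runge oscillations seen with equispaced nodes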
x_ = np.zeros(n + 1)
for i in range(n + 1):
x_[i] = np.cos((2 * i + 1) * np.pi / (2 * (n + 1)))
y_ = f(x_)
l_ = np.zeros(x_.size)
res = 0.0
for i in range(n + 1):
deno, nume = 1.0, 1.0
for j in range(n + 1):
if i != j:
deno *= (x_[i] - x_[j])
nume *= (x - x_[j])
l_[i] = nume / deno
res += l_[i] * y_[i]
return res
def redefinedLLL(n, x, f):
    res = np.zeros(x.size)
    for i in range(x.size):
res[i] = redefinedL(n, x[i], f)
return res
def f(x):
return 1.0 / (1.0 + 25.0 * x**2)
def g(x):
return np.exp(x)
# main
n = np.array([5, 10, 20])
x = np.array([-0.95, -0.47, 0.1, 0.37, 0.93])
# function f(x) = 1 / (1 + 25x^2)
for i in n:
print(f'n = {i}:')
for j in x:
print(f'f({j})-p_n({j}) = {f(j) - L(i, j, f)}')
    plt.title(r'Lagrange-Polynomial of $f(x)=\dfrac{1}{1+25x^2}$')
plt.grid('on')
xx = np.linspace(-1, 1, 200)
yy = f(xx)
zz = LLL(i, xx, f)
    plt.plot(xx, yy, 'r-', label='$f(x)=\dfrac{1}{1+25x^2}$')
plt.plot(xx, zz, 'g:', label=f'$p{i}(x)$')
plt.legend()
plt.show()
# function g(x) = e^x
for i in n:
print(f'n = {i}:')
for j in x:
print(f'g({j})-p_n({j}) = {g(j) - L(i, j, g)}')
plt.title(r'Lagrange-Polynomial of $g(x)=e^x$')
plt.grid('on')
xx = np.linspace(-1, 1, 200)
yy = g(xx)
zz = LLL(i, xx, g)
plt.plot(xx, yy, 'r-', label='$g(x)=e^x$')
plt.plot(xx, zz, 'g:', label=f'$p{i}(x)$')
plt.legend()
plt.show()
# Redefined Lagrange-Polynomial
# function f(x) = 1 / (1 + 25x^2)
for i in n:
print(f'n = {i}:')
for j in x:
print(f'f({j})-p_n({j}) = {f(j) - redefinedL(i, j, f)}')
    plt.title(r'Lagrange-Polynomial of $f(x)=\dfrac{1}{1+25x^2}$')
plt.grid('on')
xx = np.linspace(-1, 1, 200)
yy = f(xx)
zz = redefinedLLL(i, xx, f)
    plt.plot(xx, yy, 'r-', label='$f(x)=\dfrac{1}{1+25x^2}$')
plt.plot(xx, zz, 'g:', label=f'$p{i}(x)$')
plt.legend()
plt.show() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.cos",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1508, 1529), 'numpy.array', 'np.array', (['[5, 10, 20]'], {}), '([5, 10, 20])\n', (1516, 1529), True, 'import numpy as np\n'), ((1534, 1575), 'numpy.array', 'np.array', (['[-0.95, -0.47, 0.1, 0.37, 0.93]'], {}), '([-0.95, -0.47, 0.1, 0.37, 0.93])\n', (1542, 1575), True, 'import numpy as np\n'), ((376, 401), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(n + 1)'], {}), '(-1, 1, n + 1)\n', (387, 401), True, 'import numpy as np\n'), ((426, 443), 'numpy.zeros', 'np.zeros', (['x_.size'], {}), '(x_.size)\n', (434, 443), True, 'import numpy as np\n'), ((746, 759), 'numpy.zeros', 'np.zeros', (['(200)'], {}), '(200)\n', (754, 759), True, 'import numpy as np\n'), ((866, 881), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (874, 881), True, 'import numpy as np\n'), ((993, 1010), 'numpy.zeros', 'np.zeros', (['x_.size'], {}), '(x_.size)\n', (1001, 1010), True, 'import numpy as np\n'), ((1322, 1335), 'numpy.zeros', 'np.zeros', (['(200)'], {}), '(200)\n', (1330, 1335), True, 'import numpy as np\n'), ((1486, 1495), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1492, 1495), True, 'import numpy as np\n'), ((1722, 1783), 'matplotlib.pyplot.title', 'plt.title', (['"""Lagrange-Polynomial of $f(x)=\\\\dfrac{1}{25+x^2}$"""'], {}), "('Lagrange-Polynomial of $f(x)=\\\\dfrac{1}{25+x^2}$')\n", (1731, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1802), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (1796, 1802), True, 'import matplotlib.pyplot as plt\n'), ((1812, 1835), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (1823, 1835), True, 'import numpy as np\n'), ((1878, 1935), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""r-"""'], {'label': '"""$f(x)=\\\\dfrac{1}{25+x^2}$"""'}), "(xx, yy, 'r-', label='$f(x)=\\\\dfrac{1}{25+x^2}$')\n", (1886, 1935), True, 'import matplotlib.pyplot as plt\n'), ((1939, 1981), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'zz', '"""g:"""'], {'label': 'f"""$p{i}(x)$"""'}), "(xx, zz, 'g:', label=f'$p{i}(x)$')\n", (1947, 1981), True, 'import matplotlib.pyplot as plt\n'), ((1986, 1998), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1996, 1998), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2013), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2011, 2013), True, 'import matplotlib.pyplot as plt\n'), ((2148, 2194), 'matplotlib.pyplot.title', 'plt.title', (['"""Lagrange-Polynomial of $g(x)=e^x$"""'], {}), "('Lagrange-Polynomial of $g(x)=e^x$')\n", (2157, 2194), True, 'import matplotlib.pyplot as plt\n'), ((2200, 2214), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (2208, 2214), True, 'import matplotlib.pyplot as plt\n'), ((2224, 2247), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (2235, 2247), True, 'import numpy as np\n'), ((2290, 2332), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""r-"""'], {'label': '"""$g(x)=e^x$"""'}), "(xx, yy, 'r-', label='$g(x)=e^x$')\n", (2298, 2332), True, 'import matplotlib.pyplot as plt\n'), ((2337, 2379), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'zz', '"""g:"""'], {'label': 'f"""$p{i}(x)$"""'}), "(xx, zz, 'g:', label=f'$p{i}(x)$')\n", (2345, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2396), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2394, 2396), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2409, 2411), True, 'import matplotlib.pyplot as plt\n'), ((2600, 
2661), 'matplotlib.pyplot.title', 'plt.title', (['"""Lagrange-Polynomial of $f(x)=\\\\dfrac{1}{25+x^2}$"""'], {}), "('Lagrange-Polynomial of $f(x)=\\\\dfrac{1}{25+x^2}$')\n", (2609, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2666, 2680), 'matplotlib.pyplot.grid', 'plt.grid', (['"""on"""'], {}), "('on')\n", (2674, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2690, 2713), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (2701, 2713), True, 'import numpy as np\n'), ((2765, 2822), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""r-"""'], {'label': '"""$f(x)=\\\\dfrac{1}{25+x^2}$"""'}), "(xx, yy, 'r-', label='$f(x)=\\\\dfrac{1}{25+x^2}$')\n", (2773, 2822), True, 'import matplotlib.pyplot as plt\n'), ((2826, 2868), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'zz', '"""g:"""'], {'label': 'f"""$p{i}(x)$"""'}), "(xx, zz, 'g:', label=f'$p{i}(x)$')\n", (2834, 2868), True, 'import matplotlib.pyplot as plt\n'), ((2873, 2885), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2883, 2885), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2900), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2898, 2900), True, 'import matplotlib.pyplot as plt\n'), ((925, 968), 'numpy.cos', 'np.cos', (['((2 * i + 1) * np.pi / (2 * (n + 1)))'], {}), '((2 * i + 1) * np.pi / (2 * (n + 1)))\n', (931, 968), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import os
import scipy.sparse as sp
import torch
from scipy.sparse import linalg
from prettytable import PrettyTable
class DataLoader(object):
def __init__(self, xs, ys, batch_size, pad_with_last_sample=True):
"""
:param xs:
:param ys:
:param batch_size:
:param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.
"""
self.batch_size = batch_size
self.current_ind = 0
if pad_with_last_sample:
num_padding = (batch_size - (len(xs) % batch_size)) % batch_size
x_padding = np.repeat(xs[-1:], num_padding, axis=0)
y_padding = np.repeat(ys[-1:], num_padding, axis=0)
xs = np.concatenate([xs, x_padding], axis=0)
ys = np.concatenate([ys, y_padding], axis=0)
self.size = len(xs)
self.num_batch = int(self.size // self.batch_size)
self.xs = xs
self.ys = ys
def shuffle(self):
permutation = np.random.permutation(self.size)
xs, ys = self.xs[permutation], self.ys[permutation]
self.xs = xs
self.ys = ys
def get_iterator(self):
self.current_ind = 0
def _wrapper():
while self.current_ind < self.num_batch:
start_ind = self.batch_size * self.current_ind
end_ind = min(self.size, self.batch_size * (self.current_ind + 1))
x_i = self.xs[start_ind: end_ind, ...]
y_i = self.ys[start_ind: end_ind, ...]
yield (x_i, y_i)
self.current_ind += 1
return _wrapper()
class StandardScaler:
"""
Standard the input
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
return (data * self.std) + self.mean
def sym_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()
def asym_adj(adj):
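    # random-walk (row-normalized) transition matrix D^-1 A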
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1)).flatten()
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
    d_mat = sp.diags(d_inv)
return d_mat.dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
"""
# L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
# D = diag(A 1)
:param adj:
:return:
"""
adj = sp.coo_matrix(adj)
d = np.array(adj.sum(1))
d_inv_sqrt = np.power(d, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
return normalized_laplacian
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
if undirected:
adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
L = calculate_normalized_laplacian(adj_mx)
if lambda_max is None:
lambda_max, _ = linalg.eigsh(L, 1, which='LM')
lambda_max = lambda_max[0]
L = sp.csr_matrix(L)
M, _ = L.shape
I = sp.identity(M, format='csr', dtype=L.dtype)
L = (2 / lambda_max * L) - I
return L.astype(np.float32).todense()
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
def load_adj(pkl_filename, adjtype):
sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
if adjtype == "scalap":
adj = [calculate_scaled_laplacian(adj_mx)]
elif adjtype == "normlap":
adj = [calculate_normalized_laplacian(adj_mx).astype(np.float32).todense()]
elif adjtype == "symnadj":
adj = [sym_adj(adj_mx)]
elif adjtype == "transition":
adj = [asym_adj(adj_mx)]
elif adjtype == "doubletransition":
adj = [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
elif adjtype == "identity":
adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32)]
    else:
        raise ValueError(f"adj type {adjtype} not defined")
return sensor_ids, sensor_id_to_ind, adj
def load_dataset(dataset_dir, batch_size, valid_batch_size=None, test_batch_size=None,
eRec=False, eR_seq_size=12, suffix='', scaler=None):
data = {}
if eRec:
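        # error-recurrent mode: stack eR_seq_size consecutive (x, y) samples into
        # sliding windows per split and cache them as eR<seq_size>_<split>.npz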
for category in [f'train{suffix}', f'val{suffix}', f'test{suffix}']:
if os.path.exists(os.path.join(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz')):
cat_data = np.load(os.path.join(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
print(f'data x_{category} shape')
print(data['x_' + category].shape)
print(f'data y_{category} shape')
print(data['y_' + category].shape)
else:
cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
print(f'data x_{category} shape')
print(data['x_' + category].shape)
print(f'data y_{category} shape')
print(data['y_' + category].shape)
data_size = data['x_' + category].shape[0]
data_x_aux = np.zeros((1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].shape[2], data['x_' + category].shape[3]))
data_x_aux_temp = np.zeros((1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].shape[2],
data['x_' + category].shape[3]))
data_y_aux = np.zeros((1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].shape[2],
data['y_' + category].shape[3]))
data_y_aux_temp = np.zeros((1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].shape[2],
data['y_' + category].shape[3]))
for idx in range(data_size - eR_seq_size):
x = data['x_' + category][idx:idx+eR_seq_size]
x = np.expand_dims(x, axis=0)
y = data['y_' + category][idx:idx + eR_seq_size]
y = np.expand_dims(y, axis=0)
data_x_aux_temp = np.append(data_x_aux_temp, x, axis=0)
data_y_aux_temp = np.append(data_y_aux_temp, y, axis=0)
if idx % 1000 == 0:
print(idx)
data_x_aux = np.append(data_x_aux, data_x_aux_temp[1:], axis=0)
data_y_aux = np.append(data_y_aux, data_y_aux_temp[1:], axis=0)
data_x_aux_temp = np.zeros(
(1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].shape[2],
data['x_' + category].shape[3]))
data_y_aux_temp = np.zeros(
(1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].shape[2],
data['y_' + category].shape[3]))
data_x_aux = np.append(data_x_aux, data_x_aux_temp[1:], axis=0)
data_y_aux = np.append(data_y_aux, data_y_aux_temp[1:], axis=0)
data['x_' + category] = data_x_aux[1:]
data['y_' + category] = data_y_aux[1:]
print(f'data x_{category} shape')
print(data['x_' + category].shape)
print(f'data y_{category} shape')
print(data['y_' + category].shape)
np.savez_compressed(os.path.join(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz'), x=data['x_' + category], y=data['y_' + category])
else:
for category in [f'train{suffix}', f'val{suffix}', f'test{suffix}']:
cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
print(f'data x_{category} shape')
print(data['x_' + category].shape)
print(f'data y_{category} shape')
print(data['y_' + category].shape)
if scaler is None:
scaler = StandardScaler(mean=data[f'x_train{suffix}'][..., 0].mean(),
std=data[f'x_train{suffix}'][..., 0].std())
# Data format
for category in [f'train{suffix}', f'val{suffix}', f'test{suffix}']:
data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
data['train_loader'] = DataLoader(data[f'x_train{suffix}'], data[f'y_train{suffix}'], batch_size)
data['val_loader'] = DataLoader(data[f'x_val{suffix}'], data[f'y_val{suffix}'], valid_batch_size)
data['test_loader'] = DataLoader(data[f'x_test{suffix}'], data[f'y_test{suffix}'], test_batch_size)
data['scaler'] = scaler
return data
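# Hedged usage sketch (the directory name and batch sizes are placeholders,
# not values taken from this project):
#   data = load_dataset('data/METR-LA', batch_size=64,
#                       valid_batch_size=64, test_batch_size=64)
#   scaler = data['scaler']   # reuse for inverse-transforming predictions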
def masked_mse(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = (preds-labels)**2
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_rmse(preds, labels, null_val=np.nan):
return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val))
def masked_mae(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
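# Hedged worked example (toy tensors are assumptions): with null_val=0.0 the
# zero entries of `labels` are masked out and the mask is mean-normalised, e.g.
#   preds  = torch.tensor([1.0, 2.0, 3.0])
#   labels = torch.tensor([1.5, 0.0, 3.5])
#   masked_mae(preds, labels, 0.0)   # tensor(0.5); the 0.0 entry is ignored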
def masked_mape(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)/labels
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def metric(pred, real):
mae = masked_mae(pred,real,0.0).item()
mape = masked_mape(pred,real,0.0).item()
rmse = masked_rmse(pred,real,0.0).item()
return mae,mape,rmse
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
table.add_row([name, param])
total_params += param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
def print_loss(loss_type, loss):
if type(loss) == torch.Tensor:
out = torch.mean(loss)
else:
out = np.mean(loss)
print(f'{loss_type} = {out}\n')
return out
def print_loss_sensor(loss_type, loss):
    # loss must be calculated with no reduction
tab = PrettyTable()
if len(loss.shape) > 2:
dim = (0, 1)
else:
dim = 0
if type(loss) == torch.Tensor:
out = torch.mean(loss, dim)
tab.add_column("mean_detectors", out.numpy())
else:
out = np.mean(loss, dim)
tab.add_column(f"{loss_type}_detectors", out)
print(f'{loss_type} per sensor:')
print(tab, '\n')
return out
def print_loss_seq(loss_type, loss):
    # loss must be calculated with no reduction
tab = PrettyTable()
tab.field_names = [f"time_{i+1}" for i in range(loss.shape[1])]
if type(loss) == torch.Tensor:
out = torch.mean(loss, (0, 2))
tab.add_row(out.numpy())
else:
out = np.mean(loss, (0, 2))
tab.add_row(out)
print(f'{loss_type} per sequence time-step:')
print(tab, '\n')
return out
def print_loss_sensor_seq(loss_type, loss):
    # loss must be calculated with no reduction
tab = PrettyTable()
tab.field_names = [f"time_{i+1}" for i in range(loss.shape[1])]
if type(loss) == torch.Tensor:
out = torch.mean(loss, 0)
tab.add_rows(out.transpose(0, 1).numpy())
else:
out = np.mean(loss, 0)
tab.add_rows(out.transpose(0, 1))
print(f'{loss_type} per sensor per sequence time-step')
print(tab, '\n')
return out | [
"numpy.mean",
"numpy.repeat",
"scipy.sparse.eye",
"torch.mean",
"scipy.sparse.linalg.eigsh",
"numpy.concatenate",
"scipy.sparse.coo_matrix",
"scipy.sparse.diags",
"torch.zeros_like",
"numpy.isinf",
"scipy.sparse.csr_matrix",
"numpy.random.permutation",
"prettytable.PrettyTable",
"numpy.max... | [((2062, 2080), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (2075, 2080), True, 'import scipy.sparse as sp\n'), ((2228, 2248), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2236, 2248), True, 'import scipy.sparse as sp\n'), ((2375, 2393), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (2388, 2393), True, 'import scipy.sparse as sp\n'), ((2524, 2539), 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), '(d_inv)\n', (2532, 2539), True, 'import scipy.sparse as sp\n'), ((2764, 2782), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (2777, 2782), True, 'import scipy.sparse as sp\n'), ((2920, 2940), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2928, 2940), True, 'import scipy.sparse as sp\n'), ((3405, 3421), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['L'], {}), '(L)\n', (3418, 3421), True, 'import scipy.sparse as sp\n'), ((3449, 3492), 'scipy.sparse.identity', 'sp.identity', (['M'], {'format': '"""csr"""', 'dtype': 'L.dtype'}), "(M, format='csr', dtype=L.dtype)\n", (3460, 3492), True, 'import scipy.sparse as sp\n'), ((9695, 9713), 'numpy.isnan', 'np.isnan', (['null_val'], {}), '(null_val)\n', (9703, 9713), True, 'import numpy as np\n'), ((9831, 9847), 'torch.mean', 'torch.mean', (['mask'], {}), '(mask)\n', (9841, 9847), False, 'import torch\n'), ((10057, 10073), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (10067, 10073), False, 'import torch\n'), ((10262, 10280), 'numpy.isnan', 'np.isnan', (['null_val'], {}), '(null_val)\n', (10270, 10280), True, 'import numpy as np\n'), ((10399, 10415), 'torch.mean', 'torch.mean', (['mask'], {}), '(mask)\n', (10409, 10415), False, 'import torch\n'), ((10501, 10526), 'torch.abs', 'torch.abs', (['(preds - labels)'], {}), '(preds - labels)\n', (10510, 10526), False, 'import torch\n'), ((10631, 10647), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (10641, 10647), False, 'import torch\n'), ((10706, 10724), 'numpy.isnan', 'np.isnan', (['null_val'], {}), '(null_val)\n', (10714, 10724), True, 'import numpy as np\n'), ((10843, 10859), 'torch.mean', 'torch.mean', (['mask'], {}), '(mask)\n', (10853, 10859), False, 'import torch\n'), ((11082, 11098), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (11092, 11098), False, 'import torch\n'), ((11326, 11364), 'prettytable.PrettyTable', 'PrettyTable', (["['Modules', 'Parameters']"], {}), "(['Modules', 'Parameters'])\n", (11337, 11364), False, 'from prettytable import PrettyTable\n'), ((11972, 11985), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (11983, 11985), False, 'from prettytable import PrettyTable\n'), ((12453, 12466), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (12464, 12466), False, 'from prettytable import PrettyTable\n'), ((12902, 12915), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (12913, 12915), False, 'from prettytable import PrettyTable\n'), ((1044, 1076), 'numpy.random.permutation', 'np.random.permutation', (['self.size'], {}), '(self.size)\n', (1065, 1076), True, 'import numpy as np\n'), ((2180, 2200), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2188, 2200), True, 'import numpy as np\n'), ((2491, 2506), 'numpy.isinf', 'np.isinf', (['d_inv'], {}), '(d_inv)\n', (2499, 2506), True, 'import numpy as np\n'), ((2872, 2892), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2880, 2892), True, 'import numpy as np\n'), ((2968, 2988), 'scipy.sparse.eye', 
'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (2974, 2988), True, 'import scipy.sparse as sp\n'), ((3195, 3232), 'numpy.maximum.reduce', 'np.maximum.reduce', (['[adj_mx, adj_mx.T]'], {}), '([adj_mx, adj_mx.T])\n', (3212, 3232), True, 'import numpy as np\n'), ((3331, 3361), 'scipy.sparse.linalg.eigsh', 'linalg.eigsh', (['L', '(1)'], {'which': '"""LM"""'}), "(L, 1, which='LM')\n", (3343, 3361), False, 'from scipy.sparse import linalg\n'), ((9873, 9890), 'torch.isnan', 'torch.isnan', (['mask'], {}), '(mask)\n', (9884, 9890), False, 'import torch\n'), ((9892, 9914), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (9908, 9914), False, 'import torch\n'), ((9997, 10014), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (10008, 10014), False, 'import torch\n'), ((10016, 10038), 'torch.zeros_like', 'torch.zeros_like', (['loss'], {}), '(loss)\n', (10032, 10038), False, 'import torch\n'), ((10441, 10458), 'torch.isnan', 'torch.isnan', (['mask'], {}), '(mask)\n', (10452, 10458), False, 'import torch\n'), ((10460, 10482), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (10476, 10482), False, 'import torch\n'), ((10571, 10588), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (10582, 10588), False, 'import torch\n'), ((10590, 10612), 'torch.zeros_like', 'torch.zeros_like', (['loss'], {}), '(loss)\n', (10606, 10612), False, 'import torch\n'), ((10885, 10902), 'torch.isnan', 'torch.isnan', (['mask'], {}), '(mask)\n', (10896, 10902), False, 'import torch\n'), ((10904, 10926), 'torch.zeros_like', 'torch.zeros_like', (['mask'], {}), '(mask)\n', (10920, 10926), False, 'import torch\n'), ((10945, 10970), 'torch.abs', 'torch.abs', (['(preds - labels)'], {}), '(preds - labels)\n', (10954, 10970), False, 'import torch\n'), ((11022, 11039), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (11033, 11039), False, 'import torch\n'), ((11041, 11063), 'torch.zeros_like', 'torch.zeros_like', (['loss'], {}), '(loss)\n', (11057, 11063), False, 'import torch\n'), ((11767, 11783), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (11777, 11783), False, 'import torch\n'), ((11808, 11821), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (11815, 11821), True, 'import numpy as np\n'), ((12110, 12131), 'torch.mean', 'torch.mean', (['loss', 'dim'], {}), '(loss, dim)\n', (12120, 12131), False, 'import torch\n'), ((12210, 12228), 'numpy.mean', 'np.mean', (['loss', 'dim'], {}), '(loss, dim)\n', (12217, 12228), True, 'import numpy as np\n'), ((12584, 12608), 'torch.mean', 'torch.mean', (['loss', '(0, 2)'], {}), '(loss, (0, 2))\n', (12594, 12608), False, 'import torch\n'), ((12666, 12687), 'numpy.mean', 'np.mean', (['loss', '(0, 2)'], {}), '(loss, (0, 2))\n', (12673, 12687), True, 'import numpy as np\n'), ((13033, 13052), 'torch.mean', 'torch.mean', (['loss', '(0)'], {}), '(loss, 0)\n', (13043, 13052), False, 'import torch\n'), ((13127, 13143), 'numpy.mean', 'np.mean', (['loss', '(0)'], {}), '(loss, 0)\n', (13134, 13143), True, 'import numpy as np\n'), ((651, 690), 'numpy.repeat', 'np.repeat', (['xs[-1:]', 'num_padding'], {'axis': '(0)'}), '(xs[-1:], num_padding, axis=0)\n', (660, 690), True, 'import numpy as np\n'), ((715, 754), 'numpy.repeat', 'np.repeat', (['ys[-1:]', 'num_padding'], {'axis': '(0)'}), '(ys[-1:], num_padding, axis=0)\n', (724, 754), True, 'import numpy as np\n'), ((772, 811), 'numpy.concatenate', 'np.concatenate', (['[xs, x_padding]'], {'axis': '(0)'}), '([xs, x_padding], axis=0)\n', (786, 811), True, 'import numpy as np\n'), 
((829, 868), 'numpy.concatenate', 'np.concatenate', (['[ys, y_padding]'], {'axis': '(0)'}), '([ys, y_padding], axis=0)\n', (843, 868), True, 'import numpy as np\n'), ((2132, 2154), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (2140, 2154), True, 'import numpy as np\n'), ((2450, 2470), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (2458, 2470), True, 'import numpy as np\n'), ((2829, 2846), 'numpy.power', 'np.power', (['d', '(-0.5)'], {}), '(d, -0.5)\n', (2837, 2846), True, 'import numpy as np\n'), ((3677, 3691), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3688, 3691), False, 'import pickle\n'), ((9731, 9750), 'torch.isnan', 'torch.isnan', (['labels'], {}), '(labels)\n', (9742, 9750), False, 'import torch\n'), ((10298, 10317), 'torch.isnan', 'torch.isnan', (['labels'], {}), '(labels)\n', (10309, 10317), False, 'import torch\n'), ((10742, 10761), 'torch.isnan', 'torch.isnan', (['labels'], {}), '(labels)\n', (10753, 10761), False, 'import torch\n'), ((3797, 3830), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3808, 3830), False, 'import pickle\n'), ((5002, 5068), 'os.path.join', 'os.path.join', (['dataset_dir', "(f'eR{eR_seq_size}_' + category + '.npz')"], {}), "(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz')\n", (5014, 5068), False, 'import os\n'), ((5982, 6108), 'numpy.zeros', 'np.zeros', (["(1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].\n shape[2], data['x_' + category].shape[3])"], {}), "((1, eR_seq_size, data['x_' + category].shape[1], data['x_' +\n category].shape[2], data['x_' + category].shape[3]))\n", (5990, 6108), True, 'import numpy as np\n'), ((6139, 6265), 'numpy.zeros', 'np.zeros', (["(1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].\n shape[2], data['x_' + category].shape[3])"], {}), "((1, eR_seq_size, data['x_' + category].shape[1], data['x_' +\n category].shape[2], data['x_' + category].shape[3]))\n", (6147, 6265), True, 'import numpy as np\n'), ((6332, 6458), 'numpy.zeros', 'np.zeros', (["(1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].\n shape[2], data['y_' + category].shape[3])"], {}), "((1, eR_seq_size, data['y_' + category].shape[1], data['y_' +\n category].shape[2], data['y_' + category].shape[3]))\n", (6340, 6458), True, 'import numpy as np\n'), ((6528, 6654), 'numpy.zeros', 'np.zeros', (["(1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].\n shape[2], data['y_' + category].shape[3])"], {}), "((1, eR_seq_size, data['y_' + category].shape[1], data['y_' +\n category].shape[2], data['y_' + category].shape[3]))\n", (6536, 6654), True, 'import numpy as np\n'), ((7869, 7919), 'numpy.append', 'np.append', (['data_x_aux', 'data_x_aux_temp[1:]'], {'axis': '(0)'}), '(data_x_aux, data_x_aux_temp[1:], axis=0)\n', (7878, 7919), True, 'import numpy as np\n'), ((7949, 7999), 'numpy.append', 'np.append', (['data_y_aux', 'data_y_aux_temp[1:]'], {'axis': '(0)'}), '(data_y_aux, data_y_aux_temp[1:], axis=0)\n', (7958, 7999), True, 'import numpy as np\n'), ((8585, 8629), 'os.path.join', 'os.path.join', (['dataset_dir', "(category + '.npz')"], {}), "(dataset_dir, category + '.npz')\n", (8597, 8629), False, 'import os\n'), ((5106, 5172), 'os.path.join', 'os.path.join', (['dataset_dir', "(f'eR{eR_seq_size}_' + category + '.npz')"], {}), "(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz')\n", (5118, 5172), False, 'import os\n'), ((5537, 5581), 'os.path.join', 'os.path.join', 
(['dataset_dir', "(category + '.npz')"], {}), "(dataset_dir, category + '.npz')\n", (5549, 5581), False, 'import os\n'), ((6846, 6871), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (6860, 6871), True, 'import numpy as np\n'), ((6965, 6990), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (6979, 6990), True, 'import numpy as np\n'), ((7029, 7066), 'numpy.append', 'np.append', (['data_x_aux_temp', 'x'], {'axis': '(0)'}), '(data_x_aux_temp, x, axis=0)\n', (7038, 7066), True, 'import numpy as np\n'), ((7105, 7142), 'numpy.append', 'np.append', (['data_y_aux_temp', 'y'], {'axis': '(0)'}), '(data_y_aux_temp, y, axis=0)\n', (7114, 7142), True, 'import numpy as np\n'), ((8348, 8414), 'os.path.join', 'os.path.join', (['dataset_dir', "(f'eR{eR_seq_size}_' + category + '.npz')"], {}), "(dataset_dir, f'eR{eR_seq_size}_' + category + '.npz')\n", (8360, 8414), False, 'import os\n'), ((7255, 7305), 'numpy.append', 'np.append', (['data_x_aux', 'data_x_aux_temp[1:]'], {'axis': '(0)'}), '(data_x_aux, data_x_aux_temp[1:], axis=0)\n', (7264, 7305), True, 'import numpy as np\n'), ((7343, 7393), 'numpy.append', 'np.append', (['data_y_aux', 'data_y_aux_temp[1:]'], {'axis': '(0)'}), '(data_y_aux, data_y_aux_temp[1:], axis=0)\n', (7352, 7393), True, 'import numpy as np\n'), ((7436, 7562), 'numpy.zeros', 'np.zeros', (["(1, eR_seq_size, data['x_' + category].shape[1], data['x_' + category].\n shape[2], data['x_' + category].shape[3])"], {}), "((1, eR_seq_size, data['x_' + category].shape[1], data['x_' +\n category].shape[2], data['x_' + category].shape[3]))\n", (7444, 7562), True, 'import numpy as np\n'), ((7659, 7785), 'numpy.zeros', 'np.zeros', (["(1, eR_seq_size, data['y_' + category].shape[1], data['y_' + category].\n shape[2], data['y_' + category].shape[3])"], {}), "((1, eR_seq_size, data['y_' + category].shape[1], data['y_' +\n category].shape[2], data['y_' + category].shape[3]))\n", (7667, 7785), True, 'import numpy as np\n'), ((4467, 4487), 'numpy.transpose', 'np.transpose', (['adj_mx'], {}), '(adj_mx)\n', (4479, 4487), True, 'import numpy as np\n'), ((4545, 4569), 'numpy.ones', 'np.ones', (['adj_mx.shape[0]'], {}), '(adj_mx.shape[0])\n', (4552, 4569), True, 'import numpy as np\n')] |
import collections
import itertools
import numpy as np
__all__ = ['chow_liu']
def chow_liu(X, root=None):
"""Return a Chow-Liu tree.
A Chow-Liu tree takes three steps to build:
1. Compute the mutual information between each pair of variables. The values are organised in
a fully connected graph.
2. Extract the maximum spanning tree from the graph.
3. Orient the edges of the tree by picking a root.
TODO: the current implementation uses Kruskal's algorithm to extract the MST. According to
Wikipedia, faster algorithms exist for fully connected graphs.
References
----------
1. <NAME>. and <NAME>., 1968. Approximating discrete probability distributions with
dependence trees. IEEE transactions on Information Theory, 14(3), pp.462-467.
2. https://www.wikiwand.com/en/Chow-Liu_tree
"""
# Compute the mutual information between each pair of variables
marginals = {v: X[v].value_counts(normalize=True) for v in X.columns}
edge = collections.namedtuple('edge', ['u', 'v', 'mi'])
mis = (
edge(
u, v, mutual_info(
puv=X.groupby([u, v]).size() / len(X),
pu=marginals[u],
pv=marginals[v]
))
for u, v in itertools.combinations(sorted(X.columns), 2)
)
edges = ((e.u, e.v) for e in sorted(mis, key=lambda e: e.mi, reverse=True))
# Extract the maximum spanning tree
neighbors = kruskal(vertices=X.columns, edges=edges)
if root is None:
root = X.columns[0]
return list(orient_tree(neighbors, root, visited=set()))
def mutual_info(puv, pu, pv):
"""Return the mutual information between variables u and v."""
# We first align pu and pv with puv so that we can vectorise the MI computation
# TODO: maybe there's a faster way to align pu and pv with respect to puv
pu = pu.reindex(puv.index.get_level_values(pu.name)).values
pv = pv.reindex(puv.index.get_level_values(pv.name)).values
return (puv * np.log(puv / (pv * pu))).sum()
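# Hedged worked example (the toy frame is an assumption; pandas is not imported
# in this module, and the same value_counts conventions as chow_liu are
# assumed): two identical binary columns have MI = log 2, e.g.
#   X = pd.DataFrame({'u': [0, 0, 1, 1], 'v': [0, 0, 1, 1]})
#   puv = X.groupby(['u', 'v']).size() / len(X)
#   mutual_info(puv, X['u'].value_counts(normalize=True),
#               X['v'].value_counts(normalize=True))   # -> log(2) ~ 0.693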
class DisjointSet:
"""Disjoint-set data structure.
References
----------
1. <NAME>. and <NAME>., 1984. Worst-case analysis of set union algorithms.
Journal of the ACM (JACM), 31(2), pp.245-281.
2. https://www.wikiwand.com/en/Disjoint-set_data_structure
"""
def __init__(self, *values):
self.parents = {x: x for x in values}
self.sizes = {x: 1 for x in values}
def find(self, x):
while self.parents[x] != x:
x, self.parents[x] = self.parents[x], self.parents[self.parents[x]]
return x
def union(self, x, y):
if self.sizes[x] < self.sizes[y]:
x, y = y, x
self.parents[y] = x
self.sizes[x] += self.sizes[y]
def kruskal(vertices, edges):
"""Find the Maximum Spanning Tree of a dense graph using Kruskal's algorithm.
The provided edges are assumed to be sorted in descending order.
References
----------
1. <NAME>., 1956. On the shortest spanning subtree of a graph and the traveling
salesman problem. Proceedings of the American Mathematical society, 7(1), pp.48-50.
"""
ds = DisjointSet(*vertices)
neighbors = collections.defaultdict(set)
for u, v in edges:
if ds.find(u) != ds.find(v):
neighbors[u].add(v)
neighbors[v].add(u)
ds.union(ds.find(u), ds.find(v))
if len(neighbors) == len(vertices):
break
return neighbors
def orient_tree(neighbors, root, visited):
"""Return tree edges that originate from the given root.
"""
for neighbor in neighbors[root] - visited:
yield root, neighbor
yield from orient_tree(neighbors, root=neighbor, visited={root})
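# Hedged usage sketch (the DataFrame is an assumption; ties between zero-MI
# edges mean the returned edge order can vary):
#   X = pd.DataFrame({'a': [0, 0, 1, 1], 'b': [0, 0, 1, 1], 'c': [0, 1, 0, 1]})
#   chow_liu(X, root='a')   # e.g. [('a', 'b'), ('a', 'c')], oriented from 'a'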
| [
"numpy.log",
"collections.namedtuple",
"collections.defaultdict"
] | [((1020, 1068), 'collections.namedtuple', 'collections.namedtuple', (['"""edge"""', "['u', 'v', 'mi']"], {}), "('edge', ['u', 'v', 'mi'])\n", (1042, 1068), False, 'import collections\n'), ((3232, 3260), 'collections.defaultdict', 'collections.defaultdict', (['set'], {}), '(set)\n', (3255, 3260), False, 'import collections\n'), ((2015, 2038), 'numpy.log', 'np.log', (['(puv / (pv * pu))'], {}), '(puv / (pv * pu))\n', (2021, 2038), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import fidimag.extensions.common_clib as clib
# Change in the future to common clib:
import fidimag.extensions.clib as atom_clib
import sys
from .minimiser_base import MinimiserBase
class SteepestDescent(MinimiserBase):
"""
This class is the driver to minimise a system using an optimised Steepest
Descent algorithm defined in [1].
    The evolution step is written as
        m_i+1 = (FM * m_i - 4 * tau * (m_i x m_i x H)) / FP
    where
        FM = 4 - tau^2 * |m_i x H|^2
        FP = 4 + tau^2 * |m_i x H|^2
    and tau is a "time step" that needs to be defined according to
    eq. 10 of [1].
NOTES:
    - We use the simplest criterion for choosing tau. However, it is not
      exactly clear how to deal with the denominators defined in tau when
      they are zero. We are not using stricter criteria, but they might be
      necessary; for now we refer to the methods defined in the minimizer
      branch of https://github.com/MicroMagnum/MicroMagnum.
- The effective field H is defined in Tesla units (it works in the
atomistic case). This is defined in the MuMax3 code, so we scale the
field with mu0 when using this class with the micromagnetic classes.
- Methods for the calculations using Numpy are defined for debugging. In
this class we use a C library to speed up the evolution step.
REFS:
[1] Exl et al., Journal of Applied Physics 115, 17D118 (2014).
https://doi.org/10.1063/1.4862839
"""
def __init__(self, mesh, spin,
magnetisation, magnetisation_inv, field, pins,
interactions,
name,
data_saver,
use_jac=False,
integrator=None
):
# Define
super(SteepestDescent, self).__init__(mesh, spin,
magnetisation, magnetisation_inv,
field,
pins,
interactions,
name,
data_saver)
# ---------------------------------------------------------------------
# Variables defined in this SteepestDescent
self.mxH = np.zeros_like(self.field)
self.mxmxH = np.zeros_like(self.field)
self.mxmxH_last = np.zeros_like(self.field)
# time as zero
self.t = 0
self.tau = 1e-4
# self._n_field = np.zeros_like(self.field)
# Threshold values for the criteria to choose the T factor.
# They are defined as properties with the corr decoration
self._tmax = 1e-1
self._tmin = 1e-16
# Scaling of the field
self.scale = 1.
@property
def tmax(self):
return self._tmax
@tmax.setter
def tmax(self, t):
self._tmax = t
# self._tmax_arr = t * np.ones((len(self.tau), 2))
@property
def tmin(self):
return self._tmin
@tmin.setter
def tmin(self, t):
self._tmin = t
# self._tmin_arr = t * np.ones((len(self.tau), 2))
# Same as:
# tmin = property(fget=set_tmin, fset=set_tmin)
def normalise_field(self, a):
norm = np.sqrt(np.sum(a.reshape(-1, 3) ** 2, axis=1))
norm_a = a.reshape(-1, 3) / norm[:, np.newaxis]
norm_a.shape = (-1,)
return norm_a
def field_cross_product(self, a, b):
aXb = np.cross(a.reshape(-1, 3), b.reshape(-1, 3))
return aXb.reshape(-1,)
def run_step(self):
"""
Numpy version of the calculation
"""
# ---------------------------------------------------------------------
self.mxH.shape = (-1, 3)
self.mxmxH.shape = (-1, 3)
self.spin.shape = (-1, 3)
mxH_sq_norm = np.sum(self.mxH ** 2, axis=1)
factor_plus = 4 + (self.tau ** 2) * mxH_sq_norm
factor_minus = 4 - (self.tau ** 2) * mxH_sq_norm
# Compute: m[i+1] = ((4 - t^2 A^2) * m[i] - 4 * t * m[i] x m[i] x H) / (4 + t^2 A^2)
# where "t = self.tau" is the time step and "A = m[i] x H"
new_spin = (factor_minus[:, np.newaxis] * self.spin
- 4 * self.tau * self.mxmxH
# this term should be zero:
# + (2 * (self.tau ** 2) * np.sum(self.mxH * self.spin, axis=1))[:, np.newaxis] * self.mxH
)
new_spin = new_spin / factor_plus[:, np.newaxis]
self.mxH.shape = (-1,)
self.mxmxH.shape = (-1,)
self.spin.shape = (-1,)
new_spin.shape = (-1,)
self.spin_last[:] = self.spin[:]
self.spin[:] = new_spin[:]
atom_clib.normalise_spin(self.spin, self._pins, self.n)
# ---------------------------------------------------------------------
# Update the effective field, torques and time step for the next iter
self.compute_effective_field()
self.mxmxH_last[:] = self.mxmxH[:]
self.mxH[:] = self.field_cross_product(self.spin, self.scale * self.field)[:]
self.mxmxH[:] = self.field_cross_product(self.spin, self.mxH)[:]
# ---------------------------------------------------------------------
# Define the time step tau
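        # (This alternates between the two Barzilai-Borwein step sizes,
        # tau_1 = <ds, ds>/<ds, dy> and tau_2 = <ds, dy>/<dy, dy>,
        # following eq. 10 of Exl et al. cited in the class docstring.)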
ds = (self.spin - self.spin_last).reshape(-1, 3)
dy = (self.mxmxH - self.mxmxH_last).reshape(-1, 3)
if self.step % 2 == 0:
num = np.sum(ds * ds)
den = np.sum(ds * dy)
else:
num = np.sum(ds * dy)
den = np.sum(dy * dy)
# The criteria is taken from the Micromagnum code
if den == 0:
self.tau = self._tmax
else:
self.tau = num / den
# Set the minimum between the abs value of tau and the max tolerance
# self.tau = np.sign(self.tau) * np.max(self._tmin_arr, axis=1)
# Set the maximum between the previous minimum and the min tolerance
# self.tau = np.sign(self.tau) * max(min(np.abs(self.tau),
# self._tmax),
# self._tmin)
# ---------------------------------------------------------------------
def run_step_CLIB(self):
"""
C version of the calculation from common/lib/steepest_descent.c
"""
clib.compute_sd_spin(self.spin, self.spin_last,
self._magnetisation,
self.mxH, self.mxmxH, self.mxmxH_last,
self.tau, self._pins,
self.n
)
self.compute_effective_field()
# Notice that the field is scaled (in the micro class we use Tesla)
clib.compute_sd_step(self.spin, self.spin_last,
self._magnetisation,
self.scale * self.field,
self.mxH, self.mxmxH, self.mxmxH_last,
self.tau, self._pins,
self.n, self.step,
self._tmin, self._tmax
)
def minimise(self, stopping_dm=1e-3, max_steps=5000,
save_data_steps=10, save_m_steps=None, save_vtk_steps=None,
log_every=1000, printing=True,
initial_t_step=1e-2
):
"""
Run the minimisation until meeting the stopping_dm criteria
"""
# Rewrite tmax and tmin arrays and variable
self.tmax = self._tmax
self.tmin = self._tmin
self.step = 0
# Initial "time" step: the algorithm seems sensitive to this value
self.tau = initial_t_step
self.spin_last[:] = self.spin[:]
self.compute_effective_field()
self.mxH[:] = self.field_cross_product(self.spin, self.scale * self.field)[:]
self.mxmxH[:] = self.field_cross_product(self.spin, self.mxH)[:]
self.mxmxH_last[:] = self.mxmxH[:]
while self.step < max_steps:
self.run_step_CLIB()
# Vectorised calculation with Numpy:
# self.run_step()
max_dm = (self.spin - self.spin_last).reshape(-1, 3) ** 2
max_dm = np.max(np.sqrt(np.sum(max_dm, axis=1)))
if printing:
if self.step % log_every == 0:
# print("#{:<4} t={:<8.3g} dt={:.3g} max_dmdt={:.3g}
print("#{:<4} max_tau={:<8.3g} max_dm={:<10.3g}".format(self.step,
np.max(np.abs(self.tau)),
max_dm))
if max_dm < stopping_dm and self.step > 0:
print("#{:<4} max_tau={:<8.3g} max_dm={:<10.3g}".format(self.step,
np.max(np.abs(self.tau)),
max_dm)
)
self.compute_effective_field()
self.data_saver.save()
break
if self.step % save_data_steps == 0:
# update field before saving data
self.compute_effective_field()
self.data_saver.save()
if (save_vtk_steps is not None) and (self.step % save_vtk_steps == 0):
self.save_vtk()
if (save_m_steps is not None) and (self.step % save_m_steps == 0):
self.save_m()
self.step += 1
if self.step == max_steps:
sys.stderr.write("Warning: minimise did not converge in {} steps - maxdm = {}".format(self.step, max_dm))
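# Hedged usage sketch (the constructor arguments mirror the signature above and
# are normally assembled by fidimag's simulation classes, not built by hand):
#   minimiser = SteepestDescent(mesh, spin, Ms, Ms_inv, field, pins,
#                               interactions, 'relax', data_saver)
#   minimiser.minimise(stopping_dm=1e-3, max_steps=5000)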
| [
"numpy.abs",
"fidimag.extensions.common_clib.compute_sd_step",
"numpy.sum",
"fidimag.extensions.common_clib.compute_sd_spin",
"fidimag.extensions.clib.normalise_spin",
"numpy.zeros_like"
] | [((2388, 2413), 'numpy.zeros_like', 'np.zeros_like', (['self.field'], {}), '(self.field)\n', (2401, 2413), True, 'import numpy as np\n'), ((2435, 2460), 'numpy.zeros_like', 'np.zeros_like', (['self.field'], {}), '(self.field)\n', (2448, 2460), True, 'import numpy as np\n'), ((2487, 2512), 'numpy.zeros_like', 'np.zeros_like', (['self.field'], {}), '(self.field)\n', (2500, 2512), True, 'import numpy as np\n'), ((3945, 3974), 'numpy.sum', 'np.sum', (['(self.mxH ** 2)'], {'axis': '(1)'}), '(self.mxH ** 2, axis=1)\n', (3951, 3974), True, 'import numpy as np\n'), ((4808, 4863), 'fidimag.extensions.clib.normalise_spin', 'atom_clib.normalise_spin', (['self.spin', 'self._pins', 'self.n'], {}), '(self.spin, self._pins, self.n)\n', (4832, 4863), True, 'import fidimag.extensions.clib as atom_clib\n'), ((6471, 6613), 'fidimag.extensions.common_clib.compute_sd_spin', 'clib.compute_sd_spin', (['self.spin', 'self.spin_last', 'self._magnetisation', 'self.mxH', 'self.mxmxH', 'self.mxmxH_last', 'self.tau', 'self._pins', 'self.n'], {}), '(self.spin, self.spin_last, self._magnetisation, self.\n mxH, self.mxmxH, self.mxmxH_last, self.tau, self._pins, self.n)\n', (6491, 6613), True, 'import fidimag.extensions.common_clib as clib\n'), ((6880, 7086), 'fidimag.extensions.common_clib.compute_sd_step', 'clib.compute_sd_step', (['self.spin', 'self.spin_last', 'self._magnetisation', '(self.scale * self.field)', 'self.mxH', 'self.mxmxH', 'self.mxmxH_last', 'self.tau', 'self._pins', 'self.n', 'self.step', 'self._tmin', 'self._tmax'], {}), '(self.spin, self.spin_last, self._magnetisation, self.\n scale * self.field, self.mxH, self.mxmxH, self.mxmxH_last, self.tau,\n self._pins, self.n, self.step, self._tmin, self._tmax)\n', (6900, 7086), True, 'import fidimag.extensions.common_clib as clib\n'), ((5549, 5564), 'numpy.sum', 'np.sum', (['(ds * ds)'], {}), '(ds * ds)\n', (5555, 5564), True, 'import numpy as np\n'), ((5583, 5598), 'numpy.sum', 'np.sum', (['(ds * dy)'], {}), '(ds * dy)\n', (5589, 5598), True, 'import numpy as np\n'), ((5631, 5646), 'numpy.sum', 'np.sum', (['(ds * dy)'], {}), '(ds * dy)\n', (5637, 5646), True, 'import numpy as np\n'), ((5665, 5680), 'numpy.sum', 'np.sum', (['(dy * dy)'], {}), '(dy * dy)\n', (5671, 5680), True, 'import numpy as np\n'), ((8399, 8421), 'numpy.sum', 'np.sum', (['max_dm'], {'axis': '(1)'}), '(max_dm, axis=1)\n', (8405, 8421), True, 'import numpy as np\n'), ((8966, 8982), 'numpy.abs', 'np.abs', (['self.tau'], {}), '(self.tau)\n', (8972, 8982), True, 'import numpy as np\n'), ((8692, 8708), 'numpy.abs', 'np.abs', (['self.tau'], {}), '(self.tau)\n', (8698, 8708), True, 'import numpy as np\n')] |
from src.boson import Boson
from os import path
import pickle as pc
import dynet as dy
import numpy as np
import json
dir='/home/lnn/Documents/OpenNRE-Ina/OpenNRE-PyTorch/mnre_data'
print("Loading model from {}...".format('/home/lnn/Downloads/minimal-span-parser-master/models/top-down-model_dev=87.21'))
model = dy.ParameterCollection()
[parser] = dy.load('/home/lnn/Downloads/minimal-span-parser-master/models/top-down-model_dev=87.21', model)
dim=500
def lstm_parse(pos_file,lstm_file):
if pos_file.find('.json')>=0:
data=json.load(open(pos_file))['mnre_data']
l=len(data)
else:
data=np.load(pos_file)
l=len(data)
if path.exists(lstm_file):
lstm_outs=np.load(lstm_file)
else:
lstm_outs = np.zeros((l, 500), dtype=np.float)
# ct=0
# for i in lstm_outs:
# if not i.any():
# ct+=1
# print(ct)
# return ct
for m,line in enumerate(data):
# if m<=154000:
# continue
line=[tuple(i) for i in line]
dy.renew_cg()
try:
lo = parser.parts_parse(line)
lstm_outs[m]=lo[-1].npvalue()
except ValueError as e:
print(e)
print(line)
lstm_outs[m]=np.random.standard_normal((500,))
if m%100==0:
print(m)
# break
if m%1000==0:
np.save(lstm_file,lstm_outs)
np.save(lstm_file, lstm_outs)
def load_parse_res(ppath):
parser=pc.load(open(ppath,mode='rb'))
print(len(parser))
print(parser)
lstm_parse(path.join(dir,'test_zh','stanford_test_zh.json'),path.join(dir,'test_zh','big_stanford_test_lstm_out.npy'))
# pos_mnre(path.join(dir, 'train_zh.txt'), path.join(dir,'train_zh', 'train_zh.json')) | [
"os.path.exists",
"numpy.random.standard_normal",
"os.path.join",
"dynet.ParameterCollection",
"numpy.zeros",
"dynet.renew_cg",
"dynet.load",
"numpy.load",
"numpy.save"
] | [((314, 338), 'dynet.ParameterCollection', 'dy.ParameterCollection', ([], {}), '()\n', (336, 338), True, 'import dynet as dy\n'), ((350, 456), 'dynet.load', 'dy.load', (['"""/home/lnn/Downloads/minimal-span-parser-master/models/top-down-model_dev=87.21"""', 'model'], {}), "(\n '/home/lnn/Downloads/minimal-span-parser-master/models/top-down-model_dev=87.21'\n , model)\n", (357, 456), True, 'import dynet as dy\n'), ((669, 691), 'os.path.exists', 'path.exists', (['lstm_file'], {}), '(lstm_file)\n', (680, 691), False, 'from os import path\n'), ((1417, 1446), 'numpy.save', 'np.save', (['lstm_file', 'lstm_outs'], {}), '(lstm_file, lstm_outs)\n', (1424, 1446), True, 'import numpy as np\n'), ((1572, 1622), 'os.path.join', 'path.join', (['dir', '"""test_zh"""', '"""stanford_test_zh.json"""'], {}), "(dir, 'test_zh', 'stanford_test_zh.json')\n", (1581, 1622), False, 'from os import path\n'), ((1621, 1680), 'os.path.join', 'path.join', (['dir', '"""test_zh"""', '"""big_stanford_test_lstm_out.npy"""'], {}), "(dir, 'test_zh', 'big_stanford_test_lstm_out.npy')\n", (1630, 1680), False, 'from os import path\n'), ((622, 639), 'numpy.load', 'np.load', (['pos_file'], {}), '(pos_file)\n', (629, 639), True, 'import numpy as np\n'), ((711, 729), 'numpy.load', 'np.load', (['lstm_file'], {}), '(lstm_file)\n', (718, 729), True, 'import numpy as np\n'), ((761, 795), 'numpy.zeros', 'np.zeros', (['(l, 500)'], {'dtype': 'np.float'}), '((l, 500), dtype=np.float)\n', (769, 795), True, 'import numpy as np\n'), ((1040, 1053), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (1051, 1053), True, 'import dynet as dy\n'), ((1384, 1413), 'numpy.save', 'np.save', (['lstm_file', 'lstm_outs'], {}), '(lstm_file, lstm_outs)\n', (1391, 1413), True, 'import numpy as np\n'), ((1253, 1286), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(500,)'], {}), '((500,))\n', (1278, 1286), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import time
from pystorm.hal import HAL
from pystorm.hal.hal import parse_hal_spikes, parse_hal_binned_tags
HAL = HAL()
from pystorm.hal.neuromorph import graph # to describe HAL/neuromorph network
from pystorm.PyDriver import bddriver as bd # expose Driver functions directly for debug (cool!)
def collect_spikes_and_tags(collect_time, info_str=""):
_ = HAL.get_spikes()
_ = HAL.get_outputs()
print("starting data collection " + info_str)
HAL.start_traffic(flush=False)
HAL.enable_output_recording(flush=False)
HAL.enable_spike_recording(flush=True)
time.sleep(collect_time)
print(" stopping data collection")
HAL.stop_traffic(flush=False)
HAL.disable_output_recording(flush=False)
HAL.disable_spike_recording(flush=True)
spikes = HAL.get_spikes()
parsed_spikes = parse_hal_spikes(spikes)
outputs = HAL.get_outputs()
parsed_outputs = parse_hal_binned_tags(outputs)
return parsed_spikes, parsed_outputs
def run_big_raw_spikes_pool(encs, bias_level, collect_time, use_inp=False):
net = graph.Network("net")
# some misc encoders
N = encs.shape[0]
biases = bias_level * np.ones((N,), dtype=int)
p_all = net.create_pool("p_all", encs, biases=biases)
if use_inp:
i_all = net.create_input("i_all", 1)
net.create_connection("", i_all, p_all, None)
# map network
print("calling map for all neurons")
HAL.map(net)
spikes, _ = collect_spikes_and_tags(collect_time, "for big raw spikes pool")
if use_inp:
return spikes, p_all, i_all
else:
return spikes, p_all
def run_many_raw_spike_pools(num_pools, encs, bias_level, collect_time, use_inp=False):
net = graph.Network("net")
N = encs.shape[0]
biases = bias_level * np.ones((N,), dtype=int)
ps = []
inps = []
for n in range(num_pools):
p = net.create_pool("p" + str(n), encs, biases=biases)
ps.append(p)
if use_inp:
i = net.create_input("i" + str(n), 1)
net.create_connection("", i, p, None)
inps.append(i)
# map network
print("calling map")
HAL.map(net)
spikes, _ = collect_spikes_and_tags(collect_time, "for big raw spikes pool")
if use_inp:
return spikes, ps, inps
else:
return spikes, ps
def reshape_big_pool_spikes(all_parsed_spikes, width, height, collect_time):
# reshape raw spike data
N_all = width * height
all_parsed_arr = np.zeros((N_all,))
assert(len(all_parsed_spikes) == 1) # should just have p_all
for k, spikes in all_parsed_spikes.items():
for n in range(N_all):
if n in spikes:
all_parsed_arr[n] = sum([el[1] for el in spikes[n]]) # els are (time, ct)
all_parsed_xy = all_parsed_arr.reshape((height, width)) / collect_time
return all_parsed_xy
def reconstruct_many_pool_spikes(many_parsed_spikes, width, height, height_all, width_all, collect_time):
N = width * height
# reconstruct many pool spikes into one dataset
many_parsed_xy = np.zeros((height_all, width_all))
for p, spikes in many_parsed_spikes.items():
this_parsed_arr = np.zeros((N,))
for n in range(N):
if n in spikes:
this_parsed_arr[n] = sum([el[1] for el in spikes[n]]) # els are (time, ct)
this_p_parsed_xy = this_parsed_arr.reshape((height, width))
pool_x, pool_y = p.mapped_xy
many_parsed_xy[pool_y:pool_y + height, pool_x:pool_x + width] = this_p_parsed_xy / collect_time
return many_parsed_xy
def make_XY_rate_comparison_plots(fname, all_parsed_xy, many_parsed_xy, fclip):
plt.figure()
plt.title("spike rate histogram")
plt.hist(all_parsed_xy, bins=20)
print("clipping", np.sum(all_parsed_xy > fclip), "very fast neurons")
all_parsed_xy[all_parsed_xy > fclip] = fclip
many_parsed_xy[many_parsed_xy > fclip] = fclip
def scale(x):
#return np.log10(x + 1)
return x
vmin = np.min(scale(all_parsed_xy))
vmax = np.max(scale(all_parsed_xy))
plt.figure(figsize=(10,10))
plt.subplot(221)
plt.title("rates for whole array")
plt.imshow(scale(all_parsed_xy), vmin=vmin, vmax=vmax)
plt.colorbar()
plt.subplot(223)
plt.title("rates for array reconstructed from pools")
plt.imshow(scale(many_parsed_xy), vmin=vmin, vmax=vmax)
plt.colorbar()
plt.subplot(222)
pct_err = np.abs(all_parsed_xy - many_parsed_xy) / fclip
plt.title("error fraction in terms of max spike rate")
plt.imshow(pct_err)
plt.colorbar()
plt.savefig(fname + ".png")
return pct_err
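# Hedged end-to-end sketch (encoder shape, bias level and timings are
# assumptions, not values taken from this script):
#   encs = np.zeros((4096, 1), dtype=int)   # e.g. a 64x64 pool, zero encoders
#   spikes, p_all = run_big_raw_spikes_pool(encs, bias_level=1, collect_time=5)
#   rates_xy = reshape_big_pool_spikes(spikes, width=64, height=64,
#                                      collect_time=5)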
| [
"pystorm.hal.HAL",
"pystorm.hal.neuromorph.graph.Network",
"matplotlib.pyplot.hist",
"time.sleep",
"pystorm.hal.HAL.enable_output_recording",
"matplotlib.pyplot.imshow",
"pystorm.hal.HAL.map",
"pystorm.hal.HAL.get_spikes",
"pystorm.hal.HAL.enable_spike_recording",
"pystorm.hal.HAL.stop_traffic",
... | [((166, 171), 'pystorm.hal.HAL', 'HAL', ([], {}), '()\n', (169, 171), False, 'from pystorm.hal import HAL\n'), ((414, 430), 'pystorm.hal.HAL.get_spikes', 'HAL.get_spikes', ([], {}), '()\n', (428, 430), False, 'from pystorm.hal import HAL\n'), ((439, 456), 'pystorm.hal.HAL.get_outputs', 'HAL.get_outputs', ([], {}), '()\n', (454, 456), False, 'from pystorm.hal import HAL\n'), ((512, 542), 'pystorm.hal.HAL.start_traffic', 'HAL.start_traffic', ([], {'flush': '(False)'}), '(flush=False)\n', (529, 542), False, 'from pystorm.hal import HAL\n'), ((547, 587), 'pystorm.hal.HAL.enable_output_recording', 'HAL.enable_output_recording', ([], {'flush': '(False)'}), '(flush=False)\n', (574, 587), False, 'from pystorm.hal import HAL\n'), ((592, 630), 'pystorm.hal.HAL.enable_spike_recording', 'HAL.enable_spike_recording', ([], {'flush': '(True)'}), '(flush=True)\n', (618, 630), False, 'from pystorm.hal import HAL\n'), ((636, 660), 'time.sleep', 'time.sleep', (['collect_time'], {}), '(collect_time)\n', (646, 660), False, 'import time\n'), ((706, 735), 'pystorm.hal.HAL.stop_traffic', 'HAL.stop_traffic', ([], {'flush': '(False)'}), '(flush=False)\n', (722, 735), False, 'from pystorm.hal import HAL\n'), ((740, 781), 'pystorm.hal.HAL.disable_output_recording', 'HAL.disable_output_recording', ([], {'flush': '(False)'}), '(flush=False)\n', (768, 781), False, 'from pystorm.hal import HAL\n'), ((786, 825), 'pystorm.hal.HAL.disable_spike_recording', 'HAL.disable_spike_recording', ([], {'flush': '(True)'}), '(flush=True)\n', (813, 825), False, 'from pystorm.hal import HAL\n'), ((840, 856), 'pystorm.hal.HAL.get_spikes', 'HAL.get_spikes', ([], {}), '()\n', (854, 856), False, 'from pystorm.hal import HAL\n'), ((877, 901), 'pystorm.hal.hal.parse_hal_spikes', 'parse_hal_spikes', (['spikes'], {}), '(spikes)\n', (893, 901), False, 'from pystorm.hal.hal import parse_hal_spikes, parse_hal_binned_tags\n'), ((917, 934), 'pystorm.hal.HAL.get_outputs', 'HAL.get_outputs', ([], {}), '()\n', (932, 934), False, 'from pystorm.hal import HAL\n'), ((956, 986), 'pystorm.hal.hal.parse_hal_binned_tags', 'parse_hal_binned_tags', (['outputs'], {}), '(outputs)\n', (977, 986), False, 'from pystorm.hal.hal import parse_hal_spikes, parse_hal_binned_tags\n'), ((1116, 1136), 'pystorm.hal.neuromorph.graph.Network', 'graph.Network', (['"""net"""'], {}), "('net')\n", (1129, 1136), False, 'from pystorm.hal.neuromorph import graph\n'), ((1475, 1487), 'pystorm.hal.HAL.map', 'HAL.map', (['net'], {}), '(net)\n', (1482, 1487), False, 'from pystorm.hal import HAL\n'), ((1761, 1781), 'pystorm.hal.neuromorph.graph.Network', 'graph.Network', (['"""net"""'], {}), "('net')\n", (1774, 1781), False, 'from pystorm.hal.neuromorph import graph\n'), ((2193, 2205), 'pystorm.hal.HAL.map', 'HAL.map', (['net'], {}), '(net)\n', (2200, 2205), False, 'from pystorm.hal import HAL\n'), ((2528, 2546), 'numpy.zeros', 'np.zeros', (['(N_all,)'], {}), '((N_all,))\n', (2536, 2546), True, 'import numpy as np\n'), ((3114, 3147), 'numpy.zeros', 'np.zeros', (['(height_all, width_all)'], {}), '((height_all, width_all))\n', (3122, 3147), True, 'import numpy as np\n'), ((3704, 3716), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3714, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3721, 3754), 'matplotlib.pyplot.title', 'plt.title', (['"""spike rate histogram"""'], {}), "('spike rate histogram')\n", (3730, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3791), 'matplotlib.pyplot.hist', 'plt.hist', (['all_parsed_xy'], {'bins': '(20)'}), '(all_parsed_xy, 
bins=20)\n', (3767, 3791), True, 'import matplotlib.pyplot as plt\n'), ((4121, 4149), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (4131, 4149), True, 'import matplotlib.pyplot as plt\n'), ((4153, 4169), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (4164, 4169), True, 'import matplotlib.pyplot as plt\n'), ((4174, 4208), 'matplotlib.pyplot.title', 'plt.title', (['"""rates for whole array"""'], {}), "('rates for whole array')\n", (4183, 4208), True, 'import matplotlib.pyplot as plt\n'), ((4272, 4286), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4284, 4286), True, 'import matplotlib.pyplot as plt\n'), ((4292, 4308), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (4303, 4308), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4366), 'matplotlib.pyplot.title', 'plt.title', (['"""rates for array reconstructed from pools"""'], {}), "('rates for array reconstructed from pools')\n", (4322, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4431, 4445), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4443, 4445), True, 'import matplotlib.pyplot as plt\n'), ((4451, 4467), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (4462, 4467), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4587), 'matplotlib.pyplot.title', 'plt.title', (['"""error fraction in terms of max spike rate"""'], {}), "('error fraction in terms of max spike rate')\n", (4542, 4587), True, 'import matplotlib.pyplot as plt\n'), ((4592, 4611), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pct_err'], {}), '(pct_err)\n', (4602, 4611), True, 'import matplotlib.pyplot as plt\n'), ((4616, 4630), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4628, 4630), True, 'import matplotlib.pyplot as plt\n'), ((4635, 4662), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '.png')"], {}), "(fname + '.png')\n", (4646, 4662), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1236), 'numpy.ones', 'np.ones', (['(N,)'], {'dtype': 'int'}), '((N,), dtype=int)\n', (1219, 1236), True, 'import numpy as np\n'), ((1831, 1855), 'numpy.ones', 'np.ones', (['(N,)'], {'dtype': 'int'}), '((N,), dtype=int)\n', (1838, 1855), True, 'import numpy as np\n'), ((3223, 3237), 'numpy.zeros', 'np.zeros', (['(N,)'], {}), '((N,))\n', (3231, 3237), True, 'import numpy as np\n'), ((3815, 3844), 'numpy.sum', 'np.sum', (['(all_parsed_xy > fclip)'], {}), '(all_parsed_xy > fclip)\n', (3821, 3844), True, 'import numpy as np\n'), ((4482, 4520), 'numpy.abs', 'np.abs', (['(all_parsed_xy - many_parsed_xy)'], {}), '(all_parsed_xy - many_parsed_xy)\n', (4488, 4520), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import re
import csv
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import LinearSVC
#from sklearn.externals import joblib
from sklearn.metrics import accuracy_score
import sys
mode = sys.argv[1]
training_path = sys.argv[2]
def get_features(df):
white = df["white"].tolist()
aav = df["aav"].tolist()
his = df["hispanic"].tolist()
other = df["other"].tolist()
return [[float(i[0]), float(i[1]), float(i[2]), float(i[3])] for i in zip(aav, his, other, white)]
# Read in data
data = pd.read_csv(training_path)
texts = data['tweet']
y = data['ND_label'].tolist()
X = get_features(data)
# Train the model
model = LinearSVC(class_weight="balanced", dual=False, tol=1e-2, max_iter=1e5)
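# LinearSVC itself has no predict_proba, so we wrap it in CalibratedClassifierCV
# to obtain calibrated probability estimates (sigmoid/Platt scaling by default).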
cclf = CalibratedClassifierCV(base_estimator=model)
cclf.fit(X, y)
# Record the training bias
proba = cclf.predict_proba(X)
proba = np.log(proba)
save_file = training_path[:-4]+'_dddbias.pkl'
with open(save_file, 'wb') as handle:
pickle.dump(proba, handle, protocol=pickle.HIGHEST_PROTOCOL)
prediction = cclf.predict(X)
acc = accuracy_score(y, prediction)
print(f'Training Accuracy score: {acc}')
| [
"pickle.dump",
"pandas.read_csv",
"numpy.log",
"sklearn.svm.LinearSVC",
"sklearn.calibration.CalibratedClassifierCV",
"sklearn.metrics.accuracy_score"
] | [((642, 668), 'pandas.read_csv', 'pd.read_csv', (['training_path'], {}), '(training_path)\n', (653, 668), True, 'import pandas as pd\n'), ((771, 846), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'class_weight': '"""balanced"""', 'dual': '(False)', 'tol': '(0.01)', 'max_iter': '(100000.0)'}), "(class_weight='balanced', dual=False, tol=0.01, max_iter=100000.0)\n", (780, 846), False, 'from sklearn.svm import LinearSVC\n'), ((849, 893), 'sklearn.calibration.CalibratedClassifierCV', 'CalibratedClassifierCV', ([], {'base_estimator': 'model'}), '(base_estimator=model)\n', (871, 893), False, 'from sklearn.calibration import CalibratedClassifierCV\n'), ((976, 989), 'numpy.log', 'np.log', (['proba'], {}), '(proba)\n', (982, 989), True, 'import numpy as np\n'), ((1176, 1205), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'prediction'], {}), '(y, prediction)\n', (1190, 1205), False, 'from sklearn.metrics import accuracy_score\n'), ((1079, 1139), 'pickle.dump', 'pickle.dump', (['proba', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(proba, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (1090, 1139), False, 'import pickle\n')] |
import re
from .propagation import sgp4
from .inout import twoline2rv
from .earth_gravity import EarthGravity, wgs84
from math import pi, sqrt, floor, pow
from numpy import dot, linalg, arange, copy, char
from math import fabs as abs
from .inout import jday
from datetime import datetime
from operator import attrgetter
from .ext import invjday
def jsattime(dtTimeObj):
attrs = ('year', 'month', 'day', 'hour', 'minute', 'second')
d = dtTimeObj
d_tuple = attrgetter(*attrs)(d)
d_tuple = list(d_tuple)
# print(d_tuple)
x = jday(d_tuple[0], d_tuple[1], d_tuple[2], d_tuple[3], d_tuple[4], d_tuple[5])
end = x * 86400
return end
# start = datetime.now()
# currentJsec = jsattime(start)
# print (currentJsec)
# print("212456679584.23526")
def convertTLE(satno, filename):
# sat_dic = {}
# N = 1
ln1 = str("1 " + str(satno)) # Sets up search query for TLE finder
while len(ln1) < 7:
        ln1 = re.sub(' ', '  ', ln1, 1)  # pad with an extra space so the loop terminates
ln2 = str("2 " + str(satno))
while len(ln2) < 7:
        ln2 = re.sub(' ', '  ', ln2, 1)  # pad with an extra space so the loop terminates
# ln1 = re.sub(' +', ' ', ln1)
L1, L2 = '', ''
searchfile = open(filename, "r") # Opens TLE file for TLE that matches satno
# print(str(ln1))
for line in searchfile:
if ln1 in line:
L1 = line
if ln2 in line:
L2 = line
searchfile.close()
if L1:
# use Vallado's sgp4 code to input TLE and output r,v
satout = sgp4(twoline2rv(L1, L2, wgs84), 0)
# set up to convert to touple
position = satout[0]
velocity = satout[1]
# convert jday at beginning of TLE to seconds
jsec = (twoline2rv(L1, L2, wgs84)).jdsatepoch * 86400
end = jsec, position[0], position[1], position[2], velocity[0], velocity[1], velocity[2]
return end
else:
pass
# print(satno, "TLE not found, skipping")
def twobody2(sat1, point):
# stop = sat2
ri = [sat1[1], sat1[2], sat1[3]]
vi = [sat1[4], sat1[5], sat1[6]]
tau = (point - sat1[0])
# tau = -500
mu = 398600.4415
tolerance = 1e-12
u = 0
imax = 20
orbits = 0
tdesired = copy(tau)
threshold = tolerance * abs(tdesired)
r0 = linalg.norm(ri)
n0 = dot(ri, vi)
beta = 2 * (mu / r0) - dot(vi, vi)
if (beta != 0):
umax = + 1 / sqrt(abs(beta))
umin = - 1 / sqrt(abs(beta))
if (beta > 0):
orbits = beta * tau - 2 * n0
orbits = 1 + (orbits * sqrt(beta)) / (pi * mu)
orbits = floor(orbits / 2)
for i in arange(1, imax, 1).reshape(-1):
q = beta * u * u
q = q / (1 + q)
n = 0
r = 1
l = 1
s = 1
d = 3
gcf = 1
k = - 5
gold = 0
while (gcf != gold):
k = - k
l = l + 2
d = d + 4 * l
n = n + (1 + k) * l
r = d / (d - n * r * q)
s = (r - 1) * s
gold = copy(gcf)
gcf = gold + s
h0 = 1 - 2 * q
h1 = 2 * u * (1 - q)
u0 = 2 * h0 * h0 - 1
u1 = 2 * h0 * h1
u2 = 2 * h1 * h1
u3 = 2 * h1 * u2 * gcf / 3
if (orbits != 0):
u3 = u3 + 2 * pi * orbits / (beta * sqrt(beta))
r1 = r0 * u0 + n0 * u1 + mu * u2
dt = r0 * u1 + n0 * u2 + mu * u3
slope = 4 * r1 / (1 + beta * u * u)
terror = tdesired - dt
if (abs(terror) < threshold):
break
if ((i > 1) and (u == uold)):
break
if ((i > 1) and (dt == dtold)):
break
uold = copy(u)
dtold = copy(dt)
ustep = terror / slope
if (ustep > 0):
umin = copy(u)
u = u + ustep
if (u > umax):
u = (umin + umax) / 2
else:
umax = copy(u)
u = u + ustep
if (u < umin):
u = (umin + umax) / 2
if (i == imax):
        print('\n\nmax iterations in twobody2 function')
usaved = copy(u)
f = 1.0 - (mu / r0) * u2
gg = 1.0 - (mu / r1) * u2
g = r0 * u1 + n0 * u2
ff = - mu * u1 / (r0 * r1)
    # The propagated state is simply f*r0 + g*v0 (position) and
    # ff*r0 + gg*v0 (velocity), component by component.
    position = [f * ri[n] + g * vi[n] for n in range(3)]
    velocity = [ff * ri[n] + gg * vi[n] for n in range(3)]
    return position
def inverseDt(dtTimeObj):
ref0 = invjday(dtTimeObj / 86400)
ref0 = list(ref0)
ref0[5] = int(ref0[5])
ref0 = tuple(ref0)
# convert ref0 to a datetime object
ref2 = datetime(*ref0)
return ref2
if __name__ == '__main__':
TLE = 'tle.txt'
esv = 25544
# print(convertTLE(esv, TLE))
point = jsattime(datetime.utcnow())
print(twobody2(convertTLE(esv, TLE), point))
# print(start)
| [
"datetime.datetime",
"numpy.copy",
"operator.attrgetter",
"math.floor",
"datetime.datetime.utcnow",
"math.sqrt",
"numpy.dot",
"math.fabs",
"numpy.linalg.norm",
"re.sub",
"numpy.arange"
] | [((2169, 2178), 'numpy.copy', 'copy', (['tau'], {}), '(tau)\n', (2173, 2178), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((2230, 2245), 'numpy.linalg.norm', 'linalg.norm', (['ri'], {}), '(ri)\n', (2241, 2245), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((2255, 2266), 'numpy.dot', 'dot', (['ri', 'vi'], {}), '(ri, vi)\n', (2258, 2266), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((4048, 4055), 'numpy.copy', 'copy', (['u'], {}), '(u)\n', (4052, 4055), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((4233, 4242), 'numpy.arange', 'arange', (['(1)'], {}), '(1)\n', (4239, 4242), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((4866, 4881), 'datetime.datetime', 'datetime', (['*ref0'], {}), '(*ref0)\n', (4874, 4881), False, 'from datetime import datetime\n'), ((469, 487), 'operator.attrgetter', 'attrgetter', (['*attrs'], {}), '(*attrs)\n', (479, 487), False, 'from operator import attrgetter\n'), ((948, 973), 're.sub', 're.sub', (['""" """', '""" """', 'ln1', '(1)'], {}), "(' ', ' ', ln1, 1)\n", (954, 973), False, 'import re\n'), ((1045, 1070), 're.sub', 're.sub', (['""" """', '""" """', 'ln2', '(1)'], {}), "(' ', ' ', ln2, 1)\n", (1051, 1070), False, 'import re\n'), ((2207, 2220), 'math.fabs', 'abs', (['tdesired'], {}), '(tdesired)\n', (2210, 2220), True, 'from math import fabs as abs\n'), ((2294, 2305), 'numpy.dot', 'dot', (['vi', 'vi'], {}), '(vi, vi)\n', (2297, 2305), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((2528, 2545), 'math.floor', 'floor', (['(orbits / 2)'], {}), '(orbits / 2)\n', (2533, 2545), False, 'from math import pi, sqrt, floor, pow\n'), ((3608, 3615), 'numpy.copy', 'copy', (['u'], {}), '(u)\n', (3612, 3615), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((3632, 3640), 'numpy.copy', 'copy', (['dt'], {}), '(dt)\n', (3636, 3640), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((5018, 5035), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5033, 5035), False, 'from datetime import datetime\n'), ((2559, 2577), 'numpy.arange', 'arange', (['(1)', 'imax', '(1)'], {}), '(1, imax, 1)\n', (2565, 2577), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((2972, 2981), 'numpy.copy', 'copy', (['gcf'], {}), '(gcf)\n', (2976, 2981), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((3434, 3445), 'math.fabs', 'abs', (['terror'], {}), '(terror)\n', (3437, 3445), True, 'from math import fabs as abs\n'), ((3716, 3723), 'numpy.copy', 'copy', (['u'], {}), '(u)\n', (3720, 3723), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((3848, 3855), 'numpy.copy', 'copy', (['u'], {}), '(u)\n', (3852, 3855), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((4350, 4359), 'numpy.arange', 'arange', (['(2)'], {}), '(2)\n', (4356, 4359), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((4462, 4471), 'numpy.arange', 'arange', (['(3)'], {}), '(3)\n', (4468, 4471), False, 'from numpy import dot, linalg, arange, copy, char\n'), ((2352, 2361), 'math.fabs', 'abs', (['beta'], {}), '(beta)\n', (2355, 2361), True, 'from math import fabs as abs\n'), ((2389, 2398), 'math.fabs', 'abs', (['beta'], {}), '(beta)\n', (2392, 2398), True, 'from math import fabs as abs\n'), ((2487, 2497), 'math.sqrt', 'sqrt', (['beta'], {}), '(beta)\n', (2491, 2497), False, 'from math import pi, sqrt, floor, pow\n'), ((3251, 3261), 'math.sqrt', 'sqrt', (['beta'], {}), '(beta)\n', (3255, 3261), False, 'from math import pi, 
sqrt, floor, pow\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 11:17:51 2017
@author: dhingratul
"""
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
# %matplotlib inline
from __future__ import print_function
import collections
import math
import numpy as np
# import os
import random
import tensorflow as tf
import zipfile
from matplotlib import pylab
from six.moves import range
# from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE
"""
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
# Download a file if not present, and make sure it's the right size.
if not os.path.exists(filename):
filename, _ = urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified %s' % filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('/home/dhingratul/Documents/Dataset/text8.zip',
31344016)
"""
# Read the Data into a string
def read_data(filename):
# Extract first file in the .zip as list of words
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
filename = '/home/dhingratul/Documents/Dataset/text8.zip'
words = read_data(filename)
vocab_size = 50000
# Build dictionary, replace rare words with UNK token
def build_dataset(words):
count = [['UNK', -1]]
    # most_common(n) gives the n most common words
vocab = collections.Counter(words).most_common(vocab_size - 1)
count.extend(vocab)
dic = dict()
for word, _ in count:
dic[word] = len(dic)
data = list()
unk_ctr = 0
for w in words:
if w in dic:
index = dic[w]
else:
index = 0
            unk_ctr = unk_ctr + 1  # count words that fall back to UNK
data.append(index)
count[0][1] = unk_ctr
rev_dic = dict(zip(dic.values(), dic.keys()))
return data, count, dic, rev_dic
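# Note (added): ids are assigned in frequency order, so 'UNK' gets id 0 and
# the most frequent real word gets id 1; rev_dic inverts dic so ids can be
# mapped back to words when printing nearest neighbours later.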
data, count, dictionary, reverse_dictionary = build_dataset(words)
d_index = 0
# Generate training batch for skip-gram model
def generate_batch(batch_size, num_skips, skip_window):
    global d_index  # so the function can advance the module-level cursor
    # Assert: test the condition, and trigger an error if it is false
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
# span === [skip_window target skip_window]
span = 2 * skip_window + 1 # +1 for target
# Initialize a double-ended queue with O(1) ops
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[d_index])
d_index = (d_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[d_index])
d_index = (d_index + 1) % len(data)
return batch, labels
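# Optional sanity check (added; not part of the original training flow): peek
# at one small batch to see the (center, context) pairing the sampler emits,
# then rewind the global cursor so training below is unaffected.
demo_batch, demo_labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for b, l in zip(demo_batch, demo_labels[:, 0]):
    print(reverse_dictionary[b], '->', reverse_dictionary[l])
d_index = 0  # rewind the global cursor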
# Train a skip-gram model
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
"""
We pick a random validation set to sample nearest neighbors. Here we limit the
validation samples to the words that have a low numeric ID, which by
construction are also the most frequent.
"""
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.array(random.sample(range(valid_window), valid_size))
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
# Input Data
train_data = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples)
# Variables
embeddings = tf.Variable(tf.random_uniform(
[vocab_size, embedding_size], -1.0, 1.0)) # shape, minval, maxval
softmax_w = tf.Variable(tf.truncated_normal(
[vocab_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
softmax_b = tf.Variable(tf.zeros([vocab_size]))
# Model - Look up for embeddings for input
embed = tf.nn.embedding_lookup(embeddings, train_data)
# Softmax loss using sample of negative labels each time
    # Sampled softmax is a faster way to train a softmax over a huge number of classes
loss_intermed = tf.nn.sampled_softmax_loss(
weights=softmax_w, biases=softmax_b, inputs=embed,
        labels=train_labels, num_sampled=num_sampled,
num_classes=vocab_size)
loss = tf.reduce_mean(loss_intermed)
# Optimizer
""" Optimizes both softmax_weights and embeddings, as embeddings are
    defined as a variable, and the minimize method modifies all variables
"""
lr = 1.0
optimizer = tf.train.AdagradOptimizer(lr).minimize(loss)
    # Similarity between mini-batches and all embeddings using cosine distance
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
norm_embeddings = embeddings/norm
valid_embeddings = tf.nn.embedding_lookup(norm_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(norm_embeddings))
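    # Added note: rows of norm_embeddings are unit length, so the matmul above
    # is exactly the cosine similarity between each validation word and every
    # vocabulary embedding.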
# Access to graph
num_steps = 100001
with tf.Session(graph=graph) as sess:
tf.global_variables_initializer().run()
print("TF Graph Initialized")
average_loss = 0
for i in range(num_steps):
batch_data, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_data: batch_data, train_labels: batch_labels}
_, l = sess.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if i % 2000 == 0:
if i > 0:
average_loss = average_loss / 2000
print('Average loss at step %d: %f' % (i, average_loss))
if i % 10000 == 0:
sim = similarity.eval()
            # Random set of words (16) to evaluate similarity on
for j in range(valid_size):
valid_word = reverse_dictionary[valid_examples[j]]
top_k = 8 # Number of NN
                NN = (-sim[j, :]).argsort()[1: top_k + 1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[NN[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = norm_embeddings.eval()
| [
"zipfile.ZipFile",
"tensorflow.transpose",
"math.sqrt",
"tensorflow.reduce_mean",
"tensorflow.Graph",
"tensorflow.nn.embedding_lookup",
"collections.deque",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.square",
"random.randint",
"tensorflow.zeros",
"tensorflow.device",
"tens... | [((4269, 4279), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4277, 4279), True, 'import tensorflow as tf\n'), ((2583, 2627), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'batch_size', 'dtype': 'np.int32'}), '(shape=batch_size, dtype=np.int32)\n', (2593, 2627), True, 'import numpy as np\n'), ((2643, 2692), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(batch_size, 1)', 'dtype': 'np.int32'}), '(shape=(batch_size, 1), dtype=np.int32)\n', (2653, 2692), True, 'import numpy as np\n'), ((2854, 2884), 'collections.deque', 'collections.deque', ([], {'maxlen': 'span'}), '(maxlen=span)\n', (2871, 2884), False, 'import collections\n'), ((2898, 2909), 'six.moves.range', 'range', (['span'], {}), '(span)\n', (2903, 2909), False, 'from six.moves import range\n'), ((3005, 3035), 'six.moves.range', 'range', (['(batch_size // num_skips)'], {}), '(batch_size // num_skips)\n', (3010, 3035), False, 'from six.moves import range\n'), ((4305, 4324), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (4314, 4324), True, 'import tensorflow as tf\n'), ((4360, 4404), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[batch_size]'}), '(tf.int32, shape=[batch_size])\n', (4374, 4404), True, 'import tensorflow as tf\n'), ((4424, 4471), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[batch_size, 1]'}), '(tf.int32, shape=[batch_size, 1])\n', (4438, 4471), True, 'import tensorflow as tf\n'), ((4492, 4519), 'tensorflow.constant', 'tf.constant', (['valid_examples'], {}), '(valid_examples)\n', (4503, 4519), True, 'import tensorflow as tf\n'), ((4918, 4964), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'train_data'], {}), '(embeddings, train_data)\n', (4940, 4964), True, 'import tensorflow as tf\n'), ((5123, 5280), 'tensorflow.nn.sampled_softmax_loss', 'tf.nn.sampled_softmax_loss', ([], {'weights': 'softmax_w', 'biases': 'softmax_b', 'inputs': 'embed', 'labels': 'train_labels', 'num_samples': 'num_sampled', 'num_classes': 'vocab_size'}), '(weights=softmax_w, biases=softmax_b, inputs=\n embed, labels=train_labels, num_samples=num_sampled, num_classes=vocab_size\n )\n', (5149, 5280), True, 'import tensorflow as tf\n'), ((5319, 5348), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_intermed'], {}), '(loss_intermed)\n', (5333, 5348), True, 'import tensorflow as tf\n'), ((5802, 5856), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['norm_embeddings', 'valid_dataset'], {}), '(norm_embeddings, valid_dataset)\n', (5824, 5856), True, 'import tensorflow as tf\n'), ((5976, 5999), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5986, 5999), True, 'import tensorflow as tf\n'), ((6121, 6137), 'six.moves.range', 'range', (['num_steps'], {}), '(num_steps)\n', (6126, 6137), False, 'from six.moves import range\n'), ((1268, 1293), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename'], {}), '(filename)\n', (1283, 1293), False, 'import zipfile\n'), ((3168, 3184), 'six.moves.range', 'range', (['num_skips'], {}), '(num_skips)\n', (3173, 3184), False, 'from six.moves import range\n'), ((4168, 4187), 'six.moves.range', 'range', (['valid_window'], {}), '(valid_window)\n', (4173, 4187), False, 'from six.moves import range\n'), ((4565, 4623), 'tensorflow.random_uniform', 'tf.random_uniform', (['[vocab_size, embedding_size]', '(-1.0)', '(1.0)'], {}), '([vocab_size, embedding_size], -1.0, 1.0)\n', (4582, 4623), True, 'import tensorflow as tf\n'), ((4835, 4857), 'tensorflow.zeros', 'tf.zeros', 
(['[vocab_size]'], {}), '([vocab_size])\n', (4843, 4857), True, 'import tensorflow as tf\n'), ((5902, 5931), 'tensorflow.transpose', 'tf.transpose', (['norm_embeddings'], {}), '(norm_embeddings)\n', (5914, 5931), True, 'import tensorflow as tf\n'), ((1655, 1681), 'collections.Counter', 'collections.Counter', (['words'], {}), '(words)\n', (1674, 1681), False, 'import collections\n'), ((5545, 5574), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['lr'], {}), '(lr)\n', (5570, 5574), True, 'import tensorflow as tf\n'), ((5698, 5719), 'tensorflow.square', 'tf.square', (['embeddings'], {}), '(embeddings)\n', (5707, 5719), True, 'import tensorflow as tf\n'), ((6013, 6046), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6044, 6046), True, 'import tensorflow as tf\n'), ((6725, 6742), 'six.moves.range', 'range', (['valid_size'], {}), '(valid_size)\n', (6730, 6742), False, 'from six.moves import range\n'), ((3257, 3284), 'random.randint', 'random.randint', (['(0)', '(span - 1)'], {}), '(0, span - 1)\n', (3271, 3284), False, 'import random\n'), ((6988, 7000), 'six.moves.range', 'range', (['top_k'], {}), '(top_k)\n', (6993, 7000), False, 'from six.moves import range\n'), ((4779, 4804), 'math.sqrt', 'math.sqrt', (['embedding_size'], {}), '(embedding_size)\n', (4788, 4804), False, 'import math\n')] |
import numpy as np
import cv2
class CoordGenerator(object):
def __init__(self, intrin, img_w, img_h):
super(CoordGenerator, self).__init__()
self.intrinsics = intrin
self.image_width = img_w
self.image_height = img_h
def pixel2local(self, depth): # depth: float32, meter.
cx, cy, fx, fy = self.intrinsics[0, 2], self.intrinsics[1, 2], self.intrinsics[0, 0], self.intrinsics[1, 1]
u_base = np.tile(np.arange(self.image_width), (self.image_height, 1))
v_base = np.tile(np.arange(self.image_height)[:, np.newaxis], (1, self.image_width))
X = (u_base - cx) * depth / fx
Y = (v_base - cy) * depth / fy
coord_camera = np.stack((X, Y, depth), axis=2)
points_local = coord_camera.reshape((-1, 3), order='F') # (N, 3).
return points_local
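        # Worked example (illustrative values, not from a real sensor): with
        # fx = fy = 500, cx = 320, cy = 240, pixel (u, v) = (420, 240) at a
        # depth of 2.0 m back-projects to X = (420 - 320) * 2.0 / 500 = 0.4 m,
        # Y = 0.0 m, Z = 2.0 m in the camera frame.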
def local2world(self, points_local, pose):
points_local_homo = np.concatenate((points_local, np.ones((points_local.shape[0], 1), dtype=np.float32)), axis=1) # N*4.
points_world_homo = np.matmul(pose, points_local_homo.T).T # (4*4 * 4*N).T = N*4.
points_world = np.divide(points_world_homo, points_world_homo[:, [-1]])[:, :-1]
return points_world
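        # Assumed convention: `pose` is a 4x4 camera-to-world transform, so a
        # camera-frame point p maps to world coordinates as the first three
        # components of pose @ [p, 1] after the homogeneous divide above.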
def depth_pose_2coord(self, depth, pose):
points_local = self.pixel2local(depth)
points_world = self.local2world(points_local, pose)
points_world[(points_local == [0, 0, 0]).all(axis=1)] = 0 # useless?
img_coord = points_world.reshape((self.image_height, self.image_width, 3), order='F')
vis_coord = (img_coord - img_coord.min()) / (img_coord.max() - img_coord.min()) * 255
return img_coord, vis_coord
# if __name__ == '__main__':
# print('done.')
| [
"numpy.ones",
"numpy.divide",
"numpy.stack",
"numpy.matmul",
"numpy.arange"
] | [((724, 755), 'numpy.stack', 'np.stack', (['(X, Y, depth)'], {'axis': '(2)'}), '((X, Y, depth), axis=2)\n', (732, 755), True, 'import numpy as np\n'), ((473, 500), 'numpy.arange', 'np.arange', (['self.image_width'], {}), '(self.image_width)\n', (482, 500), True, 'import numpy as np\n'), ((1069, 1105), 'numpy.matmul', 'np.matmul', (['pose', 'points_local_homo.T'], {}), '(pose, points_local_homo.T)\n', (1078, 1105), True, 'import numpy as np\n'), ((1155, 1211), 'numpy.divide', 'np.divide', (['points_world_homo', 'points_world_homo[:, [-1]]'], {}), '(points_world_homo, points_world_homo[:, [-1]])\n', (1164, 1211), True, 'import numpy as np\n'), ((552, 580), 'numpy.arange', 'np.arange', (['self.image_height'], {}), '(self.image_height)\n', (561, 580), True, 'import numpy as np\n'), ((969, 1022), 'numpy.ones', 'np.ones', (['(points_local.shape[0], 1)'], {'dtype': 'np.float32'}), '((points_local.shape[0], 1), dtype=np.float32)\n', (976, 1022), True, 'import numpy as np\n')] |
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from wann_genetic.individual.torch.ffnn import Network as TorchNetwork
import torch
def node_names(net):
return (
[f"$x_{{{i}}}$" for i in range(net.n_in)] # inputs
+ ["$b$"] # bias
+ [f"$h_{{{i}}}$" for i in range(net.n_hidden)] # hidden
+ [f"$y_{{{i}}}$" for i in range(net.n_out)] # outputs
)
def draw_graph(net, ax=None, pos_iterations=None, layer_h=17, labels=None):
if ax is None:
ax = plt.gca()
ax.set_axis_off()
g = nx.DiGraph()
# Add nodes
nodes = node_names(net)
g.add_nodes_from(nodes)
# Colors
color = (
['#fdb462'] * net.n_in # inputs
+ ['#ffed6f'] # bias
+ ['#80b1d3'] * net.n_hidden # hidden
+ ['#b3de69'] * net.n_out # outputs
)
layers = list([len(s) for s in net.layers(include_input=True)])
# pos x will be determined by layer, pos y will be iterated on
# input, bias and output nodes will have static position, hidden node
    # positions will be determined iteratively
pos = np.zeros(net.n_nodes, dtype=[('x', float), ('y', float)])
pos['y'][0: net.offset] = np.arange(net.offset)
pos['y'][-net.n_out:] = np.arange(net.n_out)
a = net.weight_matrix[:net.offset, :net.n_hidden]
b = net.weight_matrix[net.offset:, :net.n_hidden]
c = net.weight_matrix[net.offset:, net.n_hidden:]
M = np.vstack([
a, b+b.T, c.T
])
M_sum = np.sum(M, axis=0)
M_sum[np.where(M_sum == 0)] = 1
M = M / M_sum
if pos_iterations is None:
pos_iterations = len(layers)
for i in range(pos_iterations):
update = np.dot(pos['y'], M)
pos['y'][net.offset:-net.n_out] = update
i_0 = 0
x_0 = 0
for layer_size in layers:
bound = np.log(layer_size)
ns = i_0 + np.arange(layer_size)
order = np.argsort(pos['y'][ns]) + i_0
x_n = layer_size // layer_h + 1
xi = np.mod(np.arange(layer_size), x_n)
yi = np.arange(layer_size) // x_n
y_n = min(layer_h, layer_size)
y_rel = np.linspace(bound, -bound, y_n)
x_rel = np.linspace(-np.log(x_n), np.log(x_n), x_n)
x_0 += np.log(x_n) + 1
pos['y'][order] = y_rel[yi] - x_rel[xi]*0.1
pos['x'][order] = x_rel[xi] + x_0
x_0 += np.log(x_n) + 1
i_0 += layer_size
if labels == 'func_names':
node_labels = [''] * (net.n_in + 1) + [
net.enabled_act_functions[func][0][:5] for func in net.nodes['func']
]
pos = dict(zip(nodes, np.array([pos['x'], pos['y']]).T))
nx.draw_networkx_nodes(g, ax=ax, pos=pos, node_color=color, node_size=150)
nx.draw_networkx_labels(
g, ax=ax, pos=pos, labels=dict(zip(nodes, node_labels)),
font_size=8,
)
elif labels == 'names':
pos = dict(zip(nodes, np.array([pos['x'], pos['y']]).T))
nx.draw_networkx_nodes(g, ax=ax, pos=pos, node_color=color, node_size=150)
nx.draw_networkx_labels(
g, ax=ax, pos=pos, labels=dict(zip(nodes, nodes)),
font_size=8,
)
elif labels == 'func_plots':
# draw circles
ax.scatter(pos['x'], pos['y'], 150, color, marker='o', zorder=10)
pos = dict(zip(nodes, np.array([pos['x'], pos['y']]).T))
for func, n in zip(net.nodes['func'], nodes[net.offset:]):
func = net.enabled_act_functions[func][1]
x = np.linspace(0, 2, 10)
x = np.hstack([x, 1-x, -x, x-1])
if isinstance(net, TorchNetwork):
x = torch.Tensor(x)
y = func(x).numpy()
else:
y = func(x)
y = y - np.min(y)
y = y - np.max(y)/2
verts = np.column_stack([x, y])
ax.scatter(*pos[n], 50, 'k', marker=verts, zorder=200)
else:
pos = dict(zip(nodes, np.array([pos['x'], pos['y']]).T))
nx.draw_networkx_nodes(g, ax=ax, pos=pos, node_color=color, node_size=150)
edge_params = dict(
edge_cmap=plt.get_cmap('tab10'),
alpha=.6, ax=ax, pos=pos,
edge_vmin=0, edge_vmax=9,
arrows=True,
)
# draw feed forward edges
edge_col = list()
edgelist = list()
for row, col in zip(*np.where(net.weight_matrix != 0)):
edgelist.append((nodes[row], nodes[col + net.offset]))
edge_col.append(
2 if net.weight_matrix[row][col] > 0
else 3
)
nx.draw_networkx_edges(g, edgelist=edgelist, edge_color=edge_col,
width=1, arrowstyle='-',
min_source_margin=10, min_target_margin=5,
**edge_params)
if net.is_recurrent:
# draw recurrent edges
edge_col = list()
edgelist = list()
for row, col in zip(*np.where(net.recurrent_weight_matrix != 0)):
edgelist.append((nodes[row], nodes[col + net.offset]))
edge_col.append(
0 if net.recurrent_weight_matrix[row][col] > 0
else 1
)
nx.draw_networkx_edges(g, edgelist=edgelist, edge_color=edge_col,
min_source_margin=30, min_target_margin=20,
width=2, **edge_params)
def draw_weight_matrix(net, ax=None):
x_ticks = list(range(net.n_hidden + net.n_out))
x_ticklabels = node_names(net)[net.offset:]
y_ticks = list(range(net.n_nodes - net.n_out))
    y_ticklabels = node_names(net)[:-net.n_out]
if ax is None:
plt.xticks(x_ticks, x_ticklabels)
plt.yticks(y_ticks, y_ticklabels)
imshow = plt.imshow
else:
ax.set_xticks(x_ticks)
ax.set_xticklabels(x_ticklabels)
ax.set_yticks(y_ticks)
ax.set_yticklabels(y_ticklabels)
imshow = ax.imshow
imshow(np.max(net.weight_matrix) - net.weight_matrix, cmap='magma')
| [
"numpy.hstack",
"numpy.log",
"numpy.column_stack",
"networkx.draw_networkx_nodes",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.where",
"networkx.DiGraph",
"numpy.max",
"numpy.dot",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"numpy.vstack",
"numpy.min",
"matplotlib.pyplot.... | [((614, 626), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (624, 626), True, 'import networkx as nx\n'), ((1194, 1251), 'numpy.zeros', 'np.zeros', (['net.n_nodes'], {'dtype': "[('x', float), ('y', float)]"}), "(net.n_nodes, dtype=[('x', float), ('y', float)])\n", (1202, 1251), True, 'import numpy as np\n'), ((1283, 1304), 'numpy.arange', 'np.arange', (['net.offset'], {}), '(net.offset)\n', (1292, 1304), True, 'import numpy as np\n'), ((1333, 1353), 'numpy.arange', 'np.arange', (['net.n_out'], {}), '(net.n_out)\n', (1342, 1353), True, 'import numpy as np\n'), ((1526, 1554), 'numpy.vstack', 'np.vstack', (['[a, b + b.T, c.T]'], {}), '([a, b + b.T, c.T])\n', (1535, 1554), True, 'import numpy as np\n'), ((1580, 1597), 'numpy.sum', 'np.sum', (['M'], {'axis': '(0)'}), '(M, axis=0)\n', (1586, 1597), True, 'import numpy as np\n'), ((4622, 4774), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['g'], {'edgelist': 'edgelist', 'edge_color': 'edge_col', 'width': '(1)', 'arrowstyle': '"""-"""', 'min_source_margin': '(10)', 'min_target_margin': '(5)'}), "(g, edgelist=edgelist, edge_color=edge_col, width=1,\n arrowstyle='-', min_source_margin=10, min_target_margin=5, **edge_params)\n", (4644, 4774), True, 'import networkx as nx\n'), ((572, 581), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (579, 581), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1628), 'numpy.where', 'np.where', (['(M_sum == 0)'], {}), '(M_sum == 0)\n', (1616, 1628), True, 'import numpy as np\n'), ((1776, 1795), 'numpy.dot', 'np.dot', (["pos['y']", 'M'], {}), "(pos['y'], M)\n", (1782, 1795), True, 'import numpy as np\n'), ((1916, 1934), 'numpy.log', 'np.log', (['layer_size'], {}), '(layer_size)\n', (1922, 1934), True, 'import numpy as np\n'), ((2212, 2243), 'numpy.linspace', 'np.linspace', (['bound', '(-bound)', 'y_n'], {}), '(bound, -bound, y_n)\n', (2223, 2243), True, 'import numpy as np\n'), ((2733, 2807), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g'], {'ax': 'ax', 'pos': 'pos', 'node_color': 'color', 'node_size': '(150)'}), '(g, ax=ax, pos=pos, node_color=color, node_size=150)\n', (2755, 2807), True, 'import networkx as nx\n'), ((5242, 5379), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['g'], {'edgelist': 'edgelist', 'edge_color': 'edge_col', 'min_source_margin': '(30)', 'min_target_margin': '(20)', 'width': '(2)'}), '(g, edgelist=edgelist, edge_color=edge_col,\n min_source_margin=30, min_target_margin=20, width=2, **edge_params)\n', (5264, 5379), True, 'import networkx as nx\n'), ((5704, 5737), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x_ticks', 'x_ticklabels'], {}), '(x_ticks, x_ticklabels)\n', (5714, 5737), True, 'import matplotlib.pyplot as plt\n'), ((5746, 5779), 'matplotlib.pyplot.yticks', 'plt.yticks', (['y_ticks', 'y_ticklabels'], {}), '(y_ticks, y_ticklabels)\n', (5756, 5779), True, 'import matplotlib.pyplot as plt\n'), ((1954, 1975), 'numpy.arange', 'np.arange', (['layer_size'], {}), '(layer_size)\n', (1963, 1975), True, 'import numpy as np\n'), ((1992, 2016), 'numpy.argsort', 'np.argsort', (["pos['y'][ns]"], {}), "(pos['y'][ns])\n", (2002, 2016), True, 'import numpy as np\n'), ((2085, 2106), 'numpy.arange', 'np.arange', (['layer_size'], {}), '(layer_size)\n', (2094, 2106), True, 'import numpy as np\n'), ((2126, 2147), 'numpy.arange', 'np.arange', (['layer_size'], {}), '(layer_size)\n', (2135, 2147), True, 'import numpy as np\n'), ((2286, 2297), 'numpy.log', 'np.log', (['x_n'], {}), '(x_n)\n', (2292, 2297), True, 'import numpy as 
np\n'), ((2320, 2331), 'numpy.log', 'np.log', (['x_n'], {}), '(x_n)\n', (2326, 2331), True, 'import numpy as np\n'), ((2447, 2458), 'numpy.log', 'np.log', (['x_n'], {}), '(x_n)\n', (2453, 2458), True, 'import numpy as np\n'), ((3047, 3121), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g'], {'ax': 'ax', 'pos': 'pos', 'node_color': 'color', 'node_size': '(150)'}), '(g, ax=ax, pos=pos, node_color=color, node_size=150)\n', (3069, 3121), True, 'import networkx as nx\n'), ((4196, 4217), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (4208, 4217), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4448), 'numpy.where', 'np.where', (['(net.weight_matrix != 0)'], {}), '(net.weight_matrix != 0)\n', (4424, 4448), True, 'import numpy as np\n'), ((6001, 6026), 'numpy.max', 'np.max', (['net.weight_matrix'], {}), '(net.weight_matrix)\n', (6007, 6026), True, 'import numpy as np\n'), ((2273, 2284), 'numpy.log', 'np.log', (['x_n'], {}), '(x_n)\n', (2279, 2284), True, 'import numpy as np\n'), ((4078, 4152), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['g'], {'ax': 'ax', 'pos': 'pos', 'node_color': 'color', 'node_size': '(150)'}), '(g, ax=ax, pos=pos, node_color=color, node_size=150)\n', (4100, 4152), True, 'import networkx as nx\n'), ((4992, 5034), 'numpy.where', 'np.where', (['(net.recurrent_weight_matrix != 0)'], {}), '(net.recurrent_weight_matrix != 0)\n', (5000, 5034), True, 'import numpy as np\n'), ((2690, 2720), 'numpy.array', 'np.array', (["[pos['x'], pos['y']]"], {}), "([pos['x'], pos['y']])\n", (2698, 2720), True, 'import numpy as np\n'), ((3589, 3610), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(10)'], {}), '(0, 2, 10)\n', (3600, 3610), True, 'import numpy as np\n'), ((3627, 3659), 'numpy.hstack', 'np.hstack', (['[x, 1 - x, -x, x - 1]'], {}), '([x, 1 - x, -x, x - 1])\n', (3636, 3659), True, 'import numpy as np\n'), ((3903, 3926), 'numpy.column_stack', 'np.column_stack', (['[x, y]'], {}), '([x, y])\n', (3918, 3926), True, 'import numpy as np\n'), ((3004, 3034), 'numpy.array', 'np.array', (["[pos['x'], pos['y']]"], {}), "([pos['x'], pos['y']])\n", (3012, 3034), True, 'import numpy as np\n'), ((3722, 3737), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (3734, 3737), False, 'import torch\n'), ((3840, 3849), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (3846, 3849), True, 'import numpy as np\n'), ((3415, 3445), 'numpy.array', 'np.array', (["[pos['x'], pos['y']]"], {}), "([pos['x'], pos['y']])\n", (3423, 3445), True, 'import numpy as np\n'), ((3870, 3879), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (3876, 3879), True, 'import numpy as np\n'), ((4035, 4065), 'numpy.array', 'np.array', (["[pos['x'], pos['y']]"], {}), "([pos['x'], pos['y']])\n", (4043, 4065), True, 'import numpy as np\n')] |
from enum import Enum
from collections import namedtuple
import tvm
import tvm.te as te
import tvm.tg as tg
import tvm.tir as tir
import numpy as np
def pprint_dict(d):
import json
print(json.dumps(d, indent=2, sort_keys=False))
def get_vector_add(n):
"""TVM expression for vector add"""
A = te.placeholder((n,), name='a')
B = te.placeholder((n,), name='b')
C = te.compute(A.shape, lambda i: A[i] + B[i], name='c')
return A, B, C
def get_gemm(n, m, l):
"""Return the computing expression of matrix multiplication
A : n x l matrix
B : l x m matrix
C : n x m matrix with C = A B
"""
k = te.reduce_axis((0, l), name='k')
A = te.placeholder((n, l), name='A')
B = te.placeholder((l, m), name='B')
C = te.compute((n, m),
lambda x, y: te.sum(A[x, k] * B[k, y], axis=k),
name='C')
return A, B, C
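# Reading the compute rule above: k is a declared reduction axis of extent l,
# so C[x, y] = sum_k A[x, k] * B[k, y], i.e. a plain matrix product.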
def get_padding(X, ph, pw, val=0):
"""Pad X with the given value in 2-D
ph, pw : height and width padding
val : padding value, default 0
"""
assert len(X.shape) >= 2
nh, nw = X.shape[-2], X.shape[-1]
return te.compute(
(*X.shape[0:-2], nh+ph*2, nw+pw*2),
lambda *i: te.if_then_else(
te.any(i[-2]<ph, i[-2]>=nh+ph, i[-1]<pw, i[-1]>=nw+pw),
val, X[i[:-2]+(i[-2]-ph, i[-1]-pw)]),
name='PaddedX')
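# Shape sketch (added): a (c, 2, 2) input with ph = pw = 1 becomes (c, 4, 4),
# with the original values in the centre 2x2 block and `val` on the border.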
def conv_out_size(n, k, p, s):
"""Compute the output size by given input size n (width or height),
kernel size k, padding p, and stride s
Return output size (width or height)
"""
return (n - k + 2 * p) // s + 1
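# Worked example (added): a 32-wide input with a 3x3 kernel, padding 1 and
# stride 1 keeps its spatial size, since (32 - 3 + 2 * 1) // 1 + 1 == 32.
assert conv_out_size(32, 3, 1, 1) == 32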
def get_conv2d(oc, ic, nh, nw, kh, kw, ph=0, pw=0, sh=1, sw=1):
"""Convolution
oc, ic : output and input channels
nh, nw : input width and height
kh, kw : kernel width and height
ph, pw : height and width padding sizes, default 0
sh, sw : height and width strides, default 1
"""
# reduction axes
ric = te.reduce_axis((0, ic), name='ric')
rkh = te.reduce_axis((0, kh), name='rkh')
rkw = te.reduce_axis((0, kw), name='rkw')
# output height and weights
oh = conv_out_size(nh, kh, ph, sh)
ow = conv_out_size(nw, kw, pw, sw)
# pad X and then compute Y
X = te.placeholder((ic, nh, nw), name='X')
K = te.placeholder((oc, ic, kh, kw), name='K')
PaddedX = get_padding(X, ph, pw) if ph * pw != 0 else X
Y = te.compute(
(oc, oh, ow),
lambda c, i, j: te.sum(
PaddedX[ric, i*sh+rkh, j*sw+rkw] * K[c, ric, rkh, rkw],
axis=[ric, rkh, rkw]), name='Y')
return X, K, Y, PaddedX
def get_conv2d_unroll(oc, ic, nh, nw, kh, kw, ph=0, pw=0, sh=1, sw=1):
"""Convolution
oc, ic : output and input channels
nh, nw : input width and height
kh, kw : kernel width and height
ph, pw : height and width padding sizes, default 0
sh, sw : height and width strides, default 1
"""
# reduction axes
ric = te.reduce_axis((0, ic), name='ric')
rkh = te.reduce_axis((0, kh), name='rkh')
rkw = te.reduce_axis((0, kw), name='rkw')
# output height and weights
oh = conv_out_size(nh, kh, ph, sh)
ow = conv_out_size(nw, kw, pw, sw)
# pad X and then compute Y
X = te.placeholder((ic, nh, nw), name='X')
K = te.placeholder((oc, ic, kh, kw), name='K')
PaddedX = get_padding(X, ph, pw) if ph * pw != 0 else X
Y = te.compute(
(oc, oh, ow),
lambda c, i, j: te.sum(
PaddedX[ric, i*sh+rkh, j*sw+rkw] * K[c, ric, rkh, rkw],
axis=[ric, rkh, rkw]), name='Y')
sch = te.create_schedule(Y.op)
sch[Y].pragma(Y.op.axis[0], 'auto_unroll_max_step', 4)
return sch, (X, K, Y, PaddedX)
def depthwise_conv_pack(c, nh, nw, kh, kw, ph, pw, tc):
"""Pack data and weight for depthwise convolution
Note that the input channel of kernel is specified as 1,
and the output channel of kernel equals the input channel of data
c : input channel of data and output channel of kernel
nh, nw : input width and height
kh, kw : kernel width and height
ph, pw : height and width padding
tc : the tiling size of channels
"""
X = te.placeholder((c, nh, nw), name='X')
K = te.placeholder((c, 1, kh, kw), name='K')
PaddedX = get_padding(X, ph, pw) if ph * pw != 0 else X
# make sure the channel tiling is valid
if c < tc:
tc = c
assert c % tc == 0
# pack X and K
PackedX = te.compute(
(c//tc, nh+ph*2, nw+pw*2, tc),
lambda c_out, x, y, c_in: PaddedX[c_out*tc + c_in, x, y],
name='PackedX')
PackedK = te.compute(
(c//tc, 1, kh, kw, 1, tc),
lambda c_out, _, x, y, __, c_in: K[
c_out*tc + c_in, 0, x, y],
name='PackedK')
return X, K, PaddedX, PackedX, PackedK
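# Layout note (added): PackedX is (c // tc, nh + 2*ph, nw + 2*pw, tc) -- the
# channel axis is split so that tc neighbouring channels sit contiguously in
# the last axis, letting the depthwise compute vectorize over c_in.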
def get_depthwise_conv2d(c, nh, nw, kh, kw, ph, pw, sh, sw, tc):
"""depthwise conv
c : number of channels for both input and output.
nh, nw : input width and height
kh, kw : kernel width and height
ph, pw : height and width padding
sh, sw : height and width strides
tc : the tiling sizes of channels
"""
X, K, PaddedX, PackedX, PackedK = depthwise_conv_pack(
c, nh, nw, kh, kw, ph, pw, tc)
# reduction axes
rkh = te.reduce_axis((0, kh), name='rkh')
rkw = te.reduce_axis((0, kw), name='rkw')
# output height and weights
oh = conv_out_size(nh, kh, ph, sh)
ow = conv_out_size(nw, kw, pw, sw)
# compute Y in the packed layout
PackedY = te.compute(
(c//tc, oh, ow, tc),
lambda c_out, x, y, c_in: te.sum(
(PackedX[c_out, x*sh+rkh, y*sw+rkw, c_in] *
PackedK[c_out, 0, rkh, rkw, 0, c_in]),
axis=[rkh, rkw]), name='PackedY')
# Unpack the result
Y = te.compute((c, oh, ow),
lambda c, x, y: PackedY[c//tc, x, y, c%tc],
name='Y')
return X, K, Y, PaddedX, PackedX, PackedK, PackedY
def get_feature(inputs, outputs, sch=None, target='llvm'):
sch = sch or te.create_schedule([o.op for o in outputs])
target = tvm.target.create(target)
args = [*inputs, *outputs]
print(tvm.lower(sch, args, simple_mode=True))
features = tg.auto_schedule.get_feature(sch, args, target, flatten=True)
features = np.array(features)
structured_features = tg.auto_schedule.get_feature(sch, args, target, flatten=False)
return features, structured_features
def nelem(tensor: te.tensor.Tensor):
return np.prod([t.value for t in tensor.shape])
AccessType = Enum('AccessType', ['kNone', 'kRead', 'kWrite', 'kReadWrite'])
ReuseType = Enum('ReuseType', ['kNoReuse', 'kLoopMultipleRead', 'kSerialMultipleRead', 'kBothReuse'])
def structural_equal(feature, feature_ref, check_features:list):
assert len(feature) == len(feature_ref), \
f'mismatch in len(feature): {len(feature)} != {len(feature_ref)}(ref)'
for fea, fea_ref in zip(feature, feature_ref):
for buf_key in fea.keys():
if buf_key == '_stmt_': continue
for fea_key in check_features:
res, ref = fea[buf_key][fea_key], fea_ref[buf_key][fea_key]
enum_feas = ['access_type', 'reuse_type']
                assert res == str(ref) if fea_key in enum_feas else res == ref, \
                    f'mismatch in {fea_key}: {res} != {ref}(ref)'
BUFFER_ACCESS_FEATURE_KEYS = ['access_type', 'bytes', 'unique_bytes', 'lines',
'unique_lines', 'reuse_type', 'reuse_distance', 'reuse_counter', 'stride']
Feature = namedtuple('Feature', ['access_type', 'bytes', 'unique_bytes', 'lines',
'unique_lines', 'reuse_type', 'reuse_distance', 'reuse_counter', 'stride'])
def build_structured_feature(d):
from copy import deepcopy
d = deepcopy(d)
for dd in d:
for k, v in dd.items():
dd[k] = {kk: getattr(v, kk) for kk in BUFFER_ACCESS_FEATURE_KEYS}
return d
def conv2d_gpu_default(oc, ic, n, k, p, s):
X, K, Y, PaddedX = get_conv2d(oc, ic, n, n, k, k, p, p, s, s)
sch = te.create_schedule(Y.op)
if p != 0: sch[PaddedX].compute_inline()
_, y, x = sch[Y].op.axis
sch[Y].bind(y, te.thread_axis("blockIdx.x"))
sch[Y].bind(x, te.thread_axis("threadIdx.x"))
print(tvm.lower(sch, [X, K, Y], simple_mode=True))
return sch, (X, K, Y)
def split_axis(factors, sch, op, axis):
"""Splitting an axis into factors
Parameters
----------
factors: array of integers
The factors that the split applies
sch: tvm.te.schedule.Schedule
The tvm schedule
op: tvm.te.tensor.Operation
The stage to be applied
axis: tvm.te.schedule.IterVar
axis to split
Returns
-------
axes : list of Axis
The transformed axes.
"""
ret = []
for i in range(0, len(factors)):
ax0, ax1 = sch[op].split(axis, factor=int(np.prod(factors[i:])))
ret.append(ax0)
axis = ax1
return ret + [axis]
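# Usage sketch (illustrative extents): split_axis([2, 4], sch, op, axis) on an
# axis of extent 64 returns three axes with extents (8, 2, 4), outermost
# first: each split peels off factor = prod(remaining factors), so the
# outermost axis keeps 64 // (2 * 4) = 8 iterations.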
def conv2d_gpu_tiled(oc, ic, n, k, p, s):
tile_c = [4, 8]
tile_h = [2, 2]
tile_w = [16, 4]
tile_rc = [1, 1]
tile_rh = [1, 1]
tile_rw = [1, 3]
X, K, Y, PaddedX = get_conv2d(oc, ic, n, n, k, k, p, p, s, s)
sch = te.create_schedule(Y.op)
if p != 0: sch[PaddedX].compute_inline()
YL = sch.cache_write(Y, 'local')
# create cache stage
XX = sch.cache_read(PaddedX, 'shared', [YL])
KK = sch.cache_read(K, 'shared', [YL])
XL = sch.cache_read(XX, 'local', [YL])
KL = sch.cache_read(KK, 'local', [YL])
c, h, w = sch[Y].op.axis
bc, tc, ic = split_axis(tile_c, sch, Y, c)
bh, th, ih = split_axis(tile_h, sch, Y, h)
bw, tw, iw = split_axis(tile_w, sch, Y, w)
sch[Y].bind(bc, te.thread_axis("blockIdx.z"))
sch[Y].bind(bh, te.thread_axis("blockIdx.y"))
sch[Y].bind(bw, te.thread_axis("blockIdx.x"))
sch[Y].bind(tc, te.thread_axis("threadIdx.z"))
sch[Y].bind(th, te.thread_axis("threadIdx.y"))
sch[Y].bind(tw, te.thread_axis("threadIdx.x"))
sch[Y].reorder(bc, bh, bw, tc, th, tw, ic, ih, iw)
sch[YL].compute_at(sch[Y], tw)
# tile reduction axes
c, h, w = sch[YL].op.axis
rc, rh, rw = sch[YL].op.reduce_axis
rco, rcm, rci = split_axis(tile_rc, sch, YL, rc)
rho, rhm, rhi = split_axis(tile_rh, sch, YL, rh)
rwo, rwm, rwi = split_axis(tile_rw, sch, YL, rw)
sch[YL].reorder(rco, rho, rwo, rcm, rhm, rwm, rci, rhi, rwi, c, h, w)
sch[XX].compute_at(sch[YL], rwo)
sch[KK].compute_at(sch[YL], rwo)
sch[XL].compute_at(sch[YL], rwm)
sch[KL].compute_at(sch[YL], rwm)
# cooperative fetching
for load in [XX, KK]:
args = sch[load].op.axis
fused = sch[load].fuse(*args)
# align thread layout
tz, fused = sch[load].split(fused, nparts=tile_c[0])
ty, fused = sch[load].split(fused, nparts=tile_h[0])
tx, _ = sch[load].split(fused, nparts=tile_w[0])
sch[load].bind(tz, te.thread_axis("threadIdx.z"))
sch[load].bind(ty, te.thread_axis("threadIdx.y"))
sch[load].bind(tx, te.thread_axis("threadIdx.x"))
return sch, (X, K, Y)
def conv2d_gpu_tiled_vthread(oc, ic, n, k, p, s):
tile_c = [4, 8]
tile_h = [2, 2]
tile_w = [16, 4]
tile_rc = [1, 1]
tile_rh = [1, 1]
tile_rw = [1, 3]
X, K, Y, PaddedX = get_conv2d(oc, ic, n, n, k, k, p, p, s, s)
sch = te.create_schedule(Y.op)
if p != 0: sch[PaddedX].compute_inline()
YL = sch.cache_write(Y, 'local')
# create cache stage
XX = sch.cache_read(PaddedX, 'shared', [YL])
KK = sch.cache_read(K, 'shared', [YL])
XL = sch.cache_read(XX, 'local', [YL])
KL = sch.cache_read(KK, 'local', [YL])
c, h, w = sch[Y].op.axis
bc, vc, tc, ic = split_axis(tile_c, sch, Y, c)
bh, vh, th, ih = split_axis(tile_h, sch, Y, h)
bw, vw, tw, iw = split_axis(tile_w, sch, Y, w)
sch[Y].bind(bc, te.thread_axis("blockIdx.z"))
sch[Y].bind(bh, te.thread_axis("blockIdx.y"))
sch[Y].bind(bw, te.thread_axis("blockIdx.x"))
sch[Y].bind(vc, te.thread_axis("vthread"))
sch[Y].bind(vh, te.thread_axis("vthread"))
sch[Y].bind(vw, te.thread_axis("vthread"))
sch[Y].bind(tc, te.thread_axis("threadIdx.z"))
sch[Y].bind(th, te.thread_axis("threadIdx.y"))
sch[Y].bind(tw, te.thread_axis("threadIdx.x"))
sch[Y].reorder(bc, bh, bw, vc, vh, vw, tc, th, tw, ic, ih, iw)
sch[YL].compute_at(sch[Y], tw)
# tile reduction axes
c, h, w = sch[YL].op.axis
rc, rh, rw = sch[YL].op.reduce_axis
rco, rcm, rci = split_axis(tile_rc, sch, YL, rc)
rho, rhm, rhi = split_axis(tile_rh, sch, YL, rh)
rwo, rwm, rwi = split_axis(tile_rw, sch, YL, rw)
sch[YL].reorder(rco, rho, rwo, rcm, rhm, rwm, rci, rhi, rwi, c, h, w)
sch[XX].compute_at(sch[YL], rwo)
sch[KK].compute_at(sch[YL], rwo)
sch[XL].compute_at(sch[YL], rwm)
sch[KL].compute_at(sch[YL], rwm)
# cooperative fetching
for load in [XX, KK]:
args = sch[load].op.axis
fused = sch[load].fuse(*args)
# align thread layout
tz, fused = sch[load].split(fused, nparts=tile_c[1])
ty, fused = sch[load].split(fused, nparts=tile_h[1])
tx, _ = sch[load].split(fused, nparts=tile_w[1])
sch[load].bind(tz, te.thread_axis("threadIdx.z"))
sch[load].bind(ty, te.thread_axis("threadIdx.y"))
sch[load].bind(tx, te.thread_axis("threadIdx.x"))
return sch, (X, K, Y)
def depthwise_cached_block(c, n, k, p, s, tc, tw):
X, K, Y, PaddedX, PackedX, PackedK, PackedY = get_depthwise_conv2d(
c, n, n, k, k, p, p, s, s, tc)
sch = te.create_schedule(Y.op)
CachedY = sch.cache_write(PackedY, 'global')
c_out, h, w, c_in = sch[PackedY].op.axis
w_out, w_in = sch[PackedY].split(w, factor=tw)
sch[PackedY].reorder(c_out, h, w_out, w_in, c_in)
c_out_h = sch[PackedY].fuse(c_out, h)
sch[PackedY].parallel(c_out_h)
sch[CachedY].compute_at(sch[PackedY], w_out)
cc_out, ch, cw, cc_in = sch[CachedY].op.axis
kh, kw = sch[CachedY].op.reduce_axis
sch[CachedY].reorder(cc_out, ch, kh, kw, cw, cc_in)
sch[CachedY].vectorize(cc_in)
sch[CachedY].unroll(cw)
# Schedule the padding by adding thread-level parallelism
if PaddedX != X:
sch[PaddedX].parallel(PaddedX.op.axis[0])
# Optimize the packing of X and K
sch[PackedX].parallel(sch[PackedX].fuse(*PackedX.op.axis[0:2]))
sch[PackedX].unroll(PackedX.op.axis[-1])
sch[PackedK].parallel(sch[PackedK].fuse(*PackedK.op.axis[0:2]))
sch[PackedK].unroll(PackedK.op.axis[-1])
# Optimize the unpacking of Y
sch[Y].parallel(sch[Y].fuse(*Y.op.axis[0:2]))
sch[Y].unroll(Y.op.axis[-1])
return sch, (X, K, Y)
| [
"numpy.prod",
"tvm.te.sum",
"collections.namedtuple",
"copy.deepcopy",
"tvm.te.any",
"tvm.target.create",
"json.dumps",
"tvm.te.create_schedule",
"tvm.tg.auto_schedule.get_feature",
"tvm.te.thread_axis",
"tvm.te.placeholder",
"numpy.array",
"tvm.te.compute",
"enum.Enum",
"tvm.lower",
"... | [((6364, 6426), 'enum.Enum', 'Enum', (['"""AccessType"""', "['kNone', 'kRead', 'kWrite', 'kReadWrite']"], {}), "('AccessType', ['kNone', 'kRead', 'kWrite', 'kReadWrite'])\n", (6368, 6426), False, 'from enum import Enum\n'), ((6439, 6532), 'enum.Enum', 'Enum', (['"""ReuseType"""', "['kNoReuse', 'kLoopMultipleRead', 'kSerialMultipleRead', 'kBothReuse']"], {}), "('ReuseType', ['kNoReuse', 'kLoopMultipleRead', 'kSerialMultipleRead',\n 'kBothReuse'])\n", (6443, 6532), False, 'from enum import Enum\n'), ((7286, 7437), 'collections.namedtuple', 'namedtuple', (['"""Feature"""', "['access_type', 'bytes', 'unique_bytes', 'lines', 'unique_lines',\n 'reuse_type', 'reuse_distance', 'reuse_counter', 'stride']"], {}), "('Feature', ['access_type', 'bytes', 'unique_bytes', 'lines',\n 'unique_lines', 'reuse_type', 'reuse_distance', 'reuse_counter', 'stride'])\n", (7296, 7437), False, 'from collections import namedtuple\n'), ((306, 336), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""a"""'}), "((n,), name='a')\n", (320, 336), True, 'import tvm.te as te\n'), ((343, 373), 'tvm.te.placeholder', 'te.placeholder', (['(n,)'], {'name': '"""b"""'}), "((n,), name='b')\n", (357, 373), True, 'import tvm.te as te\n'), ((380, 432), 'tvm.te.compute', 'te.compute', (['A.shape', '(lambda i: A[i] + B[i])'], {'name': '"""c"""'}), "(A.shape, lambda i: A[i] + B[i], name='c')\n", (390, 432), True, 'import tvm.te as te\n'), ((619, 651), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, l)'], {'name': '"""k"""'}), "((0, l), name='k')\n", (633, 651), True, 'import tvm.te as te\n'), ((658, 690), 'tvm.te.placeholder', 'te.placeholder', (['(n, l)'], {'name': '"""A"""'}), "((n, l), name='A')\n", (672, 690), True, 'import tvm.te as te\n'), ((697, 729), 'tvm.te.placeholder', 'te.placeholder', (['(l, m)'], {'name': '"""B"""'}), "((l, m), name='B')\n", (711, 729), True, 'import tvm.te as te\n'), ((1880, 1915), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, ic)'], {'name': '"""ric"""'}), "((0, ic), name='ric')\n", (1894, 1915), True, 'import tvm.te as te\n'), ((1924, 1959), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kh)'], {'name': '"""rkh"""'}), "((0, kh), name='rkh')\n", (1938, 1959), True, 'import tvm.te as te\n'), ((1968, 2003), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kw)'], {'name': '"""rkw"""'}), "((0, kw), name='rkw')\n", (1982, 2003), True, 'import tvm.te as te\n'), ((2143, 2181), 'tvm.te.placeholder', 'te.placeholder', (['(ic, nh, nw)'], {'name': '"""X"""'}), "((ic, nh, nw), name='X')\n", (2157, 2181), True, 'import tvm.te as te\n'), ((2188, 2230), 'tvm.te.placeholder', 'te.placeholder', (['(oc, ic, kh, kw)'], {'name': '"""K"""'}), "((oc, ic, kh, kw), name='K')\n", (2202, 2230), True, 'import tvm.te as te\n'), ((2822, 2857), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, ic)'], {'name': '"""ric"""'}), "((0, ic), name='ric')\n", (2836, 2857), True, 'import tvm.te as te\n'), ((2866, 2901), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kh)'], {'name': '"""rkh"""'}), "((0, kh), name='rkh')\n", (2880, 2901), True, 'import tvm.te as te\n'), ((2910, 2945), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kw)'], {'name': '"""rkw"""'}), "((0, kw), name='rkw')\n", (2924, 2945), True, 'import tvm.te as te\n'), ((3085, 3123), 'tvm.te.placeholder', 'te.placeholder', (['(ic, nh, nw)'], {'name': '"""X"""'}), "((ic, nh, nw), name='X')\n", (3099, 3123), True, 'import tvm.te as te\n'), ((3130, 3172), 'tvm.te.placeholder', 'te.placeholder', (['(oc, ic, kh, kw)'], {'name': '"""K"""'}), "((oc, ic, kh, kw), name='K')\n", 
(3144, 3172), True, 'import tvm.te as te\n'), ((3419, 3443), 'tvm.te.create_schedule', 'te.create_schedule', (['Y.op'], {}), '(Y.op)\n', (3437, 3443), True, 'import tvm.te as te\n'), ((4007, 4044), 'tvm.te.placeholder', 'te.placeholder', (['(c, nh, nw)'], {'name': '"""X"""'}), "((c, nh, nw), name='X')\n", (4021, 4044), True, 'import tvm.te as te\n'), ((4053, 4093), 'tvm.te.placeholder', 'te.placeholder', (['(c, 1, kh, kw)'], {'name': '"""K"""'}), "((c, 1, kh, kw), name='K')\n", (4067, 4093), True, 'import tvm.te as te\n'), ((4284, 4415), 'tvm.te.compute', 'te.compute', (['(c // tc, nh + ph * 2, nw + pw * 2, tc)', '(lambda c_out, x, y, c_in: PaddedX[c_out * tc + c_in, x, y])'], {'name': '"""PackedX"""'}), "((c // tc, nh + ph * 2, nw + pw * 2, tc), lambda c_out, x, y,\n c_in: PaddedX[c_out * tc + c_in, x, y], name='PackedX')\n", (4294, 4415), True, 'import tvm.te as te\n'), ((4439, 4563), 'tvm.te.compute', 'te.compute', (['(c // tc, 1, kh, kw, 1, tc)', '(lambda c_out, _, x, y, __, c_in: K[c_out * tc + c_in, 0, x, y])'], {'name': '"""PackedK"""'}), "((c // tc, 1, kh, kw, 1, tc), lambda c_out, _, x, y, __, c_in: K[\n c_out * tc + c_in, 0, x, y], name='PackedK')\n", (4449, 4563), True, 'import tvm.te as te\n'), ((5104, 5139), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kh)'], {'name': '"""rkh"""'}), "((0, kh), name='rkh')\n", (5118, 5139), True, 'import tvm.te as te\n'), ((5150, 5185), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, kw)'], {'name': '"""rkw"""'}), "((0, kw), name='rkw')\n", (5164, 5185), True, 'import tvm.te as te\n'), ((5617, 5702), 'tvm.te.compute', 'te.compute', (['(c, oh, ow)', '(lambda c, x, y: PackedY[c // tc, x, y, c % tc])'], {'name': '"""Y"""'}), "((c, oh, ow), lambda c, x, y: PackedY[c // tc, x, y, c % tc],\n name='Y')\n", (5627, 5702), True, 'import tvm.te as te\n'), ((5921, 5946), 'tvm.target.create', 'tvm.target.create', (['target'], {}), '(target)\n', (5938, 5946), False, 'import tvm\n'), ((6039, 6100), 'tvm.tg.auto_schedule.get_feature', 'tg.auto_schedule.get_feature', (['sch', 'args', 'target'], {'flatten': '(True)'}), '(sch, args, target, flatten=True)\n', (6067, 6100), True, 'import tvm.tg as tg\n'), ((6114, 6132), 'numpy.array', 'np.array', (['features'], {}), '(features)\n', (6122, 6132), True, 'import numpy as np\n'), ((6158, 6220), 'tvm.tg.auto_schedule.get_feature', 'tg.auto_schedule.get_feature', (['sch', 'args', 'target'], {'flatten': '(False)'}), '(sch, args, target, flatten=False)\n', (6186, 6220), True, 'import tvm.tg as tg\n'), ((6308, 6348), 'numpy.prod', 'np.prod', (['[t.value for t in tensor.shape]'], {}), '([t.value for t in tensor.shape])\n', (6315, 6348), True, 'import numpy as np\n'), ((7505, 7516), 'copy.deepcopy', 'deepcopy', (['d'], {}), '(d)\n', (7513, 7516), False, 'from copy import deepcopy\n'), ((7761, 7785), 'tvm.te.create_schedule', 'te.create_schedule', (['Y.op'], {}), '(Y.op)\n', (7779, 7785), True, 'import tvm.te as te\n'), ((8854, 8878), 'tvm.te.create_schedule', 'te.create_schedule', (['Y.op'], {}), '(Y.op)\n', (8872, 8878), True, 'import tvm.te as te\n'), ((10900, 10924), 'tvm.te.create_schedule', 'te.create_schedule', (['Y.op'], {}), '(Y.op)\n', (10918, 10924), True, 'import tvm.te as te\n'), ((13042, 13066), 'tvm.te.create_schedule', 'te.create_schedule', (['Y.op'], {}), '(Y.op)\n', (13060, 13066), True, 'import tvm.te as te\n'), ((195, 235), 'json.dumps', 'json.dumps', (['d'], {'indent': '(2)', 'sort_keys': '(False)'}), '(d, indent=2, sort_keys=False)\n', (205, 235), False, 'import json\n'), ((5866, 5909), 
'tvm.te.create_schedule', 'te.create_schedule', (['[o.op for o in outputs]'], {}), '([o.op for o in outputs])\n', (5884, 5909), True, 'import tvm.te as te\n'), ((5985, 6023), 'tvm.lower', 'tvm.lower', (['sch', 'args'], {'simple_mode': '(True)'}), '(sch, args, simple_mode=True)\n', (5994, 6023), False, 'import tvm\n'), ((7873, 7901), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (7887, 7901), True, 'import tvm.te as te\n'), ((7920, 7949), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (7934, 7949), True, 'import tvm.te as te\n'), ((7959, 8002), 'tvm.lower', 'tvm.lower', (['sch', '[X, K, Y]'], {'simple_mode': '(True)'}), '(sch, [X, K, Y], simple_mode=True)\n', (7968, 8002), False, 'import tvm\n'), ((9335, 9363), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.z"""'], {}), "('blockIdx.z')\n", (9349, 9363), True, 'import tvm.te as te\n'), ((9383, 9411), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (9397, 9411), True, 'import tvm.te as te\n'), ((9431, 9459), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (9445, 9459), True, 'import tvm.te as te\n'), ((9479, 9508), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.z"""'], {}), "('threadIdx.z')\n", (9493, 9508), True, 'import tvm.te as te\n'), ((9528, 9557), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (9542, 9557), True, 'import tvm.te as te\n'), ((9577, 9606), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (9591, 9606), True, 'import tvm.te as te\n'), ((11393, 11421), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.z"""'], {}), "('blockIdx.z')\n", (11407, 11421), True, 'import tvm.te as te\n'), ((11441, 11469), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.y"""'], {}), "('blockIdx.y')\n", (11455, 11469), True, 'import tvm.te as te\n'), ((11489, 11517), 'tvm.te.thread_axis', 'te.thread_axis', (['"""blockIdx.x"""'], {}), "('blockIdx.x')\n", (11503, 11517), True, 'import tvm.te as te\n'), ((11537, 11562), 'tvm.te.thread_axis', 'te.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (11551, 11562), True, 'import tvm.te as te\n'), ((11582, 11607), 'tvm.te.thread_axis', 'te.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (11596, 11607), True, 'import tvm.te as te\n'), ((11627, 11652), 'tvm.te.thread_axis', 'te.thread_axis', (['"""vthread"""'], {}), "('vthread')\n", (11641, 11652), True, 'import tvm.te as te\n'), ((11672, 11701), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.z"""'], {}), "('threadIdx.z')\n", (11686, 11701), True, 'import tvm.te as te\n'), ((11721, 11750), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (11735, 11750), True, 'import tvm.te as te\n'), ((11770, 11799), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (11784, 11799), True, 'import tvm.te as te\n'), ((786, 819), 'tvm.te.sum', 'te.sum', (['(A[x, k] * B[k, y])'], {'axis': 'k'}), '(A[x, k] * B[k, y], axis=k)\n', (792, 819), True, 'import tvm.te as te\n'), ((2349, 2446), 'tvm.te.sum', 'te.sum', (['(PaddedX[ric, i * sh + rkh, j * sw + rkw] * K[c, ric, rkh, rkw])'], {'axis': '[ric, rkh, rkw]'}), '(PaddedX[ric, i * sh + rkh, j * sw + rkw] * K[c, ric, rkh, rkw], axis\n =[ric, rkh, rkw])\n', (2355, 2446), True, 'import tvm.te as te\n'), ((3291, 3388), 'tvm.te.sum', 'te.sum', (['(PaddedX[ric, i * sh 
+ rkh, j * sw + rkw] * K[c, ric, rkh, rkw])'], {'axis': '[ric, rkh, rkw]'}), '(PaddedX[ric, i * sh + rkh, j * sw + rkw] * K[c, ric, rkh, rkw], axis\n =[ric, rkh, rkw])\n', (3297, 3388), True, 'import tvm.te as te\n'), ((5422, 5538), 'tvm.te.sum', 'te.sum', (['(PackedX[c_out, x * sh + rkh, y * sw + rkw, c_in] * PackedK[c_out, 0, rkh,\n rkw, 0, c_in])'], {'axis': '[rkh, rkw]'}), '(PackedX[c_out, x * sh + rkh, y * sw + rkw, c_in] * PackedK[c_out, 0,\n rkh, rkw, 0, c_in], axis=[rkh, rkw])\n', (5428, 5538), True, 'import tvm.te as te\n'), ((10495, 10524), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.z"""'], {}), "('threadIdx.z')\n", (10509, 10524), True, 'import tvm.te as te\n'), ((10551, 10580), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (10565, 10580), True, 'import tvm.te as te\n'), ((10607, 10636), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (10621, 10636), True, 'import tvm.te as te\n'), ((12700, 12729), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.z"""'], {}), "('threadIdx.z')\n", (12714, 12729), True, 'import tvm.te as te\n'), ((12756, 12785), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.y"""'], {}), "('threadIdx.y')\n", (12770, 12785), True, 'import tvm.te as te\n'), ((12812, 12841), 'tvm.te.thread_axis', 'te.thread_axis', (['"""threadIdx.x"""'], {}), "('threadIdx.x')\n", (12826, 12841), True, 'import tvm.te as te\n'), ((1200, 1266), 'tvm.te.any', 'te.any', (['(i[-2] < ph)', '(i[-2] >= nh + ph)', '(i[-1] < pw)', '(i[-1] >= nw + pw)'], {}), '(i[-2] < ph, i[-2] >= nh + ph, i[-1] < pw, i[-1] >= nw + pw)\n', (1206, 1266), True, 'import tvm.te as te\n'), ((8541, 8561), 'numpy.prod', 'np.prod', (['factors[i:]'], {}), '(factors[i:])\n', (8548, 8561), True, 'import numpy as np\n')] |
import numpy as np
import gym
from dezero import Model
from dezero import optimizers
import dezero.functions as F
import dezero.layers as L
from common.utils import plot_total_reward
class PolicyNet(Model):
def __init__(self, action_size=2):
super().__init__()
self.l1 = L.Linear(256)
self.l2 = L.Linear(action_size)
def forward(self, x):
x = F.relu(self.l1(x))
x = self.l2(x)
x = F.softmax(x)
return x
class ValueNet(Model):
def __init__(self):
super().__init__()
self.l1 = L.Linear(256)
self.l2 = L.Linear(1)
def forward(self, x):
x = F.relu(self.l1(x))
x = self.l2(x)
return x
class Agent:
def __init__(self):
self.gamma = 0.98
self.lr_pi = 0.0001
self.lr_v = 0.0001
self.action_size = 2
self.pi = PolicyNet()
self.v = ValueNet()
self.optimizer_pi = optimizers.Adam(self.lr_pi).setup(self.pi)
self.optimizer_v = optimizers.Adam(self.lr_v).setup(self.v)
def get_action(self, state):
        state = np.atleast_2d(state)  # add batch axis
probs = self.pi(state)
probs = probs[0]
action = np.random.choice([0, 1], p=probs.data)
return action, probs[action]
def update(self, state, action_prob, reward, next_state, done):
state = np.atleast_2d(state) # add batch axis
next_state = np.atleast_2d(next_state)
td_target = reward + self.gamma * self.v(next_state) * (1 - done)
td_target.unchain()
v = self.v(state)
loss_v = F.mean_squared_error(v, td_target)
delta = td_target - v
delta.unchain()
loss_pi = -F.log(action_prob) * delta
self.v.cleargrads()
self.pi.cleargrads()
loss_v.backward()
loss_pi.backward()
self.optimizer_v.update()
self.optimizer_pi.update()
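        # Added summary: this is one-step actor-critic -- the critic regresses
        # V(s) toward the TD target r + gamma * V(s'), and the actor minimizes
        # -log pi(a|s) * delta with delta = target - V(s); unchain() keeps
        # gradients from flowing through the target and the advantage.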
env = gym.make('CartPole-v0')
agent = Agent()
reward_log = {}
for episode in range(3000):
state = env.reset()
done = False
sum_reward = 0
while not done:
action, prob = agent.get_action(state)
next_state, reward, done, info = env.step(action)
agent.update(state, prob, reward, next_state, done)
state = next_state
sum_reward += reward
reward_log[episode] = sum_reward
if episode % 100 == 0:
print("episode :{}, total reward : {:.1f}".format(episode, sum_reward))
plot_total_reward(reward_log)
| [
"numpy.atleast_2d",
"dezero.layers.Linear",
"dezero.optimizers.Adam",
"numpy.random.choice",
"dezero.functions.softmax",
"dezero.functions.log",
"common.utils.plot_total_reward",
"dezero.functions.mean_squared_error",
"gym.make"
] | [((1955, 1978), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (1963, 1978), False, 'import gym\n'), ((2490, 2519), 'common.utils.plot_total_reward', 'plot_total_reward', (['reward_log'], {}), '(reward_log)\n', (2507, 2519), False, 'from common.utils import plot_total_reward\n'), ((293, 306), 'dezero.layers.Linear', 'L.Linear', (['(256)'], {}), '(256)\n', (301, 306), True, 'import dezero.layers as L\n'), ((325, 346), 'dezero.layers.Linear', 'L.Linear', (['action_size'], {}), '(action_size)\n', (333, 346), True, 'import dezero.layers as L\n'), ((440, 452), 'dezero.functions.softmax', 'F.softmax', (['x'], {}), '(x)\n', (449, 452), True, 'import dezero.functions as F\n'), ((564, 577), 'dezero.layers.Linear', 'L.Linear', (['(256)'], {}), '(256)\n', (572, 577), True, 'import dezero.layers as L\n'), ((596, 607), 'dezero.layers.Linear', 'L.Linear', (['(1)'], {}), '(1)\n', (604, 607), True, 'import dezero.layers as L\n'), ((1104, 1124), 'numpy.atleast_2d', 'np.atleast_2d', (['state'], {}), '(state)\n', (1117, 1124), True, 'import numpy as np\n'), ((1238, 1276), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'p': 'probs.data'}), '([0, 1], p=probs.data)\n', (1254, 1276), True, 'import numpy as np\n'), ((1399, 1419), 'numpy.atleast_2d', 'np.atleast_2d', (['state'], {}), '(state)\n', (1412, 1419), True, 'import numpy as np\n'), ((1459, 1484), 'numpy.atleast_2d', 'np.atleast_2d', (['next_state'], {}), '(next_state)\n', (1472, 1484), True, 'import numpy as np\n'), ((1631, 1665), 'dezero.functions.mean_squared_error', 'F.mean_squared_error', (['v', 'td_target'], {}), '(v, td_target)\n', (1651, 1665), True, 'import dezero.functions as F\n'), ((943, 970), 'dezero.optimizers.Adam', 'optimizers.Adam', (['self.lr_pi'], {}), '(self.lr_pi)\n', (958, 970), False, 'from dezero import optimizers\n'), ((1013, 1039), 'dezero.optimizers.Adam', 'optimizers.Adam', (['self.lr_v'], {}), '(self.lr_v)\n', (1028, 1039), False, 'from dezero import optimizers\n'), ((1740, 1758), 'dezero.functions.log', 'F.log', (['action_prob'], {}), '(action_prob)\n', (1745, 1758), True, 'import dezero.functions as F\n')] |
import numpy
from numba import jit
from . import best_split
from . import misc_functions as m
#from importlib import reload
#reload(m)
#reload(best_split)
cache = False
class _tree:
"""
This is the recursive binary tree implementation.
"""
def __init__(self, feature_index=-1, feature_threshold=None, true_branch=None, false_branch=None, p_right=None, results=None):
self.feature_index = feature_index
self.feature_threshold = feature_threshold
self.true_branch = true_branch
self.false_branch = false_branch
self.p_right = p_right
self.results = results # None for nodes, not None for leaves # TODO: decide what you want to do with this.
def get_node_list(self, node_list, this_node, node_idx):
node_idx_right = node_idx + 1
last_node_left_branch = node_idx
        if this_node.true_branch is not None:
last_node_right_branch, node_list = self.get_node_list(node_list, this_node.true_branch, node_idx_right)
node_idx_left = last_node_right_branch + 1
last_node_left_branch, node_list = self.get_node_list(node_list, this_node.false_branch, node_idx_left)
        if this_node.results is not None:
node_list.append([node_idx, this_node.feature_index, this_node.feature_threshold, None,None, this_node.results, None])
else:
node_list.append([node_idx, this_node.feature_index, this_node.feature_threshold, node_idx_right, node_idx_left, None, this_node.p_right])
return last_node_left_branch, node_list
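# Note added for clarity; the row layout is inferred from the appends in
# get_node_list above:
#   internal node: [node_idx, feature_index, feature_threshold, true_idx, false_idx, None, p_right]
#   leaf:          [node_idx, feature_index, feature_threshold, None, None, results, None]
# Node indices are assigned during the recursion, so sorting the rows by
# node_idx yields the flat, array-friendly order consumed by predict_all below.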
############################################################
############################################################
############################################################
############################################################
############ UNSUPERVISED ############
############################################################
############################################################
############################################################
############################################################
#@jit(cache=True, nopython=True)
def default_synthetic_data(X):
"""
    Generate synthetic data with the same marginal distribution for each feature.
"""
synthetic_X = numpy.zeros(X.shape)
nof_features = X.shape[1]
nof_objects = X.shape[0]
for f in range(nof_features):
feature_values = X[:, f]
synthetic_X[:, f] += numpy.random.choice(feature_values, nof_objects)
return synthetic_X
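# Minimal usage sketch (assumes X is a 2-D numpy array; values are illustrative):
#
#   X = numpy.random.rand(1000, 4)
#   X_syn = default_synthetic_data(X)
#
# Each column of X_syn is an i.i.d. resample (with replacement) of the matching
# column of X, so per-feature marginals are preserved while any cross-feature
# correlation structure is destroyed.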
#@jit(cache=True, nopython=True)
def get_synthetic_data(X, dX, py, py_remove, pnode, is_max):
#if (len(numpy.unique(y)) == 1):
# y= numpy.zeros(len(y), dtype = int)
real_inds = numpy.where(py[:,1] == 0)[0]
X_real = X[real_inds]
dX_real = dX[real_inds]
py_real = py[real_inds]
pnode_real = pnode[real_inds]
is_max_real = is_max[real_inds]
n_real = X_real.shape[0]
if n_real < 50:
return X, dX, py, py_remove, pnode, is_max
X_syn = default_synthetic_data(X_real)
dX_syn = default_synthetic_data(dX_real)
X_new = numpy.vstack([X_real,X_syn])
dX_new = numpy.vstack([dX_real,dX_syn])
py_new = numpy.zeros([X_new.shape[0],2])
py_new[:n_real,0] = py_real[:,0] #Class 'real'
py_new[n_real:,1] = py_real[:,0]
pnode_new = numpy.zeros([X_new.shape[0]])
pnode_new[:n_real] = pnode_real
pnode_new[n_real:] = pnode_real
is_max_new = numpy.concatenate([is_max_real, is_max_real])
return X_new, dX_new, py_new, py_new, pnode_new, is_max_new
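# Note added for clarity: py_new encodes a two-class real-vs-synthetic problem.
# Column 0 carries the weights of the original objects and column 1 the weights
# of their marginal-resampled copies, so in unsupervised mode the tree learns to
# separate the data from its decorrelated shuffle (the usual unsupervised
# random-forest construction).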
############################################################
############################################################
############################################################
############################################################
############ TRAIN ############
############################################################
############################################################
############################################################
############################################################
def fit_tree(X, dX, py_gini, py_leafs, pnode, depth, is_max, tree_max_depth, max_features, feature_importances,
tree_n_samples, keep_proba, unsupervised=False, new_syn_data_frac=0, min_py_sum_leaf=1):
"""
    Grows a recursive decision tree according to the objects X and their classifications y.
"""
if len(X) == 0:
print('Warning: empty node')
return _tree()
n_features = X.shape[1]
n_objects_node = X.shape[0]
if unsupervised:
new_syn_data = False
if depth == 0:
new_syn_data = True
elif (n_objects_node > 50):
if (numpy.random.rand() < new_syn_data_frac):
new_syn_data = True
if new_syn_data:
#print('before:', X.shape, dX.shape, py_gini.shape, py_leafs.shape, pnode.shape, is_max.shape)
X, dX, py_gini, py_leafs, pnode, is_max = get_synthetic_data(X, dX, py_gini, py_leafs, pnode, is_max)
#print('after:', X.shape, dX.shape, py_gini.shape, py_leafs.shape, pnode.shape, is_max.shape)
n_objects_node = X.shape[0]
max_depth = depth + 1
if tree_max_depth:
max_depth = tree_max_depth
if depth < max_depth:
scaled_py_gini = numpy.multiply(py_gini, pnode[:,numpy.newaxis])
current_score, normalization, class_p_arr = best_split._gini_init(scaled_py_gini)
features_chosen_indices = m.choose_features(n_features, max_features)
best_gain, best_attribute, best_attribute_value = best_split.get_best_split(X, scaled_py_gini, current_score, features_chosen_indices, max_features)
        # Calculate split probabilities for each object
if best_gain > 0:
p_split_right = m.split_probability_all(X[:,best_attribute], dX[:,best_attribute], best_attribute_value)
p_split_left = 1 - p_split_right
pnode_right, pnode_left, best_right, best_left, is_max_right, is_max_left, pnode_right_tot = m.get_split_objects(pnode, p_split_right, p_split_left, is_max, n_objects_node, keep_proba)
            # Check that the best split is valid (that is, not a useless 0-everything split)
th = min_py_sum_leaf
if (numpy.sum(pnode_right) >= th) and (numpy.sum(pnode_left) >= th):
# add the impurity of the best split into the feature importance value
p = scaled_py_gini.sum() / tree_n_samples
feature_importances[best_attribute] += p * best_gain
                # Split all the arrays according to the indices we have for the objects on each side of the split
X_right, X_left = m.pull_values(X, best_right, best_left)
dX_right, dX_left = m.pull_values(dX, best_right, best_left)
py_right, py_left = m.pull_values(py_gini, best_right, best_left)
py_leafs_right, py_leafs_left = m.pull_values(py_leafs, best_right, best_left)
# go to the next steps of the recursive process
depth = depth + 1
right_branch = fit_tree(X_right, dX_right, py_right, py_leafs_right, pnode_right, depth, is_max_right, tree_max_depth, max_features, feature_importances, tree_n_samples, keep_proba, unsupervised, new_syn_data_frac, min_py_sum_leaf)
left_branch = fit_tree(X_left, dX_left, py_left, py_leafs_left , pnode_left, depth, is_max_left, tree_max_depth, max_features, feature_importances, tree_n_samples, keep_proba, unsupervised, new_syn_data_frac, min_py_sum_leaf)
return _tree(feature_index=best_attribute, feature_threshold=best_attribute_value, true_branch=right_branch, false_branch=left_branch, p_right=pnode_right_tot)
class_probas = m.return_class_probas(pnode, py_leafs)
#if len(pnode) > 2500:
# print(len(pnode), best_gain, len(pnode_right), len(pnode_left), numpy.mean(p_split_right), numpy.mean(dX[:,best_attribute]), numpy.nanmin(X[:,best_attribute]), numpy.nanmax(X[:,best_attribute]), best_attribute_value )
    return _tree(results=class_probas)
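# Hedged usage sketch for fit_tree. All values below are illustrative
# assumptions, not defaults defined in this module:
#
#   n_samples, n_features = X.shape
#   py = numpy.ones((n_samples, 2)) * 0.5        # per-object class probabilities
#   pnode = numpy.ones(n_samples)                # probability of reaching the root
#   is_max = numpy.ones(n_samples, dtype=numpy.bool_)
#   importances = numpy.zeros(n_features)
#   root = fit_tree(X, dX, py, py, pnode, depth=0, is_max=is_max,
#                   tree_max_depth=10, max_features=n_features,
#                   feature_importances=importances, tree_n_samples=n_samples,
#                   keep_proba=0.05)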
############################################################
############################################################
############################################################
############################################################
############ PREDICT ############
############################################################
############################################################
############################################################
############################################################
@jit(cache=cache, nopython=True)
def predict_all(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, X, dX, keep_proba, return_leafs):
nof_objects = X.shape[0]
nof_classes = len(node_tree_results[0])
result = numpy.zeros((nof_objects, nof_classes))
curr_node = 0
for i in range(nof_objects):
result[i] = predict_single(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, X[i], dX[i], curr_node, keep_proba, p_tree = 1.0, is_max = True, return_leafs=return_leafs)
return result
@jit(cache=cache, nopython=True)
def predict_single(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, x, dx, curr_node, keep_proba, p_tree = 1.0, is_max = True, return_leafs=False):
"""
    Classifies a single object according to the trained tree.
"""
node = curr_node
tree_results = node_tree_results[curr_node]
tree_feature_index = node_feature_idx[curr_node]
tree_feature_th = node_feature_th[curr_node]
true_branch_node = node_true_branch[curr_node]
false_branch_node = node_false_branch[curr_node]
p_right_node = node_p_right[curr_node]
nof_classes = len(tree_results)
if (tree_results[0] >= 0):
if return_leafs:
summed_prediction = tree_results*0 + node
else:
summed_prediction = tree_results * p_tree
else:
summed_prediction = numpy.zeros(nof_classes)
if is_max:
val = x[tree_feature_index]
delta = dx[tree_feature_index]
p_split = m.split_probability(val, delta, tree_feature_th)
if numpy.isnan(p_split):
p_split = p_right_node
p_true = p_tree * p_split
p_false = p_tree * (1 - p_split)
is_max_true = True
is_max_false = False
if p_split <= 0.5:
is_max_true = False
is_max_false = True
if ((p_true > keep_proba) or is_max_true):
summed_prediction += predict_single(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, x, dx, true_branch_node, keep_proba, p_true, is_max_true, return_leafs)
if ((p_false > keep_proba) or is_max_false):
summed_prediction += predict_single(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, x, dx, false_branch_node, keep_proba, p_false, is_max_false, return_leafs)
else:
is_max_true = False
is_max_false = False
val = x[tree_feature_index]
delta = dx[tree_feature_index]
p_split = m.split_probability(val, delta, tree_feature_th)
if numpy.isnan(p_split):
p_split = p_right_node
p_true = p_tree * p_split
p_false = p_tree * (1 - p_split)
if p_true > keep_proba:
summed_prediction += predict_single(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, x, dx, true_branch_node, keep_proba, p_true, is_max_true, return_leafs)
if p_false > keep_proba:
summed_prediction += predict_single(node_tree_results, node_feature_idx, node_feature_th, node_true_branch, node_false_branch, node_p_right, x, dx, false_branch_node, keep_proba, p_false, is_max_false, return_leafs)
return summed_prediction
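# Hedged glue sketch: the jitted predictors above expect the tree flattened into
# parallel per-node arrays. One plausible way to build them from a fitted root
# (n_classes is assumed known; internal nodes get a results row of -1.0 so the
# `tree_results[0] >= 0` leaf test in predict_single holds):
#
#   _, rows = root.get_node_list([], root, 0)
#   rows.sort(key=lambda r: r[0])
#   n_nodes = len(rows)
#   results = numpy.full((n_nodes, n_classes), -1.0)
#   feature_idx = numpy.full(n_nodes, -1, dtype=numpy.int64)
#   feature_th = numpy.zeros(n_nodes)
#   true_branch = numpy.full(n_nodes, -1, dtype=numpy.int64)
#   false_branch = numpy.full(n_nodes, -1, dtype=numpy.int64)
#   p_right = numpy.zeros(n_nodes)
#   for idx, f, th, t_br, f_br, res, pr in rows:
#       if res is not None:                      # leaf row
#           results[idx] = res
#       else:                                    # internal-node row
#           feature_idx[idx], feature_th[idx] = f, th
#           true_branch[idx], false_branch[idx], p_right[idx] = t_br, f_br, pr
#   proba = predict_all(results, feature_idx, feature_th, true_branch,
#                       false_branch, p_right, X, dX, 0.05, False)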
| [
"numpy.multiply",
"numpy.random.rand",
"numpy.random.choice",
"numpy.where",
"numpy.sum",
"numpy.zeros",
"numba.jit",
"numpy.isnan",
"numpy.vstack",
"numpy.concatenate"
] | [((8750, 8781), 'numba.jit', 'jit', ([], {'cache': 'cache', 'nopython': '(True)'}), '(cache=cache, nopython=True)\n', (8753, 8781), False, 'from numba import jit\n'), ((9367, 9398), 'numba.jit', 'jit', ([], {'cache': 'cache', 'nopython': '(True)'}), '(cache=cache, nopython=True)\n', (9370, 9398), False, 'from numba import jit\n'), ((2307, 2327), 'numpy.zeros', 'numpy.zeros', (['X.shape'], {}), '(X.shape)\n', (2318, 2327), False, 'import numpy\n'), ((3136, 3165), 'numpy.vstack', 'numpy.vstack', (['[X_real, X_syn]'], {}), '([X_real, X_syn])\n', (3148, 3165), False, 'import numpy\n'), ((3178, 3209), 'numpy.vstack', 'numpy.vstack', (['[dX_real, dX_syn]'], {}), '([dX_real, dX_syn])\n', (3190, 3209), False, 'import numpy\n'), ((3223, 3255), 'numpy.zeros', 'numpy.zeros', (['[X_new.shape[0], 2]'], {}), '([X_new.shape[0], 2])\n', (3234, 3255), False, 'import numpy\n'), ((3360, 3389), 'numpy.zeros', 'numpy.zeros', (['[X_new.shape[0]]'], {}), '([X_new.shape[0]])\n', (3371, 3389), False, 'import numpy\n'), ((3480, 3525), 'numpy.concatenate', 'numpy.concatenate', (['[is_max_real, is_max_real]'], {}), '([is_max_real, is_max_real])\n', (3497, 3525), False, 'import numpy\n'), ((9024, 9063), 'numpy.zeros', 'numpy.zeros', (['(nof_objects, nof_classes)'], {}), '((nof_objects, nof_classes))\n', (9035, 9063), False, 'import numpy\n'), ((2485, 2533), 'numpy.random.choice', 'numpy.random.choice', (['feature_values', 'nof_objects'], {}), '(feature_values, nof_objects)\n', (2504, 2533), False, 'import numpy\n'), ((2752, 2778), 'numpy.where', 'numpy.where', (['(py[:, 1] == 0)'], {}), '(py[:, 1] == 0)\n', (2763, 2778), False, 'import numpy\n'), ((5388, 5436), 'numpy.multiply', 'numpy.multiply', (['py_gini', 'pnode[:, numpy.newaxis]'], {}), '(py_gini, pnode[:, numpy.newaxis])\n', (5402, 5436), False, 'import numpy\n'), ((10333, 10357), 'numpy.zeros', 'numpy.zeros', (['nof_classes'], {}), '(nof_classes)\n', (10344, 10357), False, 'import numpy\n'), ((10570, 10590), 'numpy.isnan', 'numpy.isnan', (['p_split'], {}), '(p_split)\n', (10581, 10590), False, 'import numpy\n'), ((11788, 11808), 'numpy.isnan', 'numpy.isnan', (['p_split'], {}), '(p_split)\n', (11799, 11808), False, 'import numpy\n'), ((4778, 4797), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (4795, 4797), False, 'import numpy\n'), ((6346, 6368), 'numpy.sum', 'numpy.sum', (['pnode_right'], {}), '(pnode_right)\n', (6355, 6368), False, 'import numpy\n'), ((6381, 6402), 'numpy.sum', 'numpy.sum', (['pnode_left'], {}), '(pnode_left)\n', (6390, 6402), False, 'import numpy\n')] |